diff --git a/CMakeLists.txt b/CMakeLists.txt index e55a57852a..b0667982b2 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -22,7 +22,7 @@ option(RUN_PYTHON_TEST "Run Python API tests if building with Python" OFF) option(ENABLE_GUROBI "Enable use the Gurobi optimizer" OFF) option(ENABLE_OPENBLAS "Do symbolic bound tighting using blas" ON) # Not available on Windows option(CODE_COVERAGE "Add code coverage" OFF) # Available only in debug mode - +option(BUILD_TORCH "Build libtorch" ON) ################### ## Git variables ## ################### @@ -30,19 +30,19 @@ option(CODE_COVERAGE "Add code coverage" OFF) # Available only in debug mode # Get the name of the working branch execute_process( - COMMAND git rev-parse --abbrev-ref HEAD - WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} - OUTPUT_VARIABLE GIT_BRANCH - OUTPUT_STRIP_TRAILING_WHITESPACE + COMMAND git rev-parse --abbrev-ref HEAD + WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} + OUTPUT_VARIABLE GIT_BRANCH + OUTPUT_STRIP_TRAILING_WHITESPACE ) add_definitions("-DGIT_BRANCH=\"${GIT_BRANCH}\"") # Get the latest abbreviated commit hash of the working branch execute_process( - COMMAND git log -1 --format=%h - WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} - OUTPUT_VARIABLE GIT_COMMIT_HASH - OUTPUT_STRIP_TRAILING_WHITESPACE + COMMAND git log -1 --format=%h + WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} + OUTPUT_VARIABLE GIT_COMMIT_HASH + OUTPUT_STRIP_TRAILING_WHITESPACE ) add_definitions("-DGIT_COMMIT_HASH=\"${GIT_COMMIT_HASH}\"") @@ -60,9 +60,9 @@ set(COMMON_DIR "${SRC_DIR}/common") set(BASIS_DIR "${SRC_DIR}/basis_factorization") if (MSVC) - set(SCRIPT_EXTENSION bat) + set(SCRIPT_EXTENSION bat) else() - set(SCRIPT_EXTENSION sh) + set(SCRIPT_EXTENSION sh) endif() ########## @@ -85,20 +85,20 @@ add_definitions(-DBOOST_NO_CXX98_FUNCTION_BASE) set(BOOST_VERSION 1.84.0) set(BOOST_DIR "${TOOLS_DIR}/boost-${BOOST_VERSION}") if (MSVC) - set(BOOST_ROOT "${BOOST_DIR}/win_installed") - set(Boost_NAMESPACE libboost) + set(BOOST_ROOT 
"${BOOST_DIR}/win_installed") + set(Boost_NAMESPACE libboost) elseif (${CMAKE_SIZEOF_VOID_P} EQUAL 4 AND NOT MSVC) - set(BOOST_ROOT "${BOOST_DIR}/installed32") + set(BOOST_ROOT "${BOOST_DIR}/installed32") else() - set(BOOST_ROOT "${BOOST_DIR}/installed") + set(BOOST_ROOT "${BOOST_DIR}/installed") endif() set(Boost_USE_DEBUG_RUNTIME FALSE) find_package(Boost ${BOOST_VERSION} COMPONENTS program_options timer chrono thread) # Find boost if (NOT ${Boost_FOUND}) - execute_process(COMMAND ${TOOLS_DIR}/download_boost.${SCRIPT_EXTENSION} ${BOOST_VERSION}) - find_package(Boost ${BOOST_VERSION} REQUIRED COMPONENTS program_options timer chrono thread regex) + execute_process(COMMAND ${TOOLS_DIR}/download_boost.${SCRIPT_EXTENSION} ${BOOST_VERSION}) + find_package(Boost ${BOOST_VERSION} REQUIRED COMPONENTS program_options timer chrono thread regex) endif() set(LIBS_INCLUDES ${Boost_INCLUDE_DIRS}) list(APPEND LIBS ${Boost_LIBRARIES}) @@ -146,36 +146,63 @@ endif() file(GLOB DEPS_ONNX "${ONNX_DIR}/*.cc") include_directories(SYSTEM ${ONNX_DIR}) +############# +## Pytorch ## +############# + +if (${BUILD_TORCH}) + message(STATUS "Using pytorch") + if (NOT DEFINED BUILD_TORCH) + set(BUILD_TORCH $ENV{TORCH_HOME}) + add_definitions(-DBUILD_TORCH) + endif() + add_compile_definitions(BUILD_TORCH) + set(PYTORCH_VERSION 2.2.1) + find_package(Torch ${PYTORCH_VERSION} QUIET) + if (NOT Torch_FOUND) + set(PYTORCH_DIR "${TOOLS_DIR}/libtorch-${PYTORCH_VERSION}") + list(APPEND CMAKE_PREFIX_PATH ${PYTORCH_DIR}) + if(NOT EXISTS "${PYTORCH_DIR}") + execute_process(COMMAND ${TOOLS_DIR}/download_libtorch.sh ${PYTORCH_VERSION}) + set(Torch_NO_SYSTEM_PATHS ON) + endif() + set(Torch_DIR ${PYTORCH_DIR}/share/cmake/Torch) + find_package(Torch ${PYTORCH_VERSION} REQUIRED) + endif() + set(TORCH_CXX_FLAGS "-Wno-error=array-bounds") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${TORCH_CXX_FLAGS}") + list(APPEND LIBS ${TORCH_LIBRARIES}) +endif () ############ ## Gurobi ## ############ if (${ENABLE_GUROBI}) - 
message(STATUS "Using Gurobi for LP relaxation for bound tightening") - if (NOT DEFINED GUROBI_DIR) - set(GUROBI_DIR $ENV{GUROBI_HOME}) - endif() - add_compile_definitions(ENABLE_GUROBI) - - set(GUROBI_LIB1 "gurobi_c++") - set(GUROBI_LIB2 "gurobi110") - - add_library(${GUROBI_LIB1} SHARED IMPORTED) - set_target_properties(${GUROBI_LIB1} PROPERTIES IMPORTED_LOCATION ${GUROBI_DIR}/lib/libgurobi_c++.a) - list(APPEND LIBS ${GUROBI_LIB1}) - target_include_directories(${GUROBI_LIB1} INTERFACE ${GUROBI_DIR}/include/) - - add_library(${GUROBI_LIB2} SHARED IMPORTED) - - # MACOSx uses .dylib instead of .so for its Gurobi downloads. - if (APPLE) - set_target_properties(${GUROBI_LIB2} PROPERTIES IMPORTED_LOCATION ${GUROBI_DIR}/lib/libgurobi110.dylib) - else() - set_target_properties(${GUROBI_LIB2} PROPERTIES IMPORTED_LOCATION ${GUROBI_DIR}/lib/libgurobi110.so) - endif () - - list(APPEND LIBS ${GUROBI_LIB2}) - target_include_directories(${GUROBI_LIB2} INTERFACE ${GUROBI_DIR}/include/) + message(STATUS "Using Gurobi for LP relaxation for bound tightening") + if (NOT DEFINED GUROBI_DIR) + set(GUROBI_DIR $ENV{GUROBI_HOME}) + endif() + add_compile_definitions(ENABLE_GUROBI) + + set(GUROBI_LIB1 "gurobi_c++") + set(GUROBI_LIB2 "gurobi110") + + add_library(${GUROBI_LIB1} SHARED IMPORTED) + set_target_properties(${GUROBI_LIB1} PROPERTIES IMPORTED_LOCATION ${GUROBI_DIR}/lib/libgurobi_c++.a) + list(APPEND LIBS ${GUROBI_LIB1}) + target_include_directories(${GUROBI_LIB1} INTERFACE ${GUROBI_DIR}/include/) + + add_library(${GUROBI_LIB2} SHARED IMPORTED) + + # MACOSx uses .dylib instead of .so for its Gurobi downloads. 
+ if (APPLE) + set_target_properties(${GUROBI_LIB2} PROPERTIES IMPORTED_LOCATION ${GUROBI_DIR}/lib/libgurobi110.dylib) + else() + set_target_properties(${GUROBI_LIB2} PROPERTIES IMPORTED_LOCATION ${GUROBI_DIR}/lib/libgurobi110.so) + endif () + + list(APPEND LIBS ${GUROBI_LIB2}) + target_include_directories(${GUROBI_LIB2} INTERFACE ${GUROBI_DIR}/include/) endif() ############## @@ -183,30 +210,30 @@ endif() ############## if (NOT MSVC AND ${ENABLE_OPENBLAS}) - set(OPENBLAS_VERSION 0.3.19) - - set(OPENBLAS_LIB openblas) - set(OPENBLAS_DEFAULT_DIR "${TOOLS_DIR}/OpenBLAS-${OPENBLAS_VERSION}") - if (NOT OPENBLAS_DIR) - set(OPENBLAS_DIR ${OPENBLAS_DEFAULT_DIR}) - endif() - - message(STATUS "Using OpenBLAS for matrix multiplication") - add_compile_definitions(ENABLE_OPENBLAS) - if(NOT EXISTS "${OPENBLAS_DIR}/installed/lib/libopenblas.a") - message("Can't find OpenBLAS, installing. If OpenBLAS is installed please use the OPENBLAS_DIR parameter to pass the path") - if (${OPENBLAS_DIR} STREQUAL ${OPENBLAS_DEFAULT_DIR}) - message("Installing OpenBLAS") - execute_process(COMMAND ${TOOLS_DIR}/download_openBLAS.sh ${OPENBLAS_VERSION}) - else() - message(FATAL_ERROR "Can't find OpenBLAS in the supplied directory") - endif() - endif() - - add_library(${OPENBLAS_LIB} SHARED IMPORTED) - set_target_properties(${OPENBLAS_LIB} PROPERTIES IMPORTED_LOCATION ${OPENBLAS_DIR}/installed/lib/libopenblas.a) - list(APPEND LIBS ${OPENBLAS_LIB}) - target_include_directories(${OPENBLAS_LIB} INTERFACE ${OPENBLAS_DIR}/installed/include) + set(OPENBLAS_VERSION 0.3.19) + + set(OPENBLAS_LIB openblas) + set(OPENBLAS_DEFAULT_DIR "${TOOLS_DIR}/OpenBLAS-${OPENBLAS_VERSION}") + if (NOT OPENBLAS_DIR) + set(OPENBLAS_DIR ${OPENBLAS_DEFAULT_DIR}) + endif() + + message(STATUS "Using OpenBLAS for matrix multiplication") + add_compile_definitions(ENABLE_OPENBLAS) + if(NOT EXISTS "${OPENBLAS_DIR}/installed/lib/libopenblas.a") + message("Can't find OpenBLAS, installing. 
If OpenBLAS is installed please use the OPENBLAS_DIR parameter to pass the path") + if (${OPENBLAS_DIR} STREQUAL ${OPENBLAS_DEFAULT_DIR}) + message("Installing OpenBLAS") + execute_process(COMMAND ${TOOLS_DIR}/download_openBLAS.sh ${OPENBLAS_VERSION}) + else() + message(FATAL_ERROR "Can't find OpenBLAS in the supplied directory") + endif() + endif() + + add_library(${OPENBLAS_LIB} SHARED IMPORTED) + set_target_properties(${OPENBLAS_LIB} PROPERTIES IMPORTED_LOCATION ${OPENBLAS_DIR}/installed/lib/libopenblas.a) + list(APPEND LIBS ${OPENBLAS_LIB}) + target_include_directories(${OPENBLAS_LIB} INTERFACE ${OPENBLAS_DIR}/installed/include) endif() ########### @@ -239,7 +266,7 @@ set(INPUT_PARSERS_DIR input_parsers) include(ProcessorCount) ProcessorCount(CTEST_NTHREADS) if(CTEST_NTHREADS EQUAL 0) - set(CTEST_NTHREADS 1) + set(CTEST_NTHREADS 1) endif() # --------------- set build type ---------------------------- @@ -247,20 +274,20 @@ set(BUILD_TYPES Release Debug MinSizeRel RelWithDebInfo) # Set the default build type to Production if(NOT CMAKE_BUILD_TYPE) - set(CMAKE_BUILD_TYPE - Release CACHE STRING "Options are: Release Debug MinSizeRel RelWithDebInfo" FORCE) - # Provide drop down menu options in cmake-gui - set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS ${BUILD_TYPES}) + set(CMAKE_BUILD_TYPE + Release CACHE STRING "Options are: Release Debug MinSizeRel RelWithDebInfo" FORCE) + # Provide drop down menu options in cmake-gui + set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS ${BUILD_TYPES}) endif() message(STATUS "Building ${CMAKE_BUILD_TYPE} build") #-------------------------set code coverage----------------------------------# # Allow coverage only in debug mode only in gcc if(CODE_COVERAGE AND CMAKE_CXX_COMPILER_ID MATCHES "GNU" AND CMAKE_BUILD_TYPE MATCHES Debug) - message(STATUS "Building with code coverage") - set(COVERAGE_COMPILER_FLAGS "-g -O0 --coverage" CACHE INTERNAL "") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COVERAGE_COMPILER_FLAGS}") - 
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} --coverage") + message(STATUS "Building with code coverage") + set(COVERAGE_COMPILER_FLAGS "-g -O0 --coverage" CACHE INTERNAL "") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COVERAGE_COMPILER_FLAGS}") + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} --coverage") endif() # We build a static library that is the core of the project, the link it to the @@ -273,7 +300,7 @@ set(MARABOU_EXE Marabou${CMAKE_EXECUTABLE_SUFFIX}) add_executable(${MARABOU_EXE} "${ENGINE_DIR}/main.cpp") set(MARABOU_EXE_PATH "${BIN_DIR}/${MARABOU_EXE}") add_custom_command(TARGET ${MARABOU_EXE} POST_BUILD - COMMAND ${CMAKE_COMMAND} -E copy $ ${MARABOU_EXE_PATH} ) + COMMAND ${CMAKE_COMMAND} -E copy $ ${MARABOU_EXE_PATH} ) set(MPS_PARSER_PATH "${BIN_DIR}/${MPS_PARSER}") @@ -314,10 +341,10 @@ find_package(Threads REQUIRED) list(APPEND LIBS Threads::Threads) if (BUILD_STATIC_MARABOU) - # build a static library - target_link_libraries(${MARABOU_LIB} ${LIBS} -static) + # build a static library + target_link_libraries(${MARABOU_LIB} ${LIBS} -static) else() - target_link_libraries(${MARABOU_LIB} ${LIBS}) + target_link_libraries(${MARABOU_LIB} ${LIBS}) endif() target_include_directories(${MARABOU_LIB} PRIVATE ${LIBS_INCLUDES}) @@ -347,10 +374,10 @@ endif() set(PYTHON32 FALSE) if(${BUILD_PYTHON}) execute_process(COMMAND "${PYTHON_EXECUTABLE}" "-c" - "import struct; print(struct.calcsize('@P'));" - RESULT_VARIABLE _PYTHON_SUCCESS - OUTPUT_VARIABLE PYTHON_SIZEOF_VOID_P - ERROR_VARIABLE _PYTHON_ERROR_VALUE) + "import struct; print(struct.calcsize('@P'));" + RESULT_VARIABLE _PYTHON_SUCCESS + OUTPUT_VARIABLE PYTHON_SIZEOF_VOID_P + ERROR_VARIABLE _PYTHON_ERROR_VALUE) # message("PYTHON SIZEOF VOID p ${PYTHON_SIZEOF_VOID_P}") if (PYTHON_SIZEOF_VOID_P EQUAL 4 AND NOT ${FORCE_PYTHON_BUILD}) set(PYTHON32 TRUE) @@ -370,8 +397,8 @@ endif() # Actually build Python if (${BUILD_PYTHON}) - set(PYBIND11_VERSION 2.10.4) - set(PYBIND11_DIR 
"${TOOLS_DIR}/pybind11-${PYBIND11_VERSION}") + set(PYBIND11_VERSION 2.10.4) + set(PYBIND11_DIR "${TOOLS_DIR}/pybind11-${PYBIND11_VERSION}") # This is suppose to set the PYTHON_EXECUTABLE variable # First try to find the default python version @@ -383,7 +410,7 @@ if (${BUILD_PYTHON}) if (NOT EXISTS ${PYBIND11_DIR}) message("didnt find pybind, getting it") - execute_process(COMMAND ${TOOLS_DIR}/download_pybind11.${SCRIPT_EXTENSION} ${PYBIND11_VERSION}) + execute_process(COMMAND ${TOOLS_DIR}/download_pybind11.${SCRIPT_EXTENSION} ${PYBIND11_VERSION}) endif() add_subdirectory(${PYBIND11_DIR}) @@ -394,7 +421,7 @@ if (${BUILD_PYTHON}) target_include_directories(${MARABOU_PY} PRIVATE ${LIBS_INCLUDES}) set_target_properties(${MARABOU_PY} PROPERTIES - LIBRARY_OUTPUT_DIRECTORY ${PYTHON_LIBRARY_OUTPUT_DIRECTORY}) + LIBRARY_OUTPUT_DIRECTORY ${PYTHON_LIBRARY_OUTPUT_DIRECTORY}) if(NOT MSVC) target_compile_options(${MARABOU_LIB} PRIVATE -fPIC ${RELEASE_FLAGS}) endif() @@ -425,8 +452,8 @@ target_compile_options(${MARABOU_TEST_LIB} PRIVATE ${CXXTEST_FLAGS}) add_custom_target(build-tests ALL) add_custom_target(check - COMMAND ctest --output-on-failure -j${CTEST_NTHREADS} $$ARGS - DEPENDS build-tests build_input_parsers ${MARABOU_EXE}) + COMMAND ctest --output-on-failure -j${CTEST_NTHREADS} $$ARGS + DEPENDS build-tests build_input_parsers ${MARABOU_EXE}) # Decide which tests to run and execute set(TESTS_TO_RUN "") @@ -452,32 +479,32 @@ if (NOT ${TESTS_TO_RUN} STREQUAL "") # make ctest verbose set(CTEST_OUTPUT_ON_FAILURE 1) add_custom_command( - TARGET build-tests - POST_BUILD - COMMAND ctest --output-on-failure -L "\"(${TESTS_TO_RUN})\"" -j${CTEST_NTHREADS} $$ARGS + TARGET build-tests + POST_BUILD + COMMAND ctest --output-on-failure -L "\"(${TESTS_TO_RUN})\"" -j${CTEST_NTHREADS} $$ARGS ) endif() if (${BUILD_PYTHON} AND ${RUN_PYTHON_TEST}) if (MSVC) add_custom_command( - TARGET build-tests - POST_BUILD - COMMAND cp ${PYTHON_API_DIR}/Release/* ${PYTHON_API_DIR} + TARGET build-tests + 
POST_BUILD + COMMAND cp ${PYTHON_API_DIR}/Release/* ${PYTHON_API_DIR} ) endif() add_custom_command( - TARGET build-tests - POST_BUILD - COMMAND ${PYTHON_EXECUTABLE} -m pytest ${PYTHON_API_DIR}/test + TARGET build-tests + POST_BUILD + COMMAND ${PYTHON_EXECUTABLE} -m pytest ${PYTHON_API_DIR}/test ) endif() # Add the input parsers add_custom_target(build_input_parsers) add_dependencies(build_input_parsers ${MPS_PARSER} ${ACAS_PARSER} - ${BERKELEY_PARSER}) + ${BERKELEY_PARSER}) add_subdirectory(${SRC_DIR}) add_subdirectory(${TOOLS_DIR}) diff --git a/src/basis_factorization/GaussianEliminator.h b/src/basis_factorization/GaussianEliminator.h index 2177021e55..6f93605ff6 100644 --- a/src/basis_factorization/GaussianEliminator.h +++ b/src/basis_factorization/GaussianEliminator.h @@ -19,7 +19,7 @@ #include "LUFactors.h" #define GAUSSIAN_LOG( x, ... ) \ - LOG( GlobalConfiguration::GAUSSIAN_ELIMINATION_LOGGING, "GaussianEliminator: %s\n", x ) + MARABOU_LOG( GlobalConfiguration::GAUSSIAN_ELIMINATION_LOGGING, "GaussianEliminator: %s\n", x ) class GaussianEliminator { diff --git a/src/basis_factorization/LUFactorization.h b/src/basis_factorization/LUFactorization.h index 400d53eb88..ae4befd5e7 100644 --- a/src/basis_factorization/LUFactorization.h +++ b/src/basis_factorization/LUFactorization.h @@ -22,7 +22,7 @@ #include "List.h" #define LU_FACTORIZATION_LOG( x, ... ) \ - LOG( GlobalConfiguration::BASIS_FACTORIZATION_LOGGING, "LUFactorization: %s\n", x ) + MARABOU_LOG( GlobalConfiguration::BASIS_FACTORIZATION_LOGGING, "LUFactorization: %s\n", x ) class EtaMatrix; class LPElement; diff --git a/src/basis_factorization/SparseFTFactorization.h b/src/basis_factorization/SparseFTFactorization.h index 906f5b205e..b885cab4b2 100644 --- a/src/basis_factorization/SparseFTFactorization.h +++ b/src/basis_factorization/SparseFTFactorization.h @@ -24,7 +24,7 @@ #include "Statistics.h" #define SFTF_FACTORIZATION_LOG( x, ... 
) \ - LOG( GlobalConfiguration::BASIS_FACTORIZATION_LOGGING, "SparseFTFactorization: %s\n", x ) + MARABOU_LOG( GlobalConfiguration::BASIS_FACTORIZATION_LOGGING, "SparseFTFactorization: %s\n", x ) /* This class performs a sparse FT factorization of a given matrix. diff --git a/src/basis_factorization/SparseGaussianEliminator.h b/src/basis_factorization/SparseGaussianEliminator.h index 48078b42d9..fd6a061dce 100644 --- a/src/basis_factorization/SparseGaussianEliminator.h +++ b/src/basis_factorization/SparseGaussianEliminator.h @@ -23,7 +23,7 @@ #include "Statistics.h" #define SGAUSSIAN_LOG( x, ... ) \ - LOG( GlobalConfiguration::GAUSSIAN_ELIMINATION_LOGGING, "SparseGaussianEliminator: %s\n", x ) + MARABOU_LOG( GlobalConfiguration::GAUSSIAN_ELIMINATION_LOGGING, "SparseGaussianEliminator: %s\n", x ) class SparseGaussianEliminator { diff --git a/src/basis_factorization/SparseLUFactorization.h b/src/basis_factorization/SparseLUFactorization.h index 7b925fec48..7d75ebe3c4 100644 --- a/src/basis_factorization/SparseLUFactorization.h +++ b/src/basis_factorization/SparseLUFactorization.h @@ -22,7 +22,7 @@ #include "SparseLUFactors.h" #define BASIS_FACTORIZATION_LOG( x, ... ) \ - LOG( GlobalConfiguration::BASIS_FACTORIZATION_LOGGING, "SparseLUFactorization: %s\n", x ) + MARABOU_LOG( GlobalConfiguration::BASIS_FACTORIZATION_LOGGING, "SparseLUFactorization: %s\n", x ) class EtaMatrix; class LPElement; diff --git a/src/cegar/IncrementalLinearization.h b/src/cegar/IncrementalLinearization.h index ddf5b00fcf..9260e7e57c 100644 --- a/src/cegar/IncrementalLinearization.h +++ b/src/cegar/IncrementalLinearization.h @@ -20,7 +20,7 @@ #include "Query.h" #define INCREMENTAL_LINEARIZATION_LOG( x, ... 
) \ - LOG( GlobalConfiguration::CEGAR_LOGGING, "IncrementalLinearization: %s\n", x ) + MARABOU_LOG( GlobalConfiguration::CEGAR_LOGGING, "IncrementalLinearization: %s\n", x ) class Engine; class IQuery; diff --git a/src/common/Debug.h b/src/common/Debug.h index 55dfda4a92..3d0cb9554f 100644 --- a/src/common/Debug.h +++ b/src/common/Debug.h @@ -27,7 +27,7 @@ #endif #ifndef NDEBUG -#define LOG( x, f, y, ... ) \ +#define MARABOU_LOG( x, f, y, ... ) \ { \ if ( ( x ) ) \ { \ @@ -35,7 +35,7 @@ } \ } #else -#define LOG( x, f, y, ... ) \ +#define MARABOU_LOG( x, f, y, ... ) \ { \ } #endif diff --git a/src/configuration/GlobalConfiguration.cpp b/src/configuration/GlobalConfiguration.cpp index f9a074f076..6cbb8c9bb1 100644 --- a/src/configuration/GlobalConfiguration.cpp +++ b/src/configuration/GlobalConfiguration.cpp @@ -119,6 +119,15 @@ const bool GlobalConfiguration::WRITE_JSON_PROOF = false; const unsigned GlobalConfiguration::BACKWARD_BOUND_PROPAGATION_DEPTH = 3; const unsigned GlobalConfiguration::MAX_ROUNDS_OF_BACKWARD_ANALYSIS = 10; +const GlobalConfiguration::PdgBoundType GlobalConfiguration::PGD_BOUND_TYPE = + GlobalConfiguration::ATTACK_INPUT; +const unsigned GlobalConfiguration::PGD_DEFAULT_NUM_ITER = 10; +const unsigned GlobalConfiguration::PGD_NUM_RESTARTS = 4; +const double GlobalConfiguration::ATTACK_INPUT_RANGE = 1000; +const unsigned GlobalConfiguration::CW_DEFAULT_ITERS = 1000; +const unsigned GlobalConfiguration::CW_NUM_RESTARTS = 4; +const double GlobalConfiguration::CW_LR = 1e-2; + #ifdef ENABLE_GUROBI const unsigned GlobalConfiguration::GUROBI_NUMBER_OF_THREADS = 1; const bool GlobalConfiguration::GUROBI_LOGGING = false; @@ -143,6 +152,8 @@ const bool GlobalConfiguration::ONNX_PARSER_LOGGING = false; const bool GlobalConfiguration::SOI_LOGGING = false; const bool GlobalConfiguration::SCORE_TRACKER_LOGGING = false; const bool GlobalConfiguration::CEGAR_LOGGING = false; +const bool GlobalConfiguration::CUSTOM_DNN_LOGGING = true; +const bool 
GlobalConfiguration::CW_LOGGING = true; const bool GlobalConfiguration::USE_SMART_FIX = false; const bool GlobalConfiguration::USE_LEAST_FIX = false; diff --git a/src/configuration/GlobalConfiguration.h b/src/configuration/GlobalConfiguration.h index 3104edf79d..b34a508a7e 100644 --- a/src/configuration/GlobalConfiguration.h +++ b/src/configuration/GlobalConfiguration.h @@ -263,6 +263,19 @@ class GlobalConfiguration */ static const unsigned MAX_ROUNDS_OF_BACKWARD_ANALYSIS; + enum PdgBoundType { + ATTACK_INPUT = 0, + ATTACK_OUTPUT = 1 + }; + static const PdgBoundType PGD_BOUND_TYPE; + static const unsigned PGD_DEFAULT_NUM_ITER; + static const unsigned PGD_NUM_RESTARTS; + static const double ATTACK_INPUT_RANGE; + + static const unsigned CW_DEFAULT_ITERS; + static const unsigned CW_NUM_RESTARTS; + static const double CW_LR; + #ifdef ENABLE_GUROBI /* The number of threads Gurobi spawns @@ -292,6 +305,8 @@ class GlobalConfiguration static const bool SOI_LOGGING; static const bool SCORE_TRACKER_LOGGING; static const bool CEGAR_LOGGING; + static const bool CUSTOM_DNN_LOGGING; + static const bool CW_LOGGING; }; #endif // __GlobalConfiguration_h__ diff --git a/src/configuration/Options.cpp b/src/configuration/Options.cpp index 657ddb6b7c..4e05fc295a 100644 --- a/src/configuration/Options.cpp +++ b/src/configuration/Options.cpp @@ -71,6 +71,7 @@ void Options::initializeDefaultValues() _intOptions[SEED] = 1; _intOptions[NUM_BLAS_THREADS] = 1; _intOptions[NUM_CONSTRAINTS_TO_REFINE_INC_LIN] = 30; + _intOptions[ATTACK_TIMEOUT] = 60; /* Float options @@ -91,7 +92,7 @@ void Options::initializeDefaultValues() _stringOptions[SUMMARY_FILE] = ""; _stringOptions[SPLITTING_STRATEGY] = "auto"; _stringOptions[SNC_SPLITTING_STRATEGY] = "auto"; - _stringOptions[SYMBOLIC_BOUND_TIGHTENING_TYPE] = "deeppoly"; + _stringOptions[SYMBOLIC_BOUND_TIGHTENING_TYPE] = "alphacrown"; _stringOptions[MILP_SOLVER_BOUND_TIGHTENING_TYPE] = "none"; _stringOptions[QUERY_DUMP_FILE] = ""; 
_stringOptions[IMPORT_ASSIGNMENT_FILE_PATH] = "assignment.txt"; @@ -189,10 +190,12 @@ SymbolicBoundTighteningType Options::getSymbolicBoundTighteningType() const return SymbolicBoundTighteningType::SYMBOLIC_BOUND_TIGHTENING; else if ( strategyString == "deeppoly" ) return SymbolicBoundTighteningType::DEEP_POLY; + else if (strategyString == "alphacrown") + return SymbolicBoundTighteningType::ALPHA_CROWN; else if ( strategyString == "none" ) return SymbolicBoundTighteningType::NONE; else - return SymbolicBoundTighteningType::DEEP_POLY; + return SymbolicBoundTighteningType::ALPHA_CROWN; } MILPSolverBoundTighteningType Options::getMILPSolverBoundTighteningType() const diff --git a/src/configuration/Options.h b/src/configuration/Options.h index 4dd4f31fd5..3afec75a56 100644 --- a/src/configuration/Options.h +++ b/src/configuration/Options.h @@ -154,6 +154,8 @@ class Options // The strategy used for initializing the soi SOI_INITIALIZATION_STRATEGY, + // Adversarial attack timeout in seconds + ATTACK_TIMEOUT, // The procedure/solver for solving the LP LP_SOLVER }; diff --git a/src/engine/CDSmtCore.h b/src/engine/CDSmtCore.h index 8cbeebc5c0..a352f2c6ec 100644 --- a/src/engine/CDSmtCore.h +++ b/src/engine/CDSmtCore.h @@ -76,7 +76,7 @@ #include "context/cdlist.h" #include "context/context.h" -#define SMT_LOG( x, ... ) LOG( GlobalConfiguration::SMT_CORE_LOGGING, "CDSmtCore: %s\n", x ) +#define SMT_LOG( x, ... ) MARABOU_LOG( GlobalConfiguration::SMT_CORE_LOGGING, "CDSmtCore: %s\n", x ) class EngineState; class Engine; diff --git a/src/engine/CWAttack.cpp b/src/engine/CWAttack.cpp new file mode 100644 index 0000000000..27b3c5744c --- /dev/null +++ b/src/engine/CWAttack.cpp @@ -0,0 +1,256 @@ +#include "CWAttack.h" + +#ifdef BUILD_TORCH + +CWAttack::CWAttack( NLR::NetworkLevelReasoner *networkLevelReasoner ) + : networkLevelReasoner( networkLevelReasoner ) + , _device( torch::cuda::is_available() ? 
torch::kCUDA : torch::kCPU ) + , _model( std::make_unique( networkLevelReasoner ) ) + , _iters( GlobalConfiguration::CW_DEFAULT_ITERS ) + , _restarts( GlobalConfiguration::CW_NUM_RESTARTS ) + , _specLossWeight( 1e-2 ) + , _adversarialInput( nullptr ) + , _adversarialOutput( nullptr ) +{ + _inputSize = _model->getLayerSizes().first(); + getBounds( _inputBounds, GlobalConfiguration::PdgBoundType::ATTACK_INPUT ); + getBounds( _outputBounds, GlobalConfiguration::PdgBoundType::ATTACK_OUTPUT ); + + _inputLb = torch::tensor( _inputBounds.first.getContainer(), torch::kFloat32 ).to( _device ); + _inputUb = torch::tensor( _inputBounds.second.getContainer(), torch::kFloat32 ).to( _device ); + + auto vars = generateSampleAndEpsilon(); + _x0 = vars.first; +} + +CWAttack::~CWAttack() +{ + if ( _adversarialInput ) + delete[] _adversarialInput; + if ( _adversarialOutput ) + delete[] _adversarialOutput; +} + +bool CWAttack::runAttack() +{ + CW_LOG( "-----Starting CW attack-----" ); + auto adversarial = findAdvExample(); + auto advInput = adversarial.first.to( torch::kDouble ); + auto advPred = adversarial.second.to( torch::kDouble ); + + bool isFooled = + isWithinBounds( advInput, _inputBounds ) && isWithinBounds( advPred, _outputBounds ); + + auto inputPtr = advInput.data_ptr(); + auto predPtr = advPred.data_ptr(); + size_t outSize = advPred.size( 1 ); + + if ( isFooled ) + { + _adversarialInput = new double[_inputSize]; + _adversarialOutput = new double[outSize]; + std::copy( inputPtr, inputPtr + _inputSize, _adversarialInput ); + std::copy( predPtr, predPtr + outSize, _adversarialOutput ); + } + CW_LOG( "Input Lower Bounds : " ); + for ( auto &bound : _inputBounds.first.getContainer() ) + printValue( bound ); + CW_LOG( "Input Upper Bounds : " ); + for ( auto &bound : _inputBounds.second.getContainer() ) + printValue( bound ); + + CW_LOG( "Adversarial Input:" ); + for ( int i = 0; i < advInput.numel(); ++i ) + { + CW_LOG( Stringf( "x%u=%.3lf", i, inputPtr[i] ).ascii() ); + } + 
CW_LOG( "Output Lower Bounds : " ); + for ( auto &bound : _outputBounds.first.getContainer() ) + printValue( bound ); + CW_LOG( "Output Upper Bounds : " ); + for ( auto &bound : _outputBounds.second.getContainer() ) + printValue( bound ); + + CW_LOG( "Adversarial Prediction: " ); + for ( int i = 0; i < advPred.numel(); ++i ) + { + CW_LOG( Stringf( "y%u=%.3lf", i, predPtr[i] ).ascii() ); + } + + + if ( isFooled ) + { + CW_LOG( "Model fooled: Yes \n ------ CW Attack Succeed ------\n" ); + } + else + CW_LOG( "Model fooled: No \n ------ CW Attack Failed ------\n" ); + // Concretize assignments if attack succeeded + if ( _adversarialInput ) + networkLevelReasoner->concretizeInputAssignment( _assignments, _adversarialInput ); + return isFooled; +} + + +void CWAttack::getBounds( std::pair, Vector> &bounds, signed type ) const +{ + unsigned layerIndex = type == GlobalConfiguration::PdgBoundType::ATTACK_INPUT + ? 0 + : networkLevelReasoner->getNumberOfLayers() - 1; + const NLR::Layer *layer = networkLevelReasoner->getLayer( layerIndex ); + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + bounds.first.append( layer->getLb( i ) ); + bounds.second.append( layer->getUb( i ) ); + } +} + +std::pair CWAttack::generateSampleAndEpsilon() +{ + Vector sample( _inputSize, 0.0 ), eps( _inputSize, 0.0 ); + for ( unsigned i = 0; i < _inputSize; ++i ) + { + double lo = _inputBounds.first.get( i ), hi = _inputBounds.second.get( i ); + if ( std::isfinite( lo ) && std::isfinite( hi ) ) + { + sample[i] = 0.5 * ( lo + hi ); + eps[i] = 0.5 * ( hi - lo ); + } + else + { + sample[i] = 0.0; + eps[i] = GlobalConfiguration::ATTACK_INPUT_RANGE; + } + } + auto s = torch::tensor( sample.getContainer(), torch::kFloat32 ).unsqueeze( 0 ).to( _device ); + auto e = torch::tensor( eps.getContainer(), torch::kFloat32 ).to( _device ); + return { s, e }; +} + +torch::Tensor CWAttack::calculateLoss( const torch::Tensor &pred ) +{ + auto lb = torch::tensor( _outputBounds.first.data(), torch::kFloat32 ).to( 
_device ); + auto ub = torch::tensor( _outputBounds.second.data(), torch::kFloat32 ).to( _device ); + auto ubv = torch::sum( torch::square( torch::relu( pred - ub ) ) ); + auto lbv = torch::sum( torch::square( torch::relu( lb - pred ) ) ); + return ( ubv + lbv ).to( _device ); +} + +std::pair CWAttack::findAdvExample() +{ + torch::Tensor bestAdv, bestPred; + double bestL2 = std::numeric_limits::infinity(); + double timeoutForAttack = ( Options::get()->getInt( Options::ATTACK_TIMEOUT ) == 0 + ? FloatUtils::infinity() + : Options::get()->getInt( Options::ATTACK_TIMEOUT ) ); + CW_LOG( Stringf( "Adversarial attack timeout set to %f\n", timeoutForAttack ).ascii() ); + timespec startTime = TimeUtils::sampleMicro(); + torch::Tensor advExample; + for ( unsigned r = 0; r < _restarts; ++r ) + { + unsigned long timePassed = TimeUtils::timePassed( startTime, TimeUtils::sampleMicro() ); + if ( static_cast( timePassed ) / MICROSECONDS_TO_SECONDS > timeoutForAttack ) + { + throw MarabouError( MarabouError::TIMEOUT, "Attack failed due to timeout" ); + } + torch::Tensor delta = torch::zeros_like( _x0, torch::requires_grad() ).to( _device ); + torch::optim::Adam optimizer( { delta }, + torch::optim::AdamOptions( GlobalConfiguration::CW_LR ) ); + + for ( unsigned it = 0; it < _iters; ++it ) + { + torch::Tensor prevExample = advExample; + advExample = ( _x0 + delta ).clamp( _inputLb, _inputUb ); + // Skip the equality check on the first iteration + if ( ( it > 0 && prevExample.defined() && advExample.equal( prevExample ) ) || + !isWithinBounds( advExample, _inputBounds ) ) + break; + auto pred = _model->forward( advExample ); + auto specLoss = calculateLoss( pred ); + auto l2norm = torch::sum( torch::pow( advExample - _x0, 2 ) ); + auto loss = l2norm + _specLossWeight * specLoss; + + optimizer.zero_grad(); + loss.backward(); + optimizer.step(); + + if ( specLoss.item() == 0.0 ) + { + double curL2 = l2norm.item(); + if ( curL2 < bestL2 ) + { + bestL2 = curL2; + bestAdv = 
advExample.detach(); + bestPred = pred.detach(); + } + } + } + } + + if ( !bestAdv.defined() ) + { + bestAdv = ( _x0 + torch::zeros_like( _x0 ) ).clamp( _inputLb, _inputUb ); + bestPred = _model->forward( bestAdv ); + } + + return { bestAdv, bestPred }; +} + +bool CWAttack::isWithinBounds( const torch::Tensor &sample, + const std::pair, Vector> &bounds ) +{ + torch::Tensor flatInput = sample.view( { -1 } ); + if ( flatInput.numel() != (int)bounds.first.size() || + flatInput.numel() != (int)bounds.second.size() ) + throw std::runtime_error( "Mismatch in sizes of input and bounds" ); + + for ( int64_t i = 0; i < flatInput.size( 0 ); ++i ) + { + double v = flatInput[i].item(); + double lo = bounds.first.get( i ), hi = bounds.second.get( i ); + if ( std::isinf( lo ) && std::isinf( hi ) ) + continue; + if ( std::isinf( lo ) ) + { + if ( v > hi ) + return false; + } + else if ( std::isinf( hi ) ) + { + if ( v < lo ) + return false; + } + else if ( v < lo || v > hi ) + return false; + } + return true; +} + +double CWAttack::getAssignment( int index ) +{ + return _assignments[index]; +} + +void CWAttack::printValue( double value ) +{ + if ( std::isinf( value ) ) + { + if ( value < 0 ) + { + CW_LOG( "-inf" ); + } + else + { + CW_LOG( "inf" ); + } + } + else if ( std::isnan( value ) ) + { + CW_LOG( "nan" ); + } + else + { + CW_LOG( Stringf( "%.3lf", value ).ascii() ); + } +} + +#endif // BUILD_TORCH diff --git a/src/engine/CWAttack.h b/src/engine/CWAttack.h new file mode 100644 index 0000000000..742c99fe7b --- /dev/null +++ b/src/engine/CWAttack.h @@ -0,0 +1,64 @@ +#ifndef __CWATTACK_H__ +#define __CWATTACK_H__ +#ifdef BUILD_TORCH + +#include "CustomDNN.h" +#include "InputQuery.h" +#include "Options.h" + +#include +#include +#include + +#define CW_LOG( x, ... ) MARABOU_LOG( GlobalConfiguration::CW_LOGGING, "CW: %s\n", x ) + +/** + CWAttack implements the Carlini–Wagner L2 adversarial attack, + optimizing min ||δ||_2^2 + c * specLoss(x0 + δ) with Adam and restarts. 
+*/ +class CWAttack +{ +public: + enum { + MICROSECONDS_TO_SECONDS = 1000000 + }; + CWAttack( NLR::NetworkLevelReasoner *networkLevelReasoner ); + ~CWAttack(); + + /** + Runs the CW attack. Returns true if a valid adversarial example is found. + */ + bool runAttack(); + double getAssignment( int index ); + +private: + NLR::NetworkLevelReasoner *networkLevelReasoner; + torch::Device _device; + std::unique_ptr _model; + + unsigned _inputSize; + unsigned _iters; + unsigned _restarts; + double _specLossWeight; + + std::pair, Vector> _inputBounds; + std::pair, Vector> _outputBounds; + torch::Tensor _inputLb; + torch::Tensor _inputUb; + torch::Tensor _x0; + + Map _assignments; + double *_adversarialInput; + double *_adversarialOutput; + + void getBounds( std::pair, Vector> &bounds, signed type ) const; + std::pair generateSampleAndEpsilon(); + torch::Tensor calculateLoss( const torch::Tensor &predictions ); + std::pair findAdvExample(); + static bool isWithinBounds( const torch::Tensor &sample, + const std::pair, Vector> &bounds ); + static void printValue( double value ); +}; + +#endif // BUILD_TORCH +#endif // __CWATTACK_H__ diff --git a/src/engine/CustomDNN.cpp b/src/engine/CustomDNN.cpp new file mode 100644 index 0000000000..3fbdb8d186 --- /dev/null +++ b/src/engine/CustomDNN.cpp @@ -0,0 +1,302 @@ +#include "NetworkLevelReasoner.h" +#include "CustomDNN.h" +#ifdef BUILD_TORCH +namespace NLR { +CustomRelu::CustomRelu( const NetworkLevelReasoner *nlr, unsigned layerIndex ) + : _networkLevelReasoner( nlr ) + , _reluLayerIndex( layerIndex ) +{ +} + +torch::Tensor CustomRelu::forward( torch::Tensor x ) const +{ + return CustomReluFunction::apply( x, _networkLevelReasoner, _reluLayerIndex ); +} + +CustomMaxPool::CustomMaxPool( const NetworkLevelReasoner *nlr, unsigned layerIndex ) + : _networkLevelReasoner( nlr ) + , _maxLayerIndex( layerIndex ) +{ +} + +torch::Tensor CustomMaxPool::forward( torch::Tensor x ) const +{ + return CustomMaxPoolFunction::apply( x, 
_networkLevelReasoner, _maxLayerIndex ); +} + +void CustomDNN::setWeightsAndBiases( torch::nn::Linear &linearLayer, + const Layer *layer, + unsigned sourceLayer, + unsigned inputSize, + unsigned outputSize ) +{ + Vector> layerWeights( outputSize, Vector( inputSize ) ); + Vector layerBiases( outputSize ); + + // Fetch weights and biases from networkLevelReasoner + for ( unsigned j = 0; j < outputSize; j++ ) + { + for ( unsigned k = 0; k < inputSize; k++ ) + { + double weight_value = layer->getWeight( sourceLayer, k, j ); + layerWeights[j][k] = static_cast( weight_value ); + } + double bias_value = layer->getBias( j ); + layerBiases[j] = static_cast( bias_value ); + } + + Vector flattenedWeights; + for ( const auto &weight : layerWeights ) + { + for ( const auto &w : weight ) + { + flattenedWeights.append( w ); + } + } + + torch::Tensor weightTensor = torch::tensor( flattenedWeights.getContainer(), torch::kFloat ) + .view( { outputSize, inputSize } ); + torch::Tensor biasTensor = torch::tensor( layerBiases.getContainer(), torch::kFloat ); + + torch::NoGradGuard no_grad; + linearLayer->weight.set_( weightTensor ); + linearLayer->bias.set_( biasTensor ); +} + +void CustomDNN::weightedSum( unsigned i, const Layer *layer ) +{ + unsigned sourceLayer = i - 1; + const Layer *prevLayer = _networkLevelReasoner->getLayer( sourceLayer ); + unsigned inputSize = prevLayer->getSize(); + unsigned outputSize = layer->getSize(); + + if ( outputSize > 0 ) + { + auto linearLayer = torch::nn::Linear( torch::nn::LinearOptions( inputSize, outputSize ) ); + _linearLayers.append( linearLayer ); + + setWeightsAndBiases( linearLayer, layer, sourceLayer, inputSize, outputSize ); + + register_module( "linear" + std::to_string( i ), linearLayer ); + } +} + + +CustomDNN::CustomDNN( const NetworkLevelReasoner *nlr ) +{ + CUSTOM_DNN_LOG( "----- Construct Custom Network -----" ); + _networkLevelReasoner = nlr; + _numberOfLayers = _networkLevelReasoner->getNumberOfLayers(); + for ( unsigned i = 0; i 
< _numberOfLayers; i++ ) + { + const Layer *layer = _networkLevelReasoner->getLayer( i ); + _layerSizes.append( layer->getSize() ); + Layer::Type layerType = layer->getLayerType(); + _layersOrder.append( layerType ); + switch ( layerType ) + { + case Layer::INPUT: + break; + case Layer::WEIGHTED_SUM: + weightedSum( i, layer ); + break; + case Layer::RELU: + { + auto reluLayer = std::make_shared( _networkLevelReasoner, i ); + _reluLayers.append( reluLayer ); + register_module( "ReLU" + std::to_string( i ), reluLayer ); + break; + } + case Layer::MAX: + { + auto maxPoolLayer = std::make_shared( _networkLevelReasoner, i ); + _maxPoolLayers.append( maxPoolLayer ); + register_module( "maxPool" + std::to_string( i ), maxPoolLayer ); + break; + } + default: + CUSTOM_DNN_LOG( "Unsupported layer type\n" ); + throw MarabouError( MarabouError::DEBUGGING_ERROR ); + } + } +} + +torch::Tensor CustomDNN::forward( torch::Tensor x ) +{ + unsigned linearIndex = 0; + unsigned reluIndex = 0; + unsigned maxPoolIndex = 0; + for ( unsigned i = 0; i < _numberOfLayers; i++ ) + { + const Layer::Type layerType = _layersOrder[i]; + switch ( layerType ) + { + case Layer::INPUT: + break; + case Layer::WEIGHTED_SUM: + x = _linearLayers[linearIndex]->forward( x ); + linearIndex++; + break; + case Layer::RELU: + x = _reluLayers[reluIndex]->forward( x ); + reluIndex++; + break; + case Layer::MAX: + x = _maxPoolLayers[maxPoolIndex]->forward( x ); + maxPoolIndex++; + break; + default: + CUSTOM_DNN_LOG( "Unsupported layer type\n" ); + throw MarabouError( MarabouError::DEBUGGING_ERROR ); + break; + } + } + return x; +} + +torch::Tensor CustomReluFunction::forward( torch::autograd::AutogradContext *ctx, + torch::Tensor x, + const NetworkLevelReasoner *nlr, + unsigned int layerIndex ) +{ + ctx->save_for_backward( { x } ); + + const Layer *layer = nlr->getLayer( layerIndex ); + torch::Tensor reluOutputs = torch::zeros( { 1, layer->getSize() } ); + torch::Tensor reluGradients = torch::zeros( { 1, 
layer->getSize() } ); + + for ( unsigned neuron = 0; neuron < layer->getSize(); ++neuron ) + { + auto sources = layer->getActivationSources( neuron ); + ASSERT( sources.size() == 1 ); + const NeuronIndex &sourceNeuron = sources.back(); + int index = static_cast( sourceNeuron._neuron ); + reluOutputs.index_put_( { 0, static_cast( neuron ) }, + torch::clamp_min( x.index( { 0, index } ), 0 ) ); + reluGradients.index_put_( { 0, static_cast( neuron ) }, x.index( { 0, index } ) > 0 ); + } + + ctx->saved_data["reluGradients"] = reluGradients; + + return reluOutputs; +} + +std::vector CustomReluFunction::backward( torch::autograd::AutogradContext *ctx, + std::vector grad_output ) +{ + auto saved = ctx->get_saved_variables(); + auto input = saved[0]; + + auto reluGradients = ctx->saved_data["reluGradients"].toTensor(); + auto grad_input = grad_output[0] * reluGradients[0]; + + return { grad_input, torch::Tensor(), torch::Tensor() }; +} + +torch::Tensor CustomMaxPoolFunction::forward( torch::autograd::AutogradContext *ctx, + torch::Tensor x, + const NetworkLevelReasoner *nlr, + unsigned int layerIndex ) +{ + ctx->save_for_backward( { x } ); + + const Layer *layer = nlr->getLayer( layerIndex ); + torch::Tensor maxOutputs = torch::zeros( { 1, layer->getSize() } ); + torch::Tensor argMaxOutputs = torch::zeros( { 1, layer->getSize() }, torch::kInt64 ); + + for ( unsigned neuron = 0; neuron < layer->getSize(); ++neuron ) + { + auto sources = layer->getActivationSources( neuron ); + torch::Tensor sourceValues = torch::zeros( sources.size(), torch::kFloat ); + torch::Tensor sourceIndices = torch::zeros( sources.size() ); + + for ( int i = sources.size() - 1; i >= 0; --i ) + { + const NeuronIndex &activationNeuron = sources.back(); + int index = static_cast( activationNeuron._neuron ); + sources.popBack(); + sourceValues.index_put_( { i }, x.index( { 0, index } ) ); + sourceIndices.index_put_( { i }, index ); + } + + maxOutputs.index_put_( { 0, static_cast( neuron ) }, torch::max( 
sourceValues ) ); + argMaxOutputs.index_put_( { 0, static_cast( neuron ) }, + sourceIndices.index( { torch::argmax( sourceValues ) } ) ); + } + + ctx->saved_data["argMaxOutputs"] = argMaxOutputs; + + return maxOutputs; +} + +std::vector CustomMaxPoolFunction::backward( torch::autograd::AutogradContext *ctx, + std::vector grad_output ) +{ + auto saved = ctx->get_saved_variables(); + auto input = saved[0]; + + auto grad_input = torch::zeros_like( input ); + + auto indices = ctx->saved_data["argMaxOutputs"].toTensor(); + + grad_input[0].index_add_( 0, indices.flatten(), grad_output[0].flatten() ); + + return { grad_input, torch::Tensor(), torch::Tensor() }; +} + +const Vector &CustomDNN::getLayerSizes() const +{ + return _layerSizes; +} + +torch::Tensor CustomDNN::getLayerWeights(unsigned layerIndex) const { + if (_layersOrder[layerIndex] == Layer::WEIGHTED_SUM) { + auto linearLayer = _linearLayers[layerIndex]; + return linearLayer->weight; // Returning weights of the corresponding linear layer + } + throw std::runtime_error("Requested weights for a non-weighted sum layer."); +} + +torch::Tensor CustomDNN::getLayerBias(unsigned layerIndex) const { + if (_layersOrder[layerIndex] == Layer::WEIGHTED_SUM) { + auto linearLayer = _linearLayers[layerIndex]; + return linearLayer->bias; // Returning bias of the corresponding linear layer + } + throw std::runtime_error("Requested bias for a non-weighted sum layer."); +} + +void CustomDNN::getInputBounds(torch::Tensor &lbTensor, torch::Tensor &ubTensor) const +{ + const Layer *layer = _networkLevelReasoner->getLayer(0); + unsigned size = layer->getSize(); + + std::vector lowerBounds; + std::vector upperBounds; + lowerBounds.reserve(size); + upperBounds.reserve(size); + + for (unsigned neuron = 0; neuron < size; ++neuron) + { + lowerBounds.push_back(layer->getLb(neuron)); + upperBounds.push_back(layer->getUb(neuron)); + } + + lbTensor = torch::tensor(lowerBounds, torch::kDouble); + ubTensor = torch::tensor(upperBounds, 
torch::kDouble); +} + + + +std::vector> CustomDNN::getMaxPoolSources(const Layer* maxPoolLayer) { + std::vector> sources; + unsigned size = maxPoolLayer->getSize(); + for (unsigned neuron = 0; neuron < size; ++neuron) { + + sources.push_back(maxPoolLayer->getActivationSources(neuron)); + } + return sources; +} + +} + +#endif \ No newline at end of file diff --git a/src/engine/CustomDNN.h b/src/engine/CustomDNN.h new file mode 100644 index 0000000000..a414d80f5e --- /dev/null +++ b/src/engine/CustomDNN.h @@ -0,0 +1,119 @@ +#ifdef BUILD_TORCH +#ifndef _CustomDNN_h_ +#define _CustomDNN_h_ + +#include "Layer.h" +#include "Vector.h" + +#include + +#undef Warning +#include + +#define CUSTOM_DNN_LOG( x, ... ) \ + MARABOU_LOG( GlobalConfiguration::CUSTOM_DNN_LOGGING, "customDNN: %s\n", x ) + +/* + Custom differentiation function for ReLU, implementing the forward and backward propagation + for the ReLU operation according to each variable's source layer as defined in the nlr. +*/ +namespace NLR { +class CustomReluFunction : public torch::autograd::Function +{ +public: + static torch::Tensor forward( torch::autograd::AutogradContext *ctx, + torch::Tensor x, + const NetworkLevelReasoner *nlr, + unsigned layerIndex ); + + static std::vector backward( torch::autograd::AutogradContext *ctx, + std::vector grad_output ); +}; + +class CustomRelu : public torch::nn::Module +{ +public: + CustomRelu( const NetworkLevelReasoner *nlr, unsigned layerIndex ); + torch::Tensor forward( torch::Tensor x ) const; + +private: + const NetworkLevelReasoner *_networkLevelReasoner; + unsigned _reluLayerIndex; +}; + +/* + Custom differentiation function for max pooling, implementing the forward and backward propagation + for the max pooling operation according to each variable's source layer as defined in the nlr. 
+*/ +class CustomMaxPoolFunction : public torch::autograd::Function +{ +public: + static torch::Tensor forward( torch::autograd::AutogradContext *ctx, + torch::Tensor x, + const NetworkLevelReasoner *nlr, + unsigned layerIndex ); + + static std::vector backward( torch::autograd::AutogradContext *ctx, + std::vector grad_output ); +}; + +class CustomMaxPool : public torch::nn::Module +{ +public: + CustomMaxPool( const NetworkLevelReasoner *nlr, unsigned layerIndex ); + torch::Tensor forward( torch::Tensor x ) const; + +private: + const NetworkLevelReasoner *_networkLevelReasoner; + unsigned _maxLayerIndex; +}; + +/* + torch implementation of the network according to the nlr. + */ +class CustomDNN : public torch::nn::Module +{ +public: + static void setWeightsAndBiases( torch::nn::Linear &linearLayer, + const Layer *layer, + unsigned sourceLayer, + unsigned inputSize, + unsigned outputSize ); + void weightedSum( unsigned i, const Layer *layer ); + explicit CustomDNN( const NetworkLevelReasoner *networkLevelReasoner ); + torch::Tensor getLayerWeights( unsigned layerIndex ) const; + torch::Tensor getLayerBias( unsigned layerIndex ) const; + torch::Tensor forward( torch::Tensor x ); + const Vector &getLayerSizes() const; + void getInputBounds( torch::Tensor &lbTensor, torch::Tensor &ubTensor ) const; + std::vector> getMaxPoolSources(const Layer* maxPoolLayer); + Vector getLinearLayers() + { + return _linearLayers; + } + Vector getLayersOrder() const + { + return _layersOrder; + } + Vector getLayersOrder() + { + return _layersOrder; + } + + unsigned getNumberOfLayers() const + { + return _numberOfLayers; + } + +private: + const NetworkLevelReasoner *_networkLevelReasoner; + Vector _layerSizes; + Vector> _reluLayers; + Vector> _maxPoolLayers; + Vector _linearLayers; + Vector _layersOrder; + unsigned _numberOfLayers; +}; +} // namespace NLR +#endif // _CustomDNN_h_ +#endif \ No newline at end of file diff --git a/src/engine/DantzigsRule.h b/src/engine/DantzigsRule.h index 
5e57e28c24..b3fda42e77 100644 --- a/src/engine/DantzigsRule.h +++ b/src/engine/DantzigsRule.h @@ -19,7 +19,7 @@ #include "EntrySelectionStrategy.h" #define DANTZIG_LOG( x, ... ) \ - LOG( GlobalConfiguration::DANTZIGS_RULE_LOGGING, "DantzigsRule: %s\n", x ) + MARABOU_LOG( GlobalConfiguration::DANTZIGS_RULE_LOGGING, "DantzigsRule: %s\n", x ) class String; diff --git a/src/engine/DnCManager.h b/src/engine/DnCManager.h index ee4a55a19d..545a9fd326 100644 --- a/src/engine/DnCManager.h +++ b/src/engine/DnCManager.h @@ -25,7 +25,7 @@ #include #define DNC_MANAGER_LOG( x, ... ) \ - LOG( GlobalConfiguration::DNC_MANAGER_LOGGING, "DnCManager: %s\n", x ) + MARABOU_LOG( GlobalConfiguration::DNC_MANAGER_LOGGING, "DnCManager: %s\n", x ) class Query; diff --git a/src/engine/Engine.cpp b/src/engine/Engine.cpp index 86f45ecd0c..028bae2fbc 100644 --- a/src/engine/Engine.cpp +++ b/src/engine/Engine.cpp @@ -2444,6 +2444,8 @@ unsigned Engine::performSymbolicBoundTightening( Query *inputQuery ) _networkLevelReasoner->symbolicBoundPropagation(); else if ( _symbolicBoundTighteningType == SymbolicBoundTighteningType::DEEP_POLY ) _networkLevelReasoner->deepPolyPropagation(); + else if ( _symbolicBoundTighteningType == SymbolicBoundTighteningType::ALPHA_CROWN ) + _networkLevelReasoner->alphaCrown(); // Step 3: Extract the bounds List tightenings; diff --git a/src/engine/Engine.h b/src/engine/Engine.h index a3ea1c22d3..5e53564f33 100644 --- a/src/engine/Engine.h +++ b/src/engine/Engine.h @@ -57,7 +57,7 @@ #undef ERROR #endif -#define ENGINE_LOG( x, ... ) LOG( GlobalConfiguration::ENGINE_LOGGING, "Engine: %s\n", x ) +#define ENGINE_LOG( x, ... ) MARABOU_LOG( GlobalConfiguration::ENGINE_LOGGING, "Engine: %s\n", x ) class EngineState; class Query; diff --git a/src/engine/InputQuery.cpp b/src/engine/InputQuery.cpp index d275646b06..c28913a20b 100644 --- a/src/engine/InputQuery.cpp +++ b/src/engine/InputQuery.cpp @@ -29,7 +29,7 @@ #include "SoftmaxConstraint.h" #define INPUT_QUERY_LOG( x, ... 
) \ - LOG( GlobalConfiguration::INPUT_QUERY_LOGGING, "Marabou Query: %s\n", x ) + MARABOU_LOG( GlobalConfiguration::INPUT_QUERY_LOGGING, "Marabou Query: %s\n", x ) using namespace CVC4::context; diff --git a/src/engine/MarabouError.h b/src/engine/MarabouError.h index 2f2ee54c0f..1cff574056 100644 --- a/src/engine/MarabouError.h +++ b/src/engine/MarabouError.h @@ -66,6 +66,8 @@ class MarabouError : public Error FEATURE_NOT_YET_SUPPORTED = 900, + TIMEOUT = 32, + DEBUGGING_ERROR = 999, }; diff --git a/src/engine/PLConstraintScoreTracker.h b/src/engine/PLConstraintScoreTracker.h index ab62333e6b..6798074dd2 100644 --- a/src/engine/PLConstraintScoreTracker.h +++ b/src/engine/PLConstraintScoreTracker.h @@ -24,7 +24,7 @@ #include #define SCORE_TRACKER_LOG( x, ... ) \ - LOG( GlobalConfiguration::SCORE_TRACKER_LOGGING, "PLConstraintScoreTracker: %s\n", x ) + MARABOU_LOG( GlobalConfiguration::SCORE_TRACKER_LOGGING, "PLConstraintScoreTracker: %s\n", x ) struct ScoreEntry { diff --git a/src/engine/ProjectedSteepestEdge.h b/src/engine/ProjectedSteepestEdge.h index 70b3265ef0..c84ba9bac9 100644 --- a/src/engine/ProjectedSteepestEdge.h +++ b/src/engine/ProjectedSteepestEdge.h @@ -20,7 +20,7 @@ #include "SparseUnsortedList.h" #define PSE_LOG( x, ... ) \ - LOG( GlobalConfiguration::PROJECTED_STEEPEST_EDGE_LOGGING, "Projected SE: %s\n", x ) + MARABOU_LOG( GlobalConfiguration::PROJECTED_STEEPEST_EDGE_LOGGING, "Projected SE: %s\n", x ) class ProjectedSteepestEdgeRule : public IProjectedSteepestEdgeRule { diff --git a/src/engine/Query.cpp b/src/engine/Query.cpp index 77b22c9c9f..6c696c42be 100644 --- a/src/engine/Query.cpp +++ b/src/engine/Query.cpp @@ -29,7 +29,7 @@ #include "SymbolicBoundTighteningType.h" #define INPUT_QUERY_LOG( x, ... 
) \ - LOG( GlobalConfiguration::INPUT_QUERY_LOGGING, "Input Query: %s\n", x ) + MARABOU_LOG( GlobalConfiguration::INPUT_QUERY_LOGGING, "Input Query: %s\n", x ) Query::Query() : _ensureSameSourceLayerInNLR( Options::get()->getSymbolicBoundTighteningType() == diff --git a/src/engine/SmtCore.h b/src/engine/SmtCore.h index ad1d61f8e9..0274d475b6 100644 --- a/src/engine/SmtCore.h +++ b/src/engine/SmtCore.h @@ -28,7 +28,7 @@ #include -#define SMT_LOG( x, ... ) LOG( GlobalConfiguration::SMT_CORE_LOGGING, "SmtCore: %s\n", x ) +#define SMT_LOG( x, ... ) MARABOU_LOG( GlobalConfiguration::SMT_CORE_LOGGING, "SmtCore: %s\n", x ) class EngineState; class IEngine; diff --git a/src/engine/SumOfInfeasibilitiesManager.h b/src/engine/SumOfInfeasibilitiesManager.h index a823a92fed..7a9a3bf448 100644 --- a/src/engine/SumOfInfeasibilitiesManager.h +++ b/src/engine/SumOfInfeasibilitiesManager.h @@ -29,7 +29,7 @@ #include "T/stdlib.h" #include "Vector.h" -#define SOI_LOG( x, ... ) LOG( GlobalConfiguration::SOI_LOGGING, "SoIManager: %s\n", x ) +#define SOI_LOG( x, ... ) MARABOU_LOG( GlobalConfiguration::SOI_LOGGING, "SoIManager: %s\n", x ) class SumOfInfeasibilitiesManager { diff --git a/src/engine/SymbolicBoundTighteningType.h b/src/engine/SymbolicBoundTighteningType.h index 509c0ae21c..56a48fa730 100644 --- a/src/engine/SymbolicBoundTighteningType.h +++ b/src/engine/SymbolicBoundTighteningType.h @@ -22,7 +22,8 @@ enum class SymbolicBoundTighteningType { SYMBOLIC_BOUND_TIGHTENING = 0, DEEP_POLY = 1, - NONE = 2, + ALPHA_CROWN = 2, + NONE = 3, }; #endif // __SymbolicBoundTighteningType_h__ diff --git a/src/engine/Tableau.h b/src/engine/Tableau.h index f5bf063f57..175efbe3c8 100644 --- a/src/engine/Tableau.h +++ b/src/engine/Tableau.h @@ -29,7 +29,7 @@ #include "SparseUnsortedList.h" #include "Statistics.h" -#define TABLEAU_LOG( x, ... ) LOG( GlobalConfiguration::TABLEAU_LOGGING, "Tableau: %s\n", x ) +#define TABLEAU_LOG( x, ... 
) MARABOU_LOG( GlobalConfiguration::TABLEAU_LOGGING, "Tableau: %s\n", x ) class Equation; class ICostFunctionManager; diff --git a/src/input_parsers/MpsParser.h b/src/input_parsers/MpsParser.h index 71f177f493..76c4afab96 100644 --- a/src/input_parsers/MpsParser.h +++ b/src/input_parsers/MpsParser.h @@ -20,7 +20,7 @@ #include "Map.h" #include "Set.h" -#define MPS_LOG( x, ... ) LOG( GlobalConfiguration::MPS_PARSER_LOGGING, "MpsParser: %s\n", x ) +#define MPS_LOG( x, ... ) MARABOU_LOG( GlobalConfiguration::MPS_PARSER_LOGGING, "MpsParser: %s\n", x ) class IQuery; class String; diff --git a/src/input_parsers/OnnxParser.h b/src/input_parsers/OnnxParser.h index 2b316a2004..3f0149f6c8 100644 --- a/src/input_parsers/OnnxParser.h +++ b/src/input_parsers/OnnxParser.h @@ -25,7 +25,7 @@ #include "Vector.h" #include "onnx.proto3.pb.h" -#define ONNX_LOG( x, ... ) LOG( GlobalConfiguration::ONNX_PARSER_LOGGING, "OnnxParser: %s\n", x ) +#define ONNX_LOG( x, ... ) MARABOU_LOG( GlobalConfiguration::ONNX_PARSER_LOGGING, "OnnxParser: %s\n", x ) class OnnxParser diff --git a/src/nlr/AlphaCrown.cpp b/src/nlr/AlphaCrown.cpp new file mode 100644 index 0000000000..c68f9554d9 --- /dev/null +++ b/src/nlr/AlphaCrown.cpp @@ -0,0 +1,546 @@ +// +// Created by User on 7/23/2025. 
+// + +#include "AlphaCrown.h" +#include "MStringf.h" +#include "NetworkLevelReasoner.h" +#include "Layer.h" + +namespace NLR { +AlphaCrown::AlphaCrown( LayerOwner *layerOwner ) + : _layerOwner( layerOwner ) +{ + _nlr = dynamic_cast( layerOwner ); + _network = new CustomDNN( _nlr ); + _network->getInputBounds( _lbInput, _ubInput ); + _inputSize = _lbInput.size( 0 ); + _linearLayers = _network->getLinearLayers().getContainer(); + _layersOrder = _network->getLayersOrder().getContainer(); + + unsigned linearIndex = 0; + for ( unsigned i = 0; i < _network->getNumberOfLayers(); i++ ) + { + if (_layersOrder[i] == Layer::WEIGHTED_SUM) + { + // const Layer *layer = _layerOwner->getLayer( i ); + auto linearLayer = _linearLayers[linearIndex]; + auto whights = linearLayer->weight; + auto bias = linearLayer->bias; + _positiveWeights.insert( {i,torch::where( whights >= 0,whights, + torch::zeros_like( + whights ) ).to(torch::kFloat32)} ); + _negativeWeights.insert( {i,torch::where( whights <= 0,whights, + torch::zeros_like( + whights ) ).to(torch::kFloat32)} ); + _biases.insert( {i,bias.to(torch::kFloat32)} ); + linearIndex += 1; + } + if (_layersOrder[i] == Layer::MAX) + { + _maxPoolSources.insert({i, _network->getMaxPoolSources(_nlr->getLayer( i ) )}); + } + } +} + +torch::Tensor AlphaCrown::createSymbolicVariablesMatrix() +{ + // Create the identity matrix and the zero matrix + auto eye_tensor = torch::eye(_inputSize, torch::kFloat32); // Ensure float32 + auto zero_tensor = torch::zeros({_inputSize, 1}, torch::kFloat32); // Ensure float32 + + // Concatenate the two tensors horizontally (along dim=1) + return torch::cat({eye_tensor, zero_tensor}, 1); // Will be of type float32 +} + +torch::Tensor AlphaCrown::lower_ReLU_relaxation( const torch::Tensor &u, const torch::Tensor &l ) +{ + torch::Tensor mult; + mult = torch::where( u - l == 0, torch::tensor( 1.0 ), u / ( u - l ) ); + mult = torch::where( l >= 0, torch::tensor( 1.0 ), mult ); + mult = torch::where( u <= 0, 
torch::tensor( 0.0 ), mult ); + return mult.to(torch::kFloat32); +} + +std::tuple AlphaCrown::upper_ReLU_relaxation( const torch::Tensor &u, + const torch::Tensor &l ) +{ + torch::Tensor mult = torch::where( u - l == 0, torch::tensor( 1.0 ), u / ( u - l ) ); + mult = torch::where( l >= 0, torch::tensor( 1.0 ), mult ); + mult = torch::where( u <= 0, torch::tensor( 0.0 ), mult ); + + torch::Tensor add = torch::where( u - l == 0, torch::tensor( 0.0 ), -l * mult ); + add = torch::where( l >= 0, torch::tensor( 0.0 ), add ); + + return std::make_tuple( mult.to(torch::kFloat32), add.to(torch::kFloat32) ); +} +torch::Tensor AlphaCrown::getMaxOfSymbolicVariables( const torch::Tensor &matrix ) +{ + auto coefficients = matrix.index( + { torch::indexing::Slice(), torch::indexing::Slice( torch::indexing::None, -1 ) } ); + auto free_coefficients = matrix.index( { torch::indexing::Slice(), -1 } ); + + auto positive_mask = coefficients >= 0; + + torch::Tensor u_values = + torch::sum( torch::where( positive_mask, coefficients * _ubInput, coefficients * _lbInput ), + 1 ) + + free_coefficients; + + return u_values; +} + +torch::Tensor AlphaCrown::getMinOfSymbolicVariables( const torch::Tensor &matrix ) +{ + auto coefficients = matrix.index( + { torch::indexing::Slice(), torch::indexing::Slice( torch::indexing::None, -1 ) } ); + auto free_coefficients = matrix.index( { torch::indexing::Slice(), -1 } ); + + auto positive_mask = coefficients >= 0; + + torch::Tensor l_values = + torch::sum( torch::where( positive_mask, coefficients * _lbInput, coefficients * _ubInput ), + 1 ) + + free_coefficients; + + return l_values; +} + +void AlphaCrown::relaxReluLayer(unsigned layerNumber, torch::Tensor + &EQ_up, torch::Tensor &EQ_low){ + + auto u_values_EQ_up = AlphaCrown::getMaxOfSymbolicVariables(EQ_up); + auto l_values_EQ_up = AlphaCrown::getMinOfSymbolicVariables(EQ_low); + auto [upperRelaxationSlope, upperRelaxationIntercept] = + AlphaCrown::upper_ReLU_relaxation(l_values_EQ_up, 
u_values_EQ_up); + + auto u_values_EQ_low = AlphaCrown::getMaxOfSymbolicVariables(EQ_up); + auto l_values_EQ_low = AlphaCrown::getMinOfSymbolicVariables(EQ_low); + auto alphaSlope = AlphaCrown::lower_ReLU_relaxation(l_values_EQ_low, + u_values_EQ_low); + + EQ_up = EQ_up * upperRelaxationSlope.unsqueeze( 1 ); + EQ_up = AlphaCrown::addVecToLastColumnValue( EQ_up, upperRelaxationIntercept ); + EQ_low = EQ_low * alphaSlope.unsqueeze( 1 ); + + _upperRelaxationSlopes.insert({layerNumber, upperRelaxationSlope} ); + // back but insert to dict + _upperRelaxationIntercepts.insert({layerNumber, upperRelaxationIntercept} ); + _indexAlphaSlopeMap.insert( {layerNumber, _initialAlphaSlopes.size()} ); + _initialAlphaSlopes.push_back( alphaSlope ); + +} + +void AlphaCrown::relaxMaxPoolLayer(unsigned layerNumber, + torch::Tensor &EQ_up, + torch::Tensor &EQ_low) +{ + std::cout << "Relaxing MaxPool layer number: " << layerNumber << std::endl; + const auto &groups = _maxPoolSources[layerNumber]; + TORCH_CHECK(!groups.empty(), "MaxPool layer has no groups"); + + const auto cols = EQ_up.size(1); + auto next_EQ_up = torch::zeros({ (long)groups.size(), cols }, torch::kFloat32); + auto next_EQ_low = torch::zeros({ (long)groups.size(), cols }, torch::kFloat32); + + std::vector upIdx; upIdx.reserve(groups.size()); + std::vector loIdx; loIdx.reserve(groups.size()); + std::vector slopes; slopes.reserve(groups.size()); + std::vector ints; ints.reserve(groups.size()); + + for (size_t k = 0; k < groups.size(); ++k) + { + // Get per-neuron relaxation parameters & indices + auto R = relaxMaxNeuron(groups, k, EQ_up, EQ_low); + + // Build next rows: + // Upper: slope * EQ_up[R.idx_up] (+ intercept on last column) + auto up_row = EQ_up.index({ (long)R.idx_up, torch::indexing::Slice() }) * R.slope; + auto bvec = torch::full({1}, R.intercept, torch::kFloat32); + up_row = AlphaCrown::addVecToLastColumnValue(up_row, bvec); + + // Lower: copy EQ_low[R.idx_low] + auto low_row = EQ_low.index({ 
(long)R.idx_low, torch::indexing::Slice() }).clone(); + + next_EQ_up.index_put_ ( { (long)k, torch::indexing::Slice() }, up_row ); + next_EQ_low.index_put_( { (long)k, torch::indexing::Slice() }, low_row ); + + // Persist + upIdx.push_back(R.idx_up); + loIdx.push_back(R.idx_low); + slopes.push_back(R.slope); + ints.push_back(R.intercept); + } + + + _maxUpperChoice[layerNumber] = torch::from_blob( + upIdx.data(), {(long)upIdx.size()}, torch::TensorOptions().dtype(torch::kLong)).clone(); + _maxLowerChoice[layerNumber] = torch::from_blob( + loIdx.data(), {(long)loIdx.size()}, torch::TensorOptions().dtype(torch::kLong)).clone(); + _upperRelaxationSlopes[layerNumber] = + torch::from_blob(slopes.data(), {(long)slopes.size()}, torch::TensorOptions().dtype(torch::kFloat32)).clone(); + _upperRelaxationIntercepts[layerNumber] = + torch::from_blob(ints.data(), {(long)ints.size()}, torch::TensorOptions().dtype(torch::kFloat32)).clone(); + + // Advance EQs + EQ_up = next_EQ_up; + EQ_low = next_EQ_low; +} + + + + +std::pair +AlphaCrown::boundsFromEQ(const torch::Tensor &EQ, const std::vector &rows) +{ + TORCH_CHECK(!rows.empty(), "boundsFromEQ: empty rows"); + auto idx = torch::from_blob(const_cast(rows.data()), + {(long)rows.size()}, + torch::TensorOptions().dtype(torch::kLong)).clone(); + auto sub = EQ.index({ idx, torch::indexing::Slice() }); // |S| x (n+1) + auto U = getMaxOfSymbolicVariables(sub); // |S| + auto L = getMinOfSymbolicVariables(sub); // |S| + return {U, L}; +} + + + +AlphaCrown::MaxRelaxResult AlphaCrown::relaxMaxNeuron(const std::vector> &groups, + size_t k, + const torch::Tensor &EQ_up, + const torch::Tensor &EQ_low) +{ + constexpr double EPS = 1e-12; + + // Collect absolute previous-layer row indices for output k + std::vector srcRows; srcRows.reserve(16); + const auto &srcList = groups[k]; + for (const auto &ni : srcList) { + srcRows.push_back((long)ni._neuron); + } + TORCH_CHECK(!srcRows.empty(), "MaxPool group has no sources"); + + + auto [U_low, L_low] = 
boundsFromEQ(EQ_low, srcRows); + auto M_low = (U_low + L_low) / 2.0; + long j_rel_low = torch::argmax(M_low).item(); + long idx_low_abs = srcRows[(size_t)j_rel_low]; + + + auto [U_up, L_up] = boundsFromEQ(EQ_up, srcRows); + + // i = argmax U_up, j = second argmax U_up (or i if single source) + int64_t kTop = std::min(2, U_up.size(0)); + auto top2 = torch::topk(U_up, kTop, /dim=/0, /largest=/true, /sorted=/true); + auto Uidxs = std::get<1>(top2); + long i_rel = Uidxs[0].item(); + long j_rel2 = (kTop > 1) ? Uidxs[1].item() : Uidxs[0].item(); + + double li = L_up[i_rel].item(); + double ui = U_up[i_rel].item(); + double uj = U_up[j_rel2].item(); + + // Case 1: (li == max(L_up)) ∧ (li >= uj) + auto Lmax_pair = torch::max(L_up, /dim=/0); + long l_arg = std::get<1>(Lmax_pair).item(); + bool case1 = (i_rel == l_arg) && (li + EPS >= uj); + + float slope, intercept; + if (case1 || (ui - li) <= EPS) { + // Case 1 (or degenerate): y ≤ x_i → a=1, intercept=0 + slope = 1.0f; + intercept = 0.0f; + } else { + // Case 2: a=(ui-uj)/(ui-li), b=uj → store as (a*xi + (b - a*li)) + double a = (ui - uj) / (ui - li); + if (a < 0.0) a = 0.0; + if (a > 1.0) a = 1.0; + slope = (float)a; + intercept = (float)(uj - a * li); // this is what you ADD to last column + } + + long idx_up_abs = srcRows[(size_t)i_rel]; + return MaxRelaxResult{ idx_up_abs, idx_low_abs, slope, intercept }; +} + + +void AlphaCrown::computeMaxPoolLayer(unsigned layerNumber, + torch::Tensor &EQ_up, + torch::Tensor &EQ_low) +{ + auto idxUp = _maxUpperChoice.at(layerNumber); // int64 [m] + auto idxLo = _maxLowerChoice.at(layerNumber); // int64 [m] + auto a = _upperRelaxationSlopes.at(layerNumber).to(torch::kFloat32); // [m] + auto b = _upperRelaxationIntercepts.at(layerNumber).to(torch::kFloat32); // [m] + + // Select rows from current EQs + auto up_sel = EQ_up.index ({ idxUp, torch::indexing::Slice() }); // m x (n+1) + auto low_sel = EQ_low.index({ idxLo, torch::indexing::Slice() }); + + // Upper: scale + add intercept on 
last column + auto next_up = up_sel * a.unsqueeze(1); + next_up = AlphaCrown::addVecToLastColumnValue(next_up, b); + + // Lower: copy chosen rows + auto next_low = low_sel.clone(); + + EQ_up = next_up; + EQ_low = next_low; +} + + + + +void AlphaCrown::findBounds() +{ + torch::Tensor EQ_up = createSymbolicVariablesMatrix(); + torch::Tensor EQ_low = createSymbolicVariablesMatrix(); + + for ( unsigned i = 0; i < _network->getNumberOfLayers(); i++ ){ + Layer::Type layerType = _layersOrder[i]; + switch (layerType) + { + case Layer::INPUT: + break; + case Layer::WEIGHTED_SUM: + computeWeightedSumLayer(i, EQ_up, EQ_low); + break; + case Layer::RELU: + relaxReluLayer(i, EQ_up, EQ_low); + break; + case Layer::MAX: + { + relaxMaxPoolLayer( i, EQ_up, EQ_low ); + break; + } + default: + AlphaCrown::log ( "Unsupported layer type\n"); + throw MarabouError( MarabouError::DEBUGGING_ERROR ); + } + } +} + + +std::tuple AlphaCrown::computeBounds + (std::vector &alphaSlopes) +{ + torch::Tensor EQ_up = createSymbolicVariablesMatrix(); + torch::Tensor EQ_low = createSymbolicVariablesMatrix(); + for ( unsigned i = 0; i < _network->getNumberOfLayers(); i++ ) + { + auto layerType = _layersOrder[i]; + switch (layerType) + { + case Layer::INPUT: + break; + case Layer::WEIGHTED_SUM: + computeWeightedSumLayer (i, EQ_up, EQ_low); + break; + case Layer::RELU: + computeReluLayer (i, EQ_up, EQ_low, alphaSlopes); + break; + case Layer::MAX: + computeMaxPoolLayer( i, EQ_up, EQ_low ); + break; + default: + log ("Unsupported layer type\n"); + throw MarabouError (MarabouError::DEBUGGING_ERROR); + } + } + auto outputUpBound = getMaxOfSymbolicVariables(EQ_up); + auto outputLowBound = getMinOfSymbolicVariables(EQ_low); + return std::make_tuple(outputUpBound, outputLowBound); + +} + + +void AlphaCrown::computeWeightedSumLayer(unsigned i, torch::Tensor &EQ_up, torch::Tensor &EQ_low){ + //auto linearLayer = _linearLayers[i]; + auto Wi_positive = _positiveWeights[i]; + auto Wi_negative = _negativeWeights[i]; 
+ auto Bi = _biases[i]; + + auto EQ_up_afterLayer = Wi_positive.mm( EQ_up ) + Wi_negative.mm( EQ_low ); + EQ_up_afterLayer = + AlphaCrown::addVecToLastColumnValue( EQ_up_afterLayer, Bi ); + + + auto EQ_low_afterLayer = Wi_positive.mm( EQ_low ) + Wi_negative.mm( EQ_up ); + EQ_low_afterLayer = + AlphaCrown::addVecToLastColumnValue(EQ_low_afterLayer, Bi ); + + EQ_up = EQ_up_afterLayer; + EQ_low = EQ_low_afterLayer; + +} + + +void AlphaCrown::computeReluLayer(unsigned layerNumber, torch::Tensor + &EQ_up, torch::Tensor &EQ_low, std::vector &alphaSlopes){ + EQ_up = EQ_up * _upperRelaxationSlopes[layerNumber].unsqueeze( 1 ); // + EQ_up = AlphaCrown::addVecToLastColumnValue( EQ_up, _upperRelaxationIntercepts[layerNumber] ); + unsigned indexInAlpha = _indexAlphaSlopeMap[layerNumber]; + EQ_low = EQ_low * alphaSlopes[indexInAlpha].unsqueeze( 1 ); +} + + + + +void AlphaCrown::updateBounds(std::vector &alphaSlopes){ + torch::Tensor EQ_up = createSymbolicVariablesMatrix(); + torch::Tensor EQ_low = createSymbolicVariablesMatrix(); + + + for ( unsigned i = 0; i < _network->getNumberOfLayers(); i++ ) + { + auto layerType = _layersOrder[i]; + switch (layerType) + { + case Layer::INPUT: + break; + case Layer::WEIGHTED_SUM: + computeWeightedSumLayer (i, EQ_up, EQ_low); + break; + case Layer::RELU: + computeReluLayer (i, EQ_up, EQ_low, alphaSlopes); + break; + case Layer::MAX: + computeMaxPoolLayer( i, EQ_up, EQ_low ); + break; + default: + log ("Unsupported layer type\n"); + throw MarabouError (MarabouError::DEBUGGING_ERROR); + } + auto upBound = getMaxOfSymbolicVariables(EQ_up); + auto lowBound = getMinOfSymbolicVariables(EQ_low); + updateBoundsOfLayer(i, upBound, lowBound); + } +} + +void AlphaCrown::updateBoundsOfLayer(unsigned layerIndex, torch::Tensor &upBounds, torch::Tensor &lowBounds) +{ + + Layer * layer = _layerOwner->getLayerIndexToLayer()[layerIndex]; + //TODO it should be: Layer *layer = _layerOwner->getLayer(layerIndex); if we added non const getter + + for (int j = 0; j 
< upBounds.size(0); j++) + { + if ( layer->neuronEliminated( j ) ) continue; + double lb_val = lowBounds[j].item(); + if ( layer->getLb( j ) < lb_val ) + { + log( Stringf( "Neuron %u_%u lower-bound updated from %f to %f", + layerIndex, + j, + layer->getLb( j ), + lb_val ) ); + + std::cout << "Neuron " << layerIndex << "_" << j + << " lower-bound updated from " << layer->getLb(j) + << " to " << lb_val << std::endl; + layer->setLb( j, lb_val ); + _layerOwner->receiveTighterBound( + Tightening( layer->neuronToVariable( j ), lb_val, Tightening::LB ) ); + } + + + auto ub_val = upBounds[j].item(); + if ( layer->getUb( j ) > ub_val ) + { + log( Stringf( "Neuron %u_%u upper-bound updated from %f to %f", + layerIndex, + j, + layer->getUb( j ), + ub_val ) ); + std::cout << "Neuron " << layerIndex << "_" << j + << " upper-bound updated from " << layer->getUb(j) + << " to " << ub_val << std::endl; + + layer->setUb( j, ub_val ); + _layerOwner->receiveTighterBound( + Tightening( layer->neuronToVariable( j ), ub_val, Tightening::UB ) ); + } + + } +} + + +void AlphaCrown::optimizeBounds( int loops ) +{ + + + std::cout << "Starting AlphaCrown run with " << loops << " optimization loops." << std::endl; + std::vector alphaSlopesForUpBound; + std::vector alphaSlopesForLowBound; + for ( auto &tensor : _initialAlphaSlopes ) + { + alphaSlopesForUpBound.push_back( tensor.detach().clone().requires_grad_(true) ); + alphaSlopesForLowBound.push_back( tensor.detach().clone().requires_grad_(true) ); + } + GDloop( loops, "max", alphaSlopesForUpBound ); + GDloop( loops, "min", alphaSlopesForLowBound ); + updateBounds( alphaSlopesForUpBound ); + updateBounds( alphaSlopesForLowBound); + std::cout << "AlphaCrown run completed." 
<< std::endl; +} + + +void AlphaCrown::GDloop( int loops, + const std::string val_to_opt, + std::vector<torch::Tensor> &alphaSlopes ) +{ + torch::optim::Adam optimizer( alphaSlopes, 0.005 ); + for ( int i = 0; i < loops; i++ ) + { + optimizer.zero_grad(); + + auto [max_val, min_val] = AlphaCrown::computeBounds( alphaSlopes ); + auto loss = ( val_to_opt == "max" ) ? max_val.sum() : -min_val.sum(); + loss.backward(torch::Tensor(), true); + + optimizer.step(); + + for ( auto &tensor : alphaSlopes ) + { + // clamp() is out-of-place and its result was discarded; clamp the + // underlying data in-place (via .data() to avoid autograd's + // leaf-in-place error) so the alpha slopes actually stay in [0, 1]. + tensor.data().clamp_( 0, 1 ); + } + + log( Stringf( "Optimization loop %d completed", i + 1 ) ); + std::cout << "std Optimization loop completed " << i+1 << std::endl; + } +} + + +torch::Tensor AlphaCrown::addVecToLastColumnValue(const torch::Tensor &matrix, + const torch::Tensor &vec) +{ + auto result = matrix.clone(); + if (result.dim() == 2) + { + // add 'vec' per row to last column + result.slice(1, result.size(1) - 1, result.size(1)) += vec.unsqueeze(1); + } + else if (result.dim() == 1) + { + // add scalar to last entry (the constant term) + TORCH_CHECK(vec.numel() == 1, "1-D addVec expects scalar vec"); + result.index_put_({ result.size(0) - 1 }, + result.index({ result.size(0) - 1 }) + vec.item()); + } + else + { + TORCH_CHECK(false, "addVecToLastColumnValue expects 1-D or 2-D tensor"); + } + return result; +} + + + +void AlphaCrown::log( const String &message ) +{ + if ( GlobalConfiguration::NETWORK_LEVEL_REASONER_LOGGING ) + printf( "DeepPolyAnalysis: %s\n", message.ascii() ); +} + + +} // namespace NLR \ No newline at end of file diff --git a/src/nlr/AlphaCrown.h b/src/nlr/AlphaCrown.h new file mode 100644 index 0000000000..6042354e99 --- /dev/null +++ b/src/nlr/AlphaCrown.h @@ -0,0 +1,100 @@ +#ifndef ALPHACROWN_H +#define ALPHACROWN_H + +#include "CustomDNN.h" +#include "LayerOwner.h" +#include + +#undef Warning +#include <torch/torch.h> + +namespace NLR { +class AlphaCrown +{ +public: + AlphaCrown( LayerOwner *layerOwner ); + + void findBounds(); + void optimizeBounds( int loops = 50 ); + void 
run() + + { + findBounds(); + updateBounds(_initialAlphaSlopes); + optimizeBounds( 2 ); + + } + +private: + LayerOwner *_layerOwner; + NetworkLevelReasoner *_nlr; + CustomDNN *_network; + void GDloop( int loops, const std::string val_to_opt, std::vector &alphaSlopes ); + std::tuple + computeBounds( std::vector &alphaSlopes ); + int _inputSize; + torch::Tensor _lbInput; + torch::Tensor _ubInput; + + std::vector _linearLayers; + std::vector _layersOrder; + std::map _positiveWeights; + std::map _negativeWeights; + std::map _biases; + std::map _indexAlphaSlopeMap; + std::map _linearIndexMap; + + std::map>> _maxPoolSources; + std::map _maxUpperChoice; // int64 [m]: absolute row index for upper bound + std::map _maxLowerChoice; // int64 [m]: absolute row index for lower bound + void relaxMaxPoolLayer(unsigned layerNumber, torch::Tensor &EQ_up, torch::Tensor &EQ_low); + void computeMaxPoolLayer(unsigned layerNumber, torch::Tensor &EQ_up, torch::Tensor &EQ_low); + + std::pair boundsFromEQ(const torch::Tensor &EQ, const std::vector &rows); + struct MaxRelaxResult { + long idx_up; // absolute row in previous EQ for the upper bound + long idx_low; // absolute row in previous EQ for the lower bound + float slope; // upper slope a + float intercept; // upper intercept (b - a*l_i) + }; + + MaxRelaxResult relaxMaxNeuron(const std::vector> &groups, + size_t k, + const torch::Tensor &EQ_up, + const torch::Tensor &EQ_low); + + std::map _upperRelaxationSlopes; + std::map _upperRelaxationIntercepts; + + std::vector _initialAlphaSlopes; + + torch::Tensor createSymbolicVariablesMatrix(); + void relaxReluLayer(unsigned layerNumber, torch::Tensor &EQ_up, torch::Tensor &EQ_low); + void computeWeightedSumLayer(unsigned i, torch::Tensor &EQ_up, torch::Tensor &EQ_low); + void computeReluLayer(unsigned i, torch::Tensor &EQ_up, torch::Tensor &EQ_low, std::vector &alphaSlopes); + + void updateBounds(std::vector &alphaSlopes); + void updateBoundsOfLayer(unsigned layerIndex, torch::Tensor 
&upBounds, torch::Tensor &lowBounds); + + torch::Tensor addVecToLastColumnValue( const torch::Tensor &matrix, + const torch::Tensor &vec ); + // { + // auto result = matrix.clone(); + // result.slice( 1, result.size( 1 ) - 1, result.size( 1 ) ) += vec.unsqueeze( 1 ); + // return result; + // } + static torch::Tensor lower_ReLU_relaxation( const torch::Tensor &u, const torch::Tensor &l ); + + static std::tuple upper_ReLU_relaxation( const torch::Tensor &u, + const torch::Tensor &l ); + + torch::Tensor getMaxOfSymbolicVariables( const torch::Tensor &matrix ); + torch::Tensor getMinOfSymbolicVariables( const torch::Tensor &matrix ); + + + void log( const String &message ); +}; +} // namespace NLR + + +#endif //ALPHACROWN_H \ No newline at end of file diff --git a/src/nlr/CMakeLists.txt b/src/nlr/CMakeLists.txt index e377a638ba..f008a36da0 100644 --- a/src/nlr/CMakeLists.txt +++ b/src/nlr/CMakeLists.txt @@ -7,7 +7,7 @@ target_include_directories(${MARABOU_LIB} PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}") target_sources(${MARABOU_TEST_LIB} PRIVATE ${SRCS}) target_include_directories(${MARABOU_TEST_LIB} PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}") -set (NETWORK_LEVEL_REASONER_TESTS_DIR "${CMAKE_CURRENT_SOURCE_DIR}/tests") +set(NETWORK_LEVEL_REASONER_TESTS_DIR "${CMAKE_CURRENT_SOURCE_DIR}/tests") macro(network_level_reasoner_add_unit_test name) set(USE_MOCK_COMMON TRUE) set(USE_MOCK_ENGINE TRUE) @@ -15,15 +15,16 @@ macro(network_level_reasoner_add_unit_test name) endmacro() network_level_reasoner_add_unit_test(DeepPolyAnalysis) +network_level_reasoner_add_unit_test(AlphaCrown) network_level_reasoner_add_unit_test(NetworkLevelReasoner) network_level_reasoner_add_unit_test(WsLayerElimination) network_level_reasoner_add_unit_test(ParallelSolver) if (${ENABLE_GUROBI}) network_level_reasoner_add_unit_test(LPRelaxation) -endif() +endif () if (${BUILD_PYTHON}) target_include_directories(${MARABOU_PY} PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}") -endif() +endif () diff --git 
a/src/nlr/IterativePropagator.h b/src/nlr/IterativePropagator.h index 7a7fba671a..0c3a593f89 100644 --- a/src/nlr/IterativePropagator.h +++ b/src/nlr/IterativePropagator.h @@ -27,7 +27,7 @@ namespace NLR { #define IterativePropagator_LOG( x, ... ) \ - LOG( GlobalConfiguration::PREPROCESSOR_LOGGING, "Iterativepropagator: %s\n", x ) + MARABOU_LOG( GlobalConfiguration::PREPROCESSOR_LOGGING, "Iterativepropagator: %s\n", x ) class IterativePropagator : public ParallelSolver { diff --git a/src/nlr/LPFormulator.h b/src/nlr/LPFormulator.h index cee0abbb65..248fc968c8 100644 --- a/src/nlr/LPFormulator.h +++ b/src/nlr/LPFormulator.h @@ -31,7 +31,7 @@ namespace NLR { #define LPFormulator_LOG( x, ... ) \ - LOG( GlobalConfiguration::PREPROCESSOR_LOGGING, "LP Preprocessor: %s\n", x ) + MARABOU_LOG( GlobalConfiguration::PREPROCESSOR_LOGGING, "LP Preprocessor: %s\n", x ) class LPFormulator : public ParallelSolver { diff --git a/src/nlr/Layer.cpp b/src/nlr/Layer.cpp index 08e6900538..b27949cdf9 100644 --- a/src/nlr/Layer.cpp +++ b/src/nlr/Layer.cpp @@ -27,6 +27,13 @@ Layer::~Layer() freeMemoryIfNeeded(); } +void Layer::setBounds( unsigned int neuron, double lower, double upper ) +{ + ASSERT( neuron < _size ); + _lb[neuron] = lower; + _ub[neuron] = upper; +} + void Layer::setLayerOwner( LayerOwner *layerOwner ) { _layerOwner = layerOwner; diff --git a/src/nlr/Layer.h b/src/nlr/Layer.h index 900276eda3..d84237f2f1 100644 --- a/src/nlr/Layer.h +++ b/src/nlr/Layer.h @@ -58,6 +58,7 @@ class Layer Layer( const Layer *other ); Layer( unsigned index, Type type, unsigned size, LayerOwner *layerOwner ); ~Layer(); + void setBounds( unsigned int neuron, double lower, double upper ); void setLayerOwner( LayerOwner *layerOwner ); void addSourceLayer( unsigned layerNumber, unsigned layerSize ); diff --git a/src/nlr/NetworkLevelReasoner.cpp b/src/nlr/NetworkLevelReasoner.cpp index 8390a0190b..b8774af74d 100644 --- a/src/nlr/NetworkLevelReasoner.cpp +++ b/src/nlr/NetworkLevelReasoner.cpp @@ -34,13 
+34,15 @@ #include -#define NLR_LOG( x, ... ) LOG( GlobalConfiguration::NETWORK_LEVEL_REASONER_LOGGING, "NLR: %s\n", x ) +#define NLR_LOG( x, ... ) \ + MARABOU_LOG( GlobalConfiguration::NETWORK_LEVEL_REASONER_LOGGING, "NLR: %s\n", x ) namespace NLR { NetworkLevelReasoner::NetworkLevelReasoner() : _tableau( NULL ) , _deepPolyAnalysis( nullptr ) + , _alphaCrown( nullptr ) { } @@ -127,8 +129,17 @@ void NetworkLevelReasoner::evaluate( double *input, double *output ) const Layer *outputLayer = _layerIndexToLayer[_layerIndexToLayer.size() - 1]; memcpy( output, outputLayer->getAssignment(), sizeof( double ) * outputLayer->getSize() ); } +void NetworkLevelReasoner::setBounds( unsigned layer, + unsigned int neuron, + double lower, + double upper ) +{ + ASSERT( layer < _layerIndexToLayer.size() ); + _layerIndexToLayer[layer]->setBounds( neuron, lower, upper ); +} -void NetworkLevelReasoner::concretizeInputAssignment( Map &assignment ) +void NetworkLevelReasoner::concretizeInputAssignment( Map &assignment, + const double *pgdAdversarialInput ) { Layer *inputLayer = _layerIndexToLayer[0]; ASSERT( inputLayer->getLayerType() == Layer::INPUT ); @@ -145,6 +156,8 @@ void NetworkLevelReasoner::concretizeInputAssignment( Map &ass { unsigned variable = inputLayer->neuronToVariable( index ); double value = _tableau->getValue( variable ); + if ( pgdAdversarialInput ) + value = pgdAdversarialInput[index]; input[index] = value; assignment[variable] = value; } @@ -200,6 +213,8 @@ void NetworkLevelReasoner::clearConstraintTightenings() void NetworkLevelReasoner::symbolicBoundPropagation() { + _boundTightenings.clear(); + for ( unsigned i = 0; i < _layerIndexToLayer.size(); ++i ) _layerIndexToLayer[i]->computeSymbolicBounds(); } @@ -211,6 +226,15 @@ void NetworkLevelReasoner::deepPolyPropagation() _deepPolyAnalysis->run(); } +void NetworkLevelReasoner::alphaCrown() +{ +#ifdef BUILD_TORCH + if ( _alphaCrown == nullptr ) + _alphaCrown = std::unique_ptr( new AlphaCrown( this ) ); + 
_alphaCrown->run(); +#endif +} + void NetworkLevelReasoner::lpRelaxationPropagation() { LPFormulator lpFormulator( this ); diff --git a/src/nlr/NetworkLevelReasoner.h b/src/nlr/NetworkLevelReasoner.h index 2660795be6..95d292b8a6 100644 --- a/src/nlr/NetworkLevelReasoner.h +++ b/src/nlr/NetworkLevelReasoner.h @@ -16,6 +16,7 @@ #ifndef __NetworkLevelReasoner_h__ #define __NetworkLevelReasoner_h__ +#include "AlphaCrown.h" #include "DeepPolyAnalysis.h" #include "ITableau.h" #include "Layer.h" @@ -74,12 +75,14 @@ class NetworkLevelReasoner : public LayerOwner Perform an evaluation of the network for a specific input. */ void evaluate( double *input, double *output ); + void setBounds( unsigned layer, unsigned int neuron, double lower, double upper ); /* Perform an evaluation of the network for the current input variable assignment and store the resulting variable assignment in the assignment. */ - void concretizeInputAssignment( Map &assignment ); + void concretizeInputAssignment( Map &assignment, + const double *pgdAdversarialInput = nullptr ); /* Perform a simulation of the network for a specific input @@ -124,6 +127,7 @@ class NetworkLevelReasoner : public LayerOwner void intervalArithmeticBoundPropagation(); void symbolicBoundPropagation(); void deepPolyPropagation(); + void alphaCrown(); void lpRelaxationPropagation(); void LPTighteningForOneLayer( unsigned targetIndex ); void MILPPropagation(); @@ -209,6 +213,7 @@ class NetworkLevelReasoner : public LayerOwner std::unique_ptr _deepPolyAnalysis; + std::unique_ptr _alphaCrown; void freeMemoryIfNeeded(); diff --git a/src/nlr/tests/Test_AlphaCrown.h b/src/nlr/tests/Test_AlphaCrown.h new file mode 100644 index 0000000000..1b6c7fbd4a --- /dev/null +++ b/src/nlr/tests/Test_AlphaCrown.h @@ -0,0 +1,211 @@ +// +// Created by maya-swisa on 8/6/25. 
+// + +#ifndef TEST_ALPHACROWN_H +#define TEST_ALPHACROWN_H + +#include "../../engine/tests/MockTableau.h" +#include "AcasParser.h" +#include "CWAttack.h" +#include "Engine.h" +#include "InputQuery.h" +#include "Layer.h" +#include "NetworkLevelReasoner.h" +#include "PropertyParser.h" +#include "Tightening.h" + +#include +#include + +class AlphaCrownAnalysisTestSuite : public CxxTest::TestSuite +{ +public: + void setUp() + { + } + + void tearDown() + { + } + + // void testWithAttack() + // { + // #ifdef BUILD_TORCH + // + // auto networkFilePath = "../../../resources/nnet/acasxu/" + // "ACASXU_experimental_v2a_1_1.nnet"; + // auto propertyFilePath = "../../../resources/properties/" + // "acas_property_4.txt"; + // + // auto *_acasParser = new AcasParser( networkFilePath ); + // InputQuery _inputQuery; + // _acasParser->generateQuery( _inputQuery ); + // PropertyParser().parse( propertyFilePath, _inputQuery ); + // std::unique_ptr _engine = std::make_unique(); + // Options *options = Options::get(); + // options->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "alphacrown" ); + // // obtain the alpha crown proceeder + // _engine->processInputQuery( _inputQuery ); + // NLR::NetworkLevelReasoner *_networkLevelReasoner = + // _engine->getNetworkLevelReasoner(); TS_ASSERT_THROWS_NOTHING( + // _networkLevelReasoner->obtainCurrentBounds() ); std::unique_ptr cwAttack = + // std::make_unique( _networkLevelReasoner ); auto + // attackResultAfterBoundTightening = cwAttack->runAttack(); TS_ASSERT( + // !attackResultAfterBoundTightening ); delete _acasParser; + + void populateNetwork( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 R 1 R 1 1 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 1 \ / 0 \ / + \/ \/ \/ + /\ /\ /\ + 1 / \ 1 / \ 1 / \ + / \ R / \ R / 1 \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + -1 -1 + + The example described in Fig. 
3 of + https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, 1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 0 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + + nlr.setBias( 5, 0, 1 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + 
tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void test_alphacrown_relus() + { + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetwork( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke alpha crow + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.alphaCrown() ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + for ( const auto &bound : bounds ) + { + if ( bound._type == Tightening::LB ) + printf( "lower:\n" ); + else + printf( "upper:\n" ); + std::cout << "var : " << bound._variable << " bound : " << bound._value << std::endl; + } + + double large = 1000000; + nlr.setBounds( nlr.getNumberOfLayers() -1 , 1, 0.1 , large ); + std::unique_ptr cwAttack = std::make_unique( &nlr ); + auto attackResultAfterBoundTightening = cwAttack->runAttack(); + TS_ASSERT( !attackResultAfterBoundTightening ); + + nlr.setBounds( nlr.getNumberOfLayers() -1 , 1, -large , -0.1 ); + cwAttack = std::make_unique( &nlr ); + attackResultAfterBoundTightening = cwAttack->runAttack(); + TS_ASSERT( 
!attackResultAfterBoundTightening ); + + nlr.setBounds( nlr.getNumberOfLayers() -1 , 0 , -large , 0.99 ); + cwAttack = std::make_unique( &nlr ); + attackResultAfterBoundTightening = cwAttack->runAttack(); + TS_ASSERT( !attackResultAfterBoundTightening ); + + nlr.setBounds( nlr.getNumberOfLayers() -1 , 0 , 1.1 , large ); + cwAttack = std::make_unique( &nlr ); + attackResultAfterBoundTightening = cwAttack->runAttack(); + TS_ASSERT( !attackResultAfterBoundTightening ); + + + } +}; + +#endif // TEST_ALPHACROWN_H diff --git a/src/nlr/tests/Test_NetworkLevelReasoner.h b/src/nlr/tests/Test_NetworkLevelReasoner.h index b74cd3931c..9ac9e083f3 100644 --- a/src/nlr/tests/Test_NetworkLevelReasoner.h +++ b/src/nlr/tests/Test_NetworkLevelReasoner.h @@ -7610,7 +7610,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Map assignment; - TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) ); + TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment, nullptr ) ); TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 1 ) ); TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 4 ) ); @@ -7623,7 +7623,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.nextValues[0] = 1; tableau.nextValues[1] = 1; - TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) ); + TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment, nullptr ) ); TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 1 ) ); TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 1 ) ); @@ -7635,7 +7635,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.nextValues[0] = 1; tableau.nextValues[1] = 2; - TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) ); + TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment, nullptr ) ); TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 0 ) ); 
TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 0 ) ); diff --git a/src/query_loader/QueryLoader.h b/src/query_loader/QueryLoader.h index 195a002c47..c8d1c0732c 100644 --- a/src/query_loader/QueryLoader.h +++ b/src/query_loader/QueryLoader.h @@ -19,7 +19,7 @@ #include "IQuery.h" -#define QL_LOG( x, ... ) LOG( GlobalConfiguration::QUERY_LOADER_LOGGING, "QueryLoader: %s\n", x ) +#define QL_LOG( x, ... ) MARABOU_LOG( GlobalConfiguration::QUERY_LOADER_LOGGING, "QueryLoader: %s\n", x ) class QueryLoader { diff --git a/tools/download_libtorch.sh b/tools/download_libtorch.sh new file mode 100755 index 0000000000..90f1884b42 --- /dev/null +++ b/tools/download_libtorch.sh @@ -0,0 +1,19 @@ +#!/bin/bash +curdir=$PWD +mydir="${0%/*}" +version=$1 + +cd "$mydir" + +# Need to download the cxx11-abi version of libtorch in order to ensure compatibility +# with boost. +# +# See https://discuss.pytorch.org/t/issues-linking-with-libtorch-c-11-abi/29510 for details. +echo "Downloading PyTorch" +wget https://download.pytorch.org/libtorch/cpu/libtorch-cxx11-abi-shared-with-deps-$version%2Bcpu.zip -O libtorch-$version.zip -q --show-progress --progress=bar:force:noscroll + +echo "Unzipping PyTorch" +unzip libtorch-$version.zip >> /dev/null +mv libtorch libtorch-$version + +cd "$curdir"