From bf326e902f34011a96be89913f2e0f3e2767305f Mon Sep 17 00:00:00 2001 From: avi_porges Date: Sun, 27 Jul 2025 14:09:36 +0300 Subject: [PATCH 01/33] will work --- src/engine/CustomDNN.cpp | 288 +++++++++++++++++++++++++++++++++ src/engine/CustomDNN.h | 101 ++++++++++++ src/nlr/AlphaCrown.cpp | 221 +++++++++++++++++++++++++ src/nlr/AlphaCrown.h | 69 ++++++++ src/nlr/NetworkLevelReasoner.h | 3 + 5 files changed, 682 insertions(+) create mode 100644 src/engine/CustomDNN.cpp create mode 100644 src/engine/CustomDNN.h create mode 100644 src/nlr/AlphaCrown.cpp create mode 100644 src/nlr/AlphaCrown.h diff --git a/src/engine/CustomDNN.cpp b/src/engine/CustomDNN.cpp new file mode 100644 index 0000000000..9f05feeca9 --- /dev/null +++ b/src/engine/CustomDNN.cpp @@ -0,0 +1,288 @@ +#include "CustomDNN.h" +#include "Vector.h" +#ifdef BUILD_TORCH + +CustomRelu::CustomRelu( const NLR::NetworkLevelReasoner *nlr, unsigned layerIndex ) + : _networkLevelReasoner( nlr ) + , _reluLayerIndex( layerIndex ) +{ +} + +torch::Tensor CustomRelu::forward( torch::Tensor x ) const +{ + return CustomReluFunction::apply( x, _networkLevelReasoner, _reluLayerIndex ); +} + +CustomMaxPool::CustomMaxPool( const NLR::NetworkLevelReasoner *nlr, unsigned layerIndex ) + : _networkLevelReasoner( nlr ) + , _maxLayerIndex( layerIndex ) +{ +} + +torch::Tensor CustomMaxPool::forward( torch::Tensor x ) const +{ + return CustomMaxPoolFunction::apply( x, _networkLevelReasoner, _maxLayerIndex ); +} + +void CustomDNN::setWeightsAndBiases( torch::nn::Linear &linearLayer, + const NLR::Layer *layer, + unsigned sourceLayer, + unsigned inputSize, + unsigned outputSize ) +{ + Vector> layerWeights( outputSize, Vector( inputSize ) ); + Vector layerBiases( outputSize ); + + // Fetch weights and biases from networkLevelReasoner + for ( unsigned j = 0; j < outputSize; j++ ) + { + for ( unsigned k = 0; k < inputSize; k++ ) + { + double weight_value = layer->getWeight( sourceLayer, k, j ); + layerWeights[j][k] = static_cast( 
weight_value ); + } + double bias_value = layer->getBias( j ); + layerBiases[j] = static_cast( bias_value ); + } + + Vector flattenedWeights; + for ( const auto &weight : layerWeights ) + { + for ( const auto &w : weight ) + { + flattenedWeights.append( w ); + } + } + + torch::Tensor weightTensor = torch::tensor( flattenedWeights.getContainer(), torch::kFloat ) + .view( { outputSize, inputSize } ); + torch::Tensor biasTensor = torch::tensor( layerBiases.getContainer(), torch::kFloat ); + + torch::NoGradGuard no_grad; + linearLayer->weight.set_( weightTensor ); + linearLayer->bias.set_( biasTensor ); +} + +void CustomDNN::weightedSum( unsigned i, const NLR::Layer *layer ) +{ + unsigned sourceLayer = i - 1; + const NLR::Layer *prevLayer = _networkLevelReasoner->getLayer( sourceLayer ); + unsigned inputSize = prevLayer->getSize(); + unsigned outputSize = layer->getSize(); + + if ( outputSize > 0 ) + { + auto linearLayer = torch::nn::Linear( torch::nn::LinearOptions( inputSize, outputSize ) ); + _linearLayers.append( linearLayer ); + + setWeightsAndBiases( linearLayer, layer, sourceLayer, inputSize, outputSize ); + + register_module( "linear" + std::to_string( i ), linearLayer ); + } +} + + +CustomDNN::CustomDNN( const NLR::NetworkLevelReasoner *nlr ) +{ + CUSTOM_DNN_LOG( "----- Construct Custom Network -----" ); + _networkLevelReasoner = nlr; + _numberOfLayers = _networkLevelReasoner->getNumberOfLayers(); + for ( unsigned i = 0; i < _numberOfLayers; i++ ) + { + const NLR::Layer *layer = _networkLevelReasoner->getLayer( i ); + _layerSizes.append( layer->getSize() ); + NLR::Layer::Type layerType = layer->getLayerType(); + _layersOrder.append( layerType ); + switch ( layerType ) + { + case NLR::Layer::INPUT: + break; + case NLR::Layer::WEIGHTED_SUM: + weightedSum( i, layer ); + break; + case NLR::Layer::RELU: + { + auto reluLayer = std::make_shared( _networkLevelReasoner, i ); + _reluLayers.append( reluLayer ); + register_module( "ReLU" + std::to_string( i ), reluLayer 
); + break; + } + case NLR::Layer::MAX: + { + auto maxPoolLayer = std::make_shared( _networkLevelReasoner, i ); + _maxPoolLayers.append( maxPoolLayer ); + register_module( "maxPool" + std::to_string( i ), maxPoolLayer ); + break; + } + default: + CUSTOM_DNN_LOG( "Unsupported layer type\n" ); + throw MarabouError( MarabouError::DEBUGGING_ERROR ); + } + } +} + +torch::Tensor CustomDNN::forward( torch::Tensor x ) +{ + unsigned linearIndex = 0; + unsigned reluIndex = 0; + unsigned maxPoolIndex = 0; + for ( unsigned i = 0; i < _numberOfLayers; i++ ) + { + const NLR::Layer::Type layerType = _layersOrder[i]; + switch ( layerType ) + { + case NLR::Layer::INPUT: + break; + case NLR::Layer::WEIGHTED_SUM: + x = _linearLayers[linearIndex]->forward( x ); + linearIndex++; + break; + case NLR::Layer::RELU: + x = _reluLayers[reluIndex]->forward( x ); + reluIndex++; + break; + case NLR::Layer::MAX: + x = _maxPoolLayers[maxPoolIndex]->forward( x ); + maxPoolIndex++; + break; + default: + CUSTOM_DNN_LOG( "Unsupported layer type\n" ); + throw MarabouError( MarabouError::DEBUGGING_ERROR ); + break; + } + } + return x; +} + +torch::Tensor CustomReluFunction::forward( torch::autograd::AutogradContext *ctx, + torch::Tensor x, + const NLR::NetworkLevelReasoner *nlr, + unsigned int layerIndex ) +{ + ctx->save_for_backward( { x } ); + + const NLR::Layer *layer = nlr->getLayer( layerIndex ); + torch::Tensor reluOutputs = torch::zeros( { 1, layer->getSize() } ); + torch::Tensor reluGradients = torch::zeros( { 1, layer->getSize() } ); + + for ( unsigned neuron = 0; neuron < layer->getSize(); ++neuron ) + { + auto sources = layer->getActivationSources( neuron ); + ASSERT( sources.size() == 1 ); + const NLR::NeuronIndex &sourceNeuron = sources.back(); + int index = static_cast( sourceNeuron._neuron ); + reluOutputs.index_put_( { 0, static_cast( neuron ) }, + torch::clamp_min( x.index( { 0, index } ), 0 ) ); + reluGradients.index_put_( { 0, static_cast( neuron ) }, x.index( { 0, index } ) > 0 ); + 
} + + ctx->saved_data["reluGradients"] = reluGradients; + + return reluOutputs; +} + +std::vector CustomReluFunction::backward( torch::autograd::AutogradContext *ctx, + std::vector grad_output ) +{ + auto saved = ctx->get_saved_variables(); + auto input = saved[0]; + + auto reluGradients = ctx->saved_data["reluGradients"].toTensor(); + auto grad_input = grad_output[0] * reluGradients[0]; + + return { grad_input, torch::Tensor(), torch::Tensor() }; +} + +torch::Tensor CustomMaxPoolFunction::forward( torch::autograd::AutogradContext *ctx, + torch::Tensor x, + const NLR::NetworkLevelReasoner *nlr, + unsigned int layerIndex ) +{ + ctx->save_for_backward( { x } ); + + const NLR::Layer *layer = nlr->getLayer( layerIndex ); + torch::Tensor maxOutputs = torch::zeros( { 1, layer->getSize() } ); + torch::Tensor argMaxOutputs = torch::zeros( { 1, layer->getSize() }, torch::kInt64 ); + + for ( unsigned neuron = 0; neuron < layer->getSize(); ++neuron ) + { + auto sources = layer->getActivationSources( neuron ); + torch::Tensor sourceValues = torch::zeros( sources.size(), torch::kFloat ); + torch::Tensor sourceIndices = torch::zeros( sources.size() ); + + for ( int i = sources.size() - 1; i >= 0; --i ) + { + const NLR::NeuronIndex &activationNeuron = sources.back(); + int index = static_cast( activationNeuron._neuron ); + sources.popBack(); + sourceValues.index_put_( { i }, x.index( { 0, index } ) ); + sourceIndices.index_put_( { i }, index ); + } + + maxOutputs.index_put_( { 0, static_cast( neuron ) }, torch::max( sourceValues ) ); + argMaxOutputs.index_put_( { 0, static_cast( neuron ) }, + sourceIndices.index( { torch::argmax( sourceValues ) } ) ); + } + + ctx->saved_data["argMaxOutputs"] = argMaxOutputs; + + return maxOutputs; +} + +std::vector CustomMaxPoolFunction::backward( torch::autograd::AutogradContext *ctx, + std::vector grad_output ) +{ + auto saved = ctx->get_saved_variables(); + auto input = saved[0]; + + auto grad_input = torch::zeros_like( input ); + + auto 
indices = ctx->saved_data["argMaxOutputs"].toTensor(); + + grad_input[0].index_add_( 0, indices.flatten(), grad_output[0].flatten() ); + + return { grad_input, torch::Tensor(), torch::Tensor() }; +} + +const Vector &CustomDNN::getLayerSizes() const +{ + return _layerSizes; +} + +torch::Tensor CustomDNN::getLayerWeights(unsigned layerIndex) const { + if (_layersOrder[layerIndex] == NLR::Layer::WEIGHTED_SUM) { + auto linearLayer = _linearLayers[layerIndex]; + return linearLayer->weight; // Returning weights of the corresponding linear layer + } + throw std::runtime_error("Requested weights for a non-weighted sum layer."); +} + +torch::Tensor CustomDNN::getLayerBias(unsigned layerIndex) const { + if (_layersOrder[layerIndex] == NLR::Layer::WEIGHTED_SUM) { + auto linearLayer = _linearLayers[layerIndex]; + return linearLayer->bias; // Returning bias of the corresponding linear layer + } + throw std::runtime_error("Requested bias for a non-weighted sum layer."); +} + +void CustomDNN::getInputBounds(torch::Tensor &lbTensor, torch::Tensor &ubTensor) const +{ + const NLR::Layer *layer = _networkLevelReasoner->getLayer(0); + unsigned size = layer->getSize(); + + std::vector lowerBounds; + std::vector upperBounds; + lowerBounds.reserve(size); + upperBounds.reserve(size); + + for (unsigned neuron = 0; neuron < size; ++neuron) + { + lowerBounds.push_back(layer->getLb(neuron)); + upperBounds.push_back(layer->getUb(neuron)); + } + + lbTensor = torch::tensor(lowerBounds, torch::kDouble); + ubTensor = torch::tensor(upperBounds, torch::kDouble); +} + +#endif \ No newline at end of file diff --git a/src/engine/CustomDNN.h b/src/engine/CustomDNN.h new file mode 100644 index 0000000000..577fa2966d --- /dev/null +++ b/src/engine/CustomDNN.h @@ -0,0 +1,101 @@ +#ifndef __CustomDNN_h__ +#define __CustomDNN_h__ + +#undef Warning +#include + +#include "NetworkLevelReasoner.h" +#include + +#define CUSTOM_DNN_LOG( x, ... 
) \ + MARABOU_LOG( GlobalConfiguration::CUSTOM_DNN_LOGGING, "customDNN: %s\n", x ) + +/* + Custom differentiation function for ReLU, implementing the forward and backward propagation + for the ReLU operation according to each variable's source layer as defined in the nlr. +*/ +class CustomReluFunction : public torch::autograd::Function +{ +public: + static torch::Tensor forward( torch::autograd::AutogradContext *ctx, + torch::Tensor x, + const NLR::NetworkLevelReasoner *nlr, + unsigned layerIndex ); + + static std::vector backward( torch::autograd::AutogradContext *ctx, + std::vector grad_output ); +}; + +class CustomRelu : public torch::nn::Module +{ +public: + CustomRelu( const NLR::NetworkLevelReasoner *nlr, unsigned layerIndex ); + torch::Tensor forward( torch::Tensor x ) const; + +private: + const NLR::NetworkLevelReasoner *_networkLevelReasoner; + unsigned _reluLayerIndex; +}; + +/* + Custom differentiation function for max pooling, implementing the forward and backward propagation + for the max pooling operation according to each variable's source layer as defined in the nlr. +*/ +class CustomMaxPoolFunction : public torch::autograd::Function +{ +public: + static torch::Tensor forward( torch::autograd::AutogradContext *ctx, + torch::Tensor x, + const NLR::NetworkLevelReasoner *nlr, + unsigned layerIndex ); + + static std::vector backward( torch::autograd::AutogradContext *ctx, + std::vector grad_output ); +}; + +class CustomMaxPool : public torch::nn::Module +{ +public: + CustomMaxPool( const NLR::NetworkLevelReasoner *nlr, unsigned layerIndex ); + torch::Tensor forward( torch::Tensor x ) const; + +private: + const NLR::NetworkLevelReasoner *_networkLevelReasoner; + unsigned _maxLayerIndex; +}; + +/* + torch implementation of the network according to the nlr. 
+ */ +class CustomDNN : public torch::nn::Module +{ +public: + static void setWeightsAndBiases( torch::nn::Linear &linearLayer, + const NLR::Layer *layer, + unsigned sourceLayer, + unsigned inputSize, + unsigned outputSize ); + void weightedSum( unsigned i, const NLR::Layer *layer ); + explicit CustomDNN( const NLR::NetworkLevelReasoner *networkLevelReasoner ); + torch::Tensor getLayerWeights(unsigned layerIndex) const; + torch::Tensor getLayerBias(unsigned layerIndex) const; + torch::Tensor forward( torch::Tensor x ); + const Vector &getLayerSizes() const; + void getInputBounds(torch::Tensor &lbTensor, torch::Tensor &ubTensor)const; + Vector getLinearLayers() + { + return _linearLayers; + } + +private: + const NLR::NetworkLevelReasoner *_networkLevelReasoner; + Vector _layerSizes; + Vector> _reluLayers; + Vector> _maxPoolLayers; + Vector _linearLayers; + Vector _layersOrder; + unsigned _numberOfLayers; +}; + + +#endif // __CustomDNN_h__ diff --git a/src/nlr/AlphaCrown.cpp b/src/nlr/AlphaCrown.cpp new file mode 100644 index 0000000000..2d6e8bbd0f --- /dev/null +++ b/src/nlr/AlphaCrown.cpp @@ -0,0 +1,221 @@ +// +// Created by User on 7/23/2025. +// + +#include "AlphaCrown.h" + +#include "MStringf.h" + +namespace NLR { +AlphaCrown::AlphaCrown( LayerOwner *layerOwner ) + : _layerOwner( layerOwner ) +{ + _network = new CustomDNN( static_cast(_layerOwner) ); + _network->getInputBounds( _lbInput, _ubInput ); + _inputSize = _lbInput.size( 0 ); // TODO it that length of tensor? 
+ + _linearLayers = std::vector( _network->getLinearLayers().begin(), + _network->getLinearLayers().end() ); + for ( const auto &linearLayer : _linearLayers ) + { + _positiveWeights.push_back( torch::where( linearLayer->weight >= 0, + linearLayer->weight, + torch::zeros_like( linearLayer->weight ) ) ); + _negativeWeights.push_back( torch::where( linearLayer->weight <= 0, + linearLayer->weight, + torch::zeros_like( linearLayer->weight ) ) ); + _biases.push_back( linearLayer->bias ); + } +} + +torch::Tensor AlphaCrown::createSymbolicVariablesMatrix() +{ + return torch::cat( { torch::eye( _inputSize ), torch::zeros( { _inputSize, 1 } ) }, 1 ); +} + +torch::Tensor AlphaCrown::lower_ReLU_relaxation( const torch::Tensor &u, const torch::Tensor &l ) +{ + torch::Tensor mult; + mult = torch::where( u - l == 0, torch::tensor( 1.0 ), u / ( u - l ) ); + mult = torch::where( l >= 0, torch::tensor( 1.0 ), mult ); + mult = torch::where( u <= 0, torch::tensor( 0.0 ), mult ); + return mult; +} + +std::tuple AlphaCrown::upper_ReLU_relaxation( const torch::Tensor &u, + const torch::Tensor &l ) +{ + torch::Tensor mult = torch::where( u - l == 0, torch::tensor( 1.0 ), u / ( u - l ) ); + mult = torch::where( l >= 0, torch::tensor( 1.0 ), mult ); + mult = torch::where( u <= 0, torch::tensor( 0.0 ), mult ); + + torch::Tensor add = torch::where( u - l == 0, torch::tensor( 0.0 ), -l * mult ); + add = torch::where( l >= 0, torch::tensor( 0.0 ), add ); + + return std::make_tuple( mult, add ); +} +torch::Tensor AlphaCrown::getMaxOfSymbolicVariables( const torch::Tensor &matrix ) +{ + auto coefficients = matrix.index( + { torch::indexing::Slice(), torch::indexing::Slice( torch::indexing::None, -1 ) } ); + auto free_coefficients = matrix.index( { torch::indexing::Slice(), -1 } ); + + auto positive_mask = coefficients >= 0; + + torch::Tensor u_values = + torch::sum( torch::where( positive_mask, coefficients * _ubInput, coefficients * _lbInput ), + 1 ) + + free_coefficients; + + return u_values; 
+} + +torch::Tensor AlphaCrown::getMinOfSymbolicVariables( const torch::Tensor &matrix ) +{ + auto coefficients = matrix.index( + { torch::indexing::Slice(), torch::indexing::Slice( torch::indexing::None, -1 ) } ); + auto free_coefficients = matrix.index( { torch::indexing::Slice(), -1 } ); + + auto positive_mask = coefficients >= 0; + + torch::Tensor l_values = + torch::sum( torch::where( positive_mask, coefficients * _lbInput, coefficients * _ubInput ), + 1 ) + + free_coefficients; + + return l_values; +} + + +void AlphaCrown::findBounds() +{ + torch::Tensor EQ_up = createSymbolicVariablesMatrix(); + torch::Tensor EQ_low = createSymbolicVariablesMatrix(); + + for ( size_t i = 0; i < _linearLayers.size(); i++ ) + { + auto Wi_positive = _positiveWeights[i]; + auto Wi_negative = _negativeWeights[i]; + auto Bi = _biases[i]; + + auto EQ_up_before_activation = Wi_positive.mm( EQ_up ) + Wi_negative.mm( EQ_low ); + EQ_up_before_activation = + AlphaCrown::addVecToLastColumnValue( EQ_up_before_activation, Bi ); + + auto EQ_low_before_activation = Wi_positive.mm( EQ_low ) + Wi_negative.mm( EQ_up ); + EQ_low_before_activation = + AlphaCrown::addVecToLastColumnValue( EQ_low_before_activation, Bi ); + + if ( i == _linearLayers.size() - 1 ) + { + // TODO how can we know what layer it is in nlr? in order to update bounds there? + // we should get it from cDNN + + EQ_up = EQ_up_before_activation; + EQ_low = EQ_low_before_activation; + break; + } // TODO we can skip it??? + + + // TODO we can use u_values and l_values of EQ_up to compute upper relaxation? 
+ + auto u_values = AlphaCrown::getMaxOfSymbolicVariables( EQ_up_before_activation ); + auto l_values = AlphaCrown::getMinOfSymbolicVariables( EQ_low_before_activation ); + auto [upperRelaxationSlope, upperRelaxationIntercept] = + AlphaCrown::upper_ReLU_relaxation( l_values, u_values ); + auto alphaSlope = AlphaCrown::lower_ReLU_relaxation( l_values, u_values ); + + EQ_up = EQ_up_before_activation * upperRelaxationSlope.unsqueeze( 1 ); + EQ_up = AlphaCrown::addVecToLastColumnValue( EQ_up, upperRelaxationIntercept ); + EQ_low = EQ_low_before_activation * alphaSlope.unsqueeze( 1 ); + + _upperRelaxationSlopes.push_back( upperRelaxationSlope ); + _upperRelaxationIntercepts.push_back( upperRelaxationIntercept ); + _alphaSlopes.push_back( alphaSlope ); + } +} + +std::tuple +AlphaCrown::computeBounds( std::vector &alphaSlopes ) +{ + torch::Tensor EQ_up = createSymbolicVariablesMatrix(); + torch::Tensor EQ_low = createSymbolicVariablesMatrix(); + + for ( size_t i = 0; i < _linearLayers.size(); i++ ) + { + auto Wi_positive = _positiveWeights[i]; + auto Wi_negative = _negativeWeights[i]; + auto Bi = _biases[i]; + + auto EQ_up_before_activation = Wi_positive.mm( EQ_up ) + Wi_negative.mm( EQ_low ); + EQ_up_before_activation = + AlphaCrown::addVecToLastColumnValue( EQ_up_before_activation, Bi ); + + auto EQ_low_before_activation = Wi_positive.mm( EQ_low ) + Wi_negative.mm( EQ_up ); + EQ_low_before_activation = + AlphaCrown::addVecToLastColumnValue( EQ_low_before_activation, Bi ); + + if ( i == _linearLayers.size() - 1 ) + { + EQ_up = EQ_up_before_activation; + EQ_low = EQ_low_before_activation; + break; + } + + // TODO we can improve _upperRelaxationSlopes becouse we have better bound on each neuron + // in hidden layer. 
if so we need to use it as an argument on each iteration becose it + // not constant + + EQ_up = EQ_up_before_activation * _upperRelaxationSlopes[i].unsqueeze( 1 ); // + EQ_up = AlphaCrown::addVecToLastColumnValue( EQ_up, _upperRelaxationIntercepts[i] ); + EQ_low = EQ_low_before_activation * alphaSlopes[i].unsqueeze( 1 ); + } + auto up_bound = getMaxOfSymbolicVariables( EQ_up ); + auto low_bound = getMinOfSymbolicVariables( EQ_low ); + return std::make_tuple( up_bound, low_bound ); +} + +void AlphaCrown::optimizeBounds( int loops ) +{ + std::vector alphaSlopesForUpBound; + std::vector alphaSlopesForLowBound; + for ( auto &tensor : _alphaSlopes ) + { + alphaSlopesForUpBound.push_back( tensor.copy_( tensor.detach().requires_grad_( true ) ) ); + alphaSlopesForLowBound.push_back( tensor.copy_( tensor.detach().requires_grad_( true ) ) ); + } + AlphaCrown::GDloop( loops, "max", alphaSlopesForUpBound ); + AlphaCrown::GDloop( loops, "min", alphaSlopesForLowBound ); +} + + +void AlphaCrown::GDloop( int loops, + const std::string val_to_opt, + std::vector &alphaSlopes ) +{ + torch::optim::Adam optimizer( alphaSlopes, 0.005 ); + for ( int i = 0; i < loops; i++ ) + { + optimizer.zero_grad(); + + auto [max_val, min_val] = AlphaCrown::computeBounds( alphaSlopes ); + auto loss = ( val_to_opt == "max" ) ? 
max_val : -min_val; + loss.backward(); + optimizer.step(); + + for ( auto &tensor : alphaSlopes ) + { + tensor.clamp_( 0, 1 ); + } + + log( Stringf( "Optimization loop %d completed", i + 1 ) ); + } +} + + +void AlphaCrown::log( const String &message ) +{ + if ( GlobalConfiguration::NETWORK_LEVEL_REASONER_LOGGING ) + printf( "DeepPolyAnalysis: %s\n", message.ascii() ); +} +} // namespace NLR diff --git a/src/nlr/AlphaCrown.h b/src/nlr/AlphaCrown.h new file mode 100644 index 0000000000..67bfbaa36a --- /dev/null +++ b/src/nlr/AlphaCrown.h @@ -0,0 +1,69 @@ + +#ifndef _ALPHACROWN_H_ +#define _ALPHACROWN_H_ + +#include "CustomDNN.h" +#include "LayerOwner.h" +#include "NetworkLevelReasoner.h" +#include + +#undef Warning +#include + +namespace NLR { +class AlphaCrown +{ +public: + AlphaCrown( LayerOwner *layerOwner ); + + void findBounds(); + void optimizeBounds( int loops = 50 ); + void run(){ + findBounds(); + optimizeBounds(2); + } + +private: + LayerOwner *_layerOwner; + CustomDNN *_network; + void GDloop( int loops, const std::string val_to_opt, std::vector &alphaSlopes ); + std::tuple + computeBounds( std::vector &alphaSlopes ); + int _inputSize; + torch::Tensor _lbInput; + torch::Tensor _ubInput; + + std::vector _linearLayers; + std::vector _positiveWeights; + std::vector _negativeWeights; + std::vector _biases; + + std::vector _upperRelaxationSlopes; + std::vector _upperRelaxationIntercepts; + + std::vector _alphaSlopes; + + torch::Tensor createSymbolicVariablesMatrix(); + + static torch::Tensor addVecToLastColumnValue( const torch::Tensor &matrix, + const torch::Tensor &vec ) + { + auto result = matrix.clone(); + result.slice( 1, result.size( 1 ) - 1, result.size( 1 ) ) += vec.unsqueeze( 1 ); + return result; + } + static torch::Tensor lower_ReLU_relaxation( const torch::Tensor &u, const torch::Tensor &l ); + + static std::tuple upper_ReLU_relaxation( const torch::Tensor &u, + const torch::Tensor &l ); + + torch::Tensor getMaxOfSymbolicVariables( const 
torch::Tensor &matrix ); + torch::Tensor getMinOfSymbolicVariables( const torch::Tensor &matrix ); + + + void log( const String &message ); +}; +} // namespace NLR + + +#endif //_ALPHACROWN_H_ diff --git a/src/nlr/NetworkLevelReasoner.h b/src/nlr/NetworkLevelReasoner.h index 2660795be6..25ae75941b 100644 --- a/src/nlr/NetworkLevelReasoner.h +++ b/src/nlr/NetworkLevelReasoner.h @@ -16,6 +16,7 @@ #ifndef __NetworkLevelReasoner_h__ #define __NetworkLevelReasoner_h__ +#include "AlphaCrown.h" #include "DeepPolyAnalysis.h" #include "ITableau.h" #include "Layer.h" @@ -209,6 +210,7 @@ class NetworkLevelReasoner : public LayerOwner std::unique_ptr _deepPolyAnalysis; + std::unique_ptr _alphaCrown; void freeMemoryIfNeeded(); @@ -255,6 +257,7 @@ class NetworkLevelReasoner : public LayerOwner to all neurons in the network */ void reindexNeurons(); + void alphaCrown(); }; } // namespace NLR From 0b29884ce23036455b81192381cf083e0d61cfb6 Mon Sep 17 00:00:00 2001 From: mayaswissa Date: Sun, 27 Jul 2025 14:26:08 +0300 Subject: [PATCH 02/33] torch in CMakeLists.txt --- CMakeLists.txt | 365 ++++++++++++++++++++++++++++--------------------- 1 file changed, 208 insertions(+), 157 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index e55a57852a..ae5639f51b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required (VERSION 3.16) +cmake_minimum_required(VERSION 3.16) project(Marabou) set(MARABOU_VERSION 2.0.0) @@ -22,7 +22,8 @@ option(RUN_PYTHON_TEST "Run Python API tests if building with Python" OFF) option(ENABLE_GUROBI "Enable use the Gurobi optimizer" OFF) option(ENABLE_OPENBLAS "Do symbolic bound tighting using blas" ON) # Not available on Windows option(CODE_COVERAGE "Add code coverage" OFF) # Available only in debug mode - +option(BUILD_CADICAL "Build the CaDiCaL SAT solver for CDCL solving" ON) +option(BUILD_TORCH "Build libtorch" ON) ################### ## Git variables ## ################### @@ -30,19 +31,19 @@ option(CODE_COVERAGE "Add 
code coverage" OFF) # Available only in debug mode # Get the name of the working branch execute_process( - COMMAND git rev-parse --abbrev-ref HEAD - WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} - OUTPUT_VARIABLE GIT_BRANCH - OUTPUT_STRIP_TRAILING_WHITESPACE + COMMAND git rev-parse --abbrev-ref HEAD + WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} + OUTPUT_VARIABLE GIT_BRANCH + OUTPUT_STRIP_TRAILING_WHITESPACE ) add_definitions("-DGIT_BRANCH=\"${GIT_BRANCH}\"") # Get the latest abbreviated commit hash of the working branch execute_process( - COMMAND git log -1 --format=%h - WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} - OUTPUT_VARIABLE GIT_COMMIT_HASH - OUTPUT_STRIP_TRAILING_WHITESPACE + COMMAND git log -1 --format=%h + WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} + OUTPUT_VARIABLE GIT_COMMIT_HASH + OUTPUT_STRIP_TRAILING_WHITESPACE ) add_definitions("-DGIT_COMMIT_HASH=\"${GIT_COMMIT_HASH}\"") @@ -60,10 +61,10 @@ set(COMMON_DIR "${SRC_DIR}/common") set(BASIS_DIR "${SRC_DIR}/basis_factorization") if (MSVC) - set(SCRIPT_EXTENSION bat) -else() - set(SCRIPT_EXTENSION sh) -endif() + set(SCRIPT_EXTENSION bat) +else () + set(SCRIPT_EXTENSION sh) +endif () ########## ## CVC4 ## @@ -85,21 +86,21 @@ add_definitions(-DBOOST_NO_CXX98_FUNCTION_BASE) set(BOOST_VERSION 1.84.0) set(BOOST_DIR "${TOOLS_DIR}/boost-${BOOST_VERSION}") if (MSVC) - set(BOOST_ROOT "${BOOST_DIR}/win_installed") - set(Boost_NAMESPACE libboost) + set(BOOST_ROOT "${BOOST_DIR}/win_installed") + set(Boost_NAMESPACE libboost) elseif (${CMAKE_SIZEOF_VOID_P} EQUAL 4 AND NOT MSVC) - set(BOOST_ROOT "${BOOST_DIR}/installed32") -else() - set(BOOST_ROOT "${BOOST_DIR}/installed") -endif() + set(BOOST_ROOT "${BOOST_DIR}/installed32") +else () + set(BOOST_ROOT "${BOOST_DIR}/installed") +endif () set(Boost_USE_DEBUG_RUNTIME FALSE) find_package(Boost ${BOOST_VERSION} COMPONENTS program_options timer chrono thread) # Find boost if (NOT ${Boost_FOUND}) - execute_process(COMMAND ${TOOLS_DIR}/download_boost.${SCRIPT_EXTENSION} ${BOOST_VERSION}) - 
find_package(Boost ${BOOST_VERSION} REQUIRED COMPONENTS program_options timer chrono thread regex) -endif() + execute_process(COMMAND ${TOOLS_DIR}/download_boost.${SCRIPT_EXTENSION} ${BOOST_VERSION}) + find_package(Boost ${BOOST_VERSION} REQUIRED COMPONENTS program_options timer chrono thread regex) +endif () set(LIBS_INCLUDES ${Boost_INCLUDE_DIRS}) list(APPEND LIBS ${Boost_LIBRARIES}) @@ -112,17 +113,17 @@ set(PROTOBUF_VERSION 3.19.2) set(PROTOBUF_DEFAULT_DIR "${TOOLS_DIR}/protobuf-${PROTOBUF_VERSION}") if (NOT PROTOBUF_DIR) set(PROTOBUF_DIR ${PROTOBUF_DEFAULT_DIR}) -endif() +endif () -if(NOT EXISTS "${PROTOBUF_DIR}/installed/lib/libprotobuf.a") +if (NOT EXISTS "${PROTOBUF_DIR}/installed/lib/libprotobuf.a") message("Can't find protobuf, installing. If protobuf is installed please use the PROTOBUF_DIR parameter to pass the path") if (${PROTOBUF_DIR} STREQUAL ${PROTOBUF_DEFAULT_DIR}) message("installing protobuf") execute_process(COMMAND ${TOOLS_DIR}/download_protobuf.sh ${PROTOBUF_VERSION}) - else() + else () message(FATAL_ERROR "Can't find protobuf in the supplied directory") - endif() -endif() + endif () +endif () set(PROTOBUF_LIB protobuf) add_library(${PROTOBUF_LIB} SHARED IMPORTED) @@ -139,75 +140,125 @@ list(APPEND LIBS ${PROTOBUF_LIB}) set(ONNX_VERSION 1.15.0) set(ONNX_DIR "${TOOLS_DIR}/onnx-${ONNX_VERSION}") -if(NOT EXISTS "${ONNX_DIR}/onnx.proto3.pb.h") +if (NOT EXISTS "${ONNX_DIR}/onnx.proto3.pb.h") message("generating ONNX protobuf file") execute_process(COMMAND ${TOOLS_DIR}/download_onnx.sh ${ONNX_VERSION} ${PROTOBUF_VERSION}) -endif() +endif () file(GLOB DEPS_ONNX "${ONNX_DIR}/*.cc") include_directories(SYSTEM ${ONNX_DIR}) +############# +## Pytorch ## +############# + +if (${BUILD_TORCH}) + message(STATUS "Using pytorch") + if (NOT DEFINED BUILD_TORCH) + set(BUILD_TORCH $ENV{TORCH_HOME}) + add_definitions(-DBUILD_TORCH) + endif () + add_compile_definitions(BUILD_TORCH) + set(PYTORCH_VERSION 2.2.1) + find_package(Torch ${PYTORCH_VERSION} QUIET) + if 
(NOT Torch_FOUND) + set(PYTORCH_DIR "${TOOLS_DIR}/libtorch-${PYTORCH_VERSION}") + list(APPEND CMAKE_PREFIX_PATH ${PYTORCH_DIR}) + if (NOT EXISTS "${PYTORCH_DIR}") + execute_process(COMMAND ${TOOLS_DIR}/download_libtorch.sh ${PYTORCH_VERSION}) + endif () + set(Torch_DIR ${PYTORCH_DIR}/share/cmake/Torch) + find_package(Torch ${PYTORCH_VERSION} REQUIRED) + endif () + set(TORCH_CXX_FLAGS "-Wno-error=array-bounds") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${TORCH_CXX_FLAGS}") + list(APPEND LIBS ${TORCH_LIBRARIES}) +endif () ############ ## Gurobi ## ############ if (${ENABLE_GUROBI}) - message(STATUS "Using Gurobi for LP relaxation for bound tightening") - if (NOT DEFINED GUROBI_DIR) - set(GUROBI_DIR $ENV{GUROBI_HOME}) - endif() - add_compile_definitions(ENABLE_GUROBI) + message(STATUS "Using Gurobi for LP relaxation for bound tightening") + if (NOT DEFINED GUROBI_DIR) + set(GUROBI_DIR $ENV{GUROBI_HOME}) + endif () + add_compile_definitions(ENABLE_GUROBI) - set(GUROBI_LIB1 "gurobi_c++") - set(GUROBI_LIB2 "gurobi110") + set(GUROBI_LIB1 "gurobi_c++") + set(GUROBI_LIB2 "gurobi110") - add_library(${GUROBI_LIB1} SHARED IMPORTED) - set_target_properties(${GUROBI_LIB1} PROPERTIES IMPORTED_LOCATION ${GUROBI_DIR}/lib/libgurobi_c++.a) - list(APPEND LIBS ${GUROBI_LIB1}) - target_include_directories(${GUROBI_LIB1} INTERFACE ${GUROBI_DIR}/include/) + add_library(${GUROBI_LIB1} SHARED IMPORTED) + set_target_properties(${GUROBI_LIB1} PROPERTIES IMPORTED_LOCATION ${GUROBI_DIR}/lib/libgurobi_c++.a) + list(APPEND LIBS ${GUROBI_LIB1}) + target_include_directories(${GUROBI_LIB1} INTERFACE ${GUROBI_DIR}/include/) - add_library(${GUROBI_LIB2} SHARED IMPORTED) + add_library(${GUROBI_LIB2} SHARED IMPORTED) - # MACOSx uses .dylib instead of .so for its Gurobi downloads. 
- if (APPLE) - set_target_properties(${GUROBI_LIB2} PROPERTIES IMPORTED_LOCATION ${GUROBI_DIR}/lib/libgurobi110.dylib) - else() - set_target_properties(${GUROBI_LIB2} PROPERTIES IMPORTED_LOCATION ${GUROBI_DIR}/lib/libgurobi110.so) - endif () + # MACOSx uses .dylib instead of .so for its Gurobi downloads. + if (APPLE) + set_target_properties(${GUROBI_LIB2} PROPERTIES IMPORTED_LOCATION ${GUROBI_DIR}/lib/libgurobi110.dylib) + else () + set_target_properties(${GUROBI_LIB2} PROPERTIES IMPORTED_LOCATION ${GUROBI_DIR}/lib/libgurobi110.so) + endif () - list(APPEND LIBS ${GUROBI_LIB2}) - target_include_directories(${GUROBI_LIB2} INTERFACE ${GUROBI_DIR}/include/) -endif() + list(APPEND LIBS ${GUROBI_LIB2}) + target_include_directories(${GUROBI_LIB2} INTERFACE ${GUROBI_DIR}/include/) +endif () ############## ## OpenBLAS ## ############## if (NOT MSVC AND ${ENABLE_OPENBLAS}) - set(OPENBLAS_VERSION 0.3.19) - - set(OPENBLAS_LIB openblas) - set(OPENBLAS_DEFAULT_DIR "${TOOLS_DIR}/OpenBLAS-${OPENBLAS_VERSION}") - if (NOT OPENBLAS_DIR) - set(OPENBLAS_DIR ${OPENBLAS_DEFAULT_DIR}) - endif() - - message(STATUS "Using OpenBLAS for matrix multiplication") - add_compile_definitions(ENABLE_OPENBLAS) - if(NOT EXISTS "${OPENBLAS_DIR}/installed/lib/libopenblas.a") - message("Can't find OpenBLAS, installing. 
If OpenBLAS is installed please use the OPENBLAS_DIR parameter to pass the path") - if (${OPENBLAS_DIR} STREQUAL ${OPENBLAS_DEFAULT_DIR}) - message("Installing OpenBLAS") - execute_process(COMMAND ${TOOLS_DIR}/download_openBLAS.sh ${OPENBLAS_VERSION}) - else() - message(FATAL_ERROR "Can't find OpenBLAS in the supplied directory") - endif() - endif() - - add_library(${OPENBLAS_LIB} SHARED IMPORTED) - set_target_properties(${OPENBLAS_LIB} PROPERTIES IMPORTED_LOCATION ${OPENBLAS_DIR}/installed/lib/libopenblas.a) - list(APPEND LIBS ${OPENBLAS_LIB}) - target_include_directories(${OPENBLAS_LIB} INTERFACE ${OPENBLAS_DIR}/installed/include) -endif() + set(OPENBLAS_VERSION 0.3.19) + + set(OPENBLAS_LIB openblas) + set(OPENBLAS_DEFAULT_DIR "${TOOLS_DIR}/OpenBLAS-${OPENBLAS_VERSION}") + if (NOT OPENBLAS_DIR) + set(OPENBLAS_DIR ${OPENBLAS_DEFAULT_DIR}) + endif () + + message(STATUS "Using OpenBLAS for matrix multiplication") + add_compile_definitions(ENABLE_OPENBLAS) + if (NOT EXISTS "${OPENBLAS_DIR}/installed/lib/libopenblas.a") + message("Can't find OpenBLAS, installing. 
If OpenBLAS is installed please use the OPENBLAS_DIR parameter to pass the path") + if (${OPENBLAS_DIR} STREQUAL ${OPENBLAS_DEFAULT_DIR}) + message("Installing OpenBLAS") + execute_process(COMMAND ${TOOLS_DIR}/download_openBLAS.sh ${OPENBLAS_VERSION}) + else () + message(FATAL_ERROR "Can't find OpenBLAS in the supplied directory") + endif () + endif () + + add_library(${OPENBLAS_LIB} SHARED IMPORTED) + set_target_properties(${OPENBLAS_LIB} PROPERTIES IMPORTED_LOCATION ${OPENBLAS_DIR}/installed/lib/libopenblas.a) + list(APPEND LIBS ${OPENBLAS_LIB}) + target_include_directories(${OPENBLAS_LIB} INTERFACE ${OPENBLAS_DIR}/installed/include) +endif () + +########## +## CaDiCaL ## +########## + +if (BUILD_CADICAL) + message(STATUS "Using CaDiCaL for CDCL solving") + if (NOT CADICAL_DIR) + set(CADICAL_DIR "${TOOLS_DIR}/cadical") + endif () + add_compile_definitions(BUILD_CADICAL) + + if (NOT EXISTS "${CADICAL_DIR}") + message(STATUS "Building CaDiCaL SAT Solver") + execute_process(COMMAND ${TOOLS_DIR}/download_cadical.sh) + endif () + + set(CADICAL_LIB cadical) + add_library(${CADICAL_LIB} SHARED IMPORTED) + set_property(TARGET ${CADICAL_LIB} PROPERTY POSITION_INDEPENDENT_CODE ON) + set_target_properties(${CADICAL_LIB} PROPERTIES IMPORTED_LOCATION ${CADICAL_DIR}/build/libcadical.a) + target_include_directories(${CADICAL_LIB} INTERFACE ${CADICAL_DIR}/src) + list(APPEND LIBS ${CADICAL_LIB}) +endif () ########### ## Build ## @@ -238,30 +289,30 @@ set(INPUT_PARSERS_DIR input_parsers) include(ProcessorCount) ProcessorCount(CTEST_NTHREADS) -if(CTEST_NTHREADS EQUAL 0) - set(CTEST_NTHREADS 1) -endif() +if (CTEST_NTHREADS EQUAL 0) + set(CTEST_NTHREADS 1) +endif () # --------------- set build type ---------------------------- set(BUILD_TYPES Release Debug MinSizeRel RelWithDebInfo) # Set the default build type to Production -if(NOT CMAKE_BUILD_TYPE) - set(CMAKE_BUILD_TYPE - Release CACHE STRING "Options are: Release Debug MinSizeRel RelWithDebInfo" FORCE) - # Provide drop down menu 
options in cmake-gui - set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS ${BUILD_TYPES}) -endif() +if (NOT CMAKE_BUILD_TYPE) + set(CMAKE_BUILD_TYPE + Release CACHE STRING "Options are: Release Debug MinSizeRel RelWithDebInfo" FORCE) + # Provide drop down menu options in cmake-gui + set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS ${BUILD_TYPES}) +endif () message(STATUS "Building ${CMAKE_BUILD_TYPE} build") #-------------------------set code coverage----------------------------------# # Allow coverage only in debug mode only in gcc -if(CODE_COVERAGE AND CMAKE_CXX_COMPILER_ID MATCHES "GNU" AND CMAKE_BUILD_TYPE MATCHES Debug) - message(STATUS "Building with code coverage") - set(COVERAGE_COMPILER_FLAGS "-g -O0 --coverage" CACHE INTERNAL "") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COVERAGE_COMPILER_FLAGS}") - set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} --coverage") -endif() +if (CODE_COVERAGE AND CMAKE_CXX_COMPILER_ID MATCHES "GNU" AND CMAKE_BUILD_TYPE MATCHES Debug) + message(STATUS "Building with code coverage") + set(COVERAGE_COMPILER_FLAGS "-g -O0 --coverage" CACHE INTERNAL "") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COVERAGE_COMPILER_FLAGS}") + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} --coverage") +endif () # We build a static library that is the core of the project, the link it to the # API's (executable and python at the moment) @@ -273,40 +324,40 @@ set(MARABOU_EXE Marabou${CMAKE_EXECUTABLE_SUFFIX}) add_executable(${MARABOU_EXE} "${ENGINE_DIR}/main.cpp") set(MARABOU_EXE_PATH "${BIN_DIR}/${MARABOU_EXE}") add_custom_command(TARGET ${MARABOU_EXE} POST_BUILD - COMMAND ${CMAKE_COMMAND} -E copy $ ${MARABOU_EXE_PATH} ) + COMMAND ${CMAKE_COMMAND} -E copy $ ${MARABOU_EXE_PATH}) set(MPS_PARSER_PATH "${BIN_DIR}/${MPS_PARSER}") if (NOT MSVC) if (CMAKE_CXX_COMPILER_ID MATCHES "Clang") - set(COMPILE_FLAGS -Wall -Wextra -Werror -MMD -Qunused-arguments -Wno-deprecated-declarations -Wno-unused-but-set-variable ) + set(COMPILE_FLAGS -Wall 
-Wextra -Werror -MMD -Qunused-arguments -Wno-deprecated-declarations -Wno-unused-but-set-variable) elseif (CMAKE_BUILD_TYPE MATCHES "Release") - set(COMPILE_FLAGS -Wall ) - else() - set(COMPILE_FLAGS -Wall -Wextra -Werror -MMD ) #-Wno-deprecated - endif() + set(COMPILE_FLAGS -Wall) + else () + set(COMPILE_FLAGS -Wall -Wextra -Werror -MMD) #-Wno-deprecated + endif () set(RELEASE_FLAGS ${COMPILE_FLAGS} -O3) #-Wno-deprecated -endif() +endif () if (RUN_MEMORY_TEST) - if(NOT MSVC) + if (NOT MSVC) set(MEMORY_FLAGS -fsanitize=address -fno-omit-frame-pointer -O1) - endif() -endif() + endif () +endif () add_definitions(-DRESOURCES_DIR="${RESOURCES_DIR}") if (NOT MSVC) set(DEBUG_FLAGS ${COMPILE_FLAGS} ${MEMORY_FLAGS} -g) - set(CXXTEST_FLAGS ${DEBUG_FLAGS} -Wno-ignored-qualifiers) -else() + set(CXXTEST_FLAGS ${DEBUG_FLAGS} -Wno-ignored-qualifiers) +else () set(DEBUG_FLAGS ${COMPILE_FLAGS} ${MEMORY_FLAGS}) add_definitions(-DNOMINMAX) # remove min max macros -endif() +endif () if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU") set(CXXTEST_FLAGS ${CXXTEST_FLAGS} -Wno-terminate) -endif() +endif () # pthread set(THREADS_PREFER_PTHREAD_FLAG ON) @@ -314,11 +365,11 @@ find_package(Threads REQUIRED) list(APPEND LIBS Threads::Threads) if (BUILD_STATIC_MARABOU) - # build a static library - target_link_libraries(${MARABOU_LIB} ${LIBS} -static) -else() - target_link_libraries(${MARABOU_LIB} ${LIBS}) -endif() + # build a static library + target_link_libraries(${MARABOU_LIB} ${LIBS} -static) +else () + target_link_libraries(${MARABOU_LIB} ${LIBS}) +endif () target_include_directories(${MARABOU_LIB} PRIVATE ${LIBS_INCLUDES}) target_compile_options(${MARABOU_LIB} PRIVATE ${RELEASE_FLAGS}) @@ -334,44 +385,44 @@ target_include_directories(${MARABOU_EXE} PRIVATE ${LIBS_INCLUDES}) set(DEFAULT_PYTHON_VERSION "3" CACHE STRING "Default Python version 2/3") set(PYTHON_VERSIONS_SUPPORTED 2 3) list(FIND PYTHON_VERSIONS_SUPPORTED ${DEFAULT_PYTHON_VERSION} index) -if(index EQUAL -1) +if (index EQUAL -1) 
message(FATAL_ERROR "Python version must be one of ${PYTHON_VERSIONS_SUPPORTED}") -endif() +endif () set(PYTHON_API_DIR "${PROJECT_SOURCE_DIR}/maraboupy") if (NOT PYTHON_LIBRARY_OUTPUT_DIRECTORY) set(PYTHON_LIBRARY_OUTPUT_DIRECTORY "${PYTHON_API_DIR}") -endif() +endif () # Determine if we should build Python set(PYTHON32 FALSE) -if(${BUILD_PYTHON}) +if (${BUILD_PYTHON}) execute_process(COMMAND "${PYTHON_EXECUTABLE}" "-c" - "import struct; print(struct.calcsize('@P'));" - RESULT_VARIABLE _PYTHON_SUCCESS - OUTPUT_VARIABLE PYTHON_SIZEOF_VOID_P - ERROR_VARIABLE _PYTHON_ERROR_VALUE) + "import struct; print(struct.calcsize('@P'));" + RESULT_VARIABLE _PYTHON_SUCCESS + OUTPUT_VARIABLE PYTHON_SIZEOF_VOID_P + ERROR_VARIABLE _PYTHON_ERROR_VALUE) # message("PYTHON SIZEOF VOID p ${PYTHON_SIZEOF_VOID_P}") if (PYTHON_SIZEOF_VOID_P EQUAL 4 AND NOT ${FORCE_PYTHON_BUILD}) set(PYTHON32 TRUE) message(WARNING "Python version is 32-bit, please use build_python.sh in maraboupy folder") - endif() -endif() + endif () +endif () if (${FORCE_PYTHON_BUILD}) set(BUILD_PYTHON ON) -else() +else () if (${BUILD_PYTHON} AND NOT ${PYTHON32}) set(BUILD_PYTHON ON) - else() + else () set(BUILD_PYTHON OFF) - endif() -endif() + endif () +endif () # Actually build Python if (${BUILD_PYTHON}) - set(PYBIND11_VERSION 2.10.4) - set(PYBIND11_DIR "${TOOLS_DIR}/pybind11-${PYBIND11_VERSION}") + set(PYBIND11_VERSION 2.10.4) + set(PYBIND11_DIR "${TOOLS_DIR}/pybind11-${PYBIND11_VERSION}") # This is suppose to set the PYTHON_EXECUTABLE variable # First try to find the default python version @@ -379,12 +430,12 @@ if (${BUILD_PYTHON}) if (NOT EXISTS ${PYTHON_EXECUTABLE}) # If the default didn't work just find any python version find_package(PythonInterp REQUIRED) - endif() + endif () if (NOT EXISTS ${PYBIND11_DIR}) message("didnt find pybind, getting it") - execute_process(COMMAND ${TOOLS_DIR}/download_pybind11.${SCRIPT_EXTENSION} ${PYBIND11_VERSION}) - endif() + execute_process(COMMAND 
${TOOLS_DIR}/download_pybind11.${SCRIPT_EXTENSION} ${PYBIND11_VERSION}) + endif () add_subdirectory(${PYBIND11_DIR}) set(MARABOU_PY MarabouCore) @@ -394,11 +445,11 @@ if (${BUILD_PYTHON}) target_include_directories(${MARABOU_PY} PRIVATE ${LIBS_INCLUDES}) set_target_properties(${MARABOU_PY} PROPERTIES - LIBRARY_OUTPUT_DIRECTORY ${PYTHON_LIBRARY_OUTPUT_DIRECTORY}) - if(NOT MSVC) + LIBRARY_OUTPUT_DIRECTORY ${PYTHON_LIBRARY_OUTPUT_DIRECTORY}) + if (NOT MSVC) target_compile_options(${MARABOU_LIB} PRIVATE -fPIC ${RELEASE_FLAGS}) - endif() -endif() + endif () +endif () ################# ## Build tests ## @@ -407,26 +458,26 @@ endif() set(MARABOU_TEST_LIB MarabouHelperTest) add_library(${MARABOU_TEST_LIB}) -set (TEST_DIR "${CMAKE_CURRENT_BINARY_DIR}/tests") +set(TEST_DIR "${CMAKE_CURRENT_BINARY_DIR}/tests") file(MAKE_DIRECTORY ${TEST_DIR}) set(CMAKE_PREFIX_PATH "${TOOLS_DIR}/cxxtest") set(CXXTEST_USE_PYTHON FALSE) find_package(CxxTest) -if(CXXTEST_FOUND) +if (CXXTEST_FOUND) include_directories(${CXXTEST_INCLUDE_DIR}) enable_testing() -endif() +endif () target_link_libraries(${MARABOU_TEST_LIB} ${MARABOU_LIB} ${LIBS}) -target_include_directories(${MARABOU_TEST_LIB} PRIVATE ${LIBS_INCLUDES} ) +target_include_directories(${MARABOU_TEST_LIB} PRIVATE ${LIBS_INCLUDES}) target_compile_options(${MARABOU_TEST_LIB} PRIVATE ${CXXTEST_FLAGS}) add_custom_target(build-tests ALL) add_custom_target(check - COMMAND ctest --output-on-failure -j${CTEST_NTHREADS} $$ARGS - DEPENDS build-tests build_input_parsers ${MARABOU_EXE}) + COMMAND ctest --output-on-failure -j${CTEST_NTHREADS} $$ARGS + DEPENDS build-tests build_input_parsers ${MARABOU_EXE}) # Decide which tests to run and execute set(TESTS_TO_RUN "") @@ -434,50 +485,50 @@ set(TESTS_TO_RUN "") macro(append_tests_to_run new_val) if ("${TESTS_TO_RUN}" STREQUAL "") set(TESTS_TO_RUN ${new_val}) - else() + else () set(TESTS_TO_RUN "${TESTS_TO_RUN}|${new_val}") - endif() + endif () endmacro() if (${RUN_UNIT_TEST}) append_tests_to_run("unit") 
-endif() +endif () if (${RUN_REGRESS_TEST}) append_tests_to_run("regress[0-5]") -endif() +endif () if (${RUN_SYSTEM_TEST}) append_tests_to_run("system") -endif() +endif () if (NOT ${TESTS_TO_RUN} STREQUAL "") # make ctest verbose set(CTEST_OUTPUT_ON_FAILURE 1) add_custom_command( - TARGET build-tests - POST_BUILD - COMMAND ctest --output-on-failure -L "\"(${TESTS_TO_RUN})\"" -j${CTEST_NTHREADS} $$ARGS + TARGET build-tests + POST_BUILD + COMMAND ctest --output-on-failure -L "\"(${TESTS_TO_RUN})\"" -j${CTEST_NTHREADS} $$ARGS ) -endif() +endif () if (${BUILD_PYTHON} AND ${RUN_PYTHON_TEST}) if (MSVC) add_custom_command( - TARGET build-tests - POST_BUILD - COMMAND cp ${PYTHON_API_DIR}/Release/* ${PYTHON_API_DIR} + TARGET build-tests + POST_BUILD + COMMAND cp ${PYTHON_API_DIR}/Release/* ${PYTHON_API_DIR} ) - endif() + endif () add_custom_command( - TARGET build-tests - POST_BUILD - COMMAND ${PYTHON_EXECUTABLE} -m pytest ${PYTHON_API_DIR}/test + TARGET build-tests + POST_BUILD + COMMAND ${PYTHON_EXECUTABLE} -m pytest ${PYTHON_API_DIR}/test ) -endif() +endif () # Add the input parsers add_custom_target(build_input_parsers) add_dependencies(build_input_parsers ${MPS_PARSER} ${ACAS_PARSER} - ${BERKELEY_PARSER}) + ${BERKELEY_PARSER}) add_subdirectory(${SRC_DIR}) add_subdirectory(${TOOLS_DIR}) From ee9b8141acade16a2ab0f3e6ec9fd353c5501527 Mon Sep 17 00:00:00 2001 From: mayaswissa Date: Sun, 27 Jul 2025 14:58:35 +0300 Subject: [PATCH 03/33] fix torch errors --- CMakeLists.txt | 184 ++++++++---------- src/basis_factorization/GaussianEliminator.h | 2 +- src/basis_factorization/LUFactorization.h | 2 +- .../SparseFTFactorization.h | 2 +- .../SparseGaussianEliminator.h | 2 +- .../SparseLUFactorization.h | 2 +- src/cegar/IncrementalLinearization.h | 2 +- src/common/Debug.h | 4 +- src/engine/CDSmtCore.h | 2 +- src/engine/CustomDNN.cpp | 1 - src/engine/CustomDNN.h | 2 + src/engine/DantzigsRule.h | 2 +- src/engine/DnCManager.h | 2 +- src/engine/Engine.h | 2 +- src/engine/InputQuery.cpp 
| 2 +- src/engine/PLConstraintScoreTracker.h | 2 +- src/engine/ProjectedSteepestEdge.h | 2 +- src/engine/Query.cpp | 2 +- src/engine/SmtCore.h | 2 +- src/engine/SumOfInfeasibilitiesManager.h | 2 +- src/engine/Tableau.h | 2 +- src/input_parsers/MpsParser.h | 2 +- src/input_parsers/OnnxParser.h | 2 +- src/nlr/IterativePropagator.h | 2 +- src/nlr/LPFormulator.h | 2 +- src/nlr/NetworkLevelReasoner.cpp | 2 +- src/query_loader/QueryLoader.h | 2 +- 27 files changed, 107 insertions(+), 130 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index ae5639f51b..03b0d85e54 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.16) +cmake_minimum_required (VERSION 3.16) project(Marabou) set(MARABOU_VERSION 2.0.0) @@ -20,9 +20,8 @@ option(RUN_SYSTEM_TEST "Run system tests on build" OFF) option(RUN_MEMORY_TEST "Run cxxtest testing with ASAN ON" ON) option(RUN_PYTHON_TEST "Run Python API tests if building with Python" OFF) option(ENABLE_GUROBI "Enable use the Gurobi optimizer" OFF) -option(ENABLE_OPENBLAS "Do symbolic bound tighting using blas" ON) # Not available on Windows +option(ENABLE_OPENBLAS "Do symbolic bound tighting using blas" OFF) # Not available on Windows option(CODE_COVERAGE "Add code coverage" OFF) # Available only in debug mode -option(BUILD_CADICAL "Build the CaDiCaL SAT solver for CDCL solving" ON) option(BUILD_TORCH "Build libtorch" ON) ################### ## Git variables ## @@ -62,9 +61,9 @@ set(BASIS_DIR "${SRC_DIR}/basis_factorization") if (MSVC) set(SCRIPT_EXTENSION bat) -else () +else() set(SCRIPT_EXTENSION sh) -endif () +endif() ########## ## CVC4 ## @@ -90,17 +89,17 @@ if (MSVC) set(Boost_NAMESPACE libboost) elseif (${CMAKE_SIZEOF_VOID_P} EQUAL 4 AND NOT MSVC) set(BOOST_ROOT "${BOOST_DIR}/installed32") -else () +else() set(BOOST_ROOT "${BOOST_DIR}/installed") -endif () +endif() set(Boost_USE_DEBUG_RUNTIME FALSE) find_package(Boost ${BOOST_VERSION} COMPONENTS program_options timer chrono thread) # Find 
boost if (NOT ${Boost_FOUND}) execute_process(COMMAND ${TOOLS_DIR}/download_boost.${SCRIPT_EXTENSION} ${BOOST_VERSION}) - find_package(Boost ${BOOST_VERSION} REQUIRED COMPONENTS program_options timer chrono thread regex) -endif () + find_package(Boost ${BOOST_VERSION} REQUIRED COMPONENTS program_options timer chrono thread) +endif() set(LIBS_INCLUDES ${Boost_INCLUDE_DIRS}) list(APPEND LIBS ${Boost_LIBRARIES}) @@ -113,17 +112,17 @@ set(PROTOBUF_VERSION 3.19.2) set(PROTOBUF_DEFAULT_DIR "${TOOLS_DIR}/protobuf-${PROTOBUF_VERSION}") if (NOT PROTOBUF_DIR) set(PROTOBUF_DIR ${PROTOBUF_DEFAULT_DIR}) -endif () +endif() -if (NOT EXISTS "${PROTOBUF_DIR}/installed/lib/libprotobuf.a") +if(NOT EXISTS "${PROTOBUF_DIR}/installed/lib/libprotobuf.a") message("Can't find protobuf, installing. If protobuf is installed please use the PROTOBUF_DIR parameter to pass the path") if (${PROTOBUF_DIR} STREQUAL ${PROTOBUF_DEFAULT_DIR}) message("installing protobuf") execute_process(COMMAND ${TOOLS_DIR}/download_protobuf.sh ${PROTOBUF_VERSION}) - else () + else() message(FATAL_ERROR "Can't find protobuf in the supplied directory") - endif () -endif () + endif() +endif() set(PROTOBUF_LIB protobuf) add_library(${PROTOBUF_LIB} SHARED IMPORTED) @@ -140,10 +139,10 @@ list(APPEND LIBS ${PROTOBUF_LIB}) set(ONNX_VERSION 1.15.0) set(ONNX_DIR "${TOOLS_DIR}/onnx-${ONNX_VERSION}") -if (NOT EXISTS "${ONNX_DIR}/onnx.proto3.pb.h") +if(NOT EXISTS "${ONNX_DIR}/onnx.proto3.pb.h") message("generating ONNX protobuf file") execute_process(COMMAND ${TOOLS_DIR}/download_onnx.sh ${ONNX_VERSION} ${PROTOBUF_VERSION}) -endif () +endif() file(GLOB DEPS_ONNX "${ONNX_DIR}/*.cc") include_directories(SYSTEM ${ONNX_DIR}) @@ -156,19 +155,20 @@ if (${BUILD_TORCH}) if (NOT DEFINED BUILD_TORCH) set(BUILD_TORCH $ENV{TORCH_HOME}) add_definitions(-DBUILD_TORCH) - endif () + endif() add_compile_definitions(BUILD_TORCH) set(PYTORCH_VERSION 2.2.1) find_package(Torch ${PYTORCH_VERSION} QUIET) if (NOT Torch_FOUND) set(PYTORCH_DIR 
"${TOOLS_DIR}/libtorch-${PYTORCH_VERSION}") list(APPEND CMAKE_PREFIX_PATH ${PYTORCH_DIR}) - if (NOT EXISTS "${PYTORCH_DIR}") + if(NOT EXISTS "${PYTORCH_DIR}") execute_process(COMMAND ${TOOLS_DIR}/download_libtorch.sh ${PYTORCH_VERSION}) - endif () + set(Torch_NO_SYSTEM_PATHS ON) + endif() set(Torch_DIR ${PYTORCH_DIR}/share/cmake/Torch) find_package(Torch ${PYTORCH_VERSION} REQUIRED) - endif () + endif() set(TORCH_CXX_FLAGS "-Wno-error=array-bounds") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${TORCH_CXX_FLAGS}") list(APPEND LIBS ${TORCH_LIBRARIES}) @@ -181,7 +181,7 @@ if (${ENABLE_GUROBI}) message(STATUS "Using Gurobi for LP relaxation for bound tightening") if (NOT DEFINED GUROBI_DIR) set(GUROBI_DIR $ENV{GUROBI_HOME}) - endif () + endif() add_compile_definitions(ENABLE_GUROBI) set(GUROBI_LIB1 "gurobi_c++") @@ -197,13 +197,13 @@ if (${ENABLE_GUROBI}) # MACOSx uses .dylib instead of .so for its Gurobi downloads. if (APPLE) set_target_properties(${GUROBI_LIB2} PROPERTIES IMPORTED_LOCATION ${GUROBI_DIR}/lib/libgurobi110.dylib) - else () + else() set_target_properties(${GUROBI_LIB2} PROPERTIES IMPORTED_LOCATION ${GUROBI_DIR}/lib/libgurobi110.so) endif () list(APPEND LIBS ${GUROBI_LIB2}) target_include_directories(${GUROBI_LIB2} INTERFACE ${GUROBI_DIR}/include/) -endif () +endif() ############## ## OpenBLAS ## @@ -216,49 +216,25 @@ if (NOT MSVC AND ${ENABLE_OPENBLAS}) set(OPENBLAS_DEFAULT_DIR "${TOOLS_DIR}/OpenBLAS-${OPENBLAS_VERSION}") if (NOT OPENBLAS_DIR) set(OPENBLAS_DIR ${OPENBLAS_DEFAULT_DIR}) - endif () + endif() message(STATUS "Using OpenBLAS for matrix multiplication") add_compile_definitions(ENABLE_OPENBLAS) - if (NOT EXISTS "${OPENBLAS_DIR}/installed/lib/libopenblas.a") + if(NOT EXISTS "${OPENBLAS_DIR}/installed/lib/libopenblas.a") message("Can't find OpenBLAS, installing. 
If OpenBLAS is installed please use the OPENBLAS_DIR parameter to pass the path") if (${OPENBLAS_DIR} STREQUAL ${OPENBLAS_DEFAULT_DIR}) message("Installing OpenBLAS") execute_process(COMMAND ${TOOLS_DIR}/download_openBLAS.sh ${OPENBLAS_VERSION}) - else () + else() message(FATAL_ERROR "Can't find OpenBLAS in the supplied directory") - endif () - endif () + endif() + endif() add_library(${OPENBLAS_LIB} SHARED IMPORTED) set_target_properties(${OPENBLAS_LIB} PROPERTIES IMPORTED_LOCATION ${OPENBLAS_DIR}/installed/lib/libopenblas.a) list(APPEND LIBS ${OPENBLAS_LIB}) target_include_directories(${OPENBLAS_LIB} INTERFACE ${OPENBLAS_DIR}/installed/include) -endif () - -########## -## CaDiCaL ## -########## - -if (BUILD_CADICAL) - message(STATUS "Using CaDiCaL for CDCL solving") - if (NOT CADICAL_DIR) - set(CADICAL_DIR "${TOOLS_DIR}/cadical") - endif () - add_compile_definitions(BUILD_CADICAL) - - if (NOT EXISTS "${CADICAL_DIR}") - message(STATUS "Building CaDiCaL SAT Solver") - execute_process(COMMAND ${TOOLS_DIR}/download_cadical.sh) - endif () - - set(CADICAL_LIB cadical) - add_library(${CADICAL_LIB} SHARED IMPORTED) - set_property(TARGET ${CADICAL_LIB} PROPERTY POSITION_INDEPENDENT_CODE ON) - set_target_properties(${CADICAL_LIB} PROPERTIES IMPORTED_LOCATION ${CADICAL_DIR}/build/libcadical.a) - target_include_directories(${CADICAL_LIB} INTERFACE ${CADICAL_DIR}/src) - list(APPEND LIBS ${CADICAL_LIB}) -endif () +endif() ########### ## Build ## @@ -289,30 +265,30 @@ set(INPUT_PARSERS_DIR input_parsers) include(ProcessorCount) ProcessorCount(CTEST_NTHREADS) -if (CTEST_NTHREADS EQUAL 0) +if(CTEST_NTHREADS EQUAL 0) set(CTEST_NTHREADS 1) -endif () +endif() # --------------- set build type ---------------------------- set(BUILD_TYPES Release Debug MinSizeRel RelWithDebInfo) # Set the default build type to Production -if (NOT CMAKE_BUILD_TYPE) +if(NOT CMAKE_BUILD_TYPE) set(CMAKE_BUILD_TYPE Release CACHE STRING "Options are: Release Debug MinSizeRel RelWithDebInfo" FORCE) # Provide 
drop down menu options in cmake-gui set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS ${BUILD_TYPES}) -endif () +endif() message(STATUS "Building ${CMAKE_BUILD_TYPE} build") #-------------------------set code coverage----------------------------------# # Allow coverage only in debug mode only in gcc -if (CODE_COVERAGE AND CMAKE_CXX_COMPILER_ID MATCHES "GNU" AND CMAKE_BUILD_TYPE MATCHES Debug) +if(CODE_COVERAGE AND CMAKE_CXX_COMPILER_ID MATCHES "GNU" AND CMAKE_BUILD_TYPE MATCHES Debug) message(STATUS "Building with code coverage") set(COVERAGE_COMPILER_FLAGS "-g -O0 --coverage" CACHE INTERNAL "") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COVERAGE_COMPILER_FLAGS}") set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} --coverage") -endif () +endif() # We build a static library that is the core of the project, the link it to the # API's (executable and python at the moment) @@ -324,40 +300,40 @@ set(MARABOU_EXE Marabou${CMAKE_EXECUTABLE_SUFFIX}) add_executable(${MARABOU_EXE} "${ENGINE_DIR}/main.cpp") set(MARABOU_EXE_PATH "${BIN_DIR}/${MARABOU_EXE}") add_custom_command(TARGET ${MARABOU_EXE} POST_BUILD - COMMAND ${CMAKE_COMMAND} -E copy $ ${MARABOU_EXE_PATH}) + COMMAND ${CMAKE_COMMAND} -E copy $ ${MARABOU_EXE_PATH} ) set(MPS_PARSER_PATH "${BIN_DIR}/${MPS_PARSER}") if (NOT MSVC) if (CMAKE_CXX_COMPILER_ID MATCHES "Clang") - set(COMPILE_FLAGS -Wall -Wextra -Werror -MMD -Qunused-arguments -Wno-deprecated-declarations -Wno-unused-but-set-variable) + set(COMPILE_FLAGS -Wall -Wextra -Werror -MMD -Qunused-arguments -Wno-deprecated-declarations -Wno-unused-but-set-variable ) elseif (CMAKE_BUILD_TYPE MATCHES "Release") - set(COMPILE_FLAGS -Wall) - else () - set(COMPILE_FLAGS -Wall -Wextra -Werror -MMD) #-Wno-deprecated - endif () + set(COMPILE_FLAGS -Wall ) + else() + set(COMPILE_FLAGS -Wall -Wextra -Werror -MMD ) #-Wno-deprecated + endif() set(RELEASE_FLAGS ${COMPILE_FLAGS} -O3) #-Wno-deprecated -endif () +endif() if (RUN_MEMORY_TEST) - if (NOT MSVC) + if(NOT MSVC) 
set(MEMORY_FLAGS -fsanitize=address -fno-omit-frame-pointer -O1) - endif () -endif () + endif() +endif() add_definitions(-DRESOURCES_DIR="${RESOURCES_DIR}") if (NOT MSVC) set(DEBUG_FLAGS ${COMPILE_FLAGS} ${MEMORY_FLAGS} -g) - set(CXXTEST_FLAGS ${DEBUG_FLAGS} -Wno-ignored-qualifiers) -else () + set(CXXTEST_FLAGS ${DEBUG_FLAGS} -Wno-ignored-qualifiers) +else() set(DEBUG_FLAGS ${COMPILE_FLAGS} ${MEMORY_FLAGS}) add_definitions(-DNOMINMAX) # remove min max macros -endif () +endif() if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU") set(CXXTEST_FLAGS ${CXXTEST_FLAGS} -Wno-terminate) -endif () +endif() # pthread set(THREADS_PREFER_PTHREAD_FLAG ON) @@ -367,9 +343,9 @@ list(APPEND LIBS Threads::Threads) if (BUILD_STATIC_MARABOU) # build a static library target_link_libraries(${MARABOU_LIB} ${LIBS} -static) -else () +else() target_link_libraries(${MARABOU_LIB} ${LIBS}) -endif () +endif() target_include_directories(${MARABOU_LIB} PRIVATE ${LIBS_INCLUDES}) target_compile_options(${MARABOU_LIB} PRIVATE ${RELEASE_FLAGS}) @@ -385,18 +361,18 @@ target_include_directories(${MARABOU_EXE} PRIVATE ${LIBS_INCLUDES}) set(DEFAULT_PYTHON_VERSION "3" CACHE STRING "Default Python version 2/3") set(PYTHON_VERSIONS_SUPPORTED 2 3) list(FIND PYTHON_VERSIONS_SUPPORTED ${DEFAULT_PYTHON_VERSION} index) -if (index EQUAL -1) +if(index EQUAL -1) message(FATAL_ERROR "Python version must be one of ${PYTHON_VERSIONS_SUPPORTED}") -endif () +endif() set(PYTHON_API_DIR "${PROJECT_SOURCE_DIR}/maraboupy") if (NOT PYTHON_LIBRARY_OUTPUT_DIRECTORY) set(PYTHON_LIBRARY_OUTPUT_DIRECTORY "${PYTHON_API_DIR}") -endif () +endif() # Determine if we should build Python set(PYTHON32 FALSE) -if (${BUILD_PYTHON}) +if(${BUILD_PYTHON}) execute_process(COMMAND "${PYTHON_EXECUTABLE}" "-c" "import struct; print(struct.calcsize('@P'));" RESULT_VARIABLE _PYTHON_SUCCESS @@ -407,17 +383,17 @@ if (${BUILD_PYTHON}) set(PYTHON32 TRUE) message(WARNING "Python version is 32-bit, please use build_python.sh in maraboupy folder") - endif () 
-endif () + endif() +endif() if (${FORCE_PYTHON_BUILD}) set(BUILD_PYTHON ON) -else () +else() if (${BUILD_PYTHON} AND NOT ${PYTHON32}) set(BUILD_PYTHON ON) - else () + else() set(BUILD_PYTHON OFF) - endif () -endif () + endif() +endif() # Actually build Python if (${BUILD_PYTHON}) @@ -430,12 +406,12 @@ if (${BUILD_PYTHON}) if (NOT EXISTS ${PYTHON_EXECUTABLE}) # If the default didn't work just find any python version find_package(PythonInterp REQUIRED) - endif () + endif() if (NOT EXISTS ${PYBIND11_DIR}) message("didnt find pybind, getting it") execute_process(COMMAND ${TOOLS_DIR}/download_pybind11.${SCRIPT_EXTENSION} ${PYBIND11_VERSION}) - endif () + endif() add_subdirectory(${PYBIND11_DIR}) set(MARABOU_PY MarabouCore) @@ -446,10 +422,10 @@ if (${BUILD_PYTHON}) set_target_properties(${MARABOU_PY} PROPERTIES LIBRARY_OUTPUT_DIRECTORY ${PYTHON_LIBRARY_OUTPUT_DIRECTORY}) - if (NOT MSVC) + if(NOT MSVC) target_compile_options(${MARABOU_LIB} PRIVATE -fPIC ${RELEASE_FLAGS}) - endif () -endif () + endif() +endif() ################# ## Build tests ## @@ -458,19 +434,19 @@ endif () set(MARABOU_TEST_LIB MarabouHelperTest) add_library(${MARABOU_TEST_LIB}) -set(TEST_DIR "${CMAKE_CURRENT_BINARY_DIR}/tests") +set (TEST_DIR "${CMAKE_CURRENT_BINARY_DIR}/tests") file(MAKE_DIRECTORY ${TEST_DIR}) set(CMAKE_PREFIX_PATH "${TOOLS_DIR}/cxxtest") set(CXXTEST_USE_PYTHON FALSE) find_package(CxxTest) -if (CXXTEST_FOUND) +if(CXXTEST_FOUND) include_directories(${CXXTEST_INCLUDE_DIR}) enable_testing() -endif () +endif() target_link_libraries(${MARABOU_TEST_LIB} ${MARABOU_LIB} ${LIBS}) -target_include_directories(${MARABOU_TEST_LIB} PRIVATE ${LIBS_INCLUDES}) +target_include_directories(${MARABOU_TEST_LIB} PRIVATE ${LIBS_INCLUDES} ) target_compile_options(${MARABOU_TEST_LIB} PRIVATE ${CXXTEST_FLAGS}) add_custom_target(build-tests ALL) @@ -485,29 +461,29 @@ set(TESTS_TO_RUN "") macro(append_tests_to_run new_val) if ("${TESTS_TO_RUN}" STREQUAL "") set(TESTS_TO_RUN ${new_val}) - else () + else() 
set(TESTS_TO_RUN "${TESTS_TO_RUN}|${new_val}") - endif () + endif() endmacro() if (${RUN_UNIT_TEST}) append_tests_to_run("unit") -endif () +endif() if (${RUN_REGRESS_TEST}) append_tests_to_run("regress[0-5]") -endif () +endif() if (${RUN_SYSTEM_TEST}) append_tests_to_run("system") -endif () +endif() if (NOT ${TESTS_TO_RUN} STREQUAL "") # make ctest verbose set(CTEST_OUTPUT_ON_FAILURE 1) add_custom_command( TARGET build-tests POST_BUILD - COMMAND ctest --output-on-failure -L "\"(${TESTS_TO_RUN})\"" -j${CTEST_NTHREADS} $$ARGS + COMMAND ctest --output-on-failure -L "\"(${TESTS_TO_RUN})\"" -j${CTEST_NTHREADS} $$ARGS ) -endif () +endif() if (${BUILD_PYTHON} AND ${RUN_PYTHON_TEST}) if (MSVC) @@ -516,14 +492,14 @@ if (${BUILD_PYTHON} AND ${RUN_PYTHON_TEST}) POST_BUILD COMMAND cp ${PYTHON_API_DIR}/Release/* ${PYTHON_API_DIR} ) - endif () + endif() add_custom_command( TARGET build-tests POST_BUILD COMMAND ${PYTHON_EXECUTABLE} -m pytest ${PYTHON_API_DIR}/test ) -endif () +endif() # Add the input parsers add_custom_target(build_input_parsers) @@ -532,4 +508,4 @@ add_dependencies(build_input_parsers ${MPS_PARSER} ${ACAS_PARSER} add_subdirectory(${SRC_DIR}) add_subdirectory(${TOOLS_DIR}) -add_subdirectory(${REGRESS_DIR}) +add_subdirectory(${REGRESS_DIR}) \ No newline at end of file diff --git a/src/basis_factorization/GaussianEliminator.h b/src/basis_factorization/GaussianEliminator.h index 2177021e55..6f93605ff6 100644 --- a/src/basis_factorization/GaussianEliminator.h +++ b/src/basis_factorization/GaussianEliminator.h @@ -19,7 +19,7 @@ #include "LUFactors.h" #define GAUSSIAN_LOG( x, ... 
) \ - LOG( GlobalConfiguration::GAUSSIAN_ELIMINATION_LOGGING, "GaussianEliminator: %s\n", x ) + MARABOU_LOG( GlobalConfiguration::GAUSSIAN_ELIMINATION_LOGGING, "GaussianEliminator: %s\n", x ) class GaussianEliminator { diff --git a/src/basis_factorization/LUFactorization.h b/src/basis_factorization/LUFactorization.h index 400d53eb88..ae4befd5e7 100644 --- a/src/basis_factorization/LUFactorization.h +++ b/src/basis_factorization/LUFactorization.h @@ -22,7 +22,7 @@ #include "List.h" #define LU_FACTORIZATION_LOG( x, ... ) \ - LOG( GlobalConfiguration::BASIS_FACTORIZATION_LOGGING, "LUFactorization: %s\n", x ) + MARABOU_LOG( GlobalConfiguration::BASIS_FACTORIZATION_LOGGING, "LUFactorization: %s\n", x ) class EtaMatrix; class LPElement; diff --git a/src/basis_factorization/SparseFTFactorization.h b/src/basis_factorization/SparseFTFactorization.h index 906f5b205e..b885cab4b2 100644 --- a/src/basis_factorization/SparseFTFactorization.h +++ b/src/basis_factorization/SparseFTFactorization.h @@ -24,7 +24,7 @@ #include "Statistics.h" #define SFTF_FACTORIZATION_LOG( x, ... ) \ - LOG( GlobalConfiguration::BASIS_FACTORIZATION_LOGGING, "SparseFTFactorization: %s\n", x ) + MARABOU_LOG( GlobalConfiguration::BASIS_FACTORIZATION_LOGGING, "SparseFTFactorization: %s\n", x ) /* This class performs a sparse FT factorization of a given matrix. diff --git a/src/basis_factorization/SparseGaussianEliminator.h b/src/basis_factorization/SparseGaussianEliminator.h index 48078b42d9..fd6a061dce 100644 --- a/src/basis_factorization/SparseGaussianEliminator.h +++ b/src/basis_factorization/SparseGaussianEliminator.h @@ -23,7 +23,7 @@ #include "Statistics.h" #define SGAUSSIAN_LOG( x, ... 
) \ - LOG( GlobalConfiguration::GAUSSIAN_ELIMINATION_LOGGING, "SparseGaussianEliminator: %s\n", x ) + MARABOU_LOG( GlobalConfiguration::GAUSSIAN_ELIMINATION_LOGGING, "SparseGaussianEliminator: %s\n", x ) class SparseGaussianEliminator { diff --git a/src/basis_factorization/SparseLUFactorization.h b/src/basis_factorization/SparseLUFactorization.h index 7b925fec48..7d75ebe3c4 100644 --- a/src/basis_factorization/SparseLUFactorization.h +++ b/src/basis_factorization/SparseLUFactorization.h @@ -22,7 +22,7 @@ #include "SparseLUFactors.h" #define BASIS_FACTORIZATION_LOG( x, ... ) \ - LOG( GlobalConfiguration::BASIS_FACTORIZATION_LOGGING, "SparseLUFactorization: %s\n", x ) + MARABOU_LOG( GlobalConfiguration::BASIS_FACTORIZATION_LOGGING, "SparseLUFactorization: %s\n", x ) class EtaMatrix; class LPElement; diff --git a/src/cegar/IncrementalLinearization.h b/src/cegar/IncrementalLinearization.h index ddf5b00fcf..9260e7e57c 100644 --- a/src/cegar/IncrementalLinearization.h +++ b/src/cegar/IncrementalLinearization.h @@ -20,7 +20,7 @@ #include "Query.h" #define INCREMENTAL_LINEARIZATION_LOG( x, ... ) \ - LOG( GlobalConfiguration::CEGAR_LOGGING, "IncrementalLinearization: %s\n", x ) + MARABOU_LOG( GlobalConfiguration::CEGAR_LOGGING, "IncrementalLinearization: %s\n", x ) class Engine; class IQuery; diff --git a/src/common/Debug.h b/src/common/Debug.h index 55dfda4a92..a6d9d67907 100644 --- a/src/common/Debug.h +++ b/src/common/Debug.h @@ -27,7 +27,7 @@ #endif #ifndef NDEBUG -#define LOG( x, f, y, ... ) \ +#define MARABOU_LOG( x, f, y, ... ) \ { \ if ( ( x ) ) \ { \ @@ -35,7 +35,7 @@ } \ } #else -#define LOG( x, f, y, ... ) \ +#define MARABOU_LOG( x, f, y, ... ) \ { \ } #endif diff --git a/src/engine/CDSmtCore.h b/src/engine/CDSmtCore.h index 8cbeebc5c0..a352f2c6ec 100644 --- a/src/engine/CDSmtCore.h +++ b/src/engine/CDSmtCore.h @@ -76,7 +76,7 @@ #include "context/cdlist.h" #include "context/context.h" -#define SMT_LOG( x, ... 
) LOG( GlobalConfiguration::SMT_CORE_LOGGING, "CDSmtCore: %s\n", x ) +#define SMT_LOG( x, ... ) MARABOU_LOG( GlobalConfiguration::SMT_CORE_LOGGING, "CDSmtCore: %s\n", x ) class EngineState; class Engine; diff --git a/src/engine/CustomDNN.cpp b/src/engine/CustomDNN.cpp index 9f05feeca9..a2f1bb8c0b 100644 --- a/src/engine/CustomDNN.cpp +++ b/src/engine/CustomDNN.cpp @@ -1,5 +1,4 @@ #include "CustomDNN.h" -#include "Vector.h" #ifdef BUILD_TORCH CustomRelu::CustomRelu( const NLR::NetworkLevelReasoner *nlr, unsigned layerIndex ) diff --git a/src/engine/CustomDNN.h b/src/engine/CustomDNN.h index 577fa2966d..9abd4ba8d5 100644 --- a/src/engine/CustomDNN.h +++ b/src/engine/CustomDNN.h @@ -1,3 +1,4 @@ +#ifdef BUILD_TORCH #ifndef __CustomDNN_h__ #define __CustomDNN_h__ @@ -99,3 +100,4 @@ class CustomDNN : public torch::nn::Module #endif // __CustomDNN_h__ +#endif \ No newline at end of file diff --git a/src/engine/DantzigsRule.h b/src/engine/DantzigsRule.h index 5e57e28c24..b3fda42e77 100644 --- a/src/engine/DantzigsRule.h +++ b/src/engine/DantzigsRule.h @@ -19,7 +19,7 @@ #include "EntrySelectionStrategy.h" #define DANTZIG_LOG( x, ... ) \ - LOG( GlobalConfiguration::DANTZIGS_RULE_LOGGING, "DantzigsRule: %s\n", x ) + MARABOU_LOG( GlobalConfiguration::DANTZIGS_RULE_LOGGING, "DantzigsRule: %s\n", x ) class String; diff --git a/src/engine/DnCManager.h b/src/engine/DnCManager.h index ee4a55a19d..545a9fd326 100644 --- a/src/engine/DnCManager.h +++ b/src/engine/DnCManager.h @@ -25,7 +25,7 @@ #include #define DNC_MANAGER_LOG( x, ... ) \ - LOG( GlobalConfiguration::DNC_MANAGER_LOGGING, "DnCManager: %s\n", x ) + MARABOU_LOG( GlobalConfiguration::DNC_MANAGER_LOGGING, "DnCManager: %s\n", x ) class Query; diff --git a/src/engine/Engine.h b/src/engine/Engine.h index a3ea1c22d3..5e53564f33 100644 --- a/src/engine/Engine.h +++ b/src/engine/Engine.h @@ -57,7 +57,7 @@ #undef ERROR #endif -#define ENGINE_LOG( x, ... 
) LOG( GlobalConfiguration::ENGINE_LOGGING, "Engine: %s\n", x ) +#define ENGINE_LOG( x, ... ) MARABOU_LOG( GlobalConfiguration::ENGINE_LOGGING, "Engine: %s\n", x ) class EngineState; class Query; diff --git a/src/engine/InputQuery.cpp b/src/engine/InputQuery.cpp index d275646b06..c28913a20b 100644 --- a/src/engine/InputQuery.cpp +++ b/src/engine/InputQuery.cpp @@ -29,7 +29,7 @@ #include "SoftmaxConstraint.h" #define INPUT_QUERY_LOG( x, ... ) \ - LOG( GlobalConfiguration::INPUT_QUERY_LOGGING, "Marabou Query: %s\n", x ) + MARABOU_LOG( GlobalConfiguration::INPUT_QUERY_LOGGING, "Marabou Query: %s\n", x ) using namespace CVC4::context; diff --git a/src/engine/PLConstraintScoreTracker.h b/src/engine/PLConstraintScoreTracker.h index ab62333e6b..6798074dd2 100644 --- a/src/engine/PLConstraintScoreTracker.h +++ b/src/engine/PLConstraintScoreTracker.h @@ -24,7 +24,7 @@ #include #define SCORE_TRACKER_LOG( x, ... ) \ - LOG( GlobalConfiguration::SCORE_TRACKER_LOGGING, "PLConstraintScoreTracker: %s\n", x ) + MARABOU_LOG( GlobalConfiguration::SCORE_TRACKER_LOGGING, "PLConstraintScoreTracker: %s\n", x ) struct ScoreEntry { diff --git a/src/engine/ProjectedSteepestEdge.h b/src/engine/ProjectedSteepestEdge.h index 70b3265ef0..c84ba9bac9 100644 --- a/src/engine/ProjectedSteepestEdge.h +++ b/src/engine/ProjectedSteepestEdge.h @@ -20,7 +20,7 @@ #include "SparseUnsortedList.h" #define PSE_LOG( x, ... ) \ - LOG( GlobalConfiguration::PROJECTED_STEEPEST_EDGE_LOGGING, "Projected SE: %s\n", x ) + MARABOU_LOG( GlobalConfiguration::PROJECTED_STEEPEST_EDGE_LOGGING, "Projected SE: %s\n", x ) class ProjectedSteepestEdgeRule : public IProjectedSteepestEdgeRule { diff --git a/src/engine/Query.cpp b/src/engine/Query.cpp index 77b22c9c9f..6c696c42be 100644 --- a/src/engine/Query.cpp +++ b/src/engine/Query.cpp @@ -29,7 +29,7 @@ #include "SymbolicBoundTighteningType.h" #define INPUT_QUERY_LOG( x, ... 
) \ - LOG( GlobalConfiguration::INPUT_QUERY_LOGGING, "Input Query: %s\n", x ) + MARABOU_LOG( GlobalConfiguration::INPUT_QUERY_LOGGING, "Input Query: %s\n", x ) Query::Query() : _ensureSameSourceLayerInNLR( Options::get()->getSymbolicBoundTighteningType() == diff --git a/src/engine/SmtCore.h b/src/engine/SmtCore.h index ad1d61f8e9..0274d475b6 100644 --- a/src/engine/SmtCore.h +++ b/src/engine/SmtCore.h @@ -28,7 +28,7 @@ #include -#define SMT_LOG( x, ... ) LOG( GlobalConfiguration::SMT_CORE_LOGGING, "SmtCore: %s\n", x ) +#define SMT_LOG( x, ... ) MARABOU_LOG( GlobalConfiguration::SMT_CORE_LOGGING, "SmtCore: %s\n", x ) class EngineState; class IEngine; diff --git a/src/engine/SumOfInfeasibilitiesManager.h b/src/engine/SumOfInfeasibilitiesManager.h index a823a92fed..7a9a3bf448 100644 --- a/src/engine/SumOfInfeasibilitiesManager.h +++ b/src/engine/SumOfInfeasibilitiesManager.h @@ -29,7 +29,7 @@ #include "T/stdlib.h" #include "Vector.h" -#define SOI_LOG( x, ... ) LOG( GlobalConfiguration::SOI_LOGGING, "SoIManager: %s\n", x ) +#define SOI_LOG( x, ... ) MARABOU_LOG( GlobalConfiguration::SOI_LOGGING, "SoIManager: %s\n", x ) class SumOfInfeasibilitiesManager { diff --git a/src/engine/Tableau.h b/src/engine/Tableau.h index f5bf063f57..175efbe3c8 100644 --- a/src/engine/Tableau.h +++ b/src/engine/Tableau.h @@ -29,7 +29,7 @@ #include "SparseUnsortedList.h" #include "Statistics.h" -#define TABLEAU_LOG( x, ... ) LOG( GlobalConfiguration::TABLEAU_LOGGING, "Tableau: %s\n", x ) +#define TABLEAU_LOG( x, ... ) MARABOU_LOG( GlobalConfiguration::TABLEAU_LOGGING, "Tableau: %s\n", x ) class Equation; class ICostFunctionManager; diff --git a/src/input_parsers/MpsParser.h b/src/input_parsers/MpsParser.h index 71f177f493..e097deff87 100644 --- a/src/input_parsers/MpsParser.h +++ b/src/input_parsers/MpsParser.h @@ -20,7 +20,7 @@ #include "Map.h" #include "Set.h" -#define MPS_LOG( x, ... ) LOG( GlobalConfiguration::MPS_PARSER_LOGGING, "MpsParser: %s\n", x ) +#define MPS_LOG( x, ... 
) MARABOU_LOG GlobalConfiguration::MPS_PARSER_LOGGING, "MpsParser: %s\n", x ) class IQuery; class String; diff --git a/src/input_parsers/OnnxParser.h b/src/input_parsers/OnnxParser.h index 2b316a2004..3f0149f6c8 100644 --- a/src/input_parsers/OnnxParser.h +++ b/src/input_parsers/OnnxParser.h @@ -25,7 +25,7 @@ #include "Vector.h" #include "onnx.proto3.pb.h" -#define ONNX_LOG( x, ... ) LOG( GlobalConfiguration::ONNX_PARSER_LOGGING, "OnnxParser: %s\n", x ) +#define ONNX_LOG( x, ... ) MARABOU_LOG( GlobalConfiguration::ONNX_PARSER_LOGGING, "OnnxParser: %s\n", x ) class OnnxParser diff --git a/src/nlr/IterativePropagator.h b/src/nlr/IterativePropagator.h index 7a7fba671a..0c3a593f89 100644 --- a/src/nlr/IterativePropagator.h +++ b/src/nlr/IterativePropagator.h @@ -27,7 +27,7 @@ namespace NLR { #define IterativePropagator_LOG( x, ... ) \ - LOG( GlobalConfiguration::PREPROCESSOR_LOGGING, "Iterativepropagator: %s\n", x ) + MARABOU_LOG( GlobalConfiguration::PREPROCESSOR_LOGGING, "Iterativepropagator: %s\n", x ) class IterativePropagator : public ParallelSolver { diff --git a/src/nlr/LPFormulator.h b/src/nlr/LPFormulator.h index cee0abbb65..248fc968c8 100644 --- a/src/nlr/LPFormulator.h +++ b/src/nlr/LPFormulator.h @@ -31,7 +31,7 @@ namespace NLR { #define LPFormulator_LOG( x, ... ) \ - LOG( GlobalConfiguration::PREPROCESSOR_LOGGING, "LP Preprocessor: %s\n", x ) + MARABOU_LOG( GlobalConfiguration::PREPROCESSOR_LOGGING, "LP Preprocessor: %s\n", x ) class LPFormulator : public ParallelSolver { diff --git a/src/nlr/NetworkLevelReasoner.cpp b/src/nlr/NetworkLevelReasoner.cpp index 8390a0190b..d7f4f8d173 100644 --- a/src/nlr/NetworkLevelReasoner.cpp +++ b/src/nlr/NetworkLevelReasoner.cpp @@ -34,7 +34,7 @@ #include -#define NLR_LOG( x, ... ) LOG( GlobalConfiguration::NETWORK_LEVEL_REASONER_LOGGING, "NLR: %s\n", x ) +#define NLR_LOG( x, ... 
) MARABOU_LOG( GlobalConfiguration::NETWORK_LEVEL_REASONER_LOGGING, "NLR: %s\n", x ) namespace NLR { diff --git a/src/query_loader/QueryLoader.h b/src/query_loader/QueryLoader.h index 195a002c47..c8d1c0732c 100644 --- a/src/query_loader/QueryLoader.h +++ b/src/query_loader/QueryLoader.h @@ -19,7 +19,7 @@ #include "IQuery.h" -#define QL_LOG( x, ... ) LOG( GlobalConfiguration::QUERY_LOADER_LOGGING, "QueryLoader: %s\n", x ) +#define QL_LOG( x, ... ) MARABOU_LOG( GlobalConfiguration::QUERY_LOADER_LOGGING, "QueryLoader: %s\n", x ) class QueryLoader { From 5b06ed7724fb4da3e782764b732a7d869032a8f9 Mon Sep 17 00:00:00 2001 From: mayaswissa Date: Sun, 27 Jul 2025 16:34:49 +0300 Subject: [PATCH 04/33] multiple output layer support --- src/nlr/AlphaCrown.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/nlr/AlphaCrown.cpp b/src/nlr/AlphaCrown.cpp index 2d6e8bbd0f..7381bd277d 100644 --- a/src/nlr/AlphaCrown.cpp +++ b/src/nlr/AlphaCrown.cpp @@ -199,7 +199,7 @@ void AlphaCrown::GDloop( int loops, optimizer.zero_grad(); auto [max_val, min_val] = AlphaCrown::computeBounds( alphaSlopes ); - auto loss = ( val_to_opt == "max" ) ? max_val : -min_val; + auto loss = ( val_to_opt == "max" ) ? 
max_val.sum( ) : -min_val.sum( ); loss.backward(); optimizer.step(); From 6f45aafc84f3b7573626e1eea424a34557529b3d Mon Sep 17 00:00:00 2001 From: mayaswissa Date: Sun, 27 Jul 2025 16:49:33 +0300 Subject: [PATCH 05/33] minor --- src/configuration/GlobalConfiguration.cpp | 1 + src/configuration/GlobalConfiguration.h | 1 + src/engine/CustomDNN.h | 12 ++++++------ src/engine/Engine.cpp | 2 +- src/nlr/AlphaCrown.h | 5 +++-- src/nlr/NetworkLevelReasoner.cpp | 9 +++++++++ 6 files changed, 21 insertions(+), 9 deletions(-) diff --git a/src/configuration/GlobalConfiguration.cpp b/src/configuration/GlobalConfiguration.cpp index f9a074f076..49fa05782b 100644 --- a/src/configuration/GlobalConfiguration.cpp +++ b/src/configuration/GlobalConfiguration.cpp @@ -143,6 +143,7 @@ const bool GlobalConfiguration::ONNX_PARSER_LOGGING = false; const bool GlobalConfiguration::SOI_LOGGING = false; const bool GlobalConfiguration::SCORE_TRACKER_LOGGING = false; const bool GlobalConfiguration::CEGAR_LOGGING = false; +const bool GlobalConfiguration::CUSTOM_DNN_LOGGING = true; const bool GlobalConfiguration::USE_SMART_FIX = false; const bool GlobalConfiguration::USE_LEAST_FIX = false; diff --git a/src/configuration/GlobalConfiguration.h b/src/configuration/GlobalConfiguration.h index 3104edf79d..4e6d7bdf98 100644 --- a/src/configuration/GlobalConfiguration.h +++ b/src/configuration/GlobalConfiguration.h @@ -292,6 +292,7 @@ class GlobalConfiguration static const bool SOI_LOGGING; static const bool SCORE_TRACKER_LOGGING; static const bool CEGAR_LOGGING; + static const bool CUSTOM_DNN_LOGGING; }; #endif // __GlobalConfiguration_h__ diff --git a/src/engine/CustomDNN.h b/src/engine/CustomDNN.h index 9abd4ba8d5..fe8783b6d3 100644 --- a/src/engine/CustomDNN.h +++ b/src/engine/CustomDNN.h @@ -2,12 +2,12 @@ #ifndef __CustomDNN_h__ #define __CustomDNN_h__ +#include +#include "NetworkLevelReasoner.h" + #undef Warning #include -#include "NetworkLevelReasoner.h" -#include - #define CUSTOM_DNN_LOG( x, ... 
) \ MARABOU_LOG( GlobalConfiguration::CUSTOM_DNN_LOGGING, "customDNN: %s\n", x ) @@ -78,11 +78,11 @@ class CustomDNN : public torch::nn::Module unsigned outputSize ); void weightedSum( unsigned i, const NLR::Layer *layer ); explicit CustomDNN( const NLR::NetworkLevelReasoner *networkLevelReasoner ); - torch::Tensor getLayerWeights(unsigned layerIndex) const; - torch::Tensor getLayerBias(unsigned layerIndex) const; + torch::Tensor getLayerWeights( unsigned layerIndex ) const; + torch::Tensor getLayerBias( unsigned layerIndex ) const; torch::Tensor forward( torch::Tensor x ); const Vector &getLayerSizes() const; - void getInputBounds(torch::Tensor &lbTensor, torch::Tensor &ubTensor)const; + void getInputBounds( torch::Tensor &lbTensor, torch::Tensor &ubTensor ) const; Vector getLinearLayers() { return _linearLayers; diff --git a/src/engine/Engine.cpp b/src/engine/Engine.cpp index 86f45ecd0c..c451328da0 100644 --- a/src/engine/Engine.cpp +++ b/src/engine/Engine.cpp @@ -2443,7 +2443,7 @@ unsigned Engine::performSymbolicBoundTightening( Query *inputQuery ) if ( _symbolicBoundTighteningType == SymbolicBoundTighteningType::SYMBOLIC_BOUND_TIGHTENING ) _networkLevelReasoner->symbolicBoundPropagation(); else if ( _symbolicBoundTighteningType == SymbolicBoundTighteningType::DEEP_POLY ) - _networkLevelReasoner->deepPolyPropagation(); + _networkLevelReasoner->alphaCrown(); // Step 3: Extract the bounds List tightenings; diff --git a/src/nlr/AlphaCrown.h b/src/nlr/AlphaCrown.h index 67bfbaa36a..461fc09ec1 100644 --- a/src/nlr/AlphaCrown.h +++ b/src/nlr/AlphaCrown.h @@ -18,9 +18,10 @@ class AlphaCrown void findBounds(); void optimizeBounds( int loops = 50 ); - void run(){ + void run() + { findBounds(); - optimizeBounds(2); + optimizeBounds( 2 ); } private: diff --git a/src/nlr/NetworkLevelReasoner.cpp b/src/nlr/NetworkLevelReasoner.cpp index d7f4f8d173..a5a6a2eeed 100644 --- a/src/nlr/NetworkLevelReasoner.cpp +++ b/src/nlr/NetworkLevelReasoner.cpp @@ -211,6 +211,15 @@ void 
NetworkLevelReasoner::deepPolyPropagation() _deepPolyAnalysis->run(); } +void NetworkLevelReasoner::alphaCrown() +{ +#ifdef BUILD_TORCH + if ( _alphaCrown == nullptr ) + _alphaCrown = std::unique_ptr( new AlphaCrown( this ) ); + _alphaCrown->run(); +#endif +} + void NetworkLevelReasoner::lpRelaxationPropagation() { LPFormulator lpFormulator( this ); From eed3c0048c3e2c1d9961e62d77fd3ec6f4b063cd Mon Sep 17 00:00:00 2001 From: mayaswissa Date: Mon, 28 Jul 2025 11:32:44 +0300 Subject: [PATCH 06/33] fix errors --- src/common/Debug.h | 4 +-- src/engine/CustomDNN.cpp | 56 ++++++++++++++++++---------------- src/engine/CustomDNN.h | 30 +++++++++--------- src/input_parsers/MpsParser.h | 2 +- src/nlr/AlphaCrown.cpp | 5 +-- src/nlr/AlphaCrown.h | 1 - src/nlr/NetworkLevelReasoner.h | 2 +- 7 files changed, 52 insertions(+), 48 deletions(-) diff --git a/src/common/Debug.h b/src/common/Debug.h index a6d9d67907..3d0cb9554f 100644 --- a/src/common/Debug.h +++ b/src/common/Debug.h @@ -27,7 +27,7 @@ #endif #ifndef NDEBUG -#define MARABOU_LOG( x, f, y, ... ) \ +#define MARABOU_LOG( x, f, y, ... ) \ { \ if ( ( x ) ) \ { \ @@ -35,7 +35,7 @@ } \ } #else -#define MARABOU_LOG( x, f, y, ... ) \ +#define MARABOU_LOG( x, f, y, ... 
) \ { \ } #endif diff --git a/src/engine/CustomDNN.cpp b/src/engine/CustomDNN.cpp index a2f1bb8c0b..3aaadacdeb 100644 --- a/src/engine/CustomDNN.cpp +++ b/src/engine/CustomDNN.cpp @@ -1,7 +1,8 @@ +#include "NetworkLevelReasoner.h" #include "CustomDNN.h" #ifdef BUILD_TORCH - -CustomRelu::CustomRelu( const NLR::NetworkLevelReasoner *nlr, unsigned layerIndex ) +namespace NLR { +CustomRelu::CustomRelu( const NetworkLevelReasoner *nlr, unsigned layerIndex ) : _networkLevelReasoner( nlr ) , _reluLayerIndex( layerIndex ) { @@ -12,7 +13,7 @@ torch::Tensor CustomRelu::forward( torch::Tensor x ) const return CustomReluFunction::apply( x, _networkLevelReasoner, _reluLayerIndex ); } -CustomMaxPool::CustomMaxPool( const NLR::NetworkLevelReasoner *nlr, unsigned layerIndex ) +CustomMaxPool::CustomMaxPool( const NetworkLevelReasoner *nlr, unsigned layerIndex ) : _networkLevelReasoner( nlr ) , _maxLayerIndex( layerIndex ) { @@ -24,7 +25,7 @@ torch::Tensor CustomMaxPool::forward( torch::Tensor x ) const } void CustomDNN::setWeightsAndBiases( torch::nn::Linear &linearLayer, - const NLR::Layer *layer, + const Layer *layer, unsigned sourceLayer, unsigned inputSize, unsigned outputSize ) @@ -62,10 +63,10 @@ void CustomDNN::setWeightsAndBiases( torch::nn::Linear &linearLayer, linearLayer->bias.set_( biasTensor ); } -void CustomDNN::weightedSum( unsigned i, const NLR::Layer *layer ) +void CustomDNN::weightedSum( unsigned i, const Layer *layer ) { unsigned sourceLayer = i - 1; - const NLR::Layer *prevLayer = _networkLevelReasoner->getLayer( sourceLayer ); + const Layer *prevLayer = _networkLevelReasoner->getLayer( sourceLayer ); unsigned inputSize = prevLayer->getSize(); unsigned outputSize = layer->getSize(); @@ -81,32 +82,32 @@ void CustomDNN::weightedSum( unsigned i, const NLR::Layer *layer ) } -CustomDNN::CustomDNN( const NLR::NetworkLevelReasoner *nlr ) +CustomDNN::CustomDNN( const NetworkLevelReasoner *nlr ) { CUSTOM_DNN_LOG( "----- Construct Custom Network -----" ); 
_networkLevelReasoner = nlr; _numberOfLayers = _networkLevelReasoner->getNumberOfLayers(); for ( unsigned i = 0; i < _numberOfLayers; i++ ) { - const NLR::Layer *layer = _networkLevelReasoner->getLayer( i ); + const Layer *layer = _networkLevelReasoner->getLayer( i ); _layerSizes.append( layer->getSize() ); - NLR::Layer::Type layerType = layer->getLayerType(); + Layer::Type layerType = layer->getLayerType(); _layersOrder.append( layerType ); switch ( layerType ) { - case NLR::Layer::INPUT: + case Layer::INPUT: break; - case NLR::Layer::WEIGHTED_SUM: + case Layer::WEIGHTED_SUM: weightedSum( i, layer ); break; - case NLR::Layer::RELU: + case Layer::RELU: { auto reluLayer = std::make_shared( _networkLevelReasoner, i ); _reluLayers.append( reluLayer ); register_module( "ReLU" + std::to_string( i ), reluLayer ); break; } - case NLR::Layer::MAX: + case Layer::MAX: { auto maxPoolLayer = std::make_shared( _networkLevelReasoner, i ); _maxPoolLayers.append( maxPoolLayer ); @@ -127,20 +128,20 @@ torch::Tensor CustomDNN::forward( torch::Tensor x ) unsigned maxPoolIndex = 0; for ( unsigned i = 0; i < _numberOfLayers; i++ ) { - const NLR::Layer::Type layerType = _layersOrder[i]; + const Layer::Type layerType = _layersOrder[i]; switch ( layerType ) { - case NLR::Layer::INPUT: + case Layer::INPUT: break; - case NLR::Layer::WEIGHTED_SUM: + case Layer::WEIGHTED_SUM: x = _linearLayers[linearIndex]->forward( x ); linearIndex++; break; - case NLR::Layer::RELU: + case Layer::RELU: x = _reluLayers[reluIndex]->forward( x ); reluIndex++; break; - case NLR::Layer::MAX: + case Layer::MAX: x = _maxPoolLayers[maxPoolIndex]->forward( x ); maxPoolIndex++; break; @@ -155,12 +156,12 @@ torch::Tensor CustomDNN::forward( torch::Tensor x ) torch::Tensor CustomReluFunction::forward( torch::autograd::AutogradContext *ctx, torch::Tensor x, - const NLR::NetworkLevelReasoner *nlr, + const NetworkLevelReasoner *nlr, unsigned int layerIndex ) { ctx->save_for_backward( { x } ); - const NLR::Layer *layer = 
nlr->getLayer( layerIndex ); + const Layer *layer = nlr->getLayer( layerIndex ); torch::Tensor reluOutputs = torch::zeros( { 1, layer->getSize() } ); torch::Tensor reluGradients = torch::zeros( { 1, layer->getSize() } ); @@ -168,7 +169,7 @@ torch::Tensor CustomReluFunction::forward( torch::autograd::AutogradContext *ctx { auto sources = layer->getActivationSources( neuron ); ASSERT( sources.size() == 1 ); - const NLR::NeuronIndex &sourceNeuron = sources.back(); + const NeuronIndex &sourceNeuron = sources.back(); int index = static_cast( sourceNeuron._neuron ); reluOutputs.index_put_( { 0, static_cast( neuron ) }, torch::clamp_min( x.index( { 0, index } ), 0 ) ); @@ -194,12 +195,12 @@ std::vector CustomReluFunction::backward( torch::autograd::Autogr torch::Tensor CustomMaxPoolFunction::forward( torch::autograd::AutogradContext *ctx, torch::Tensor x, - const NLR::NetworkLevelReasoner *nlr, + const NetworkLevelReasoner *nlr, unsigned int layerIndex ) { ctx->save_for_backward( { x } ); - const NLR::Layer *layer = nlr->getLayer( layerIndex ); + const Layer *layer = nlr->getLayer( layerIndex ); torch::Tensor maxOutputs = torch::zeros( { 1, layer->getSize() } ); torch::Tensor argMaxOutputs = torch::zeros( { 1, layer->getSize() }, torch::kInt64 ); @@ -211,7 +212,7 @@ torch::Tensor CustomMaxPoolFunction::forward( torch::autograd::AutogradContext * for ( int i = sources.size() - 1; i >= 0; --i ) { - const NLR::NeuronIndex &activationNeuron = sources.back(); + const NeuronIndex &activationNeuron = sources.back(); int index = static_cast( activationNeuron._neuron ); sources.popBack(); sourceValues.index_put_( { i }, x.index( { 0, index } ) ); @@ -249,7 +250,7 @@ const Vector &CustomDNN::getLayerSizes() const } torch::Tensor CustomDNN::getLayerWeights(unsigned layerIndex) const { - if (_layersOrder[layerIndex] == NLR::Layer::WEIGHTED_SUM) { + if (_layersOrder[layerIndex] == Layer::WEIGHTED_SUM) { auto linearLayer = _linearLayers[layerIndex]; return linearLayer->weight; // 
Returning weights of the corresponding linear layer } @@ -257,7 +258,7 @@ torch::Tensor CustomDNN::getLayerWeights(unsigned layerIndex) const { } torch::Tensor CustomDNN::getLayerBias(unsigned layerIndex) const { - if (_layersOrder[layerIndex] == NLR::Layer::WEIGHTED_SUM) { + if (_layersOrder[layerIndex] == Layer::WEIGHTED_SUM) { auto linearLayer = _linearLayers[layerIndex]; return linearLayer->bias; // Returning bias of the corresponding linear layer } @@ -266,7 +267,7 @@ torch::Tensor CustomDNN::getLayerBias(unsigned layerIndex) const { void CustomDNN::getInputBounds(torch::Tensor &lbTensor, torch::Tensor &ubTensor) const { - const NLR::Layer *layer = _networkLevelReasoner->getLayer(0); + const Layer *layer = _networkLevelReasoner->getLayer(0); unsigned size = layer->getSize(); std::vector lowerBounds; @@ -283,5 +284,6 @@ void CustomDNN::getInputBounds(torch::Tensor &lbTensor, torch::Tensor &ubTensor) lbTensor = torch::tensor(lowerBounds, torch::kDouble); ubTensor = torch::tensor(upperBounds, torch::kDouble); } +} #endif \ No newline at end of file diff --git a/src/engine/CustomDNN.h b/src/engine/CustomDNN.h index fe8783b6d3..057208ebf0 100644 --- a/src/engine/CustomDNN.h +++ b/src/engine/CustomDNN.h @@ -2,8 +2,10 @@ #ifndef __CustomDNN_h__ #define __CustomDNN_h__ +#include "Layer.h" +#include "Vector.h" + #include -#include "NetworkLevelReasoner.h" #undef Warning #include @@ -15,12 +17,13 @@ Custom differentiation function for ReLU, implementing the forward and backward propagation for the ReLU operation according to each variable's source layer as defined in the nlr. 
*/ +namespace NLR { class CustomReluFunction : public torch::autograd::Function { public: static torch::Tensor forward( torch::autograd::AutogradContext *ctx, torch::Tensor x, - const NLR::NetworkLevelReasoner *nlr, + const NetworkLevelReasoner *nlr, unsigned layerIndex ); static std::vector backward( torch::autograd::AutogradContext *ctx, @@ -30,11 +33,11 @@ class CustomReluFunction : public torch::autograd::Function class CustomRelu : public torch::nn::Module { public: - CustomRelu( const NLR::NetworkLevelReasoner *nlr, unsigned layerIndex ); + CustomRelu( const NetworkLevelReasoner *nlr, unsigned layerIndex ); torch::Tensor forward( torch::Tensor x ) const; private: - const NLR::NetworkLevelReasoner *_networkLevelReasoner; + const NetworkLevelReasoner *_networkLevelReasoner; unsigned _reluLayerIndex; }; @@ -47,7 +50,7 @@ class CustomMaxPoolFunction : public torch::autograd::Function backward( torch::autograd::AutogradContext *ctx, @@ -57,11 +60,11 @@ class CustomMaxPoolFunction : public torch::autograd::Function _layerSizes; Vector> _reluLayers; Vector> _maxPoolLayers; Vector _linearLayers; - Vector _layersOrder; + Vector _layersOrder; unsigned _numberOfLayers; }; - - +} // namespace NLR #endif // __CustomDNN_h__ #endif \ No newline at end of file diff --git a/src/input_parsers/MpsParser.h b/src/input_parsers/MpsParser.h index e097deff87..76c4afab96 100644 --- a/src/input_parsers/MpsParser.h +++ b/src/input_parsers/MpsParser.h @@ -20,7 +20,7 @@ #include "Map.h" #include "Set.h" -#define MPS_LOG( x, ... ) MARABOU_LOG GlobalConfiguration::MPS_PARSER_LOGGING, "MpsParser: %s\n", x ) +#define MPS_LOG( x, ... 
) MARABOU_LOG( GlobalConfiguration::MPS_PARSER_LOGGING, "MpsParser: %s\n", x ) class IQuery; class String; diff --git a/src/nlr/AlphaCrown.cpp b/src/nlr/AlphaCrown.cpp index 7381bd277d..5556c7ae3f 100644 --- a/src/nlr/AlphaCrown.cpp +++ b/src/nlr/AlphaCrown.cpp @@ -5,12 +5,13 @@ #include "AlphaCrown.h" #include "MStringf.h" +#include "NetworkLevelReasoner.h" namespace NLR { AlphaCrown::AlphaCrown( LayerOwner *layerOwner ) : _layerOwner( layerOwner ) { - _network = new CustomDNN( static_cast(_layerOwner) ); + _network = new CustomDNN( dynamic_cast( _layerOwner ) ); _network->getInputBounds( _lbInput, _ubInput ); _inputSize = _lbInput.size( 0 ); // TODO it that length of tensor? @@ -199,7 +200,7 @@ void AlphaCrown::GDloop( int loops, optimizer.zero_grad(); auto [max_val, min_val] = AlphaCrown::computeBounds( alphaSlopes ); - auto loss = ( val_to_opt == "max" ) ? max_val.sum( ) : -min_val.sum( ); + auto loss = ( val_to_opt == "max" ) ? max_val.sum() : -min_val.sum(); loss.backward(); optimizer.step(); diff --git a/src/nlr/AlphaCrown.h b/src/nlr/AlphaCrown.h index 461fc09ec1..65201c8a0f 100644 --- a/src/nlr/AlphaCrown.h +++ b/src/nlr/AlphaCrown.h @@ -4,7 +4,6 @@ #include "CustomDNN.h" #include "LayerOwner.h" -#include "NetworkLevelReasoner.h" #include #undef Warning diff --git a/src/nlr/NetworkLevelReasoner.h b/src/nlr/NetworkLevelReasoner.h index 25ae75941b..832158e72c 100644 --- a/src/nlr/NetworkLevelReasoner.h +++ b/src/nlr/NetworkLevelReasoner.h @@ -125,6 +125,7 @@ class NetworkLevelReasoner : public LayerOwner void intervalArithmeticBoundPropagation(); void symbolicBoundPropagation(); void deepPolyPropagation(); + void alphaCrown(); void lpRelaxationPropagation(); void LPTighteningForOneLayer( unsigned targetIndex ); void MILPPropagation(); @@ -257,7 +258,6 @@ class NetworkLevelReasoner : public LayerOwner to all neurons in the network */ void reindexNeurons(); - void alphaCrown(); }; } // namespace NLR From 7084272d2db0a38d9fb7d3d393c9cbacbf5e8be3 Mon Sep 17 
00:00:00 2001 From: mayaswissa Date: Mon, 28 Jul 2025 15:57:45 +0300 Subject: [PATCH 07/33] add download_libtorch.sh --- tools/download_libtorch.sh | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) create mode 100755 tools/download_libtorch.sh diff --git a/tools/download_libtorch.sh b/tools/download_libtorch.sh new file mode 100755 index 0000000000..90f1884b42 --- /dev/null +++ b/tools/download_libtorch.sh @@ -0,0 +1,19 @@ +#!/bin/bash +curdir=$pwd +mydir="${0%/*}" +version=$1 + +cd $mydir + +# Need to download the cxx11-abi version of libtorch in order to ensure compatability +# with boost. +# +# See https://discuss.pytorch.org/t/issues-linking-with-libtorch-c-11-abi/29510 for details. +echo "Downloading PyTorch" +wget https://download.pytorch.org/libtorch/cpu/libtorch-cxx11-abi-shared-with-deps-$version%2Bcpu.zip -O libtorch-$version.zip -q --show-progress --progress=bar:force:noscroll + +echo "Unzipping PyTorch" +unzip libtorch-$version.zip >> /dev/null +mv libtorch libtorch-$version + +cd $curdir From ccc4d55eab269042fec0d3e84d2b522c7832cb56 Mon Sep 17 00:00:00 2001 From: avi_porges Date: Wed, 30 Jul 2025 14:02:28 +0300 Subject: [PATCH 08/33] fix some bugs --- src/nlr/AlphaCrown.cpp | 34 +++++++++++++++++++++------------- 1 file changed, 21 insertions(+), 13 deletions(-) diff --git a/src/nlr/AlphaCrown.cpp b/src/nlr/AlphaCrown.cpp index 5556c7ae3f..4fe8246fb3 100644 --- a/src/nlr/AlphaCrown.cpp +++ b/src/nlr/AlphaCrown.cpp @@ -14,24 +14,29 @@ AlphaCrown::AlphaCrown( LayerOwner *layerOwner ) _network = new CustomDNN( dynamic_cast( _layerOwner ) ); _network->getInputBounds( _lbInput, _ubInput ); _inputSize = _lbInput.size( 0 ); // TODO it that length of tensor? 
- - _linearLayers = std::vector( _network->getLinearLayers().begin(), - _network->getLinearLayers().end() ); + _network->getLinearLayers().end() ; + _linearLayers = _network->getLinearLayers().getContainer(); for ( const auto &linearLayer : _linearLayers ) { _positiveWeights.push_back( torch::where( linearLayer->weight >= 0, linearLayer->weight, - torch::zeros_like( linearLayer->weight ) ) ); + torch::zeros_like( linearLayer->weight ) ).to(torch::kFloat32) ); _negativeWeights.push_back( torch::where( linearLayer->weight <= 0, linearLayer->weight, - torch::zeros_like( linearLayer->weight ) ) ); - _biases.push_back( linearLayer->bias ); + torch::zeros_like( linearLayer->weight ) ).to + (torch::kFloat32) ); + _biases.push_back( linearLayer->bias.to(torch::kFloat32) ); } } torch::Tensor AlphaCrown::createSymbolicVariablesMatrix() { - return torch::cat( { torch::eye( _inputSize ), torch::zeros( { _inputSize, 1 } ) }, 1 ); + // Create the identity matrix and the zero matrix + auto eye_tensor = torch::eye(_inputSize, torch::kFloat32); // Ensure float32 + auto zero_tensor = torch::zeros({_inputSize, 1}, torch::kFloat32); // Ensure float32 + + // Concatenate the two tensors horizontally (along dim=1) + return torch::cat({eye_tensor, zero_tensor}, 1); // Will be of type float32 } torch::Tensor AlphaCrown::lower_ReLU_relaxation( const torch::Tensor &u, const torch::Tensor &l ) @@ -40,7 +45,7 @@ torch::Tensor AlphaCrown::lower_ReLU_relaxation( const torch::Tensor &u, const t mult = torch::where( u - l == 0, torch::tensor( 1.0 ), u / ( u - l ) ); mult = torch::where( l >= 0, torch::tensor( 1.0 ), mult ); mult = torch::where( u <= 0, torch::tensor( 0.0 ), mult ); - return mult; + return mult.to(torch::kFloat32); } std::tuple AlphaCrown::upper_ReLU_relaxation( const torch::Tensor &u, @@ -53,7 +58,7 @@ std::tuple AlphaCrown::upper_ReLU_relaxation( cons torch::Tensor add = torch::where( u - l == 0, torch::tensor( 0.0 ), -l * mult ); add = torch::where( l >= 0, torch::tensor( 0.0 ), 
add ); - return std::make_tuple( mult, add ); + return std::make_tuple( mult.to(torch::kFloat32), add.to(torch::kFloat32) ); } torch::Tensor AlphaCrown::getMaxOfSymbolicVariables( const torch::Tensor &matrix ) { @@ -182,11 +187,12 @@ void AlphaCrown::optimizeBounds( int loops ) std::vector alphaSlopesForLowBound; for ( auto &tensor : _alphaSlopes ) { - alphaSlopesForUpBound.push_back( tensor.copy_( tensor.detach().requires_grad_( true ) ) ); - alphaSlopesForLowBound.push_back( tensor.copy_( tensor.detach().requires_grad_( true ) ) ); + alphaSlopesForUpBound.push_back( tensor.detach().clone().requires_grad_(true) ); + alphaSlopesForLowBound.push_back( tensor.detach().clone().requires_grad_(true) ); } AlphaCrown::GDloop( loops, "max", alphaSlopesForUpBound ); AlphaCrown::GDloop( loops, "min", alphaSlopesForLowBound ); + std::cout << "AlphaCrown run completed." << std::endl; } @@ -201,15 +207,17 @@ void AlphaCrown::GDloop( int loops, auto [max_val, min_val] = AlphaCrown::computeBounds( alphaSlopes ); auto loss = ( val_to_opt == "max" ) ? 
max_val.sum() : -min_val.sum(); - loss.backward(); + loss.backward(torch::Tensor(), /*retain_graph=*/true); + optimizer.step(); for ( auto &tensor : alphaSlopes ) { - tensor.clamp_( 0, 1 ); + tensor.clamp( 0, 1 ); } log( Stringf( "Optimization loop %d completed", i + 1 ) ); + std::cout << "std Optimization loop completed " << i+1 << std::endl; } } From b0111d9d45dd3e8eb7f76f543e3bea1116c596c4 Mon Sep 17 00:00:00 2001 From: avi_porges Date: Fri, 1 Aug 2025 13:33:07 +0300 Subject: [PATCH 09/33] refactor and update bounds --- src/engine/CustomDNN.h | 8 ++ src/nlr/AlphaCrown.cpp | 255 +++++++++++++++++++++++++++++------------ src/nlr/AlphaCrown.h | 31 +++-- 3 files changed, 213 insertions(+), 81 deletions(-) diff --git a/src/engine/CustomDNN.h b/src/engine/CustomDNN.h index 057208ebf0..8fefd4778a 100644 --- a/src/engine/CustomDNN.h +++ b/src/engine/CustomDNN.h @@ -90,6 +90,14 @@ class CustomDNN : public torch::nn::Module { return _linearLayers; } + Vector getLayersOrder() const + { + return _layersOrder; + } + Vector getLayersOrder() + { + return _layersOrder; + } private: const NetworkLevelReasoner *_networkLevelReasoner; diff --git a/src/nlr/AlphaCrown.cpp b/src/nlr/AlphaCrown.cpp index 4fe8246fb3..c6aa7269cb 100644 --- a/src/nlr/AlphaCrown.cpp +++ b/src/nlr/AlphaCrown.cpp @@ -3,9 +3,9 @@ // #include "AlphaCrown.h" - #include "MStringf.h" #include "NetworkLevelReasoner.h" +#include "Layer.h" namespace NLR { AlphaCrown::AlphaCrown( LayerOwner *layerOwner ) @@ -13,19 +13,27 @@ AlphaCrown::AlphaCrown( LayerOwner *layerOwner ) { _network = new CustomDNN( dynamic_cast( _layerOwner ) ); _network->getInputBounds( _lbInput, _ubInput ); - _inputSize = _lbInput.size( 0 ); // TODO it that length of tensor? 
+ _inputSize = _lbInput.size( 0 ); _network->getLinearLayers().end() ; _linearLayers = _network->getLinearLayers().getContainer(); - for ( const auto &linearLayer : _linearLayers ) + _layersOrder = _network->getLayersOrder().getContainer(); + + unsigned linearIndex = 0; + for ( unsigned i = 0; i < _network->_numberOfLayers; i++ ) { - _positiveWeights.push_back( torch::where( linearLayer->weight >= 0, - linearLayer->weight, - torch::zeros_like( linearLayer->weight ) ).to(torch::kFloat32) ); - _negativeWeights.push_back( torch::where( linearLayer->weight <= 0, - linearLayer->weight, - torch::zeros_like( linearLayer->weight ) ).to - (torch::kFloat32) ); - _biases.push_back( linearLayer->bias.to(torch::kFloat32) ); + if (_layersOrder[i] != Layer::WEIGHTED_SUM) continue; + //const Layer *layer = _layerOwner->getLayer( i ); + auto linearLayer = _linearLayers[linearIndex]; + auto whights = linearLayer->weight; + auto bias = linearLayer->bias; + _positiveWeights.insert( {i,torch::where( whights >= 0,whights, + torch::zeros_like( + whights ) ).to(torch::kFloat32)} ); + _negativeWeights.insert( {i,torch::where( whights <= 0,whights, + torch::zeros_like( + whights ) ).to(torch::kFloat32)} ); + _biases.insert( {i,bias.to(torch::kFloat32)} ); + linearIndex += 1; } } @@ -92,106 +100,209 @@ torch::Tensor AlphaCrown::getMinOfSymbolicVariables( const torch::Tensor &matrix return l_values; } +void AlphaCrown::relaxReluLayer(unsigned layerNumber, torch::Tensor + &EQ_up, torch::Tensor &EQ_low){ + + auto u_values_EQ_up = AlphaCrown::getMaxOfSymbolicVariables(EQ_up); + auto l_values_EQ_up = AlphaCrown::getMinOfSymbolicVariables(EQ_low); + auto [upperRelaxationSlope, upperRelaxationIntercept] = + AlphaCrown::upper_ReLU_relaxation(l_values_EQ_up, u_values_EQ_up); + + auto u_values_EQ_low = AlphaCrown::getMaxOfSymbolicVariables(EQ_up); + auto l_values_EQ_low = AlphaCrown::getMinOfSymbolicVariables(EQ_low); + auto alphaSlope = AlphaCrown::lower_ReLU_relaxation(l_values_EQ_low, + 
u_values_EQ_low); + + EQ_up = EQ_up * upperRelaxationSlope.unsqueeze( 1 ); + EQ_up = AlphaCrown::addVecToLastColumnValue( EQ_up, upperRelaxationIntercept ); + EQ_low = EQ_low * alphaSlope.unsqueeze( 1 ); + + _upperRelaxationSlopes.insert({layerNumber, upperRelaxationSlope} ); + // back but insert to dict + _upperRelaxationIntercepts.insert({layerNumber, upperRelaxationIntercept} ); + _indexAlphaSlopeMap.insert( {layerNumber, _initialAlphaSlopes.size()} ); + _initialAlphaSlopes.push_back( alphaSlope ); + +} + + void AlphaCrown::findBounds() { torch::Tensor EQ_up = createSymbolicVariablesMatrix(); torch::Tensor EQ_low = createSymbolicVariablesMatrix(); - for ( size_t i = 0; i < _linearLayers.size(); i++ ) + for ( unsigned i = 0; i < _network->_numberOfLayers; i++ ){ + Layer::Type layerType = _layersOrder[i]; + switch (layerType) + { + case Layer::INPUT: + break; + case Layer::WEIGHTED_SUM: + computeWeightedSumLayer(i, EQ_up, EQ_low); + break; + case Layer::RELU: + relaxReluLayer(i, EQ_up, EQ_low); + break; + default: + AlphaCrown::log ( "Unsupported layer type\n"); + throw MarabouError( MarabouError::DEBUGGING_ERROR ); + } + } +} + + +std::tuple AlphaCrown::computeBounds + (std::vector &alphaSlopes) +{ + torch::Tensor EQ_up = createSymbolicVariablesMatrix(); + torch::Tensor EQ_low = createSymbolicVariablesMatrix(); + for ( unsigned i = 0; i < _network->_numberOfLayers; i++ ) { - auto Wi_positive = _positiveWeights[i]; - auto Wi_negative = _negativeWeights[i]; - auto Bi = _biases[i]; + auto layerType = _layersOrder[i]; + switch (layerType) + { + case Layer::INPUT: + break; + case Layer::WEIGHTED_SUM: + computeWeightedSumLayer (i, EQ_up, EQ_low); + break; + case Layer::RELU: + computeReluLayer (i, EQ_up, EQ_low, alphaSlopes); + break; + default: + log ("Unsupported layer type\n"); + throw MarabouError (MarabouError::DEBUGGING_ERROR); + } + } + auto outputUpBound = getMaxOfSymbolicVariables(EQ_up); + auto outputLowBound = getMinOfSymbolicVariables(EQ_low); + return 
std::make_tuple(outputUpBound, outputLowBound); - auto EQ_up_before_activation = Wi_positive.mm( EQ_up ) + Wi_negative.mm( EQ_low ); - EQ_up_before_activation = - AlphaCrown::addVecToLastColumnValue( EQ_up_before_activation, Bi ); +} - auto EQ_low_before_activation = Wi_positive.mm( EQ_low ) + Wi_negative.mm( EQ_up ); - EQ_low_before_activation = - AlphaCrown::addVecToLastColumnValue( EQ_low_before_activation, Bi ); - if ( i == _linearLayers.size() - 1 ) - { - // TODO how can we know what layer it is in nlr? in order to update bounds there? - // we should get it from cDNN +void AlphaCrown::computeWeightedSumLayer(unsigned i, torch::Tensor &EQ_up, torch::Tensor &EQ_low){ + //auto linearLayer = _linearLayers[i]; + auto Wi_positive = _positiveWeights[i]; + auto Wi_negative = _negativeWeights[i]; + auto Bi = _biases[i]; - EQ_up = EQ_up_before_activation; - EQ_low = EQ_low_before_activation; - break; - } // TODO we can skip it??? + auto EQ_up_afterLayer = Wi_positive.mm( EQ_up ) + Wi_negative.mm( EQ_low ); + EQ_up_afterLayer = + AlphaCrown::addVecToLastColumnValue( EQ_up_afterLayer, Bi ); - // TODO we can use u_values and l_values of EQ_up to compute upper relaxation? 
+ auto EQ_low_afterLayer = Wi_positive.mm( EQ_low ) + Wi_negative.mm( EQ_up ); + EQ_low_afterLayer = + AlphaCrown::addVecToLastColumnValue(EQ_low_afterLayer, Bi ); - auto u_values = AlphaCrown::getMaxOfSymbolicVariables( EQ_up_before_activation ); - auto l_values = AlphaCrown::getMinOfSymbolicVariables( EQ_low_before_activation ); - auto [upperRelaxationSlope, upperRelaxationIntercept] = - AlphaCrown::upper_ReLU_relaxation( l_values, u_values ); - auto alphaSlope = AlphaCrown::lower_ReLU_relaxation( l_values, u_values ); + EQ_up = EQ_up_afterLayer; + EQ_low = EQ_low_afterLayer; - EQ_up = EQ_up_before_activation * upperRelaxationSlope.unsqueeze( 1 ); - EQ_up = AlphaCrown::addVecToLastColumnValue( EQ_up, upperRelaxationIntercept ); - EQ_low = EQ_low_before_activation * alphaSlope.unsqueeze( 1 ); +} - _upperRelaxationSlopes.push_back( upperRelaxationSlope ); - _upperRelaxationIntercepts.push_back( upperRelaxationIntercept ); - _alphaSlopes.push_back( alphaSlope ); - } + +void AlphaCrown::computeReluLayer(unsigned layerNumber, torch::Tensor + &EQ_up, torch::Tensor &EQ_low, std::vector &alphaSlopes){ + EQ_up = EQ_up * _upperRelaxationSlopes[layerNumber].unsqueeze( 1 ); // + EQ_up = AlphaCrown::addVecToLastColumnValue( EQ_up, _upperRelaxationIntercepts[layerNumber] ); + unsigned indexInAlpha = _indexAlphaSlopeMap[layerNumber]; + EQ_low = EQ_low * alphaSlopes[indexInAlpha].unsqueeze( 1 ); } -std::tuple -AlphaCrown::computeBounds( std::vector &alphaSlopes ) -{ + + + +void AlphaCrown::updateBounds(std::vector &alphaSlopes){ torch::Tensor EQ_up = createSymbolicVariablesMatrix(); torch::Tensor EQ_low = createSymbolicVariablesMatrix(); - for ( size_t i = 0; i < _linearLayers.size(); i++ ) + + for ( unsigned i = 0; i < _network->_numberOfLayers; i++ ) { - auto Wi_positive = _positiveWeights[i]; - auto Wi_negative = _negativeWeights[i]; - auto Bi = _biases[i]; + auto layerType = _layersOrder[i]; + switch (layerType) + { + case Layer::INPUT: + break; + case Layer::WEIGHTED_SUM: + 
computeWeightedSumLayer (i, EQ_up, EQ_low); + break; + case Layer::RELU: + computeReluLayer (i, EQ_up, EQ_low, alphaSlopes); + break; + default: + log ("Unsupported layer type\n"); + throw MarabouError (MarabouError::DEBUGGING_ERROR); + } + auto upBound = getMaxOfSymbolicVariables(EQ_up); + auto lowBound = getMinOfSymbolicVariables(EQ_low); + updateBoundsOfLayer(i, upBound, lowBound); + } +} - auto EQ_up_before_activation = Wi_positive.mm( EQ_up ) + Wi_negative.mm( EQ_low ); - EQ_up_before_activation = - AlphaCrown::addVecToLastColumnValue( EQ_up_before_activation, Bi ); +void AlphaCrown::updateBoundsOfLayer(unsigned layerIndex, torch::Tensor &upBounds, torch::Tensor &lowBounds) +{ - auto EQ_low_before_activation = Wi_positive.mm( EQ_low ) + Wi_negative.mm( EQ_up ); - EQ_low_before_activation = - AlphaCrown::addVecToLastColumnValue( EQ_low_before_activation, Bi ); + Layer * layer = _layerOwner->getLayerIndexToLayer()[layerIndex]; + //TODO it should be: Layer *layer = _layerOwner->getLayer(layerIndex); if we added non const getter - if ( i == _linearLayers.size() - 1 ) + for (int j = 0; j < upBounds.size(0); j++) + { + if ( layer->neuronEliminated( j ) ) continue; + double lb_val = lowBounds[j].item(); + if ( layer->getLb( j ) < lb_val ) { - EQ_up = EQ_up_before_activation; - EQ_low = EQ_low_before_activation; - break; + log( Stringf( "Neuron %u_%u lower-bound updated from %f to %f", + layerIndex, + j, + layer->getLb( j ), + lb_val ) ); + + std::cout << "Neuron " << layerIndex << "_" << j + << " lower-bound updated from " << layer->getLb(j) + << " to " << lb_val << std::endl; + layer->setLb( j, lb_val ); + _layerOwner->receiveTighterBound( + Tightening( layer->neuronToVariable( j ), lb_val, Tightening::LB ) ); } - // TODO we can improve _upperRelaxationSlopes becouse we have better bound on each neuron - // in hidden layer. 
if so we need to use it as an argument on each iteration becose it - // not constant - EQ_up = EQ_up_before_activation * _upperRelaxationSlopes[i].unsqueeze( 1 ); // - EQ_up = AlphaCrown::addVecToLastColumnValue( EQ_up, _upperRelaxationIntercepts[i] ); - EQ_low = EQ_low_before_activation * alphaSlopes[i].unsqueeze( 1 ); + auto ub_val = upBounds[j].item(); + if ( layer->getUb( j ) > ub_val ) + { + log( Stringf( "Neuron %u_%u upper-bound updated from %f to %f", + layerIndex, + j, + layer->getUb( j ), + ub_val ) ); + std::cout << "Neuron " << layerIndex << "_" << j + << " upper-bound updated from " << layer->getUb(j) + << " to " << ub_val << std::endl; + + layer->setUb( j, ub_val ); + _layerOwner->receiveTighterBound( + Tightening( layer->neuronToVariable( j ), ub_val, Tightening::UB ) ); + } + } - auto up_bound = getMaxOfSymbolicVariables( EQ_up ); - auto low_bound = getMinOfSymbolicVariables( EQ_low ); - return std::make_tuple( up_bound, low_bound ); } + void AlphaCrown::optimizeBounds( int loops ) { std::vector alphaSlopesForUpBound; std::vector alphaSlopesForLowBound; - for ( auto &tensor : _alphaSlopes ) + for ( auto &tensor : _initialAlphaSlopes ) { alphaSlopesForUpBound.push_back( tensor.detach().clone().requires_grad_(true) ); alphaSlopesForLowBound.push_back( tensor.detach().clone().requires_grad_(true) ); } AlphaCrown::GDloop( loops, "max", alphaSlopesForUpBound ); AlphaCrown::GDloop( loops, "min", alphaSlopesForLowBound ); + updateBounds( alphaSlopesForUpBound ); + updateBounds( alphaSlopesForLowBound); std::cout << "AlphaCrown run completed." << std::endl; } @@ -207,7 +318,7 @@ void AlphaCrown::GDloop( int loops, auto [max_val, min_val] = AlphaCrown::computeBounds( alphaSlopes ); auto loss = ( val_to_opt == "max" ) ? 
max_val.sum() : -min_val.sum(); - loss.backward(torch::Tensor(), /*retain_graph=*/true); + loss.backward(torch::Tensor(), /retain_graph=/true); optimizer.step(); @@ -227,4 +338,6 @@ void AlphaCrown::log( const String &message ) if ( GlobalConfiguration::NETWORK_LEVEL_REASONER_LOGGING ) printf( "DeepPolyAnalysis: %s\n", message.ascii() ); } -} // namespace NLR + + +} // namespace NLR \ No newline at end of file diff --git a/src/nlr/AlphaCrown.h b/src/nlr/AlphaCrown.h index 65201c8a0f..fafabb1db0 100644 --- a/src/nlr/AlphaCrown.h +++ b/src/nlr/AlphaCrown.h @@ -1,6 +1,5 @@ - -#ifndef _ALPHACROWN_H_ -#define _ALPHACROWN_H_ +#ifndef ALPHACROWN_H +#define ALPHACROWN_H #include "CustomDNN.h" #include "LayerOwner.h" @@ -18,9 +17,12 @@ class AlphaCrown void findBounds(); void optimizeBounds( int loops = 50 ); void run() + { findBounds(); + updateBounds(_initialAlphaSlopes); optimizeBounds( 2 ); + } private: @@ -34,16 +36,25 @@ class AlphaCrown torch::Tensor _ubInput; std::vector _linearLayers; - std::vector _positiveWeights; - std::vector _negativeWeights; - std::vector _biases; + std::vector _layersOrder; + std::map _positiveWeights; + std::map _negativeWeights; + std::map _biases; + std::map _indexAlphaSlopeMap; + std::map _linearIndexMap; - std::vector _upperRelaxationSlopes; - std::vector _upperRelaxationIntercepts; + std::map _upperRelaxationSlopes; + std::map _upperRelaxationIntercepts; - std::vector _alphaSlopes; + std::vector _initialAlphaSlopes; torch::Tensor createSymbolicVariablesMatrix(); + void relaxReluLayer(unsigned layerNumber, torch::Tensor &EQ_up, torch::Tensor &EQ_low); + void computeWeightedSumLayer(unsigned i, torch::Tensor &EQ_up, torch::Tensor &EQ_low); + void computeReluLayer(unsigned i, torch::Tensor &EQ_up, torch::Tensor &EQ_low, std::vector &alphaSlopes); + + void updateBounds(std::vector &alphaSlopes); + void updateBoundsOfLayer(unsigned layerIndex, torch::Tensor &upBounds, torch::Tensor &lowBounds); static torch::Tensor addVecToLastColumnValue( 
const torch::Tensor &matrix, const torch::Tensor &vec ) @@ -66,4 +77,4 @@ class AlphaCrown } // namespace NLR -#endif //_ALPHACROWN_H_ +#endif //ALPHACROWN_H \ No newline at end of file From 78edae40d78e4137d7deda3f3c4dc0ebb2a75a58 Mon Sep 17 00:00:00 2001 From: mayaswissa Date: Sun, 3 Aug 2025 10:19:43 +0300 Subject: [PATCH 10/33] minor bugs fixed --- CMakeLists.txt | 2 +- src/engine/CustomDNN.h | 5 +++++ src/nlr/AlphaCrown.cpp | 10 +++++----- 3 files changed, 11 insertions(+), 6 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 03b0d85e54..bf6c6cbed4 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -98,7 +98,7 @@ find_package(Boost ${BOOST_VERSION} COMPONENTS program_options timer chrono thre # Find boost if (NOT ${Boost_FOUND}) execute_process(COMMAND ${TOOLS_DIR}/download_boost.${SCRIPT_EXTENSION} ${BOOST_VERSION}) - find_package(Boost ${BOOST_VERSION} REQUIRED COMPONENTS program_options timer chrono thread) + find_package(Boost ${BOOST_VERSION} REQUIRED COMPONENTS program_options timer chrono thread regex) endif() set(LIBS_INCLUDES ${Boost_INCLUDE_DIRS}) list(APPEND LIBS ${Boost_LIBRARIES}) diff --git a/src/engine/CustomDNN.h b/src/engine/CustomDNN.h index 8fefd4778a..900f64e83f 100644 --- a/src/engine/CustomDNN.h +++ b/src/engine/CustomDNN.h @@ -99,6 +99,11 @@ class CustomDNN : public torch::nn::Module return _layersOrder; } + unsigned getNumberOfLayers() const + { + return _numberOfLayers; + } + private: const NetworkLevelReasoner *_networkLevelReasoner; Vector _layerSizes; diff --git a/src/nlr/AlphaCrown.cpp b/src/nlr/AlphaCrown.cpp index c6aa7269cb..1c8c56aac4 100644 --- a/src/nlr/AlphaCrown.cpp +++ b/src/nlr/AlphaCrown.cpp @@ -19,7 +19,7 @@ AlphaCrown::AlphaCrown( LayerOwner *layerOwner ) _layersOrder = _network->getLayersOrder().getContainer(); unsigned linearIndex = 0; - for ( unsigned i = 0; i < _network->_numberOfLayers; i++ ) + for ( unsigned i = 0; i < _network->getNumberOfLayers(); i++ ) { if (_layersOrder[i] != 
Layer::WEIGHTED_SUM) continue; //const Layer *layer = _layerOwner->getLayer( i ); @@ -132,7 +132,7 @@ void AlphaCrown::findBounds() torch::Tensor EQ_up = createSymbolicVariablesMatrix(); torch::Tensor EQ_low = createSymbolicVariablesMatrix(); - for ( unsigned i = 0; i < _network->_numberOfLayers; i++ ){ + for ( unsigned i = 0; i < _network->getNumberOfLayers(); i++ ){ Layer::Type layerType = _layersOrder[i]; switch (layerType) { @@ -157,7 +157,7 @@ std::tuple AlphaCrown::computeBounds { torch::Tensor EQ_up = createSymbolicVariablesMatrix(); torch::Tensor EQ_low = createSymbolicVariablesMatrix(); - for ( unsigned i = 0; i < _network->_numberOfLayers; i++ ) + for ( unsigned i = 0; i < _network->getNumberOfLayers(); i++ ) { auto layerType = _layersOrder[i]; switch (layerType) @@ -219,7 +219,7 @@ void AlphaCrown::updateBounds(std::vector &alphaSlopes){ torch::Tensor EQ_low = createSymbolicVariablesMatrix(); - for ( unsigned i = 0; i < _network->_numberOfLayers; i++ ) + for ( unsigned i = 0; i < _network->getNumberOfLayers(); i++ ) { auto layerType = _layersOrder[i]; switch (layerType) @@ -318,7 +318,7 @@ void AlphaCrown::GDloop( int loops, auto [max_val, min_val] = AlphaCrown::computeBounds( alphaSlopes ); auto loss = ( val_to_opt == "max" ) ? 
max_val.sum() : -min_val.sum(); - loss.backward(torch::Tensor(), /retain_graph=/true); + loss.backward(torch::Tensor(), true); optimizer.step(); From 56dd0755ee9e16f51a9bd101874f937364be741d Mon Sep 17 00:00:00 2001 From: mayaswissa Date: Wed, 6 Aug 2025 16:08:34 +0300 Subject: [PATCH 11/33] add option alpha crown and test draft --- src/configuration/Options.cpp | 6 ++- src/engine/Engine.cpp | 2 + src/engine/SymbolicBoundTighteningType.h | 3 +- src/nlr/NetworkLevelReasoner.cpp | 14 ++++++- src/nlr/tests/Test_AlphaCrown.h | 49 ++++++++++++++++++++++++ 5 files changed, 69 insertions(+), 5 deletions(-) create mode 100644 src/nlr/tests/Test_AlphaCrown.h diff --git a/src/configuration/Options.cpp b/src/configuration/Options.cpp index 657ddb6b7c..c834a9c590 100644 --- a/src/configuration/Options.cpp +++ b/src/configuration/Options.cpp @@ -91,7 +91,7 @@ void Options::initializeDefaultValues() _stringOptions[SUMMARY_FILE] = ""; _stringOptions[SPLITTING_STRATEGY] = "auto"; _stringOptions[SNC_SPLITTING_STRATEGY] = "auto"; - _stringOptions[SYMBOLIC_BOUND_TIGHTENING_TYPE] = "deeppoly"; + _stringOptions[SYMBOLIC_BOUND_TIGHTENING_TYPE] = "alphacrown"; _stringOptions[MILP_SOLVER_BOUND_TIGHTENING_TYPE] = "none"; _stringOptions[QUERY_DUMP_FILE] = ""; _stringOptions[IMPORT_ASSIGNMENT_FILE_PATH] = "assignment.txt"; @@ -189,10 +189,12 @@ SymbolicBoundTighteningType Options::getSymbolicBoundTighteningType() const return SymbolicBoundTighteningType::SYMBOLIC_BOUND_TIGHTENING; else if ( strategyString == "deeppoly" ) return SymbolicBoundTighteningType::DEEP_POLY; + else if (strategyString == "alphacrown") + return SymbolicBoundTighteningType::ALPHA_CROWN; else if ( strategyString == "none" ) return SymbolicBoundTighteningType::NONE; else - return SymbolicBoundTighteningType::DEEP_POLY; + return SymbolicBoundTighteningType::ALPHA_CROWN; } MILPSolverBoundTighteningType Options::getMILPSolverBoundTighteningType() const diff --git a/src/engine/Engine.cpp b/src/engine/Engine.cpp index 
c451328da0..820cb48867 100644 --- a/src/engine/Engine.cpp +++ b/src/engine/Engine.cpp @@ -2444,6 +2444,8 @@ unsigned Engine::performSymbolicBoundTightening( Query *inputQuery ) _networkLevelReasoner->symbolicBoundPropagation(); else if ( _symbolicBoundTighteningType == SymbolicBoundTighteningType::DEEP_POLY ) _networkLevelReasoner->alphaCrown(); + else if ( _symbolicBoundTighteningType == SymbolicBoundTighteningType::ALPHA_CROWN ) + _networkLevelReasoner->alphaCrownPropagation(); // Step 3: Extract the bounds List tightenings; diff --git a/src/engine/SymbolicBoundTighteningType.h b/src/engine/SymbolicBoundTighteningType.h index 509c0ae21c..56a48fa730 100644 --- a/src/engine/SymbolicBoundTighteningType.h +++ b/src/engine/SymbolicBoundTighteningType.h @@ -22,7 +22,8 @@ enum class SymbolicBoundTighteningType { SYMBOLIC_BOUND_TIGHTENING = 0, DEEP_POLY = 1, - NONE = 2, + ALPHA_CROWN = 2, + NONE = 3, }; #endif // __SymbolicBoundTighteningType_h__ diff --git a/src/nlr/NetworkLevelReasoner.cpp b/src/nlr/NetworkLevelReasoner.cpp index a5a6a2eeed..d391e305b3 100644 --- a/src/nlr/NetworkLevelReasoner.cpp +++ b/src/nlr/NetworkLevelReasoner.cpp @@ -34,13 +34,15 @@ #include -#define NLR_LOG( x, ... ) MARABOU_LOG( GlobalConfiguration::NETWORK_LEVEL_REASONER_LOGGING, "NLR: %s\n", x ) +#define NLR_LOG( x, ... 
) \ + MARABOU_LOG( GlobalConfiguration::NETWORK_LEVEL_REASONER_LOGGING, "NLR: %s\n", x ) namespace NLR { NetworkLevelReasoner::NetworkLevelReasoner() : _tableau( NULL ) , _deepPolyAnalysis( nullptr ) + , _alphaCrown( nullptr ) { } @@ -200,20 +202,28 @@ void NetworkLevelReasoner::clearConstraintTightenings() void NetworkLevelReasoner::symbolicBoundPropagation() { + _outputBounds.clear(); + _boundTightenings.clear(); + for ( unsigned i = 0; i < _layerIndexToLayer.size(); ++i ) _layerIndexToLayer[i]->computeSymbolicBounds(); } void NetworkLevelReasoner::deepPolyPropagation() { + _outputBounds.clear(); + _boundTightenings.clear(); + if ( _deepPolyAnalysis == nullptr ) _deepPolyAnalysis = std::unique_ptr( new DeepPolyAnalysis( this ) ); _deepPolyAnalysis->run(); } -void NetworkLevelReasoner::alphaCrown() +void NetworkLevelReasoner::alphaCrownPropagation() { #ifdef BUILD_TORCH + _outputBounds.clear(); + _boundTightenings.clear(); if ( _alphaCrown == nullptr ) _alphaCrown = std::unique_ptr( new AlphaCrown( this ) ); _alphaCrown->run(); diff --git a/src/nlr/tests/Test_AlphaCrown.h b/src/nlr/tests/Test_AlphaCrown.h new file mode 100644 index 0000000000..6d6942557a --- /dev/null +++ b/src/nlr/tests/Test_AlphaCrown.h @@ -0,0 +1,49 @@ +// +// Created by maya-swisa on 8/6/25. +// + +#ifndef TEST_ALPHACROWN_H +#define TEST_ALPHACROWN_H + +#include "../../engine/tests/MockTableau.h" +#include "InputQuery.h" +#include "Layer.h" +#include "NetworkLevelReasoner.h" +#include "Tightening.h" +#include "AcasParser.h" + +#include + +class AlphaCrownAnalysisTestSuite : public CxxTest::TestSuite +{ +public: + void setUp() + { + } + + void tearDown() + { + } + + void testWithAttack() + { + // todo set property to unast property (1.1) ? 
+ // todo create nlr + auto networkFilePath = "resources/onnx/acasxu/ACASXU_experimental_v2a_1_1.onnx"; + auto propertyFilePath = "resources/properties/acas_property_4.txt"; // todo check UNSAT property + AcasParser *_acasParser = new AcasParser( networkFilePath ); + InputQuery _inputQuery; + _acasParser->generateQuery( _inputQuery ); + PropertyParser().parse( propertyFilePath, _inputQuery ); + CWAttack cwAttack = std::make_unique( _networkLevelReasoner ); + auto attackResultBeforeBoundTightening = cwAttack->runAttack(); + // todo call alpha crown + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.alphaCrownPropagation() ); + TS_ASSERT( !_cwAttack->runAttack() ) +// todo maybe all it loop + +} +}; + +#endif //TEST_ALPHACROWN_H From 6bb0a27a1aa21a6fee01335ad38d5e51178956b3 Mon Sep 17 00:00:00 2001 From: mayaswissa Date: Wed, 6 Aug 2025 18:21:16 +0300 Subject: [PATCH 12/33] modify test --- src/nlr/CMakeLists.txt | 1 + src/nlr/tests/Test_AlphaCrown.h | 34 +++++++++++++++++++++------------ 2 files changed, 23 insertions(+), 12 deletions(-) diff --git a/src/nlr/CMakeLists.txt b/src/nlr/CMakeLists.txt index e377a638ba..20847987d0 100644 --- a/src/nlr/CMakeLists.txt +++ b/src/nlr/CMakeLists.txt @@ -15,6 +15,7 @@ macro(network_level_reasoner_add_unit_test name) endmacro() network_level_reasoner_add_unit_test(DeepPolyAnalysis) +network_level_reasoner_add_unit_test(AlphaCrown) network_level_reasoner_add_unit_test(NetworkLevelReasoner) network_level_reasoner_add_unit_test(WsLayerElimination) network_level_reasoner_add_unit_test(ParallelSolver) diff --git a/src/nlr/tests/Test_AlphaCrown.h b/src/nlr/tests/Test_AlphaCrown.h index 6d6942557a..0ea8c94144 100644 --- a/src/nlr/tests/Test_AlphaCrown.h +++ b/src/nlr/tests/Test_AlphaCrown.h @@ -6,13 +6,17 @@ #define TEST_ALPHACROWN_H #include "../../engine/tests/MockTableau.h" +#include "AcasParser.h" +#include "CWAttack.h" +#include "Engine.h" #include "InputQuery.h" #include "Layer.h" #include 
"NetworkLevelReasoner.h" +#include "PropertyParser.h" #include "Tightening.h" -#include "AcasParser.h" #include +#include class AlphaCrownAnalysisTestSuite : public CxxTest::TestSuite { @@ -27,23 +31,29 @@ class AlphaCrownAnalysisTestSuite : public CxxTest::TestSuite void testWithAttack() { +#ifdef BUILD_TORCH // todo set property to unast property (1.1) ? // todo create nlr auto networkFilePath = "resources/onnx/acasxu/ACASXU_experimental_v2a_1_1.onnx"; - auto propertyFilePath = "resources/properties/acas_property_4.txt"; // todo check UNSAT property + auto propertyFilePath = "resources/properties/acas_property_4.txt"; // todo check UNSAT + // property AcasParser *_acasParser = new AcasParser( networkFilePath ); InputQuery _inputQuery; _acasParser->generateQuery( _inputQuery ); PropertyParser().parse( propertyFilePath, _inputQuery ); - CWAttack cwAttack = std::make_unique( _networkLevelReasoner ); - auto attackResultBeforeBoundTightening = cwAttack->runAttack(); - // todo call alpha crown - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.alphaCrownPropagation() ); - TS_ASSERT( !_cwAttack->runAttack() ) -// todo maybe all it loop - -} + std::unique_ptr _engine = std::make_unique(); + _engine->processInputQuery( _inputQuery ); + NLR::NetworkLevelReasoner *_networkLevelReasoner = _engine->getNetworkLevelReasoner(); + TS_ASSERT_THROWS_NOTHING( _networkLevelReasoner->obtainCurrentBounds() ); + std::unique_ptr cwAttack = std::make_unique( _networkLevelReasoner ); + auto attackResultBeforeBoundTightening = cwAttack->runAttack(); + auto attackResultAfterBoundTightening = cwAttack->runAttack(); + TS_ASSERT( attackResultBeforeBoundTightening ) + TS_ASSERT_THROWS_NOTHING( _networkLevelReasoner->alphaCrownPropagation() ); + TS_ASSERT( !attackResultAfterBoundTightening ) +#endif + } + }; -#endif //TEST_ALPHACROWN_H +#endif // TEST_ALPHACROWN_H From 0ffa939fc04c4ccfc9b84b9a8dfc039076100d09 Mon Sep 17 00:00:00 2001 From: mayaswissa Date: 
Wed, 6 Aug 2025 18:44:31 +0300 Subject: [PATCH 13/33] minor test fix - still with bug --- src/nlr/tests/Test_AlphaCrown.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/nlr/tests/Test_AlphaCrown.h b/src/nlr/tests/Test_AlphaCrown.h index 0ea8c94144..4816687de6 100644 --- a/src/nlr/tests/Test_AlphaCrown.h +++ b/src/nlr/tests/Test_AlphaCrown.h @@ -34,10 +34,10 @@ class AlphaCrownAnalysisTestSuite : public CxxTest::TestSuite #ifdef BUILD_TORCH // todo set property to unast property (1.1) ? // todo create nlr - auto networkFilePath = "resources/onnx/acasxu/ACASXU_experimental_v2a_1_1.onnx"; - auto propertyFilePath = "resources/properties/acas_property_4.txt"; // todo check UNSAT + auto networkFilePath = "resources/nnet/acasxu/ACASXU_experimental_v2a_1_1.nnet"; + auto propertyFilePath = String("resources/properties/acas_property_4.txt"); // todo check UNSAT // property - AcasParser *_acasParser = new AcasParser( networkFilePath ); + auto *_acasParser = new AcasParser( networkFilePath ); InputQuery _inputQuery; _acasParser->generateQuery( _inputQuery ); PropertyParser().parse( propertyFilePath, _inputQuery ); From 9749b122c6287ecce05cb004358c1953bdc8a133 Mon Sep 17 00:00:00 2001 From: mayaswissa Date: Thu, 7 Aug 2025 10:45:39 +0300 Subject: [PATCH 14/33] test fix --- src/nlr/tests/Test_AlphaCrown.h | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/nlr/tests/Test_AlphaCrown.h b/src/nlr/tests/Test_AlphaCrown.h index 4816687de6..f4e3da4431 100644 --- a/src/nlr/tests/Test_AlphaCrown.h +++ b/src/nlr/tests/Test_AlphaCrown.h @@ -32,28 +32,28 @@ class AlphaCrownAnalysisTestSuite : public CxxTest::TestSuite void testWithAttack() { #ifdef BUILD_TORCH - // todo set property to unast property (1.1) ? 
- // todo create nlr - auto networkFilePath = "resources/nnet/acasxu/ACASXU_experimental_v2a_1_1.nnet"; - auto propertyFilePath = String("resources/properties/acas_property_4.txt"); // todo check UNSAT - // property + + auto networkFilePath = "/home/maya-swisa/Documents/rigora/Marabou/resources/nnet/acasxu/" + "ACASXU_experimental_v2a_1_1.nnet"; + auto propertyFilePath = "/home/maya-swisa/Documents/rigora/Marabou/resources/properties/" + "acas_property_4.txt"; // todo check UNSAT + auto *_acasParser = new AcasParser( networkFilePath ); InputQuery _inputQuery; _acasParser->generateQuery( _inputQuery ); PropertyParser().parse( propertyFilePath, _inputQuery ); std::unique_ptr _engine = std::make_unique(); + Options *options = Options::get(); + options->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "alphacrown" ); + // obtain the alpha crown proceeder _engine->processInputQuery( _inputQuery ); NLR::NetworkLevelReasoner *_networkLevelReasoner = _engine->getNetworkLevelReasoner(); TS_ASSERT_THROWS_NOTHING( _networkLevelReasoner->obtainCurrentBounds() ); std::unique_ptr cwAttack = std::make_unique( _networkLevelReasoner ); - auto attackResultBeforeBoundTightening = cwAttack->runAttack(); auto attackResultAfterBoundTightening = cwAttack->runAttack(); - TS_ASSERT( attackResultBeforeBoundTightening ) - TS_ASSERT_THROWS_NOTHING( _networkLevelReasoner->alphaCrownPropagation() ); TS_ASSERT( !attackResultAfterBoundTightening ) #endif } - }; #endif // TEST_ALPHACROWN_H From 28f1deea41edb2c1a2c403cf1f5175292b9bef14 Mon Sep 17 00:00:00 2001 From: mayaswissa Date: Thu, 7 Aug 2025 14:20:58 +0300 Subject: [PATCH 15/33] test fix --- src/nlr/CMakeLists.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/nlr/CMakeLists.txt b/src/nlr/CMakeLists.txt index 20847987d0..f008a36da0 100644 --- a/src/nlr/CMakeLists.txt +++ b/src/nlr/CMakeLists.txt @@ -7,7 +7,7 @@ target_include_directories(${MARABOU_LIB} PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}") 
target_sources(${MARABOU_TEST_LIB} PRIVATE ${SRCS}) target_include_directories(${MARABOU_TEST_LIB} PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}") -set (NETWORK_LEVEL_REASONER_TESTS_DIR "${CMAKE_CURRENT_SOURCE_DIR}/tests") +set(NETWORK_LEVEL_REASONER_TESTS_DIR "${CMAKE_CURRENT_SOURCE_DIR}/tests") macro(network_level_reasoner_add_unit_test name) set(USE_MOCK_COMMON TRUE) set(USE_MOCK_ENGINE TRUE) @@ -22,9 +22,9 @@ network_level_reasoner_add_unit_test(ParallelSolver) if (${ENABLE_GUROBI}) network_level_reasoner_add_unit_test(LPRelaxation) -endif() +endif () if (${BUILD_PYTHON}) target_include_directories(${MARABOU_PY} PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}") -endif() +endif () From a20d17f95078fa53d5f2dc95d2fac5d0d6a2a288 Mon Sep 17 00:00:00 2001 From: mayaswissa Date: Thu, 7 Aug 2025 14:32:09 +0300 Subject: [PATCH 16/33] test fix --- src/nlr/tests/Test_AlphaCrown.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/nlr/tests/Test_AlphaCrown.h b/src/nlr/tests/Test_AlphaCrown.h index f4e3da4431..fc867ab647 100644 --- a/src/nlr/tests/Test_AlphaCrown.h +++ b/src/nlr/tests/Test_AlphaCrown.h @@ -33,10 +33,10 @@ class AlphaCrownAnalysisTestSuite : public CxxTest::TestSuite { #ifdef BUILD_TORCH - auto networkFilePath = "/home/maya-swisa/Documents/rigora/Marabou/resources/nnet/acasxu/" + auto networkFilePath = "../../../resources/nnet/acasxu/" "ACASXU_experimental_v2a_1_1.nnet"; - auto propertyFilePath = "/home/maya-swisa/Documents/rigora/Marabou/resources/properties/" - "acas_property_4.txt"; // todo check UNSAT + auto propertyFilePath = "../../../resources/properties/" + "acas_property_4.txt"; auto *_acasParser = new AcasParser( networkFilePath ); InputQuery _inputQuery; From 5265890e25ead6e4442bbd6eb8fed64f12221367 Mon Sep 17 00:00:00 2001 From: mayaswissa Date: Thu, 7 Aug 2025 15:35:47 +0300 Subject: [PATCH 17/33] test fix --- src/nlr/tests/Test_AlphaCrown.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/nlr/tests/Test_AlphaCrown.h 
b/src/nlr/tests/Test_AlphaCrown.h index fc867ab647..e8e66372e6 100644 --- a/src/nlr/tests/Test_AlphaCrown.h +++ b/src/nlr/tests/Test_AlphaCrown.h @@ -51,7 +51,8 @@ class AlphaCrownAnalysisTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( _networkLevelReasoner->obtainCurrentBounds() ); std::unique_ptr cwAttack = std::make_unique( _networkLevelReasoner ); auto attackResultAfterBoundTightening = cwAttack->runAttack(); - TS_ASSERT( !attackResultAfterBoundTightening ) + TS_ASSERT( !attackResultAfterBoundTightening ); + delete _acasParser; #endif } }; From 4d903ccb72dd1a6c70bff9530055c9e29ce372c0 Mon Sep 17 00:00:00 2001 From: mayaswissa Date: Thu, 7 Aug 2025 17:33:15 +0300 Subject: [PATCH 18/33] test fix --- src/nlr/tests/Test_AlphaCrown.h | 245 +++++++++++++++++++++++++++++--- 1 file changed, 222 insertions(+), 23 deletions(-) diff --git a/src/nlr/tests/Test_AlphaCrown.h b/src/nlr/tests/Test_AlphaCrown.h index e8e66372e6..e4da6274d4 100644 --- a/src/nlr/tests/Test_AlphaCrown.h +++ b/src/nlr/tests/Test_AlphaCrown.h @@ -29,32 +29,231 @@ class AlphaCrownAnalysisTestSuite : public CxxTest::TestSuite { } - void testWithAttack() +// void testWithAttack() +// { +// #ifdef BUILD_TORCH +// +// auto networkFilePath = "../../../resources/nnet/acasxu/" +// "ACASXU_experimental_v2a_1_1.nnet"; +// auto propertyFilePath = "../../../resources/properties/" +// "acas_property_4.txt"; +// +// auto *_acasParser = new AcasParser( networkFilePath ); +// InputQuery _inputQuery; +// _acasParser->generateQuery( _inputQuery ); +// PropertyParser().parse( propertyFilePath, _inputQuery ); +// std::unique_ptr _engine = std::make_unique(); +// Options *options = Options::get(); +// options->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "alphacrown" ); +// // obtain the alpha crown proceeder +// _engine->processInputQuery( _inputQuery ); +// NLR::NetworkLevelReasoner *_networkLevelReasoner = _engine->getNetworkLevelReasoner(); +// TS_ASSERT_THROWS_NOTHING( 
_networkLevelReasoner->obtainCurrentBounds() ); +// std::unique_ptr cwAttack = std::make_unique( _networkLevelReasoner ); +// auto attackResultAfterBoundTightening = cwAttack->runAttack(); +// TS_ASSERT( !attackResultAfterBoundTightening ); +// delete _acasParser; + + void populateNetwork( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { -#ifdef BUILD_TORCH - - auto networkFilePath = "../../../resources/nnet/acasxu/" - "ACASXU_experimental_v2a_1_1.nnet"; - auto propertyFilePath = "../../../resources/properties/" - "acas_property_4.txt"; - - auto *_acasParser = new AcasParser( networkFilePath ); - InputQuery _inputQuery; - _acasParser->generateQuery( _inputQuery ); - PropertyParser().parse( propertyFilePath, _inputQuery ); - std::unique_ptr _engine = std::make_unique(); - Options *options = Options::get(); - options->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "alphacrown" ); - // obtain the alpha crown proceeder - _engine->processInputQuery( _inputQuery ); - NLR::NetworkLevelReasoner *_networkLevelReasoner = _engine->getNetworkLevelReasoner(); - TS_ASSERT_THROWS_NOTHING( _networkLevelReasoner->obtainCurrentBounds() ); - std::unique_ptr cwAttack = std::make_unique( _networkLevelReasoner ); + /* + + 1 R 1 R 1 1 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 1 \ / 0 \ / + \/ \/ \/ + /\ /\ /\ + 1 / \ 1 / \ 1 / \ + / \ R / \ R / 1 \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + -1 -1 + + The example described in Fig. 
3 of + https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, 1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 0 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + + nlr.setBias( 5, 0, 1 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + 
tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void test_alphacrown_relus() + { + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetwork( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke Deeppoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.alphaCrownPropagation() ); + + /* + Input ranges: + + x0: [-1, 1] + x1: [-1, 1] + + Layer 1: + + x2: [-2, 2] + x3: [-2, 2] + + Layer 2: + + x4: [0, 2] + x5: [0, 2] + + Layer 3: + + x6: [0, 3] + x7: [-2, 2] + + Layer 4: + + x8: [0, 3] + x9: [0, 2] + + Layer 5: + + x10: [1, 5.5] + x11: [0, 2] + + */ + + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + for ( const auto &bound : bounds ) + { + if ( bound._type == Tightening::LB ) + printf( "lower:\n" ); + else + printf( "upper:\n" ); + std::cout << "var : " << bound._variable << " bound : " << bound._value << std::endl; + } + double large = 1000000; + tableau.setLowerBound( 1, 2 ); + tableau.setUpperBound( 1, large ); + + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + 
TS_ASSERT_THROWS_NOTHING( nlr.alphaCrownPropagation() ); + std::unique_ptr cwAttack = std::make_unique( &nlr ); auto attackResultAfterBoundTightening = cwAttack->runAttack(); TS_ASSERT( !attackResultAfterBoundTightening ); - delete _acasParser; -#endif + + tableau.setLowerBound( 1, -2 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, 2 ); + tableau.setUpperBound( 1, large ); + + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.alphaCrownPropagation() ); + cwAttack = std::make_unique( &nlr ); + attackResultAfterBoundTightening = cwAttack->runAttack(); + TS_ASSERT( !attackResultAfterBoundTightening ); + + + tableau.setLowerBound( 1, -large ); + tableau.setUpperBound( 1, -2 ); + tableau.setLowerBound( 2, 2 ); + tableau.setUpperBound( 1, large ); + + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.alphaCrownPropagation() ); + cwAttack = std::make_unique( &nlr ); + attackResultAfterBoundTightening = cwAttack->runAttack(); + TS_ASSERT( !attackResultAfterBoundTightening ); + + + } -}; + +} +; #endif // TEST_ALPHACROWN_H From 7f73ffff7e536299b2dcc5f0781628134fc02af2 Mon Sep 17 00:00:00 2001 From: mayaswissa Date: Thu, 7 Aug 2025 17:45:59 +0300 Subject: [PATCH 19/33] attack found bug --- src/nlr/tests/Test_AlphaCrown.h | 77 +++++++++++++++------------------ 1 file changed, 36 insertions(+), 41 deletions(-) diff --git a/src/nlr/tests/Test_AlphaCrown.h b/src/nlr/tests/Test_AlphaCrown.h index e4da6274d4..e20bbf8d05 100644 --- a/src/nlr/tests/Test_AlphaCrown.h +++ b/src/nlr/tests/Test_AlphaCrown.h @@ -29,30 +29,30 @@ class AlphaCrownAnalysisTestSuite : public CxxTest::TestSuite { } -// void testWithAttack() -// { -// #ifdef BUILD_TORCH -// -// auto networkFilePath = "../../../resources/nnet/acasxu/" -// "ACASXU_experimental_v2a_1_1.nnet"; -// auto propertyFilePath = "../../../resources/properties/" -// "acas_property_4.txt"; -// -// auto *_acasParser = new AcasParser( networkFilePath ); 
-// InputQuery _inputQuery; -// _acasParser->generateQuery( _inputQuery ); -// PropertyParser().parse( propertyFilePath, _inputQuery ); -// std::unique_ptr _engine = std::make_unique(); -// Options *options = Options::get(); -// options->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "alphacrown" ); -// // obtain the alpha crown proceeder -// _engine->processInputQuery( _inputQuery ); -// NLR::NetworkLevelReasoner *_networkLevelReasoner = _engine->getNetworkLevelReasoner(); -// TS_ASSERT_THROWS_NOTHING( _networkLevelReasoner->obtainCurrentBounds() ); -// std::unique_ptr cwAttack = std::make_unique( _networkLevelReasoner ); -// auto attackResultAfterBoundTightening = cwAttack->runAttack(); -// TS_ASSERT( !attackResultAfterBoundTightening ); -// delete _acasParser; + // void testWithAttack() + // { + // #ifdef BUILD_TORCH + // + // auto networkFilePath = "../../../resources/nnet/acasxu/" + // "ACASXU_experimental_v2a_1_1.nnet"; + // auto propertyFilePath = "../../../resources/properties/" + // "acas_property_4.txt"; + // + // auto *_acasParser = new AcasParser( networkFilePath ); + // InputQuery _inputQuery; + // _acasParser->generateQuery( _inputQuery ); + // PropertyParser().parse( propertyFilePath, _inputQuery ); + // std::unique_ptr _engine = std::make_unique(); + // Options *options = Options::get(); + // options->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "alphacrown" ); + // // obtain the alpha crown proceeder + // _engine->processInputQuery( _inputQuery ); + // NLR::NetworkLevelReasoner *_networkLevelReasoner = + // _engine->getNetworkLevelReasoner(); TS_ASSERT_THROWS_NOTHING( + // _networkLevelReasoner->obtainCurrentBounds() ); std::unique_ptr cwAttack = + // std::make_unique( _networkLevelReasoner ); auto + // attackResultAfterBoundTightening = cwAttack->runAttack(); TS_ASSERT( + // !attackResultAfterBoundTightening ); delete _acasParser; void populateNetwork( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { @@ -216,21 +216,20 @@ class 
AlphaCrownAnalysisTestSuite : public CxxTest::TestSuite printf( "upper:\n" ); std::cout << "var : " << bound._variable << " bound : " << bound._value << std::endl; } - double large = 1000000; - tableau.setLowerBound( 1, 2 ); - tableau.setUpperBound( 1, large ); + double large = 1000000; + tableau.setLowerBound( 2, 2 ); + tableau.setUpperBound( 2, large ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.alphaCrownPropagation() ); std::unique_ptr cwAttack = std::make_unique( &nlr ); auto attackResultAfterBoundTightening = cwAttack->runAttack(); TS_ASSERT( !attackResultAfterBoundTightening ); - tableau.setLowerBound( 1, -2 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, 2 ); - tableau.setUpperBound( 1, large ); - + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + tableau.setLowerBound( 3, 2 ); + tableau.setUpperBound( 3, large ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.alphaCrownPropagation() ); cwAttack = std::make_unique( &nlr ); @@ -238,10 +237,10 @@ class AlphaCrownAnalysisTestSuite : public CxxTest::TestSuite TS_ASSERT( !attackResultAfterBoundTightening ); - tableau.setLowerBound( 1, -large ); - tableau.setUpperBound( 1, -2 ); - tableau.setLowerBound( 2, 2 ); - tableau.setUpperBound( 1, large ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, -2 ); + tableau.setLowerBound( 3, -2 ); + tableau.setUpperBound( 3, 2 ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.alphaCrownPropagation() ); @@ -249,11 +248,7 @@ class AlphaCrownAnalysisTestSuite : public CxxTest::TestSuite attackResultAfterBoundTightening = cwAttack->runAttack(); TS_ASSERT( !attackResultAfterBoundTightening ); - - } - -} -; +}; #endif // TEST_ALPHACROWN_H From b66be0458c71263c4eee792980664244513611de Mon Sep 17 00:00:00 2001 From: mayaswissa Date: Fri, 8 Aug 2025 12:09:02 +0300 Subject: [PATCH 20/33] attack found bug --- 
src/nlr/tests/Test_AlphaCrown.h | 88 +++++++++++---------------------- 1 file changed, 28 insertions(+), 60 deletions(-) diff --git a/src/nlr/tests/Test_AlphaCrown.h b/src/nlr/tests/Test_AlphaCrown.h index e20bbf8d05..4b58cfe63e 100644 --- a/src/nlr/tests/Test_AlphaCrown.h +++ b/src/nlr/tests/Test_AlphaCrown.h @@ -170,42 +170,12 @@ class AlphaCrownAnalysisTestSuite : public CxxTest::TestSuite // Invoke Deeppoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.alphaCrownPropagation() ); - - /* - Input ranges: - - x0: [-1, 1] - x1: [-1, 1] - - Layer 1: - - x2: [-2, 2] - x3: [-2, 2] - - Layer 2: - - x4: [0, 2] - x5: [0, 2] - - Layer 3: - - x6: [0, 3] - x7: [-2, 2] - - Layer 4: - - x8: [0, 3] - x9: [0, 2] - - Layer 5: - - x10: [1, 5.5] - x11: [0, 2] - - */ - + double large = 1000000; + nlr.setBounds( nlr.getNumberOfLayers() -1 , 1, 0 , large ); List bounds; + // TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + // TS_ASSERT_THROWS_NOTHING( nlr.alphaCrownPropagation() ); TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); for ( const auto &bound : bounds ) @@ -217,36 +187,34 @@ class AlphaCrownAnalysisTestSuite : public CxxTest::TestSuite std::cout << "var : " << bound._variable << " bound : " << bound._value << std::endl; } - double large = 1000000; - tableau.setLowerBound( 2, 2 ); - tableau.setUpperBound( 2, large ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.alphaCrownPropagation() ); + + + // TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); std::unique_ptr cwAttack = std::make_unique( &nlr ); auto attackResultAfterBoundTightening = cwAttack->runAttack(); TS_ASSERT( !attackResultAfterBoundTightening ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - tableau.setLowerBound( 3, 2 ); - tableau.setUpperBound( 3, large ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.alphaCrownPropagation() ); - 
cwAttack = std::make_unique( &nlr ); - attackResultAfterBoundTightening = cwAttack->runAttack(); - TS_ASSERT( !attackResultAfterBoundTightening ); - - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, -2 ); - tableau.setLowerBound( 3, -2 ); - tableau.setUpperBound( 3, 2 ); - - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.alphaCrownPropagation() ); - cwAttack = std::make_unique( &nlr ); - attackResultAfterBoundTightening = cwAttack->runAttack(); - TS_ASSERT( !attackResultAfterBoundTightening ); + // tableau.setLowerBound( 2, -2 ); + // tableau.setUpperBound( 2, 2 ); + // tableau.setLowerBound( 3, 2 ); + // tableau.setUpperBound( 3, large ); + // TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + // TS_ASSERT_THROWS_NOTHING( nlr.alphaCrownPropagation() ); + // cwAttack = std::make_unique( &nlr ); + // attackResultAfterBoundTightening = cwAttack->runAttack(); + // TS_ASSERT( !attackResultAfterBoundTightening ); + // + // + // tableau.setLowerBound( 2, -large ); + // tableau.setUpperBound( 2, -2 ); + // tableau.setLowerBound( 3, -2 ); + // tableau.setUpperBound( 3, 2 ); + // + // TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + // TS_ASSERT_THROWS_NOTHING( nlr.alphaCrownPropagation() ); + // cwAttack = std::make_unique( &nlr ); + // attackResultAfterBoundTightening = cwAttack->runAttack(); + // TS_ASSERT( !attackResultAfterBoundTightening ); } }; From 67a7541ecdd84eee7a2da153f711657d58f2c9c1 Mon Sep 17 00:00:00 2001 From: mayaswissa Date: Sun, 10 Aug 2025 15:59:12 +0300 Subject: [PATCH 21/33] attack found bug --- src/nlr/tests/Test_AlphaCrown.h | 30 ++++-------------------------- 1 file changed, 4 insertions(+), 26 deletions(-) diff --git a/src/nlr/tests/Test_AlphaCrown.h b/src/nlr/tests/Test_AlphaCrown.h index 4b58cfe63e..8cde3c5b23 100644 --- a/src/nlr/tests/Test_AlphaCrown.h +++ b/src/nlr/tests/Test_AlphaCrown.h @@ -170,8 +170,8 @@ class AlphaCrownAnalysisTestSuite : public CxxTest::TestSuite // 
Invoke Deeppoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.alphaCrownPropagation() ); - double large = 1000000; - nlr.setBounds( nlr.getNumberOfLayers() -1 , 1, 0 , large ); + // double large = 1000000; + // nlr.setBounds( nlr.getNumberOfLayers() -1 , 1, 0 , large ); List bounds; // TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -187,34 +187,12 @@ class AlphaCrownAnalysisTestSuite : public CxxTest::TestSuite std::cout << "var : " << bound._variable << " bound : " << bound._value << std::endl; } - - - // TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + double large = 1000000; + nlr.setBounds( nlr.getNumberOfLayers() -1 , 1, 0 , large ); std::unique_ptr cwAttack = std::make_unique( &nlr ); auto attackResultAfterBoundTightening = cwAttack->runAttack(); TS_ASSERT( !attackResultAfterBoundTightening ); - // tableau.setLowerBound( 2, -2 ); - // tableau.setUpperBound( 2, 2 ); - // tableau.setLowerBound( 3, 2 ); - // tableau.setUpperBound( 3, large ); - // TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - // TS_ASSERT_THROWS_NOTHING( nlr.alphaCrownPropagation() ); - // cwAttack = std::make_unique( &nlr ); - // attackResultAfterBoundTightening = cwAttack->runAttack(); - // TS_ASSERT( !attackResultAfterBoundTightening ); - // - // - // tableau.setLowerBound( 2, -large ); - // tableau.setUpperBound( 2, -2 ); - // tableau.setLowerBound( 3, -2 ); - // tableau.setUpperBound( 3, 2 ); - // - // TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - // TS_ASSERT_THROWS_NOTHING( nlr.alphaCrownPropagation() ); - // cwAttack = std::make_unique( &nlr ); - // attackResultAfterBoundTightening = cwAttack->runAttack(); - // TS_ASSERT( !attackResultAfterBoundTightening ); } }; From 19d3feb5d63f178cd00835528a93b691ed607f4d Mon Sep 17 00:00:00 2001 From: mayaswissa Date: Sun, 10 Aug 2025 16:11:02 +0300 Subject: [PATCH 22/33] fix version --- src/nlr/NetworkLevelReasoner.cpp | 7 +------ src/nlr/tests/Test_AlphaCrown.h | 2 +- 2 
files changed, 2 insertions(+), 7 deletions(-) diff --git a/src/nlr/NetworkLevelReasoner.cpp b/src/nlr/NetworkLevelReasoner.cpp index d391e305b3..c060b71845 100644 --- a/src/nlr/NetworkLevelReasoner.cpp +++ b/src/nlr/NetworkLevelReasoner.cpp @@ -211,19 +211,14 @@ void NetworkLevelReasoner::symbolicBoundPropagation() void NetworkLevelReasoner::deepPolyPropagation() { - _outputBounds.clear(); - _boundTightenings.clear(); - if ( _deepPolyAnalysis == nullptr ) _deepPolyAnalysis = std::unique_ptr( new DeepPolyAnalysis( this ) ); _deepPolyAnalysis->run(); } -void NetworkLevelReasoner::alphaCrownPropagation() +void NetworkLevelReasoner::alphaCrown() { #ifdef BUILD_TORCH - _outputBounds.clear(); - _boundTightenings.clear(); if ( _alphaCrown == nullptr ) _alphaCrown = std::unique_ptr( new AlphaCrown( this ) ); _alphaCrown->run(); diff --git a/src/nlr/tests/Test_AlphaCrown.h b/src/nlr/tests/Test_AlphaCrown.h index 8cde3c5b23..43ea17c91f 100644 --- a/src/nlr/tests/Test_AlphaCrown.h +++ b/src/nlr/tests/Test_AlphaCrown.h @@ -169,7 +169,7 @@ class AlphaCrownAnalysisTestSuite : public CxxTest::TestSuite // Invoke Deeppoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.alphaCrownPropagation() ); + TS_ASSERT_THROWS_NOTHING( nlr.alphaCrown() ); // double large = 1000000; // nlr.setBounds( nlr.getNumberOfLayers() -1 , 1, 0 , large ); From 875b315493cae49b713c3f5ac676f16276ba2b18 Mon Sep 17 00:00:00 2001 From: mayaswissa Date: Sun, 10 Aug 2025 16:12:16 +0300 Subject: [PATCH 23/33] fix call for symbolic bound tightening --- src/engine/Engine.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/engine/Engine.cpp b/src/engine/Engine.cpp index 820cb48867..028bae2fbc 100644 --- a/src/engine/Engine.cpp +++ b/src/engine/Engine.cpp @@ -2443,9 +2443,9 @@ unsigned Engine::performSymbolicBoundTightening( Query *inputQuery ) if ( _symbolicBoundTighteningType == SymbolicBoundTighteningType::SYMBOLIC_BOUND_TIGHTENING ) 
_networkLevelReasoner->symbolicBoundPropagation(); else if ( _symbolicBoundTighteningType == SymbolicBoundTighteningType::DEEP_POLY ) - _networkLevelReasoner->alphaCrown(); + _networkLevelReasoner->deepPolyPropagation(); else if ( _symbolicBoundTighteningType == SymbolicBoundTighteningType::ALPHA_CROWN ) - _networkLevelReasoner->alphaCrownPropagation(); + _networkLevelReasoner->alphaCrown(); // Step 3: Extract the bounds List tightenings; From 4b9f58a3d89ebab23951500dd787fa0c6a9571c8 Mon Sep 17 00:00:00 2001 From: mayaswissa Date: Sun, 10 Aug 2025 16:15:09 +0300 Subject: [PATCH 24/33] fix call for symbolic bound tightening --- src/nlr/NetworkLevelReasoner.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/nlr/NetworkLevelReasoner.cpp b/src/nlr/NetworkLevelReasoner.cpp index c060b71845..c20bb38c70 100644 --- a/src/nlr/NetworkLevelReasoner.cpp +++ b/src/nlr/NetworkLevelReasoner.cpp @@ -202,7 +202,6 @@ void NetworkLevelReasoner::clearConstraintTightenings() void NetworkLevelReasoner::symbolicBoundPropagation() { - _outputBounds.clear(); _boundTightenings.clear(); for ( unsigned i = 0; i < _layerIndexToLayer.size(); ++i ) From 32f0233bbc41d1c6d367b6f373ffd4e6ffa83dc7 Mon Sep 17 00:00:00 2001 From: mayaswissa Date: Sun, 10 Aug 2025 18:02:21 +0300 Subject: [PATCH 25/33] attack code --- src/engine/CWAttack.cpp | 256 ++++++++++++++++++++++++++++++++++++++++ src/engine/CWAttack.h | 64 ++++++++++ 2 files changed, 320 insertions(+) create mode 100644 src/engine/CWAttack.cpp create mode 100644 src/engine/CWAttack.h diff --git a/src/engine/CWAttack.cpp b/src/engine/CWAttack.cpp new file mode 100644 index 0000000000..27b3c5744c --- /dev/null +++ b/src/engine/CWAttack.cpp @@ -0,0 +1,256 @@ +#include "CWAttack.h" + +#ifdef BUILD_TORCH + +CWAttack::CWAttack( NLR::NetworkLevelReasoner *networkLevelReasoner ) + : networkLevelReasoner( networkLevelReasoner ) + , _device( torch::cuda::is_available() ? 
torch::kCUDA : torch::kCPU ) + , _model( std::make_unique( networkLevelReasoner ) ) + , _iters( GlobalConfiguration::CW_DEFAULT_ITERS ) + , _restarts( GlobalConfiguration::CW_NUM_RESTARTS ) + , _specLossWeight( 1e-2 ) + , _adversarialInput( nullptr ) + , _adversarialOutput( nullptr ) +{ + _inputSize = _model->getLayerSizes().first(); + getBounds( _inputBounds, GlobalConfiguration::PdgBoundType::ATTACK_INPUT ); + getBounds( _outputBounds, GlobalConfiguration::PdgBoundType::ATTACK_OUTPUT ); + + _inputLb = torch::tensor( _inputBounds.first.getContainer(), torch::kFloat32 ).to( _device ); + _inputUb = torch::tensor( _inputBounds.second.getContainer(), torch::kFloat32 ).to( _device ); + + auto vars = generateSampleAndEpsilon(); + _x0 = vars.first; +} + +CWAttack::~CWAttack() +{ + if ( _adversarialInput ) + delete[] _adversarialInput; + if ( _adversarialOutput ) + delete[] _adversarialOutput; +} + +bool CWAttack::runAttack() +{ + CW_LOG( "-----Starting CW attack-----" ); + auto adversarial = findAdvExample(); + auto advInput = adversarial.first.to( torch::kDouble ); + auto advPred = adversarial.second.to( torch::kDouble ); + + bool isFooled = + isWithinBounds( advInput, _inputBounds ) && isWithinBounds( advPred, _outputBounds ); + + auto inputPtr = advInput.data_ptr(); + auto predPtr = advPred.data_ptr(); + size_t outSize = advPred.size( 1 ); + + if ( isFooled ) + { + _adversarialInput = new double[_inputSize]; + _adversarialOutput = new double[outSize]; + std::copy( inputPtr, inputPtr + _inputSize, _adversarialInput ); + std::copy( predPtr, predPtr + outSize, _adversarialOutput ); + } + CW_LOG( "Input Lower Bounds : " ); + for ( auto &bound : _inputBounds.first.getContainer() ) + printValue( bound ); + CW_LOG( "Input Upper Bounds : " ); + for ( auto &bound : _inputBounds.second.getContainer() ) + printValue( bound ); + + CW_LOG( "Adversarial Input:" ); + for ( int i = 0; i < advInput.numel(); ++i ) + { + CW_LOG( Stringf( "x%u=%.3lf", i, inputPtr[i] ).ascii() ); + } + 
CW_LOG( "Output Lower Bounds : " ); + for ( auto &bound : _outputBounds.first.getContainer() ) + printValue( bound ); + CW_LOG( "Output Upper Bounds : " ); + for ( auto &bound : _outputBounds.second.getContainer() ) + printValue( bound ); + + CW_LOG( "Adversarial Prediction: " ); + for ( int i = 0; i < advPred.numel(); ++i ) + { + CW_LOG( Stringf( "y%u=%.3lf", i, predPtr[i] ).ascii() ); + } + + + if ( isFooled ) + { + CW_LOG( "Model fooled: Yes \n ------ CW Attack Succeed ------\n" ); + } + else + CW_LOG( "Model fooled: No \n ------ CW Attack Failed ------\n" ); + // Concretize assignments if attack succeeded + if ( _adversarialInput ) + networkLevelReasoner->concretizeInputAssignment( _assignments, _adversarialInput ); + return isFooled; +} + + +void CWAttack::getBounds( std::pair, Vector> &bounds, signed type ) const +{ + unsigned layerIndex = type == GlobalConfiguration::PdgBoundType::ATTACK_INPUT + ? 0 + : networkLevelReasoner->getNumberOfLayers() - 1; + const NLR::Layer *layer = networkLevelReasoner->getLayer( layerIndex ); + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + bounds.first.append( layer->getLb( i ) ); + bounds.second.append( layer->getUb( i ) ); + } +} + +std::pair CWAttack::generateSampleAndEpsilon() +{ + Vector sample( _inputSize, 0.0 ), eps( _inputSize, 0.0 ); + for ( unsigned i = 0; i < _inputSize; ++i ) + { + double lo = _inputBounds.first.get( i ), hi = _inputBounds.second.get( i ); + if ( std::isfinite( lo ) && std::isfinite( hi ) ) + { + sample[i] = 0.5 * ( lo + hi ); + eps[i] = 0.5 * ( hi - lo ); + } + else + { + sample[i] = 0.0; + eps[i] = GlobalConfiguration::ATTACK_INPUT_RANGE; + } + } + auto s = torch::tensor( sample.getContainer(), torch::kFloat32 ).unsqueeze( 0 ).to( _device ); + auto e = torch::tensor( eps.getContainer(), torch::kFloat32 ).to( _device ); + return { s, e }; +} + +torch::Tensor CWAttack::calculateLoss( const torch::Tensor &pred ) +{ + auto lb = torch::tensor( _outputBounds.first.data(), torch::kFloat32 ).to( 
_device ); + auto ub = torch::tensor( _outputBounds.second.data(), torch::kFloat32 ).to( _device ); + auto ubv = torch::sum( torch::square( torch::relu( pred - ub ) ) ); + auto lbv = torch::sum( torch::square( torch::relu( lb - pred ) ) ); + return ( ubv + lbv ).to( _device ); +} + +std::pair CWAttack::findAdvExample() +{ + torch::Tensor bestAdv, bestPred; + double bestL2 = std::numeric_limits::infinity(); + double timeoutForAttack = ( Options::get()->getInt( Options::ATTACK_TIMEOUT ) == 0 + ? FloatUtils::infinity() + : Options::get()->getInt( Options::ATTACK_TIMEOUT ) ); + CW_LOG( Stringf( "Adversarial attack timeout set to %f\n", timeoutForAttack ).ascii() ); + timespec startTime = TimeUtils::sampleMicro(); + torch::Tensor advExample; + for ( unsigned r = 0; r < _restarts; ++r ) + { + unsigned long timePassed = TimeUtils::timePassed( startTime, TimeUtils::sampleMicro() ); + if ( static_cast( timePassed ) / MICROSECONDS_TO_SECONDS > timeoutForAttack ) + { + throw MarabouError( MarabouError::TIMEOUT, "Attack failed due to timeout" ); + } + torch::Tensor delta = torch::zeros_like( _x0, torch::requires_grad() ).to( _device ); + torch::optim::Adam optimizer( { delta }, + torch::optim::AdamOptions( GlobalConfiguration::CW_LR ) ); + + for ( unsigned it = 0; it < _iters; ++it ) + { + torch::Tensor prevExample = advExample; + advExample = ( _x0 + delta ).clamp( _inputLb, _inputUb ); + // Skip the equality check on the first iteration + if ( ( it > 0 && prevExample.defined() && advExample.equal( prevExample ) ) || + !isWithinBounds( advExample, _inputBounds ) ) + break; + auto pred = _model->forward( advExample ); + auto specLoss = calculateLoss( pred ); + auto l2norm = torch::sum( torch::pow( advExample - _x0, 2 ) ); + auto loss = l2norm + _specLossWeight * specLoss; + + optimizer.zero_grad(); + loss.backward(); + optimizer.step(); + + if ( specLoss.item() == 0.0 ) + { + double curL2 = l2norm.item(); + if ( curL2 < bestL2 ) + { + bestL2 = curL2; + bestAdv = 
advExample.detach(); + bestPred = pred.detach(); + } + } + } + } + + if ( !bestAdv.defined() ) + { + bestAdv = ( _x0 + torch::zeros_like( _x0 ) ).clamp( _inputLb, _inputUb ); + bestPred = _model->forward( bestAdv ); + } + + return { bestAdv, bestPred }; +} + +bool CWAttack::isWithinBounds( const torch::Tensor &sample, + const std::pair, Vector> &bounds ) +{ + torch::Tensor flatInput = sample.view( { -1 } ); + if ( flatInput.numel() != (int)bounds.first.size() || + flatInput.numel() != (int)bounds.second.size() ) + throw std::runtime_error( "Mismatch in sizes of input and bounds" ); + + for ( int64_t i = 0; i < flatInput.size( 0 ); ++i ) + { + double v = flatInput[i].item(); + double lo = bounds.first.get( i ), hi = bounds.second.get( i ); + if ( std::isinf( lo ) && std::isinf( hi ) ) + continue; + if ( std::isinf( lo ) ) + { + if ( v > hi ) + return false; + } + else if ( std::isinf( hi ) ) + { + if ( v < lo ) + return false; + } + else if ( v < lo || v > hi ) + return false; + } + return true; +} + +double CWAttack::getAssignment( int index ) +{ + return _assignments[index]; +} + +void CWAttack::printValue( double value ) +{ + if ( std::isinf( value ) ) + { + if ( value < 0 ) + { + CW_LOG( "-inf" ); + } + else + { + CW_LOG( "inf" ); + } + } + else if ( std::isnan( value ) ) + { + CW_LOG( "nan" ); + } + else + { + CW_LOG( Stringf( "%.3lf", value ).ascii() ); + } +} + +#endif // BUILD_TORCH diff --git a/src/engine/CWAttack.h b/src/engine/CWAttack.h new file mode 100644 index 0000000000..742c99fe7b --- /dev/null +++ b/src/engine/CWAttack.h @@ -0,0 +1,64 @@ +#ifndef __CWATTACK_H__ +#define __CWATTACK_H__ +#ifdef BUILD_TORCH + +#include "CustomDNN.h" +#include "InputQuery.h" +#include "Options.h" + +#include +#include +#include + +#define CW_LOG( x, ... ) MARABOU_LOG( GlobalConfiguration::CW_LOGGING, "CW: %s\n", x ) + +/** + CWAttack implements the Carlini–Wagner L2 adversarial attack, + optimizing min ||δ||_2^2 + c * specLoss(x0 + δ) with Adam and restarts. 
+*/ +class CWAttack +{ +public: + enum { + MICROSECONDS_TO_SECONDS = 1000000 + }; + CWAttack( NLR::NetworkLevelReasoner *networkLevelReasoner ); + ~CWAttack(); + + /** + Runs the CW attack. Returns true if a valid adversarial example is found. + */ + bool runAttack(); + double getAssignment( int index ); + +private: + NLR::NetworkLevelReasoner *networkLevelReasoner; + torch::Device _device; + std::unique_ptr _model; + + unsigned _inputSize; + unsigned _iters; + unsigned _restarts; + double _specLossWeight; + + std::pair, Vector> _inputBounds; + std::pair, Vector> _outputBounds; + torch::Tensor _inputLb; + torch::Tensor _inputUb; + torch::Tensor _x0; + + Map _assignments; + double *_adversarialInput; + double *_adversarialOutput; + + void getBounds( std::pair, Vector> &bounds, signed type ) const; + std::pair generateSampleAndEpsilon(); + torch::Tensor calculateLoss( const torch::Tensor &predictions ); + std::pair findAdvExample(); + static bool isWithinBounds( const torch::Tensor &sample, + const std::pair, Vector> &bounds ); + static void printValue( double value ); +}; + +#endif // BUILD_TORCH +#endif // __CWATTACK_H__ From b54b5de7bdbbbd6f8cc7273791979d974e537703 Mon Sep 17 00:00:00 2001 From: mayaswissa Date: Mon, 11 Aug 2025 09:35:25 +0300 Subject: [PATCH 26/33] add attack's code --- src/configuration/GlobalConfiguration.cpp | 10 ++++++++++ src/configuration/GlobalConfiguration.h | 14 ++++++++++++++ src/configuration/Options.cpp | 1 + src/configuration/Options.h | 2 ++ src/engine/MarabouError.h | 2 ++ src/nlr/NetworkLevelReasoner.cpp | 5 ++++- src/nlr/NetworkLevelReasoner.h | 3 ++- src/nlr/tests/Test_NetworkLevelReasoner.h | 6 +++--- 8 files changed, 38 insertions(+), 5 deletions(-) diff --git a/src/configuration/GlobalConfiguration.cpp b/src/configuration/GlobalConfiguration.cpp index 49fa05782b..6cbb8c9bb1 100644 --- a/src/configuration/GlobalConfiguration.cpp +++ b/src/configuration/GlobalConfiguration.cpp @@ -119,6 +119,15 @@ const bool 
GlobalConfiguration::WRITE_JSON_PROOF = false; const unsigned GlobalConfiguration::BACKWARD_BOUND_PROPAGATION_DEPTH = 3; const unsigned GlobalConfiguration::MAX_ROUNDS_OF_BACKWARD_ANALYSIS = 10; +const GlobalConfiguration::PdgBoundType GlobalConfiguration::PGD_BOUND_TYPE = + GlobalConfiguration::ATTACK_INPUT; +const unsigned GlobalConfiguration::PGD_DEFAULT_NUM_ITER = 10; +const unsigned GlobalConfiguration::PGD_NUM_RESTARTS = 4; +const double GlobalConfiguration::ATTACK_INPUT_RANGE = 1000; +const unsigned GlobalConfiguration::CW_DEFAULT_ITERS = 1000; +const unsigned GlobalConfiguration::CW_NUM_RESTARTS = 4; +const double GlobalConfiguration::CW_LR = 1e-2; + #ifdef ENABLE_GUROBI const unsigned GlobalConfiguration::GUROBI_NUMBER_OF_THREADS = 1; const bool GlobalConfiguration::GUROBI_LOGGING = false; @@ -144,6 +153,7 @@ const bool GlobalConfiguration::SOI_LOGGING = false; const bool GlobalConfiguration::SCORE_TRACKER_LOGGING = false; const bool GlobalConfiguration::CEGAR_LOGGING = false; const bool GlobalConfiguration::CUSTOM_DNN_LOGGING = true; +const bool GlobalConfiguration::CW_LOGGING = true; const bool GlobalConfiguration::USE_SMART_FIX = false; const bool GlobalConfiguration::USE_LEAST_FIX = false; diff --git a/src/configuration/GlobalConfiguration.h b/src/configuration/GlobalConfiguration.h index 4e6d7bdf98..b34a508a7e 100644 --- a/src/configuration/GlobalConfiguration.h +++ b/src/configuration/GlobalConfiguration.h @@ -263,6 +263,19 @@ class GlobalConfiguration */ static const unsigned MAX_ROUNDS_OF_BACKWARD_ANALYSIS; + enum PdgBoundType { + ATTACK_INPUT = 0, + ATTACK_OUTPUT = 1 + }; + static const PdgBoundType PGD_BOUND_TYPE; + static const unsigned PGD_DEFAULT_NUM_ITER; + static const unsigned PGD_NUM_RESTARTS; + static const double ATTACK_INPUT_RANGE; + + static const unsigned CW_DEFAULT_ITERS; + static const unsigned CW_NUM_RESTARTS; + static const double CW_LR; + #ifdef ENABLE_GUROBI /* The number of threads Gurobi spawns @@ -293,6 +306,7 @@ class 
GlobalConfiguration static const bool SCORE_TRACKER_LOGGING; static const bool CEGAR_LOGGING; static const bool CUSTOM_DNN_LOGGING; + static const bool CW_LOGGING; }; #endif // __GlobalConfiguration_h__ diff --git a/src/configuration/Options.cpp b/src/configuration/Options.cpp index c834a9c590..4e05fc295a 100644 --- a/src/configuration/Options.cpp +++ b/src/configuration/Options.cpp @@ -71,6 +71,7 @@ void Options::initializeDefaultValues() _intOptions[SEED] = 1; _intOptions[NUM_BLAS_THREADS] = 1; _intOptions[NUM_CONSTRAINTS_TO_REFINE_INC_LIN] = 30; + _intOptions[ATTACK_TIMEOUT] = 60; /* Float options diff --git a/src/configuration/Options.h b/src/configuration/Options.h index 4dd4f31fd5..3afec75a56 100644 --- a/src/configuration/Options.h +++ b/src/configuration/Options.h @@ -154,6 +154,8 @@ class Options // The strategy used for initializing the soi SOI_INITIALIZATION_STRATEGY, + // Adversarial attack timeout in seconds + ATTACK_TIMEOUT, // The procedure/solver for solving the LP LP_SOLVER }; diff --git a/src/engine/MarabouError.h b/src/engine/MarabouError.h index 2f2ee54c0f..1cff574056 100644 --- a/src/engine/MarabouError.h +++ b/src/engine/MarabouError.h @@ -66,6 +66,8 @@ class MarabouError : public Error FEATURE_NOT_YET_SUPPORTED = 900, + TIMEOUT = 32, + DEBUGGING_ERROR = 999, }; diff --git a/src/nlr/NetworkLevelReasoner.cpp b/src/nlr/NetworkLevelReasoner.cpp index c20bb38c70..cb64db32cd 100644 --- a/src/nlr/NetworkLevelReasoner.cpp +++ b/src/nlr/NetworkLevelReasoner.cpp @@ -130,7 +130,8 @@ void NetworkLevelReasoner::evaluate( double *input, double *output ) memcpy( output, outputLayer->getAssignment(), sizeof( double ) * outputLayer->getSize() ); } -void NetworkLevelReasoner::concretizeInputAssignment( Map &assignment ) +void NetworkLevelReasoner::concretizeInputAssignment( Map &assignment, + const double *pgdAdversarialInput ) { Layer *inputLayer = _layerIndexToLayer[0]; ASSERT( inputLayer->getLayerType() == Layer::INPUT ); @@ -147,6 +148,8 @@ void 
NetworkLevelReasoner::concretizeInputAssignment( Map &ass { unsigned variable = inputLayer->neuronToVariable( index ); double value = _tableau->getValue( variable ); + if ( pgdAdversarialInput ) + value = pgdAdversarialInput[index]; input[index] = value; assignment[variable] = value; } diff --git a/src/nlr/NetworkLevelReasoner.h b/src/nlr/NetworkLevelReasoner.h index 832158e72c..31dfc528c4 100644 --- a/src/nlr/NetworkLevelReasoner.h +++ b/src/nlr/NetworkLevelReasoner.h @@ -80,7 +80,8 @@ class NetworkLevelReasoner : public LayerOwner Perform an evaluation of the network for the current input variable assignment and store the resulting variable assignment in the assignment. */ - void concretizeInputAssignment( Map &assignment ); + void concretizeInputAssignment( Map &assignment, + const double *pgdAdversarialInput = nullptr ); /* Perform a simulation of the network for a specific input diff --git a/src/nlr/tests/Test_NetworkLevelReasoner.h b/src/nlr/tests/Test_NetworkLevelReasoner.h index b74cd3931c..9ac9e083f3 100644 --- a/src/nlr/tests/Test_NetworkLevelReasoner.h +++ b/src/nlr/tests/Test_NetworkLevelReasoner.h @@ -7610,7 +7610,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Map assignment; - TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) ); + TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment, TODO ) ); TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 1 ) ); TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 4 ) ); @@ -7623,7 +7623,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.nextValues[0] = 1; tableau.nextValues[1] = 1; - TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) ); + TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment, TODO ) ); TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 1 ) ); TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 1 ) ); 
@@ -7635,7 +7635,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.nextValues[0] = 1; tableau.nextValues[1] = 2; - TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) ); + TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment, TODO ) ); TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 0 ) ); TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 0 ) ); From 3443212e41e6363e820f7b23a5bff541a74c3f65 Mon Sep 17 00:00:00 2001 From: mayaswissa Date: Mon, 11 Aug 2025 09:36:56 +0300 Subject: [PATCH 27/33] add attack's code --- src/nlr/tests/Test_AlphaCrown.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/nlr/tests/Test_AlphaCrown.h b/src/nlr/tests/Test_AlphaCrown.h index 43ea17c91f..eb9c99a971 100644 --- a/src/nlr/tests/Test_AlphaCrown.h +++ b/src/nlr/tests/Test_AlphaCrown.h @@ -188,7 +188,7 @@ class AlphaCrownAnalysisTestSuite : public CxxTest::TestSuite } double large = 1000000; - nlr.setBounds( nlr.getNumberOfLayers() -1 , 1, 0 , large ); + nlr.setBounds( nlr.getNumberOfLayers() -1 , 1, 1 , large ); std::unique_ptr cwAttack = std::make_unique( &nlr ); auto attackResultAfterBoundTightening = cwAttack->runAttack(); TS_ASSERT( !attackResultAfterBoundTightening ); From d594a4302891d649f82703f50c17ed47247ed081 Mon Sep 17 00:00:00 2001 From: mayaswissa Date: Mon, 11 Aug 2025 09:58:41 +0300 Subject: [PATCH 28/33] add attack's code --- src/nlr/Layer.cpp | 7 +++++++ src/nlr/Layer.h | 1 + src/nlr/NetworkLevelReasoner.cpp | 8 ++++++++ src/nlr/NetworkLevelReasoner.h | 1 + 4 files changed, 17 insertions(+) diff --git a/src/nlr/Layer.cpp b/src/nlr/Layer.cpp index 08e6900538..b27949cdf9 100644 --- a/src/nlr/Layer.cpp +++ b/src/nlr/Layer.cpp @@ -27,6 +27,13 @@ Layer::~Layer() freeMemoryIfNeeded(); } +void Layer::setBounds( unsigned int neuron, double lower, double upper ) +{ + ASSERT( neuron < _size ); + _lb[neuron] = lower; + _ub[neuron] = upper; +} + void 
Layer::setLayerOwner( LayerOwner *layerOwner ) { _layerOwner = layerOwner; diff --git a/src/nlr/Layer.h b/src/nlr/Layer.h index 900276eda3..d84237f2f1 100644 --- a/src/nlr/Layer.h +++ b/src/nlr/Layer.h @@ -58,6 +58,7 @@ class Layer Layer( const Layer *other ); Layer( unsigned index, Type type, unsigned size, LayerOwner *layerOwner ); ~Layer(); + void setBounds( unsigned int neuron, double lower, double upper ); void setLayerOwner( LayerOwner *layerOwner ); void addSourceLayer( unsigned layerNumber, unsigned layerSize ); diff --git a/src/nlr/NetworkLevelReasoner.cpp b/src/nlr/NetworkLevelReasoner.cpp index cb64db32cd..b8774af74d 100644 --- a/src/nlr/NetworkLevelReasoner.cpp +++ b/src/nlr/NetworkLevelReasoner.cpp @@ -129,6 +129,14 @@ void NetworkLevelReasoner::evaluate( double *input, double *output ) const Layer *outputLayer = _layerIndexToLayer[_layerIndexToLayer.size() - 1]; memcpy( output, outputLayer->getAssignment(), sizeof( double ) * outputLayer->getSize() ); } +void NetworkLevelReasoner::setBounds( unsigned layer, + unsigned int neuron, + double lower, + double upper ) +{ + ASSERT( layer < _layerIndexToLayer.size() ); + _layerIndexToLayer[layer]->setBounds( neuron, lower, upper ); +} void NetworkLevelReasoner::concretizeInputAssignment( Map &assignment, const double *pgdAdversarialInput ) diff --git a/src/nlr/NetworkLevelReasoner.h b/src/nlr/NetworkLevelReasoner.h index 31dfc528c4..95d292b8a6 100644 --- a/src/nlr/NetworkLevelReasoner.h +++ b/src/nlr/NetworkLevelReasoner.h @@ -75,6 +75,7 @@ class NetworkLevelReasoner : public LayerOwner Perform an evaluation of the network for a specific input. 
*/ void evaluate( double *input, double *output ); + void setBounds( unsigned layer, unsigned int neuron, double lower, double upper ); /* Perform an evaluation of the network for the current input variable From 08d9c377736a4d61e042f401064f6084068ffbbc Mon Sep 17 00:00:00 2001 From: mayaswissa Date: Mon, 11 Aug 2025 10:23:53 +0300 Subject: [PATCH 29/33] add attack's code --- src/nlr/tests/Test_AlphaCrown.h | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/src/nlr/tests/Test_AlphaCrown.h b/src/nlr/tests/Test_AlphaCrown.h index eb9c99a971..1b6c7fbd4a 100644 --- a/src/nlr/tests/Test_AlphaCrown.h +++ b/src/nlr/tests/Test_AlphaCrown.h @@ -167,15 +167,11 @@ class AlphaCrownAnalysisTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 1, -1 ); tableau.setUpperBound( 1, 1 ); - // Invoke Deeppoly + // Invoke alpha crow TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.alphaCrown() ); - // double large = 1000000; - // nlr.setBounds( nlr.getNumberOfLayers() -1 , 1, 0 , large ); List bounds; - // TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - // TS_ASSERT_THROWS_NOTHING( nlr.alphaCrownPropagation() ); TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); for ( const auto &bound : bounds ) @@ -188,11 +184,26 @@ class AlphaCrownAnalysisTestSuite : public CxxTest::TestSuite } double large = 1000000; - nlr.setBounds( nlr.getNumberOfLayers() -1 , 1, 1 , large ); + nlr.setBounds( nlr.getNumberOfLayers() -1 , 1, 0.1 , large ); std::unique_ptr cwAttack = std::make_unique( &nlr ); auto attackResultAfterBoundTightening = cwAttack->runAttack(); TS_ASSERT( !attackResultAfterBoundTightening ); + nlr.setBounds( nlr.getNumberOfLayers() -1 , 1, -large , -0.1 ); + cwAttack = std::make_unique( &nlr ); + attackResultAfterBoundTightening = cwAttack->runAttack(); + TS_ASSERT( !attackResultAfterBoundTightening ); + + nlr.setBounds( nlr.getNumberOfLayers() -1 , 0 , -large , 0.99 ); + cwAttack = 
std::make_unique( &nlr ); + attackResultAfterBoundTightening = cwAttack->runAttack(); + TS_ASSERT( !attackResultAfterBoundTightening ); + + nlr.setBounds( nlr.getNumberOfLayers() -1 , 0 , 1.1 , large ); + cwAttack = std::make_unique( &nlr ); + attackResultAfterBoundTightening = cwAttack->runAttack(); + TS_ASSERT( !attackResultAfterBoundTightening ); + } }; From 475007e7f2f4939aab7a356c1e5192afe1f7205a Mon Sep 17 00:00:00 2001 From: avi_porges Date: Fri, 29 Aug 2025 15:00:37 +0300 Subject: [PATCH 30/33] Add MaxPool Relaxation --- src/engine/CustomDNN.cpp | 13 +++ src/engine/CustomDNN.h | 7 +- src/nlr/AlphaCrown.cpp | 237 ++++++++++++++++++++++++++++++++++++--- src/nlr/AlphaCrown.h | 34 ++++-- 4 files changed, 264 insertions(+), 27 deletions(-) diff --git a/src/engine/CustomDNN.cpp b/src/engine/CustomDNN.cpp index 3aaadacdeb..3fbdb8d186 100644 --- a/src/engine/CustomDNN.cpp +++ b/src/engine/CustomDNN.cpp @@ -284,6 +284,19 @@ void CustomDNN::getInputBounds(torch::Tensor &lbTensor, torch::Tensor &ubTensor) lbTensor = torch::tensor(lowerBounds, torch::kDouble); ubTensor = torch::tensor(upperBounds, torch::kDouble); } + + + +std::vector> CustomDNN::getMaxPoolSources(const Layer* maxPoolLayer) { + std::vector> sources; + unsigned size = maxPoolLayer->getSize(); + for (unsigned neuron = 0; neuron < size; ++neuron) { + + sources.push_back(maxPoolLayer->getActivationSources(neuron)); + } + return sources; +} + } #endif \ No newline at end of file diff --git a/src/engine/CustomDNN.h b/src/engine/CustomDNN.h index 900f64e83f..a414d80f5e 100644 --- a/src/engine/CustomDNN.h +++ b/src/engine/CustomDNN.h @@ -1,6 +1,6 @@ #ifdef BUILD_TORCH -#ifndef __CustomDNN_h__ -#define __CustomDNN_h__ +#ifndef _CustomDNN_h_ +#define _CustomDNN_h_ #include "Layer.h" #include "Vector.h" @@ -86,6 +86,7 @@ class CustomDNN : public torch::nn::Module torch::Tensor forward( torch::Tensor x ); const Vector &getLayerSizes() const; void getInputBounds( torch::Tensor &lbTensor, torch::Tensor &ubTensor ) 
const; + std::vector> getMaxPoolSources(const Layer* maxPoolLayer); Vector getLinearLayers() { return _linearLayers; @@ -114,5 +115,5 @@ class CustomDNN : public torch::nn::Module unsigned _numberOfLayers; }; } // namespace NLR -#endif // __CustomDNN_h__ +#endif // _CustomDNN_h_ #endif \ No newline at end of file diff --git a/src/nlr/AlphaCrown.cpp b/src/nlr/AlphaCrown.cpp index 1c8c56aac4..c68f9554d9 100644 --- a/src/nlr/AlphaCrown.cpp +++ b/src/nlr/AlphaCrown.cpp @@ -11,29 +11,35 @@ namespace NLR { AlphaCrown::AlphaCrown( LayerOwner *layerOwner ) : _layerOwner( layerOwner ) { - _network = new CustomDNN( dynamic_cast( _layerOwner ) ); + _nlr = dynamic_cast( layerOwner ); + _network = new CustomDNN( _nlr ); _network->getInputBounds( _lbInput, _ubInput ); _inputSize = _lbInput.size( 0 ); - _network->getLinearLayers().end() ; _linearLayers = _network->getLinearLayers().getContainer(); _layersOrder = _network->getLayersOrder().getContainer(); unsigned linearIndex = 0; for ( unsigned i = 0; i < _network->getNumberOfLayers(); i++ ) { - if (_layersOrder[i] != Layer::WEIGHTED_SUM) continue; - //const Layer *layer = _layerOwner->getLayer( i ); - auto linearLayer = _linearLayers[linearIndex]; - auto whights = linearLayer->weight; - auto bias = linearLayer->bias; - _positiveWeights.insert( {i,torch::where( whights >= 0,whights, - torch::zeros_like( - whights ) ).to(torch::kFloat32)} ); - _negativeWeights.insert( {i,torch::where( whights <= 0,whights, - torch::zeros_like( - whights ) ).to(torch::kFloat32)} ); - _biases.insert( {i,bias.to(torch::kFloat32)} ); - linearIndex += 1; + if (_layersOrder[i] == Layer::WEIGHTED_SUM) + { + // const Layer *layer = _layerOwner->getLayer( i ); + auto linearLayer = _linearLayers[linearIndex]; + auto whights = linearLayer->weight; + auto bias = linearLayer->bias; + _positiveWeights.insert( {i,torch::where( whights >= 0,whights, + torch::zeros_like( + whights ) ).to(torch::kFloat32)} ); + _negativeWeights.insert( {i,torch::where( whights <= 
0,whights, + torch::zeros_like( + whights ) ).to(torch::kFloat32)} ); + _biases.insert( {i,bias.to(torch::kFloat32)} ); + linearIndex += 1; + } + if (_layersOrder[i] == Layer::MAX) + { + _maxPoolSources.insert({i, _network->getMaxPoolSources(_nlr->getLayer( i ) )}); + } } } @@ -125,6 +131,164 @@ void AlphaCrown::relaxReluLayer(unsigned layerNumber, torch::Tensor } +void AlphaCrown::relaxMaxPoolLayer(unsigned layerNumber, + torch::Tensor &EQ_up, + torch::Tensor &EQ_low) +{ + std::cout << "Relaxing MaxPool layer number: " << layerNumber << std::endl; + const auto &groups = _maxPoolSources[layerNumber]; + TORCH_CHECK(!groups.empty(), "MaxPool layer has no groups"); + + const auto cols = EQ_up.size(1); + auto next_EQ_up = torch::zeros({ (long)groups.size(), cols }, torch::kFloat32); + auto next_EQ_low = torch::zeros({ (long)groups.size(), cols }, torch::kFloat32); + + std::vector upIdx; upIdx.reserve(groups.size()); + std::vector loIdx; loIdx.reserve(groups.size()); + std::vector slopes; slopes.reserve(groups.size()); + std::vector ints; ints.reserve(groups.size()); + + for (size_t k = 0; k < groups.size(); ++k) + { + // Get per-neuron relaxation parameters & indices + auto R = relaxMaxNeuron(groups, k, EQ_up, EQ_low); + + // Build next rows: + // Upper: slope * EQ_up[R.idx_up] (+ intercept on last column) + auto up_row = EQ_up.index({ (long)R.idx_up, torch::indexing::Slice() }) * R.slope; + auto bvec = torch::full({1}, R.intercept, torch::kFloat32); + up_row = AlphaCrown::addVecToLastColumnValue(up_row, bvec); + + // Lower: copy EQ_low[R.idx_low] + auto low_row = EQ_low.index({ (long)R.idx_low, torch::indexing::Slice() }).clone(); + + next_EQ_up.index_put_ ( { (long)k, torch::indexing::Slice() }, up_row ); + next_EQ_low.index_put_( { (long)k, torch::indexing::Slice() }, low_row ); + + // Persist + upIdx.push_back(R.idx_up); + loIdx.push_back(R.idx_low); + slopes.push_back(R.slope); + ints.push_back(R.intercept); + } + + + _maxUpperChoice[layerNumber] = 
torch::from_blob( + upIdx.data(), {(long)upIdx.size()}, torch::TensorOptions().dtype(torch::kLong)).clone(); + _maxLowerChoice[layerNumber] = torch::from_blob( + loIdx.data(), {(long)loIdx.size()}, torch::TensorOptions().dtype(torch::kLong)).clone(); + _upperRelaxationSlopes[layerNumber] = + torch::from_blob(slopes.data(), {(long)slopes.size()}, torch::TensorOptions().dtype(torch::kFloat32)).clone(); + _upperRelaxationIntercepts[layerNumber] = + torch::from_blob(ints.data(), {(long)ints.size()}, torch::TensorOptions().dtype(torch::kFloat32)).clone(); + + // Advance EQs + EQ_up = next_EQ_up; + EQ_low = next_EQ_low; +} + + + + +std::pair +AlphaCrown::boundsFromEQ(const torch::Tensor &EQ, const std::vector &rows) +{ + TORCH_CHECK(!rows.empty(), "boundsFromEQ: empty rows"); + auto idx = torch::from_blob(const_cast(rows.data()), + {(long)rows.size()}, + torch::TensorOptions().dtype(torch::kLong)).clone(); + auto sub = EQ.index({ idx, torch::indexing::Slice() }); // |S| x (n+1) + auto U = getMaxOfSymbolicVariables(sub); // |S| + auto L = getMinOfSymbolicVariables(sub); // |S| + return {U, L}; +} + + + +AlphaCrown::MaxRelaxResult AlphaCrown::relaxMaxNeuron(const std::vector> &groups, + size_t k, + const torch::Tensor &EQ_up, + const torch::Tensor &EQ_low) +{ + constexpr double EPS = 1e-12; + + // Collect absolute previous-layer row indices for output k + std::vector srcRows; srcRows.reserve(16); + const auto &srcList = groups[k]; + for (const auto &ni : srcList) { + srcRows.push_back((long)ni._neuron); + } + TORCH_CHECK(!srcRows.empty(), "MaxPool group has no sources"); + + + auto [U_low, L_low] = boundsFromEQ(EQ_low, srcRows); + auto M_low = (U_low + L_low) / 2.0; + long j_rel_low = torch::argmax(M_low).item(); + long idx_low_abs = srcRows[(size_t)j_rel_low]; + + + auto [U_up, L_up] = boundsFromEQ(EQ_up, srcRows); + + // i = argmax U_up, j = second argmax U_up (or i if single source) + int64_t kTop = std::min(2, U_up.size(0)); + auto top2 = torch::topk(U_up, kTop, 
/dim=/0, /largest=/true, /sorted=/true); + auto Uidxs = std::get<1>(top2); + long i_rel = Uidxs[0].item(); + long j_rel2 = (kTop > 1) ? Uidxs[1].item() : Uidxs[0].item(); + + double li = L_up[i_rel].item(); + double ui = U_up[i_rel].item(); + double uj = U_up[j_rel2].item(); + + // Case 1: (li == max(L_up)) ∧ (li >= uj) + auto Lmax_pair = torch::max(L_up, /dim=/0); + long l_arg = std::get<1>(Lmax_pair).item(); + bool case1 = (i_rel == l_arg) && (li + EPS >= uj); + + float slope, intercept; + if (case1 || (ui - li) <= EPS) { + // Case 1 (or degenerate): y ≤ x_i → a=1, intercept=0 + slope = 1.0f; + intercept = 0.0f; + } else { + // Case 2: a=(ui-uj)/(ui-li), b=uj → store as (a*xi + (b - a*li)) + double a = (ui - uj) / (ui - li); + if (a < 0.0) a = 0.0; + if (a > 1.0) a = 1.0; + slope = (float)a; + intercept = (float)(uj - a * li); // this is what you ADD to last column + } + + long idx_up_abs = srcRows[(size_t)i_rel]; + return MaxRelaxResult{ idx_up_abs, idx_low_abs, slope, intercept }; +} + + +void AlphaCrown::computeMaxPoolLayer(unsigned layerNumber, + torch::Tensor &EQ_up, + torch::Tensor &EQ_low) +{ + auto idxUp = _maxUpperChoice.at(layerNumber); // int64 [m] + auto idxLo = _maxLowerChoice.at(layerNumber); // int64 [m] + auto a = _upperRelaxationSlopes.at(layerNumber).to(torch::kFloat32); // [m] + auto b = _upperRelaxationIntercepts.at(layerNumber).to(torch::kFloat32); // [m] + + // Select rows from current EQs + auto up_sel = EQ_up.index ({ idxUp, torch::indexing::Slice() }); // m x (n+1) + auto low_sel = EQ_low.index({ idxLo, torch::indexing::Slice() }); + + // Upper: scale + add intercept on last column + auto next_up = up_sel * a.unsqueeze(1); + next_up = AlphaCrown::addVecToLastColumnValue(next_up, b); + + // Lower: copy chosen rows + auto next_low = low_sel.clone(); + + EQ_up = next_up; + EQ_low = next_low; +} + + void AlphaCrown::findBounds() @@ -144,6 +308,11 @@ void AlphaCrown::findBounds() case Layer::RELU: relaxReluLayer(i, EQ_up, EQ_low); break; + 
case Layer::MAX: + { + relaxMaxPoolLayer( i, EQ_up, EQ_low ); + break; + } default: AlphaCrown::log ( "Unsupported layer type\n"); throw MarabouError( MarabouError::DEBUGGING_ERROR ); @@ -170,6 +339,9 @@ std::tuple AlphaCrown::computeBounds case Layer::RELU: computeReluLayer (i, EQ_up, EQ_low, alphaSlopes); break; + case Layer::MAX: + computeMaxPoolLayer( i, EQ_up, EQ_low ); + break; default: log ("Unsupported layer type\n"); throw MarabouError (MarabouError::DEBUGGING_ERROR); @@ -232,6 +404,9 @@ void AlphaCrown::updateBounds(std::vector &alphaSlopes){ case Layer::RELU: computeReluLayer (i, EQ_up, EQ_low, alphaSlopes); break; + case Layer::MAX: + computeMaxPoolLayer( i, EQ_up, EQ_low ); + break; default: log ("Unsupported layer type\n"); throw MarabouError (MarabouError::DEBUGGING_ERROR); @@ -292,6 +467,9 @@ void AlphaCrown::updateBoundsOfLayer(unsigned layerIndex, torch::Tensor &upBound void AlphaCrown::optimizeBounds( int loops ) { + + + std::cout << "Starting AlphaCrown run with " << loops << " optimization loops." << std::endl; std::vector alphaSlopesForUpBound; std::vector alphaSlopesForLowBound; for ( auto &tensor : _initialAlphaSlopes ) @@ -299,8 +477,8 @@ void AlphaCrown::optimizeBounds( int loops ) alphaSlopesForUpBound.push_back( tensor.detach().clone().requires_grad_(true) ); alphaSlopesForLowBound.push_back( tensor.detach().clone().requires_grad_(true) ); } - AlphaCrown::GDloop( loops, "max", alphaSlopesForUpBound ); - AlphaCrown::GDloop( loops, "min", alphaSlopesForLowBound ); + GDloop( loops, "max", alphaSlopesForUpBound ); + GDloop( loops, "min", alphaSlopesForLowBound ); updateBounds( alphaSlopesForUpBound ); updateBounds( alphaSlopesForLowBound); std::cout << "AlphaCrown run completed." 
<< std::endl; @@ -333,6 +511,31 @@ void AlphaCrown::GDloop( int loops, } +torch::Tensor AlphaCrown::addVecToLastColumnValue(const torch::Tensor &matrix, + const torch::Tensor &vec) +{ + auto result = matrix.clone(); + if (result.dim() == 2) + { + // add 'vec' per row to last column + result.slice(1, result.size(1) - 1, result.size(1)) += vec.unsqueeze(1); + } + else if (result.dim() == 1) + { + // add scalar to last entry (the constant term) + TORCH_CHECK(vec.numel() == 1, "1-D addVec expects scalar vec"); + result.index_put_({ result.size(0) - 1 }, + result.index({ result.size(0) - 1 }) + vec.item()); + } + else + { + TORCH_CHECK(false, "addVecToLastColumnValue expects 1-D or 2-D tensor"); + } + return result; +} + + + void AlphaCrown::log( const String &message ) { if ( GlobalConfiguration::NETWORK_LEVEL_REASONER_LOGGING ) diff --git a/src/nlr/AlphaCrown.h b/src/nlr/AlphaCrown.h index fafabb1db0..6042354e99 100644 --- a/src/nlr/AlphaCrown.h +++ b/src/nlr/AlphaCrown.h @@ -27,6 +27,7 @@ class AlphaCrown private: LayerOwner *_layerOwner; + NetworkLevelReasoner *_nlr; CustomDNN *_network; void GDloop( int loops, const std::string val_to_opt, std::vector &alphaSlopes ); std::tuple @@ -43,6 +44,25 @@ class AlphaCrown std::map _indexAlphaSlopeMap; std::map _linearIndexMap; + std::map>> _maxPoolSources; + std::map _maxUpperChoice; // int64 [m]: absolute row index for upper bound + std::map _maxLowerChoice; // int64 [m]: absolute row index for lower bound + void relaxMaxPoolLayer(unsigned layerNumber, torch::Tensor &EQ_up, torch::Tensor &EQ_low); + void computeMaxPoolLayer(unsigned layerNumber, torch::Tensor &EQ_up, torch::Tensor &EQ_low); + + std::pair boundsFromEQ(const torch::Tensor &EQ, const std::vector &rows); + struct MaxRelaxResult { + long idx_up; // absolute row in previous EQ for the upper bound + long idx_low; // absolute row in previous EQ for the lower bound + float slope; // upper slope a + float intercept; // upper intercept (b - a*l_i) + }; + + 
MaxRelaxResult relaxMaxNeuron(const std::vector> &groups, + size_t k, + const torch::Tensor &EQ_up, + const torch::Tensor &EQ_low); + std::map _upperRelaxationSlopes; std::map _upperRelaxationIntercepts; @@ -56,13 +76,13 @@ class AlphaCrown void updateBounds(std::vector &alphaSlopes); void updateBoundsOfLayer(unsigned layerIndex, torch::Tensor &upBounds, torch::Tensor &lowBounds); - static torch::Tensor addVecToLastColumnValue( const torch::Tensor &matrix, - const torch::Tensor &vec ) - { - auto result = matrix.clone(); - result.slice( 1, result.size( 1 ) - 1, result.size( 1 ) ) += vec.unsqueeze( 1 ); - return result; - } + torch::Tensor addVecToLastColumnValue( const torch::Tensor &matrix, + const torch::Tensor &vec ); + // { + // auto result = matrix.clone(); + // result.slice( 1, result.size( 1 ) - 1, result.size( 1 ) ) += vec.unsqueeze( 1 ); + // return result; + // } static torch::Tensor lower_ReLU_relaxation( const torch::Tensor &u, const torch::Tensor &l ); static std::tuple upper_ReLU_relaxation( const torch::Tensor &u, From 03cbbeb498a669e9cfbb96f7e63830bf17d1269b Mon Sep 17 00:00:00 2001 From: Avi Porges <151055500+Avi-Porges@users.noreply.github.com> Date: Fri, 29 Aug 2025 15:30:15 +0300 Subject: [PATCH 31/33] Revert "Alpha crown" --- CMakeLists.txt | 229 ++++---- src/basis_factorization/GaussianEliminator.h | 2 +- src/basis_factorization/LUFactorization.h | 2 +- .../SparseFTFactorization.h | 2 +- .../SparseGaussianEliminator.h | 2 +- .../SparseLUFactorization.h | 2 +- src/cegar/IncrementalLinearization.h | 2 +- src/common/Debug.h | 4 +- src/configuration/GlobalConfiguration.cpp | 11 - src/configuration/GlobalConfiguration.h | 15 - src/configuration/Options.cpp | 7 +- src/configuration/Options.h | 2 - src/engine/CDSmtCore.h | 2 +- src/engine/CWAttack.cpp | 256 -------- src/engine/CWAttack.h | 64 -- src/engine/CustomDNN.cpp | 302 ---------- src/engine/CustomDNN.h | 119 ---- src/engine/DantzigsRule.h | 2 +- src/engine/DnCManager.h | 2 +- 
src/engine/Engine.cpp | 2 - src/engine/Engine.h | 2 +- src/engine/InputQuery.cpp | 2 +- src/engine/MarabouError.h | 2 - src/engine/PLConstraintScoreTracker.h | 2 +- src/engine/ProjectedSteepestEdge.h | 2 +- src/engine/Query.cpp | 2 +- src/engine/SmtCore.h | 2 +- src/engine/SumOfInfeasibilitiesManager.h | 2 +- src/engine/SymbolicBoundTighteningType.h | 3 +- src/engine/Tableau.h | 2 +- src/input_parsers/MpsParser.h | 2 +- src/input_parsers/OnnxParser.h | 2 +- src/nlr/AlphaCrown.cpp | 546 ------------------ src/nlr/AlphaCrown.h | 100 ---- src/nlr/CMakeLists.txt | 7 +- src/nlr/IterativePropagator.h | 2 +- src/nlr/LPFormulator.h | 2 +- src/nlr/Layer.cpp | 7 - src/nlr/Layer.h | 1 - src/nlr/NetworkLevelReasoner.cpp | 28 +- src/nlr/NetworkLevelReasoner.h | 7 +- src/nlr/tests/Test_AlphaCrown.h | 211 ------- src/nlr/tests/Test_NetworkLevelReasoner.h | 6 +- src/query_loader/QueryLoader.h | 2 +- tools/download_libtorch.sh | 19 - 45 files changed, 137 insertions(+), 1855 deletions(-) delete mode 100644 src/engine/CWAttack.cpp delete mode 100644 src/engine/CWAttack.h delete mode 100644 src/engine/CustomDNN.cpp delete mode 100644 src/engine/CustomDNN.h delete mode 100644 src/nlr/AlphaCrown.cpp delete mode 100644 src/nlr/AlphaCrown.h delete mode 100644 src/nlr/tests/Test_AlphaCrown.h delete mode 100755 tools/download_libtorch.sh diff --git a/CMakeLists.txt b/CMakeLists.txt index bf6c6cbed4..e55a57852a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -20,9 +20,9 @@ option(RUN_SYSTEM_TEST "Run system tests on build" OFF) option(RUN_MEMORY_TEST "Run cxxtest testing with ASAN ON" ON) option(RUN_PYTHON_TEST "Run Python API tests if building with Python" OFF) option(ENABLE_GUROBI "Enable use the Gurobi optimizer" OFF) -option(ENABLE_OPENBLAS "Do symbolic bound tighting using blas" OFF) # Not available on Windows +option(ENABLE_OPENBLAS "Do symbolic bound tighting using blas" ON) # Not available on Windows option(CODE_COVERAGE "Add code coverage" OFF) # Available only in debug mode 
-option(BUILD_TORCH "Build libtorch" ON) + ################### ## Git variables ## ################### @@ -30,19 +30,19 @@ option(BUILD_TORCH "Build libtorch" ON) # Get the name of the working branch execute_process( - COMMAND git rev-parse --abbrev-ref HEAD - WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} - OUTPUT_VARIABLE GIT_BRANCH - OUTPUT_STRIP_TRAILING_WHITESPACE + COMMAND git rev-parse --abbrev-ref HEAD + WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} + OUTPUT_VARIABLE GIT_BRANCH + OUTPUT_STRIP_TRAILING_WHITESPACE ) add_definitions("-DGIT_BRANCH=\"${GIT_BRANCH}\"") # Get the latest abbreviated commit hash of the working branch execute_process( - COMMAND git log -1 --format=%h - WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} - OUTPUT_VARIABLE GIT_COMMIT_HASH - OUTPUT_STRIP_TRAILING_WHITESPACE + COMMAND git log -1 --format=%h + WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} + OUTPUT_VARIABLE GIT_COMMIT_HASH + OUTPUT_STRIP_TRAILING_WHITESPACE ) add_definitions("-DGIT_COMMIT_HASH=\"${GIT_COMMIT_HASH}\"") @@ -60,9 +60,9 @@ set(COMMON_DIR "${SRC_DIR}/common") set(BASIS_DIR "${SRC_DIR}/basis_factorization") if (MSVC) - set(SCRIPT_EXTENSION bat) + set(SCRIPT_EXTENSION bat) else() - set(SCRIPT_EXTENSION sh) + set(SCRIPT_EXTENSION sh) endif() ########## @@ -85,20 +85,20 @@ add_definitions(-DBOOST_NO_CXX98_FUNCTION_BASE) set(BOOST_VERSION 1.84.0) set(BOOST_DIR "${TOOLS_DIR}/boost-${BOOST_VERSION}") if (MSVC) - set(BOOST_ROOT "${BOOST_DIR}/win_installed") - set(Boost_NAMESPACE libboost) + set(BOOST_ROOT "${BOOST_DIR}/win_installed") + set(Boost_NAMESPACE libboost) elseif (${CMAKE_SIZEOF_VOID_P} EQUAL 4 AND NOT MSVC) - set(BOOST_ROOT "${BOOST_DIR}/installed32") + set(BOOST_ROOT "${BOOST_DIR}/installed32") else() - set(BOOST_ROOT "${BOOST_DIR}/installed") + set(BOOST_ROOT "${BOOST_DIR}/installed") endif() set(Boost_USE_DEBUG_RUNTIME FALSE) find_package(Boost ${BOOST_VERSION} COMPONENTS program_options timer chrono thread) # Find boost if (NOT ${Boost_FOUND}) - execute_process(COMMAND 
${TOOLS_DIR}/download_boost.${SCRIPT_EXTENSION} ${BOOST_VERSION}) - find_package(Boost ${BOOST_VERSION} REQUIRED COMPONENTS program_options timer chrono thread regex) + execute_process(COMMAND ${TOOLS_DIR}/download_boost.${SCRIPT_EXTENSION} ${BOOST_VERSION}) + find_package(Boost ${BOOST_VERSION} REQUIRED COMPONENTS program_options timer chrono thread regex) endif() set(LIBS_INCLUDES ${Boost_INCLUDE_DIRS}) list(APPEND LIBS ${Boost_LIBRARIES}) @@ -146,63 +146,36 @@ endif() file(GLOB DEPS_ONNX "${ONNX_DIR}/*.cc") include_directories(SYSTEM ${ONNX_DIR}) -############# -## Pytorch ## -############# - -if (${BUILD_TORCH}) - message(STATUS "Using pytorch") - if (NOT DEFINED BUILD_TORCH) - set(BUILD_TORCH $ENV{TORCH_HOME}) - add_definitions(-DBUILD_TORCH) - endif() - add_compile_definitions(BUILD_TORCH) - set(PYTORCH_VERSION 2.2.1) - find_package(Torch ${PYTORCH_VERSION} QUIET) - if (NOT Torch_FOUND) - set(PYTORCH_DIR "${TOOLS_DIR}/libtorch-${PYTORCH_VERSION}") - list(APPEND CMAKE_PREFIX_PATH ${PYTORCH_DIR}) - if(NOT EXISTS "${PYTORCH_DIR}") - execute_process(COMMAND ${TOOLS_DIR}/download_libtorch.sh ${PYTORCH_VERSION}) - set(Torch_NO_SYSTEM_PATHS ON) - endif() - set(Torch_DIR ${PYTORCH_DIR}/share/cmake/Torch) - find_package(Torch ${PYTORCH_VERSION} REQUIRED) - endif() - set(TORCH_CXX_FLAGS "-Wno-error=array-bounds") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${TORCH_CXX_FLAGS}") - list(APPEND LIBS ${TORCH_LIBRARIES}) -endif () ############ ## Gurobi ## ############ if (${ENABLE_GUROBI}) - message(STATUS "Using Gurobi for LP relaxation for bound tightening") - if (NOT DEFINED GUROBI_DIR) - set(GUROBI_DIR $ENV{GUROBI_HOME}) - endif() - add_compile_definitions(ENABLE_GUROBI) - - set(GUROBI_LIB1 "gurobi_c++") - set(GUROBI_LIB2 "gurobi110") - - add_library(${GUROBI_LIB1} SHARED IMPORTED) - set_target_properties(${GUROBI_LIB1} PROPERTIES IMPORTED_LOCATION ${GUROBI_DIR}/lib/libgurobi_c++.a) - list(APPEND LIBS ${GUROBI_LIB1}) - target_include_directories(${GUROBI_LIB1} INTERFACE 
${GUROBI_DIR}/include/) - - add_library(${GUROBI_LIB2} SHARED IMPORTED) - - # MACOSx uses .dylib instead of .so for its Gurobi downloads. - if (APPLE) - set_target_properties(${GUROBI_LIB2} PROPERTIES IMPORTED_LOCATION ${GUROBI_DIR}/lib/libgurobi110.dylib) - else() - set_target_properties(${GUROBI_LIB2} PROPERTIES IMPORTED_LOCATION ${GUROBI_DIR}/lib/libgurobi110.so) - endif () - - list(APPEND LIBS ${GUROBI_LIB2}) - target_include_directories(${GUROBI_LIB2} INTERFACE ${GUROBI_DIR}/include/) + message(STATUS "Using Gurobi for LP relaxation for bound tightening") + if (NOT DEFINED GUROBI_DIR) + set(GUROBI_DIR $ENV{GUROBI_HOME}) + endif() + add_compile_definitions(ENABLE_GUROBI) + + set(GUROBI_LIB1 "gurobi_c++") + set(GUROBI_LIB2 "gurobi110") + + add_library(${GUROBI_LIB1} SHARED IMPORTED) + set_target_properties(${GUROBI_LIB1} PROPERTIES IMPORTED_LOCATION ${GUROBI_DIR}/lib/libgurobi_c++.a) + list(APPEND LIBS ${GUROBI_LIB1}) + target_include_directories(${GUROBI_LIB1} INTERFACE ${GUROBI_DIR}/include/) + + add_library(${GUROBI_LIB2} SHARED IMPORTED) + + # MACOSx uses .dylib instead of .so for its Gurobi downloads. 
+ if (APPLE) + set_target_properties(${GUROBI_LIB2} PROPERTIES IMPORTED_LOCATION ${GUROBI_DIR}/lib/libgurobi110.dylib) + else() + set_target_properties(${GUROBI_LIB2} PROPERTIES IMPORTED_LOCATION ${GUROBI_DIR}/lib/libgurobi110.so) + endif () + + list(APPEND LIBS ${GUROBI_LIB2}) + target_include_directories(${GUROBI_LIB2} INTERFACE ${GUROBI_DIR}/include/) endif() ############## @@ -210,30 +183,30 @@ endif() ############## if (NOT MSVC AND ${ENABLE_OPENBLAS}) - set(OPENBLAS_VERSION 0.3.19) - - set(OPENBLAS_LIB openblas) - set(OPENBLAS_DEFAULT_DIR "${TOOLS_DIR}/OpenBLAS-${OPENBLAS_VERSION}") - if (NOT OPENBLAS_DIR) - set(OPENBLAS_DIR ${OPENBLAS_DEFAULT_DIR}) - endif() - - message(STATUS "Using OpenBLAS for matrix multiplication") - add_compile_definitions(ENABLE_OPENBLAS) - if(NOT EXISTS "${OPENBLAS_DIR}/installed/lib/libopenblas.a") - message("Can't find OpenBLAS, installing. If OpenBLAS is installed please use the OPENBLAS_DIR parameter to pass the path") - if (${OPENBLAS_DIR} STREQUAL ${OPENBLAS_DEFAULT_DIR}) - message("Installing OpenBLAS") - execute_process(COMMAND ${TOOLS_DIR}/download_openBLAS.sh ${OPENBLAS_VERSION}) - else() - message(FATAL_ERROR "Can't find OpenBLAS in the supplied directory") - endif() - endif() - - add_library(${OPENBLAS_LIB} SHARED IMPORTED) - set_target_properties(${OPENBLAS_LIB} PROPERTIES IMPORTED_LOCATION ${OPENBLAS_DIR}/installed/lib/libopenblas.a) - list(APPEND LIBS ${OPENBLAS_LIB}) - target_include_directories(${OPENBLAS_LIB} INTERFACE ${OPENBLAS_DIR}/installed/include) + set(OPENBLAS_VERSION 0.3.19) + + set(OPENBLAS_LIB openblas) + set(OPENBLAS_DEFAULT_DIR "${TOOLS_DIR}/OpenBLAS-${OPENBLAS_VERSION}") + if (NOT OPENBLAS_DIR) + set(OPENBLAS_DIR ${OPENBLAS_DEFAULT_DIR}) + endif() + + message(STATUS "Using OpenBLAS for matrix multiplication") + add_compile_definitions(ENABLE_OPENBLAS) + if(NOT EXISTS "${OPENBLAS_DIR}/installed/lib/libopenblas.a") + message("Can't find OpenBLAS, installing. 
If OpenBLAS is installed please use the OPENBLAS_DIR parameter to pass the path") + if (${OPENBLAS_DIR} STREQUAL ${OPENBLAS_DEFAULT_DIR}) + message("Installing OpenBLAS") + execute_process(COMMAND ${TOOLS_DIR}/download_openBLAS.sh ${OPENBLAS_VERSION}) + else() + message(FATAL_ERROR "Can't find OpenBLAS in the supplied directory") + endif() + endif() + + add_library(${OPENBLAS_LIB} SHARED IMPORTED) + set_target_properties(${OPENBLAS_LIB} PROPERTIES IMPORTED_LOCATION ${OPENBLAS_DIR}/installed/lib/libopenblas.a) + list(APPEND LIBS ${OPENBLAS_LIB}) + target_include_directories(${OPENBLAS_LIB} INTERFACE ${OPENBLAS_DIR}/installed/include) endif() ########### @@ -266,7 +239,7 @@ set(INPUT_PARSERS_DIR input_parsers) include(ProcessorCount) ProcessorCount(CTEST_NTHREADS) if(CTEST_NTHREADS EQUAL 0) - set(CTEST_NTHREADS 1) + set(CTEST_NTHREADS 1) endif() # --------------- set build type ---------------------------- @@ -274,20 +247,20 @@ set(BUILD_TYPES Release Debug MinSizeRel RelWithDebInfo) # Set the default build type to Production if(NOT CMAKE_BUILD_TYPE) - set(CMAKE_BUILD_TYPE - Release CACHE STRING "Options are: Release Debug MinSizeRel RelWithDebInfo" FORCE) - # Provide drop down menu options in cmake-gui - set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS ${BUILD_TYPES}) + set(CMAKE_BUILD_TYPE + Release CACHE STRING "Options are: Release Debug MinSizeRel RelWithDebInfo" FORCE) + # Provide drop down menu options in cmake-gui + set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS ${BUILD_TYPES}) endif() message(STATUS "Building ${CMAKE_BUILD_TYPE} build") #-------------------------set code coverage----------------------------------# # Allow coverage only in debug mode only in gcc if(CODE_COVERAGE AND CMAKE_CXX_COMPILER_ID MATCHES "GNU" AND CMAKE_BUILD_TYPE MATCHES Debug) - message(STATUS "Building with code coverage") - set(COVERAGE_COMPILER_FLAGS "-g -O0 --coverage" CACHE INTERNAL "") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COVERAGE_COMPILER_FLAGS}") - 
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} --coverage") + message(STATUS "Building with code coverage") + set(COVERAGE_COMPILER_FLAGS "-g -O0 --coverage" CACHE INTERNAL "") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COVERAGE_COMPILER_FLAGS}") + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} --coverage") endif() # We build a static library that is the core of the project, the link it to the @@ -300,7 +273,7 @@ set(MARABOU_EXE Marabou${CMAKE_EXECUTABLE_SUFFIX}) add_executable(${MARABOU_EXE} "${ENGINE_DIR}/main.cpp") set(MARABOU_EXE_PATH "${BIN_DIR}/${MARABOU_EXE}") add_custom_command(TARGET ${MARABOU_EXE} POST_BUILD - COMMAND ${CMAKE_COMMAND} -E copy $ ${MARABOU_EXE_PATH} ) + COMMAND ${CMAKE_COMMAND} -E copy $ ${MARABOU_EXE_PATH} ) set(MPS_PARSER_PATH "${BIN_DIR}/${MPS_PARSER}") @@ -341,10 +314,10 @@ find_package(Threads REQUIRED) list(APPEND LIBS Threads::Threads) if (BUILD_STATIC_MARABOU) - # build a static library - target_link_libraries(${MARABOU_LIB} ${LIBS} -static) + # build a static library + target_link_libraries(${MARABOU_LIB} ${LIBS} -static) else() - target_link_libraries(${MARABOU_LIB} ${LIBS}) + target_link_libraries(${MARABOU_LIB} ${LIBS}) endif() target_include_directories(${MARABOU_LIB} PRIVATE ${LIBS_INCLUDES}) @@ -374,10 +347,10 @@ endif() set(PYTHON32 FALSE) if(${BUILD_PYTHON}) execute_process(COMMAND "${PYTHON_EXECUTABLE}" "-c" - "import struct; print(struct.calcsize('@P'));" - RESULT_VARIABLE _PYTHON_SUCCESS - OUTPUT_VARIABLE PYTHON_SIZEOF_VOID_P - ERROR_VARIABLE _PYTHON_ERROR_VALUE) + "import struct; print(struct.calcsize('@P'));" + RESULT_VARIABLE _PYTHON_SUCCESS + OUTPUT_VARIABLE PYTHON_SIZEOF_VOID_P + ERROR_VARIABLE _PYTHON_ERROR_VALUE) # message("PYTHON SIZEOF VOID p ${PYTHON_SIZEOF_VOID_P}") if (PYTHON_SIZEOF_VOID_P EQUAL 4 AND NOT ${FORCE_PYTHON_BUILD}) set(PYTHON32 TRUE) @@ -397,8 +370,8 @@ endif() # Actually build Python if (${BUILD_PYTHON}) - set(PYBIND11_VERSION 2.10.4) - set(PYBIND11_DIR 
"${TOOLS_DIR}/pybind11-${PYBIND11_VERSION}") + set(PYBIND11_VERSION 2.10.4) + set(PYBIND11_DIR "${TOOLS_DIR}/pybind11-${PYBIND11_VERSION}") # This is suppose to set the PYTHON_EXECUTABLE variable # First try to find the default python version @@ -410,7 +383,7 @@ if (${BUILD_PYTHON}) if (NOT EXISTS ${PYBIND11_DIR}) message("didnt find pybind, getting it") - execute_process(COMMAND ${TOOLS_DIR}/download_pybind11.${SCRIPT_EXTENSION} ${PYBIND11_VERSION}) + execute_process(COMMAND ${TOOLS_DIR}/download_pybind11.${SCRIPT_EXTENSION} ${PYBIND11_VERSION}) endif() add_subdirectory(${PYBIND11_DIR}) @@ -421,7 +394,7 @@ if (${BUILD_PYTHON}) target_include_directories(${MARABOU_PY} PRIVATE ${LIBS_INCLUDES}) set_target_properties(${MARABOU_PY} PROPERTIES - LIBRARY_OUTPUT_DIRECTORY ${PYTHON_LIBRARY_OUTPUT_DIRECTORY}) + LIBRARY_OUTPUT_DIRECTORY ${PYTHON_LIBRARY_OUTPUT_DIRECTORY}) if(NOT MSVC) target_compile_options(${MARABOU_LIB} PRIVATE -fPIC ${RELEASE_FLAGS}) endif() @@ -452,8 +425,8 @@ target_compile_options(${MARABOU_TEST_LIB} PRIVATE ${CXXTEST_FLAGS}) add_custom_target(build-tests ALL) add_custom_target(check - COMMAND ctest --output-on-failure -j${CTEST_NTHREADS} $$ARGS - DEPENDS build-tests build_input_parsers ${MARABOU_EXE}) + COMMAND ctest --output-on-failure -j${CTEST_NTHREADS} $$ARGS + DEPENDS build-tests build_input_parsers ${MARABOU_EXE}) # Decide which tests to run and execute set(TESTS_TO_RUN "") @@ -479,33 +452,33 @@ if (NOT ${TESTS_TO_RUN} STREQUAL "") # make ctest verbose set(CTEST_OUTPUT_ON_FAILURE 1) add_custom_command( - TARGET build-tests - POST_BUILD - COMMAND ctest --output-on-failure -L "\"(${TESTS_TO_RUN})\"" -j${CTEST_NTHREADS} $$ARGS + TARGET build-tests + POST_BUILD + COMMAND ctest --output-on-failure -L "\"(${TESTS_TO_RUN})\"" -j${CTEST_NTHREADS} $$ARGS ) endif() if (${BUILD_PYTHON} AND ${RUN_PYTHON_TEST}) if (MSVC) add_custom_command( - TARGET build-tests - POST_BUILD - COMMAND cp ${PYTHON_API_DIR}/Release/* ${PYTHON_API_DIR} + TARGET build-tests + 
POST_BUILD + COMMAND cp ${PYTHON_API_DIR}/Release/* ${PYTHON_API_DIR} ) endif() add_custom_command( - TARGET build-tests - POST_BUILD - COMMAND ${PYTHON_EXECUTABLE} -m pytest ${PYTHON_API_DIR}/test + TARGET build-tests + POST_BUILD + COMMAND ${PYTHON_EXECUTABLE} -m pytest ${PYTHON_API_DIR}/test ) endif() # Add the input parsers add_custom_target(build_input_parsers) add_dependencies(build_input_parsers ${MPS_PARSER} ${ACAS_PARSER} - ${BERKELEY_PARSER}) + ${BERKELEY_PARSER}) add_subdirectory(${SRC_DIR}) add_subdirectory(${TOOLS_DIR}) -add_subdirectory(${REGRESS_DIR}) \ No newline at end of file +add_subdirectory(${REGRESS_DIR}) diff --git a/src/basis_factorization/GaussianEliminator.h b/src/basis_factorization/GaussianEliminator.h index 6f93605ff6..2177021e55 100644 --- a/src/basis_factorization/GaussianEliminator.h +++ b/src/basis_factorization/GaussianEliminator.h @@ -19,7 +19,7 @@ #include "LUFactors.h" #define GAUSSIAN_LOG( x, ... ) \ - MARABOU_LOG( GlobalConfiguration::GAUSSIAN_ELIMINATION_LOGGING, "GaussianEliminator: %s\n", x ) + LOG( GlobalConfiguration::GAUSSIAN_ELIMINATION_LOGGING, "GaussianEliminator: %s\n", x ) class GaussianEliminator { diff --git a/src/basis_factorization/LUFactorization.h b/src/basis_factorization/LUFactorization.h index ae4befd5e7..400d53eb88 100644 --- a/src/basis_factorization/LUFactorization.h +++ b/src/basis_factorization/LUFactorization.h @@ -22,7 +22,7 @@ #include "List.h" #define LU_FACTORIZATION_LOG( x, ... 
) \ - MARABOU_LOG( GlobalConfiguration::BASIS_FACTORIZATION_LOGGING, "LUFactorization: %s\n", x ) + LOG( GlobalConfiguration::BASIS_FACTORIZATION_LOGGING, "LUFactorization: %s\n", x ) class EtaMatrix; class LPElement; diff --git a/src/basis_factorization/SparseFTFactorization.h b/src/basis_factorization/SparseFTFactorization.h index b885cab4b2..906f5b205e 100644 --- a/src/basis_factorization/SparseFTFactorization.h +++ b/src/basis_factorization/SparseFTFactorization.h @@ -24,7 +24,7 @@ #include "Statistics.h" #define SFTF_FACTORIZATION_LOG( x, ... ) \ - MARABOU_LOG( GlobalConfiguration::BASIS_FACTORIZATION_LOGGING, "SparseFTFactorization: %s\n", x ) + LOG( GlobalConfiguration::BASIS_FACTORIZATION_LOGGING, "SparseFTFactorization: %s\n", x ) /* This class performs a sparse FT factorization of a given matrix. diff --git a/src/basis_factorization/SparseGaussianEliminator.h b/src/basis_factorization/SparseGaussianEliminator.h index fd6a061dce..48078b42d9 100644 --- a/src/basis_factorization/SparseGaussianEliminator.h +++ b/src/basis_factorization/SparseGaussianEliminator.h @@ -23,7 +23,7 @@ #include "Statistics.h" #define SGAUSSIAN_LOG( x, ... ) \ - MARABOU_LOG( GlobalConfiguration::GAUSSIAN_ELIMINATION_LOGGING, "SparseGaussianEliminator: %s\n", x ) + LOG( GlobalConfiguration::GAUSSIAN_ELIMINATION_LOGGING, "SparseGaussianEliminator: %s\n", x ) class SparseGaussianEliminator { diff --git a/src/basis_factorization/SparseLUFactorization.h b/src/basis_factorization/SparseLUFactorization.h index 7d75ebe3c4..7b925fec48 100644 --- a/src/basis_factorization/SparseLUFactorization.h +++ b/src/basis_factorization/SparseLUFactorization.h @@ -22,7 +22,7 @@ #include "SparseLUFactors.h" #define BASIS_FACTORIZATION_LOG( x, ... 
) \ - MARABOU_LOG( GlobalConfiguration::BASIS_FACTORIZATION_LOGGING, "SparseLUFactorization: %s\n", x ) + LOG( GlobalConfiguration::BASIS_FACTORIZATION_LOGGING, "SparseLUFactorization: %s\n", x ) class EtaMatrix; class LPElement; diff --git a/src/cegar/IncrementalLinearization.h b/src/cegar/IncrementalLinearization.h index 9260e7e57c..ddf5b00fcf 100644 --- a/src/cegar/IncrementalLinearization.h +++ b/src/cegar/IncrementalLinearization.h @@ -20,7 +20,7 @@ #include "Query.h" #define INCREMENTAL_LINEARIZATION_LOG( x, ... ) \ - MARABOU_LOG( GlobalConfiguration::CEGAR_LOGGING, "IncrementalLinearization: %s\n", x ) + LOG( GlobalConfiguration::CEGAR_LOGGING, "IncrementalLinearization: %s\n", x ) class Engine; class IQuery; diff --git a/src/common/Debug.h b/src/common/Debug.h index 3d0cb9554f..55dfda4a92 100644 --- a/src/common/Debug.h +++ b/src/common/Debug.h @@ -27,7 +27,7 @@ #endif #ifndef NDEBUG -#define MARABOU_LOG( x, f, y, ... ) \ +#define LOG( x, f, y, ... ) \ { \ if ( ( x ) ) \ { \ @@ -35,7 +35,7 @@ } \ } #else -#define MARABOU_LOG( x, f, y, ... ) \ +#define LOG( x, f, y, ... 
) \ { \ } #endif diff --git a/src/configuration/GlobalConfiguration.cpp b/src/configuration/GlobalConfiguration.cpp index 6cbb8c9bb1..f9a074f076 100644 --- a/src/configuration/GlobalConfiguration.cpp +++ b/src/configuration/GlobalConfiguration.cpp @@ -119,15 +119,6 @@ const bool GlobalConfiguration::WRITE_JSON_PROOF = false; const unsigned GlobalConfiguration::BACKWARD_BOUND_PROPAGATION_DEPTH = 3; const unsigned GlobalConfiguration::MAX_ROUNDS_OF_BACKWARD_ANALYSIS = 10; -const GlobalConfiguration::PdgBoundType GlobalConfiguration::PGD_BOUND_TYPE = - GlobalConfiguration::ATTACK_INPUT; -const unsigned GlobalConfiguration::PGD_DEFAULT_NUM_ITER = 10; -const unsigned GlobalConfiguration::PGD_NUM_RESTARTS = 4; -const double GlobalConfiguration::ATTACK_INPUT_RANGE = 1000; -const unsigned GlobalConfiguration::CW_DEFAULT_ITERS = 1000; -const unsigned GlobalConfiguration::CW_NUM_RESTARTS = 4; -const double GlobalConfiguration::CW_LR = 1e-2; - #ifdef ENABLE_GUROBI const unsigned GlobalConfiguration::GUROBI_NUMBER_OF_THREADS = 1; const bool GlobalConfiguration::GUROBI_LOGGING = false; @@ -152,8 +143,6 @@ const bool GlobalConfiguration::ONNX_PARSER_LOGGING = false; const bool GlobalConfiguration::SOI_LOGGING = false; const bool GlobalConfiguration::SCORE_TRACKER_LOGGING = false; const bool GlobalConfiguration::CEGAR_LOGGING = false; -const bool GlobalConfiguration::CUSTOM_DNN_LOGGING = true; -const bool GlobalConfiguration::CW_LOGGING = true; const bool GlobalConfiguration::USE_SMART_FIX = false; const bool GlobalConfiguration::USE_LEAST_FIX = false; diff --git a/src/configuration/GlobalConfiguration.h b/src/configuration/GlobalConfiguration.h index b34a508a7e..3104edf79d 100644 --- a/src/configuration/GlobalConfiguration.h +++ b/src/configuration/GlobalConfiguration.h @@ -263,19 +263,6 @@ class GlobalConfiguration */ static const unsigned MAX_ROUNDS_OF_BACKWARD_ANALYSIS; - enum PdgBoundType { - ATTACK_INPUT = 0, - ATTACK_OUTPUT = 1 - }; - static const PdgBoundType 
PGD_BOUND_TYPE; - static const unsigned PGD_DEFAULT_NUM_ITER; - static const unsigned PGD_NUM_RESTARTS; - static const double ATTACK_INPUT_RANGE; - - static const unsigned CW_DEFAULT_ITERS; - static const unsigned CW_NUM_RESTARTS; - static const double CW_LR; - #ifdef ENABLE_GUROBI /* The number of threads Gurobi spawns @@ -305,8 +292,6 @@ class GlobalConfiguration static const bool SOI_LOGGING; static const bool SCORE_TRACKER_LOGGING; static const bool CEGAR_LOGGING; - static const bool CUSTOM_DNN_LOGGING; - static const bool CW_LOGGING; }; #endif // __GlobalConfiguration_h__ diff --git a/src/configuration/Options.cpp b/src/configuration/Options.cpp index 4e05fc295a..657ddb6b7c 100644 --- a/src/configuration/Options.cpp +++ b/src/configuration/Options.cpp @@ -71,7 +71,6 @@ void Options::initializeDefaultValues() _intOptions[SEED] = 1; _intOptions[NUM_BLAS_THREADS] = 1; _intOptions[NUM_CONSTRAINTS_TO_REFINE_INC_LIN] = 30; - _intOptions[ATTACK_TIMEOUT] = 60; /* Float options @@ -92,7 +91,7 @@ void Options::initializeDefaultValues() _stringOptions[SUMMARY_FILE] = ""; _stringOptions[SPLITTING_STRATEGY] = "auto"; _stringOptions[SNC_SPLITTING_STRATEGY] = "auto"; - _stringOptions[SYMBOLIC_BOUND_TIGHTENING_TYPE] = "alphacrown"; + _stringOptions[SYMBOLIC_BOUND_TIGHTENING_TYPE] = "deeppoly"; _stringOptions[MILP_SOLVER_BOUND_TIGHTENING_TYPE] = "none"; _stringOptions[QUERY_DUMP_FILE] = ""; _stringOptions[IMPORT_ASSIGNMENT_FILE_PATH] = "assignment.txt"; @@ -190,12 +189,10 @@ SymbolicBoundTighteningType Options::getSymbolicBoundTighteningType() const return SymbolicBoundTighteningType::SYMBOLIC_BOUND_TIGHTENING; else if ( strategyString == "deeppoly" ) return SymbolicBoundTighteningType::DEEP_POLY; - else if (strategyString == "alphacrown") - return SymbolicBoundTighteningType::ALPHA_CROWN; else if ( strategyString == "none" ) return SymbolicBoundTighteningType::NONE; else - return SymbolicBoundTighteningType::ALPHA_CROWN; + return SymbolicBoundTighteningType::DEEP_POLY; } 
MILPSolverBoundTighteningType Options::getMILPSolverBoundTighteningType() const diff --git a/src/configuration/Options.h b/src/configuration/Options.h index 3afec75a56..4dd4f31fd5 100644 --- a/src/configuration/Options.h +++ b/src/configuration/Options.h @@ -154,8 +154,6 @@ class Options // The strategy used for initializing the soi SOI_INITIALIZATION_STRATEGY, - // Adversarial attack timeout in seconds - ATTACK_TIMEOUT, // The procedure/solver for solving the LP LP_SOLVER }; diff --git a/src/engine/CDSmtCore.h b/src/engine/CDSmtCore.h index a352f2c6ec..8cbeebc5c0 100644 --- a/src/engine/CDSmtCore.h +++ b/src/engine/CDSmtCore.h @@ -76,7 +76,7 @@ #include "context/cdlist.h" #include "context/context.h" -#define SMT_LOG( x, ... ) MARABOU_LOG( GlobalConfiguration::SMT_CORE_LOGGING, "CDSmtCore: %s\n", x ) +#define SMT_LOG( x, ... ) LOG( GlobalConfiguration::SMT_CORE_LOGGING, "CDSmtCore: %s\n", x ) class EngineState; class Engine; diff --git a/src/engine/CWAttack.cpp b/src/engine/CWAttack.cpp deleted file mode 100644 index 27b3c5744c..0000000000 --- a/src/engine/CWAttack.cpp +++ /dev/null @@ -1,256 +0,0 @@ -#include "CWAttack.h" - -#ifdef BUILD_TORCH - -CWAttack::CWAttack( NLR::NetworkLevelReasoner *networkLevelReasoner ) - : networkLevelReasoner( networkLevelReasoner ) - , _device( torch::cuda::is_available() ? 
torch::kCUDA : torch::kCPU ) - , _model( std::make_unique( networkLevelReasoner ) ) - , _iters( GlobalConfiguration::CW_DEFAULT_ITERS ) - , _restarts( GlobalConfiguration::CW_NUM_RESTARTS ) - , _specLossWeight( 1e-2 ) - , _adversarialInput( nullptr ) - , _adversarialOutput( nullptr ) -{ - _inputSize = _model->getLayerSizes().first(); - getBounds( _inputBounds, GlobalConfiguration::PdgBoundType::ATTACK_INPUT ); - getBounds( _outputBounds, GlobalConfiguration::PdgBoundType::ATTACK_OUTPUT ); - - _inputLb = torch::tensor( _inputBounds.first.getContainer(), torch::kFloat32 ).to( _device ); - _inputUb = torch::tensor( _inputBounds.second.getContainer(), torch::kFloat32 ).to( _device ); - - auto vars = generateSampleAndEpsilon(); - _x0 = vars.first; -} - -CWAttack::~CWAttack() -{ - if ( _adversarialInput ) - delete[] _adversarialInput; - if ( _adversarialOutput ) - delete[] _adversarialOutput; -} - -bool CWAttack::runAttack() -{ - CW_LOG( "-----Starting CW attack-----" ); - auto adversarial = findAdvExample(); - auto advInput = adversarial.first.to( torch::kDouble ); - auto advPred = adversarial.second.to( torch::kDouble ); - - bool isFooled = - isWithinBounds( advInput, _inputBounds ) && isWithinBounds( advPred, _outputBounds ); - - auto inputPtr = advInput.data_ptr(); - auto predPtr = advPred.data_ptr(); - size_t outSize = advPred.size( 1 ); - - if ( isFooled ) - { - _adversarialInput = new double[_inputSize]; - _adversarialOutput = new double[outSize]; - std::copy( inputPtr, inputPtr + _inputSize, _adversarialInput ); - std::copy( predPtr, predPtr + outSize, _adversarialOutput ); - } - CW_LOG( "Input Lower Bounds : " ); - for ( auto &bound : _inputBounds.first.getContainer() ) - printValue( bound ); - CW_LOG( "Input Upper Bounds : " ); - for ( auto &bound : _inputBounds.second.getContainer() ) - printValue( bound ); - - CW_LOG( "Adversarial Input:" ); - for ( int i = 0; i < advInput.numel(); ++i ) - { - CW_LOG( Stringf( "x%u=%.3lf", i, inputPtr[i] ).ascii() ); - } - 
CW_LOG( "Output Lower Bounds : " ); - for ( auto &bound : _outputBounds.first.getContainer() ) - printValue( bound ); - CW_LOG( "Output Upper Bounds : " ); - for ( auto &bound : _outputBounds.second.getContainer() ) - printValue( bound ); - - CW_LOG( "Adversarial Prediction: " ); - for ( int i = 0; i < advPred.numel(); ++i ) - { - CW_LOG( Stringf( "y%u=%.3lf", i, predPtr[i] ).ascii() ); - } - - - if ( isFooled ) - { - CW_LOG( "Model fooled: Yes \n ------ CW Attack Succeed ------\n" ); - } - else - CW_LOG( "Model fooled: No \n ------ CW Attack Failed ------\n" ); - // Concretize assignments if attack succeeded - if ( _adversarialInput ) - networkLevelReasoner->concretizeInputAssignment( _assignments, _adversarialInput ); - return isFooled; -} - - -void CWAttack::getBounds( std::pair, Vector> &bounds, signed type ) const -{ - unsigned layerIndex = type == GlobalConfiguration::PdgBoundType::ATTACK_INPUT - ? 0 - : networkLevelReasoner->getNumberOfLayers() - 1; - const NLR::Layer *layer = networkLevelReasoner->getLayer( layerIndex ); - for ( unsigned i = 0; i < layer->getSize(); ++i ) - { - bounds.first.append( layer->getLb( i ) ); - bounds.second.append( layer->getUb( i ) ); - } -} - -std::pair CWAttack::generateSampleAndEpsilon() -{ - Vector sample( _inputSize, 0.0 ), eps( _inputSize, 0.0 ); - for ( unsigned i = 0; i < _inputSize; ++i ) - { - double lo = _inputBounds.first.get( i ), hi = _inputBounds.second.get( i ); - if ( std::isfinite( lo ) && std::isfinite( hi ) ) - { - sample[i] = 0.5 * ( lo + hi ); - eps[i] = 0.5 * ( hi - lo ); - } - else - { - sample[i] = 0.0; - eps[i] = GlobalConfiguration::ATTACK_INPUT_RANGE; - } - } - auto s = torch::tensor( sample.getContainer(), torch::kFloat32 ).unsqueeze( 0 ).to( _device ); - auto e = torch::tensor( eps.getContainer(), torch::kFloat32 ).to( _device ); - return { s, e }; -} - -torch::Tensor CWAttack::calculateLoss( const torch::Tensor &pred ) -{ - auto lb = torch::tensor( _outputBounds.first.data(), torch::kFloat32 ).to( 
_device ); - auto ub = torch::tensor( _outputBounds.second.data(), torch::kFloat32 ).to( _device ); - auto ubv = torch::sum( torch::square( torch::relu( pred - ub ) ) ); - auto lbv = torch::sum( torch::square( torch::relu( lb - pred ) ) ); - return ( ubv + lbv ).to( _device ); -} - -std::pair CWAttack::findAdvExample() -{ - torch::Tensor bestAdv, bestPred; - double bestL2 = std::numeric_limits::infinity(); - double timeoutForAttack = ( Options::get()->getInt( Options::ATTACK_TIMEOUT ) == 0 - ? FloatUtils::infinity() - : Options::get()->getInt( Options::ATTACK_TIMEOUT ) ); - CW_LOG( Stringf( "Adversarial attack timeout set to %f\n", timeoutForAttack ).ascii() ); - timespec startTime = TimeUtils::sampleMicro(); - torch::Tensor advExample; - for ( unsigned r = 0; r < _restarts; ++r ) - { - unsigned long timePassed = TimeUtils::timePassed( startTime, TimeUtils::sampleMicro() ); - if ( static_cast( timePassed ) / MICROSECONDS_TO_SECONDS > timeoutForAttack ) - { - throw MarabouError( MarabouError::TIMEOUT, "Attack failed due to timeout" ); - } - torch::Tensor delta = torch::zeros_like( _x0, torch::requires_grad() ).to( _device ); - torch::optim::Adam optimizer( { delta }, - torch::optim::AdamOptions( GlobalConfiguration::CW_LR ) ); - - for ( unsigned it = 0; it < _iters; ++it ) - { - torch::Tensor prevExample = advExample; - advExample = ( _x0 + delta ).clamp( _inputLb, _inputUb ); - // Skip the equality check on the first iteration - if ( ( it > 0 && prevExample.defined() && advExample.equal( prevExample ) ) || - !isWithinBounds( advExample, _inputBounds ) ) - break; - auto pred = _model->forward( advExample ); - auto specLoss = calculateLoss( pred ); - auto l2norm = torch::sum( torch::pow( advExample - _x0, 2 ) ); - auto loss = l2norm + _specLossWeight * specLoss; - - optimizer.zero_grad(); - loss.backward(); - optimizer.step(); - - if ( specLoss.item() == 0.0 ) - { - double curL2 = l2norm.item(); - if ( curL2 < bestL2 ) - { - bestL2 = curL2; - bestAdv = 
advExample.detach(); - bestPred = pred.detach(); - } - } - } - } - - if ( !bestAdv.defined() ) - { - bestAdv = ( _x0 + torch::zeros_like( _x0 ) ).clamp( _inputLb, _inputUb ); - bestPred = _model->forward( bestAdv ); - } - - return { bestAdv, bestPred }; -} - -bool CWAttack::isWithinBounds( const torch::Tensor &sample, - const std::pair, Vector> &bounds ) -{ - torch::Tensor flatInput = sample.view( { -1 } ); - if ( flatInput.numel() != (int)bounds.first.size() || - flatInput.numel() != (int)bounds.second.size() ) - throw std::runtime_error( "Mismatch in sizes of input and bounds" ); - - for ( int64_t i = 0; i < flatInput.size( 0 ); ++i ) - { - double v = flatInput[i].item(); - double lo = bounds.first.get( i ), hi = bounds.second.get( i ); - if ( std::isinf( lo ) && std::isinf( hi ) ) - continue; - if ( std::isinf( lo ) ) - { - if ( v > hi ) - return false; - } - else if ( std::isinf( hi ) ) - { - if ( v < lo ) - return false; - } - else if ( v < lo || v > hi ) - return false; - } - return true; -} - -double CWAttack::getAssignment( int index ) -{ - return _assignments[index]; -} - -void CWAttack::printValue( double value ) -{ - if ( std::isinf( value ) ) - { - if ( value < 0 ) - { - CW_LOG( "-inf" ); - } - else - { - CW_LOG( "inf" ); - } - } - else if ( std::isnan( value ) ) - { - CW_LOG( "nan" ); - } - else - { - CW_LOG( Stringf( "%.3lf", value ).ascii() ); - } -} - -#endif // BUILD_TORCH diff --git a/src/engine/CWAttack.h b/src/engine/CWAttack.h deleted file mode 100644 index 742c99fe7b..0000000000 --- a/src/engine/CWAttack.h +++ /dev/null @@ -1,64 +0,0 @@ -#ifndef __CWATTACK_H__ -#define __CWATTACK_H__ -#ifdef BUILD_TORCH - -#include "CustomDNN.h" -#include "InputQuery.h" -#include "Options.h" - -#include -#include -#include - -#define CW_LOG( x, ... ) MARABOU_LOG( GlobalConfiguration::CW_LOGGING, "CW: %s\n", x ) - -/** - CWAttack implements the Carlini–Wagner L2 adversarial attack, - optimizing min ||δ||_2^2 + c * specLoss(x0 + δ) with Adam and restarts. 
-*/ -class CWAttack -{ -public: - enum { - MICROSECONDS_TO_SECONDS = 1000000 - }; - CWAttack( NLR::NetworkLevelReasoner *networkLevelReasoner ); - ~CWAttack(); - - /** - Runs the CW attack. Returns true if a valid adversarial example is found. - */ - bool runAttack(); - double getAssignment( int index ); - -private: - NLR::NetworkLevelReasoner *networkLevelReasoner; - torch::Device _device; - std::unique_ptr _model; - - unsigned _inputSize; - unsigned _iters; - unsigned _restarts; - double _specLossWeight; - - std::pair, Vector> _inputBounds; - std::pair, Vector> _outputBounds; - torch::Tensor _inputLb; - torch::Tensor _inputUb; - torch::Tensor _x0; - - Map _assignments; - double *_adversarialInput; - double *_adversarialOutput; - - void getBounds( std::pair, Vector> &bounds, signed type ) const; - std::pair generateSampleAndEpsilon(); - torch::Tensor calculateLoss( const torch::Tensor &predictions ); - std::pair findAdvExample(); - static bool isWithinBounds( const torch::Tensor &sample, - const std::pair, Vector> &bounds ); - static void printValue( double value ); -}; - -#endif // BUILD_TORCH -#endif // __CWATTACK_H__ diff --git a/src/engine/CustomDNN.cpp b/src/engine/CustomDNN.cpp deleted file mode 100644 index 3fbdb8d186..0000000000 --- a/src/engine/CustomDNN.cpp +++ /dev/null @@ -1,302 +0,0 @@ -#include "NetworkLevelReasoner.h" -#include "CustomDNN.h" -#ifdef BUILD_TORCH -namespace NLR { -CustomRelu::CustomRelu( const NetworkLevelReasoner *nlr, unsigned layerIndex ) - : _networkLevelReasoner( nlr ) - , _reluLayerIndex( layerIndex ) -{ -} - -torch::Tensor CustomRelu::forward( torch::Tensor x ) const -{ - return CustomReluFunction::apply( x, _networkLevelReasoner, _reluLayerIndex ); -} - -CustomMaxPool::CustomMaxPool( const NetworkLevelReasoner *nlr, unsigned layerIndex ) - : _networkLevelReasoner( nlr ) - , _maxLayerIndex( layerIndex ) -{ -} - -torch::Tensor CustomMaxPool::forward( torch::Tensor x ) const -{ - return CustomMaxPoolFunction::apply( x, 
_networkLevelReasoner, _maxLayerIndex ); -} - -void CustomDNN::setWeightsAndBiases( torch::nn::Linear &linearLayer, - const Layer *layer, - unsigned sourceLayer, - unsigned inputSize, - unsigned outputSize ) -{ - Vector> layerWeights( outputSize, Vector( inputSize ) ); - Vector layerBiases( outputSize ); - - // Fetch weights and biases from networkLevelReasoner - for ( unsigned j = 0; j < outputSize; j++ ) - { - for ( unsigned k = 0; k < inputSize; k++ ) - { - double weight_value = layer->getWeight( sourceLayer, k, j ); - layerWeights[j][k] = static_cast( weight_value ); - } - double bias_value = layer->getBias( j ); - layerBiases[j] = static_cast( bias_value ); - } - - Vector flattenedWeights; - for ( const auto &weight : layerWeights ) - { - for ( const auto &w : weight ) - { - flattenedWeights.append( w ); - } - } - - torch::Tensor weightTensor = torch::tensor( flattenedWeights.getContainer(), torch::kFloat ) - .view( { outputSize, inputSize } ); - torch::Tensor biasTensor = torch::tensor( layerBiases.getContainer(), torch::kFloat ); - - torch::NoGradGuard no_grad; - linearLayer->weight.set_( weightTensor ); - linearLayer->bias.set_( biasTensor ); -} - -void CustomDNN::weightedSum( unsigned i, const Layer *layer ) -{ - unsigned sourceLayer = i - 1; - const Layer *prevLayer = _networkLevelReasoner->getLayer( sourceLayer ); - unsigned inputSize = prevLayer->getSize(); - unsigned outputSize = layer->getSize(); - - if ( outputSize > 0 ) - { - auto linearLayer = torch::nn::Linear( torch::nn::LinearOptions( inputSize, outputSize ) ); - _linearLayers.append( linearLayer ); - - setWeightsAndBiases( linearLayer, layer, sourceLayer, inputSize, outputSize ); - - register_module( "linear" + std::to_string( i ), linearLayer ); - } -} - - -CustomDNN::CustomDNN( const NetworkLevelReasoner *nlr ) -{ - CUSTOM_DNN_LOG( "----- Construct Custom Network -----" ); - _networkLevelReasoner = nlr; - _numberOfLayers = _networkLevelReasoner->getNumberOfLayers(); - for ( unsigned i = 0; i 
< _numberOfLayers; i++ ) - { - const Layer *layer = _networkLevelReasoner->getLayer( i ); - _layerSizes.append( layer->getSize() ); - Layer::Type layerType = layer->getLayerType(); - _layersOrder.append( layerType ); - switch ( layerType ) - { - case Layer::INPUT: - break; - case Layer::WEIGHTED_SUM: - weightedSum( i, layer ); - break; - case Layer::RELU: - { - auto reluLayer = std::make_shared( _networkLevelReasoner, i ); - _reluLayers.append( reluLayer ); - register_module( "ReLU" + std::to_string( i ), reluLayer ); - break; - } - case Layer::MAX: - { - auto maxPoolLayer = std::make_shared( _networkLevelReasoner, i ); - _maxPoolLayers.append( maxPoolLayer ); - register_module( "maxPool" + std::to_string( i ), maxPoolLayer ); - break; - } - default: - CUSTOM_DNN_LOG( "Unsupported layer type\n" ); - throw MarabouError( MarabouError::DEBUGGING_ERROR ); - } - } -} - -torch::Tensor CustomDNN::forward( torch::Tensor x ) -{ - unsigned linearIndex = 0; - unsigned reluIndex = 0; - unsigned maxPoolIndex = 0; - for ( unsigned i = 0; i < _numberOfLayers; i++ ) - { - const Layer::Type layerType = _layersOrder[i]; - switch ( layerType ) - { - case Layer::INPUT: - break; - case Layer::WEIGHTED_SUM: - x = _linearLayers[linearIndex]->forward( x ); - linearIndex++; - break; - case Layer::RELU: - x = _reluLayers[reluIndex]->forward( x ); - reluIndex++; - break; - case Layer::MAX: - x = _maxPoolLayers[maxPoolIndex]->forward( x ); - maxPoolIndex++; - break; - default: - CUSTOM_DNN_LOG( "Unsupported layer type\n" ); - throw MarabouError( MarabouError::DEBUGGING_ERROR ); - break; - } - } - return x; -} - -torch::Tensor CustomReluFunction::forward( torch::autograd::AutogradContext *ctx, - torch::Tensor x, - const NetworkLevelReasoner *nlr, - unsigned int layerIndex ) -{ - ctx->save_for_backward( { x } ); - - const Layer *layer = nlr->getLayer( layerIndex ); - torch::Tensor reluOutputs = torch::zeros( { 1, layer->getSize() } ); - torch::Tensor reluGradients = torch::zeros( { 1, 
layer->getSize() } ); - - for ( unsigned neuron = 0; neuron < layer->getSize(); ++neuron ) - { - auto sources = layer->getActivationSources( neuron ); - ASSERT( sources.size() == 1 ); - const NeuronIndex &sourceNeuron = sources.back(); - int index = static_cast( sourceNeuron._neuron ); - reluOutputs.index_put_( { 0, static_cast( neuron ) }, - torch::clamp_min( x.index( { 0, index } ), 0 ) ); - reluGradients.index_put_( { 0, static_cast( neuron ) }, x.index( { 0, index } ) > 0 ); - } - - ctx->saved_data["reluGradients"] = reluGradients; - - return reluOutputs; -} - -std::vector CustomReluFunction::backward( torch::autograd::AutogradContext *ctx, - std::vector grad_output ) -{ - auto saved = ctx->get_saved_variables(); - auto input = saved[0]; - - auto reluGradients = ctx->saved_data["reluGradients"].toTensor(); - auto grad_input = grad_output[0] * reluGradients[0]; - - return { grad_input, torch::Tensor(), torch::Tensor() }; -} - -torch::Tensor CustomMaxPoolFunction::forward( torch::autograd::AutogradContext *ctx, - torch::Tensor x, - const NetworkLevelReasoner *nlr, - unsigned int layerIndex ) -{ - ctx->save_for_backward( { x } ); - - const Layer *layer = nlr->getLayer( layerIndex ); - torch::Tensor maxOutputs = torch::zeros( { 1, layer->getSize() } ); - torch::Tensor argMaxOutputs = torch::zeros( { 1, layer->getSize() }, torch::kInt64 ); - - for ( unsigned neuron = 0; neuron < layer->getSize(); ++neuron ) - { - auto sources = layer->getActivationSources( neuron ); - torch::Tensor sourceValues = torch::zeros( sources.size(), torch::kFloat ); - torch::Tensor sourceIndices = torch::zeros( sources.size() ); - - for ( int i = sources.size() - 1; i >= 0; --i ) - { - const NeuronIndex &activationNeuron = sources.back(); - int index = static_cast( activationNeuron._neuron ); - sources.popBack(); - sourceValues.index_put_( { i }, x.index( { 0, index } ) ); - sourceIndices.index_put_( { i }, index ); - } - - maxOutputs.index_put_( { 0, static_cast( neuron ) }, torch::max( 
sourceValues ) ); - argMaxOutputs.index_put_( { 0, static_cast( neuron ) }, - sourceIndices.index( { torch::argmax( sourceValues ) } ) ); - } - - ctx->saved_data["argMaxOutputs"] = argMaxOutputs; - - return maxOutputs; -} - -std::vector CustomMaxPoolFunction::backward( torch::autograd::AutogradContext *ctx, - std::vector grad_output ) -{ - auto saved = ctx->get_saved_variables(); - auto input = saved[0]; - - auto grad_input = torch::zeros_like( input ); - - auto indices = ctx->saved_data["argMaxOutputs"].toTensor(); - - grad_input[0].index_add_( 0, indices.flatten(), grad_output[0].flatten() ); - - return { grad_input, torch::Tensor(), torch::Tensor() }; -} - -const Vector &CustomDNN::getLayerSizes() const -{ - return _layerSizes; -} - -torch::Tensor CustomDNN::getLayerWeights(unsigned layerIndex) const { - if (_layersOrder[layerIndex] == Layer::WEIGHTED_SUM) { - auto linearLayer = _linearLayers[layerIndex]; - return linearLayer->weight; // Returning weights of the corresponding linear layer - } - throw std::runtime_error("Requested weights for a non-weighted sum layer."); -} - -torch::Tensor CustomDNN::getLayerBias(unsigned layerIndex) const { - if (_layersOrder[layerIndex] == Layer::WEIGHTED_SUM) { - auto linearLayer = _linearLayers[layerIndex]; - return linearLayer->bias; // Returning bias of the corresponding linear layer - } - throw std::runtime_error("Requested bias for a non-weighted sum layer."); -} - -void CustomDNN::getInputBounds(torch::Tensor &lbTensor, torch::Tensor &ubTensor) const -{ - const Layer *layer = _networkLevelReasoner->getLayer(0); - unsigned size = layer->getSize(); - - std::vector lowerBounds; - std::vector upperBounds; - lowerBounds.reserve(size); - upperBounds.reserve(size); - - for (unsigned neuron = 0; neuron < size; ++neuron) - { - lowerBounds.push_back(layer->getLb(neuron)); - upperBounds.push_back(layer->getUb(neuron)); - } - - lbTensor = torch::tensor(lowerBounds, torch::kDouble); - ubTensor = torch::tensor(upperBounds, 
torch::kDouble); -} - - - -std::vector> CustomDNN::getMaxPoolSources(const Layer* maxPoolLayer) { - std::vector> sources; - unsigned size = maxPoolLayer->getSize(); - for (unsigned neuron = 0; neuron < size; ++neuron) { - - sources.push_back(maxPoolLayer->getActivationSources(neuron)); - } - return sources; -} - -} - -#endif \ No newline at end of file diff --git a/src/engine/CustomDNN.h b/src/engine/CustomDNN.h deleted file mode 100644 index a414d80f5e..0000000000 --- a/src/engine/CustomDNN.h +++ /dev/null @@ -1,119 +0,0 @@ -#ifdef BUILD_TORCH -#ifndef _CustomDNN_h_ -#define _CustomDNN_h_ - -#include "Layer.h" -#include "Vector.h" - -#include - -#undef Warning -#include - -#define CUSTOM_DNN_LOG( x, ... ) \ - MARABOU_LOG( GlobalConfiguration::CUSTOM_DNN_LOGGING, "customDNN: %s\n", x ) - -/* - Custom differentiation function for ReLU, implementing the forward and backward propagation - for the ReLU operation according to each variable's source layer as defined in the nlr. -*/ -namespace NLR { -class CustomReluFunction : public torch::autograd::Function -{ -public: - static torch::Tensor forward( torch::autograd::AutogradContext *ctx, - torch::Tensor x, - const NetworkLevelReasoner *nlr, - unsigned layerIndex ); - - static std::vector backward( torch::autograd::AutogradContext *ctx, - std::vector grad_output ); -}; - -class CustomRelu : public torch::nn::Module -{ -public: - CustomRelu( const NetworkLevelReasoner *nlr, unsigned layerIndex ); - torch::Tensor forward( torch::Tensor x ) const; - -private: - const NetworkLevelReasoner *_networkLevelReasoner; - unsigned _reluLayerIndex; -}; - -/* - Custom differentiation function for max pooling, implementing the forward and backward propagation - for the max pooling operation according to each variable's source layer as defined in the nlr. 
-*/ -class CustomMaxPoolFunction : public torch::autograd::Function -{ -public: - static torch::Tensor forward( torch::autograd::AutogradContext *ctx, - torch::Tensor x, - const NetworkLevelReasoner *nlr, - unsigned layerIndex ); - - static std::vector backward( torch::autograd::AutogradContext *ctx, - std::vector grad_output ); -}; - -class CustomMaxPool : public torch::nn::Module -{ -public: - CustomMaxPool( const NetworkLevelReasoner *nlr, unsigned layerIndex ); - torch::Tensor forward( torch::Tensor x ) const; - -private: - const NetworkLevelReasoner *_networkLevelReasoner; - unsigned _maxLayerIndex; -}; - -/* - torch implementation of the network according to the nlr. - */ -class CustomDNN : public torch::nn::Module -{ -public: - static void setWeightsAndBiases( torch::nn::Linear &linearLayer, - const Layer *layer, - unsigned sourceLayer, - unsigned inputSize, - unsigned outputSize ); - void weightedSum( unsigned i, const Layer *layer ); - explicit CustomDNN( const NetworkLevelReasoner *networkLevelReasoner ); - torch::Tensor getLayerWeights( unsigned layerIndex ) const; - torch::Tensor getLayerBias( unsigned layerIndex ) const; - torch::Tensor forward( torch::Tensor x ); - const Vector &getLayerSizes() const; - void getInputBounds( torch::Tensor &lbTensor, torch::Tensor &ubTensor ) const; - std::vector> getMaxPoolSources(const Layer* maxPoolLayer); - Vector getLinearLayers() - { - return _linearLayers; - } - Vector getLayersOrder() const - { - return _layersOrder; - } - Vector getLayersOrder() - { - return _layersOrder; - } - - unsigned getNumberOfLayers() const - { - return _numberOfLayers; - } - -private: - const NetworkLevelReasoner *_networkLevelReasoner; - Vector _layerSizes; - Vector> _reluLayers; - Vector> _maxPoolLayers; - Vector _linearLayers; - Vector _layersOrder; - unsigned _numberOfLayers; -}; -} // namespace NLR -#endif // _CustomDNN_h_ -#endif \ No newline at end of file diff --git a/src/engine/DantzigsRule.h b/src/engine/DantzigsRule.h index 
b3fda42e77..5e57e28c24 100644 --- a/src/engine/DantzigsRule.h +++ b/src/engine/DantzigsRule.h @@ -19,7 +19,7 @@ #include "EntrySelectionStrategy.h" #define DANTZIG_LOG( x, ... ) \ - MARABOU_LOG( GlobalConfiguration::DANTZIGS_RULE_LOGGING, "DantzigsRule: %s\n", x ) + LOG( GlobalConfiguration::DANTZIGS_RULE_LOGGING, "DantzigsRule: %s\n", x ) class String; diff --git a/src/engine/DnCManager.h b/src/engine/DnCManager.h index 545a9fd326..ee4a55a19d 100644 --- a/src/engine/DnCManager.h +++ b/src/engine/DnCManager.h @@ -25,7 +25,7 @@ #include #define DNC_MANAGER_LOG( x, ... ) \ - MARABOU_LOG( GlobalConfiguration::DNC_MANAGER_LOGGING, "DnCManager: %s\n", x ) + LOG( GlobalConfiguration::DNC_MANAGER_LOGGING, "DnCManager: %s\n", x ) class Query; diff --git a/src/engine/Engine.cpp b/src/engine/Engine.cpp index 028bae2fbc..86f45ecd0c 100644 --- a/src/engine/Engine.cpp +++ b/src/engine/Engine.cpp @@ -2444,8 +2444,6 @@ unsigned Engine::performSymbolicBoundTightening( Query *inputQuery ) _networkLevelReasoner->symbolicBoundPropagation(); else if ( _symbolicBoundTighteningType == SymbolicBoundTighteningType::DEEP_POLY ) _networkLevelReasoner->deepPolyPropagation(); - else if ( _symbolicBoundTighteningType == SymbolicBoundTighteningType::ALPHA_CROWN ) - _networkLevelReasoner->alphaCrown(); // Step 3: Extract the bounds List tightenings; diff --git a/src/engine/Engine.h b/src/engine/Engine.h index 5e53564f33..a3ea1c22d3 100644 --- a/src/engine/Engine.h +++ b/src/engine/Engine.h @@ -57,7 +57,7 @@ #undef ERROR #endif -#define ENGINE_LOG( x, ... ) MARABOU_LOG( GlobalConfiguration::ENGINE_LOGGING, "Engine: %s\n", x ) +#define ENGINE_LOG( x, ... ) LOG( GlobalConfiguration::ENGINE_LOGGING, "Engine: %s\n", x ) class EngineState; class Query; diff --git a/src/engine/InputQuery.cpp b/src/engine/InputQuery.cpp index c28913a20b..d275646b06 100644 --- a/src/engine/InputQuery.cpp +++ b/src/engine/InputQuery.cpp @@ -29,7 +29,7 @@ #include "SoftmaxConstraint.h" #define INPUT_QUERY_LOG( x, ... 
) \ - MARABOU_LOG( GlobalConfiguration::INPUT_QUERY_LOGGING, "Marabou Query: %s\n", x ) + LOG( GlobalConfiguration::INPUT_QUERY_LOGGING, "Marabou Query: %s\n", x ) using namespace CVC4::context; diff --git a/src/engine/MarabouError.h b/src/engine/MarabouError.h index 1cff574056..2f2ee54c0f 100644 --- a/src/engine/MarabouError.h +++ b/src/engine/MarabouError.h @@ -66,8 +66,6 @@ class MarabouError : public Error FEATURE_NOT_YET_SUPPORTED = 900, - TIMEOUT = 32, - DEBUGGING_ERROR = 999, }; diff --git a/src/engine/PLConstraintScoreTracker.h b/src/engine/PLConstraintScoreTracker.h index 6798074dd2..ab62333e6b 100644 --- a/src/engine/PLConstraintScoreTracker.h +++ b/src/engine/PLConstraintScoreTracker.h @@ -24,7 +24,7 @@ #include #define SCORE_TRACKER_LOG( x, ... ) \ - MARABOU_LOG( GlobalConfiguration::SCORE_TRACKER_LOGGING, "PLConstraintScoreTracker: %s\n", x ) + LOG( GlobalConfiguration::SCORE_TRACKER_LOGGING, "PLConstraintScoreTracker: %s\n", x ) struct ScoreEntry { diff --git a/src/engine/ProjectedSteepestEdge.h b/src/engine/ProjectedSteepestEdge.h index c84ba9bac9..70b3265ef0 100644 --- a/src/engine/ProjectedSteepestEdge.h +++ b/src/engine/ProjectedSteepestEdge.h @@ -20,7 +20,7 @@ #include "SparseUnsortedList.h" #define PSE_LOG( x, ... ) \ - MARABOU_LOG( GlobalConfiguration::PROJECTED_STEEPEST_EDGE_LOGGING, "Projected SE: %s\n", x ) + LOG( GlobalConfiguration::PROJECTED_STEEPEST_EDGE_LOGGING, "Projected SE: %s\n", x ) class ProjectedSteepestEdgeRule : public IProjectedSteepestEdgeRule { diff --git a/src/engine/Query.cpp b/src/engine/Query.cpp index 6c696c42be..77b22c9c9f 100644 --- a/src/engine/Query.cpp +++ b/src/engine/Query.cpp @@ -29,7 +29,7 @@ #include "SymbolicBoundTighteningType.h" #define INPUT_QUERY_LOG( x, ... 
) \ - MARABOU_LOG( GlobalConfiguration::INPUT_QUERY_LOGGING, "Input Query: %s\n", x ) + LOG( GlobalConfiguration::INPUT_QUERY_LOGGING, "Input Query: %s\n", x ) Query::Query() : _ensureSameSourceLayerInNLR( Options::get()->getSymbolicBoundTighteningType() == diff --git a/src/engine/SmtCore.h b/src/engine/SmtCore.h index 0274d475b6..ad1d61f8e9 100644 --- a/src/engine/SmtCore.h +++ b/src/engine/SmtCore.h @@ -28,7 +28,7 @@ #include -#define SMT_LOG( x, ... ) MARABOU_LOG( GlobalConfiguration::SMT_CORE_LOGGING, "SmtCore: %s\n", x ) +#define SMT_LOG( x, ... ) LOG( GlobalConfiguration::SMT_CORE_LOGGING, "SmtCore: %s\n", x ) class EngineState; class IEngine; diff --git a/src/engine/SumOfInfeasibilitiesManager.h b/src/engine/SumOfInfeasibilitiesManager.h index 7a9a3bf448..a823a92fed 100644 --- a/src/engine/SumOfInfeasibilitiesManager.h +++ b/src/engine/SumOfInfeasibilitiesManager.h @@ -29,7 +29,7 @@ #include "T/stdlib.h" #include "Vector.h" -#define SOI_LOG( x, ... ) MARABOU_LOG( GlobalConfiguration::SOI_LOGGING, "SoIManager: %s\n", x ) +#define SOI_LOG( x, ... ) LOG( GlobalConfiguration::SOI_LOGGING, "SoIManager: %s\n", x ) class SumOfInfeasibilitiesManager { diff --git a/src/engine/SymbolicBoundTighteningType.h b/src/engine/SymbolicBoundTighteningType.h index 56a48fa730..509c0ae21c 100644 --- a/src/engine/SymbolicBoundTighteningType.h +++ b/src/engine/SymbolicBoundTighteningType.h @@ -22,8 +22,7 @@ enum class SymbolicBoundTighteningType { SYMBOLIC_BOUND_TIGHTENING = 0, DEEP_POLY = 1, - ALPHA_CROWN = 2, - NONE = 3, + NONE = 2, }; #endif // __SymbolicBoundTighteningType_h__ diff --git a/src/engine/Tableau.h b/src/engine/Tableau.h index 175efbe3c8..f5bf063f57 100644 --- a/src/engine/Tableau.h +++ b/src/engine/Tableau.h @@ -29,7 +29,7 @@ #include "SparseUnsortedList.h" #include "Statistics.h" -#define TABLEAU_LOG( x, ... ) MARABOU_LOG( GlobalConfiguration::TABLEAU_LOGGING, "Tableau: %s\n", x ) +#define TABLEAU_LOG( x, ... 
) LOG( GlobalConfiguration::TABLEAU_LOGGING, "Tableau: %s\n", x ) class Equation; class ICostFunctionManager; diff --git a/src/input_parsers/MpsParser.h b/src/input_parsers/MpsParser.h index 76c4afab96..71f177f493 100644 --- a/src/input_parsers/MpsParser.h +++ b/src/input_parsers/MpsParser.h @@ -20,7 +20,7 @@ #include "Map.h" #include "Set.h" -#define MPS_LOG( x, ... ) MARABOU_LOG( GlobalConfiguration::MPS_PARSER_LOGGING, "MpsParser: %s\n", x ) +#define MPS_LOG( x, ... ) LOG( GlobalConfiguration::MPS_PARSER_LOGGING, "MpsParser: %s\n", x ) class IQuery; class String; diff --git a/src/input_parsers/OnnxParser.h b/src/input_parsers/OnnxParser.h index 3f0149f6c8..2b316a2004 100644 --- a/src/input_parsers/OnnxParser.h +++ b/src/input_parsers/OnnxParser.h @@ -25,7 +25,7 @@ #include "Vector.h" #include "onnx.proto3.pb.h" -#define ONNX_LOG( x, ... ) MARABOU_LOG( GlobalConfiguration::ONNX_PARSER_LOGGING, "OnnxParser: %s\n", x ) +#define ONNX_LOG( x, ... ) LOG( GlobalConfiguration::ONNX_PARSER_LOGGING, "OnnxParser: %s\n", x ) class OnnxParser diff --git a/src/nlr/AlphaCrown.cpp b/src/nlr/AlphaCrown.cpp deleted file mode 100644 index c68f9554d9..0000000000 --- a/src/nlr/AlphaCrown.cpp +++ /dev/null @@ -1,546 +0,0 @@ -// -// Created by User on 7/23/2025. 
-// - -#include "AlphaCrown.h" -#include "MStringf.h" -#include "NetworkLevelReasoner.h" -#include "Layer.h" - -namespace NLR { -AlphaCrown::AlphaCrown( LayerOwner *layerOwner ) - : _layerOwner( layerOwner ) -{ - _nlr = dynamic_cast( layerOwner ); - _network = new CustomDNN( _nlr ); - _network->getInputBounds( _lbInput, _ubInput ); - _inputSize = _lbInput.size( 0 ); - _linearLayers = _network->getLinearLayers().getContainer(); - _layersOrder = _network->getLayersOrder().getContainer(); - - unsigned linearIndex = 0; - for ( unsigned i = 0; i < _network->getNumberOfLayers(); i++ ) - { - if (_layersOrder[i] == Layer::WEIGHTED_SUM) - { - // const Layer *layer = _layerOwner->getLayer( i ); - auto linearLayer = _linearLayers[linearIndex]; - auto whights = linearLayer->weight; - auto bias = linearLayer->bias; - _positiveWeights.insert( {i,torch::where( whights >= 0,whights, - torch::zeros_like( - whights ) ).to(torch::kFloat32)} ); - _negativeWeights.insert( {i,torch::where( whights <= 0,whights, - torch::zeros_like( - whights ) ).to(torch::kFloat32)} ); - _biases.insert( {i,bias.to(torch::kFloat32)} ); - linearIndex += 1; - } - if (_layersOrder[i] == Layer::MAX) - { - _maxPoolSources.insert({i, _network->getMaxPoolSources(_nlr->getLayer( i ) )}); - } - } -} - -torch::Tensor AlphaCrown::createSymbolicVariablesMatrix() -{ - // Create the identity matrix and the zero matrix - auto eye_tensor = torch::eye(_inputSize, torch::kFloat32); // Ensure float32 - auto zero_tensor = torch::zeros({_inputSize, 1}, torch::kFloat32); // Ensure float32 - - // Concatenate the two tensors horizontally (along dim=1) - return torch::cat({eye_tensor, zero_tensor}, 1); // Will be of type float32 -} - -torch::Tensor AlphaCrown::lower_ReLU_relaxation( const torch::Tensor &u, const torch::Tensor &l ) -{ - torch::Tensor mult; - mult = torch::where( u - l == 0, torch::tensor( 1.0 ), u / ( u - l ) ); - mult = torch::where( l >= 0, torch::tensor( 1.0 ), mult ); - mult = torch::where( u <= 0, 
torch::tensor( 0.0 ), mult ); - return mult.to(torch::kFloat32); -} - -std::tuple AlphaCrown::upper_ReLU_relaxation( const torch::Tensor &u, - const torch::Tensor &l ) -{ - torch::Tensor mult = torch::where( u - l == 0, torch::tensor( 1.0 ), u / ( u - l ) ); - mult = torch::where( l >= 0, torch::tensor( 1.0 ), mult ); - mult = torch::where( u <= 0, torch::tensor( 0.0 ), mult ); - - torch::Tensor add = torch::where( u - l == 0, torch::tensor( 0.0 ), -l * mult ); - add = torch::where( l >= 0, torch::tensor( 0.0 ), add ); - - return std::make_tuple( mult.to(torch::kFloat32), add.to(torch::kFloat32) ); -} -torch::Tensor AlphaCrown::getMaxOfSymbolicVariables( const torch::Tensor &matrix ) -{ - auto coefficients = matrix.index( - { torch::indexing::Slice(), torch::indexing::Slice( torch::indexing::None, -1 ) } ); - auto free_coefficients = matrix.index( { torch::indexing::Slice(), -1 } ); - - auto positive_mask = coefficients >= 0; - - torch::Tensor u_values = - torch::sum( torch::where( positive_mask, coefficients * _ubInput, coefficients * _lbInput ), - 1 ) + - free_coefficients; - - return u_values; -} - -torch::Tensor AlphaCrown::getMinOfSymbolicVariables( const torch::Tensor &matrix ) -{ - auto coefficients = matrix.index( - { torch::indexing::Slice(), torch::indexing::Slice( torch::indexing::None, -1 ) } ); - auto free_coefficients = matrix.index( { torch::indexing::Slice(), -1 } ); - - auto positive_mask = coefficients >= 0; - - torch::Tensor l_values = - torch::sum( torch::where( positive_mask, coefficients * _lbInput, coefficients * _ubInput ), - 1 ) + - free_coefficients; - - return l_values; -} - -void AlphaCrown::relaxReluLayer(unsigned layerNumber, torch::Tensor - &EQ_up, torch::Tensor &EQ_low){ - - auto u_values_EQ_up = AlphaCrown::getMaxOfSymbolicVariables(EQ_up); - auto l_values_EQ_up = AlphaCrown::getMinOfSymbolicVariables(EQ_low); - auto [upperRelaxationSlope, upperRelaxationIntercept] = - AlphaCrown::upper_ReLU_relaxation(l_values_EQ_up, 
u_values_EQ_up); - - auto u_values_EQ_low = AlphaCrown::getMaxOfSymbolicVariables(EQ_up); - auto l_values_EQ_low = AlphaCrown::getMinOfSymbolicVariables(EQ_low); - auto alphaSlope = AlphaCrown::lower_ReLU_relaxation(l_values_EQ_low, - u_values_EQ_low); - - EQ_up = EQ_up * upperRelaxationSlope.unsqueeze( 1 ); - EQ_up = AlphaCrown::addVecToLastColumnValue( EQ_up, upperRelaxationIntercept ); - EQ_low = EQ_low * alphaSlope.unsqueeze( 1 ); - - _upperRelaxationSlopes.insert({layerNumber, upperRelaxationSlope} ); - // back but insert to dict - _upperRelaxationIntercepts.insert({layerNumber, upperRelaxationIntercept} ); - _indexAlphaSlopeMap.insert( {layerNumber, _initialAlphaSlopes.size()} ); - _initialAlphaSlopes.push_back( alphaSlope ); - -} - -void AlphaCrown::relaxMaxPoolLayer(unsigned layerNumber, - torch::Tensor &EQ_up, - torch::Tensor &EQ_low) -{ - std::cout << "Relaxing MaxPool layer number: " << layerNumber << std::endl; - const auto &groups = _maxPoolSources[layerNumber]; - TORCH_CHECK(!groups.empty(), "MaxPool layer has no groups"); - - const auto cols = EQ_up.size(1); - auto next_EQ_up = torch::zeros({ (long)groups.size(), cols }, torch::kFloat32); - auto next_EQ_low = torch::zeros({ (long)groups.size(), cols }, torch::kFloat32); - - std::vector upIdx; upIdx.reserve(groups.size()); - std::vector loIdx; loIdx.reserve(groups.size()); - std::vector slopes; slopes.reserve(groups.size()); - std::vector ints; ints.reserve(groups.size()); - - for (size_t k = 0; k < groups.size(); ++k) - { - // Get per-neuron relaxation parameters & indices - auto R = relaxMaxNeuron(groups, k, EQ_up, EQ_low); - - // Build next rows: - // Upper: slope * EQ_up[R.idx_up] (+ intercept on last column) - auto up_row = EQ_up.index({ (long)R.idx_up, torch::indexing::Slice() }) * R.slope; - auto bvec = torch::full({1}, R.intercept, torch::kFloat32); - up_row = AlphaCrown::addVecToLastColumnValue(up_row, bvec); - - // Lower: copy EQ_low[R.idx_low] - auto low_row = EQ_low.index({ 
(long)R.idx_low, torch::indexing::Slice() }).clone(); - - next_EQ_up.index_put_ ( { (long)k, torch::indexing::Slice() }, up_row ); - next_EQ_low.index_put_( { (long)k, torch::indexing::Slice() }, low_row ); - - // Persist - upIdx.push_back(R.idx_up); - loIdx.push_back(R.idx_low); - slopes.push_back(R.slope); - ints.push_back(R.intercept); - } - - - _maxUpperChoice[layerNumber] = torch::from_blob( - upIdx.data(), {(long)upIdx.size()}, torch::TensorOptions().dtype(torch::kLong)).clone(); - _maxLowerChoice[layerNumber] = torch::from_blob( - loIdx.data(), {(long)loIdx.size()}, torch::TensorOptions().dtype(torch::kLong)).clone(); - _upperRelaxationSlopes[layerNumber] = - torch::from_blob(slopes.data(), {(long)slopes.size()}, torch::TensorOptions().dtype(torch::kFloat32)).clone(); - _upperRelaxationIntercepts[layerNumber] = - torch::from_blob(ints.data(), {(long)ints.size()}, torch::TensorOptions().dtype(torch::kFloat32)).clone(); - - // Advance EQs - EQ_up = next_EQ_up; - EQ_low = next_EQ_low; -} - - - - -std::pair -AlphaCrown::boundsFromEQ(const torch::Tensor &EQ, const std::vector &rows) -{ - TORCH_CHECK(!rows.empty(), "boundsFromEQ: empty rows"); - auto idx = torch::from_blob(const_cast(rows.data()), - {(long)rows.size()}, - torch::TensorOptions().dtype(torch::kLong)).clone(); - auto sub = EQ.index({ idx, torch::indexing::Slice() }); // |S| x (n+1) - auto U = getMaxOfSymbolicVariables(sub); // |S| - auto L = getMinOfSymbolicVariables(sub); // |S| - return {U, L}; -} - - - -AlphaCrown::MaxRelaxResult AlphaCrown::relaxMaxNeuron(const std::vector> &groups, - size_t k, - const torch::Tensor &EQ_up, - const torch::Tensor &EQ_low) -{ - constexpr double EPS = 1e-12; - - // Collect absolute previous-layer row indices for output k - std::vector srcRows; srcRows.reserve(16); - const auto &srcList = groups[k]; - for (const auto &ni : srcList) { - srcRows.push_back((long)ni._neuron); - } - TORCH_CHECK(!srcRows.empty(), "MaxPool group has no sources"); - - - auto [U_low, L_low] = 
boundsFromEQ(EQ_low, srcRows); - auto M_low = (U_low + L_low) / 2.0; - long j_rel_low = torch::argmax(M_low).item(); - long idx_low_abs = srcRows[(size_t)j_rel_low]; - - - auto [U_up, L_up] = boundsFromEQ(EQ_up, srcRows); - - // i = argmax U_up, j = second argmax U_up (or i if single source) - int64_t kTop = std::min(2, U_up.size(0)); - auto top2 = torch::topk(U_up, kTop, /dim=/0, /largest=/true, /sorted=/true); - auto Uidxs = std::get<1>(top2); - long i_rel = Uidxs[0].item(); - long j_rel2 = (kTop > 1) ? Uidxs[1].item() : Uidxs[0].item(); - - double li = L_up[i_rel].item(); - double ui = U_up[i_rel].item(); - double uj = U_up[j_rel2].item(); - - // Case 1: (li == max(L_up)) ∧ (li >= uj) - auto Lmax_pair = torch::max(L_up, /dim=/0); - long l_arg = std::get<1>(Lmax_pair).item(); - bool case1 = (i_rel == l_arg) && (li + EPS >= uj); - - float slope, intercept; - if (case1 || (ui - li) <= EPS) { - // Case 1 (or degenerate): y ≤ x_i → a=1, intercept=0 - slope = 1.0f; - intercept = 0.0f; - } else { - // Case 2: a=(ui-uj)/(ui-li), b=uj → store as (a*xi + (b - a*li)) - double a = (ui - uj) / (ui - li); - if (a < 0.0) a = 0.0; - if (a > 1.0) a = 1.0; - slope = (float)a; - intercept = (float)(uj - a * li); // this is what you ADD to last column - } - - long idx_up_abs = srcRows[(size_t)i_rel]; - return MaxRelaxResult{ idx_up_abs, idx_low_abs, slope, intercept }; -} - - -void AlphaCrown::computeMaxPoolLayer(unsigned layerNumber, - torch::Tensor &EQ_up, - torch::Tensor &EQ_low) -{ - auto idxUp = _maxUpperChoice.at(layerNumber); // int64 [m] - auto idxLo = _maxLowerChoice.at(layerNumber); // int64 [m] - auto a = _upperRelaxationSlopes.at(layerNumber).to(torch::kFloat32); // [m] - auto b = _upperRelaxationIntercepts.at(layerNumber).to(torch::kFloat32); // [m] - - // Select rows from current EQs - auto up_sel = EQ_up.index ({ idxUp, torch::indexing::Slice() }); // m x (n+1) - auto low_sel = EQ_low.index({ idxLo, torch::indexing::Slice() }); - - // Upper: scale + add intercept on 
last column - auto next_up = up_sel * a.unsqueeze(1); - next_up = AlphaCrown::addVecToLastColumnValue(next_up, b); - - // Lower: copy chosen rows - auto next_low = low_sel.clone(); - - EQ_up = next_up; - EQ_low = next_low; -} - - - - -void AlphaCrown::findBounds() -{ - torch::Tensor EQ_up = createSymbolicVariablesMatrix(); - torch::Tensor EQ_low = createSymbolicVariablesMatrix(); - - for ( unsigned i = 0; i < _network->getNumberOfLayers(); i++ ){ - Layer::Type layerType = _layersOrder[i]; - switch (layerType) - { - case Layer::INPUT: - break; - case Layer::WEIGHTED_SUM: - computeWeightedSumLayer(i, EQ_up, EQ_low); - break; - case Layer::RELU: - relaxReluLayer(i, EQ_up, EQ_low); - break; - case Layer::MAX: - { - relaxMaxPoolLayer( i, EQ_up, EQ_low ); - break; - } - default: - AlphaCrown::log ( "Unsupported layer type\n"); - throw MarabouError( MarabouError::DEBUGGING_ERROR ); - } - } -} - - -std::tuple AlphaCrown::computeBounds - (std::vector &alphaSlopes) -{ - torch::Tensor EQ_up = createSymbolicVariablesMatrix(); - torch::Tensor EQ_low = createSymbolicVariablesMatrix(); - for ( unsigned i = 0; i < _network->getNumberOfLayers(); i++ ) - { - auto layerType = _layersOrder[i]; - switch (layerType) - { - case Layer::INPUT: - break; - case Layer::WEIGHTED_SUM: - computeWeightedSumLayer (i, EQ_up, EQ_low); - break; - case Layer::RELU: - computeReluLayer (i, EQ_up, EQ_low, alphaSlopes); - break; - case Layer::MAX: - computeMaxPoolLayer( i, EQ_up, EQ_low ); - break; - default: - log ("Unsupported layer type\n"); - throw MarabouError (MarabouError::DEBUGGING_ERROR); - } - } - auto outputUpBound = getMaxOfSymbolicVariables(EQ_up); - auto outputLowBound = getMinOfSymbolicVariables(EQ_low); - return std::make_tuple(outputUpBound, outputLowBound); - -} - - -void AlphaCrown::computeWeightedSumLayer(unsigned i, torch::Tensor &EQ_up, torch::Tensor &EQ_low){ - //auto linearLayer = _linearLayers[i]; - auto Wi_positive = _positiveWeights[i]; - auto Wi_negative = _negativeWeights[i]; 
- auto Bi = _biases[i]; - - auto EQ_up_afterLayer = Wi_positive.mm( EQ_up ) + Wi_negative.mm( EQ_low ); - EQ_up_afterLayer = - AlphaCrown::addVecToLastColumnValue( EQ_up_afterLayer, Bi ); - - - auto EQ_low_afterLayer = Wi_positive.mm( EQ_low ) + Wi_negative.mm( EQ_up ); - EQ_low_afterLayer = - AlphaCrown::addVecToLastColumnValue(EQ_low_afterLayer, Bi ); - - EQ_up = EQ_up_afterLayer; - EQ_low = EQ_low_afterLayer; - -} - - -void AlphaCrown::computeReluLayer(unsigned layerNumber, torch::Tensor - &EQ_up, torch::Tensor &EQ_low, std::vector &alphaSlopes){ - EQ_up = EQ_up * _upperRelaxationSlopes[layerNumber].unsqueeze( 1 ); // - EQ_up = AlphaCrown::addVecToLastColumnValue( EQ_up, _upperRelaxationIntercepts[layerNumber] ); - unsigned indexInAlpha = _indexAlphaSlopeMap[layerNumber]; - EQ_low = EQ_low * alphaSlopes[indexInAlpha].unsqueeze( 1 ); -} - - - - -void AlphaCrown::updateBounds(std::vector &alphaSlopes){ - torch::Tensor EQ_up = createSymbolicVariablesMatrix(); - torch::Tensor EQ_low = createSymbolicVariablesMatrix(); - - - for ( unsigned i = 0; i < _network->getNumberOfLayers(); i++ ) - { - auto layerType = _layersOrder[i]; - switch (layerType) - { - case Layer::INPUT: - break; - case Layer::WEIGHTED_SUM: - computeWeightedSumLayer (i, EQ_up, EQ_low); - break; - case Layer::RELU: - computeReluLayer (i, EQ_up, EQ_low, alphaSlopes); - break; - case Layer::MAX: - computeMaxPoolLayer( i, EQ_up, EQ_low ); - break; - default: - log ("Unsupported layer type\n"); - throw MarabouError (MarabouError::DEBUGGING_ERROR); - } - auto upBound = getMaxOfSymbolicVariables(EQ_up); - auto lowBound = getMinOfSymbolicVariables(EQ_low); - updateBoundsOfLayer(i, upBound, lowBound); - } -} - -void AlphaCrown::updateBoundsOfLayer(unsigned layerIndex, torch::Tensor &upBounds, torch::Tensor &lowBounds) -{ - - Layer * layer = _layerOwner->getLayerIndexToLayer()[layerIndex]; - //TODO it should be: Layer *layer = _layerOwner->getLayer(layerIndex); if we added non const getter - - for (int j = 0; j 
< upBounds.size(0); j++) - { - if ( layer->neuronEliminated( j ) ) continue; - double lb_val = lowBounds[j].item(); - if ( layer->getLb( j ) < lb_val ) - { - log( Stringf( "Neuron %u_%u lower-bound updated from %f to %f", - layerIndex, - j, - layer->getLb( j ), - lb_val ) ); - - std::cout << "Neuron " << layerIndex << "_" << j - << " lower-bound updated from " << layer->getLb(j) - << " to " << lb_val << std::endl; - layer->setLb( j, lb_val ); - _layerOwner->receiveTighterBound( - Tightening( layer->neuronToVariable( j ), lb_val, Tightening::LB ) ); - } - - - auto ub_val = upBounds[j].item(); - if ( layer->getUb( j ) > ub_val ) - { - log( Stringf( "Neuron %u_%u upper-bound updated from %f to %f", - layerIndex, - j, - layer->getUb( j ), - ub_val ) ); - std::cout << "Neuron " << layerIndex << "_" << j - << " upper-bound updated from " << layer->getUb(j) - << " to " << ub_val << std::endl; - - layer->setUb( j, ub_val ); - _layerOwner->receiveTighterBound( - Tightening( layer->neuronToVariable( j ), ub_val, Tightening::UB ) ); - } - - } -} - - -void AlphaCrown::optimizeBounds( int loops ) -{ - - - std::cout << "Starting AlphaCrown run with " << loops << " optimization loops." << std::endl; - std::vector alphaSlopesForUpBound; - std::vector alphaSlopesForLowBound; - for ( auto &tensor : _initialAlphaSlopes ) - { - alphaSlopesForUpBound.push_back( tensor.detach().clone().requires_grad_(true) ); - alphaSlopesForLowBound.push_back( tensor.detach().clone().requires_grad_(true) ); - } - GDloop( loops, "max", alphaSlopesForUpBound ); - GDloop( loops, "min", alphaSlopesForLowBound ); - updateBounds( alphaSlopesForUpBound ); - updateBounds( alphaSlopesForLowBound); - std::cout << "AlphaCrown run completed." 
<< std::endl; -} - - -void AlphaCrown::GDloop( int loops, - const std::string val_to_opt, - std::vector &alphaSlopes ) -{ - torch::optim::Adam optimizer( alphaSlopes, 0.005 ); - for ( int i = 0; i < loops; i++ ) - { - optimizer.zero_grad(); - - auto [max_val, min_val] = AlphaCrown::computeBounds( alphaSlopes ); - auto loss = ( val_to_opt == "max" ) ? max_val.sum() : -min_val.sum(); - loss.backward(torch::Tensor(), true); - - optimizer.step(); - - for ( auto &tensor : alphaSlopes ) - { - tensor.clamp( 0, 1 ); - } - - log( Stringf( "Optimization loop %d completed", i + 1 ) ); - std::cout << "std Optimization loop completed " << i+1 << std::endl; - } -} - - -torch::Tensor AlphaCrown::addVecToLastColumnValue(const torch::Tensor &matrix, - const torch::Tensor &vec) -{ - auto result = matrix.clone(); - if (result.dim() == 2) - { - // add 'vec' per row to last column - result.slice(1, result.size(1) - 1, result.size(1)) += vec.unsqueeze(1); - } - else if (result.dim() == 1) - { - // add scalar to last entry (the constant term) - TORCH_CHECK(vec.numel() == 1, "1-D addVec expects scalar vec"); - result.index_put_({ result.size(0) - 1 }, - result.index({ result.size(0) - 1 }) + vec.item()); - } - else - { - TORCH_CHECK(false, "addVecToLastColumnValue expects 1-D or 2-D tensor"); - } - return result; -} - - - -void AlphaCrown::log( const String &message ) -{ - if ( GlobalConfiguration::NETWORK_LEVEL_REASONER_LOGGING ) - printf( "DeepPolyAnalysis: %s\n", message.ascii() ); -} - - -} // namespace NLR \ No newline at end of file diff --git a/src/nlr/AlphaCrown.h b/src/nlr/AlphaCrown.h deleted file mode 100644 index 6042354e99..0000000000 --- a/src/nlr/AlphaCrown.h +++ /dev/null @@ -1,100 +0,0 @@ -#ifndef ALPHACROWN_H -#define ALPHACROWN_H - -#include "CustomDNN.h" -#include "LayerOwner.h" -#include - -#undef Warning -#include - -namespace NLR { -class AlphaCrown -{ -public: - AlphaCrown( LayerOwner *layerOwner ); - - void findBounds(); - void optimizeBounds( int loops = 50 ); - 
void run() - - { - findBounds(); - updateBounds(_initialAlphaSlopes); - optimizeBounds( 2 ); - - } - -private: - LayerOwner *_layerOwner; - NetworkLevelReasoner *_nlr; - CustomDNN *_network; - void GDloop( int loops, const std::string val_to_opt, std::vector &alphaSlopes ); - std::tuple - computeBounds( std::vector &alphaSlopes ); - int _inputSize; - torch::Tensor _lbInput; - torch::Tensor _ubInput; - - std::vector _linearLayers; - std::vector _layersOrder; - std::map _positiveWeights; - std::map _negativeWeights; - std::map _biases; - std::map _indexAlphaSlopeMap; - std::map _linearIndexMap; - - std::map>> _maxPoolSources; - std::map _maxUpperChoice; // int64 [m]: absolute row index for upper bound - std::map _maxLowerChoice; // int64 [m]: absolute row index for lower bound - void relaxMaxPoolLayer(unsigned layerNumber, torch::Tensor &EQ_up, torch::Tensor &EQ_low); - void computeMaxPoolLayer(unsigned layerNumber, torch::Tensor &EQ_up, torch::Tensor &EQ_low); - - std::pair boundsFromEQ(const torch::Tensor &EQ, const std::vector &rows); - struct MaxRelaxResult { - long idx_up; // absolute row in previous EQ for the upper bound - long idx_low; // absolute row in previous EQ for the lower bound - float slope; // upper slope a - float intercept; // upper intercept (b - a*l_i) - }; - - MaxRelaxResult relaxMaxNeuron(const std::vector> &groups, - size_t k, - const torch::Tensor &EQ_up, - const torch::Tensor &EQ_low); - - std::map _upperRelaxationSlopes; - std::map _upperRelaxationIntercepts; - - std::vector _initialAlphaSlopes; - - torch::Tensor createSymbolicVariablesMatrix(); - void relaxReluLayer(unsigned layerNumber, torch::Tensor &EQ_up, torch::Tensor &EQ_low); - void computeWeightedSumLayer(unsigned i, torch::Tensor &EQ_up, torch::Tensor &EQ_low); - void computeReluLayer(unsigned i, torch::Tensor &EQ_up, torch::Tensor &EQ_low, std::vector &alphaSlopes); - - void updateBounds(std::vector &alphaSlopes); - void updateBoundsOfLayer(unsigned layerIndex, torch::Tensor 
&upBounds, torch::Tensor &lowBounds); - - torch::Tensor addVecToLastColumnValue( const torch::Tensor &matrix, - const torch::Tensor &vec ); - // { - // auto result = matrix.clone(); - // result.slice( 1, result.size( 1 ) - 1, result.size( 1 ) ) += vec.unsqueeze( 1 ); - // return result; - // } - static torch::Tensor lower_ReLU_relaxation( const torch::Tensor &u, const torch::Tensor &l ); - - static std::tuple upper_ReLU_relaxation( const torch::Tensor &u, - const torch::Tensor &l ); - - torch::Tensor getMaxOfSymbolicVariables( const torch::Tensor &matrix ); - torch::Tensor getMinOfSymbolicVariables( const torch::Tensor &matrix ); - - - void log( const String &message ); -}; -} // namespace NLR - - -#endif //ALPHACROWN_H \ No newline at end of file diff --git a/src/nlr/CMakeLists.txt b/src/nlr/CMakeLists.txt index f008a36da0..e377a638ba 100644 --- a/src/nlr/CMakeLists.txt +++ b/src/nlr/CMakeLists.txt @@ -7,7 +7,7 @@ target_include_directories(${MARABOU_LIB} PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}") target_sources(${MARABOU_TEST_LIB} PRIVATE ${SRCS}) target_include_directories(${MARABOU_TEST_LIB} PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}") -set(NETWORK_LEVEL_REASONER_TESTS_DIR "${CMAKE_CURRENT_SOURCE_DIR}/tests") +set (NETWORK_LEVEL_REASONER_TESTS_DIR "${CMAKE_CURRENT_SOURCE_DIR}/tests") macro(network_level_reasoner_add_unit_test name) set(USE_MOCK_COMMON TRUE) set(USE_MOCK_ENGINE TRUE) @@ -15,16 +15,15 @@ macro(network_level_reasoner_add_unit_test name) endmacro() network_level_reasoner_add_unit_test(DeepPolyAnalysis) -network_level_reasoner_add_unit_test(AlphaCrown) network_level_reasoner_add_unit_test(NetworkLevelReasoner) network_level_reasoner_add_unit_test(WsLayerElimination) network_level_reasoner_add_unit_test(ParallelSolver) if (${ENABLE_GUROBI}) network_level_reasoner_add_unit_test(LPRelaxation) -endif () +endif() if (${BUILD_PYTHON}) target_include_directories(${MARABOU_PY} PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}") -endif () +endif() diff --git 
a/src/nlr/IterativePropagator.h b/src/nlr/IterativePropagator.h index 0c3a593f89..7a7fba671a 100644 --- a/src/nlr/IterativePropagator.h +++ b/src/nlr/IterativePropagator.h @@ -27,7 +27,7 @@ namespace NLR { #define IterativePropagator_LOG( x, ... ) \ - MARABOU_LOG( GlobalConfiguration::PREPROCESSOR_LOGGING, "Iterativepropagator: %s\n", x ) + LOG( GlobalConfiguration::PREPROCESSOR_LOGGING, "Iterativepropagator: %s\n", x ) class IterativePropagator : public ParallelSolver { diff --git a/src/nlr/LPFormulator.h b/src/nlr/LPFormulator.h index 248fc968c8..cee0abbb65 100644 --- a/src/nlr/LPFormulator.h +++ b/src/nlr/LPFormulator.h @@ -31,7 +31,7 @@ namespace NLR { #define LPFormulator_LOG( x, ... ) \ - MARABOU_LOG( GlobalConfiguration::PREPROCESSOR_LOGGING, "LP Preprocessor: %s\n", x ) + LOG( GlobalConfiguration::PREPROCESSOR_LOGGING, "LP Preprocessor: %s\n", x ) class LPFormulator : public ParallelSolver { diff --git a/src/nlr/Layer.cpp b/src/nlr/Layer.cpp index b27949cdf9..08e6900538 100644 --- a/src/nlr/Layer.cpp +++ b/src/nlr/Layer.cpp @@ -27,13 +27,6 @@ Layer::~Layer() freeMemoryIfNeeded(); } -void Layer::setBounds( unsigned int neuron, double lower, double upper ) -{ - ASSERT( neuron < _size ); - _lb[neuron] = lower; - _ub[neuron] = upper; -} - void Layer::setLayerOwner( LayerOwner *layerOwner ) { _layerOwner = layerOwner; diff --git a/src/nlr/Layer.h b/src/nlr/Layer.h index d84237f2f1..900276eda3 100644 --- a/src/nlr/Layer.h +++ b/src/nlr/Layer.h @@ -58,7 +58,6 @@ class Layer Layer( const Layer *other ); Layer( unsigned index, Type type, unsigned size, LayerOwner *layerOwner ); ~Layer(); - void setBounds( unsigned int neuron, double lower, double upper ); void setLayerOwner( LayerOwner *layerOwner ); void addSourceLayer( unsigned layerNumber, unsigned layerSize ); diff --git a/src/nlr/NetworkLevelReasoner.cpp b/src/nlr/NetworkLevelReasoner.cpp index b8774af74d..8390a0190b 100644 --- a/src/nlr/NetworkLevelReasoner.cpp +++ b/src/nlr/NetworkLevelReasoner.cpp @@ -34,15 
+34,13 @@ #include -#define NLR_LOG( x, ... ) \ - MARABOU_LOG( GlobalConfiguration::NETWORK_LEVEL_REASONER_LOGGING, "NLR: %s\n", x ) +#define NLR_LOG( x, ... ) LOG( GlobalConfiguration::NETWORK_LEVEL_REASONER_LOGGING, "NLR: %s\n", x ) namespace NLR { NetworkLevelReasoner::NetworkLevelReasoner() : _tableau( NULL ) , _deepPolyAnalysis( nullptr ) - , _alphaCrown( nullptr ) { } @@ -129,17 +127,8 @@ void NetworkLevelReasoner::evaluate( double *input, double *output ) const Layer *outputLayer = _layerIndexToLayer[_layerIndexToLayer.size() - 1]; memcpy( output, outputLayer->getAssignment(), sizeof( double ) * outputLayer->getSize() ); } -void NetworkLevelReasoner::setBounds( unsigned layer, - unsigned int neuron, - double lower, - double upper ) -{ - ASSERT( layer < _layerIndexToLayer.size() ); - _layerIndexToLayer[layer]->setBounds( neuron, lower, upper ); -} -void NetworkLevelReasoner::concretizeInputAssignment( Map &assignment, - const double *pgdAdversarialInput ) +void NetworkLevelReasoner::concretizeInputAssignment( Map &assignment ) { Layer *inputLayer = _layerIndexToLayer[0]; ASSERT( inputLayer->getLayerType() == Layer::INPUT ); @@ -156,8 +145,6 @@ void NetworkLevelReasoner::concretizeInputAssignment( Map &ass { unsigned variable = inputLayer->neuronToVariable( index ); double value = _tableau->getValue( variable ); - if ( pgdAdversarialInput ) - value = pgdAdversarialInput[index]; input[index] = value; assignment[variable] = value; } @@ -213,8 +200,6 @@ void NetworkLevelReasoner::clearConstraintTightenings() void NetworkLevelReasoner::symbolicBoundPropagation() { - _boundTightenings.clear(); - for ( unsigned i = 0; i < _layerIndexToLayer.size(); ++i ) _layerIndexToLayer[i]->computeSymbolicBounds(); } @@ -226,15 +211,6 @@ void NetworkLevelReasoner::deepPolyPropagation() _deepPolyAnalysis->run(); } -void NetworkLevelReasoner::alphaCrown() -{ -#ifdef BUILD_TORCH - if ( _alphaCrown == nullptr ) - _alphaCrown = std::unique_ptr( new AlphaCrown( this ) ); - 
_alphaCrown->run(); -#endif -} - void NetworkLevelReasoner::lpRelaxationPropagation() { LPFormulator lpFormulator( this ); diff --git a/src/nlr/NetworkLevelReasoner.h b/src/nlr/NetworkLevelReasoner.h index 95d292b8a6..2660795be6 100644 --- a/src/nlr/NetworkLevelReasoner.h +++ b/src/nlr/NetworkLevelReasoner.h @@ -16,7 +16,6 @@ #ifndef __NetworkLevelReasoner_h__ #define __NetworkLevelReasoner_h__ -#include "AlphaCrown.h" #include "DeepPolyAnalysis.h" #include "ITableau.h" #include "Layer.h" @@ -75,14 +74,12 @@ class NetworkLevelReasoner : public LayerOwner Perform an evaluation of the network for a specific input. */ void evaluate( double *input, double *output ); - void setBounds( unsigned layer, unsigned int neuron, double lower, double upper ); /* Perform an evaluation of the network for the current input variable assignment and store the resulting variable assignment in the assignment. */ - void concretizeInputAssignment( Map &assignment, - const double *pgdAdversarialInput = nullptr ); + void concretizeInputAssignment( Map &assignment ); /* Perform a simulation of the network for a specific input @@ -127,7 +124,6 @@ class NetworkLevelReasoner : public LayerOwner void intervalArithmeticBoundPropagation(); void symbolicBoundPropagation(); void deepPolyPropagation(); - void alphaCrown(); void lpRelaxationPropagation(); void LPTighteningForOneLayer( unsigned targetIndex ); void MILPPropagation(); @@ -213,7 +209,6 @@ class NetworkLevelReasoner : public LayerOwner std::unique_ptr _deepPolyAnalysis; - std::unique_ptr _alphaCrown; void freeMemoryIfNeeded(); diff --git a/src/nlr/tests/Test_AlphaCrown.h b/src/nlr/tests/Test_AlphaCrown.h deleted file mode 100644 index 1b6c7fbd4a..0000000000 --- a/src/nlr/tests/Test_AlphaCrown.h +++ /dev/null @@ -1,211 +0,0 @@ -// -// Created by maya-swisa on 8/6/25. 
-// - -#ifndef TEST_ALPHACROWN_H -#define TEST_ALPHACROWN_H - -#include "../../engine/tests/MockTableau.h" -#include "AcasParser.h" -#include "CWAttack.h" -#include "Engine.h" -#include "InputQuery.h" -#include "Layer.h" -#include "NetworkLevelReasoner.h" -#include "PropertyParser.h" -#include "Tightening.h" - -#include -#include - -class AlphaCrownAnalysisTestSuite : public CxxTest::TestSuite -{ -public: - void setUp() - { - } - - void tearDown() - { - } - - // void testWithAttack() - // { - // #ifdef BUILD_TORCH - // - // auto networkFilePath = "../../../resources/nnet/acasxu/" - // "ACASXU_experimental_v2a_1_1.nnet"; - // auto propertyFilePath = "../../../resources/properties/" - // "acas_property_4.txt"; - // - // auto *_acasParser = new AcasParser( networkFilePath ); - // InputQuery _inputQuery; - // _acasParser->generateQuery( _inputQuery ); - // PropertyParser().parse( propertyFilePath, _inputQuery ); - // std::unique_ptr _engine = std::make_unique(); - // Options *options = Options::get(); - // options->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "alphacrown" ); - // // obtain the alpha crown proceeder - // _engine->processInputQuery( _inputQuery ); - // NLR::NetworkLevelReasoner *_networkLevelReasoner = - // _engine->getNetworkLevelReasoner(); TS_ASSERT_THROWS_NOTHING( - // _networkLevelReasoner->obtainCurrentBounds() ); std::unique_ptr cwAttack = - // std::make_unique( _networkLevelReasoner ); auto - // attackResultAfterBoundTightening = cwAttack->runAttack(); TS_ASSERT( - // !attackResultAfterBoundTightening ); delete _acasParser; - - void populateNetwork( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - - 1 R 1 R 1 1 - x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 - \ / \ / \ / - 1 \ / 1 \ / 0 \ / - \/ \/ \/ - /\ /\ /\ - 1 / \ 1 / \ 1 / \ - / \ R / \ R / 1 \ - x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - -1 -1 - - The example described in Fig. 
3 of - https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::RELU, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, 1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 0 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 1, 1 ); - - nlr.setBias( 5, 0, 1 ); - - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - 
tableau.getBoundManager().initialize( 12 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - } - - void test_alphacrown_relus() - { - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetwork( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - - // Invoke alpha crow - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.alphaCrown() ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - for ( const auto &bound : bounds ) - { - if ( bound._type == Tightening::LB ) - printf( "lower:\n" ); - else - printf( "upper:\n" ); - std::cout << "var : " << bound._variable << " bound : " << bound._value << std::endl; - } - - double large = 1000000; - nlr.setBounds( nlr.getNumberOfLayers() -1 , 1, 0.1 , large ); - std::unique_ptr cwAttack = std::make_unique( &nlr ); - auto attackResultAfterBoundTightening = cwAttack->runAttack(); - TS_ASSERT( !attackResultAfterBoundTightening ); - - nlr.setBounds( nlr.getNumberOfLayers() -1 , 1, -large , -0.1 ); - cwAttack = std::make_unique( &nlr ); - attackResultAfterBoundTightening = cwAttack->runAttack(); - TS_ASSERT( 
!attackResultAfterBoundTightening ); - - nlr.setBounds( nlr.getNumberOfLayers() -1 , 0 , -large , 0.99 ); - cwAttack = std::make_unique( &nlr ); - attackResultAfterBoundTightening = cwAttack->runAttack(); - TS_ASSERT( !attackResultAfterBoundTightening ); - - nlr.setBounds( nlr.getNumberOfLayers() -1 , 0 , 1.1 , large ); - cwAttack = std::make_unique( &nlr ); - attackResultAfterBoundTightening = cwAttack->runAttack(); - TS_ASSERT( !attackResultAfterBoundTightening ); - - - } -}; - -#endif // TEST_ALPHACROWN_H diff --git a/src/nlr/tests/Test_NetworkLevelReasoner.h b/src/nlr/tests/Test_NetworkLevelReasoner.h index 9ac9e083f3..b74cd3931c 100644 --- a/src/nlr/tests/Test_NetworkLevelReasoner.h +++ b/src/nlr/tests/Test_NetworkLevelReasoner.h @@ -7610,7 +7610,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Map assignment; - TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment, TODO ) ); + TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) ); TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 1 ) ); TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 4 ) ); @@ -7623,7 +7623,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.nextValues[0] = 1; tableau.nextValues[1] = 1; - TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment, TODO ) ); + TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) ); TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 1 ) ); TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 1 ) ); @@ -7635,7 +7635,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.nextValues[0] = 1; tableau.nextValues[1] = 2; - TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment, TODO ) ); + TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) ); TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 0 ) ); 
TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 0 ) ); diff --git a/src/query_loader/QueryLoader.h b/src/query_loader/QueryLoader.h index c8d1c0732c..195a002c47 100644 --- a/src/query_loader/QueryLoader.h +++ b/src/query_loader/QueryLoader.h @@ -19,7 +19,7 @@ #include "IQuery.h" -#define QL_LOG( x, ... ) MARABOU_LOG( GlobalConfiguration::QUERY_LOADER_LOGGING, "QueryLoader: %s\n", x ) +#define QL_LOG( x, ... ) LOG( GlobalConfiguration::QUERY_LOADER_LOGGING, "QueryLoader: %s\n", x ) class QueryLoader { diff --git a/tools/download_libtorch.sh b/tools/download_libtorch.sh deleted file mode 100755 index 90f1884b42..0000000000 --- a/tools/download_libtorch.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash -curdir=$pwd -mydir="${0%/*}" -version=$1 - -cd $mydir - -# Need to download the cxx11-abi version of libtorch in order to ensure compatability -# with boost. -# -# See https://discuss.pytorch.org/t/issues-linking-with-libtorch-c-11-abi/29510 for details. -echo "Downloading PyTorch" -wget https://download.pytorch.org/libtorch/cpu/libtorch-cxx11-abi-shared-with-deps-$version%2Bcpu.zip -O libtorch-$version.zip -q --show-progress --progress=bar:force:noscroll - -echo "Unzipping PyTorch" -unzip libtorch-$version.zip >> /dev/null -mv libtorch libtorch-$version - -cd $curdir From 064a2844bea63747f2a8b00266b9214f71fed68b Mon Sep 17 00:00:00 2001 From: Avi Porges <151055500+Avi-Porges@users.noreply.github.com> Date: Fri, 29 Aug 2025 16:00:42 +0300 Subject: [PATCH 32/33] Revert "Revert "Alpha crown"" --- CMakeLists.txt | 229 ++++---- src/basis_factorization/GaussianEliminator.h | 2 +- src/basis_factorization/LUFactorization.h | 2 +- .../SparseFTFactorization.h | 2 +- .../SparseGaussianEliminator.h | 2 +- .../SparseLUFactorization.h | 2 +- src/cegar/IncrementalLinearization.h | 2 +- src/common/Debug.h | 4 +- src/configuration/GlobalConfiguration.cpp | 11 + src/configuration/GlobalConfiguration.h | 15 + src/configuration/Options.cpp | 7 +- 
src/configuration/Options.h | 2 + src/engine/CDSmtCore.h | 2 +- src/engine/CWAttack.cpp | 256 ++++++++ src/engine/CWAttack.h | 64 ++ src/engine/CustomDNN.cpp | 302 ++++++++++ src/engine/CustomDNN.h | 119 ++++ src/engine/DantzigsRule.h | 2 +- src/engine/DnCManager.h | 2 +- src/engine/Engine.cpp | 2 + src/engine/Engine.h | 2 +- src/engine/InputQuery.cpp | 2 +- src/engine/MarabouError.h | 2 + src/engine/PLConstraintScoreTracker.h | 2 +- src/engine/ProjectedSteepestEdge.h | 2 +- src/engine/Query.cpp | 2 +- src/engine/SmtCore.h | 2 +- src/engine/SumOfInfeasibilitiesManager.h | 2 +- src/engine/SymbolicBoundTighteningType.h | 3 +- src/engine/Tableau.h | 2 +- src/input_parsers/MpsParser.h | 2 +- src/input_parsers/OnnxParser.h | 2 +- src/nlr/AlphaCrown.cpp | 546 ++++++++++++++++++ src/nlr/AlphaCrown.h | 100 ++++ src/nlr/CMakeLists.txt | 7 +- src/nlr/IterativePropagator.h | 2 +- src/nlr/LPFormulator.h | 2 +- src/nlr/Layer.cpp | 7 + src/nlr/Layer.h | 1 + src/nlr/NetworkLevelReasoner.cpp | 28 +- src/nlr/NetworkLevelReasoner.h | 7 +- src/nlr/tests/Test_AlphaCrown.h | 211 +++++++ src/nlr/tests/Test_NetworkLevelReasoner.h | 6 +- src/query_loader/QueryLoader.h | 2 +- tools/download_libtorch.sh | 19 + 45 files changed, 1855 insertions(+), 137 deletions(-) create mode 100644 src/engine/CWAttack.cpp create mode 100644 src/engine/CWAttack.h create mode 100644 src/engine/CustomDNN.cpp create mode 100644 src/engine/CustomDNN.h create mode 100644 src/nlr/AlphaCrown.cpp create mode 100644 src/nlr/AlphaCrown.h create mode 100644 src/nlr/tests/Test_AlphaCrown.h create mode 100755 tools/download_libtorch.sh diff --git a/CMakeLists.txt b/CMakeLists.txt index e55a57852a..bf6c6cbed4 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -20,9 +20,9 @@ option(RUN_SYSTEM_TEST "Run system tests on build" OFF) option(RUN_MEMORY_TEST "Run cxxtest testing with ASAN ON" ON) option(RUN_PYTHON_TEST "Run Python API tests if building with Python" OFF) option(ENABLE_GUROBI "Enable use the Gurobi optimizer" 
OFF) -option(ENABLE_OPENBLAS "Do symbolic bound tighting using blas" ON) # Not available on Windows +option(ENABLE_OPENBLAS "Do symbolic bound tighting using blas" OFF) # Not available on Windows option(CODE_COVERAGE "Add code coverage" OFF) # Available only in debug mode - +option(BUILD_TORCH "Build libtorch" ON) ################### ## Git variables ## ################### @@ -30,19 +30,19 @@ option(CODE_COVERAGE "Add code coverage" OFF) # Available only in debug mode # Get the name of the working branch execute_process( - COMMAND git rev-parse --abbrev-ref HEAD - WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} - OUTPUT_VARIABLE GIT_BRANCH - OUTPUT_STRIP_TRAILING_WHITESPACE + COMMAND git rev-parse --abbrev-ref HEAD + WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} + OUTPUT_VARIABLE GIT_BRANCH + OUTPUT_STRIP_TRAILING_WHITESPACE ) add_definitions("-DGIT_BRANCH=\"${GIT_BRANCH}\"") # Get the latest abbreviated commit hash of the working branch execute_process( - COMMAND git log -1 --format=%h - WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} - OUTPUT_VARIABLE GIT_COMMIT_HASH - OUTPUT_STRIP_TRAILING_WHITESPACE + COMMAND git log -1 --format=%h + WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} + OUTPUT_VARIABLE GIT_COMMIT_HASH + OUTPUT_STRIP_TRAILING_WHITESPACE ) add_definitions("-DGIT_COMMIT_HASH=\"${GIT_COMMIT_HASH}\"") @@ -60,9 +60,9 @@ set(COMMON_DIR "${SRC_DIR}/common") set(BASIS_DIR "${SRC_DIR}/basis_factorization") if (MSVC) - set(SCRIPT_EXTENSION bat) + set(SCRIPT_EXTENSION bat) else() - set(SCRIPT_EXTENSION sh) + set(SCRIPT_EXTENSION sh) endif() ########## @@ -85,20 +85,20 @@ add_definitions(-DBOOST_NO_CXX98_FUNCTION_BASE) set(BOOST_VERSION 1.84.0) set(BOOST_DIR "${TOOLS_DIR}/boost-${BOOST_VERSION}") if (MSVC) - set(BOOST_ROOT "${BOOST_DIR}/win_installed") - set(Boost_NAMESPACE libboost) + set(BOOST_ROOT "${BOOST_DIR}/win_installed") + set(Boost_NAMESPACE libboost) elseif (${CMAKE_SIZEOF_VOID_P} EQUAL 4 AND NOT MSVC) - set(BOOST_ROOT "${BOOST_DIR}/installed32") + set(BOOST_ROOT 
"${BOOST_DIR}/installed32") else() - set(BOOST_ROOT "${BOOST_DIR}/installed") + set(BOOST_ROOT "${BOOST_DIR}/installed") endif() set(Boost_USE_DEBUG_RUNTIME FALSE) find_package(Boost ${BOOST_VERSION} COMPONENTS program_options timer chrono thread) # Find boost if (NOT ${Boost_FOUND}) - execute_process(COMMAND ${TOOLS_DIR}/download_boost.${SCRIPT_EXTENSION} ${BOOST_VERSION}) - find_package(Boost ${BOOST_VERSION} REQUIRED COMPONENTS program_options timer chrono thread regex) + execute_process(COMMAND ${TOOLS_DIR}/download_boost.${SCRIPT_EXTENSION} ${BOOST_VERSION}) + find_package(Boost ${BOOST_VERSION} REQUIRED COMPONENTS program_options timer chrono thread regex) endif() set(LIBS_INCLUDES ${Boost_INCLUDE_DIRS}) list(APPEND LIBS ${Boost_LIBRARIES}) @@ -146,36 +146,63 @@ endif() file(GLOB DEPS_ONNX "${ONNX_DIR}/*.cc") include_directories(SYSTEM ${ONNX_DIR}) +############# +## Pytorch ## +############# + +if (${BUILD_TORCH}) + message(STATUS "Using pytorch") + if (NOT DEFINED BUILD_TORCH) + set(BUILD_TORCH $ENV{TORCH_HOME}) + add_definitions(-DBUILD_TORCH) + endif() + add_compile_definitions(BUILD_TORCH) + set(PYTORCH_VERSION 2.2.1) + find_package(Torch ${PYTORCH_VERSION} QUIET) + if (NOT Torch_FOUND) + set(PYTORCH_DIR "${TOOLS_DIR}/libtorch-${PYTORCH_VERSION}") + list(APPEND CMAKE_PREFIX_PATH ${PYTORCH_DIR}) + if(NOT EXISTS "${PYTORCH_DIR}") + execute_process(COMMAND ${TOOLS_DIR}/download_libtorch.sh ${PYTORCH_VERSION}) + set(Torch_NO_SYSTEM_PATHS ON) + endif() + set(Torch_DIR ${PYTORCH_DIR}/share/cmake/Torch) + find_package(Torch ${PYTORCH_VERSION} REQUIRED) + endif() + set(TORCH_CXX_FLAGS "-Wno-error=array-bounds") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${TORCH_CXX_FLAGS}") + list(APPEND LIBS ${TORCH_LIBRARIES}) +endif () ############ ## Gurobi ## ############ if (${ENABLE_GUROBI}) - message(STATUS "Using Gurobi for LP relaxation for bound tightening") - if (NOT DEFINED GUROBI_DIR) - set(GUROBI_DIR $ENV{GUROBI_HOME}) - endif() - 
add_compile_definitions(ENABLE_GUROBI) - - set(GUROBI_LIB1 "gurobi_c++") - set(GUROBI_LIB2 "gurobi110") - - add_library(${GUROBI_LIB1} SHARED IMPORTED) - set_target_properties(${GUROBI_LIB1} PROPERTIES IMPORTED_LOCATION ${GUROBI_DIR}/lib/libgurobi_c++.a) - list(APPEND LIBS ${GUROBI_LIB1}) - target_include_directories(${GUROBI_LIB1} INTERFACE ${GUROBI_DIR}/include/) - - add_library(${GUROBI_LIB2} SHARED IMPORTED) - - # MACOSx uses .dylib instead of .so for its Gurobi downloads. - if (APPLE) - set_target_properties(${GUROBI_LIB2} PROPERTIES IMPORTED_LOCATION ${GUROBI_DIR}/lib/libgurobi110.dylib) - else() - set_target_properties(${GUROBI_LIB2} PROPERTIES IMPORTED_LOCATION ${GUROBI_DIR}/lib/libgurobi110.so) - endif () - - list(APPEND LIBS ${GUROBI_LIB2}) - target_include_directories(${GUROBI_LIB2} INTERFACE ${GUROBI_DIR}/include/) + message(STATUS "Using Gurobi for LP relaxation for bound tightening") + if (NOT DEFINED GUROBI_DIR) + set(GUROBI_DIR $ENV{GUROBI_HOME}) + endif() + add_compile_definitions(ENABLE_GUROBI) + + set(GUROBI_LIB1 "gurobi_c++") + set(GUROBI_LIB2 "gurobi110") + + add_library(${GUROBI_LIB1} SHARED IMPORTED) + set_target_properties(${GUROBI_LIB1} PROPERTIES IMPORTED_LOCATION ${GUROBI_DIR}/lib/libgurobi_c++.a) + list(APPEND LIBS ${GUROBI_LIB1}) + target_include_directories(${GUROBI_LIB1} INTERFACE ${GUROBI_DIR}/include/) + + add_library(${GUROBI_LIB2} SHARED IMPORTED) + + # MACOSx uses .dylib instead of .so for its Gurobi downloads. 
+ if (APPLE) + set_target_properties(${GUROBI_LIB2} PROPERTIES IMPORTED_LOCATION ${GUROBI_DIR}/lib/libgurobi110.dylib) + else() + set_target_properties(${GUROBI_LIB2} PROPERTIES IMPORTED_LOCATION ${GUROBI_DIR}/lib/libgurobi110.so) + endif () + + list(APPEND LIBS ${GUROBI_LIB2}) + target_include_directories(${GUROBI_LIB2} INTERFACE ${GUROBI_DIR}/include/) endif() ############## @@ -183,30 +210,30 @@ endif() ############## if (NOT MSVC AND ${ENABLE_OPENBLAS}) - set(OPENBLAS_VERSION 0.3.19) - - set(OPENBLAS_LIB openblas) - set(OPENBLAS_DEFAULT_DIR "${TOOLS_DIR}/OpenBLAS-${OPENBLAS_VERSION}") - if (NOT OPENBLAS_DIR) - set(OPENBLAS_DIR ${OPENBLAS_DEFAULT_DIR}) - endif() - - message(STATUS "Using OpenBLAS for matrix multiplication") - add_compile_definitions(ENABLE_OPENBLAS) - if(NOT EXISTS "${OPENBLAS_DIR}/installed/lib/libopenblas.a") - message("Can't find OpenBLAS, installing. If OpenBLAS is installed please use the OPENBLAS_DIR parameter to pass the path") - if (${OPENBLAS_DIR} STREQUAL ${OPENBLAS_DEFAULT_DIR}) - message("Installing OpenBLAS") - execute_process(COMMAND ${TOOLS_DIR}/download_openBLAS.sh ${OPENBLAS_VERSION}) - else() - message(FATAL_ERROR "Can't find OpenBLAS in the supplied directory") - endif() - endif() - - add_library(${OPENBLAS_LIB} SHARED IMPORTED) - set_target_properties(${OPENBLAS_LIB} PROPERTIES IMPORTED_LOCATION ${OPENBLAS_DIR}/installed/lib/libopenblas.a) - list(APPEND LIBS ${OPENBLAS_LIB}) - target_include_directories(${OPENBLAS_LIB} INTERFACE ${OPENBLAS_DIR}/installed/include) + set(OPENBLAS_VERSION 0.3.19) + + set(OPENBLAS_LIB openblas) + set(OPENBLAS_DEFAULT_DIR "${TOOLS_DIR}/OpenBLAS-${OPENBLAS_VERSION}") + if (NOT OPENBLAS_DIR) + set(OPENBLAS_DIR ${OPENBLAS_DEFAULT_DIR}) + endif() + + message(STATUS "Using OpenBLAS for matrix multiplication") + add_compile_definitions(ENABLE_OPENBLAS) + if(NOT EXISTS "${OPENBLAS_DIR}/installed/lib/libopenblas.a") + message("Can't find OpenBLAS, installing. 
If OpenBLAS is installed please use the OPENBLAS_DIR parameter to pass the path") + if (${OPENBLAS_DIR} STREQUAL ${OPENBLAS_DEFAULT_DIR}) + message("Installing OpenBLAS") + execute_process(COMMAND ${TOOLS_DIR}/download_openBLAS.sh ${OPENBLAS_VERSION}) + else() + message(FATAL_ERROR "Can't find OpenBLAS in the supplied directory") + endif() + endif() + + add_library(${OPENBLAS_LIB} SHARED IMPORTED) + set_target_properties(${OPENBLAS_LIB} PROPERTIES IMPORTED_LOCATION ${OPENBLAS_DIR}/installed/lib/libopenblas.a) + list(APPEND LIBS ${OPENBLAS_LIB}) + target_include_directories(${OPENBLAS_LIB} INTERFACE ${OPENBLAS_DIR}/installed/include) endif() ########### @@ -239,7 +266,7 @@ set(INPUT_PARSERS_DIR input_parsers) include(ProcessorCount) ProcessorCount(CTEST_NTHREADS) if(CTEST_NTHREADS EQUAL 0) - set(CTEST_NTHREADS 1) + set(CTEST_NTHREADS 1) endif() # --------------- set build type ---------------------------- @@ -247,20 +274,20 @@ set(BUILD_TYPES Release Debug MinSizeRel RelWithDebInfo) # Set the default build type to Production if(NOT CMAKE_BUILD_TYPE) - set(CMAKE_BUILD_TYPE - Release CACHE STRING "Options are: Release Debug MinSizeRel RelWithDebInfo" FORCE) - # Provide drop down menu options in cmake-gui - set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS ${BUILD_TYPES}) + set(CMAKE_BUILD_TYPE + Release CACHE STRING "Options are: Release Debug MinSizeRel RelWithDebInfo" FORCE) + # Provide drop down menu options in cmake-gui + set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS ${BUILD_TYPES}) endif() message(STATUS "Building ${CMAKE_BUILD_TYPE} build") #-------------------------set code coverage----------------------------------# # Allow coverage only in debug mode only in gcc if(CODE_COVERAGE AND CMAKE_CXX_COMPILER_ID MATCHES "GNU" AND CMAKE_BUILD_TYPE MATCHES Debug) - message(STATUS "Building with code coverage") - set(COVERAGE_COMPILER_FLAGS "-g -O0 --coverage" CACHE INTERNAL "") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COVERAGE_COMPILER_FLAGS}") - 
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} --coverage") + message(STATUS "Building with code coverage") + set(COVERAGE_COMPILER_FLAGS "-g -O0 --coverage" CACHE INTERNAL "") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COVERAGE_COMPILER_FLAGS}") + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} --coverage") endif() # We build a static library that is the core of the project, the link it to the @@ -273,7 +300,7 @@ set(MARABOU_EXE Marabou${CMAKE_EXECUTABLE_SUFFIX}) add_executable(${MARABOU_EXE} "${ENGINE_DIR}/main.cpp") set(MARABOU_EXE_PATH "${BIN_DIR}/${MARABOU_EXE}") add_custom_command(TARGET ${MARABOU_EXE} POST_BUILD - COMMAND ${CMAKE_COMMAND} -E copy $ ${MARABOU_EXE_PATH} ) + COMMAND ${CMAKE_COMMAND} -E copy $ ${MARABOU_EXE_PATH} ) set(MPS_PARSER_PATH "${BIN_DIR}/${MPS_PARSER}") @@ -314,10 +341,10 @@ find_package(Threads REQUIRED) list(APPEND LIBS Threads::Threads) if (BUILD_STATIC_MARABOU) - # build a static library - target_link_libraries(${MARABOU_LIB} ${LIBS} -static) + # build a static library + target_link_libraries(${MARABOU_LIB} ${LIBS} -static) else() - target_link_libraries(${MARABOU_LIB} ${LIBS}) + target_link_libraries(${MARABOU_LIB} ${LIBS}) endif() target_include_directories(${MARABOU_LIB} PRIVATE ${LIBS_INCLUDES}) @@ -347,10 +374,10 @@ endif() set(PYTHON32 FALSE) if(${BUILD_PYTHON}) execute_process(COMMAND "${PYTHON_EXECUTABLE}" "-c" - "import struct; print(struct.calcsize('@P'));" - RESULT_VARIABLE _PYTHON_SUCCESS - OUTPUT_VARIABLE PYTHON_SIZEOF_VOID_P - ERROR_VARIABLE _PYTHON_ERROR_VALUE) + "import struct; print(struct.calcsize('@P'));" + RESULT_VARIABLE _PYTHON_SUCCESS + OUTPUT_VARIABLE PYTHON_SIZEOF_VOID_P + ERROR_VARIABLE _PYTHON_ERROR_VALUE) # message("PYTHON SIZEOF VOID p ${PYTHON_SIZEOF_VOID_P}") if (PYTHON_SIZEOF_VOID_P EQUAL 4 AND NOT ${FORCE_PYTHON_BUILD}) set(PYTHON32 TRUE) @@ -370,8 +397,8 @@ endif() # Actually build Python if (${BUILD_PYTHON}) - set(PYBIND11_VERSION 2.10.4) - set(PYBIND11_DIR 
"${TOOLS_DIR}/pybind11-${PYBIND11_VERSION}") + set(PYBIND11_VERSION 2.10.4) + set(PYBIND11_DIR "${TOOLS_DIR}/pybind11-${PYBIND11_VERSION}") # This is suppose to set the PYTHON_EXECUTABLE variable # First try to find the default python version @@ -383,7 +410,7 @@ if (${BUILD_PYTHON}) if (NOT EXISTS ${PYBIND11_DIR}) message("didnt find pybind, getting it") - execute_process(COMMAND ${TOOLS_DIR}/download_pybind11.${SCRIPT_EXTENSION} ${PYBIND11_VERSION}) + execute_process(COMMAND ${TOOLS_DIR}/download_pybind11.${SCRIPT_EXTENSION} ${PYBIND11_VERSION}) endif() add_subdirectory(${PYBIND11_DIR}) @@ -394,7 +421,7 @@ if (${BUILD_PYTHON}) target_include_directories(${MARABOU_PY} PRIVATE ${LIBS_INCLUDES}) set_target_properties(${MARABOU_PY} PROPERTIES - LIBRARY_OUTPUT_DIRECTORY ${PYTHON_LIBRARY_OUTPUT_DIRECTORY}) + LIBRARY_OUTPUT_DIRECTORY ${PYTHON_LIBRARY_OUTPUT_DIRECTORY}) if(NOT MSVC) target_compile_options(${MARABOU_LIB} PRIVATE -fPIC ${RELEASE_FLAGS}) endif() @@ -425,8 +452,8 @@ target_compile_options(${MARABOU_TEST_LIB} PRIVATE ${CXXTEST_FLAGS}) add_custom_target(build-tests ALL) add_custom_target(check - COMMAND ctest --output-on-failure -j${CTEST_NTHREADS} $$ARGS - DEPENDS build-tests build_input_parsers ${MARABOU_EXE}) + COMMAND ctest --output-on-failure -j${CTEST_NTHREADS} $$ARGS + DEPENDS build-tests build_input_parsers ${MARABOU_EXE}) # Decide which tests to run and execute set(TESTS_TO_RUN "") @@ -452,33 +479,33 @@ if (NOT ${TESTS_TO_RUN} STREQUAL "") # make ctest verbose set(CTEST_OUTPUT_ON_FAILURE 1) add_custom_command( - TARGET build-tests - POST_BUILD - COMMAND ctest --output-on-failure -L "\"(${TESTS_TO_RUN})\"" -j${CTEST_NTHREADS} $$ARGS + TARGET build-tests + POST_BUILD + COMMAND ctest --output-on-failure -L "\"(${TESTS_TO_RUN})\"" -j${CTEST_NTHREADS} $$ARGS ) endif() if (${BUILD_PYTHON} AND ${RUN_PYTHON_TEST}) if (MSVC) add_custom_command( - TARGET build-tests - POST_BUILD - COMMAND cp ${PYTHON_API_DIR}/Release/* ${PYTHON_API_DIR} + TARGET build-tests + 
POST_BUILD + COMMAND cp ${PYTHON_API_DIR}/Release/* ${PYTHON_API_DIR} ) endif() add_custom_command( - TARGET build-tests - POST_BUILD - COMMAND ${PYTHON_EXECUTABLE} -m pytest ${PYTHON_API_DIR}/test + TARGET build-tests + POST_BUILD + COMMAND ${PYTHON_EXECUTABLE} -m pytest ${PYTHON_API_DIR}/test ) endif() # Add the input parsers add_custom_target(build_input_parsers) add_dependencies(build_input_parsers ${MPS_PARSER} ${ACAS_PARSER} - ${BERKELEY_PARSER}) + ${BERKELEY_PARSER}) add_subdirectory(${SRC_DIR}) add_subdirectory(${TOOLS_DIR}) -add_subdirectory(${REGRESS_DIR}) +add_subdirectory(${REGRESS_DIR}) \ No newline at end of file diff --git a/src/basis_factorization/GaussianEliminator.h b/src/basis_factorization/GaussianEliminator.h index 2177021e55..6f93605ff6 100644 --- a/src/basis_factorization/GaussianEliminator.h +++ b/src/basis_factorization/GaussianEliminator.h @@ -19,7 +19,7 @@ #include "LUFactors.h" #define GAUSSIAN_LOG( x, ... ) \ - LOG( GlobalConfiguration::GAUSSIAN_ELIMINATION_LOGGING, "GaussianEliminator: %s\n", x ) + MARABOU_LOG( GlobalConfiguration::GAUSSIAN_ELIMINATION_LOGGING, "GaussianEliminator: %s\n", x ) class GaussianEliminator { diff --git a/src/basis_factorization/LUFactorization.h b/src/basis_factorization/LUFactorization.h index 400d53eb88..ae4befd5e7 100644 --- a/src/basis_factorization/LUFactorization.h +++ b/src/basis_factorization/LUFactorization.h @@ -22,7 +22,7 @@ #include "List.h" #define LU_FACTORIZATION_LOG( x, ... 
) \ - LOG( GlobalConfiguration::BASIS_FACTORIZATION_LOGGING, "LUFactorization: %s\n", x ) + MARABOU_LOG( GlobalConfiguration::BASIS_FACTORIZATION_LOGGING, "LUFactorization: %s\n", x ) class EtaMatrix; class LPElement; diff --git a/src/basis_factorization/SparseFTFactorization.h b/src/basis_factorization/SparseFTFactorization.h index 906f5b205e..b885cab4b2 100644 --- a/src/basis_factorization/SparseFTFactorization.h +++ b/src/basis_factorization/SparseFTFactorization.h @@ -24,7 +24,7 @@ #include "Statistics.h" #define SFTF_FACTORIZATION_LOG( x, ... ) \ - LOG( GlobalConfiguration::BASIS_FACTORIZATION_LOGGING, "SparseFTFactorization: %s\n", x ) + MARABOU_LOG( GlobalConfiguration::BASIS_FACTORIZATION_LOGGING, "SparseFTFactorization: %s\n", x ) /* This class performs a sparse FT factorization of a given matrix. diff --git a/src/basis_factorization/SparseGaussianEliminator.h b/src/basis_factorization/SparseGaussianEliminator.h index 48078b42d9..fd6a061dce 100644 --- a/src/basis_factorization/SparseGaussianEliminator.h +++ b/src/basis_factorization/SparseGaussianEliminator.h @@ -23,7 +23,7 @@ #include "Statistics.h" #define SGAUSSIAN_LOG( x, ... ) \ - LOG( GlobalConfiguration::GAUSSIAN_ELIMINATION_LOGGING, "SparseGaussianEliminator: %s\n", x ) + MARABOU_LOG( GlobalConfiguration::GAUSSIAN_ELIMINATION_LOGGING, "SparseGaussianEliminator: %s\n", x ) class SparseGaussianEliminator { diff --git a/src/basis_factorization/SparseLUFactorization.h b/src/basis_factorization/SparseLUFactorization.h index 7b925fec48..7d75ebe3c4 100644 --- a/src/basis_factorization/SparseLUFactorization.h +++ b/src/basis_factorization/SparseLUFactorization.h @@ -22,7 +22,7 @@ #include "SparseLUFactors.h" #define BASIS_FACTORIZATION_LOG( x, ... 
) \ - LOG( GlobalConfiguration::BASIS_FACTORIZATION_LOGGING, "SparseLUFactorization: %s\n", x ) + MARABOU_LOG( GlobalConfiguration::BASIS_FACTORIZATION_LOGGING, "SparseLUFactorization: %s\n", x ) class EtaMatrix; class LPElement; diff --git a/src/cegar/IncrementalLinearization.h b/src/cegar/IncrementalLinearization.h index ddf5b00fcf..9260e7e57c 100644 --- a/src/cegar/IncrementalLinearization.h +++ b/src/cegar/IncrementalLinearization.h @@ -20,7 +20,7 @@ #include "Query.h" #define INCREMENTAL_LINEARIZATION_LOG( x, ... ) \ - LOG( GlobalConfiguration::CEGAR_LOGGING, "IncrementalLinearization: %s\n", x ) + MARABOU_LOG( GlobalConfiguration::CEGAR_LOGGING, "IncrementalLinearization: %s\n", x ) class Engine; class IQuery; diff --git a/src/common/Debug.h b/src/common/Debug.h index 55dfda4a92..3d0cb9554f 100644 --- a/src/common/Debug.h +++ b/src/common/Debug.h @@ -27,7 +27,7 @@ #endif #ifndef NDEBUG -#define LOG( x, f, y, ... ) \ +#define MARABOU_LOG( x, f, y, ... ) \ { \ if ( ( x ) ) \ { \ @@ -35,7 +35,7 @@ } \ } #else -#define LOG( x, f, y, ... ) \ +#define MARABOU_LOG( x, f, y, ... 
) \ { \ } #endif diff --git a/src/configuration/GlobalConfiguration.cpp b/src/configuration/GlobalConfiguration.cpp index f9a074f076..6cbb8c9bb1 100644 --- a/src/configuration/GlobalConfiguration.cpp +++ b/src/configuration/GlobalConfiguration.cpp @@ -119,6 +119,15 @@ const bool GlobalConfiguration::WRITE_JSON_PROOF = false; const unsigned GlobalConfiguration::BACKWARD_BOUND_PROPAGATION_DEPTH = 3; const unsigned GlobalConfiguration::MAX_ROUNDS_OF_BACKWARD_ANALYSIS = 10; +const GlobalConfiguration::PdgBoundType GlobalConfiguration::PGD_BOUND_TYPE = + GlobalConfiguration::ATTACK_INPUT; +const unsigned GlobalConfiguration::PGD_DEFAULT_NUM_ITER = 10; +const unsigned GlobalConfiguration::PGD_NUM_RESTARTS = 4; +const double GlobalConfiguration::ATTACK_INPUT_RANGE = 1000; +const unsigned GlobalConfiguration::CW_DEFAULT_ITERS = 1000; +const unsigned GlobalConfiguration::CW_NUM_RESTARTS = 4; +const double GlobalConfiguration::CW_LR = 1e-2; + #ifdef ENABLE_GUROBI const unsigned GlobalConfiguration::GUROBI_NUMBER_OF_THREADS = 1; const bool GlobalConfiguration::GUROBI_LOGGING = false; @@ -143,6 +152,8 @@ const bool GlobalConfiguration::ONNX_PARSER_LOGGING = false; const bool GlobalConfiguration::SOI_LOGGING = false; const bool GlobalConfiguration::SCORE_TRACKER_LOGGING = false; const bool GlobalConfiguration::CEGAR_LOGGING = false; +const bool GlobalConfiguration::CUSTOM_DNN_LOGGING = true; +const bool GlobalConfiguration::CW_LOGGING = true; const bool GlobalConfiguration::USE_SMART_FIX = false; const bool GlobalConfiguration::USE_LEAST_FIX = false; diff --git a/src/configuration/GlobalConfiguration.h b/src/configuration/GlobalConfiguration.h index 3104edf79d..b34a508a7e 100644 --- a/src/configuration/GlobalConfiguration.h +++ b/src/configuration/GlobalConfiguration.h @@ -263,6 +263,19 @@ class GlobalConfiguration */ static const unsigned MAX_ROUNDS_OF_BACKWARD_ANALYSIS; + enum PdgBoundType { + ATTACK_INPUT = 0, + ATTACK_OUTPUT = 1 + }; + static const PdgBoundType 
PGD_BOUND_TYPE; + static const unsigned PGD_DEFAULT_NUM_ITER; + static const unsigned PGD_NUM_RESTARTS; + static const double ATTACK_INPUT_RANGE; + + static const unsigned CW_DEFAULT_ITERS; + static const unsigned CW_NUM_RESTARTS; + static const double CW_LR; + #ifdef ENABLE_GUROBI /* The number of threads Gurobi spawns @@ -292,6 +305,8 @@ class GlobalConfiguration static const bool SOI_LOGGING; static const bool SCORE_TRACKER_LOGGING; static const bool CEGAR_LOGGING; + static const bool CUSTOM_DNN_LOGGING; + static const bool CW_LOGGING; }; #endif // __GlobalConfiguration_h__ diff --git a/src/configuration/Options.cpp b/src/configuration/Options.cpp index 657ddb6b7c..4e05fc295a 100644 --- a/src/configuration/Options.cpp +++ b/src/configuration/Options.cpp @@ -71,6 +71,7 @@ void Options::initializeDefaultValues() _intOptions[SEED] = 1; _intOptions[NUM_BLAS_THREADS] = 1; _intOptions[NUM_CONSTRAINTS_TO_REFINE_INC_LIN] = 30; + _intOptions[ATTACK_TIMEOUT] = 60; /* Float options @@ -91,7 +92,7 @@ void Options::initializeDefaultValues() _stringOptions[SUMMARY_FILE] = ""; _stringOptions[SPLITTING_STRATEGY] = "auto"; _stringOptions[SNC_SPLITTING_STRATEGY] = "auto"; - _stringOptions[SYMBOLIC_BOUND_TIGHTENING_TYPE] = "deeppoly"; + _stringOptions[SYMBOLIC_BOUND_TIGHTENING_TYPE] = "alphacrown"; _stringOptions[MILP_SOLVER_BOUND_TIGHTENING_TYPE] = "none"; _stringOptions[QUERY_DUMP_FILE] = ""; _stringOptions[IMPORT_ASSIGNMENT_FILE_PATH] = "assignment.txt"; @@ -189,10 +190,12 @@ SymbolicBoundTighteningType Options::getSymbolicBoundTighteningType() const return SymbolicBoundTighteningType::SYMBOLIC_BOUND_TIGHTENING; else if ( strategyString == "deeppoly" ) return SymbolicBoundTighteningType::DEEP_POLY; + else if (strategyString == "alphacrown") + return SymbolicBoundTighteningType::ALPHA_CROWN; else if ( strategyString == "none" ) return SymbolicBoundTighteningType::NONE; else - return SymbolicBoundTighteningType::DEEP_POLY; + return SymbolicBoundTighteningType::ALPHA_CROWN; } 
MILPSolverBoundTighteningType Options::getMILPSolverBoundTighteningType() const diff --git a/src/configuration/Options.h b/src/configuration/Options.h index 4dd4f31fd5..3afec75a56 100644 --- a/src/configuration/Options.h +++ b/src/configuration/Options.h @@ -154,6 +154,8 @@ class Options // The strategy used for initializing the soi SOI_INITIALIZATION_STRATEGY, + // Adversarial attack timeout in seconds + ATTACK_TIMEOUT, // The procedure/solver for solving the LP LP_SOLVER }; diff --git a/src/engine/CDSmtCore.h b/src/engine/CDSmtCore.h index 8cbeebc5c0..a352f2c6ec 100644 --- a/src/engine/CDSmtCore.h +++ b/src/engine/CDSmtCore.h @@ -76,7 +76,7 @@ #include "context/cdlist.h" #include "context/context.h" -#define SMT_LOG( x, ... ) LOG( GlobalConfiguration::SMT_CORE_LOGGING, "CDSmtCore: %s\n", x ) +#define SMT_LOG( x, ... ) MARABOU_LOG( GlobalConfiguration::SMT_CORE_LOGGING, "CDSmtCore: %s\n", x ) class EngineState; class Engine; diff --git a/src/engine/CWAttack.cpp b/src/engine/CWAttack.cpp new file mode 100644 index 0000000000..27b3c5744c --- /dev/null +++ b/src/engine/CWAttack.cpp @@ -0,0 +1,256 @@ +#include "CWAttack.h" + +#ifdef BUILD_TORCH + +CWAttack::CWAttack( NLR::NetworkLevelReasoner *networkLevelReasoner ) + : networkLevelReasoner( networkLevelReasoner ) + , _device( torch::cuda::is_available() ? 
torch::kCUDA : torch::kCPU ) + , _model( std::make_unique( networkLevelReasoner ) ) + , _iters( GlobalConfiguration::CW_DEFAULT_ITERS ) + , _restarts( GlobalConfiguration::CW_NUM_RESTARTS ) + , _specLossWeight( 1e-2 ) + , _adversarialInput( nullptr ) + , _adversarialOutput( nullptr ) +{ + _inputSize = _model->getLayerSizes().first(); + getBounds( _inputBounds, GlobalConfiguration::PdgBoundType::ATTACK_INPUT ); + getBounds( _outputBounds, GlobalConfiguration::PdgBoundType::ATTACK_OUTPUT ); + + _inputLb = torch::tensor( _inputBounds.first.getContainer(), torch::kFloat32 ).to( _device ); + _inputUb = torch::tensor( _inputBounds.second.getContainer(), torch::kFloat32 ).to( _device ); + + auto vars = generateSampleAndEpsilon(); + _x0 = vars.first; +} + +CWAttack::~CWAttack() +{ + if ( _adversarialInput ) + delete[] _adversarialInput; + if ( _adversarialOutput ) + delete[] _adversarialOutput; +} + +bool CWAttack::runAttack() +{ + CW_LOG( "-----Starting CW attack-----" ); + auto adversarial = findAdvExample(); + auto advInput = adversarial.first.to( torch::kDouble ); + auto advPred = adversarial.second.to( torch::kDouble ); + + bool isFooled = + isWithinBounds( advInput, _inputBounds ) && isWithinBounds( advPred, _outputBounds ); + + auto inputPtr = advInput.data_ptr(); + auto predPtr = advPred.data_ptr(); + size_t outSize = advPred.size( 1 ); + + if ( isFooled ) + { + _adversarialInput = new double[_inputSize]; + _adversarialOutput = new double[outSize]; + std::copy( inputPtr, inputPtr + _inputSize, _adversarialInput ); + std::copy( predPtr, predPtr + outSize, _adversarialOutput ); + } + CW_LOG( "Input Lower Bounds : " ); + for ( auto &bound : _inputBounds.first.getContainer() ) + printValue( bound ); + CW_LOG( "Input Upper Bounds : " ); + for ( auto &bound : _inputBounds.second.getContainer() ) + printValue( bound ); + + CW_LOG( "Adversarial Input:" ); + for ( int i = 0; i < advInput.numel(); ++i ) + { + CW_LOG( Stringf( "x%u=%.3lf", i, inputPtr[i] ).ascii() ); + } + 
CW_LOG( "Output Lower Bounds : " ); + for ( auto &bound : _outputBounds.first.getContainer() ) + printValue( bound ); + CW_LOG( "Output Upper Bounds : " ); + for ( auto &bound : _outputBounds.second.getContainer() ) + printValue( bound ); + + CW_LOG( "Adversarial Prediction: " ); + for ( int i = 0; i < advPred.numel(); ++i ) + { + CW_LOG( Stringf( "y%u=%.3lf", i, predPtr[i] ).ascii() ); + } + + + if ( isFooled ) + { + CW_LOG( "Model fooled: Yes \n ------ CW Attack Succeed ------\n" ); + } + else + CW_LOG( "Model fooled: No \n ------ CW Attack Failed ------\n" ); + // Concretize assignments if attack succeeded + if ( _adversarialInput ) + networkLevelReasoner->concretizeInputAssignment( _assignments, _adversarialInput ); + return isFooled; +} + + +void CWAttack::getBounds( std::pair, Vector> &bounds, signed type ) const +{ + unsigned layerIndex = type == GlobalConfiguration::PdgBoundType::ATTACK_INPUT + ? 0 + : networkLevelReasoner->getNumberOfLayers() - 1; + const NLR::Layer *layer = networkLevelReasoner->getLayer( layerIndex ); + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + bounds.first.append( layer->getLb( i ) ); + bounds.second.append( layer->getUb( i ) ); + } +} + +std::pair CWAttack::generateSampleAndEpsilon() +{ + Vector sample( _inputSize, 0.0 ), eps( _inputSize, 0.0 ); + for ( unsigned i = 0; i < _inputSize; ++i ) + { + double lo = _inputBounds.first.get( i ), hi = _inputBounds.second.get( i ); + if ( std::isfinite( lo ) && std::isfinite( hi ) ) + { + sample[i] = 0.5 * ( lo + hi ); + eps[i] = 0.5 * ( hi - lo ); + } + else + { + sample[i] = 0.0; + eps[i] = GlobalConfiguration::ATTACK_INPUT_RANGE; + } + } + auto s = torch::tensor( sample.getContainer(), torch::kFloat32 ).unsqueeze( 0 ).to( _device ); + auto e = torch::tensor( eps.getContainer(), torch::kFloat32 ).to( _device ); + return { s, e }; +} + +torch::Tensor CWAttack::calculateLoss( const torch::Tensor &pred ) +{ + auto lb = torch::tensor( _outputBounds.first.data(), torch::kFloat32 ).to( 
_device ); + auto ub = torch::tensor( _outputBounds.second.data(), torch::kFloat32 ).to( _device ); + auto ubv = torch::sum( torch::square( torch::relu( pred - ub ) ) ); + auto lbv = torch::sum( torch::square( torch::relu( lb - pred ) ) ); + return ( ubv + lbv ).to( _device ); +} + +std::pair CWAttack::findAdvExample() +{ + torch::Tensor bestAdv, bestPred; + double bestL2 = std::numeric_limits::infinity(); + double timeoutForAttack = ( Options::get()->getInt( Options::ATTACK_TIMEOUT ) == 0 + ? FloatUtils::infinity() + : Options::get()->getInt( Options::ATTACK_TIMEOUT ) ); + CW_LOG( Stringf( "Adversarial attack timeout set to %f\n", timeoutForAttack ).ascii() ); + timespec startTime = TimeUtils::sampleMicro(); + torch::Tensor advExample; + for ( unsigned r = 0; r < _restarts; ++r ) + { + unsigned long timePassed = TimeUtils::timePassed( startTime, TimeUtils::sampleMicro() ); + if ( static_cast( timePassed ) / MICROSECONDS_TO_SECONDS > timeoutForAttack ) + { + throw MarabouError( MarabouError::TIMEOUT, "Attack failed due to timeout" ); + } + torch::Tensor delta = torch::zeros_like( _x0, torch::requires_grad() ).to( _device ); + torch::optim::Adam optimizer( { delta }, + torch::optim::AdamOptions( GlobalConfiguration::CW_LR ) ); + + for ( unsigned it = 0; it < _iters; ++it ) + { + torch::Tensor prevExample = advExample; + advExample = ( _x0 + delta ).clamp( _inputLb, _inputUb ); + // Skip the equality check on the first iteration + if ( ( it > 0 && prevExample.defined() && advExample.equal( prevExample ) ) || + !isWithinBounds( advExample, _inputBounds ) ) + break; + auto pred = _model->forward( advExample ); + auto specLoss = calculateLoss( pred ); + auto l2norm = torch::sum( torch::pow( advExample - _x0, 2 ) ); + auto loss = l2norm + _specLossWeight * specLoss; + + optimizer.zero_grad(); + loss.backward(); + optimizer.step(); + + if ( specLoss.item() == 0.0 ) + { + double curL2 = l2norm.item(); + if ( curL2 < bestL2 ) + { + bestL2 = curL2; + bestAdv = 
advExample.detach(); + bestPred = pred.detach(); + } + } + } + } + + if ( !bestAdv.defined() ) + { + bestAdv = ( _x0 + torch::zeros_like( _x0 ) ).clamp( _inputLb, _inputUb ); + bestPred = _model->forward( bestAdv ); + } + + return { bestAdv, bestPred }; +} + +bool CWAttack::isWithinBounds( const torch::Tensor &sample, + const std::pair, Vector> &bounds ) +{ + torch::Tensor flatInput = sample.view( { -1 } ); + if ( flatInput.numel() != (int)bounds.first.size() || + flatInput.numel() != (int)bounds.second.size() ) + throw std::runtime_error( "Mismatch in sizes of input and bounds" ); + + for ( int64_t i = 0; i < flatInput.size( 0 ); ++i ) + { + double v = flatInput[i].item(); + double lo = bounds.first.get( i ), hi = bounds.second.get( i ); + if ( std::isinf( lo ) && std::isinf( hi ) ) + continue; + if ( std::isinf( lo ) ) + { + if ( v > hi ) + return false; + } + else if ( std::isinf( hi ) ) + { + if ( v < lo ) + return false; + } + else if ( v < lo || v > hi ) + return false; + } + return true; +} + +double CWAttack::getAssignment( int index ) +{ + return _assignments[index]; +} + +void CWAttack::printValue( double value ) +{ + if ( std::isinf( value ) ) + { + if ( value < 0 ) + { + CW_LOG( "-inf" ); + } + else + { + CW_LOG( "inf" ); + } + } + else if ( std::isnan( value ) ) + { + CW_LOG( "nan" ); + } + else + { + CW_LOG( Stringf( "%.3lf", value ).ascii() ); + } +} + +#endif // BUILD_TORCH diff --git a/src/engine/CWAttack.h b/src/engine/CWAttack.h new file mode 100644 index 0000000000..742c99fe7b --- /dev/null +++ b/src/engine/CWAttack.h @@ -0,0 +1,64 @@ +#ifndef __CWATTACK_H__ +#define __CWATTACK_H__ +#ifdef BUILD_TORCH + +#include "CustomDNN.h" +#include "InputQuery.h" +#include "Options.h" + +#include +#include +#include + +#define CW_LOG( x, ... ) MARABOU_LOG( GlobalConfiguration::CW_LOGGING, "CW: %s\n", x ) + +/** + CWAttack implements the Carlini–Wagner L2 adversarial attack, + optimizing min ||δ||_2^2 + c * specLoss(x0 + δ) with Adam and restarts. 
+*/ +class CWAttack +{ +public: + enum { + MICROSECONDS_TO_SECONDS = 1000000 + }; + CWAttack( NLR::NetworkLevelReasoner *networkLevelReasoner ); + ~CWAttack(); + + /** + Runs the CW attack. Returns true if a valid adversarial example is found. + */ + bool runAttack(); + double getAssignment( int index ); + +private: + NLR::NetworkLevelReasoner *networkLevelReasoner; + torch::Device _device; + std::unique_ptr _model; + + unsigned _inputSize; + unsigned _iters; + unsigned _restarts; + double _specLossWeight; + + std::pair, Vector> _inputBounds; + std::pair, Vector> _outputBounds; + torch::Tensor _inputLb; + torch::Tensor _inputUb; + torch::Tensor _x0; + + Map _assignments; + double *_adversarialInput; + double *_adversarialOutput; + + void getBounds( std::pair, Vector> &bounds, signed type ) const; + std::pair generateSampleAndEpsilon(); + torch::Tensor calculateLoss( const torch::Tensor &predictions ); + std::pair findAdvExample(); + static bool isWithinBounds( const torch::Tensor &sample, + const std::pair, Vector> &bounds ); + static void printValue( double value ); +}; + +#endif // BUILD_TORCH +#endif // __CWATTACK_H__ diff --git a/src/engine/CustomDNN.cpp b/src/engine/CustomDNN.cpp new file mode 100644 index 0000000000..3fbdb8d186 --- /dev/null +++ b/src/engine/CustomDNN.cpp @@ -0,0 +1,302 @@ +#include "NetworkLevelReasoner.h" +#include "CustomDNN.h" +#ifdef BUILD_TORCH +namespace NLR { +CustomRelu::CustomRelu( const NetworkLevelReasoner *nlr, unsigned layerIndex ) + : _networkLevelReasoner( nlr ) + , _reluLayerIndex( layerIndex ) +{ +} + +torch::Tensor CustomRelu::forward( torch::Tensor x ) const +{ + return CustomReluFunction::apply( x, _networkLevelReasoner, _reluLayerIndex ); +} + +CustomMaxPool::CustomMaxPool( const NetworkLevelReasoner *nlr, unsigned layerIndex ) + : _networkLevelReasoner( nlr ) + , _maxLayerIndex( layerIndex ) +{ +} + +torch::Tensor CustomMaxPool::forward( torch::Tensor x ) const +{ + return CustomMaxPoolFunction::apply( x, 
_networkLevelReasoner, _maxLayerIndex ); +} + +void CustomDNN::setWeightsAndBiases( torch::nn::Linear &linearLayer, + const Layer *layer, + unsigned sourceLayer, + unsigned inputSize, + unsigned outputSize ) +{ + Vector> layerWeights( outputSize, Vector( inputSize ) ); + Vector layerBiases( outputSize ); + + // Fetch weights and biases from networkLevelReasoner + for ( unsigned j = 0; j < outputSize; j++ ) + { + for ( unsigned k = 0; k < inputSize; k++ ) + { + double weight_value = layer->getWeight( sourceLayer, k, j ); + layerWeights[j][k] = static_cast( weight_value ); + } + double bias_value = layer->getBias( j ); + layerBiases[j] = static_cast( bias_value ); + } + + Vector flattenedWeights; + for ( const auto &weight : layerWeights ) + { + for ( const auto &w : weight ) + { + flattenedWeights.append( w ); + } + } + + torch::Tensor weightTensor = torch::tensor( flattenedWeights.getContainer(), torch::kFloat ) + .view( { outputSize, inputSize } ); + torch::Tensor biasTensor = torch::tensor( layerBiases.getContainer(), torch::kFloat ); + + torch::NoGradGuard no_grad; + linearLayer->weight.set_( weightTensor ); + linearLayer->bias.set_( biasTensor ); +} + +void CustomDNN::weightedSum( unsigned i, const Layer *layer ) +{ + unsigned sourceLayer = i - 1; + const Layer *prevLayer = _networkLevelReasoner->getLayer( sourceLayer ); + unsigned inputSize = prevLayer->getSize(); + unsigned outputSize = layer->getSize(); + + if ( outputSize > 0 ) + { + auto linearLayer = torch::nn::Linear( torch::nn::LinearOptions( inputSize, outputSize ) ); + _linearLayers.append( linearLayer ); + + setWeightsAndBiases( linearLayer, layer, sourceLayer, inputSize, outputSize ); + + register_module( "linear" + std::to_string( i ), linearLayer ); + } +} + + +CustomDNN::CustomDNN( const NetworkLevelReasoner *nlr ) +{ + CUSTOM_DNN_LOG( "----- Construct Custom Network -----" ); + _networkLevelReasoner = nlr; + _numberOfLayers = _networkLevelReasoner->getNumberOfLayers(); + for ( unsigned i = 0; i 
< _numberOfLayers; i++ ) + { + const Layer *layer = _networkLevelReasoner->getLayer( i ); + _layerSizes.append( layer->getSize() ); + Layer::Type layerType = layer->getLayerType(); + _layersOrder.append( layerType ); + switch ( layerType ) + { + case Layer::INPUT: + break; + case Layer::WEIGHTED_SUM: + weightedSum( i, layer ); + break; + case Layer::RELU: + { + auto reluLayer = std::make_shared( _networkLevelReasoner, i ); + _reluLayers.append( reluLayer ); + register_module( "ReLU" + std::to_string( i ), reluLayer ); + break; + } + case Layer::MAX: + { + auto maxPoolLayer = std::make_shared( _networkLevelReasoner, i ); + _maxPoolLayers.append( maxPoolLayer ); + register_module( "maxPool" + std::to_string( i ), maxPoolLayer ); + break; + } + default: + CUSTOM_DNN_LOG( "Unsupported layer type\n" ); + throw MarabouError( MarabouError::DEBUGGING_ERROR ); + } + } +} + +torch::Tensor CustomDNN::forward( torch::Tensor x ) +{ + unsigned linearIndex = 0; + unsigned reluIndex = 0; + unsigned maxPoolIndex = 0; + for ( unsigned i = 0; i < _numberOfLayers; i++ ) + { + const Layer::Type layerType = _layersOrder[i]; + switch ( layerType ) + { + case Layer::INPUT: + break; + case Layer::WEIGHTED_SUM: + x = _linearLayers[linearIndex]->forward( x ); + linearIndex++; + break; + case Layer::RELU: + x = _reluLayers[reluIndex]->forward( x ); + reluIndex++; + break; + case Layer::MAX: + x = _maxPoolLayers[maxPoolIndex]->forward( x ); + maxPoolIndex++; + break; + default: + CUSTOM_DNN_LOG( "Unsupported layer type\n" ); + throw MarabouError( MarabouError::DEBUGGING_ERROR ); + break; + } + } + return x; +} + +torch::Tensor CustomReluFunction::forward( torch::autograd::AutogradContext *ctx, + torch::Tensor x, + const NetworkLevelReasoner *nlr, + unsigned int layerIndex ) +{ + ctx->save_for_backward( { x } ); + + const Layer *layer = nlr->getLayer( layerIndex ); + torch::Tensor reluOutputs = torch::zeros( { 1, layer->getSize() } ); + torch::Tensor reluGradients = torch::zeros( { 1, 
layer->getSize() } ); + + for ( unsigned neuron = 0; neuron < layer->getSize(); ++neuron ) + { + auto sources = layer->getActivationSources( neuron ); + ASSERT( sources.size() == 1 ); + const NeuronIndex &sourceNeuron = sources.back(); + int index = static_cast( sourceNeuron._neuron ); + reluOutputs.index_put_( { 0, static_cast( neuron ) }, + torch::clamp_min( x.index( { 0, index } ), 0 ) ); + reluGradients.index_put_( { 0, static_cast( neuron ) }, x.index( { 0, index } ) > 0 ); + } + + ctx->saved_data["reluGradients"] = reluGradients; + + return reluOutputs; +} + +std::vector CustomReluFunction::backward( torch::autograd::AutogradContext *ctx, + std::vector grad_output ) +{ + auto saved = ctx->get_saved_variables(); + auto input = saved[0]; + + auto reluGradients = ctx->saved_data["reluGradients"].toTensor(); + auto grad_input = grad_output[0] * reluGradients[0]; + + return { grad_input, torch::Tensor(), torch::Tensor() }; +} + +torch::Tensor CustomMaxPoolFunction::forward( torch::autograd::AutogradContext *ctx, + torch::Tensor x, + const NetworkLevelReasoner *nlr, + unsigned int layerIndex ) +{ + ctx->save_for_backward( { x } ); + + const Layer *layer = nlr->getLayer( layerIndex ); + torch::Tensor maxOutputs = torch::zeros( { 1, layer->getSize() } ); + torch::Tensor argMaxOutputs = torch::zeros( { 1, layer->getSize() }, torch::kInt64 ); + + for ( unsigned neuron = 0; neuron < layer->getSize(); ++neuron ) + { + auto sources = layer->getActivationSources( neuron ); + torch::Tensor sourceValues = torch::zeros( sources.size(), torch::kFloat ); + torch::Tensor sourceIndices = torch::zeros( sources.size() ); + + for ( int i = sources.size() - 1; i >= 0; --i ) + { + const NeuronIndex &activationNeuron = sources.back(); + int index = static_cast( activationNeuron._neuron ); + sources.popBack(); + sourceValues.index_put_( { i }, x.index( { 0, index } ) ); + sourceIndices.index_put_( { i }, index ); + } + + maxOutputs.index_put_( { 0, static_cast( neuron ) }, torch::max( 
sourceValues ) ); + argMaxOutputs.index_put_( { 0, static_cast( neuron ) }, + sourceIndices.index( { torch::argmax( sourceValues ) } ) ); + } + + ctx->saved_data["argMaxOutputs"] = argMaxOutputs; + + return maxOutputs; +} + +std::vector CustomMaxPoolFunction::backward( torch::autograd::AutogradContext *ctx, + std::vector grad_output ) +{ + auto saved = ctx->get_saved_variables(); + auto input = saved[0]; + + auto grad_input = torch::zeros_like( input ); + + auto indices = ctx->saved_data["argMaxOutputs"].toTensor(); + + grad_input[0].index_add_( 0, indices.flatten(), grad_output[0].flatten() ); + + return { grad_input, torch::Tensor(), torch::Tensor() }; +} + +const Vector &CustomDNN::getLayerSizes() const +{ + return _layerSizes; +} + +torch::Tensor CustomDNN::getLayerWeights(unsigned layerIndex) const { + if (_layersOrder[layerIndex] == Layer::WEIGHTED_SUM) { + auto linearLayer = _linearLayers[layerIndex]; + return linearLayer->weight; // Returning weights of the corresponding linear layer + } + throw std::runtime_error("Requested weights for a non-weighted sum layer."); +} + +torch::Tensor CustomDNN::getLayerBias(unsigned layerIndex) const { + if (_layersOrder[layerIndex] == Layer::WEIGHTED_SUM) { + auto linearLayer = _linearLayers[layerIndex]; + return linearLayer->bias; // Returning bias of the corresponding linear layer + } + throw std::runtime_error("Requested bias for a non-weighted sum layer."); +} + +void CustomDNN::getInputBounds(torch::Tensor &lbTensor, torch::Tensor &ubTensor) const +{ + const Layer *layer = _networkLevelReasoner->getLayer(0); + unsigned size = layer->getSize(); + + std::vector lowerBounds; + std::vector upperBounds; + lowerBounds.reserve(size); + upperBounds.reserve(size); + + for (unsigned neuron = 0; neuron < size; ++neuron) + { + lowerBounds.push_back(layer->getLb(neuron)); + upperBounds.push_back(layer->getUb(neuron)); + } + + lbTensor = torch::tensor(lowerBounds, torch::kDouble); + ubTensor = torch::tensor(upperBounds, 
torch::kDouble); +} + + + +std::vector> CustomDNN::getMaxPoolSources(const Layer* maxPoolLayer) { + std::vector> sources; + unsigned size = maxPoolLayer->getSize(); + for (unsigned neuron = 0; neuron < size; ++neuron) { + + sources.push_back(maxPoolLayer->getActivationSources(neuron)); + } + return sources; +} + +} + +#endif \ No newline at end of file diff --git a/src/engine/CustomDNN.h b/src/engine/CustomDNN.h new file mode 100644 index 0000000000..a414d80f5e --- /dev/null +++ b/src/engine/CustomDNN.h @@ -0,0 +1,119 @@ +#ifdef BUILD_TORCH +#ifndef _CustomDNN_h_ +#define _CustomDNN_h_ + +#include "Layer.h" +#include "Vector.h" + +#include + +#undef Warning +#include + +#define CUSTOM_DNN_LOG( x, ... ) \ + MARABOU_LOG( GlobalConfiguration::CUSTOM_DNN_LOGGING, "customDNN: %s\n", x ) + +/* + Custom differentiation function for ReLU, implementing the forward and backward propagation + for the ReLU operation according to each variable's source layer as defined in the nlr. +*/ +namespace NLR { +class CustomReluFunction : public torch::autograd::Function +{ +public: + static torch::Tensor forward( torch::autograd::AutogradContext *ctx, + torch::Tensor x, + const NetworkLevelReasoner *nlr, + unsigned layerIndex ); + + static std::vector backward( torch::autograd::AutogradContext *ctx, + std::vector grad_output ); +}; + +class CustomRelu : public torch::nn::Module +{ +public: + CustomRelu( const NetworkLevelReasoner *nlr, unsigned layerIndex ); + torch::Tensor forward( torch::Tensor x ) const; + +private: + const NetworkLevelReasoner *_networkLevelReasoner; + unsigned _reluLayerIndex; +}; + +/* + Custom differentiation function for max pooling, implementing the forward and backward propagation + for the max pooling operation according to each variable's source layer as defined in the nlr. 
+*/ +class CustomMaxPoolFunction : public torch::autograd::Function +{ +public: + static torch::Tensor forward( torch::autograd::AutogradContext *ctx, + torch::Tensor x, + const NetworkLevelReasoner *nlr, + unsigned layerIndex ); + + static std::vector backward( torch::autograd::AutogradContext *ctx, + std::vector grad_output ); +}; + +class CustomMaxPool : public torch::nn::Module +{ +public: + CustomMaxPool( const NetworkLevelReasoner *nlr, unsigned layerIndex ); + torch::Tensor forward( torch::Tensor x ) const; + +private: + const NetworkLevelReasoner *_networkLevelReasoner; + unsigned _maxLayerIndex; +}; + +/* + torch implementation of the network according to the nlr. + */ +class CustomDNN : public torch::nn::Module +{ +public: + static void setWeightsAndBiases( torch::nn::Linear &linearLayer, + const Layer *layer, + unsigned sourceLayer, + unsigned inputSize, + unsigned outputSize ); + void weightedSum( unsigned i, const Layer *layer ); + explicit CustomDNN( const NetworkLevelReasoner *networkLevelReasoner ); + torch::Tensor getLayerWeights( unsigned layerIndex ) const; + torch::Tensor getLayerBias( unsigned layerIndex ) const; + torch::Tensor forward( torch::Tensor x ); + const Vector &getLayerSizes() const; + void getInputBounds( torch::Tensor &lbTensor, torch::Tensor &ubTensor ) const; + std::vector> getMaxPoolSources(const Layer* maxPoolLayer); + Vector getLinearLayers() + { + return _linearLayers; + } + Vector getLayersOrder() const + { + return _layersOrder; + } + Vector getLayersOrder() + { + return _layersOrder; + } + + unsigned getNumberOfLayers() const + { + return _numberOfLayers; + } + +private: + const NetworkLevelReasoner *_networkLevelReasoner; + Vector _layerSizes; + Vector> _reluLayers; + Vector> _maxPoolLayers; + Vector _linearLayers; + Vector _layersOrder; + unsigned _numberOfLayers; +}; +} // namespace NLR +#endif // _CustomDNN_h_ +#endif \ No newline at end of file diff --git a/src/engine/DantzigsRule.h b/src/engine/DantzigsRule.h index 
5e57e28c24..b3fda42e77 100644 --- a/src/engine/DantzigsRule.h +++ b/src/engine/DantzigsRule.h @@ -19,7 +19,7 @@ #include "EntrySelectionStrategy.h" #define DANTZIG_LOG( x, ... ) \ - LOG( GlobalConfiguration::DANTZIGS_RULE_LOGGING, "DantzigsRule: %s\n", x ) + MARABOU_LOG( GlobalConfiguration::DANTZIGS_RULE_LOGGING, "DantzigsRule: %s\n", x ) class String; diff --git a/src/engine/DnCManager.h b/src/engine/DnCManager.h index ee4a55a19d..545a9fd326 100644 --- a/src/engine/DnCManager.h +++ b/src/engine/DnCManager.h @@ -25,7 +25,7 @@ #include #define DNC_MANAGER_LOG( x, ... ) \ - LOG( GlobalConfiguration::DNC_MANAGER_LOGGING, "DnCManager: %s\n", x ) + MARABOU_LOG( GlobalConfiguration::DNC_MANAGER_LOGGING, "DnCManager: %s\n", x ) class Query; diff --git a/src/engine/Engine.cpp b/src/engine/Engine.cpp index 86f45ecd0c..028bae2fbc 100644 --- a/src/engine/Engine.cpp +++ b/src/engine/Engine.cpp @@ -2444,6 +2444,8 @@ unsigned Engine::performSymbolicBoundTightening( Query *inputQuery ) _networkLevelReasoner->symbolicBoundPropagation(); else if ( _symbolicBoundTighteningType == SymbolicBoundTighteningType::DEEP_POLY ) _networkLevelReasoner->deepPolyPropagation(); + else if ( _symbolicBoundTighteningType == SymbolicBoundTighteningType::ALPHA_CROWN ) + _networkLevelReasoner->alphaCrown(); // Step 3: Extract the bounds List tightenings; diff --git a/src/engine/Engine.h b/src/engine/Engine.h index a3ea1c22d3..5e53564f33 100644 --- a/src/engine/Engine.h +++ b/src/engine/Engine.h @@ -57,7 +57,7 @@ #undef ERROR #endif -#define ENGINE_LOG( x, ... ) LOG( GlobalConfiguration::ENGINE_LOGGING, "Engine: %s\n", x ) +#define ENGINE_LOG( x, ... ) MARABOU_LOG( GlobalConfiguration::ENGINE_LOGGING, "Engine: %s\n", x ) class EngineState; class Query; diff --git a/src/engine/InputQuery.cpp b/src/engine/InputQuery.cpp index d275646b06..c28913a20b 100644 --- a/src/engine/InputQuery.cpp +++ b/src/engine/InputQuery.cpp @@ -29,7 +29,7 @@ #include "SoftmaxConstraint.h" #define INPUT_QUERY_LOG( x, ... 
) \ - LOG( GlobalConfiguration::INPUT_QUERY_LOGGING, "Marabou Query: %s\n", x ) + MARABOU_LOG( GlobalConfiguration::INPUT_QUERY_LOGGING, "Marabou Query: %s\n", x ) using namespace CVC4::context; diff --git a/src/engine/MarabouError.h b/src/engine/MarabouError.h index 2f2ee54c0f..1cff574056 100644 --- a/src/engine/MarabouError.h +++ b/src/engine/MarabouError.h @@ -66,6 +66,8 @@ class MarabouError : public Error FEATURE_NOT_YET_SUPPORTED = 900, + TIMEOUT = 32, + DEBUGGING_ERROR = 999, }; diff --git a/src/engine/PLConstraintScoreTracker.h b/src/engine/PLConstraintScoreTracker.h index ab62333e6b..6798074dd2 100644 --- a/src/engine/PLConstraintScoreTracker.h +++ b/src/engine/PLConstraintScoreTracker.h @@ -24,7 +24,7 @@ #include #define SCORE_TRACKER_LOG( x, ... ) \ - LOG( GlobalConfiguration::SCORE_TRACKER_LOGGING, "PLConstraintScoreTracker: %s\n", x ) + MARABOU_LOG( GlobalConfiguration::SCORE_TRACKER_LOGGING, "PLConstraintScoreTracker: %s\n", x ) struct ScoreEntry { diff --git a/src/engine/ProjectedSteepestEdge.h b/src/engine/ProjectedSteepestEdge.h index 70b3265ef0..c84ba9bac9 100644 --- a/src/engine/ProjectedSteepestEdge.h +++ b/src/engine/ProjectedSteepestEdge.h @@ -20,7 +20,7 @@ #include "SparseUnsortedList.h" #define PSE_LOG( x, ... ) \ - LOG( GlobalConfiguration::PROJECTED_STEEPEST_EDGE_LOGGING, "Projected SE: %s\n", x ) + MARABOU_LOG( GlobalConfiguration::PROJECTED_STEEPEST_EDGE_LOGGING, "Projected SE: %s\n", x ) class ProjectedSteepestEdgeRule : public IProjectedSteepestEdgeRule { diff --git a/src/engine/Query.cpp b/src/engine/Query.cpp index 77b22c9c9f..6c696c42be 100644 --- a/src/engine/Query.cpp +++ b/src/engine/Query.cpp @@ -29,7 +29,7 @@ #include "SymbolicBoundTighteningType.h" #define INPUT_QUERY_LOG( x, ... 
) \ - LOG( GlobalConfiguration::INPUT_QUERY_LOGGING, "Input Query: %s\n", x ) + MARABOU_LOG( GlobalConfiguration::INPUT_QUERY_LOGGING, "Input Query: %s\n", x ) Query::Query() : _ensureSameSourceLayerInNLR( Options::get()->getSymbolicBoundTighteningType() == diff --git a/src/engine/SmtCore.h b/src/engine/SmtCore.h index ad1d61f8e9..0274d475b6 100644 --- a/src/engine/SmtCore.h +++ b/src/engine/SmtCore.h @@ -28,7 +28,7 @@ #include -#define SMT_LOG( x, ... ) LOG( GlobalConfiguration::SMT_CORE_LOGGING, "SmtCore: %s\n", x ) +#define SMT_LOG( x, ... ) MARABOU_LOG( GlobalConfiguration::SMT_CORE_LOGGING, "SmtCore: %s\n", x ) class EngineState; class IEngine; diff --git a/src/engine/SumOfInfeasibilitiesManager.h b/src/engine/SumOfInfeasibilitiesManager.h index a823a92fed..7a9a3bf448 100644 --- a/src/engine/SumOfInfeasibilitiesManager.h +++ b/src/engine/SumOfInfeasibilitiesManager.h @@ -29,7 +29,7 @@ #include "T/stdlib.h" #include "Vector.h" -#define SOI_LOG( x, ... ) LOG( GlobalConfiguration::SOI_LOGGING, "SoIManager: %s\n", x ) +#define SOI_LOG( x, ... ) MARABOU_LOG( GlobalConfiguration::SOI_LOGGING, "SoIManager: %s\n", x ) class SumOfInfeasibilitiesManager { diff --git a/src/engine/SymbolicBoundTighteningType.h b/src/engine/SymbolicBoundTighteningType.h index 509c0ae21c..56a48fa730 100644 --- a/src/engine/SymbolicBoundTighteningType.h +++ b/src/engine/SymbolicBoundTighteningType.h @@ -22,7 +22,8 @@ enum class SymbolicBoundTighteningType { SYMBOLIC_BOUND_TIGHTENING = 0, DEEP_POLY = 1, - NONE = 2, + ALPHA_CROWN = 2, + NONE = 3, }; #endif // __SymbolicBoundTighteningType_h__ diff --git a/src/engine/Tableau.h b/src/engine/Tableau.h index f5bf063f57..175efbe3c8 100644 --- a/src/engine/Tableau.h +++ b/src/engine/Tableau.h @@ -29,7 +29,7 @@ #include "SparseUnsortedList.h" #include "Statistics.h" -#define TABLEAU_LOG( x, ... ) LOG( GlobalConfiguration::TABLEAU_LOGGING, "Tableau: %s\n", x ) +#define TABLEAU_LOG( x, ... 
) MARABOU_LOG( GlobalConfiguration::TABLEAU_LOGGING, "Tableau: %s\n", x ) class Equation; class ICostFunctionManager; diff --git a/src/input_parsers/MpsParser.h b/src/input_parsers/MpsParser.h index 71f177f493..76c4afab96 100644 --- a/src/input_parsers/MpsParser.h +++ b/src/input_parsers/MpsParser.h @@ -20,7 +20,7 @@ #include "Map.h" #include "Set.h" -#define MPS_LOG( x, ... ) LOG( GlobalConfiguration::MPS_PARSER_LOGGING, "MpsParser: %s\n", x ) +#define MPS_LOG( x, ... ) MARABOU_LOG( GlobalConfiguration::MPS_PARSER_LOGGING, "MpsParser: %s\n", x ) class IQuery; class String; diff --git a/src/input_parsers/OnnxParser.h b/src/input_parsers/OnnxParser.h index 2b316a2004..3f0149f6c8 100644 --- a/src/input_parsers/OnnxParser.h +++ b/src/input_parsers/OnnxParser.h @@ -25,7 +25,7 @@ #include "Vector.h" #include "onnx.proto3.pb.h" -#define ONNX_LOG( x, ... ) LOG( GlobalConfiguration::ONNX_PARSER_LOGGING, "OnnxParser: %s\n", x ) +#define ONNX_LOG( x, ... ) MARABOU_LOG( GlobalConfiguration::ONNX_PARSER_LOGGING, "OnnxParser: %s\n", x ) class OnnxParser diff --git a/src/nlr/AlphaCrown.cpp b/src/nlr/AlphaCrown.cpp new file mode 100644 index 0000000000..c68f9554d9 --- /dev/null +++ b/src/nlr/AlphaCrown.cpp @@ -0,0 +1,546 @@ +// +// Created by User on 7/23/2025. 
+// + +#include "AlphaCrown.h" +#include "MStringf.h" +#include "NetworkLevelReasoner.h" +#include "Layer.h" + +namespace NLR { +AlphaCrown::AlphaCrown( LayerOwner *layerOwner ) + : _layerOwner( layerOwner ) +{ + _nlr = dynamic_cast( layerOwner ); + _network = new CustomDNN( _nlr ); + _network->getInputBounds( _lbInput, _ubInput ); + _inputSize = _lbInput.size( 0 ); + _linearLayers = _network->getLinearLayers().getContainer(); + _layersOrder = _network->getLayersOrder().getContainer(); + + unsigned linearIndex = 0; + for ( unsigned i = 0; i < _network->getNumberOfLayers(); i++ ) + { + if (_layersOrder[i] == Layer::WEIGHTED_SUM) + { + // const Layer *layer = _layerOwner->getLayer( i ); + auto linearLayer = _linearLayers[linearIndex]; + auto whights = linearLayer->weight; + auto bias = linearLayer->bias; + _positiveWeights.insert( {i,torch::where( whights >= 0,whights, + torch::zeros_like( + whights ) ).to(torch::kFloat32)} ); + _negativeWeights.insert( {i,torch::where( whights <= 0,whights, + torch::zeros_like( + whights ) ).to(torch::kFloat32)} ); + _biases.insert( {i,bias.to(torch::kFloat32)} ); + linearIndex += 1; + } + if (_layersOrder[i] == Layer::MAX) + { + _maxPoolSources.insert({i, _network->getMaxPoolSources(_nlr->getLayer( i ) )}); + } + } +} + +torch::Tensor AlphaCrown::createSymbolicVariablesMatrix() +{ + // Create the identity matrix and the zero matrix + auto eye_tensor = torch::eye(_inputSize, torch::kFloat32); // Ensure float32 + auto zero_tensor = torch::zeros({_inputSize, 1}, torch::kFloat32); // Ensure float32 + + // Concatenate the two tensors horizontally (along dim=1) + return torch::cat({eye_tensor, zero_tensor}, 1); // Will be of type float32 +} + +torch::Tensor AlphaCrown::lower_ReLU_relaxation( const torch::Tensor &u, const torch::Tensor &l ) +{ + torch::Tensor mult; + mult = torch::where( u - l == 0, torch::tensor( 1.0 ), u / ( u - l ) ); + mult = torch::where( l >= 0, torch::tensor( 1.0 ), mult ); + mult = torch::where( u <= 0, 
torch::tensor( 0.0 ), mult ); + return mult.to(torch::kFloat32); +} + +std::tuple AlphaCrown::upper_ReLU_relaxation( const torch::Tensor &u, + const torch::Tensor &l ) +{ + torch::Tensor mult = torch::where( u - l == 0, torch::tensor( 1.0 ), u / ( u - l ) ); + mult = torch::where( l >= 0, torch::tensor( 1.0 ), mult ); + mult = torch::where( u <= 0, torch::tensor( 0.0 ), mult ); + + torch::Tensor add = torch::where( u - l == 0, torch::tensor( 0.0 ), -l * mult ); + add = torch::where( l >= 0, torch::tensor( 0.0 ), add ); + + return std::make_tuple( mult.to(torch::kFloat32), add.to(torch::kFloat32) ); +} +torch::Tensor AlphaCrown::getMaxOfSymbolicVariables( const torch::Tensor &matrix ) +{ + auto coefficients = matrix.index( + { torch::indexing::Slice(), torch::indexing::Slice( torch::indexing::None, -1 ) } ); + auto free_coefficients = matrix.index( { torch::indexing::Slice(), -1 } ); + + auto positive_mask = coefficients >= 0; + + torch::Tensor u_values = + torch::sum( torch::where( positive_mask, coefficients * _ubInput, coefficients * _lbInput ), + 1 ) + + free_coefficients; + + return u_values; +} + +torch::Tensor AlphaCrown::getMinOfSymbolicVariables( const torch::Tensor &matrix ) +{ + auto coefficients = matrix.index( + { torch::indexing::Slice(), torch::indexing::Slice( torch::indexing::None, -1 ) } ); + auto free_coefficients = matrix.index( { torch::indexing::Slice(), -1 } ); + + auto positive_mask = coefficients >= 0; + + torch::Tensor l_values = + torch::sum( torch::where( positive_mask, coefficients * _lbInput, coefficients * _ubInput ), + 1 ) + + free_coefficients; + + return l_values; +} + +void AlphaCrown::relaxReluLayer(unsigned layerNumber, torch::Tensor + &EQ_up, torch::Tensor &EQ_low){ + + auto u_values_EQ_up = AlphaCrown::getMaxOfSymbolicVariables(EQ_up); + auto l_values_EQ_up = AlphaCrown::getMinOfSymbolicVariables(EQ_low); + auto [upperRelaxationSlope, upperRelaxationIntercept] = + AlphaCrown::upper_ReLU_relaxation(l_values_EQ_up, 
u_values_EQ_up); + + auto u_values_EQ_low = AlphaCrown::getMaxOfSymbolicVariables(EQ_up); + auto l_values_EQ_low = AlphaCrown::getMinOfSymbolicVariables(EQ_low); + auto alphaSlope = AlphaCrown::lower_ReLU_relaxation(l_values_EQ_low, + u_values_EQ_low); + + EQ_up = EQ_up * upperRelaxationSlope.unsqueeze( 1 ); + EQ_up = AlphaCrown::addVecToLastColumnValue( EQ_up, upperRelaxationIntercept ); + EQ_low = EQ_low * alphaSlope.unsqueeze( 1 ); + + _upperRelaxationSlopes.insert({layerNumber, upperRelaxationSlope} ); + // back but insert to dict + _upperRelaxationIntercepts.insert({layerNumber, upperRelaxationIntercept} ); + _indexAlphaSlopeMap.insert( {layerNumber, _initialAlphaSlopes.size()} ); + _initialAlphaSlopes.push_back( alphaSlope ); + +} + +void AlphaCrown::relaxMaxPoolLayer(unsigned layerNumber, + torch::Tensor &EQ_up, + torch::Tensor &EQ_low) +{ + std::cout << "Relaxing MaxPool layer number: " << layerNumber << std::endl; + const auto &groups = _maxPoolSources[layerNumber]; + TORCH_CHECK(!groups.empty(), "MaxPool layer has no groups"); + + const auto cols = EQ_up.size(1); + auto next_EQ_up = torch::zeros({ (long)groups.size(), cols }, torch::kFloat32); + auto next_EQ_low = torch::zeros({ (long)groups.size(), cols }, torch::kFloat32); + + std::vector<long> upIdx; upIdx.reserve(groups.size()); + std::vector<long> loIdx; loIdx.reserve(groups.size()); + std::vector<float> slopes; slopes.reserve(groups.size()); + std::vector<float> ints; ints.reserve(groups.size()); + + for (size_t k = 0; k < groups.size(); ++k) + { + // Get per-neuron relaxation parameters & indices + auto R = relaxMaxNeuron(groups, k, EQ_up, EQ_low); + + // Build next rows: + // Upper: slope * EQ_up[R.idx_up] (+ intercept on last column) + auto up_row = EQ_up.index({ (long)R.idx_up, torch::indexing::Slice() }) * R.slope; + auto bvec = torch::full({1}, R.intercept, torch::kFloat32); + up_row = AlphaCrown::addVecToLastColumnValue(up_row, bvec); + + // Lower: copy EQ_low[R.idx_low] + auto low_row = EQ_low.index({ 
(long)R.idx_low, torch::indexing::Slice() }).clone(); + + next_EQ_up.index_put_ ( { (long)k, torch::indexing::Slice() }, up_row ); + next_EQ_low.index_put_( { (long)k, torch::indexing::Slice() }, low_row ); + + // Persist + upIdx.push_back(R.idx_up); + loIdx.push_back(R.idx_low); + slopes.push_back(R.slope); + ints.push_back(R.intercept); + } + + + _maxUpperChoice[layerNumber] = torch::from_blob( + upIdx.data(), {(long)upIdx.size()}, torch::TensorOptions().dtype(torch::kLong)).clone(); + _maxLowerChoice[layerNumber] = torch::from_blob( + loIdx.data(), {(long)loIdx.size()}, torch::TensorOptions().dtype(torch::kLong)).clone(); + _upperRelaxationSlopes[layerNumber] = + torch::from_blob(slopes.data(), {(long)slopes.size()}, torch::TensorOptions().dtype(torch::kFloat32)).clone(); + _upperRelaxationIntercepts[layerNumber] = + torch::from_blob(ints.data(), {(long)ints.size()}, torch::TensorOptions().dtype(torch::kFloat32)).clone(); + + // Advance EQs + EQ_up = next_EQ_up; + EQ_low = next_EQ_low; +} + + + + +std::pair +AlphaCrown::boundsFromEQ(const torch::Tensor &EQ, const std::vector &rows) +{ + TORCH_CHECK(!rows.empty(), "boundsFromEQ: empty rows"); + auto idx = torch::from_blob(const_cast(rows.data()), + {(long)rows.size()}, + torch::TensorOptions().dtype(torch::kLong)).clone(); + auto sub = EQ.index({ idx, torch::indexing::Slice() }); // |S| x (n+1) + auto U = getMaxOfSymbolicVariables(sub); // |S| + auto L = getMinOfSymbolicVariables(sub); // |S| + return {U, L}; +} + + + +AlphaCrown::MaxRelaxResult AlphaCrown::relaxMaxNeuron(const std::vector> &groups, + size_t k, + const torch::Tensor &EQ_up, + const torch::Tensor &EQ_low) +{ + constexpr double EPS = 1e-12; + + // Collect absolute previous-layer row indices for output k + std::vector srcRows; srcRows.reserve(16); + const auto &srcList = groups[k]; + for (const auto &ni : srcList) { + srcRows.push_back((long)ni._neuron); + } + TORCH_CHECK(!srcRows.empty(), "MaxPool group has no sources"); + + + auto [U_low, L_low] = 
boundsFromEQ(EQ_low, srcRows); + auto M_low = (U_low + L_low) / 2.0; + long j_rel_low = torch::argmax(M_low).item<long>(); + long idx_low_abs = srcRows[(size_t)j_rel_low]; + + + auto [U_up, L_up] = boundsFromEQ(EQ_up, srcRows); + + // i = argmax U_up, j = second argmax U_up (or i if single source) + int64_t kTop = std::min<int64_t>(2, U_up.size(0)); + auto top2 = torch::topk(U_up, kTop, /*dim=*/0, /*largest=*/true, /*sorted=*/true); + auto Uidxs = std::get<1>(top2); + long i_rel = Uidxs[0].item<long>(); + long j_rel2 = (kTop > 1) ? Uidxs[1].item<long>() : Uidxs[0].item<long>(); + + double li = L_up[i_rel].item<double>(); + double ui = U_up[i_rel].item<double>(); + double uj = U_up[j_rel2].item<double>(); + + // Case 1: (li == max(L_up)) ∧ (li >= uj) + auto Lmax_pair = torch::max(L_up, /*dim=*/0); + long l_arg = std::get<1>(Lmax_pair).item<long>(); + bool case1 = (i_rel == l_arg) && (li + EPS >= uj); + + float slope, intercept; + if (case1 || (ui - li) <= EPS) { + // Case 1 (or degenerate): y ≤ x_i → a=1, intercept=0 + slope = 1.0f; + intercept = 0.0f; + } else { + // Case 2: a=(ui-uj)/(ui-li), b=uj → store as (a*xi + (b - a*li)) + double a = (ui - uj) / (ui - li); + if (a < 0.0) a = 0.0; + if (a > 1.0) a = 1.0; + slope = (float)a; + intercept = (float)(uj - a * li); // this is what you ADD to last column + } + + long idx_up_abs = srcRows[(size_t)i_rel]; + return MaxRelaxResult{ idx_up_abs, idx_low_abs, slope, intercept }; +} + + +void AlphaCrown::computeMaxPoolLayer(unsigned layerNumber, + torch::Tensor &EQ_up, + torch::Tensor &EQ_low) +{ + auto idxUp = _maxUpperChoice.at(layerNumber); // int64 [m] + auto idxLo = _maxLowerChoice.at(layerNumber); // int64 [m] + auto a = _upperRelaxationSlopes.at(layerNumber).to(torch::kFloat32); // [m] + auto b = _upperRelaxationIntercepts.at(layerNumber).to(torch::kFloat32); // [m] + + // Select rows from current EQs + auto up_sel = EQ_up.index ({ idxUp, torch::indexing::Slice() }); // m x (n+1) + auto low_sel = EQ_low.index({ idxLo, torch::indexing::Slice() }); + + // Upper: scale + add intercept on 
last column + auto next_up = up_sel * a.unsqueeze(1); + next_up = AlphaCrown::addVecToLastColumnValue(next_up, b); + + // Lower: copy chosen rows + auto next_low = low_sel.clone(); + + EQ_up = next_up; + EQ_low = next_low; +} + + + + +void AlphaCrown::findBounds() +{ + torch::Tensor EQ_up = createSymbolicVariablesMatrix(); + torch::Tensor EQ_low = createSymbolicVariablesMatrix(); + + for ( unsigned i = 0; i < _network->getNumberOfLayers(); i++ ){ + Layer::Type layerType = _layersOrder[i]; + switch (layerType) + { + case Layer::INPUT: + break; + case Layer::WEIGHTED_SUM: + computeWeightedSumLayer(i, EQ_up, EQ_low); + break; + case Layer::RELU: + relaxReluLayer(i, EQ_up, EQ_low); + break; + case Layer::MAX: + { + relaxMaxPoolLayer( i, EQ_up, EQ_low ); + break; + } + default: + AlphaCrown::log ( "Unsupported layer type\n"); + throw MarabouError( MarabouError::DEBUGGING_ERROR ); + } + } +} + + +std::tuple AlphaCrown::computeBounds + (std::vector &alphaSlopes) +{ + torch::Tensor EQ_up = createSymbolicVariablesMatrix(); + torch::Tensor EQ_low = createSymbolicVariablesMatrix(); + for ( unsigned i = 0; i < _network->getNumberOfLayers(); i++ ) + { + auto layerType = _layersOrder[i]; + switch (layerType) + { + case Layer::INPUT: + break; + case Layer::WEIGHTED_SUM: + computeWeightedSumLayer (i, EQ_up, EQ_low); + break; + case Layer::RELU: + computeReluLayer (i, EQ_up, EQ_low, alphaSlopes); + break; + case Layer::MAX: + computeMaxPoolLayer( i, EQ_up, EQ_low ); + break; + default: + log ("Unsupported layer type\n"); + throw MarabouError (MarabouError::DEBUGGING_ERROR); + } + } + auto outputUpBound = getMaxOfSymbolicVariables(EQ_up); + auto outputLowBound = getMinOfSymbolicVariables(EQ_low); + return std::make_tuple(outputUpBound, outputLowBound); + +} + + +void AlphaCrown::computeWeightedSumLayer(unsigned i, torch::Tensor &EQ_up, torch::Tensor &EQ_low){ + //auto linearLayer = _linearLayers[i]; + auto Wi_positive = _positiveWeights[i]; + auto Wi_negative = _negativeWeights[i]; 
+ auto Bi = _biases[i]; + + auto EQ_up_afterLayer = Wi_positive.mm( EQ_up ) + Wi_negative.mm( EQ_low ); + EQ_up_afterLayer = + AlphaCrown::addVecToLastColumnValue( EQ_up_afterLayer, Bi ); + + + auto EQ_low_afterLayer = Wi_positive.mm( EQ_low ) + Wi_negative.mm( EQ_up ); + EQ_low_afterLayer = + AlphaCrown::addVecToLastColumnValue(EQ_low_afterLayer, Bi ); + + EQ_up = EQ_up_afterLayer; + EQ_low = EQ_low_afterLayer; + +} + + +void AlphaCrown::computeReluLayer(unsigned layerNumber, torch::Tensor + &EQ_up, torch::Tensor &EQ_low, std::vector &alphaSlopes){ + EQ_up = EQ_up * _upperRelaxationSlopes[layerNumber].unsqueeze( 1 ); // + EQ_up = AlphaCrown::addVecToLastColumnValue( EQ_up, _upperRelaxationIntercepts[layerNumber] ); + unsigned indexInAlpha = _indexAlphaSlopeMap[layerNumber]; + EQ_low = EQ_low * alphaSlopes[indexInAlpha].unsqueeze( 1 ); +} + + + + +void AlphaCrown::updateBounds(std::vector &alphaSlopes){ + torch::Tensor EQ_up = createSymbolicVariablesMatrix(); + torch::Tensor EQ_low = createSymbolicVariablesMatrix(); + + + for ( unsigned i = 0; i < _network->getNumberOfLayers(); i++ ) + { + auto layerType = _layersOrder[i]; + switch (layerType) + { + case Layer::INPUT: + break; + case Layer::WEIGHTED_SUM: + computeWeightedSumLayer (i, EQ_up, EQ_low); + break; + case Layer::RELU: + computeReluLayer (i, EQ_up, EQ_low, alphaSlopes); + break; + case Layer::MAX: + computeMaxPoolLayer( i, EQ_up, EQ_low ); + break; + default: + log ("Unsupported layer type\n"); + throw MarabouError (MarabouError::DEBUGGING_ERROR); + } + auto upBound = getMaxOfSymbolicVariables(EQ_up); + auto lowBound = getMinOfSymbolicVariables(EQ_low); + updateBoundsOfLayer(i, upBound, lowBound); + } +} + +void AlphaCrown::updateBoundsOfLayer(unsigned layerIndex, torch::Tensor &upBounds, torch::Tensor &lowBounds) +{ + + Layer * layer = _layerOwner->getLayerIndexToLayer()[layerIndex]; + //TODO it should be: Layer *layer = _layerOwner->getLayer(layerIndex); if we added non const getter + + for (int j = 0; j 
< upBounds.size(0); j++) + { + if ( layer->neuronEliminated( j ) ) continue; + double lb_val = lowBounds[j].item(); + if ( layer->getLb( j ) < lb_val ) + { + log( Stringf( "Neuron %u_%u lower-bound updated from %f to %f", + layerIndex, + j, + layer->getLb( j ), + lb_val ) ); + + std::cout << "Neuron " << layerIndex << "_" << j + << " lower-bound updated from " << layer->getLb(j) + << " to " << lb_val << std::endl; + layer->setLb( j, lb_val ); + _layerOwner->receiveTighterBound( + Tightening( layer->neuronToVariable( j ), lb_val, Tightening::LB ) ); + } + + + auto ub_val = upBounds[j].item(); + if ( layer->getUb( j ) > ub_val ) + { + log( Stringf( "Neuron %u_%u upper-bound updated from %f to %f", + layerIndex, + j, + layer->getUb( j ), + ub_val ) ); + std::cout << "Neuron " << layerIndex << "_" << j + << " upper-bound updated from " << layer->getUb(j) + << " to " << ub_val << std::endl; + + layer->setUb( j, ub_val ); + _layerOwner->receiveTighterBound( + Tightening( layer->neuronToVariable( j ), ub_val, Tightening::UB ) ); + } + + } +} + + +void AlphaCrown::optimizeBounds( int loops ) +{ + + + std::cout << "Starting AlphaCrown run with " << loops << " optimization loops." << std::endl; + std::vector alphaSlopesForUpBound; + std::vector alphaSlopesForLowBound; + for ( auto &tensor : _initialAlphaSlopes ) + { + alphaSlopesForUpBound.push_back( tensor.detach().clone().requires_grad_(true) ); + alphaSlopesForLowBound.push_back( tensor.detach().clone().requires_grad_(true) ); + } + GDloop( loops, "max", alphaSlopesForUpBound ); + GDloop( loops, "min", alphaSlopesForLowBound ); + updateBounds( alphaSlopesForUpBound ); + updateBounds( alphaSlopesForLowBound); + std::cout << "AlphaCrown run completed." 
<< std::endl; +} + + +void AlphaCrown::GDloop( int loops, + const std::string val_to_opt, + std::vector &alphaSlopes ) +{ + torch::optim::Adam optimizer( alphaSlopes, 0.005 ); + for ( int i = 0; i < loops; i++ ) + { + optimizer.zero_grad(); + + auto [max_val, min_val] = AlphaCrown::computeBounds( alphaSlopes ); + auto loss = ( val_to_opt == "max" ) ? max_val.sum() : -min_val.sum(); + loss.backward(torch::Tensor(), true); + + optimizer.step(); + + for ( auto &tensor : alphaSlopes ) + { + tensor.clamp( 0, 1 ); + } + + log( Stringf( "Optimization loop %d completed", i + 1 ) ); + std::cout << "std Optimization loop completed " << i+1 << std::endl; + } +} + + +torch::Tensor AlphaCrown::addVecToLastColumnValue(const torch::Tensor &matrix, + const torch::Tensor &vec) +{ + auto result = matrix.clone(); + if (result.dim() == 2) + { + // add 'vec' per row to last column + result.slice(1, result.size(1) - 1, result.size(1)) += vec.unsqueeze(1); + } + else if (result.dim() == 1) + { + // add scalar to last entry (the constant term) + TORCH_CHECK(vec.numel() == 1, "1-D addVec expects scalar vec"); + result.index_put_({ result.size(0) - 1 }, + result.index({ result.size(0) - 1 }) + vec.item()); + } + else + { + TORCH_CHECK(false, "addVecToLastColumnValue expects 1-D or 2-D tensor"); + } + return result; +} + + + +void AlphaCrown::log( const String &message ) +{ + if ( GlobalConfiguration::NETWORK_LEVEL_REASONER_LOGGING ) + printf( "DeepPolyAnalysis: %s\n", message.ascii() ); +} + + +} // namespace NLR \ No newline at end of file diff --git a/src/nlr/AlphaCrown.h b/src/nlr/AlphaCrown.h new file mode 100644 index 0000000000..6042354e99 --- /dev/null +++ b/src/nlr/AlphaCrown.h @@ -0,0 +1,100 @@ +#ifndef ALPHACROWN_H +#define ALPHACROWN_H + +#include "CustomDNN.h" +#include "LayerOwner.h" +#include + +#undef Warning +#include + +namespace NLR { +class AlphaCrown +{ +public: + AlphaCrown( LayerOwner *layerOwner ); + + void findBounds(); + void optimizeBounds( int loops = 50 ); + void 
run() + + { + findBounds(); + updateBounds(_initialAlphaSlopes); + optimizeBounds( 2 ); + + } + +private: + LayerOwner *_layerOwner; + NetworkLevelReasoner *_nlr; + CustomDNN *_network; + void GDloop( int loops, const std::string val_to_opt, std::vector &alphaSlopes ); + std::tuple + computeBounds( std::vector &alphaSlopes ); + int _inputSize; + torch::Tensor _lbInput; + torch::Tensor _ubInput; + + std::vector _linearLayers; + std::vector _layersOrder; + std::map _positiveWeights; + std::map _negativeWeights; + std::map _biases; + std::map _indexAlphaSlopeMap; + std::map _linearIndexMap; + + std::map>> _maxPoolSources; + std::map _maxUpperChoice; // int64 [m]: absolute row index for upper bound + std::map _maxLowerChoice; // int64 [m]: absolute row index for lower bound + void relaxMaxPoolLayer(unsigned layerNumber, torch::Tensor &EQ_up, torch::Tensor &EQ_low); + void computeMaxPoolLayer(unsigned layerNumber, torch::Tensor &EQ_up, torch::Tensor &EQ_low); + + std::pair boundsFromEQ(const torch::Tensor &EQ, const std::vector &rows); + struct MaxRelaxResult { + long idx_up; // absolute row in previous EQ for the upper bound + long idx_low; // absolute row in previous EQ for the lower bound + float slope; // upper slope a + float intercept; // upper intercept (b - a*l_i) + }; + + MaxRelaxResult relaxMaxNeuron(const std::vector> &groups, + size_t k, + const torch::Tensor &EQ_up, + const torch::Tensor &EQ_low); + + std::map _upperRelaxationSlopes; + std::map _upperRelaxationIntercepts; + + std::vector _initialAlphaSlopes; + + torch::Tensor createSymbolicVariablesMatrix(); + void relaxReluLayer(unsigned layerNumber, torch::Tensor &EQ_up, torch::Tensor &EQ_low); + void computeWeightedSumLayer(unsigned i, torch::Tensor &EQ_up, torch::Tensor &EQ_low); + void computeReluLayer(unsigned i, torch::Tensor &EQ_up, torch::Tensor &EQ_low, std::vector &alphaSlopes); + + void updateBounds(std::vector &alphaSlopes); + void updateBoundsOfLayer(unsigned layerIndex, torch::Tensor 
&upBounds, torch::Tensor &lowBounds); + + torch::Tensor addVecToLastColumnValue( const torch::Tensor &matrix, + const torch::Tensor &vec ); + // { + // auto result = matrix.clone(); + // result.slice( 1, result.size( 1 ) - 1, result.size( 1 ) ) += vec.unsqueeze( 1 ); + // return result; + // } + static torch::Tensor lower_ReLU_relaxation( const torch::Tensor &u, const torch::Tensor &l ); + + static std::tuple upper_ReLU_relaxation( const torch::Tensor &u, + const torch::Tensor &l ); + + torch::Tensor getMaxOfSymbolicVariables( const torch::Tensor &matrix ); + torch::Tensor getMinOfSymbolicVariables( const torch::Tensor &matrix ); + + + void log( const String &message ); +}; +} // namespace NLR + + +#endif //ALPHACROWN_H \ No newline at end of file diff --git a/src/nlr/CMakeLists.txt b/src/nlr/CMakeLists.txt index e377a638ba..f008a36da0 100644 --- a/src/nlr/CMakeLists.txt +++ b/src/nlr/CMakeLists.txt @@ -7,7 +7,7 @@ target_include_directories(${MARABOU_LIB} PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}") target_sources(${MARABOU_TEST_LIB} PRIVATE ${SRCS}) target_include_directories(${MARABOU_TEST_LIB} PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}") -set (NETWORK_LEVEL_REASONER_TESTS_DIR "${CMAKE_CURRENT_SOURCE_DIR}/tests") +set(NETWORK_LEVEL_REASONER_TESTS_DIR "${CMAKE_CURRENT_SOURCE_DIR}/tests") macro(network_level_reasoner_add_unit_test name) set(USE_MOCK_COMMON TRUE) set(USE_MOCK_ENGINE TRUE) @@ -15,15 +15,16 @@ macro(network_level_reasoner_add_unit_test name) endmacro() network_level_reasoner_add_unit_test(DeepPolyAnalysis) +network_level_reasoner_add_unit_test(AlphaCrown) network_level_reasoner_add_unit_test(NetworkLevelReasoner) network_level_reasoner_add_unit_test(WsLayerElimination) network_level_reasoner_add_unit_test(ParallelSolver) if (${ENABLE_GUROBI}) network_level_reasoner_add_unit_test(LPRelaxation) -endif() +endif () if (${BUILD_PYTHON}) target_include_directories(${MARABOU_PY} PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}") -endif() +endif () diff --git 
a/src/nlr/IterativePropagator.h b/src/nlr/IterativePropagator.h index 7a7fba671a..0c3a593f89 100644 --- a/src/nlr/IterativePropagator.h +++ b/src/nlr/IterativePropagator.h @@ -27,7 +27,7 @@ namespace NLR { #define IterativePropagator_LOG( x, ... ) \ - LOG( GlobalConfiguration::PREPROCESSOR_LOGGING, "Iterativepropagator: %s\n", x ) + MARABOU_LOG( GlobalConfiguration::PREPROCESSOR_LOGGING, "Iterativepropagator: %s\n", x ) class IterativePropagator : public ParallelSolver { diff --git a/src/nlr/LPFormulator.h b/src/nlr/LPFormulator.h index cee0abbb65..248fc968c8 100644 --- a/src/nlr/LPFormulator.h +++ b/src/nlr/LPFormulator.h @@ -31,7 +31,7 @@ namespace NLR { #define LPFormulator_LOG( x, ... ) \ - LOG( GlobalConfiguration::PREPROCESSOR_LOGGING, "LP Preprocessor: %s\n", x ) + MARABOU_LOG( GlobalConfiguration::PREPROCESSOR_LOGGING, "LP Preprocessor: %s\n", x ) class LPFormulator : public ParallelSolver { diff --git a/src/nlr/Layer.cpp b/src/nlr/Layer.cpp index 08e6900538..b27949cdf9 100644 --- a/src/nlr/Layer.cpp +++ b/src/nlr/Layer.cpp @@ -27,6 +27,13 @@ Layer::~Layer() freeMemoryIfNeeded(); } +void Layer::setBounds( unsigned int neuron, double lower, double upper ) +{ + ASSERT( neuron < _size ); + _lb[neuron] = lower; + _ub[neuron] = upper; +} + void Layer::setLayerOwner( LayerOwner *layerOwner ) { _layerOwner = layerOwner; diff --git a/src/nlr/Layer.h b/src/nlr/Layer.h index 900276eda3..d84237f2f1 100644 --- a/src/nlr/Layer.h +++ b/src/nlr/Layer.h @@ -58,6 +58,7 @@ class Layer Layer( const Layer *other ); Layer( unsigned index, Type type, unsigned size, LayerOwner *layerOwner ); ~Layer(); + void setBounds( unsigned int neuron, double lower, double upper ); void setLayerOwner( LayerOwner *layerOwner ); void addSourceLayer( unsigned layerNumber, unsigned layerSize ); diff --git a/src/nlr/NetworkLevelReasoner.cpp b/src/nlr/NetworkLevelReasoner.cpp index 8390a0190b..b8774af74d 100644 --- a/src/nlr/NetworkLevelReasoner.cpp +++ b/src/nlr/NetworkLevelReasoner.cpp @@ -34,13 
+34,15 @@ #include -#define NLR_LOG( x, ... ) LOG( GlobalConfiguration::NETWORK_LEVEL_REASONER_LOGGING, "NLR: %s\n", x ) +#define NLR_LOG( x, ... ) \ + MARABOU_LOG( GlobalConfiguration::NETWORK_LEVEL_REASONER_LOGGING, "NLR: %s\n", x ) namespace NLR { NetworkLevelReasoner::NetworkLevelReasoner() : _tableau( NULL ) , _deepPolyAnalysis( nullptr ) + , _alphaCrown( nullptr ) { } @@ -127,8 +129,17 @@ void NetworkLevelReasoner::evaluate( double *input, double *output ) const Layer *outputLayer = _layerIndexToLayer[_layerIndexToLayer.size() - 1]; memcpy( output, outputLayer->getAssignment(), sizeof( double ) * outputLayer->getSize() ); } +void NetworkLevelReasoner::setBounds( unsigned layer, + unsigned int neuron, + double lower, + double upper ) +{ + ASSERT( layer < _layerIndexToLayer.size() ); + _layerIndexToLayer[layer]->setBounds( neuron, lower, upper ); +} -void NetworkLevelReasoner::concretizeInputAssignment( Map &assignment ) +void NetworkLevelReasoner::concretizeInputAssignment( Map &assignment, + const double *pgdAdversarialInput ) { Layer *inputLayer = _layerIndexToLayer[0]; ASSERT( inputLayer->getLayerType() == Layer::INPUT ); @@ -145,6 +156,8 @@ void NetworkLevelReasoner::concretizeInputAssignment( Map &ass { unsigned variable = inputLayer->neuronToVariable( index ); double value = _tableau->getValue( variable ); + if ( pgdAdversarialInput ) + value = pgdAdversarialInput[index]; input[index] = value; assignment[variable] = value; } @@ -200,6 +213,8 @@ void NetworkLevelReasoner::clearConstraintTightenings() void NetworkLevelReasoner::symbolicBoundPropagation() { + _boundTightenings.clear(); + for ( unsigned i = 0; i < _layerIndexToLayer.size(); ++i ) _layerIndexToLayer[i]->computeSymbolicBounds(); } @@ -211,6 +226,15 @@ void NetworkLevelReasoner::deepPolyPropagation() _deepPolyAnalysis->run(); } +void NetworkLevelReasoner::alphaCrown() +{ +#ifdef BUILD_TORCH + if ( _alphaCrown == nullptr ) + _alphaCrown = std::unique_ptr( new AlphaCrown( this ) ); + 
_alphaCrown->run(); +#endif +} + void NetworkLevelReasoner::lpRelaxationPropagation() { LPFormulator lpFormulator( this ); diff --git a/src/nlr/NetworkLevelReasoner.h b/src/nlr/NetworkLevelReasoner.h index 2660795be6..95d292b8a6 100644 --- a/src/nlr/NetworkLevelReasoner.h +++ b/src/nlr/NetworkLevelReasoner.h @@ -16,6 +16,7 @@ #ifndef __NetworkLevelReasoner_h__ #define __NetworkLevelReasoner_h__ +#include "AlphaCrown.h" #include "DeepPolyAnalysis.h" #include "ITableau.h" #include "Layer.h" @@ -74,12 +75,14 @@ class NetworkLevelReasoner : public LayerOwner Perform an evaluation of the network for a specific input. */ void evaluate( double *input, double *output ); + void setBounds( unsigned layer, unsigned int neuron, double lower, double upper ); /* Perform an evaluation of the network for the current input variable assignment and store the resulting variable assignment in the assignment. */ - void concretizeInputAssignment( Map &assignment ); + void concretizeInputAssignment( Map &assignment, + const double *pgdAdversarialInput = nullptr ); /* Perform a simulation of the network for a specific input @@ -124,6 +127,7 @@ class NetworkLevelReasoner : public LayerOwner void intervalArithmeticBoundPropagation(); void symbolicBoundPropagation(); void deepPolyPropagation(); + void alphaCrown(); void lpRelaxationPropagation(); void LPTighteningForOneLayer( unsigned targetIndex ); void MILPPropagation(); @@ -209,6 +213,7 @@ class NetworkLevelReasoner : public LayerOwner std::unique_ptr _deepPolyAnalysis; + std::unique_ptr _alphaCrown; void freeMemoryIfNeeded(); diff --git a/src/nlr/tests/Test_AlphaCrown.h b/src/nlr/tests/Test_AlphaCrown.h new file mode 100644 index 0000000000..1b6c7fbd4a --- /dev/null +++ b/src/nlr/tests/Test_AlphaCrown.h @@ -0,0 +1,211 @@ +// +// Created by maya-swisa on 8/6/25. 
+// + +#ifndef TEST_ALPHACROWN_H +#define TEST_ALPHACROWN_H + +#include "../../engine/tests/MockTableau.h" +#include "AcasParser.h" +#include "CWAttack.h" +#include "Engine.h" +#include "InputQuery.h" +#include "Layer.h" +#include "NetworkLevelReasoner.h" +#include "PropertyParser.h" +#include "Tightening.h" + +#include +#include + +class AlphaCrownAnalysisTestSuite : public CxxTest::TestSuite +{ +public: + void setUp() + { + } + + void tearDown() + { + } + + // void testWithAttack() + // { + // #ifdef BUILD_TORCH + // + // auto networkFilePath = "../../../resources/nnet/acasxu/" + // "ACASXU_experimental_v2a_1_1.nnet"; + // auto propertyFilePath = "../../../resources/properties/" + // "acas_property_4.txt"; + // + // auto *_acasParser = new AcasParser( networkFilePath ); + // InputQuery _inputQuery; + // _acasParser->generateQuery( _inputQuery ); + // PropertyParser().parse( propertyFilePath, _inputQuery ); + // std::unique_ptr _engine = std::make_unique(); + // Options *options = Options::get(); + // options->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "alphacrown" ); + // // obtain the alpha crown proceeder + // _engine->processInputQuery( _inputQuery ); + // NLR::NetworkLevelReasoner *_networkLevelReasoner = + // _engine->getNetworkLevelReasoner(); TS_ASSERT_THROWS_NOTHING( + // _networkLevelReasoner->obtainCurrentBounds() ); std::unique_ptr cwAttack = + // std::make_unique( _networkLevelReasoner ); auto + // attackResultAfterBoundTightening = cwAttack->runAttack(); TS_ASSERT( + // !attackResultAfterBoundTightening ); delete _acasParser; + + void populateNetwork( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 R 1 R 1 1 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 1 \ / 0 \ / + \/ \/ \/ + /\ /\ /\ + 1 / \ 1 / \ 1 / \ + / \ R / \ R / 1 \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + -1 -1 + + The example described in Fig. 
3 of + https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, 1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 0 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + + nlr.setBias( 5, 0, 1 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + 
tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void test_alphacrown_relus() + { + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetwork( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke alpha crow + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.alphaCrown() ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + for ( const auto &bound : bounds ) + { + if ( bound._type == Tightening::LB ) + printf( "lower:\n" ); + else + printf( "upper:\n" ); + std::cout << "var : " << bound._variable << " bound : " << bound._value << std::endl; + } + + double large = 1000000; + nlr.setBounds( nlr.getNumberOfLayers() -1 , 1, 0.1 , large ); + std::unique_ptr cwAttack = std::make_unique( &nlr ); + auto attackResultAfterBoundTightening = cwAttack->runAttack(); + TS_ASSERT( !attackResultAfterBoundTightening ); + + nlr.setBounds( nlr.getNumberOfLayers() -1 , 1, -large , -0.1 ); + cwAttack = std::make_unique( &nlr ); + attackResultAfterBoundTightening = cwAttack->runAttack(); + TS_ASSERT( 
!attackResultAfterBoundTightening );
+
+        nlr.setBounds( nlr.getNumberOfLayers() -1 , 0 , -large , 0.99 );
+        cwAttack = std::make_unique<CWAttack>( &nlr );
+        attackResultAfterBoundTightening = cwAttack->runAttack();
+        TS_ASSERT( !attackResultAfterBoundTightening );
+
+        nlr.setBounds( nlr.getNumberOfLayers() -1 , 0 , 1.1 , large );
+        cwAttack = std::make_unique<CWAttack>( &nlr );
+        attackResultAfterBoundTightening = cwAttack->runAttack();
+        TS_ASSERT( !attackResultAfterBoundTightening );
+
+
+    }
+};
+
+#endif // TEST_ALPHACROWN_H
diff --git a/src/nlr/tests/Test_NetworkLevelReasoner.h b/src/nlr/tests/Test_NetworkLevelReasoner.h
index b74cd3931c..9ac9e083f3 100644
--- a/src/nlr/tests/Test_NetworkLevelReasoner.h
+++ b/src/nlr/tests/Test_NetworkLevelReasoner.h
@@ -7610,7 +7610,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
 
         Map<unsigned, double> assignment;
 
-        TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment, nullptr ) );
 
         TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 1 ) );
         TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 4 ) );
@@ -7623,7 +7623,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         tableau.nextValues[0] = 1;
         tableau.nextValues[1] = 1;
 
-        TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment, nullptr ) );
 
         TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 1 ) );
         TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 1 ) );
@@ -7635,7 +7635,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         tableau.nextValues[0] = 1;
         tableau.nextValues[1] = 2;
 
-        TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment, nullptr ) );
 
         TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 0 ) );
         TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 0 ) );
diff --git a/src/query_loader/QueryLoader.h b/src/query_loader/QueryLoader.h
index 195a002c47..c8d1c0732c 100644
--- a/src/query_loader/QueryLoader.h
+++ b/src/query_loader/QueryLoader.h
@@ -19,7 +19,7 @@
 
 #include "IQuery.h"
 
-#define QL_LOG( x, ... ) LOG( GlobalConfiguration::QUERY_LOADER_LOGGING, "QueryLoader: %s\n", x )
+#define QL_LOG( x, ... ) MARABOU_LOG( GlobalConfiguration::QUERY_LOADER_LOGGING, "QueryLoader: %s\n", x )
 
 class QueryLoader
 {
diff --git a/tools/download_libtorch.sh b/tools/download_libtorch.sh
new file mode 100755
index 0000000000..90f1884b42
--- /dev/null
+++ b/tools/download_libtorch.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+curdir=$PWD
+mydir="${0%/*}"
+version=$1
+
+cd $mydir
+
+# Need to download the cxx11-abi version of libtorch in order to ensure compatibility
+# with boost.
+#
+# See https://discuss.pytorch.org/t/issues-linking-with-libtorch-c-11-abi/29510 for details.
+echo "Downloading PyTorch"
+wget https://download.pytorch.org/libtorch/cpu/libtorch-cxx11-abi-shared-with-deps-$version%2Bcpu.zip -O libtorch-$version.zip -q --show-progress --progress=bar:force:noscroll
+
+echo "Unzipping PyTorch"
+unzip libtorch-$version.zip >> /dev/null
+mv libtorch libtorch-$version
+
+cd $curdir
From 01a938d81fe3fe53e0c9ab1ba9a311ba2b1f2b42 Mon Sep 17 00:00:00 2001
From: Avi Porges <151055500+Avi-Porges@users.noreply.github.com>
Date: Tue, 2 Sep 2025 22:26:02 +0300
Subject: [PATCH 33/33] delete whitespace

---
 CMakeLists.txt | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index bf6c6cbed4..b0667982b2 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -20,7 +20,7 @@ option(RUN_SYSTEM_TEST "Run system tests on build" OFF)
 option(RUN_MEMORY_TEST "Run cxxtest testing with ASAN ON" ON)
 option(RUN_PYTHON_TEST "Run Python API tests if building with Python" OFF)
 option(ENABLE_GUROBI "Enable use the Gurobi optimizer" OFF)
-option(ENABLE_OPENBLAS "Do symbolic bound tighting using blas" OFF) # Not available on Windows +option(ENABLE_OPENBLAS "Do symbolic bound tighting using blas" ON) # Not available on Windows option(CODE_COVERAGE "Add code coverage" OFF) # Available only in debug mode option(BUILD_TORCH "Build libtorch" ON) ################### @@ -508,4 +508,4 @@ add_dependencies(build_input_parsers ${MPS_PARSER} ${ACAS_PARSER} add_subdirectory(${SRC_DIR}) add_subdirectory(${TOOLS_DIR}) -add_subdirectory(${REGRESS_DIR}) \ No newline at end of file +add_subdirectory(${REGRESS_DIR})