diff --git a/.gitignore b/.gitignore
index f25699b..f28bd8c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,14 +1,26 @@
-# Warning! Ignore all files w/out extensions
-# Ignore all
-*
-# Unignore all with extensions
-!*.*
-# Unignore all dirs
-!*/
-# Unignore other
-!LICENSE
-!Makefile
-
+# MINE
+bin/
+build/
+
+# Log output
+logs/
+
+# CMAKE
+CMakeLists.txt.user
+CMakeCache.txt
+CMakeFiles
+CMakeScripts
+Testing
+Makefile
+cmake_install.cmake
+install_manifest.txt
+compile_commands.json
+CTestTestfile.cmake
+_deps
+CMakeUserPresets.json
+
+
+# C++
 # Cache
 .ccls-cache
 # out
diff --git a/CMakeLists.txt b/CMakeLists.txt
new file mode 100644
index 0000000..1880127
--- /dev/null
+++ b/CMakeLists.txt
@@ -0,0 +1,23 @@
+cmake_minimum_required(VERSION 3.10)
+
+# Project name and version
+project(LEDAtools VERSION 1.0 LANGUAGES CXX)
+
+# Specify the C++ standard
+set(CMAKE_CXX_STANDARD 20)
+set(CMAKE_CXX_STANDARD_REQUIRED True)
+
+# Global compiler flags
+add_compile_options(-O3 -Wall -Wextra -Wno-sign-compare)
+# add_compile_options(-O0 -g3 -Wall -Wextra -Wno-sign-compare)
+
+# Include directories
+include_directories(${PROJECT_SOURCE_DIR}/include)
+
+add_subdirectory(src)
+add_subdirectory(examples)
+# add_subdirectory(test)
+
+# Installation directories
+set(EXECUTABLE_OUTPUT_PATH ${CMAKE_BINARY_DIR}/bin)
+set(LIBRARY_OUTPUT_PATH ${CMAKE_BINARY_DIR}/lib)
diff --git a/Makefile b/Makefile
deleted file mode 100644
index 84fa351..0000000
--- a/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-CXXFLAGS=-O3 -g3 -std=c++11 -Wall -Wextra -Wno-sign-compare
-LDLIBS= -lntl -lgmp -lm
-TARGETS=constant_weight_encodable_bits enumeration_complexity parameter_generator work_factor_computation
-
-all: $(TARGETS)
-
-clean:
-	rm -f $(TARGETS)
diff --git a/README_new.md b/README_new.md
new file mode 100644
index 0000000..a4b4e98
--- /dev/null
+++ b/README_new.md
@@ -0,0 +1,29 @@
+* Added
+- CMakeLists
+
+Dependencies:
+- spdlog for logging
+- fmt (comes with spdlog)
+
+Executables:
+- work_factor_computation
+Computes the work factor. It accepts either a JSON file or a plain set of parameters.
+
+* Structure
+- include/utils
+All the .hpp headers
+- src/utils
+All the .cpp files corresponding to the previous headers
+- src/tools
+All the output tools, i.e., the executables to use
+
+* Compile
+To create the binaries (placed in the local `bin` directory), run the following
+inside `build`:
+
+```sh
+cmake -DCMAKE_INSTALL_PREFIX=.. ..
+make install -j +``` + +Then, you can execute the files as ./bin/ diff --git a/binomials.hpp b/binomials.hpp deleted file mode 100644 index 35c0378..0000000 --- a/binomials.hpp +++ /dev/null @@ -1,94 +0,0 @@ -#pragma once -#include -#include -#include -#include - -/* binomials are precomputed up to MAX_N-choose-MAX_T */ -#define MAX_N 2000 -#define MAX_T 300 - -#define LOW_K_MAX_N 10000 -#define LOW_K_MAX_T 10 - -const NTL::RR nat_log_2 = NTL::log(NTL::RR(2)); - -static inline NTL::RR log2_RR(NTL::RR v){ - return NTL::log(v)/nat_log_2; -} - -NTL::Mat binomial_table; /*contains all binomials up to MAX_N-choose-MAX_T */ -NTL::Mat low_k_binomial_table; /*contains all binomials up to LOW_K_MAX_N-choose-LOW_K_MAX_T */ - -NTL::RR pi; -/*NOTE: NTL allows to access matrices as 1- based with Matlab notation */ -void InitBinomials(){ - std::cerr << "Precomputing n-choose-t up to n: " << MAX_N << - " t: " << MAX_T << std::endl; - binomial_table.SetDims(MAX_N+1,MAX_T+1); - binomial_table[0][0] = NTL::ZZ(1); - for (unsigned i = 1 ; i <= MAX_N; i++){ - binomial_table[i][0] = NTL::ZZ(1); - binomial_table[i][1] = NTL::ZZ(i); - for(unsigned j=2 ; (j <= i) && (j <= MAX_T) ; j++){ - binomial_table[i][j] = binomial_table[i][j-1] * NTL::ZZ(i-j+1) / NTL::ZZ(j); - } - } - std::cerr << "Precomputing low n-choose-t up to n: " << LOW_K_MAX_N << - " t: " << LOW_K_MAX_T << std::endl; - low_k_binomial_table.SetDims(LOW_K_MAX_N+1,LOW_K_MAX_T+1); - low_k_binomial_table[0][0] = NTL::ZZ(1); - for (unsigned i = 0 ; i <= LOW_K_MAX_N; i++){ - low_k_binomial_table[i][0] = NTL::ZZ(1); - low_k_binomial_table[i][1] = NTL::ZZ(i); - for(unsigned j=2 ; (j <= i) && (j <= LOW_K_MAX_T) ; j++){ - low_k_binomial_table[i][j] = low_k_binomial_table[i][j-1] * NTL::ZZ(i-j+1) / NTL::ZZ(j); - } - } - std::cerr << "done" << std::endl; - pi = NTL::ComputePi_RR(); -} - - - -NTL::RR lnFactorial(NTL::RR n){ - /* log of Stirling series approximated to the fourth term - * n log(n) - n + 1/2 log(2 \pi n) + log(- 139/(51840 n^3) + - * + 1/(288 n^2) + 1/(12 n) + 1) */ - return n * NTL::log(n) - n + 0.5 * NTL::log(2*pi*n) + - NTL::log( - NTL::RR(139)/(n*n*n * 51840) + - NTL::RR(1)/(n*n*288) + - NTL::RR(1)/(n*12) + - 1); -} - -NTL::RR lnBinom(NTL::RR n, NTL::RR k){ - if ( (k == NTL::RR(0) ) || (k == n) ) { - return NTL::RR(0); - } - return lnFactorial(n) - (lnFactorial(k) + lnFactorial(n-k) ); -} - - -NTL::ZZ binomial_wrapper(long n, long k){ - if(k>n) return NTL::ZZ(0); - /* employ memoized if available */ - if ((n <= MAX_N) && (k < MAX_T)){ - return binomial_table[n][k]; - } - if ((n <= LOW_K_MAX_N) && (k < LOW_K_MAX_T)){ - return low_k_binomial_table[n][k]; - } - - /* shortcut computation for fast cases (k < 10) where - * Stirling may not provide good approximations */ - if (k < 10) { - NTL::ZZ result = NTL::ZZ(1); - for(int i = 1 ; i <= k; i++){ - result = (result * (n+1-i))/i; - } - return result; - } - /*Fall back to Stirling*/ - return NTL::conv( NTL::exp( lnBinom(NTL::RR(n),NTL::RR(k)) )); -} diff --git a/bit_error_probabilities.hpp b/bit_error_probabilities.hpp deleted file mode 100644 index b2145e7..0000000 --- a/bit_error_probabilities.hpp +++ /dev/null @@ -1,309 +0,0 @@ -#pragma once -#include -#include -#include "proper_primes.hpp" -#include "binomials.hpp" - -// choice of the approximation praxis for the estimated fraction of an error -// to appear in the next iteration of a bit-flipping decoder -#define ROUNDING_PRAXIS round - -/* Probability that a variable node is correct, and a parity equation involving - * it is satisfied */ -NTL::RR 
compute_p_cc(const uint64_t d_c, - const uint64_t n, - const uint64_t t){ - NTL::RR result = NTL::RR(0); - uint64_t bound = (d_c - 1) < t ? d_c - 1 : t; - - /* the number of errors falling in the PC equation should be at least - * the amount which cannot be placed in a non checked place */ - uint64_t LowerTHitBound = (n-d_c) < t ? t-(n-d_c) : 0; - /* and it should be even, since the PC equation must be satisfied */ - LowerTHitBound = LowerTHitBound % 2 ? LowerTHitBound + 1 : LowerTHitBound; - - for(uint64_t j = LowerTHitBound; j <= bound; j = j+2 ){ - result += to_RR( binomial_wrapper(d_c-1,j) * binomial_wrapper(n-d_c,t-j) ) / - to_RR( binomial_wrapper(n-1,t) ); - } - return result; -} - -/* Probability that a variable node is correct, and a parity equation involving - * it is *not* satisfied */ -NTL::RR compute_p_ci(const uint64_t d_c, - const uint64_t n, - const uint64_t t){ - NTL::RR result = NTL::RR(0); - uint64_t bound = (d_c - 1) < t ? d_c - 1 : t; - - /* the number of errors falling in the PC equation should be at least - * the amount which cannot be placed in a non checked place */ - uint64_t LowerTHitBound = (n-d_c) < t ? t-(n-d_c) : 1; - /* and it should be odd, since the PC equation must be non satisfied */ - LowerTHitBound = LowerTHitBound % 2 ? LowerTHitBound : LowerTHitBound + 1; - - for(uint64_t j = LowerTHitBound; j <= bound; j = j+2 ){ - result += to_RR( binomial_wrapper(d_c-1,j) * binomial_wrapper(n-d_c,t-j) ) - / to_RR( binomial_wrapper(n-1,t) ); - } - return result; -} - -/* Probability that a variable node is *not* correct, and a parity equation involving - * it is *not* satisfied */ -NTL::RR compute_p_ic(const uint64_t d_c, - const uint64_t n, - const uint64_t t){ - NTL::RR result = NTL::RR(0); - uint64_t UpperTBound = (d_c - 1) < t - 1 ? d_c - 1 : t - 1; - - /* the number of errors falling in the PC equation should be at least - * the amount which cannot be placed in a non checked place */ - uint64_t LowerTHitBound = (n-d_c-1) < (t-1) ? (t-1)-(n-d_c-1) : 0; - /* and it should be even, since the PC equation must be unsatisfied (when - * accounting for the one we are considering as already placed*/ - LowerTHitBound = LowerTHitBound % 2 ? LowerTHitBound + 1 : LowerTHitBound; - - for(uint64_t j = LowerTHitBound; j <= UpperTBound; j = j+2 ){ - result += NTL::to_RR( binomial_wrapper(d_c-1,j) * binomial_wrapper(n-d_c,t-j-1) ) - / to_RR( binomial_wrapper(n-1,t-1) ); - } - return result; -} - -/* Probability that a variable node is *not* correct, and a parity equation involving - * it is satisfied */ -NTL::RR compute_p_ii(const uint64_t d_c, - const uint64_t n, - const uint64_t t){ - - NTL::RR result = NTL::RR(0); - uint64_t bound = (d_c - 1) < t - 1 ? d_c - 1 : t - 1; - - /* the number of errors falling in the PC equation should be at least - * the amount which cannot be placed in a non checked place */ - uint64_t LowerTHitBound = (n-d_c) < (t-1) ? (t-1)-(n-d_c) : 1; - /* and it should be odd, since the PC equation must be satisfied (when - * accounting for the one we are considering as already placed)*/ - LowerTHitBound = LowerTHitBound % 2 ? 
LowerTHitBound : LowerTHitBound +1; - for(uint64_t j = LowerTHitBound; j <= bound; j = j+2 ){ - result += NTL::to_RR( binomial_wrapper(d_c-1,j) * binomial_wrapper(n-d_c,t-j-1) ) - / to_RR( binomial_wrapper(n-1,t-1) ); - } - return result; -} - -/* note p_cc + p_ci = 1 */ -/* note p_ic + p_ii = 1 */ - -/* Probability that a given erroneous variable is deemed as such, and is thus - * corrected, given a threshold for the amount of unsatisfied parity check - * equations. Called P_ic in most texts */ -NTL::RR ComputePrBitCorrection( const NTL::RR p_ic, - const uint64_t d_v, - // const uint64_t t, - const uint64_t threshold ){ -// Pic=0; /* p_correct */ -// for (j=b,dv, -// term=binomial(dv,j)*(p_ic^j)*(1-p_ic)^(dv-j); -// Pic=Pic+term; -// ); - NTL::RR result = NTL::RR(0), success, failure; - for (uint64_t j = threshold; j <= d_v; j++){ - NTL::pow(success, p_ic, NTL::to_RR(j)); - NTL::pow(failure, NTL::RR(1)-p_ic, NTL::to_RR(d_v-j)); - result += NTL::to_RR(binomial_wrapper(d_v,j)) * success * failure; - } - return result; -} - -/* Probability that a given correct variable is not deemed as such, and is thus - * fault-induced, given a threshold for the amount of unsatisfied parity check - * equations. Called P_ci in most texts, p_induce in official comment */ -NTL::RR ComputePrBitFaultInduction( const NTL::RR p_ci, - const uint64_t d_v, - // const uint64_t t, /* unused */ - const uint64_t threshold ){ - - NTL::RR result= NTL::RR(0), success, failure; - for (uint64_t j = threshold; j <= d_v; j++){ - NTL::pow(success, p_ci, NTL::to_RR(j)); - NTL::pow(failure, NTL::RR(1)-p_ci, NTL::to_RR(d_v-j)); - result += NTL::to_RR(binomial_wrapper(d_v,j)) * success * failure; - } - return result; -} - -/* computes the probability that toCorrect bits are corrected - * known as P{N_ic = toCorrect} */ -NTL::RR ComputePrBitCorrectionMulti( const NTL::RR p_ic, - const uint64_t d_v, - const uint64_t t, - const uint64_t threshold, - const uint64_t toCorrect){ - NTL::RR ProbCorrectOne = ComputePrBitCorrection(p_ic,d_v,threshold); - return NTL::to_RR(binomial_wrapper(t,toCorrect)) * - NTL::pow(ProbCorrectOne,NTL::RR(toCorrect)) * - NTL::pow(1-ProbCorrectOne,NTL::RR(t-toCorrect)); -} - -/* computes the probability that toInduce faults are induced - * known as P{N_ci = toInduce} or Pr{f_wrong = to_induce} */ -NTL::RR ComputePrBitInduceMulti(const NTL::RR p_ci, - const uint64_t d_v, - const uint64_t t, - const uint64_t n, - const uint64_t threshold, - const uint64_t toInduce){ -// if(toInduce <= 1 ){ -// return NTL::RR(0); -// } - NTL::RR ProbInduceOne = ComputePrBitFaultInduction(p_ci,d_v,threshold); - return NTL::to_RR(binomial_wrapper(n-t,toInduce)) * - NTL::pow(ProbInduceOne,NTL::RR(toInduce)) * - NTL::pow(1-ProbInduceOne,NTL::RR(n-t-toInduce)); -} - -uint64_t FindNextNumErrors(const uint64_t n_0, - const uint64_t p, - const uint64_t d_v, - const uint64_t t){ - NTL::RR p_ci, p_ic; - p_ci = compute_p_ci(n_0*d_v,n_0*p,t); - p_ic = compute_p_ic(n_0*d_v,n_0*p,t); - uint64_t t_next=t; -// uint64_t best_threshold = (d_v - 1)/2; - for(uint64_t i = (d_v - 1)/2; i <= d_v - 1; i++){ - NTL::RR t_approx= t - - t * ComputePrBitCorrection(p_ic, d_v, i) + - (n_0*p - t) * ComputePrBitFaultInduction(p_ci, d_v, i); - unsigned long int t_curr = NTL::conv(NTL::ROUNDING_PRAXIS(t_approx)) ; - /*Note : we increase the threshold only if it improves strictly on the - * predicted error correction. 
*/ - if (t_curr < t_next){ - t_next = t_curr; -// best_threshold = i; - } - } - /* considering that any code will correct a single bit error, if - * t_next == 1, we save a computation iteration and shortcut to t_next == 0*/ - if (t_next == 1) { - t_next = 0; - } - return t_next; -} - -/* computes the exact 1-iteration DFR and the best threshold on the number of - * upcs to achieve it */ -std::pair Find1IterDFR(const uint64_t n_0, - const uint64_t p, - const uint64_t d_v, - const uint64_t t){ - NTL::RR p_ci, p_ic, P_correct, P_induce; - NTL::RR DFR, best_DFR = NTL::RR(1); - p_ci = compute_p_ci(n_0*d_v,n_0*p,t); - p_ic = compute_p_ic(n_0*d_v,n_0*p,t); - uint64_t best_threshold = (d_v - 1)/2; - for(uint64_t b = best_threshold; b <= d_v - 1; b++){ - DFR = NTL::RR(1) - ComputePrBitCorrectionMulti(p_ic, d_v, t, b, t) * ComputePrBitInduceMulti(p_ci,d_v,t,n_0*p,b,0); - /*Note : we increase the threshold only if it improves strictly on the - * predicted error correction. */ - if (DFR < best_DFR){ - best_DFR = DFR; - best_threshold = b; - } - } -// std::cout << best_threshold << std::endl; - return std::make_pair(best_DFR,best_threshold); -} - - -/* computes the exact 1-iteration probability of leaving at most t_leftover - * uncorrected errors out of t. */ -std::pair Find1IterTLeftoverPr(const uint64_t n_0, - const uint64_t p, - const uint64_t d_v, - const uint64_t t, - const uint64_t t_leftover){ - NTL::RR p_ci, p_ic; - NTL::RR DFR, best_DFR = NTL::RR(1); - p_ci = compute_p_ci(n_0*d_v,n_0*p,t); - p_ic = compute_p_ic(n_0*d_v,n_0*p,t); - int n= p*n_0; - uint64_t best_threshold = (d_v + 1)/2; - - for(uint64_t b = best_threshold; b <= d_v ; b++){ - DFR = NTL::RR(0); - NTL::RR P_correct = ComputePrBitCorrection(p_ic, d_v,b); - NTL::RR P_induce = ComputePrBitFaultInduction(p_ci,d_v,b); - for(int tau = 0 ; tau <= t_leftover; tau++){ - for(int n_to_induce = 0 ; n_to_induce <= t_leftover; n_to_induce++) { - NTL::RR prob_induce_n = NTL::to_RR(binomial_wrapper(n-t,n_to_induce)) * - NTL::pow(P_induce,NTL::to_RR(n_to_induce)) * - NTL::pow(NTL::RR(1)-P_induce,NTL::to_RR(n-t-n_to_induce)); - int n_to_correct = (int)t + n_to_induce - tau; - NTL::RR prob_correct_n = NTL::to_RR(binomial_wrapper(t,n_to_correct)); - prob_correct_n *= NTL::pow(P_correct,NTL::to_RR(n_to_correct)); - - prob_correct_n *= NTL::pow(NTL::RR(1)-P_correct,NTL::to_RR((int)t-n_to_correct)); /*unsigned exp?*/ - DFR += prob_correct_n*prob_induce_n; - } - } - DFR = NTL::RR(1) - DFR; - if (DFR < best_DFR){ - best_DFR = DFR; - best_threshold = b; - } - } - return std::make_pair(best_DFR,best_threshold); -} - -// find minimum p which, asymptotically, corrects all errors -// search performed via binary search as the DFR is decreasing monot. 
-// in of p -uint64_t Findpth(const uint64_t n_0, - const uint64_t d_v_prime, - const uint64_t t){ - - unsigned int prime_idx = 0, prime_idx_prec; - uint64_t p = proper_primes[prime_idx]; - while(p < d_v_prime || p < t ){ - prime_idx++; - p=proper_primes[prime_idx]; - } - - uint64_t hi, lo; - lo = prime_idx; - hi = PRIMES_NO; - prime_idx_prec = lo; - - uint64_t limit_error_num = t; - while(hi-lo > 1){ - prime_idx_prec = prime_idx; - prime_idx = (lo+hi)/2; - p = proper_primes[prime_idx]; - // compute number of remaining errors after +infty iters - limit_error_num = t; - uint64_t current_error_num; -// std::cout << "using p:"<< p << ", errors dropping as "; - do { - current_error_num = limit_error_num; - limit_error_num = FindNextNumErrors(n_0, p, d_v_prime, current_error_num); -// std::cout << limit_error_num << " "; - } while ( - (limit_error_num != current_error_num) && - (limit_error_num != 0) - ); -// std::cout << std::endl; - if (limit_error_num > 0){ - lo = prime_idx; - } else { - hi = prime_idx; - } - } - if(limit_error_num == 0) { - return proper_primes[prime_idx]; - } - return proper_primes[prime_idx_prec]; -} diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt new file mode 100644 index 0000000..cf73c45 --- /dev/null +++ b/examples/CMakeLists.txt @@ -0,0 +1,14 @@ +find_library(ledautils ledautils) +message(STATUS "ledautils library: ${ledautils_LIBRARIES}") + +find_package(spdlog REQUIRED) +find_package(fmt REQUIRED) + +add_executable(isd_cost_estimate_ex isd_cost_estimate_ex.cpp) +set(LIBS ledautils ${NTL_LIB} ${GMP_LIB} ${M_LIB} spdlog::spdlog fmt::fmt) +target_link_libraries(isd_cost_estimate_ex PRIVATE ${LIBS}) + +# # Optionally, specify include directories for the examples +# target_include_directories(isd_cost_estimate_ex PRIVATE ${CMAKE_SOURCE_DIR}/include) + +install(TARGETS isd_cost_estimate_ex DESTINATION bin) diff --git a/examples/isd_cost_estimate_ex.cpp b/examples/isd_cost_estimate_ex.cpp new file mode 100644 index 0000000..a623a08 --- /dev/null +++ b/examples/isd_cost_estimate_ex.cpp @@ -0,0 +1,132 @@ +#include // for uint32_t +#include +#include +#include +#include +#include +#include +#include +#include + +// #include "logging.hpp" + +struct Cost { + std::string algorithm; + std::string type; // CFP1, CFP2, CFP3, SDP + bool is_quantum; + double time_complexity; + double space_complexity; +}; + +struct Value { + uint32_t codeword_size; + uint32_t code_dimension; + uint32_t number_of_errors; + uint32_t qc_block_size; + bool is_kra; + bool is_red_fac; + std::map costs; +}; + +void displayCost(const Cost &cost) { + std::cout << " Algorithm: " << cost.algorithm << '\n'; + std::cout << " Type: " << cost.type << '\n'; + std::cout << " Is Quantum: " << (cost.is_quantum ? "Yes" : "No") << '\n'; + std::cout << " Time Complexity: " << cost.time_complexity << '\n'; + std::cout << " Space Complexity: " << cost.space_complexity << '\n'; +} + +// Function to display a Value object +void displayValue(const Value &value) { + std::cout << "Value:\n"; + std::cout << " Codeword Size: " << value.codeword_size << '\n'; + std::cout << " Code Dimension: " << value.code_dimension << '\n'; + std::cout << " Number of Errors: " << value.number_of_errors << '\n'; + std::cout << " QC Block Size: " << value.qc_block_size << '\n'; + std::cout << " Is KRA: " << (value.is_kra ? "Yes" : "No") << '\n'; + std::cout << " Is Reduction factor applied: " + << (value.is_red_fac ? 
"Yes" : "No") << '\n'; + + std::cout << "Costs:\n"; + for (const auto &costPair : value.costs) { + displayCost(costPair.second); + std::cout << "-----\n"; + } +} + +int main() { + std::cout<< "logger setted up" << std::endl; + + std::vector values; + Cost pra = {"Prange", "", false, 171.3, 0.0}; + Cost lbr = {"Lee-Brickell", "", false, 158.4, 0.0}; + Cost leo = {"Leon", "", false, 154.4, 0.0}; + Cost ste = {"Stern", "", false, 147.4, 0.0}; + Cost fis = {"Fin-Send", "", false, 147.4, 0.0}; + + std::map costs = {{"Prange", pra}, + {"Lee-Brickell", lbr}, + {"Leon", leo}, + {"Stern", ste}, + {"Fin-Send", fis}}; + + Value val = {24646, 12323, 142, 12323, true, true, costs}; + + // displayValue(val); + + Result current_res, min_res; + uint32_t n = val.codeword_size; + uint32_t k = val.code_dimension; + uint32_t t = val.number_of_errors; + double qc_red_fac = get_qc_red_factor_classic_log(val.qc_block_size, n-k, QCAttackType::KRA3); + double diff; + std::string name; + std::cout << "qc_red_fac " << qc_red_fac << std::endl; + for (const auto &algo : std::unordered_set{ + Algorithm::Prange, Algorithm::Lee_Brickell, Algorithm::Leon, + Algorithm::Stern, Algorithm::Finiasz_Sendrier}) { + switch (algo) { + case Algorithm::Prange: + current_res = isd_log_cost_classic_Prange(n, k, t); + name = "Prange"; + break; + case Algorithm::Lee_Brickell: + current_res = isd_log_cost_classic_LB(n, k, t); + name = "Lee-Brickell"; + break; + case Algorithm::Leon: + current_res = isd_log_cost_classic_Leon(n, k, t); + name = "Leon"; + break; + case Algorithm::Stern: + current_res = isd_log_cost_classic_Stern(n, k, t); + name = "Stern"; + break; + case Algorithm::Finiasz_Sendrier: + current_res = isd_log_cost_classic_FS(n, k, t); + name = "Fin-Send"; + break; + case Algorithm::MMT: + current_res = isd_log_cost_classic_MMT(n, k, t); + name = "MMT "; + break; + case Algorithm::BJMM: + current_res = isd_log_cost_classic_BJMM(n, k, t); + name = "BJMM "; + break; + default: + std::cerr << "Unknown algorithm\n"; + break; + } + current_res.value -= qc_red_fac; + diff = std::abs(costs[name].time_complexity - current_res.value); + std::cout << name << ". 
Obtained: " << current_res.value + << " Expected: " << costs[name].time_complexity + << " Diff: " << diff << std::endl; + if (diff >= 1.0) { + std::cerr << "WARNING: huge diff"; + } + } + + return 0; +} diff --git a/include/globals.hpp b/include/globals.hpp new file mode 100644 index 0000000..3e1dcf1 --- /dev/null +++ b/include/globals.hpp @@ -0,0 +1,9 @@ +#pragma once + +#include + +const std::string OUT_DIR_RESULTS = "out/results/json/"; +// It seems impossible in C++ +// std::string OUT_FILE_RESULT_FMT_JSON = "{n:06}_{r:06}_{t:03}.json"; +const std::string LOG_DIR = "logs/"; +const std::string LOG_FILE_DEFAULT = LOG_DIR + "default.log"; diff --git a/proper_primes.hpp b/include/proper_primes.hpp similarity index 99% rename from proper_primes.hpp rename to include/proper_primes.hpp index 38f54e6..8c09d39 100644 --- a/proper_primes.hpp +++ b/include/proper_primes.hpp @@ -2,7 +2,7 @@ #include #define PRIMES_NO 5483 -uint32_t proper_primes[5483] = { +const uint32_t proper_primes[5483] = { 3, 5, 11, diff --git a/include/utils/binomials.hpp b/include/utils/binomials.hpp new file mode 100644 index 0000000..940f3bd --- /dev/null +++ b/include/utils/binomials.hpp @@ -0,0 +1,27 @@ +#pragma once +#include +#include +#include +#include +#include + +/* binomials are precomputed up to MAX_N-choose-MAX_T */ +#define MAX_N 2000 +#define MAX_T 300 + +#define LOW_K_MAX_N 10000 +#define LOW_K_MAX_T 10 + +extern NTL::RR pi; +extern NTL::RR nat_log_2; + +extern NTL::Mat binomial_table; +extern NTL::Mat low_k_binomial_table; +extern bool is_data_initialized; + +void InitBinomials(); + +NTL::RR lnFactorial(NTL::RR n); +NTL::RR lnBinom(NTL::RR n, NTL::RR k); +NTL::ZZ binomial_wrapper(long n, long k); +NTL::RR log2_RR(NTL::RR v); diff --git a/include/utils/bit_error_probabilities.hpp b/include/utils/bit_error_probabilities.hpp new file mode 100644 index 0000000..053d053 --- /dev/null +++ b/include/utils/bit_error_probabilities.hpp @@ -0,0 +1,34 @@ +#pragma once +#include "binomials.hpp" +#include "proper_primes.hpp" +#include +#include + +// choice of the approximation praxis for the estimated fraction of an error +// to appear in the next iteration of a bit-flipping decoder +#define ROUNDING_PRAXIS round + +NTL::RR compute_p_cc(const uint64_t d_c, const uint64_t n, const uint64_t t); +NTL::RR compute_p_ci(const uint64_t d_c, const uint64_t n, const uint64_t t); +NTL::RR compute_p_ic(const uint64_t d_c, const uint64_t n, const uint64_t t); +NTL::RR compute_p_ii(const uint64_t d_c, const uint64_t n, const uint64_t t); +NTL::RR ComputePrBitCorrection(const NTL::RR p_ic, const uint64_t d_v, + const uint64_t threshold); +NTL::RR ComputePrBitFaultInduction(const NTL::RR p_ci, const uint64_t d_v, + const uint64_t threshold); +NTL::RR ComputePrBitCorrectionMulti(const NTL::RR p_ic, const uint64_t d_v, + const uint64_t t, const uint64_t threshold, + const uint64_t toCorrect); +NTL::RR ComputePrBitInduceMulti(const NTL::RR p_ci, const uint64_t d_v, + const uint64_t t, const uint64_t n, + const uint64_t threshold, + const uint64_t toInduce); +uint64_t FindNextNumErrors(const uint64_t n_0, const uint64_t p, + const uint64_t d_v, const uint64_t t); +std::pair Find1IterDFR(const uint64_t n_0, const uint64_t p, + const uint64_t d_v, const uint64_t t); +std::pair +Find1IterTLeftoverPr(const uint64_t n_0, const uint64_t p, const uint64_t d_v, + const uint64_t t, const uint64_t t_leftover); +uint64_t Findpth(const uint64_t n_0, const uint64_t d_v_prime, + const uint64_t t); diff --git a/include/utils/isd_cost_estimate.hpp 
b/include/utils/isd_cost_estimate.hpp new file mode 100644 index 0000000..e5aa859 --- /dev/null +++ b/include/utils/isd_cost_estimate.hpp @@ -0,0 +1,92 @@ +#pragma once +#include +#include +#include +#include +#include +#include + +struct Result { + std::string alg_name; + std::map params; + double value; + double gje_cost; + double list_size; +}; + +std::string result_to_string(const Result &result); + +// Plain does not apply qc reductions +enum class QCAttackType { KRA1, KRA2, KRA3, MRA, Plain, Count}; + +std::string qc_attack_type_to_string(QCAttackType type); + +enum class Algorithm { + Prange, + Lee_Brickell, + Leon, + Stern, + Finiasz_Sendrier, + MMT, + BJMM, + // Add more algorithms here + Count, +}; + +std::string algorithm_to_string(Algorithm algo); + + +enum class QuantumAlgorithm { + Q_Lee_Brickell, + Q_Stern, // NOTE no circuit available + // Add more algorithms here + Count, +}; + +std::string quantum_algorithm_to_string(QuantumAlgorithm algo); + +/***************************Classic ISDs***************************************/ + +const NTL::RR log_probability_k_by_k_is_inv(const NTL::RR &k); +const NTL::RR probability_k_by_k_is_inv(const NTL::RR &k); +const NTL::RR classic_rref_red_cost(const NTL::RR &n, const NTL::RR &r); + +// Classic +Result c_isd_log_cost(const uint32_t n, const uint32_t k, const uint32_t t, + const uint32_t qc_order, QCAttackType attack, + const bool compute_qc_reduction_factor, + std::unordered_set algs); + +double get_qc_red_factor_classic_log(const uint32_t qc_order, const uint32_t n0, + QCAttackType attack); + +Result isd_log_cost_classic_Prange(const uint32_t n, const uint32_t k, + const uint32_t t); +Result isd_log_cost_classic_LB(const uint32_t n, const uint32_t k, + const uint32_t t); +Result isd_log_cost_classic_Leon(const uint32_t n, const uint32_t k, + const uint32_t t); +Result isd_log_cost_classic_Stern(const uint32_t n, const uint32_t k, + const uint32_t t); +Result isd_log_cost_classic_FS(const uint32_t n, const uint32_t k, + const uint32_t t); +Result isd_log_cost_classic_MMT(const uint32_t n, const uint32_t k, + const uint32_t t); +Result isd_log_cost_classic_BJMM_approx(const uint32_t n, const uint32_t k, + const uint32_t t); +Result isd_log_cost_classic_BJMM(const uint32_t n, const uint32_t k, + const uint32_t t); + +// Quantum +Result q_isd_log_cost(const uint32_t n, const uint32_t k, const uint32_t t, + const uint32_t qc_order, QCAttackType attack, + const bool compute_qc_reduction_factor, + std::unordered_set algs); + +double get_qc_red_factor_quantum_log(const uint32_t qc_order, const uint32_t n0, + QCAttackType attack); + +Result isd_log_cost_quantum_LB(const uint32_t n, const uint32_t k, + const uint32_t t); +Result isd_log_cost_quantum_Stern(const uint32_t n, const uint32_t k, + const uint32_t t); diff --git a/include/utils/logging.hpp b/include/utils/logging.hpp new file mode 100644 index 0000000..7ccac17 --- /dev/null +++ b/include/utils/logging.hpp @@ -0,0 +1,81 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace Logger { + class LoggerManager { + public: + static LoggerManager& getInstance(); + + void setup_logger(const std::string& logger_name, + spdlog::level::level_enum console_level, + spdlog::level::level_enum file_level, + const std::string& pattern = "[%Y-%m-%d %H:%M:%S.%e] [%n] [%l] [%s:%#] %v"); + std::shared_ptr get_logger(const std::string& logger_name); + + template + std::string optional_to_string(const std::optional& opt); + + template + std::string 
array_to_string(const std::vector& vec); + + template + std::string array_to_string(const T* array, size_t size); + + private: + LoggerManager() = default; + std::map> loggers; + }; + + template + inline std::string LoggerManager::optional_to_string(const std::optional& opt) { + if (opt) { + return std::to_string(*opt); // Convert the value to string if present + } else { + return "None"; // Represent the absence of value + } + } + + template <> + inline std::string LoggerManager::optional_to_string(const std::optional& opt) { + if (opt) { + return *opt; // Return the string directly if present + } else { + return "None"; // Represent the absence of value + } + } + + template + inline std::string LoggerManager::array_to_string(const std::vector& vec) { + std::string result = "["; + for (size_t i = 0; i < vec.size(); ++i) { + result += std::to_string(vec[i]); + if (i < vec.size() - 1) { + result += ", "; + } + } + result += "]"; + return result; + } + + template + inline std::string LoggerManager::array_to_string(const T* array, size_t size) { + std::string result = "["; + for (size_t i = 0; i < size; ++i) { + result += std::to_string(array[i]); + if (i < size - 1) { + result += ", "; + } + } + result += "]"; + return result; + } +} + diff --git a/include/utils/partitions_permanents.hpp b/include/utils/partitions_permanents.hpp new file mode 100644 index 0000000..4f00f01 --- /dev/null +++ b/include/utils/partitions_permanents.hpp @@ -0,0 +1,23 @@ +#include +#include +#include + +/** + * @brief Computes the permanent of a circulant matrix. + * + * @param mpartition Array of integers representing the partition. + * @param n_0 Size of the partition. + * @return uint64_t Permanent of the circulant matrix. + */ +uint64_t ComputePermanent(int64_t mpartition[], const uint64_t n_0); + +/** + * @brief Finds a partition of m with length n_0. + * + * @param m The integer to partition. + * @param mpartition Vector to store the resulting partition. + * @param n_0 Length of the partition. + * @return int 1 if a good partition is found, 0 otherwise. 
+ */ +int FindmPartition(const uint64_t m, std::vector &mpartition, + const uint64_t n_0); diff --git a/isd_cost_estimate.hpp b/isd_cost_estimate.hpp deleted file mode 100644 index 232e6ab..0000000 --- a/isd_cost_estimate.hpp +++ /dev/null @@ -1,695 +0,0 @@ -#pragma once -#include "binomials.hpp" -#include -#include -#include -#include -#include - -/***************************Classic ISDs***************************************/ - -double isd_log_cost_classic_BJMM_approx(const uint32_t n, - const uint32_t k, - const uint32_t t) { - return ((double)t) * - log((1.0 - (double) k / (double) n)) / log(2); -} - -// computes the probability of a random k * k being invertible -const NTL::RR log_probability_k_by_k_is_inv(const NTL::RR &k) { - NTL::RR log_pinv = NTL::RR(0.5); - for(long i = 2 ; i <=k ; i++){ - log_pinv = log_pinv * (NTL::RR(1) - NTL::power2_RR(-i)); - } - return NTL::log(log_pinv); -} - -const NTL::RR probability_k_by_k_is_inv(const NTL::RR &k) { - NTL::RR log_pinv = NTL::RR(0.5); - for(long i = 2 ; i <=k ; i++){ - log_pinv = log_pinv * (NTL::RR(1) - NTL::power2_RR(-i)); - } - return log_pinv; -} - -const NTL::RR classic_rref_red_cost(const NTL::RR &n, const NTL::RR & r){ - /* simple reduced row echelon form transform, as it is not likely to be the - * bottleneck */ - NTL::RR k = n-r; - return r*r*n/NTL::RR(2) + - (n*r)/NTL::RR(2) - - r*r*r / NTL::RR(6) + - r*r + - r / NTL::RR(6) - NTL::RR(1); -} - -const NTL::RR classic_IS_candidate_cost(const NTL::RR &n, const NTL::RR & r){ - return classic_rref_red_cost(n,r)/probability_k_by_k_is_inv(r) + r*r; -} - -const NTL::RR Fin_Send_rref_red_cost(const NTL::RR &n, - const NTL::RR &r, - const NTL::RR l){ - /* reduced size reduced row echelon form transformation, only yields an - * (r-l) sized identity matrix */ - NTL::RR k = n-r; - return - l*l*l / NTL::RR(3) - - l*l*n / NTL::RR(2) - + l*l*r / NTL::RR(2) - - 3*l*l / NTL::RR(2) - - 3*l*n / NTL::RR(2) - + l*r / NTL::RR(2) - - 13*l / NTL::RR(6) - + n*r*r / NTL::RR(2) - + n*r / NTL::RR(2) - - r*r*r / NTL::RR(6) - + r*r - + r / NTL::RR(6) - - NTL::RR(1); -} - -const NTL::RR Fin_Send_IS_candidate_cost(const NTL::RR &n, - const NTL::RR &r, - const NTL::RR &l){ - return Fin_Send_rref_red_cost(n,r,l)/probability_k_by_k_is_inv(r-l) + r*r; -} - -double isd_log_cost_classic_Prange(const uint32_t n, - const uint32_t k, - const uint32_t t) { - NTL::RR n_real = NTL::RR(n); - NTL::RR k_real = NTL::RR(k); - NTL::RR t_real = NTL::RR(t); - - NTL::RR cost_iter = classic_IS_candidate_cost(n_real,n_real-k_real); - NTL::RR num_iter = NTL::to_RR(binomial_wrapper(n,t)) / - NTL::to_RR(binomial_wrapper(n-k,t)); - - NTL::RR log_cost = log2_RR(num_iter)+ log2_RR(cost_iter); - return NTL::conv( log_cost ); -} - -#define P_MAX_LB 20 -double isd_log_cost_classic_LB(const uint32_t n, - const uint32_t k, - const uint32_t t) { - NTL::RR n_real = NTL::RR(n); - NTL::RR k_real = NTL::RR(k); - NTL::RR t_real = NTL::RR(t); - NTL::RR min_log_cost = n_real; // unreachable upper bound - NTL::RR log_cost; - uint32_t best_p = 1; - uint32_t constrained_max_p = P_MAX_LB > t ? 
t : P_MAX_LB; - NTL::RR IS_candidate_cost; - IS_candidate_cost = classic_IS_candidate_cost(n_real,n_real-k_real); - for(uint32_t p = 1 ;p < constrained_max_p; p++ ){ - NTL::RR p_real = NTL::RR(p); - NTL::RR cost_iter = IS_candidate_cost + - NTL::to_RR(binomial_wrapper(k,p)*p*(n-k)); - NTL::RR num_iter = NTL::to_RR(binomial_wrapper(n,t)) / - NTL::to_RR( binomial_wrapper(k,p) * - binomial_wrapper(n-k,t-p) ); - log_cost = (NTL::log(num_iter)+NTL::log(cost_iter)) / NTL::log(NTL::RR(2)); - if(min_log_cost > log_cost){ - min_log_cost = log_cost; - best_p=p; - } - } - std::cerr << std::endl << "Lee-Brickell best p: " << best_p << std::endl; - return NTL::conv( min_log_cost ); -} - -#define P_MAX_Leon P_MAX_LB -#define L_MAX_Leon 200 -double isd_log_cost_classic_Leon(const uint32_t n, - const uint32_t k, - const uint32_t t) { - NTL::RR n_real = NTL::RR(n); - NTL::RR k_real = NTL::RR(k); - NTL::RR t_real = NTL::RR(t); - NTL::RR min_log_cost = n_real; // unreachable upper bound - NTL::RR log_cost; - uint32_t best_l=0,best_p=1, constrained_max_l, constrained_max_p; - - NTL::RR IS_candidate_cost; - IS_candidate_cost = classic_IS_candidate_cost(n_real,n_real-k_real); - constrained_max_p = P_MAX_Leon > t ? t : P_MAX_Leon; - for(uint32_t p = 1; p < constrained_max_p; p++ ){ - constrained_max_l = ( L_MAX_Leon > (n-k-(t-p)) ? (n-k-(t-p)) : L_MAX_Leon); - NTL::RR p_real = NTL::RR(p); - for(uint32_t l = 0; l < constrained_max_l; l++){ - NTL::RR KChooseP = NTL::to_RR( binomial_wrapper(k,p) ); - NTL::RR cost_iter = IS_candidate_cost + - KChooseP * p_real * NTL::to_RR(l) + - ( KChooseP / NTL::power2_RR(l))* NTL::RR(p * (n-k - l)); - NTL::RR num_iter = NTL::to_RR(binomial_wrapper(n,t)) / - NTL::to_RR( binomial_wrapper(k,p) * - binomial_wrapper(n-k-l,t-p) ); - log_cost = ( NTL::log(num_iter) + NTL::log(cost_iter) ) / NTL::log(NTL::RR(2)); - if(min_log_cost > log_cost){ - min_log_cost = log_cost; - best_l = l; - best_p = p; - } - } - } - std::cerr << std::endl << "Leon Best l: " << best_l << " best p: " << best_p << std::endl; - return NTL::conv( min_log_cost ); -} - - -#define P_MAX_Stern P_MAX_Leon -#define L_MAX_Stern L_MAX_Leon -double isd_log_cost_classic_Stern(const uint32_t n, - const uint32_t k, - const uint32_t t) { - NTL::RR n_real = NTL::RR(n); - NTL::RR k_real = NTL::RR(k); - NTL::RR t_real = NTL::RR(t); - NTL::RR min_log_cost = n_real; // unreachable upper bound - NTL::RR log_cost; - uint32_t best_l = 0,best_p = 2, constrained_max_l, constrained_max_p; - - NTL::RR IS_candidate_cost; - IS_candidate_cost = classic_IS_candidate_cost(n_real,n_real-k_real); - - constrained_max_p = P_MAX_Stern > t ? t : P_MAX_Stern; - for(uint32_t p = 2; p < constrained_max_p; p = p+2 ){ - constrained_max_l = ( L_MAX_Stern > (n-k-(t-p)) ? 
(n-k-(t-p)) : L_MAX_Stern); - NTL::ZZ kHalfChoosePHalf; - for(uint32_t l = 0; l < constrained_max_l; l++){ - NTL::RR p_real = NTL::RR(p); - kHalfChoosePHalf = binomial_wrapper(k/2,p/2); - NTL::RR kHalfChoosePHalf_real = NTL::to_RR(kHalfChoosePHalf); - - NTL::RR cost_iter = IS_candidate_cost + - kHalfChoosePHalf_real * - ( NTL::to_RR(l)*p_real + - (kHalfChoosePHalf_real / NTL::power2_RR(l)) * NTL::RR(p * (n-k - l)) - ); -// #if LOG_COST_CRITERION == 1 - NTL::RR log_stern_list_size = kHalfChoosePHalf_real * - ( p_real/NTL::RR(2) * NTL::log( k_real/NTL::RR(2))/NTL::log(NTL::RR(2) ) +NTL::to_RR(l)); - log_stern_list_size = NTL::log(log_stern_list_size) / NTL::log(NTL::RR(2)); - cost_iter = cost_iter*log_stern_list_size; -// #endif - NTL::RR num_iter = NTL::to_RR(binomial_wrapper(n,t)) / - NTL::to_RR( kHalfChoosePHalf*kHalfChoosePHalf * - binomial_wrapper(n-k-l,t-p) ); - log_cost = log2_RR(num_iter) + log2_RR(cost_iter); - if(min_log_cost > log_cost){ - min_log_cost = log_cost; - best_l = l; - best_p = p; - } - } - } - - std::cerr << std::endl << "Stern Best l: " << best_l << " best p: " << best_p << std::endl; - return NTL::conv( min_log_cost ); -} - -#define P_MAX_FS P_MAX_Stern -#define L_MAX_FS L_MAX_Stern -double isd_log_cost_classic_FS(const uint32_t n, - const uint32_t k, - const uint32_t t) { - NTL::RR n_real = NTL::RR(n); - NTL::RR k_real = NTL::RR(k); - NTL::RR t_real = NTL::RR(t); - NTL::RR min_log_cost = n_real; // unreachable upper bound - NTL::RR log_cost; - uint32_t best_l = 0, best_p = 2,constrained_max_l, constrained_max_p; - - NTL::RR IS_candidate_cost; - constrained_max_p = P_MAX_Stern > t ? t : P_MAX_Stern; - for(uint32_t p = 2; p < constrained_max_p; p = p+2 ){ - constrained_max_l = ( L_MAX_Stern > (n-k-(t-p)) ? (n-k-(t-p)) : L_MAX_Stern); - NTL::RR p_real = NTL::RR(p); - NTL::ZZ kPlusLHalfChoosePHalf; - for(uint32_t l = 0; l < constrained_max_l; l++){ - IS_candidate_cost = Fin_Send_IS_candidate_cost(n_real,n_real-k_real,NTL::RR(l)); - kPlusLHalfChoosePHalf = binomial_wrapper((k+l)/2,p/2); - NTL::RR kPlusLHalfChoosePHalf_real = NTL::to_RR(kPlusLHalfChoosePHalf); - NTL::RR cost_iter = IS_candidate_cost + - kPlusLHalfChoosePHalf_real * - ( NTL::to_RR(l)*p_real + - ( kPlusLHalfChoosePHalf_real / NTL::power2_RR(l)) * - NTL::RR(p * (n-k - l)) - ); -// #if LOG_COST_CRITERION == 1 - NTL::RR l_real = NTL::to_RR(l); - NTL::RR log_FS_list_size = kPlusLHalfChoosePHalf_real * - ( p_real/NTL::RR(2) * NTL::log( (k_real+l_real)/NTL::RR(2))/NTL::log(NTL::RR(2) ) +l_real); - log_FS_list_size = log2_RR(log_FS_list_size); - cost_iter = cost_iter*log_FS_list_size; -// #endif - NTL::RR num_iter = NTL::to_RR(binomial_wrapper(n,t)) / - NTL::to_RR( kPlusLHalfChoosePHalf * kPlusLHalfChoosePHalf * - binomial_wrapper(n-k-l,t-p) ); - - log_cost = log2_RR(num_iter) + log2_RR(cost_iter); - if(min_log_cost > log_cost){ - min_log_cost = log_cost; - best_l = l; - best_p = p; - } - } - } - std::cerr << std::endl << "FS Best l: " << best_l << " best p: " << best_p << std::endl; - return NTL::conv( min_log_cost ); -} - -#define P_MAX_MMT (P_MAX_FS+25) // P_MAX_MMT -#define L_MAX_MMT 350 //L_MAX_MMT -#define L_MIN_MMT 2 -double isd_log_cost_classic_MMT(const uint32_t n, - const uint32_t k, - const uint32_t t) { - uint32_t r = n-k; - NTL::RR n_real = NTL::RR(n); - NTL::RR r_real = NTL::RR(r); - NTL::RR k_real = n_real-r_real; - - - NTL::RR min_log_cost = n_real; // unreachable upper bound - NTL::RR log_cost, log_mem_cost; - uint32_t best_l= L_MIN_MMT, best_l1, best_p = 4, - constrained_max_l = 0, 
constrained_max_p; - - NTL::RR FS_IS_candidate_cost; - constrained_max_p = P_MAX_MMT > t ? t : P_MAX_MMT; - /* p should be divisible by 4 in MMT */ - for(uint32_t p = 4; p <= constrained_max_p; p = p+4 ){ - constrained_max_l = ( L_MAX_MMT > (n-k-(t-p)) ? (n-k-(t-p)) : L_MAX_MMT ); - for(uint32_t l = L_MIN_MMT; l <= constrained_max_l; l++){ - NTL::RR l_real = NTL::to_RR(l); - NTL::ZZ kPlusLHalfChoosePHalf = binomial_wrapper((k+l)/2,p/2); - NTL::RR num_iter = NTL::to_RR(binomial_wrapper(n,t)) / - NTL::to_RR( kPlusLHalfChoosePHalf * kPlusLHalfChoosePHalf * - binomial_wrapper(n-k-l,t-p) ); - FS_IS_candidate_cost = Fin_Send_IS_candidate_cost(n_real,r_real,l_real); - NTL::ZZ kPlusLHalfChoosePFourths = binomial_wrapper((k+l)/2,p/4); - NTL::RR kPlusLHalfChoosePFourths_real = NTL::to_RR(kPlusLHalfChoosePFourths); - NTL::RR minOperandRight, min; - NTL::RR PChoosePHalf = NTL::to_RR(binomial_wrapper(p,p/2)); - NTL::RR kPlusLChoosePHalf = NTL::to_RR(binomial_wrapper((k+l),p/2)); - minOperandRight = NTL::to_RR(binomial_wrapper((k+l)/2,p/2)) / PChoosePHalf; - min = kPlusLHalfChoosePFourths_real > minOperandRight ? minOperandRight : kPlusLHalfChoosePFourths_real; - - /* hoist out anything not depending on l_1/l_2 split*/ -#if defined(EXPLORE_REPRS) - for(uint32_t l_1 = 1 ; l_1 <= l ; l_1++){ - uint32_t l_2= l-l_1; -#else - uint32_t l_2 = NTL::conv(log2_RR(kPlusLHalfChoosePFourths_real / NTL::to_RR(binomial_wrapper(p,p/2)))); - /*clamp l_2 to a safe value , 0 < l_2 < l*/ - l_2 = l_2 <= 0 ? 1 : l_2; - l_2 = l_2 >= l ? l-1 : l_2; - - uint32_t l_1= l - l_2; -#endif - NTL::RR interm = kPlusLHalfChoosePFourths_real / NTL::power2_RR(l_2) * - NTL::to_RR(p/2*l_1); - - NTL::RR otherFactor = ( NTL::to_RR(p/4*l_2) + interm ); - NTL::RR cost_iter = FS_IS_candidate_cost + - min*otherFactor + - kPlusLHalfChoosePFourths_real * NTL::to_RR(p/2*l_2); - - NTL::RR lastAddend = otherFactor + - kPlusLHalfChoosePFourths_real * - kPlusLChoosePHalf * PChoosePHalf / - NTL::power2_RR(l) * - NTL::to_RR( p*(r-l) ); - lastAddend = lastAddend * kPlusLHalfChoosePFourths_real; - cost_iter += lastAddend; -// #if 0 - - NTL::RR log_MMT_space = r_real*n_real + - kPlusLHalfChoosePFourths_real * - (NTL::to_RR(p/4)* log2_RR(NTL::to_RR(k+l/2))+ NTL::to_RR(l_2) )+ - NTL::to_RR(min) * (NTL::to_RR(p/2)* log2_RR(NTL::to_RR(k+l))+ NTL::to_RR(l) ); - log_MMT_space = log2_RR(log_MMT_space); - cost_iter = cost_iter*log_MMT_space; -// #endif - log_cost = log2_RR(num_iter) + log2_RR(cost_iter); - if(min_log_cost > log_cost){ - min_log_cost = log_cost; - best_l = l; - best_l1 = l_1; - best_p = p; - log_mem_cost = log_MMT_space; - } -#if defined(EXPLORE_REPRS) - } -#endif - } - } - std::cerr << std::endl << "MMT Best l: " << best_l - << " best p: " << best_p - << " best l1: " << best_l1 - << std::endl; - if(best_p == constrained_max_p){ - std::cerr << std::endl << "Warning: p on exploration edge! " << std::endl; - } - if(best_l == constrained_max_l){ - std::cerr << std::endl << "Warning: l on exploration edge! 
" << std::endl; - } - //std::cerr << log_mem_cost << " "; - return NTL::conv( min_log_cost ); -} - - -#define P_MAX_BJMM 20 // P_MAX_MMT -#define L_MAX_BJMM 90 //L_MAX_MMT -#define Eps1_MAX_BJMM 4 -#define Eps2_MAX_BJMM 4 -double isd_log_cost_classic_BJMM(const uint32_t n, - const uint32_t k, - const uint32_t t) { - NTL::RR n_real = NTL::RR(n); - NTL::RR k_real = NTL::RR(k); - NTL::RR t_real = NTL::RR(t); - uint32_t r = n-k; - NTL::RR r_real = NTL::RR(r); - - NTL::RR min_log_cost = n_real; // unreachable upper bound - NTL::RR log_cost; - uint32_t best_l, best_p, - best_eps_1, best_eps_2, - constrained_max_l, constrained_max_p; - - NTL::RR FS_IS_candidate_cost; - constrained_max_p = P_MAX_BJMM > t ? t : P_MAX_BJMM; - /*p should be divisible by 2 in BJMM */ - for(uint32_t p = 2; p < constrained_max_p; p = p+2 ){ - /* sweep over all the valid eps1 knowing that p/2 + eps1 should be a - * multiple of 4*/ - constrained_max_l = ( L_MAX_BJMM > (n-k-(t-p)) ? (n-k-(t-p)) : L_MAX_BJMM ); - for(uint32_t l = 0; l < constrained_max_l; l++){ - for(uint32_t eps1 = 2+(p%2) ; eps1 < Eps1_MAX_BJMM; eps1 = eps1 + 2) { - uint32_t p_1 = p/2 + eps1; - /* sweep over all the valid eps2 knowing that p_1/2 + eps2 should - * be even */ - for(uint32_t eps2 = (p_1%2) ; eps2 < Eps2_MAX_BJMM; eps2 = eps2 + 2){ - uint32_t p_2 = p_1/2 + eps2; - - - /* Available parameters p, p_1,p_2,p_3, l */ - NTL::RR l_real = NTL::RR(l); - FS_IS_candidate_cost = Fin_Send_IS_candidate_cost(n_real,n_real-k_real,l_real); - uint32_t p_3 = p_2/2; - - NTL::ZZ L3_list_len = binomial_wrapper((k+l)/2,p_3); - NTL::RR L3_list_len_real = NTL::to_RR(L3_list_len); - /* the BJMM number of iterations depends only on L3 parameters - * precompute it */ - NTL::RR num_iter = NTL::to_RR( binomial_wrapper(n,t) ) / - NTL::to_RR( binomial_wrapper((k+l),p) * - binomial_wrapper(r-l,t-p) - ); - NTL::RR P_invalid_splits = NTL::power(L3_list_len_real,2) / - NTL::to_RR( binomial_wrapper(k+l,p_2)); - num_iter = num_iter / NTL::power(P_invalid_splits,4); - - /* lengths of lists 2 to 0 have to be divided by the number of repr.s*/ - NTL::RR L2_list_len = NTL::to_RR(binomial_wrapper(k+l,p_2)) * - NTL::power(P_invalid_splits,1); - NTL::RR L1_list_len = NTL::to_RR(binomial_wrapper(k+l,p_1)) * - NTL::power(P_invalid_splits,2); - /* estimating the range for r_1 and r_2 requires to compute the - * number of representations rho_1 and rho_2 */ - - NTL::ZZ rho_2 = binomial_wrapper(p_1,p_1/2) * - binomial_wrapper(k+l-p_1,eps2); - NTL::ZZ rho_1 = binomial_wrapper(p,p/2) * - binomial_wrapper(k+l-p,eps1); - int min_r2 = NTL::conv(NTL::log(NTL::to_RR(rho_2)) / - NTL::log(NTL::RR(2))); - int max_r1 = NTL::conv(NTL::log(NTL::to_RR(rho_1)) / - NTL::log(NTL::RR(2))); - - /*enumerate r_1 and r_2 over the suggested range - * log(rho_2) < r2 < r_1 < log(rho_1)*/ - /* clamp to safe values */ - min_r2 = min_r2 > 0 ? min_r2 : 1; - max_r1 = max_r1 < (int)l ? 
max_r1 : l-1; - - NTL::RR p_real = NTL::RR(p); - for(int r_2 = min_r2 ; r_2 < max_r1 - 1; r_2++){ - for(int r_1 = r_2+1; r_1 < max_r1 ; r_1++){ - - /*add the cost of building Layer 3 to cost_iter */ - NTL::RR cost_iter = NTL::to_RR(4) * - (k + l + 2*L3_list_len_real + - r_2 + - NTL::power(L3_list_len_real,2)* - NTL::to_RR(2*p_3*r_2)); - - /* add the cost of building Layer 2 */ - cost_iter += 2 * (NTL::power((NTL::to_RR(rho_2) / - (NTL::power2_RR(r_2)))* - NTL::power(L3_list_len_real,2),2) - * 2 * p_2 * (r_1-r_2)); - - /* add the cost of building Layer 1 */ - cost_iter += NTL::power((NTL::to_RR(rho_1) / - NTL::power2_RR(r_1)) * - (NTL::to_RR(rho_2) / - NTL::power2_RR(r_2))* - NTL::power(L3_list_len_real,2),4) * 2 * p_1 * l; - - /* add the cost of building L0 */ - cost_iter += p * (r - l) * - NTL::power((NTL::to_RR(rho_1) / NTL::power2_RR(r_1)) * - (NTL::to_RR(rho_2) / - NTL::power2_RR(r_2))* - NTL::power(L3_list_len_real,2),4) - / NTL::to_RR(l); - - log_cost = log2_RR(num_iter) + log2_RR(cost_iter); - - if(min_log_cost > log_cost){ - min_log_cost = log_cost; - best_l = l; - best_p = p; - best_eps_1 = eps1; - best_eps_2 = eps2; - } - } - } - - } /*end of iteration over l */ - /* to review up to to here */ - } /* end for over eps2 */ - } /* end for over eps1 */ - } /* end for over p*/ - std::cerr << std::endl << "BJMM Best l: " << best_l - << " best p: " << best_p - << " best eps1: " << best_eps_1 - << " best eps2: " << best_eps_2 - << std::endl; - return NTL::conv( min_log_cost ); -} - -/***************************Quantum ISDs***************************************/ - - -const NTL::RR quantum_gauss_red_cost(const NTL::RR &n, - const NTL::RR & k) { - // return 0.5* NTL::power(n-k,3) + k*NTL::power((n-k),2); - return 1.5 * NTL::power(n - k, 2) - 0.5 * (n-k); -} - -double isd_log_cost_quantum_LB(const uint32_t n, const uint32_t k, - const uint32_t t, const uint32_t p) { - NTL::RR n_real = NTL::RR(n); - NTL::RR k_real = NTL::RR(k); - NTL::RR t_real = NTL::RR(t); - NTL::RR p_real = NTL::RR(p); - NTL::RR log_pi_fourths = NTL::log(pi * 0.25); - NTL::RR log_pinv = log_probability_k_by_k_is_inv(k_real); - - /* Check https://doi.org/10.1007/978-3-031-61489-7_2 - * for the full measures of the lee-brickell quantum attack - */ - NTL::RR iteration_cost = quantum_gauss_red_cost(n_real, k_real) + - NTL::to_RR(binomial_wrapper(k, p)) * - NTL::log(n_real - k_real) / NTL::log(NTL::RR(2)); - NTL::RR log_cost = log_pi_fourths + .5* - (lnBinom(n_real, t_real) - log_pinv - (lnBinom(k_real, p_real) + - lnBinom(n_real - k_real, t_real - p_real))); - log_cost += NTL::log(iteration_cost); - log_cost = log_cost / NTL::log(NTL::RR(2)); - return NTL::conv(log_cost); -} - -#define MAX_M (t/2) - -double isd_log_cost_quantum_stern(const uint32_t n, - const uint32_t k, - const uint32_t t) { - NTL::RR n_real = NTL::RR(n); - NTL::RR k_real = NTL::RR(k); - NTL::RR t_real = NTL::RR(t); - NTL::RR current_complexity, log_p_success, c_it, c_dec; - - // Start computing Stern's parameter invariant portions of complexity - NTL::RR log_pi_fourths = NTL::log(pi*0.25); - // compute the probability of a random k * k being invertible - NTL::RR log_pinv = log_probability_k_by_k_is_inv(k_real); - // compute the cost of inverting the matrix, in a quantum execution env. 
- NTL::RR c_inv = quantum_gauss_red_cost(n_real,k_real); - - // optimize Stern's parameters : - // m : the # of errors in half of the chosen dimensions - // l : the length of the run of zeroes in the not chosen dimensions - // done via exhaustive parameter space search, minimizing the total - // complexity. - // Initial value set to codeword bruteforce to ensure the minimum is found. - NTL::RR min_stern_complexity = NTL::RR(n)*NTL::log(NTL::RR(2)); - - for(long m = 1; m <= MAX_M; m++){ - NTL::RR m_real = NTL::RR(m); - /* previous best complexity as a function of l alone. - * initialize to bruteforce-equivalent, break optimization loop as soon - * as a minimum is found */ - NTL::RR prev_best_complexity = NTL::RR(t); - for(long l = 0; l < (n-k-(t-2*m)); l++ ){ - - NTL::RR l_real = NTL::RR(l); - log_p_success = lnBinom(t_real, 2*m_real) + - lnBinom(n_real-t_real, k_real-2*m_real) + - lnBinom(2*m_real,m_real) + - lnBinom(n_real-k_real-t_real+2*m_real,l_real); - log_p_success = log_p_success - ( m_real*NTL::log(NTL::RR(4)) + - lnBinom(n_real,k_real) + - lnBinom(n_real -k_real, l_real)); - current_complexity = -(log_p_success+log_pinv)*0.5 + log_pi_fourths; - /* to match specifications , the term should be - * (n_real-k_real), as per in deVries, although - * David Hobach thesis mentions it to be - * (n_real-k_real-l_real), and it seems to match. - * amend specs for the typo. */ - c_it = l_real + - (n_real-k_real-l_real)* NTL::to_RR(binomial_wrapper(k/2,m)) / - NTL::power2_RR(-l); - - c_it = c_it * 2*m_real * NTL::to_RR(binomial_wrapper(k/2,m)); -#if IGNORE_DECODING_COST == 1 - c_dec = 0.0; -#elif IGNORE_DECODING_COST == 0 - /*cost of decoding estimated as per Golomb CWDEC - * decoding an n-bit vector with weight k is - * CWDEC_cost(k,n)=O(n^2 log_2(n)) and following deVries, where - * c_dec = CWDEC_cost(n-k, n) + k + CWDEC_cost(l,n-k)*/ - c_dec = n_real*n_real*NTL::log(n_real) + k_real + - (n_real-k_real)*(n_real-k_real)*NTL::log((n_real-k_real)); -#endif - current_complexity = current_complexity + NTL::log(c_it+c_inv+c_dec); - if(current_complexity < prev_best_complexity){ - prev_best_complexity = current_complexity; - } else{ - break; - } - } - if(current_complexity < min_stern_complexity){ - min_stern_complexity = current_complexity; - } - } - return NTL::conv( min_stern_complexity / NTL::log(NTL::RR(2.0)) ); -} - - -/***************************Aggregation ***************************************/ - -double get_qc_red_factor_log(const uint32_t qc_order, const uint32_t is_kra) { - /* For key recovery attacks (CFP) the advantage from quasi-cyclicity is p. For - * a message recovery (SDP), the DOOM advantage is sqrt(p). - */ - double qc_red_factor = is_kra ? logl(qc_order) : logl(qc_order) / 2.0; - return qc_red_factor / logl(2); -} - - -double c_isd_log_cost(const uint32_t n, const uint32_t k, const uint32_t t, - const uint32_t qc_order, const uint32_t is_kra, - const bool compute_qc_reduction_factor) { - double min_cost = n, current_cost; - double qc_red_factor = compute_qc_reduction_factor? 
get_qc_red_factor_log(qc_order, is_kra): 0; - - std::cout << "Classic "; - current_cost = isd_log_cost_classic_Prange(n, k, t) - qc_red_factor; - std::cerr << "Classic Prange: " << std::setprecision(5) << current_cost - << std::endl; - std::cout << current_cost << " "; - min_cost = current_cost; - - current_cost = isd_log_cost_classic_LB(n, k, t) - qc_red_factor; - std::cerr << "Classic Lee-Brickell ISD: " << std::setprecision(5) - << current_cost << std::endl; - std::cout << current_cost << " "; - min_cost = min_cost > current_cost ? current_cost : min_cost; - - current_cost = isd_log_cost_classic_Leon(n, k, t) - qc_red_factor; - std::cerr << "Classic Leon ISD: " << std::setprecision(5) << current_cost - << std::endl; - std::cout << current_cost << " "; - min_cost = min_cost > current_cost ? current_cost : min_cost; - - current_cost = isd_log_cost_classic_Stern(n, k, t) - qc_red_factor; - std::cerr << "Classic Stern ISD: " << std::setprecision(5) << current_cost - << std::endl; - std::cout << current_cost << " "; - min_cost = min_cost > current_cost ? current_cost : min_cost; - - current_cost = isd_log_cost_classic_FS(n, k, t) - qc_red_factor; - std::cerr << "Classic Fin-Send ISD: " << std::setprecision(5) << current_cost - << std::endl; - std::cout << current_cost << " "; - min_cost = min_cost > current_cost ? current_cost : min_cost; - -#if SKIP_MMT == 0 - current_cost = isd_log_cost_classic_MMT(n, k, t) - qc_red_factor; - std::cerr << "Classic MMT ISD: " << std::setprecision(5) << current_cost - << std::endl; - std::cout << current_cost << " "; - min_cost = min_cost > current_cost ? current_cost : min_cost; -#endif - -#if SKIP_BJMM == 0 - current_cost = isd_log_cost_classic_BJMM(n, k, t) - qc_red_factor; - std::cerr << "Classic BJMM ISD: " << std::setprecision(5) << current_cost - << std::endl; - std::cout << current_cost << " "; - min_cost = min_cost > current_cost ? current_cost : min_cost; -#endif - std::cout << std::endl; - - return min_cost; -} - -double q_isd_log_cost(const uint32_t n, const uint32_t k, const uint32_t t, - const uint32_t qc_order, const uint32_t is_kra, const bool compute_qc_reduction_factor) { - double min_cost = n, current_cost; - /* for key recovery attacks the advantage from quasi-cyclicity is p, - * for an ISD, the DOOM advantage is just sqrt(p) */ - std::cout << "Quantum "; - double qc_red_factor = compute_qc_reduction_factor? get_qc_red_factor_log(qc_order, is_kra): 0; - - /* This is just a quick hack since experiments says that p = 1 is - * the optimal value at least for the NIST code-based finalists - */ - current_cost = isd_log_cost_quantum_LB(n, k, t, 1) - qc_red_factor; - std::cout << current_cost << " "; - // std::cout << " Q-Lee-Brickell ISD: " << /**/current_cost << std::endl; - min_cost = current_cost; - - current_cost = isd_log_cost_quantum_stern(n, k, t) - qc_red_factor; - std::cout << current_cost << " "; - // std::cout << ", Q-Stern ISD: " << current_cost << std::endl; - min_cost = min_cost > current_cost ? 
current_cost : min_cost; - std::cout << std::endl; - - return min_cost; -} diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt new file mode 100644 index 0000000..dcb7b69 --- /dev/null +++ b/src/CMakeLists.txt @@ -0,0 +1,2 @@ +add_subdirectory(utils) +add_subdirectory(tools) diff --git a/src/constant_weight_encodable_bits.cpp b/src/constant_weight_encodable_bits.cpp new file mode 100644 index 0000000..e69de29 diff --git a/src/tools/CMakeLists.txt b/src/tools/CMakeLists.txt new file mode 100644 index 0000000..e02872a --- /dev/null +++ b/src/tools/CMakeLists.txt @@ -0,0 +1,36 @@ +find_package(OpenMP REQUIRED) +message(STATUS "OpenMP library: ${OpenMP_CXX_LIBRARIES}") +find_library(GMP_LIB gmp) +find_library(NTL_LIB ntl) +find_library(M_LIB m) +find_package(spdlog REQUIRED) +find_package(fmt REQUIRED) + +# Define libraries +set(LIBS ledautils ${NTL_LIB} ${GMP_LIB} ${M_LIB} spdlog::spdlog fmt::fmt) + +message(STATUS "gmp library: ${GMP_LIBRARIES}") +message(STATUS "ntl library: ${NTL_LIBRARIES}") +message(STATUS "m library: ${M_LIBRARIES}") +message(STATUS "spdlog library: ${spdlog_LIBRARIES}") +message(STATUS "fmt library: ${fmt_LIBRARIES}") + +set(target constant_weight_encodable_bits) +add_executable(${target} ${target}.cpp) +target_link_libraries(${target} PRIVATE ${LIBS}) +install(TARGETS constant_weight_encodable_bits DESTINATION bin) + +set(target enumeration_complexity) +add_executable(${target} ${target}.cpp) +target_link_libraries(${target} PRIVATE ${LIBS}) +install(TARGETS enumeration_complexity DESTINATION bin) + +set(target parameter_generator) +add_executable(${target} ${target}.cpp) +target_link_libraries(${target} PRIVATE ${LIBS}) +install(TARGETS parameter_generator DESTINATION bin) + +set(target work_factor_computation) +add_executable(${target} ${target}.cpp) +target_link_libraries(${target} PRIVATE ${LIBS} OpenMP::OpenMP_CXX) +install(TARGETS work_factor_computation DESTINATION bin) diff --git a/constant_weight_encodable_bits.cpp b/src/tools/constant_weight_encodable_bits.cpp similarity index 95% rename from constant_weight_encodable_bits.cpp rename to src/tools/constant_weight_encodable_bits.cpp index b0d80dd..3016c44 100644 --- a/constant_weight_encodable_bits.cpp +++ b/src/tools/constant_weight_encodable_bits.cpp @@ -3,7 +3,7 @@ #include #include -#include "binomials.hpp" +#include "utils/binomials.hpp" int main(int argc, char* argv[]){ if(argc != 3){ @@ -12,9 +12,9 @@ int main(int argc, char* argv[]){ return -1; } + InitBinomials(); NTL::RR::SetPrecision(NUM_BITS_REAL_MANTISSA); - pi = NTL::ComputePi_RR(); uint32_t n = atoi(argv[1]); uint32_t t = atoi(argv[2]); /* reduce by a factor matching the QC block size */ diff --git a/enumeration_complexity.cpp b/src/tools/enumeration_complexity.cpp similarity index 97% rename from enumeration_complexity.cpp rename to src/tools/enumeration_complexity.cpp index 7bf0a0b..5dedbe7 100644 --- a/enumeration_complexity.cpp +++ b/src/tools/enumeration_complexity.cpp @@ -15,9 +15,9 @@ int main(int argc, char* argv[]){ return -1; } + InitBinomials(); NTL::RR::SetPrecision(NUM_BITS_REAL_MANTISSA); - pi = NTL::ComputePi_RR(); uint32_t p = atoi(argv[1]); uint32_t d_v = atoi(argv[2]); uint32_t n_0 = atoi(argv[3]); diff --git a/parameter_generator.cpp b/src/tools/parameter_generator.cpp similarity index 74% rename from parameter_generator.cpp rename to src/tools/parameter_generator.cpp index 280c5a5..2236c30 100644 --- a/parameter_generator.cpp +++ b/src/tools/parameter_generator.cpp @@ -1,21 +1,26 @@ #include #include #include - -#define 
NUM_BITS_REAL_MANTISSA 128 -#define IGNORE_DECODING_COST 0 -#define SKIP_BJMM 1 -#define SKIP_MMT 1 -#define LOG_COST_CRITERION 1 +#include #include "binomials.hpp" #include "bit_error_probabilities.hpp" #include "isd_cost_estimate.hpp" +// #include "logging.hpp" #include "partitions_permanents.hpp" #include "proper_primes.hpp" #include #include #include +#include + +#define NUM_BITS_REAL_MANTISSA 128 +#define IGNORE_DECODING_COST 0 +// #define SKIP_BJMM 1 +// #define SKIP_MMT 1 +// #define LOG_COST_CRITERION 1 +// static auto LOGGER = +// Logger::LoggerManager::getInstance().get_logger("isd_cost_estimate"); uint32_t estimate_t_val(const uint32_t c_sec_level, const uint32_t q_sec_level, const uint32_t n_0, const uint32_t p) { @@ -31,9 +36,18 @@ uint32_t estimate_t_val(const uint32_t c_sec_level, const uint32_t q_sec_level, t = (lo + hi) / 2; std::cerr << "testing t " << t << std::endl; achieved_c_sec_level = - c_isd_log_cost(n_0 * p, ((n_0 - 1) * p), t, p, 0, true); + c_isd_log_cost(n_0 * p, ((n_0 - 1) * p), t, p, QCAttackType::MRA, true, + std::unordered_set{ + Algorithm::Prange, Algorithm::Lee_Brickell, + Algorithm::Leon, Algorithm::Stern, + Algorithm::Finiasz_Sendrier, Algorithm::MMT, + Algorithm::BJMM}) + .value; achieved_q_sec_level = - q_isd_log_cost(n_0 * p, ((n_0 - 1) * p), t, p, 0, true); + q_isd_log_cost(n_0 * p, ((n_0 - 1) * p), t, p, QCAttackType::MRA, true, + std::unordered_set{ + QuantumAlgorithm::Q_Lee_Brickell, QuantumAlgorithm::Q_Stern}) + .value; if ((achieved_c_sec_level >= c_sec_level) && (achieved_q_sec_level >= q_sec_level)) { hi = t; @@ -49,7 +63,7 @@ uint32_t estimate_t_val(const uint32_t c_sec_level, const uint32_t q_sec_level, } int ComputeDvMPartition(const uint64_t d_v_prime, const uint64_t n_0, - uint64_t mpartition[], uint64_t &d_v) { + std::vector &mpartition, uint64_t &d_v) { d_v = floor(sqrt(d_v_prime)); d_v = (d_v & 0x01) ? 
d_v : d_v + 1; uint64_t m = ceil((double)d_v_prime / (double)d_v); @@ -67,7 +81,7 @@ int ComputeDvMPartition(const uint64_t d_v_prime, const uint64_t n_0, uint64_t estimate_dv(const uint32_t c_sec_level, // expressed as const uint32_t q_sec_level, const uint32_t n_0, - const uint32_t p, uint64_t mpartition[]) { + const uint32_t p, std::vector &mpartition) { double achieved_c_sec_level = 0.0; double achieved_q_sec_level = 0.0; double achieved_c_enum_sec_level = 0.0; @@ -104,9 +118,19 @@ uint64_t estimate_dv(const uint32_t c_sec_level, // expressed as /* last parameter indicates a KRA, reduce margin by p due to quasi cyclicity */ achieved_c_sec_level = - c_isd_log_cost(n_0 * p, p, n_0 * d_v_prime, p, 1, true); + c_isd_log_cost( + n_0 * p, p, n_0 * d_v_prime, p, QCAttackType::KRA3, true, + std::unordered_set{ + Algorithm::Prange, Algorithm::Lee_Brickell, Algorithm::Leon, + Algorithm::Stern, Algorithm::Finiasz_Sendrier, + Algorithm::MMT, Algorithm::BJMM}) + .value; achieved_q_sec_level = - q_isd_log_cost(n_0 * p, p, n_0 * d_v_prime, p, 1, true); + q_isd_log_cost(n_0 * p, p, n_0 * d_v_prime, p, QCAttackType::KRA3, true, + std::unordered_set{ + QuantumAlgorithm::Q_Lee_Brickell, + QuantumAlgorithm::Q_Stern}) + .value; } } @@ -150,7 +174,7 @@ int main(int argc, char *argv[]) { << " epsilon " << epsilon << std::endl; uint64_t p, p_th, t, d_v_prime, d_v; - uint64_t mpartition[n_0] = {0}; + std::vector mpartition(n_0, 0); int current_prime_pos = 0; while (proper_primes[current_prime_pos] < starting_prime_lower_bound) { @@ -201,7 +225,8 @@ int main(int argc, char *argv[]) { std::cout << "refining parameters" << std::endl; - uint64_t p_ok, t_ok, d_v_ok, mpartition_ok[n_0] = {0}; + std::optional p_ok, t_ok, d_v_ok; + std::vector mpartition_ok(n_0, 0); /* refinement step taking into account possible invalid m partitions */ do { @@ -240,11 +265,23 @@ int main(int argc, char *argv[]) { current_prime_pos--; } while ((p > (1.0 + epsilon) * p_th) && (current_prime_pos > 0)); - std::cout << "parameter set found: p:" << p_ok << " t: " << t_ok; - std::cout << " d_v : " << d_v_ok << " mpartition: [ "; - for (unsigned i = 0; i < n_0; i++) { - std::cout << mpartition_ok[i] << " "; + if (!p_ok || !d_v_ok || !t_ok) { + // spdlog::error("Error: One or more variables are not initialized."); + throw std::runtime_error("One or more variables are not initialized."); + } else { + // spdlog::info( + // "parameter set found: p={}, t={}, d_v={}, mpartition={}", + // Logger::LoggerManager::getInstance().optional_to_string(p_ok), + // Logger::LoggerManager::getInstance().optional_to_string(t_ok), + // Logger::LoggerManager::getInstance().optional_to_string(d_v_ok), + // Logger::LoggerManager::getInstance().array_to_string(mpartition_ok)); } - std::cout << " ]" << std::endl; + // std::cout + // << " p:" << p_ok << " t: " << t_ok; + // std::cout << " d_v : " << d_v_ok << " mpartition: [ "; + // for (unsigned i = 0; i < n_0; i++) { + // std::cout << mpartition_ok[i] << " "; + // } + // std::cout << " ]" << std::endl; return 0; } diff --git a/src/tools/work_factor_computation.cpp b/src/tools/work_factor_computation.cpp new file mode 100644 index 0000000..1abd57d --- /dev/null +++ b/src/tools/work_factor_computation.cpp @@ -0,0 +1,273 @@ +#include +#include +#include +#include +#include +#include +#include +#include // For std::setprecision +#include +#include +#include +#include +#include +// #include +#include +#include + +#include "globals.hpp" +#include +#include +#include + +#define NUM_BITS_REAL_MANTISSA 1024 +#define 
IGNORE_DECODING_COST 0 +// #define EXPLORE_REPRS + +void to_json(nlohmann::json &j, const Result &r) { + j = nlohmann::json{{"alg_name", r.alg_name}, + {"params", r.params}, + {"value", r.value}, + {"gje_cost", r.gje_cost}, + {"list_size", r.list_size}}; +} + +void from_json(const nlohmann::json &j, Result &r) { + j.at("alg_name").get_to(r.alg_name); + j.at("params").get_to(r.params); + j.at("value").get_to(r.value); + j.at("gje_cost").get_to(r.gje_cost); +} + +int handle_plain(const std::string args) { + std::istringstream argStream(args); + std::string token; + std::vector values; + while (std::getline(argStream, token, ',')) { + values.push_back(std::stoi(token)); + } + + if (values.size() != 4) { + std::cerr << "Expected 4 comma-separated values, but got " << values.size() + << std::endl; + return 1; + } + + int n = values[0]; + int k = values[1]; + int t = values[2]; + bool qc_block_size = values[3]; + + for (int i = 0; i < static_cast(Algorithm::Count); i++) { + Algorithm algo = static_cast(i); + std::cout << "Algorithm " << algorithm_to_string(algo) << std::endl; + Result current_c_res = c_isd_log_cost(n, k, t, qc_block_size, + QCAttackType::Plain, false, {algo}); + std::cout << "Plain " << std::endl; + std::cout << result_to_string(current_c_res) << std::endl; + + double red_fac; + red_fac = + get_qc_red_factor_classic_log(qc_block_size, n - k, QCAttackType::MRA); + std::cout << "Classic MRA: " << current_c_res.value - red_fac << std::endl; + red_fac = + get_qc_red_factor_classic_log(qc_block_size, n - k, QCAttackType::KRA1); + std::cout << "Classic KRA1: " << current_c_res.value - red_fac << std::endl; + red_fac = + get_qc_red_factor_classic_log(qc_block_size, n - k, QCAttackType::KRA2); + std::cout << "Classic KRA2: " << current_c_res.value - red_fac << std::endl; + red_fac = + get_qc_red_factor_classic_log(qc_block_size, n - k, QCAttackType::KRA3); + std::cout << "Classic KRA3: " << current_c_res.value - red_fac << std::endl; + + std::cout << "**********" << std::endl; + } + + for (int i = 0; i < static_cast(QuantumAlgorithm::Count); i++) { + QuantumAlgorithm algo = static_cast(i); + + std::cout << "Algorithm: " << quantum_algorithm_to_string(algo) + << std::endl; + Result current_q_res = + q_isd_log_cost(n, k, t, qc_block_size, QCAttackType::Plain, false, + std::unordered_set{algo}); + std::cout << "Plain " << std::endl; + std::cout << result_to_string(current_q_res) << std::endl; + + double red_fac; + red_fac = + get_qc_red_factor_quantum_log(qc_block_size, n - k, QCAttackType::MRA); + std::cout << "Quantum MRA: " << current_q_res.value - red_fac << std::endl; + } + return 0; +} + +int handle_json(std::string json_filename) { + // const std::string input_isd_values = "out/isd_values.json"; + std::ifstream file(json_filename); + + // Check if the file is open + if (!file.is_open()) { + std::cerr << "Could not open the input file " << json_filename << std::endl; + return 1; + } + + // Parse the JSON content + nlohmann::json j; + file >> j; + + int no_values = j.size(); + std::cout << "Number of values in the JSON: " << no_values << std::endl; + std::filesystem::path dirPath(OUT_DIR_RESULTS); + // Check if the directory exists + if (!std::filesystem::exists(dirPath)) { + // Try to create the directory, including parent directories + if (std::filesystem::create_directories(dirPath)) { + std::cout << "Directory created successfully: " << OUT_DIR_RESULTS + << std::endl; + } else { + std::cerr << "Failed to create directory: " << OUT_DIR_RESULTS + << std::endl; + return 1; // Return 
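For reference, `handle_json` below treats its input as a JSON array in which each entry carries at least the keys `n`, `r` and `t` (`k` is derived as `n - r`, and the circulant block size is currently taken to be `r`). A minimal sketch of how such an input file could be produced is shown here; the helper name `write_example_isd_values` and the numeric values are hypothetical and only illustrate the expected shape.

```cpp
// Minimal sketch: writes an input file of the shape consumed by handle_json.
// Only the keys "n", "r" and "t" are read from each entry; the numeric values
// below are hypothetical placeholders.
#include <nlohmann/json.hpp>
#include <fstream>
#include <string>

void write_example_isd_values(const std::string &path) {
  nlohmann::json j = nlohmann::json::array();
  j.push_back({{"n", 59194}, {"r", 29597}, {"t", 199}});
  j.push_back({{"n", 35338}, {"r", 17669}, {"t", 137}});
  std::ofstream out(path);
  out << j.dump(4);  // any valid JSON array of such entries is accepted
}
```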
an error code + } + } + + // Define an atomic counter for processed entries + std::atomic processed_count(0); + std::atomic error_count(0); + std::atomic skipped_count(0); + std::atomic iteration(0); + + // Iterate over the list of entries. With schedule(dynamic) loop iterations + // are divided into chunks, and threads dynamically grab chunks as they + // complete their previous work. +#pragma omp parallel for schedule(dynamic) + for (const auto &entry : j) { + ++iteration; + if (iteration % 1234 == 0) { +#pragma omp critical + { + std::cout << "\rProcessed: " << std::setw(8) << std::setfill(' ') + << processed_count << "; Skipped: " << std::setw(8) + << std::setfill(' ') << skipped_count + << "; Errors: " << std::setw(8) << std::setfill(' ') + << error_count << "; Remaining: " << std::setw(8) + << std::setfill(' ') + << (no_values - skipped_count - processed_count - error_count) + << " / " << no_values << std::flush; + } + } + + uint32_t n = entry["n"]; + uint32_t r = entry["r"]; + uint32_t k = n - r; + uint32_t t = entry["t"]; + + std::string filename = + OUT_DIR_RESULTS + fmt::format("{:06}_{:06}_{:03}.json", n, k, t); + // Check if the generated file exists + if (std::filesystem::exists(filename)) { + // std::cout << "Generated file exists: " << filename << std::endl + // << ". Skipping."; + ++skipped_count; + continue; + } +#pragma omp critical + std::cout << "Processing " << filename << std::endl; + // uint32_t qc_block_size = entry["prime"]; + uint32_t qc_block_size = r; + + nlohmann::json out_values; + + Result current_c_res; + Result current_q_res; + + current_c_res = c_isd_log_cost( + n, k, t, qc_block_size, QCAttackType::Plain, false, + std::unordered_set{Algorithm::Prange, Algorithm::Stern}); + + current_q_res = q_isd_log_cost( + n, k, t, qc_block_size, QCAttackType::Plain, false, + std::unordered_set{QuantumAlgorithm::Q_Lee_Brickell}); + + std::string attack_type; + out_values["Classic"]["Plain"] = current_c_res; + out_values["Quantum"]["Plain"] = current_q_res; + + // Post-apply reduction factors + // uint32_t n0 = n / r; + // if (n0 == 0) { + // // It's a value with rate < .5; it happens for KRA2 attacks + // double red_fac = + // get_qc_red_factor_quantum_log(qc_block_size, n0, QCAttackType::MRA); + // out_values["Quantum"]["MRA"] = current_q_res.value - red_fac; + + // red_fac = + // get_qc_red_factor_classic_log(qc_block_size, n0, QCAttackType::MRA); + // out_values["Classic"]["MRA"] = current_c_res.value - red_fac; + + // red_fac = + // get_qc_red_factor_classic_log(qc_block_size, n0, QCAttackType::KRA1); + // out_values["Classic"]["KRA1"] = current_c_res.value - red_fac; + // red_fac = + // get_qc_red_factor_classic_log(qc_block_size, n0, QCAttackType::KRA2); + // out_values["Classic"]["KRA2"] = current_c_res.value - red_fac; + // red_fac = + // get_qc_red_factor_classic_log(qc_block_size, n0, QCAttackType::KRA3); + // out_values["Classic"]["KRA3"] = current_c_res.value - red_fac; + // } + + std::ofstream file(filename); + if (file.is_open()) { + file << std::fixed << std::setprecision(10) + << out_values.dump(4); // Format JSON with indentation + file.close(); + ++processed_count; + // std::cout << "Data written to " << filename << std::endl; + } else { + std::cerr << "Could not open the file!" 
<< std::endl; + ++error_count; + } + } + + std::cout << "\rProcessed: " << std::setw(8) << std::setfill(' ') + << processed_count << "; Skipped: " << std::setw(8) + << std::setfill(' ') << skipped_count + << "; Errors: " << std::setw(8) << std::setfill(' ') << error_count + << "; Remaining: " << std::setw(8) << std::setfill(' ') + << (no_values - skipped_count - processed_count - error_count) + << " / " << no_values << std::endl; + + return 0; +} + +int main(int argc, char *argv[]) { + // Logger::LoggerManager::getInstance().setup_logger( + // "binomials", spdlog::level::info, spdlog::level::debug); + // Logger::LoggerManager::getInstance().setup_logger( + // "isd_cost_estimate", spdlog::level::info, spdlog::level::debug); + if (argc != 3) { + std::cerr << "Usage: " << argv[0] << " --json [filename] | --plain [args]" + << std::endl; + return 1; + } + + NTL::RR::SetPrecision(NUM_BITS_REAL_MANTISSA); + InitBinomials(); + pi = NTL::ComputePi_RR(); + + if (strcmp(argv[1], "--json") == 0) { + std::string json_filename = argv[2]; + handle_json(json_filename); + } else if (strcmp(argv[1], "--plain") == 0) { + std::string plainArgs = argv[2]; + handle_plain(plainArgs); + } else { + std::cerr << "Unknown argument: " << argv[1] << std::endl; + std::cerr << "Usage: " << argv[0] + << " --json [filename]" // "| --csv [filename]" + << std::endl; + return 1; + } + return 0; +} diff --git a/src/utils/CMakeLists.txt b/src/utils/CMakeLists.txt new file mode 100644 index 0000000..b893053 --- /dev/null +++ b/src/utils/CMakeLists.txt @@ -0,0 +1,20 @@ +# add_library(ledautils binomials.cpp logging.cpp bit_error_probabilities.cpp isd_cost_estimate.cpp) +add_library(ledautils binomials.cpp bit_error_probabilities.cpp isd_cost_estimate.cpp partitions_permanents.cpp) + +find_library(GMP_LIB gmp) +find_library(NTL_LIB ntl) +find_library(M_LIB m) +find_package(spdlog REQUIRED) +find_package(fmt REQUIRED) + +message(STATUS "gmp library: ${GMP_LIBRARIES}") +message(STATUS "ntl library: ${NTL_LIBRARIES}") +message(STATUS "m library: ${M_LIBRARIES}") +message(STATUS "spdlog library: ${spdlog_LIBRARIES}") +message(STATUS "fmt library: ${fmt_LIBRARIES}") + +# Specify include directories for this module +target_include_directories(ledautils PUBLIC ${PROJECT_SOURCE_DIR}/include/utils) + +# Install the library +install(TARGETS ledautils DESTINATION lib) diff --git a/src/utils/binomials.cpp b/src/utils/binomials.cpp new file mode 100644 index 0000000..07b0620 --- /dev/null +++ b/src/utils/binomials.cpp @@ -0,0 +1,93 @@ +#include "binomials.hpp" +// #include "logging.hpp" +#include +#include + +NTL::Mat binomial_table; +NTL::Mat low_k_binomial_table; +bool is_data_initialized = false; + +NTL::RR nat_log_2 = NTL::log(NTL::RR(2)); +NTL::RR pi = NTL::ComputePi_RR(); + +NTL::RR log2_RR(NTL::RR v) { return NTL::log(v) / nat_log_2; } + +// static auto LOGGER = +// Logger::LoggerManager::getInstance().get_logger("binomials"); + +/*NOTE: NTL allows to access matrices as 1- based with Matlab notation */ +void InitBinomials() { + // LOGGER + // ->info("Precomputing n-choose-t up to n: {}, t: {}", MAX_N, MAX_T); + binomial_table.SetDims(MAX_N + 1, MAX_T + 1); + binomial_table[0][0] = NTL::ZZ(1); + for (unsigned i = 1; i <= MAX_N; i++) { + binomial_table[i][0] = NTL::ZZ(1); + binomial_table[i][1] = NTL::ZZ(i); + for (unsigned j = 2; (j <= i) && (j <= MAX_T); j++) { + binomial_table[i][j] = + binomial_table[i][j - 1] * NTL::ZZ(i - j + 1) / NTL::ZZ(j); + } + } + binomial_table.SetDims(MAX_N + 1, MAX_T + 1); + + // LOGGER->info("Precomputing 
low n-choose-t up to n: {}, t: {}", LOW_K_MAX_N, + // LOW_K_MAX_T); + low_k_binomial_table.SetDims(LOW_K_MAX_N + 1, LOW_K_MAX_T + 1); + low_k_binomial_table[0][0] = NTL::ZZ(1); + for (unsigned i = 0; i <= LOW_K_MAX_N; i++) { + low_k_binomial_table[i][0] = NTL::ZZ(1); + low_k_binomial_table[i][1] = NTL::ZZ(i); + for (unsigned j = 2; (j <= i) && (j <= LOW_K_MAX_T); j++) { + low_k_binomial_table[i][j] = + low_k_binomial_table[i][j - 1] * NTL::ZZ(i - j + 1) / NTL::ZZ(j); + } + } + is_data_initialized = true; + // LOGGER->info("Done"); +} + +NTL::RR lnFactorial(NTL::RR n) { + /* log of Stirling series approximated to the fourth term + * n log(n) - n + 1/2 log(2 \pi n) + log(- 139/(51840 n^3) + + * + 1/(288 n^2) + 1/(12 n) + 1) */ + return n * NTL::log(n) - n + 0.5 * NTL::log(2 * pi * n) + + NTL::log(-NTL::RR(139) / (n * n * n * 51840) + + NTL::RR(1) / (n * n * 288) + NTL::RR(1) / (n * 12) + 1); +} + +NTL::RR lnBinom(NTL::RR n, NTL::RR k) { + if ((k == NTL::RR(0)) || (k == n)) { + return NTL::RR(0); + } + return lnFactorial(n) - (lnFactorial(k) + lnFactorial(n - k)); +} + +NTL::ZZ binomial_wrapper(long n, long k) { + if (k > n) + return NTL::ZZ(0); + /* employ memoized if available */ + if (is_data_initialized) { + if ((n <= MAX_N) && (k < MAX_T)) { + return binomial_table[n][k]; + } + if ((n <= LOW_K_MAX_N) && (k < LOW_K_MAX_T)) { + return low_k_binomial_table[n][k]; + } + } else { + // LOGGER->info( + // "Binomial table not initizialed, resorting to standard computation"); + } + + /* shortcut computation for fast cases (k < 10) where + * Stirling may not provide good approximations */ + if (k < 10) { + NTL::ZZ result = NTL::ZZ(1); + for (int i = 1; i <= k; i++) { + result = (result * (n + 1 - i)) / i; + } + return result; + } + /*Fall back to Stirling*/ + return NTL::conv(NTL::exp(lnBinom(NTL::RR(n), NTL::RR(k)))); +} diff --git a/src/utils/bit_error_probabilities.cpp b/src/utils/bit_error_probabilities.cpp new file mode 100644 index 0000000..bafd880 --- /dev/null +++ b/src/utils/bit_error_probabilities.cpp @@ -0,0 +1,289 @@ +#include "bit_error_probabilities.hpp" + +// choice of the approximation praxis for the estimated fraction of an error +// to appear in the next iteration of a bit-flipping decoder + +/* Probability that a variable node is correct, and a parity equation involving + * it is satisfied */ +NTL::RR compute_p_cc(const uint64_t d_c, const uint64_t n, const uint64_t t) { + NTL::RR result = NTL::RR(0); + uint64_t bound = (d_c - 1) < t ? d_c - 1 : t; + + /* the number of errors falling in the PC equation should be at least + * the amount which cannot be placed in a non checked place */ + uint64_t LowerTHitBound = (n - d_c) < t ? t - (n - d_c) : 0; + /* and it should be even, since the PC equation must be satisfied */ + LowerTHitBound = LowerTHitBound % 2 ? LowerTHitBound + 1 : LowerTHitBound; + + for (uint64_t j = LowerTHitBound; j <= bound; j = j + 2) { + result += + to_RR(binomial_wrapper(d_c - 1, j) * binomial_wrapper(n - d_c, t - j)) / + to_RR(binomial_wrapper(n - 1, t)); + } + return result; +} + +/* Probability that a variable node is correct, and a parity equation involving + * it is *not* satisfied */ +NTL::RR compute_p_ci(const uint64_t d_c, const uint64_t n, const uint64_t t) { + NTL::RR result = NTL::RR(0); + uint64_t bound = (d_c - 1) < t ? d_c - 1 : t; + + /* the number of errors falling in the PC equation should be at least + * the amount which cannot be placed in a non checked place */ + uint64_t LowerTHitBound = (n - d_c) < t ? 
t - (n - d_c) : 1; + /* and it should be odd, since the PC equation must be non satisfied */ + LowerTHitBound = LowerTHitBound % 2 ? LowerTHitBound : LowerTHitBound + 1; + + for (uint64_t j = LowerTHitBound; j <= bound; j = j + 2) { + result += + to_RR(binomial_wrapper(d_c - 1, j) * binomial_wrapper(n - d_c, t - j)) / + to_RR(binomial_wrapper(n - 1, t)); + } + return result; +} + +/* Probability that a variable node is *not* correct, and a parity equation + * involving it is *not* satisfied */ +NTL::RR compute_p_ic(const uint64_t d_c, const uint64_t n, const uint64_t t) { + NTL::RR result = NTL::RR(0); + uint64_t UpperTBound = (d_c - 1) < t - 1 ? d_c - 1 : t - 1; + + /* the number of errors falling in the PC equation should be at least + * the amount which cannot be placed in a non checked place */ + uint64_t LowerTHitBound = + (n - d_c - 1) < (t - 1) ? (t - 1) - (n - d_c - 1) : 0; + /* and it should be even, since the PC equation must be unsatisfied (when + * accounting for the one we are considering as already placed*/ + LowerTHitBound = LowerTHitBound % 2 ? LowerTHitBound + 1 : LowerTHitBound; + + for (uint64_t j = LowerTHitBound; j <= UpperTBound; j = j + 2) { + result += NTL::to_RR(binomial_wrapper(d_c - 1, j) * + binomial_wrapper(n - d_c, t - j - 1)) / + to_RR(binomial_wrapper(n - 1, t - 1)); + } + return result; +} + +/* Probability that a variable node is *not* correct, and a parity equation + * involving it is satisfied */ +NTL::RR compute_p_ii(const uint64_t d_c, const uint64_t n, const uint64_t t) { + + NTL::RR result = NTL::RR(0); + uint64_t bound = (d_c - 1) < t - 1 ? d_c - 1 : t - 1; + + /* the number of errors falling in the PC equation should be at least + * the amount which cannot be placed in a non checked place */ + uint64_t LowerTHitBound = (n - d_c) < (t - 1) ? (t - 1) - (n - d_c) : 1; + /* and it should be odd, since the PC equation must be satisfied (when + * accounting for the one we are considering as already placed)*/ + LowerTHitBound = LowerTHitBound % 2 ? LowerTHitBound : LowerTHitBound + 1; + for (uint64_t j = LowerTHitBound; j <= bound; j = j + 2) { + result += NTL::to_RR(binomial_wrapper(d_c - 1, j) * + binomial_wrapper(n - d_c, t - j - 1)) / + to_RR(binomial_wrapper(n - 1, t - 1)); + } + return result; +} + +/* note p_cc + p_ci = 1 */ +/* note p_ic + p_ii = 1 */ + +/* Probability that a given erroneous variable is deemed as such, and is thus + * corrected, given a threshold for the amount of unsatisfied parity check + * equations. Called P_ic in most texts */ +NTL::RR ComputePrBitCorrection(const NTL::RR p_ic, const uint64_t d_v, + // const uint64_t t, + const uint64_t threshold) { + // Pic=0; /* p_correct */ + // for (j=b,dv, + // term=binomial(dv,j)*(p_ic^j)*(1-p_ic)^(dv-j); + // Pic=Pic+term; + // ); + NTL::RR result = NTL::RR(0), success, failure; + for (uint64_t j = threshold; j <= d_v; j++) { + NTL::pow(success, p_ic, NTL::to_RR(j)); + NTL::pow(failure, NTL::RR(1) - p_ic, NTL::to_RR(d_v - j)); + result += NTL::to_RR(binomial_wrapper(d_v, j)) * success * failure; + } + return result; +} + +/* Probability that a given correct variable is not deemed as such, and is thus + * fault-induced, given a threshold for the amount of unsatisfied parity check + * equations. 
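In formula form, the per-variable flipping probability evaluated by `ComputePrBitCorrection` above (written here as P_flip, a label not used in the code) is the binomial tail over the unsatisfied-parity-check count, with b the decoding threshold and p_ic as returned by `compute_p_ic`; `ComputePrBitFaultInduction` below is the same tail with p_ci in place of p_ic.

```latex
P_{\mathrm{flip}}(b) \;=\; \sum_{j=b}^{d_v} \binom{d_v}{j}\, p_{ic}^{\,j}\,\left(1-p_{ic}\right)^{d_v-j}
```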
Called P_ci in most texts, p_induce in official comment */ +NTL::RR ComputePrBitFaultInduction(const NTL::RR p_ci, const uint64_t d_v, + // const uint64_t t, /* unused */ + const uint64_t threshold) { + + NTL::RR result = NTL::RR(0), success, failure; + for (uint64_t j = threshold; j <= d_v; j++) { + NTL::pow(success, p_ci, NTL::to_RR(j)); + NTL::pow(failure, NTL::RR(1) - p_ci, NTL::to_RR(d_v - j)); + result += NTL::to_RR(binomial_wrapper(d_v, j)) * success * failure; + } + return result; +} + +/* computes the probability that toCorrect bits are corrected + * known as P{N_ic = toCorrect} */ +NTL::RR ComputePrBitCorrectionMulti(const NTL::RR p_ic, const uint64_t d_v, + const uint64_t t, const uint64_t threshold, + const uint64_t toCorrect) { + NTL::RR ProbCorrectOne = ComputePrBitCorrection(p_ic, d_v, threshold); + return NTL::to_RR(binomial_wrapper(t, toCorrect)) * + NTL::pow(ProbCorrectOne, NTL::RR(toCorrect)) * + NTL::pow(1 - ProbCorrectOne, NTL::RR(t - toCorrect)); +} + +/* computes the probability that toInduce faults are induced + * known as P{N_ci = toInduce} or Pr{f_wrong = to_induce} */ +NTL::RR ComputePrBitInduceMulti(const NTL::RR p_ci, const uint64_t d_v, + const uint64_t t, const uint64_t n, + const uint64_t threshold, + const uint64_t toInduce) { + // if(toInduce <= 1 ){ + // return NTL::RR(0); + // } + NTL::RR ProbInduceOne = ComputePrBitFaultInduction(p_ci, d_v, threshold); + return NTL::to_RR(binomial_wrapper(n - t, toInduce)) * + NTL::pow(ProbInduceOne, NTL::RR(toInduce)) * + NTL::pow(1 - ProbInduceOne, NTL::RR(n - t - toInduce)); +} + +uint64_t FindNextNumErrors(const uint64_t n_0, const uint64_t p, + const uint64_t d_v, const uint64_t t) { + NTL::RR p_ci, p_ic; + p_ci = compute_p_ci(n_0 * d_v, n_0 * p, t); + p_ic = compute_p_ic(n_0 * d_v, n_0 * p, t); + uint64_t t_next = t; + // uint64_t best_threshold = (d_v - 1)/2; + for (uint64_t i = (d_v - 1) / 2; i <= d_v - 1; i++) { + NTL::RR t_approx = t - t * ComputePrBitCorrection(p_ic, d_v, i) + + (n_0 * p - t) * ComputePrBitFaultInduction(p_ci, d_v, i); + unsigned long int t_curr = + NTL::conv(NTL::ROUNDING_PRAXIS(t_approx)); + /*Note : we increase the threshold only if it improves strictly on the + * predicted error correction. */ + if (t_curr < t_next) { + t_next = t_curr; + // best_threshold = i; + } + } + /* considering that any code will correct a single bit error, if + * t_next == 1, we save a computation iteration and shortcut to t_next == 0*/ + if (t_next == 1) { + t_next = 0; + } + return t_next; +} + +/* computes the exact 1-iteration DFR and the best threshold on the number of + * upcs to achieve it */ +std::pair Find1IterDFR(const uint64_t n_0, const uint64_t p, + const uint64_t d_v, + const uint64_t t) { + NTL::RR p_ci, p_ic, P_correct, P_induce; + NTL::RR DFR, best_DFR = NTL::RR(1); + p_ci = compute_p_ci(n_0 * d_v, n_0 * p, t); + p_ic = compute_p_ic(n_0 * d_v, n_0 * p, t); + uint64_t best_threshold = (d_v - 1) / 2; + for (uint64_t b = best_threshold; b <= d_v - 1; b++) { + DFR = NTL::RR(1) - ComputePrBitCorrectionMulti(p_ic, d_v, t, b, t) * + ComputePrBitInduceMulti(p_ci, d_v, t, n_0 * p, b, 0); + /*Note : we increase the threshold only if it improves strictly on the + * predicted error correction. */ + if (DFR < best_DFR) { + best_DFR = DFR; + best_threshold = b; + } + } + // std::cout << best_threshold << std::endl; + return std::make_pair(best_DFR, best_threshold); +} + +/* computes the exact 1-iteration probability of leaving at most t_leftover + * uncorrected errors out of t. 
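Written out, the quantity minimized over the threshold b by `Find1IterDFR` above (labelled DFR_1 here for clarity) is the probability that not all of the t errors are corrected or that at least one new error is induced, where Pr{N_ic = t} and Pr{N_ci = 0} are the multi-bit probabilities returned by `ComputePrBitCorrectionMulti` and `ComputePrBitInduceMulti`:

```latex
\mathrm{DFR}_1(b) \;=\; 1 \;-\; \Pr\{N_{ic} = t\}\cdot\Pr\{N_{ci} = 0\},
\qquad b \in \left[\left\lfloor \tfrac{d_v-1}{2} \right\rfloor,\; d_v-1\right]
```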
*/ +std::pair +Find1IterTLeftoverPr(const uint64_t n_0, const uint64_t p, const uint64_t d_v, + const uint64_t t, const uint64_t t_leftover) { + NTL::RR p_ci, p_ic; + NTL::RR DFR, best_DFR = NTL::RR(1); + p_ci = compute_p_ci(n_0 * d_v, n_0 * p, t); + p_ic = compute_p_ic(n_0 * d_v, n_0 * p, t); + int n = p * n_0; + uint64_t best_threshold = (d_v + 1) / 2; + + for (uint64_t b = best_threshold; b <= d_v; b++) { + DFR = NTL::RR(0); + NTL::RR P_correct = ComputePrBitCorrection(p_ic, d_v, b); + NTL::RR P_induce = ComputePrBitFaultInduction(p_ci, d_v, b); + for (int tau = 0; tau <= t_leftover; tau++) { + for (int n_to_induce = 0; n_to_induce <= t_leftover; n_to_induce++) { + NTL::RR prob_induce_n = + NTL::to_RR(binomial_wrapper(n - t, n_to_induce)) * + NTL::pow(P_induce, NTL::to_RR(n_to_induce)) * + NTL::pow(NTL::RR(1) - P_induce, NTL::to_RR(n - t - n_to_induce)); + int n_to_correct = (int)t + n_to_induce - tau; + NTL::RR prob_correct_n = NTL::to_RR(binomial_wrapper(t, n_to_correct)); + prob_correct_n *= NTL::pow(P_correct, NTL::to_RR(n_to_correct)); + + prob_correct_n *= + NTL::pow(NTL::RR(1) - P_correct, + NTL::to_RR((int)t - n_to_correct)); /*unsigned exp?*/ + DFR += prob_correct_n * prob_induce_n; + } + } + DFR = NTL::RR(1) - DFR; + if (DFR < best_DFR) { + best_DFR = DFR; + best_threshold = b; + } + } + return std::make_pair(best_DFR, best_threshold); +} + +// find minimum p which, asymptotically, corrects all errors +// search performed via binary search as the DFR is decreasing monot. +// in of p +uint64_t Findpth(const uint64_t n_0, const uint64_t d_v_prime, + const uint64_t t){ + +unsigned int prime_idx = 0, prime_idx_prec; +uint64_t p = proper_primes[prime_idx]; +while (p < d_v_prime || p < t) { + prime_idx++; + p = proper_primes[prime_idx]; + } + + uint64_t hi, lo; + lo = prime_idx; + hi = PRIMES_NO; + prime_idx_prec = lo; + + uint64_t limit_error_num = t; + while (hi - lo > 1) { + prime_idx_prec = prime_idx; + prime_idx = (lo + hi) / 2; + p = proper_primes[prime_idx]; + // compute number of remaining errors after +infty iters + limit_error_num = t; + uint64_t current_error_num; + // std::cout << "using p:"<< p << ", errors dropping as "; + do { + current_error_num = limit_error_num; + limit_error_num = FindNextNumErrors(n_0, p, d_v_prime, current_error_num); + // std::cout << limit_error_num << " "; + } while ((limit_error_num != current_error_num) && (limit_error_num != 0)); + // std::cout << std::endl; + if (limit_error_num > 0) { + lo = prime_idx; + } else { + hi = prime_idx; + } + } + if (limit_error_num == 0) { + return proper_primes[prime_idx]; + } + return proper_primes[prime_idx_prec]; +} diff --git a/src/utils/isd_cost_estimate.cpp b/src/utils/isd_cost_estimate.cpp new file mode 100644 index 0000000..c1115e7 --- /dev/null +++ b/src/utils/isd_cost_estimate.cpp @@ -0,0 +1,906 @@ +#include "isd_cost_estimate.hpp" +#include "binomials.hpp" +// #include "logging.hpp" +#include +#include +#include + +#include +#include + +// string'g + +std::string qc_attack_type_to_string(QCAttackType type) { + switch(type) { + case QCAttackType::KRA1: + return "KRA-1"; + case QCAttackType::KRA2: + return "KRA-2"; + case QCAttackType::KRA3: + return "KRA-3"; + case QCAttackType::MRA: + return "MRA"; + default: + return "Unknown attack type"; + } +} + +std::string quantum_algorithm_to_string(QuantumAlgorithm algo) { + switch (algo) { + case QuantumAlgorithm::Q_Lee_Brickell: return "Quantum Lee-Brickell"; + case QuantumAlgorithm::Q_Stern: return "Quantum Stern"; + default: + return "Unknown 
Algorithm"; + } +} + +std::string algorithm_to_string(Algorithm algo) { + switch (algo) { + case Algorithm::Prange: + return "Prange"; + case Algorithm::Lee_Brickell: + return "Lee_Brickell"; + case Algorithm::Leon: + return "Leon"; + case Algorithm::Stern: + return "Stern"; + case Algorithm::Finiasz_Sendrier: + return "Finiasz_Sendrier"; + case Algorithm::MMT: + return "MMT"; + case Algorithm::BJMM: + return "BJMM"; + // Add more algorithms here as needed + default: + return "Unknown Algorithm"; + } +} + +std::string result_to_string(const Result &result) { + std::ostringstream oss; + + // Add algorithm name + oss << "Algorithm: " << result.alg_name << "\n"; + + // Add parameters + oss << "Parameters:\n"; + for (const auto ¶m : result.params) { + oss << " " << param.first << ": " << param.second << "\n"; + } + + // Add other fields + oss << "Value: " << result.value << "\n"; + oss << "GJE Cost: " << result.gje_cost << "\n"; + oss << "List Size: " << result.list_size << "\n"; + + return oss.str(); +} + +// static auto LOGGER = +// Logger::LoggerManager::getInstance().get_logger("isd_cost_estimate"); +/***************************Classic ISDs***************************************/ + +Result isd_log_cost_classic_BJMM_approx(const uint32_t n, const uint32_t k, + const uint32_t t) { + Result result; + result.alg_name = "BJMM"; + result.params = {{"approx", true}}; + result.value = ((double)t) * -log((1.0 - (double)k / (double)n)) / log(2); + return result; +} + +// computes the probability of a random k * k being invertible +const NTL::RR log_probability_k_by_k_is_inv(const NTL::RR &k) { + if (k >= 100) + return NTL::RR(-1.79191682); + NTL::RR log_pinv = NTL::RR(-1); + for (long i = 2; i <= k; i++) { + log_pinv = log_pinv + log2_RR(NTL::RR(1) - NTL::power2_RR(-i)); + } + return log_pinv; +} + +const NTL::RR probability_k_by_k_is_inv(const NTL::RR &k) { + if (k >= 100) + return NTL::RR(0.288788095); + NTL::RR log_pinv = NTL::RR(0.5); + for (long i = 2; i <= k; i++) { + log_pinv = log_pinv * (NTL::RR(1) - NTL::power2_RR(-i)); + } + return log_pinv; +} + +const NTL::RR classic_rref_red_cost(const NTL::RR &n, const NTL::RR &r) { + /* simple reduced row echelon form transform, as it is not likely to be the + * bottleneck */ + NTL::RR k = n - r; + return r * r * n / NTL::RR(2) + (n * r) / NTL::RR(2) - + r * r * r / NTL::RR(6) + r * r + r / NTL::RR(6) - NTL::RR(1); +} + +// const NTL::RR classic_IS_candidate_cost(const NTL::RR &n, const NTL::RR &r) { +// NB: r* r should be added only for SDP, and even there it can be omitted since +// the syndrome can be thought as another column of H +// return classic_rref_red_cost(n, r) / probability_k_by_k_is_inv(r) + r * r; +// } + +const NTL::RR Fin_Send_rref_red_cost(const NTL::RR &n, const NTL::RR &r, + const NTL::RR l) { + /* reduced size reduced row echelon form transformation, only yields an + * (r-l) sized identity matrix */ + NTL::RR k = n - r; + return -l * l * l / NTL::RR(3) - l * l * n / NTL::RR(2) + + l * l * r / NTL::RR(2) - 3 * l * l / NTL::RR(2) - + 3 * l * n / NTL::RR(2) + l * r / NTL::RR(2) - 13 * l / NTL::RR(6) + + n * r * r / NTL::RR(2) + n * r / NTL::RR(2) - r * r * r / NTL::RR(6) + + r * r + r / NTL::RR(6) - NTL::RR(1); +} + +// const NTL::RR Fin_Send_IS_candidate_cost(const NTL::RR &n, const NTL::RR &r, +// const NTL::RR &l) { +// return Fin_Send_rref_red_cost(n, r, l) / probability_k_by_k_is_inv(r - l) + +// r * r; +// } + +Result isd_log_cost_classic_Prange(const uint32_t n, const uint32_t k, + const uint32_t t) { + NTL::RR n_real = 
NTL::RR(n); + NTL::RR k_real = NTL::RR(k); + NTL::RR t_real = NTL::RR(t); + + // NTL::RR cost_iter = classic_IS_candidate_cost(n_real, n_real - k_real); + NTL::RR cost_gje = classic_rref_red_cost(n_real, k_real); + NTL::RR num_iter = NTL::to_RR(binomial_wrapper(n, t)) / + NTL::to_RR(binomial_wrapper(n - k, t)); + + NTL::RR log_cost = log2_RR(num_iter) - + log_probability_k_by_k_is_inv(n_real - k_real) + + log2_RR(cost_gje); + + Result res; + res.alg_name = "Prange"; + res.params = {}; + res.value = NTL::conv(log_cost); + res.gje_cost = NTL::conv(log2_RR(cost_gje)); + return res; +} + +#define P_MAX_LB 20 +Result isd_log_cost_classic_LB(const uint32_t n, const uint32_t k, + const uint32_t t) { + NTL::RR n_real = NTL::RR(n); + + NTL::RR k_real = NTL::RR(k); + NTL::RR t_real = NTL::RR(t); + NTL::RR min_log_cost = n_real; // unreachable upper bound + NTL::RR log_cost; + uint32_t best_p = 1; + uint32_t constrained_max_p = P_MAX_LB > t ? t : P_MAX_LB; + + NTL::RR cost_gje = classic_rref_red_cost(n_real, k_real); + // IS_candidate_cost = classic_IS_candidate_cost(n_real, n_real - k_real); + + for (uint32_t p = 1; p < constrained_max_p; p++) { + NTL::RR p_real = NTL::RR(p); + NTL::RR cost_iter = cost_gje / probability_k_by_k_is_inv(n_real - k_real) + + NTL::to_RR(binomial_wrapper(k, p) * p * (n - k)); + NTL::RR num_iter = + NTL::to_RR(binomial_wrapper(n, t)) / + NTL::to_RR(binomial_wrapper(k, p) * binomial_wrapper(n - k, t - p)); + log_cost = + (NTL::log(num_iter) + NTL::log(cost_iter)) / NTL::log(NTL::RR(2)); + if (min_log_cost > log_cost) { + min_log_cost = log_cost; + best_p = p; + } + } + // LOGGER->info("Lee-Brickell best p: {}", best_p); + // LOGGER->info("Lee-Brickell time: {}", NTL::conv(min_log_cost)); + Result res; + res.alg_name = "Lee-Brickell"; + res.params = {{"p", best_p}}; + res.value = NTL::conv(min_log_cost); + res.gje_cost = NTL::conv(log2_RR(cost_gje)); + return res; +} + +#define P_MAX_Leon P_MAX_LB +#define L_MAX_Leon 200 +Result isd_log_cost_classic_Leon(const uint32_t n, const uint32_t k, + const uint32_t t) { + NTL::RR n_real = NTL::RR(n); + NTL::RR k_real = NTL::RR(k); + NTL::RR t_real = NTL::RR(t); + NTL::RR min_log_cost = n_real; // unreachable upper bound + NTL::RR log_cost; + uint32_t best_l = 0, best_p = 1, constrained_max_l, constrained_max_p; + + NTL::RR gje_cost = classic_rref_red_cost(n_real, n_real - k_real); + // IS_candidate_cost = classic_IS_candidate_cost(n_real, n_real - k_real); + constrained_max_p = P_MAX_Leon > t ? t : P_MAX_Leon; + for (uint32_t p = 1; p < constrained_max_p; p++) { + constrained_max_l = + (L_MAX_Leon > (n - k - (t - p)) ? 
(n - k - (t - p)) : L_MAX_Leon); + NTL::RR p_real = NTL::RR(p); + for (uint32_t l = 0; l < constrained_max_l; l++) { + NTL::RR KChooseP = NTL::to_RR(binomial_wrapper(k, p)); + NTL::RR cost_iter = + gje_cost / probability_k_by_k_is_inv(n_real - k_real) + + KChooseP * p_real * NTL::to_RR(l) + + (KChooseP / NTL::power2_RR(l)) * NTL::RR(p * (n - k - l)); + NTL::RR num_iter = NTL::to_RR(binomial_wrapper(n, t)) / + NTL::to_RR(binomial_wrapper(k, p) * + binomial_wrapper(n - k - l, t - p)); + log_cost = + (NTL::log(num_iter) + NTL::log(cost_iter)) / NTL::log(NTL::RR(2)); + if (min_log_cost > log_cost) { + min_log_cost = log_cost; + best_l = l; + best_p = p; + } + } + } + // LOGGER->info("Leon Best l {} best p: {}", best_l, best_p); + // LOGGER->info("Leon time: {}", NTL::conv(min_log_cost)); + Result res; + res.alg_name = "Lee-Brickell"; + res.params = {{"p", best_p}, {"l", best_l}}; + res.value = NTL::conv(min_log_cost); + res.gje_cost = NTL::conv(log2_RR(gje_cost)); + return res; +} + +#define P_MAX_Stern P_MAX_Leon +#define L_MAX_Stern L_MAX_Leon +Result isd_log_cost_classic_Stern(const uint32_t n, const uint32_t k, + const uint32_t t) { + NTL::RR n_real = NTL::RR(n); + NTL::RR k_real = NTL::RR(k); + NTL::RR t_real = NTL::RR(t); + NTL::RR min_log_cost = n_real; // unreachable upper bound + NTL::RR log_cost; + uint32_t best_l = 0, best_p = 2, constrained_max_l, constrained_max_p; + + NTL::RR gje_cost = classic_rref_red_cost(n_real, n_real - k_real); + NTL::RR log_stern_list_size; + // IS_candidate_cost = classic_IS_candidate_cost(n_real, n_real - k_real); + + constrained_max_p = P_MAX_Stern > t ? t : P_MAX_Stern; + for (uint32_t p = 2; p < constrained_max_p; p = p + 2) { + constrained_max_l = + (L_MAX_Stern > (n - k - (t - p)) ? (n - k - (t - p)) : L_MAX_Stern); + NTL::ZZ kHalfChoosePHalf; + for (uint32_t l = 0; l < constrained_max_l; l++) { + NTL::RR p_real = NTL::RR(p); + kHalfChoosePHalf = binomial_wrapper(k / 2, p / 2); + NTL::RR kHalfChoosePHalf_real = NTL::to_RR(kHalfChoosePHalf); + + NTL::RR cost_iter = + gje_cost / probability_k_by_k_is_inv(n_real - k_real) + + kHalfChoosePHalf_real * (NTL::to_RR(l) * p_real + + (kHalfChoosePHalf_real / NTL::power2_RR(l)) * + NTL::RR(p * (n - k - l))); + // #if LOG_COST_CRITERION == 1 + log_stern_list_size = + kHalfChoosePHalf_real * + (p_real / NTL::RR(2) * NTL::log(k_real / NTL::RR(2)) / + NTL::log(NTL::RR(2)) + + NTL::to_RR(l)); + log_stern_list_size = log2_RR(log_stern_list_size); + // NTL::log(log_stern_list_size) / NTL::log(NTL::RR(2)); + cost_iter = cost_iter * log_stern_list_size; + // #endif + NTL::RR num_iter = NTL::to_RR(binomial_wrapper(n, t)) / + NTL::to_RR(kHalfChoosePHalf * kHalfChoosePHalf * + binomial_wrapper(n - k - l, t - p)); + log_cost = log2_RR(num_iter) + log2_RR(cost_iter); + if (min_log_cost > log_cost) { + min_log_cost = log_cost; + best_l = l; + best_p = p; + } + } + } + + // LOGGER->info("Stern Best l {}, best p: {}", best_l, best_p); + // LOGGER->info("Stern time: {}", NTL::conv(min_log_cost)); + Result res; + res.alg_name = "Stern"; + res.params = {{"p", best_p}, {"l", best_l}}; + res.value = NTL::conv(min_log_cost); + res.gje_cost = NTL::conv(log2_RR(gje_cost)); + res.list_size = NTL::conv(log_stern_list_size); + return res; +} + +#define P_MAX_FS P_MAX_Stern +#define L_MAX_FS L_MAX_Stern +Result isd_log_cost_classic_FS(const uint32_t n, const uint32_t k, + const uint32_t t) { + NTL::RR n_real = NTL::RR(n); + NTL::RR k_real = NTL::RR(k); + NTL::RR t_real = NTL::RR(t); + NTL::RR min_log_cost = n_real; // unreachable upper 
bound + NTL::RR log_cost; + uint32_t best_l = 0, best_p = 2, constrained_max_l, constrained_max_p; + + NTL::RR cost_gje; + // return Fin_Send_rref_red_cost(n, r, l) / probability_k_by_k_is_inv(r - l) + // + + constrained_max_p = P_MAX_Stern > t ? t : P_MAX_Stern; + for (uint32_t p = 2; p < constrained_max_p; p = p + 2) { + constrained_max_l = + (L_MAX_Stern > (n - k - (t - p)) ? (n - k - (t - p)) : L_MAX_Stern); + NTL::RR p_real = NTL::RR(p); + NTL::ZZ kPlusLHalfChoosePHalf; + for (uint32_t l = 0; l < constrained_max_l; l++) { + NTL::RR l_real = NTL::RR(l); + cost_gje = Fin_Send_rref_red_cost(n_real, n_real - k_real, l_real); + kPlusLHalfChoosePHalf = binomial_wrapper((k + l) / 2, p / 2); + NTL::RR kPlusLHalfChoosePHalf_real = NTL::to_RR(kPlusLHalfChoosePHalf); + NTL::RR cost_iter = + cost_gje / probability_k_by_k_is_inv(n_real - k_real - l_real) + + kPlusLHalfChoosePHalf_real * + (NTL::to_RR(l) * p_real + + (kPlusLHalfChoosePHalf_real / NTL::power2_RR(l)) * + NTL::RR(p * (n - k - l))); + // #if LOG_COST_CRITERION == 1 + NTL::RR log_FS_list_size = + kPlusLHalfChoosePHalf_real * + (p_real / NTL::RR(2) * NTL::log((k_real + l_real) / NTL::RR(2)) / + NTL::log(NTL::RR(2)) + + l_real); + log_FS_list_size = log2_RR(log_FS_list_size); + cost_iter = cost_iter * log_FS_list_size; + // #endif + NTL::RR num_iter = + NTL::to_RR(binomial_wrapper(n, t)) / + NTL::to_RR(kPlusLHalfChoosePHalf * kPlusLHalfChoosePHalf * + binomial_wrapper(n - k - l, t - p)); + + log_cost = log2_RR(num_iter) + log2_RR(cost_iter); + if (min_log_cost > log_cost) { + min_log_cost = log_cost; + best_l = l; + best_p = p; + } + } + } + // LOGGER->info("FS Best l {}, best p: {}", best_l, best_p); + // LOGGER->info("FS time: {}", NTL::conv(min_log_cost)); + Result res; + res.alg_name = "Fin-Send"; + res.params = {{"p", best_p}, {"l", best_l}}; + res.value = NTL::conv(min_log_cost); + res.gje_cost = NTL::conv(log2_RR(cost_gje)); + // cost_gje not reported + return res; +} + +#define P_MAX_MMT (P_MAX_FS + 25) // P_MAX_MMT +#define L_MAX_MMT 350 // L_MAX_MMT +#define L_MIN_MMT 2 +Result isd_log_cost_classic_MMT(const uint32_t n, const uint32_t k, + const uint32_t t) { + uint32_t r = n - k; + NTL::RR n_real = NTL::RR(n); + NTL::RR r_real = NTL::RR(r); + NTL::RR k_real = n_real - r_real; + + NTL::RR min_log_cost = n_real; // unreachable upper bound + NTL::RR log_cost, log_mem_cost; + uint32_t best_l = L_MIN_MMT, best_p = 4, constrained_max_l = 0, + constrained_max_p; +#if defined(EXPLORE_REPS) + uint32_t best_l1; +#endif + + NTL::RR cost_gje; + constrained_max_p = P_MAX_MMT > t ? t : P_MAX_MMT; + /* p should be divisible by 4 in MMT */ + for (uint32_t p = 4; p <= constrained_max_p; p = p + 4) { + constrained_max_l = + (L_MAX_MMT > (n - k - (t - p)) ? 
(n - k - (t - p)) : L_MAX_MMT); + for (uint32_t l = L_MIN_MMT; l <= constrained_max_l; l++) { + NTL::RR l_real = NTL::to_RR(l); + NTL::ZZ kPlusLHalfChoosePHalf = binomial_wrapper((k + l) / 2, p / 2); + NTL::RR num_iter = + NTL::to_RR(binomial_wrapper(n, t)) / + NTL::to_RR(kPlusLHalfChoosePHalf * kPlusLHalfChoosePHalf * + binomial_wrapper(n - k - l, t - p)); + // FS_IS_candidate_cost = Fin_Send_IS_candidate_cost(n_real, r_real, + // l_real); + cost_gje = Fin_Send_rref_red_cost(n_real, n_real - k_real, l_real); + NTL::ZZ kPlusLHalfChoosePFourths = binomial_wrapper((k + l) / 2, p / 4); + NTL::RR kPlusLHalfChoosePFourths_real = + NTL::to_RR(kPlusLHalfChoosePFourths); + NTL::RR minOperandRight, min; + NTL::RR PChoosePHalf = NTL::to_RR(binomial_wrapper(p, p / 2)); + NTL::RR kPlusLChoosePHalf = NTL::to_RR(binomial_wrapper((k + l), p / 2)); + minOperandRight = + NTL::to_RR(binomial_wrapper((k + l) / 2, p / 2)) / PChoosePHalf; + min = kPlusLHalfChoosePFourths_real > minOperandRight + ? minOperandRight + : kPlusLHalfChoosePFourths_real; + + /* hoist out anything not depending on l_1/l_2 split*/ +#if defined(EXPLORE_REPRS) + for (l_1 = 1; l_1 <= l; l_1++) { + uint32_t l_2 = l - l_1; +#else + uint32_t l_2 = NTL::conv( + log2_RR(kPlusLHalfChoosePFourths_real / + NTL::to_RR(binomial_wrapper(p, p / 2)))); + /*clamp l_2 to a safe value , 0 < l_2 < l*/ + l_2 = l_2 <= 0 ? 1 : l_2; + l_2 = l_2 >= l ? l - 1 : l_2; + + uint32_t l_1 = l - l_2; +#endif + NTL::RR interm = kPlusLHalfChoosePFourths_real / NTL::power2_RR(l_2) * + NTL::to_RR(p / 2 * l_1); + + NTL::RR otherFactor = (NTL::to_RR(p / 4 * l_2) + interm); + NTL::RR cost_iter = + cost_gje / probability_k_by_k_is_inv(n_real - k_real - l_real) + + min * otherFactor + + kPlusLHalfChoosePFourths_real * NTL::to_RR(p / 2 * l_2); + + NTL::RR lastAddend = + otherFactor + kPlusLHalfChoosePFourths_real * kPlusLChoosePHalf * + PChoosePHalf / NTL::power2_RR(l) * + NTL::to_RR(p * (r - l)); + lastAddend = lastAddend * kPlusLHalfChoosePFourths_real; + cost_iter += lastAddend; + // #if 0 + + NTL::RR log_MMT_space = + r_real * n_real + + kPlusLHalfChoosePFourths_real * + (NTL::to_RR(p / 4) * log2_RR(NTL::to_RR(k + l / 2)) + + NTL::to_RR(l_2)) + + NTL::to_RR(min) * (NTL::to_RR(p / 2) * log2_RR(NTL::to_RR(k + l)) + + NTL::to_RR(l)); + log_MMT_space = log2_RR(log_MMT_space); + cost_iter = cost_iter * log_MMT_space; + // #endif + log_cost = log2_RR(num_iter) + log2_RR(cost_iter); + if (min_log_cost > log_cost) { + min_log_cost = log_cost; + best_l = l; +#if defined(EXPLORE_REPRS) + best_l1 = l_1; +#endif + best_p = p; + log_mem_cost = log_MMT_space; + } +#if defined(EXPLORE_REPRS) + } +#endif + } + } + // LOGGER->info("MMT Best l {}, best p: {}", best_l, best_p); + if (best_p == constrained_max_p) { + // LOGGER->warn("Warning: p {p} on exploration edge!"); + } + if (best_l == constrained_max_l) { + // LOGGER->warn("Warning: l {l} on exploration edge!"); + } + + // LOGGER->info("MMT time: {}", NTL::conv(min_log_cost)); + Result res; + res.alg_name = "MMT"; + res.params = {{"p", best_p}, {"l", best_l}}; + res.value = NTL::conv(min_log_cost); + res.gje_cost = NTL::conv(log2_RR(cost_gje)); + return res; +} + +#define P_MAX_BJMM 20 // P_MAX_MMT +#define L_MAX_BJMM 90 // L_MAX_MMT +#define Eps1_MAX_BJMM 4 +#define Eps2_MAX_BJMM 4 +Result isd_log_cost_classic_BJMM(const uint32_t n, const uint32_t k, + const uint32_t t) { + NTL::RR n_real = NTL::RR(n); + NTL::RR k_real = NTL::RR(k); + NTL::RR t_real = NTL::RR(t); + uint32_t r = n - k; + NTL::RR r_real = NTL::RR(r); + + NTL::RR 
min_log_cost = n_real; // unreachable upper bound + NTL::RR log_cost; + std::optional best_p, best_l, best_eps_1, best_eps_2; + uint32_t constrained_max_l, constrained_max_p; + + NTL::RR cost_gje; + constrained_max_p = P_MAX_BJMM > t ? t : P_MAX_BJMM; + /*p should be divisible by 2 in BJMM */ + for (uint32_t p = 2; p < constrained_max_p; p = p + 2) { + /* sweep over all the valid eps1 knowing that p/2 + eps1 should be a + * multiple of 4*/ + constrained_max_l = + (L_MAX_BJMM > (n - k - (t - p)) ? (n - k - (t - p)) : L_MAX_BJMM); + for (uint32_t l = 0; l < constrained_max_l; l++) { + for (uint32_t eps1 = 2 + (p % 2); eps1 < Eps1_MAX_BJMM; eps1 = eps1 + 2) { + uint32_t p_1 = p / 2 + eps1; + /* sweep over all the valid eps2 knowing that p_1/2 + eps2 should + * be even */ + for (uint32_t eps2 = (p_1 % 2); eps2 < Eps2_MAX_BJMM; eps2 = eps2 + 2) { + uint32_t p_2 = p_1 / 2 + eps2; + + /* Available parameters p, p_1,p_2,p_3, l */ + NTL::RR l_real = NTL::RR(l); + cost_gje = Fin_Send_rref_red_cost(n_real, n_real - k_real, l_real); + // TODO check why this cost (or the rref cost) is never used + // FS_IS_candidate_cost = + // Fin_Send_IS_candidate_cost(n_real, n_real - k_real, l_real); + uint32_t p_3 = p_2 / 2; + + NTL::ZZ L3_list_len = binomial_wrapper((k + l) / 2, p_3); + NTL::RR L3_list_len_real = NTL::to_RR(L3_list_len); + /* the BJMM number of iterations depends only on L3 parameters + * precompute it */ + NTL::RR num_iter = NTL::to_RR(binomial_wrapper(n, t)) / + NTL::to_RR(binomial_wrapper((k + l), p) * + binomial_wrapper(r - l, t - p)); + NTL::RR P_invalid_splits = NTL::power(L3_list_len_real, 2) / + NTL::to_RR(binomial_wrapper(k + l, p_2)); + num_iter = num_iter / NTL::power(P_invalid_splits, 4); + + /* lengths of lists 2 to 0 have to be divided by the number of + * repr.s*/ + NTL::RR L2_list_len = NTL::to_RR(binomial_wrapper(k + l, p_2)) * + NTL::power(P_invalid_splits, 1); + NTL::RR L1_list_len = NTL::to_RR(binomial_wrapper(k + l, p_1)) * + NTL::power(P_invalid_splits, 2); + /* estimating the range for r_1 and r_2 requires to compute the + * number of representations rho_1 and rho_2 */ + + NTL::ZZ rho_2 = binomial_wrapper(p_1, p_1 / 2) * + binomial_wrapper(k + l - p_1, eps2); + NTL::ZZ rho_1 = + binomial_wrapper(p, p / 2) * binomial_wrapper(k + l - p, eps1); + int min_r2 = NTL::conv(NTL::log(NTL::to_RR(rho_2)) / + NTL::log(NTL::RR(2))); + int max_r1 = NTL::conv(NTL::log(NTL::to_RR(rho_1)) / + NTL::log(NTL::RR(2))); + + /*enumerate r_1 and r_2 over the suggested range + * log(rho_2) < r2 < r_1 < log(rho_1)*/ + /* clamp to safe values */ + min_r2 = min_r2 > 0 ? min_r2 : 1; + max_r1 = max_r1 < (int)l ? 
max_r1 : l - 1; + + NTL::RR p_real = NTL::RR(p); + for (int r_2 = min_r2; r_2 < max_r1 - 1; r_2++) { + for (int r_1 = r_2 + 1; r_1 < max_r1; r_1++) { + + /*add the cost of building Layer 3 to cost_iter */ + NTL::RR cost_iter = + NTL::to_RR(4) * + (k + l + 2 * L3_list_len_real + r_2 + + NTL::power(L3_list_len_real, 2) * NTL::to_RR(2 * p_3 * r_2)); + + /* add the cost of building Layer 2 */ + cost_iter += + 2 * (NTL::power((NTL::to_RR(rho_2) / (NTL::power2_RR(r_2))) * + NTL::power(L3_list_len_real, 2), + 2) * + 2 * p_2 * (r_1 - r_2)); + + /* add the cost of building Layer 1 */ + cost_iter += + NTL::power((NTL::to_RR(rho_1) / NTL::power2_RR(r_1)) * + (NTL::to_RR(rho_2) / NTL::power2_RR(r_2)) * + NTL::power(L3_list_len_real, 2), + 4) * + 2 * p_1 * l; + + /* add the cost of building L0 */ + cost_iter += + p * (r - l) * + NTL::power((NTL::to_RR(rho_1) / NTL::power2_RR(r_1)) * + (NTL::to_RR(rho_2) / NTL::power2_RR(r_2)) * + NTL::power(L3_list_len_real, 2), + 4) / + NTL::to_RR(l); + + log_cost = log2_RR(num_iter) + log2_RR(cost_iter); + + if (min_log_cost > log_cost) { + min_log_cost = log_cost; + best_l = l; + best_p = p; + best_eps_1 = eps1; + best_eps_2 = eps2; + } + } + } + + } /*end of iteration over l */ + /* to review up to to here */ + } /* end for over eps2 */ + } /* end for over eps1 */ + } /* end for over p*/ + + if (!best_l || !best_eps_1 || !best_p || !best_eps_2) { + // LOGGER->error("Error: One or more variables are not initialized."); + throw std::runtime_error("One or more variables are not initialized."); + } + // LOGGER->info("BJMM Best l {}, best p: {}, best eps1: {}, best eps2: {}", + // Logger::LoggerManager::getInstance().optional_to_string(best_l), + // Logger::LoggerManager::getInstance().optional_to_string(best_p), + // Logger::LoggerManager::getInstance().optional_to_string(best_eps_1), + // Logger::LoggerManager::getInstance().optional_to_string(best_eps_2)); + // LOGGER->info("BJMM time: {}", NTL::conv(min_log_cost)); + Result res; + res.alg_name = "BJMM"; + res.params = {{"p", best_p.value()}, + {"l", best_l.value()}, + {"eps1", best_eps_1.value()}, + {"eps2", best_eps_2.value()}}; + res.value = NTL::conv(min_log_cost); + res.gje_cost = NTL::conv(log2_RR(cost_gje)); + return res; +} + +/***************************Quantum ISDs***************************************/ + +const NTL::RR quantum_gauss_red_cost(const NTL::RR &n, const NTL::RR &k) { + // return 0.5* NTL::power(n-k,3) + k*NTL::power((n-k),2); + return 1.5 * NTL::power(n - k, 2) - 0.5 * (n - k); +} + +#define P_MAX_Q_LB 3 // P_MAX_MMT +Result isd_log_cost_quantum_LB(const uint32_t n, const uint32_t k, + const uint32_t t) { + NTL::RR n_real = NTL::RR(n); + NTL::RR k_real = NTL::RR(k); + NTL::RR t_real = NTL::RR(t); + NTL::RR log_pi_fourths = NTL::log(pi * 0.25); + NTL::RR log_pinv = log_probability_k_by_k_is_inv(k_real); + + /* Check https://doi.org/10.1007/978-3-031-61489-7_2 + * for the full measures of the lee-brickell quantum attack + */ + NTL::RR min_log_cost = n_real; // unreachable upper bound + uint32_t p; + std::optional best_p; + for (p = 1; p < P_MAX_Q_LB; p++) { + NTL::RR p_real = NTL::RR(p); + NTL::RR iteration_cost = quantum_gauss_red_cost(n_real, k_real) + + NTL::to_RR(binomial_wrapper(k, p)) * + NTL::log(n_real - k_real) / + NTL::log(NTL::RR(2)); + NTL::RR log_cost = + log_pi_fourths + .5 * (lnBinom(n_real, t_real) - log_pinv - + (lnBinom(k_real, p_real) + + lnBinom(n_real - k_real, t_real - p_real))); + log_cost += NTL::log(iteration_cost); + log_cost = log_cost / NTL::log(NTL::RR(2)); + if 
(log_cost < min_log_cost) { + min_log_cost = log_cost; + best_p = p; + } + } + if (!best_p) { + // LOGGER->error("Error: One or more variables are not initialized."); + throw std::runtime_error("One or more variables are not initialized."); + } + + // LOGGER->info("Quantum LB time: {}", NTL::conv(min_log_cost)); + Result res; + res.alg_name = "Quantum Lee-Brickell"; + res.params = {{"p", best_p.value()}}; + res.value = NTL::conv(min_log_cost); + return res; +} + +#define MAX_M (t / 2) + +Result isd_log_cost_quantum_Stern(const uint32_t n, const uint32_t k, + const uint32_t t) { + NTL::RR n_real = NTL::RR(n); + NTL::RR k_real = NTL::RR(k); + NTL::RR t_real = NTL::RR(t); + NTL::RR current_complexity, log_p_success, c_it, c_dec; + + // Start computing Stern's parameter invariant portions of complexity + NTL::RR log_pi_fourths = NTL::log(pi * 0.25); + // compute the probability of a random k * k being invertible + NTL::RR log_pinv = log_probability_k_by_k_is_inv(k_real); + // compute the cost of inverting the matrix, in a quantum execution env. + NTL::RR c_inv = quantum_gauss_red_cost(n_real, k_real); + + // optimize Stern's parameters : + // m : the # of errors in half of the chosen dimensions + // l : the length of the run of zeroes in the not chosen dimensions + // done via exhaustive parameter space search, minimizing the total + // complexity. + // Initial value set to codeword bruteforce to ensure the minimum is found. + NTL::RR min_stern_complexity = NTL::RR(n) * NTL::log(NTL::RR(2)); + + for (long m = 1; m <= MAX_M; m++) { + NTL::RR m_real = NTL::RR(m); + /* previous best complexity as a function of l alone. + * initialize to bruteforce-equivalent, break optimization loop as soon + * as a minimum is found */ + NTL::RR prev_best_complexity = NTL::RR(t); + for (long l = 0; l < (n - k - (t - 2 * m)); l++) { + + NTL::RR l_real = NTL::RR(l); + log_p_success = lnBinom(t_real, 2 * m_real) + + lnBinom(n_real - t_real, k_real - 2 * m_real) + + lnBinom(2 * m_real, m_real) + + lnBinom(n_real - k_real - t_real + 2 * m_real, l_real); + log_p_success = log_p_success - + (m_real * NTL::log(NTL::RR(4)) + lnBinom(n_real, k_real) + + lnBinom(n_real - k_real, l_real)); + current_complexity = -(log_p_success + log_pinv) * 0.5 + log_pi_fourths; + /* to match specifications , the term should be + * (n_real-k_real), as per in deVries, although + * David Hobach thesis mentions it to be + * (n_real-k_real-l_real), and it seems to match. + * amend specs for the typo. 
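The success probability whose logarithm is accumulated in `log_p_success` above can be stated in closed form (written here as P_succ, a label not used in the code), with m the number of errors placed in each half of the selected k columns and l the length of the required zero run outside them:

```latex
P_{\mathrm{succ}}(m,\ell) \;=\;
\frac{\binom{t}{2m}\,\binom{n-t}{k-2m}\,\binom{2m}{m}\,\binom{n-k-t+2m}{\ell}}
     {4^{m}\,\binom{n}{k}\,\binom{n-k}{\ell}}
```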
*/ + c_it = l_real + (n_real - k_real - l_real) * + NTL::to_RR(binomial_wrapper(k / 2, m)) / + NTL::power2_RR(-l); + + c_it = c_it * 2 * m_real * NTL::to_RR(binomial_wrapper(k / 2, m)); +#if IGNORE_DECODING_COST == 1 + c_dec = 0.0; +#elif IGNORE_DECODING_COST == 0 + /*cost of decoding estimated as per Golomb CWDEC + * decoding an n-bit vector with weight k is + * CWDEC_cost(k,n)=O(n^2 log_2(n)) and following deVries, where + * c_dec = CWDEC_cost(n-k, n) + k + CWDEC_cost(l,n-k)*/ + c_dec = + n_real * n_real * NTL::log(n_real) + k_real + + (n_real - k_real) * (n_real - k_real) * NTL::log((n_real - k_real)); +#endif + current_complexity = current_complexity + NTL::log(c_it + c_inv + c_dec); + if (current_complexity < prev_best_complexity) { + prev_best_complexity = current_complexity; + } else { + break; + } + } + if (current_complexity < min_stern_complexity) { + min_stern_complexity = current_complexity; + } + } + Result res; + res.alg_name = "Quantum Stern"; + res.params = {}; + res.value = NTL::conv(min_stern_complexity / NTL::log(NTL::RR(2.0))); + return res; +} + +/***************************Aggregation ***************************************/ + +double get_qc_red_factor_classic_log(const uint32_t qc_order, const uint32_t n0, + QCAttackType attack) { + /* For key recovery attacks (CFP) the advantage from quasi-cyclicity is p. For + * a message recovery (SDP), the DOOM advantage is sqrt(p). + * + * Additionally, for key recovery attacks, there is a speedup depending on the + * different kind of attacks (check LEDA specs). + */ + + switch (attack) { + case QCAttackType::KRA1: + return log2(qc_order) + log2(NTL::conv(binomial_wrapper(n0, 2))); + case QCAttackType::KRA2: + return log2(qc_order) + log2(n0); + case QCAttackType::KRA3: + return log2(qc_order); + case QCAttackType::MRA: + return log2(qc_order) / 2; + case QCAttackType::Plain: + return 0; + default: + throw std::runtime_error("Wrong attack type"); + } +} + +Result c_isd_log_cost(const uint32_t n, const uint32_t k, const uint32_t t, + const uint32_t qc_order, QCAttackType attack, + const bool compute_qc_reduction_factor, + std::unordered_set algs) { + // attack is useless if compute_qc_reduction_factor is false + + Result current_res, min_res; + double qc_red_factor = compute_qc_reduction_factor + ? get_qc_red_factor_classic_log(qc_order, n - k, attack) + : 0; + + double min_cost = n; // the cost cannot be greater than 2^n + + for (const auto &algo : algs) { + switch (algo) { + case Algorithm::Prange: + current_res = isd_log_cost_classic_Prange(n, k, t); + break; + case Algorithm::Lee_Brickell: + current_res = isd_log_cost_classic_LB(n, k, t); + break; + case Algorithm::Leon: + current_res = isd_log_cost_classic_Leon(n, k, t); + break; + case Algorithm::Stern: + current_res = isd_log_cost_classic_Stern(n, k, t); + break; + case Algorithm::Finiasz_Sendrier: + current_res = isd_log_cost_classic_FS(n, k, t); + break; + case Algorithm::MMT: + current_res = isd_log_cost_classic_MMT(n, k, t); + break; + case Algorithm::BJMM: + current_res = isd_log_cost_classic_BJMM(n, k, t); + break; + default: + std::cerr << "Unknown algorithm\n"; + break; + } + current_res.value -= qc_red_factor; + if (current_res.value < min_cost) { + min_res = current_res; + min_cost = current_res.value; + } + } + + return min_res; +} + +double get_qc_red_factor_quantum_log(const uint32_t qc_order, const uint32_t n0, + QCAttackType attack) { + /* For key recovery attacks (CFP) the advantage from quasi-cyclicity is p. 
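A minimal caller sketch for `c_isd_log_cost` above, mirroring how the tools in `src/tools` invoke it. The function name `classic_work_factor_example`, the code parameters and the header paths are assumptions for illustration only; as in the executables, the NTL precision and the memoized binomial tables must be set up before the call.

```cpp
// Hypothetical parameters; a minimal sketch of the calling convention of
// c_isd_log_cost (header names follow the includes used elsewhere in this patch).
#include <cstdint>
#include <iostream>
#include <unordered_set>
#include <NTL/RR.h>

#include "binomials.hpp"
#include "isd_cost_estimate.hpp"

int classic_work_factor_example() {
  NTL::RR::SetPrecision(128);  // placeholder precision
  InitBinomials();             // fills the memoized binomial tables

  const uint32_t p = 14939;    // hypothetical circulant block size
  const uint32_t n = 2 * p, k = p, t = 136;

  Result r = c_isd_log_cost(n, k, t, /*qc_order=*/p, QCAttackType::MRA,
                            /*compute_qc_reduction_factor=*/true,
                            std::unordered_set<Algorithm>{Algorithm::Prange,
                                                          Algorithm::Stern});
  std::cout << r.alg_name << " -> log2(cost) = " << r.value << std::endl;
  return 0;
}
```

The quantum counterpart `q_isd_log_cost` below follows the same pattern, taking a set of `QuantumAlgorithm` values instead.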
+   * For a message recovery attack (SDP), the DOOM advantage is sqrt(p).
+   *
+   * Additionally, for key recovery attacks there is a speedup that depends on
+   * the specific kind of attack (see the LEDA specification).
+   */
+
+  switch (attack) {
+  case QCAttackType::MRA:
+    return log2(qc_order) / 2;
+  case QCAttackType::Plain:
+    return 0;
+  default:
+    throw std::runtime_error("Wrong attack type");
+  }
+}
+
+Result q_isd_log_cost(const uint32_t n, const uint32_t k, const uint32_t t,
+                      const uint32_t qc_order, QCAttackType attack,
+                      const bool compute_qc_reduction_factor,
+                      std::unordered_set<QuantumAlgorithm> algs) {
+  Result current_res, min_res;
+  double min_cost = n; // the log2 cost cannot exceed n
+  double qc_red_factor =
+      compute_qc_reduction_factor
+          ? get_qc_red_factor_quantum_log(qc_order, n - k, attack)
+          : 0;
+
+  for (const auto &algo : algs) {
+    switch (algo) {
+    case QuantumAlgorithm::Q_Lee_Brickell:
+      current_res = isd_log_cost_quantum_LB(n, k, t);
+      break;
+    case QuantumAlgorithm::Q_Stern:
+      current_res = isd_log_cost_quantum_Stern(n, k, t);
+      break;
+    default:
+      std::cerr << "Unknown quantum algorithm\n";
+      break;
+    }
+    current_res.value -= qc_red_factor;
+    if (current_res.value < min_cost) {
+      min_res = current_res;
+      min_cost = current_res.value;
+    }
+  }
+
+  return min_res;
+}
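For orientation, here is a minimal sketch of how the two aggregation entry points above might be driven. The function, enum, and `Result` field names are taken from this diff; the include paths, the initialization sequence (mirroring the removed `work_factor_computation.cpp`), and the code parameters are illustrative assumptions only.

```cpp
// Illustrative sketch, not part of the patch. Assumes the headers are
// reachable as below and that InitBinomials()/pi/RR-precision setup are still
// required, as they were in the removed work_factor_computation.cpp.
#include <cstdint>
#include <iostream>
#include <unordered_set>

#include <NTL/RR.h>

#include "binomials.hpp"
#include "isd_cost_estimate.hpp"

int main() {
  NTL::RR::SetPrecision(1024);
  InitBinomials();
  pi = NTL::ComputePi_RR();

  // Arbitrary example code parameters, not an actual LEDA parameter set.
  const uint32_t n = 4000, k = 2000, t = 80;

  std::unordered_set<Algorithm> classic = {Algorithm::Prange, Algorithm::Stern,
                                           Algorithm::BJMM};
  // Plain (non-QC) code: qc_order = 1, no quasi-cyclic reduction applied.
  Result c = c_isd_log_cost(n, k, t, 1, QCAttackType::Plain, false, classic);

  std::unordered_set<QuantumAlgorithm> quantum = {
      QuantumAlgorithm::Q_Lee_Brickell, QuantumAlgorithm::Q_Stern};
  Result q = q_isd_log_cost(n, k, t, 1, QCAttackType::Plain, false, quantum);

  std::cout << "cheapest classic: " << c.alg_name << " ~ 2^" << c.value << "\n"
            << "cheapest quantum: " << q.alg_name << " ~ 2^" << q.value << "\n";
  return 0;
}
```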
diff --git a/src/utils/logging.cpp b/src/utils/logging.cpp
new file mode 100644
index 0000000..ff595e1
--- /dev/null
+++ b/src/utils/logging.cpp
@@ -0,0 +1,57 @@
+#include "logging.hpp"
+#include <spdlog/sinks/basic_file_sink.h>
+#include <spdlog/sinks/stdout_color_sinks.h>
+#include <spdlog/spdlog.h>
+#include <iostream>
+#include <vector>
+
+Logger::LoggerManager &Logger::LoggerManager::getInstance() {
+  static LoggerManager instance;
+  return instance;
+}
+
+void Logger::LoggerManager::setup_logger(
+    const std::string &logger_name, spdlog::level::level_enum console_level,
+    spdlog::level::level_enum file_level, const std::string &pattern) {
+  // Creating sinks, console ...
+  auto console_sink = std::make_shared<spdlog::sinks::stdout_color_sink_mt>();
+  console_sink->set_level(console_level);
+  console_sink->set_pattern(pattern);
+
+  // ... and file
+  // TODO change hard-coded dir
+  auto file_sink = std::make_shared<spdlog::sinks::basic_file_sink_mt>(
+      "logs/" + logger_name + ".log", true);
+  file_sink->set_level(file_level);
+  file_sink->set_pattern(pattern);
+
+  std::vector<spdlog::sink_ptr> sinks{console_sink, file_sink};
+
+  std::shared_ptr<spdlog::logger> logger;
+  auto it = loggers.find(logger_name);
+  if (it != loggers.end()) {
+    std::cout << "Logger already present " << logger_name << std::endl;
+    logger = it->second;
+  } else {
+    std::cout << "Creating logger " << logger_name << std::endl;
+    logger = std::make_shared<spdlog::logger>(logger_name, sinks.begin(),
+                                              sinks.end());
+    loggers[logger_name] = logger;
+    spdlog::register_logger(logger);
+  }
+  // logger->set_level(spdlog::level::trace); // Set to the most verbose level
+  // logger->flush_on(spdlog::level::err);
+}
+
+std::shared_ptr<spdlog::logger>
+Logger::LoggerManager::get_logger(const std::string &logger_name) {
+  auto it = loggers.find(logger_name);
+  if (it != loggers.end()) {
+    return it->second;
+  }
+  // Logger not found, so set it up
+  setup_logger(logger_name, spdlog::level::info,
+               spdlog::level::info); // Default levels
+
+  return loggers[logger_name];
+}
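A short usage sketch for the `LoggerManager` added above. The class and method names come from the diff; the logger name and the manual creation of the hard-coded `logs/` directory are assumptions made for illustration.

```cpp
// Illustrative sketch, not part of the patch. "work_factor" is an arbitrary
// logger name; logs/ is created by hand because the file sink path above is
// hard-coded to that directory.
#include <filesystem>

#include "logging.hpp"

int main() {
  std::filesystem::create_directories("logs");

  auto &manager = Logger::LoggerManager::getInstance();
  // The first call sets the logger up with the default console/file levels.
  auto logger = manager.get_logger("work_factor");

  logger->info("work factor run starting, n = {}", 4000);
  logger->warn("this message also ends up in logs/work_factor.log");
  return 0;
}
```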
diff --git a/partitions_permanents.hpp b/src/utils/partitions_permanents.cpp
similarity index 60%
rename from partitions_permanents.hpp
rename to src/utils/partitions_permanents.cpp
index c9fa709..e2203d9 100644
--- a/partitions_permanents.hpp
+++ b/src/utils/partitions_permanents.cpp
@@ -1,5 +1,6 @@
 #include
-#include
+
+#include "partitions_permanents.hpp"
 
 /* Permanent formulas for circulant matrices
 as obtained via Sage (macsyma)
@@ -71,61 +72,61 @@ switch(n_0){
   return permanent;
 }
-int FindmPartition(const uint64_t m,
-                   uint64_t mpartition[],
-                   const uint64_t n_0){
-    // Enumerate partitions of m with length n_0,
-    // according to TAOCP, Vol 4 Fascicle 3b, Algorithm H
-    // PRE : m >= n_0 >= 2
-    if ( (m < n_0) || (n_0 < 2) ){
-        return 0;
-    }
+int FindmPartition(const uint64_t m,
+                   std::vector<uint64_t> &mpartition,
+                   const uint64_t n_0) {
+  // Enumerate partitions of m with length n_0,
+  // according to TAOCP, Vol 4 Fascicle 3b, Algorithm H
+  // PRE : m >= n_0 >= 2
+  if ((m < n_0) || (n_0 < 2)) {
+    return 0;
+  }
-    int64_t mpartition_selected[n_0];
-    int found_good_partition = 0;
-    int64_t mpartition_tmp[n_0+1];
-    mpartition_tmp[0] = m - (n_0 - 1);
-    for(unsigned i = 1; i < n_0; i++){
-        mpartition_tmp[i] = 1;
+  int64_t mpartition_selected[n_0];
+  int found_good_partition = 0;
+  int64_t mpartition_tmp[n_0 + 1];
+  mpartition_tmp[0] = m - (n_0 - 1);
+  for (unsigned i = 1; i < n_0; i++) {
+    mpartition_tmp[i] = 1;
+  }
+  // theoretically, mpartition_tmp[n_0] = -1 according to knuth
+  mpartition_tmp[n_0] = -1;
+  do {
+    // visit the partition
+    if (ComputePermanent(mpartition_tmp, n_0) % 2 == 1U) {
+      for (unsigned i = 0; i < n_0; i++) {
+        mpartition_selected[i] = (uint64_t)mpartition_tmp[i];
+      }
+      found_good_partition = 1;
+    }
+    if (mpartition_tmp[1] < mpartition_tmp[0] - 1) {
+      // step H3: easy but very common case
+      mpartition_tmp[0]--;
+      mpartition_tmp[1]++;
+    } else {
+      // step H4
+      int j = 2;
+      int64_t s = mpartition_tmp[0] + mpartition_tmp[1] - 1;
+      while (mpartition_tmp[j] >= mpartition_tmp[0] - 1) {
+        s = s + mpartition_tmp[j];
+        j++;
+      }
+      if (j >= (int)n_0) { // completed enumeration of partitions
+        for (unsigned i = 0; i < n_0; i++) {
+          mpartition[i] = (uint64_t)mpartition_selected[i];
+        }
+        return found_good_partition;
+      } else {
+        uint64_t x = mpartition_tmp[j] + 1;
+        mpartition_tmp[j] = x;
+        j--;
+        while (j > 0) {
+          mpartition_tmp[j] = x;
+          s = s - x;
+          j--;
+        }
+        mpartition_tmp[0] = s;
+      }
     }
-    // theoretically, mpartition_tmp[n_0] = -1 according to knuth
-    mpartition_tmp[n_0] = -1;
-    do {
-        // visit the partition
-        if (ComputePermanent(mpartition_tmp,n_0) % 2 == 1U){
-            for(unsigned i = 0; i < n_0; i++){
-                mpartition_selected[i] = (uint64_t) mpartition_tmp[i];
-            }
-            found_good_partition = 1;
-        }
-        if (mpartition_tmp[1] < mpartition_tmp[0] - 1){
-            // step H3: easy but very common case
-            mpartition_tmp[0]--;
-            mpartition_tmp[1]++;
-        } else {
-            // step H4
-            int j = 2;
-            int64_t s = mpartition_tmp[0] + mpartition_tmp[1] - 1;
-            while ( mpartition_tmp[j] >= mpartition_tmp[0] - 1 ){
-                s = s + mpartition_tmp[j];
-                j++;
-            }
-            if( j >= (int)n_0 ){ // completed enumeration of partitions
-                for(unsigned i = 0; i < n_0; i++){
-                    mpartition[i] = (uint64_t) mpartition_selected[i];
-                }
-                return found_good_partition;
-            } else {
-                uint64_t x = mpartition_tmp[j]+1;
-                mpartition_tmp[j] = x;
-                j--;
-                while (j > 0) {
-                    mpartition_tmp[j] = x;
-                    s = s - x;
-                    j--;
-                }
-                mpartition_tmp[0] = s;
-            }
-        }
-    } while (1);
+  } while (1);
 }
diff --git a/work_factor_computation.cpp b/work_factor_computation.cpp
deleted file mode 100644
index aea3704..0000000
--- a/work_factor_computation.cpp
+++ /dev/null
@@ -1,60 +0,0 @@
-#include
-#include
-
-#define NUM_BITS_REAL_MANTISSA 1024
-#define IGNORE_DECODING_COST 0
-// #define EXPLORE_REPRS
-
-#include "binomials.hpp"
-#include "isd_cost_estimate.hpp"
-#include
-#include
-
-int main(int argc, char *argv[]) {
-  if (argc != 7) {
-    std::cout
-        << "Work factor computation for ISD" << std::endl
-        << " Usage " << argv[0]
-        << " "
-           " "
-        << std::endl
-        << " = 1 implies a non QC code " << std::endl
-        << " = the attack is a key recovery attack on a QC-[L|M]DPC "
-        << std::endl
-        << " = if the quasi-cyclic reduction factor "
-           "cost should be applied"
-        << std::endl;
-    return -1;
-  }
-
-  /* reduce by a factor matching the QC block size */
-
-  InitBinomials();
-  NTL::RR::SetPrecision(NUM_BITS_REAL_MANTISSA);
-  pi = NTL::ComputePi_RR();
-  uint32_t n = atoi(argv[1]);
-  uint32_t k = atoi(argv[2]);
-  uint32_t t = atoi(argv[3]);
-  uint32_t qc_block_size = atoi(argv[4]);
-  uint32_t is_kra = atoi(argv[5]);
-  uint32_t is_red_factor_applied = atoi(argv[6]);
-
-  std::cout << " Input params: " << std::endl
-            << "- : " << n << std::endl
-            << "- : " << k << std::endl
-            << "- : " << t << std::endl
-            << "- : " << qc_block_size << std::endl
-            << "- : " << is_kra << std::endl
-            << "- : " << is_red_factor_applied << std::endl;
-
-  std::cout << "Minimum classic cost :"
-            << c_isd_log_cost(n, k, t, qc_block_size, is_kra,
-                              is_red_factor_applied)
-            << " Minimum quantum cost :"
-            << q_isd_log_cost(n, k, t, qc_block_size, is_kra,
-                              is_red_factor_applied);
-  if (is_red_factor_applied && qc_block_size != 1)
-    std::cout << " (including qc_effects) ";
-  std::cout << std::endl;
-  return 0;
-}
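Finally, a small sketch of the renamed partition helper's new interface, which now fills a `std::vector` instead of a raw array. The header name matches the `#include` added by the rename above; assuming `FindmPartition` is declared there, and with arbitrary example values for `m` and `n_0`:

```cpp
// Illustrative sketch, not part of the patch. m and n_0 are arbitrary inputs.
#include <cstdint>
#include <iostream>
#include <vector>

#include "partitions_permanents.hpp"

int main() {
  const uint64_t n_0 = 2; // number of parts (circulant blocks)
  const uint64_t m = 11;  // weight to split across the parts
  std::vector<uint64_t> parts(n_0);

  // Enumerates the partitions of m into n_0 parts (Knuth's Algorithm H) and
  // reports one whose circulant permanent is odd, if any exists.
  if (FindmPartition(m, parts, n_0)) {
    for (const auto v : parts) {
      std::cout << v << ' ';
    }
    std::cout << '\n';
  } else {
    std::cout << "no partition with an odd permanent found\n";
  }
  return 0;
}
```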