From e04bbb3a8e7093dfcdefebc132f4a49fb79969fb Mon Sep 17 00:00:00 2001 From: Simone Perriello <8199216+tigerjack@users.noreply.github.com> Date: Sat, 20 Jul 2024 10:43:14 +0200 Subject: [PATCH 01/55] MOD Makefile - compile bin files in separate directory - require openmp for parallel computation --- Makefile | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index 84fa351..86b145e 100644 --- a/Makefile +++ b/Makefile @@ -1,8 +1,22 @@ CXXFLAGS=-O3 -g3 -std=c++11 -Wall -Wextra -Wno-sign-compare +OMPFLAGS=-fopenmp LDLIBS= -lntl -lgmp -lm -TARGETS=constant_weight_encodable_bits enumeration_complexity parameter_generator work_factor_computation +BIN_DIR=bin +TARGETS=constant_weight_encodable_bits enumeration_complexity parameter_generator work_factor_computation work_factor_computation_parallel +BIN_TARGETS=$(addprefix $(BIN_DIR)/, $(TARGETS)) -all: $(TARGETS) +all: $(BIN_DIR) $(BIN_TARGETS) + +$(BIN_DIR): + mkdir -p $(BIN_DIR) + +$(BIN_DIR)/work_factor_computation_parallel: work_factor_computation_parallel.cpp + $(CXX) $(CXXFLAGS) $(OMPFLAGS) $< -o $@ $(LDLIBS) + +$(BIN_DIR)/%: %.cpp + $(CXX) $(CXXFLAGS) $< -o $@ $(LDLIBS) clean: - rm -f $(TARGETS) + rm -f $(BIN_DIR)/* + rmdir $(BIN_DIR) + From f159ea5611f18930b201fff3f7cc0c6cbdabf0fe Mon Sep 17 00:00:00 2001 From: Simone Perriello <8199216+tigerjack@users.noreply.github.com> Date: Sat, 20 Jul 2024 11:13:16 +0200 Subject: [PATCH 02/55] ADD parallel work factor computation --- work_factor_computation_parallel.cpp | 81 ++++++++++++++++++++++++++++ 1 file changed, 81 insertions(+) create mode 100644 work_factor_computation_parallel.cpp diff --git a/work_factor_computation_parallel.cpp b/work_factor_computation_parallel.cpp new file mode 100644 index 0000000..252b18a --- /dev/null +++ b/work_factor_computation_parallel.cpp @@ -0,0 +1,81 @@ +#include +#include +#include +#include // For std::setprecision +#include +#include +#include +#include +#include + +#define NUM_BITS_REAL_MANTISSA 1024 +#define IGNORE_DECODING_COST 0 +// #define EXPLORE_REPRS + +#include "binomials.hpp" +#include "isd_cost_estimate.hpp" +#include + + +int main(int argc, char *argv[]) { + std::ifstream file("out/isd_values.json"); + + // Check if the file is open + if (!file.is_open()) { + std::cerr << "Could not open the file!" 
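// note: out/isd_values.json is expected to hold an array of parameter records; the fields "n", "r", "t" and "prime" are read from each entry below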
<< std::endl; + return 1; + } + + // Parse the JSON content + nlohmann::json j; + file >> j; + + InitBinomials(); + NTL::RR::SetPrecision(NUM_BITS_REAL_MANTISSA); + pi = NTL::ComputePi_RR(); + bool is_kra_values[] = {true, false}; + // Iterate over the list of entries + #pragma omp parallel for + for (const auto &entry : j) { + uint32_t n = entry["n"]; + uint32_t r = entry["r"]; + uint32_t k = n - r; + uint32_t t = entry["t"]; + uint32_t qc_block_size = entry["prime"]; + bool is_red_factor_applied = true; + // int n0 = entry["n0"]; + // int v = entry["v"]; + // int lambd = entry["lambd"]; + + // Output the data + // std::cout << "n: " << n << ", r: " << r << ", t: " << t << std::endl; + + for (bool is_kra : is_kra_values) { + double min_c_cost = + c_isd_log_cost(n, k, t, qc_block_size, is_kra, is_red_factor_applied); + double min_q_cost = + q_isd_log_cost(n, k, t, qc_block_size, is_kra, is_red_factor_applied); + nlohmann::json out_values; + out_values["C2"] = min_c_cost; + out_values["Q2"] = min_q_cost; + + std::ostringstream oss; + oss << std::setw(6) << std::setfill('0') << n << "_" << std::setw(6) + << std::setfill('0') << r << "_" << std::setw(3) << std::setfill('0') + << t << "_" << std::setw(1) << is_kra; + std::string filename = "out/" + oss.str() + ".json"; + + // Write the JSON object to the file + std::ofstream file(filename); + if (file.is_open()) { + file << std::fixed << std::setprecision(10) + << out_values.dump(4); // Format JSON with indentation + file.close(); + std::cout << "Data written to " << filename << std::endl; + } else { + std::cerr << "Could not open the file!" << std::endl; + } + } + } + return 0; +} From 78e39d7f88c46e417f8c69f780f53dda0848e0da Mon Sep 17 00:00:00 2001 From: Simone Perriello <8199216+tigerjack@users.noreply.github.com> Date: Sat, 20 Jul 2024 11:23:53 +0200 Subject: [PATCH 03/55] MOD cost estimates w/ skip macros & min initialization - All SKIP algorithms are defined as macros - Classical/Quantum minimum cost is initialized to max double --- isd_cost_estimate.hpp | 47 +++++++++++++++++++++++++++++++++---------- 1 file changed, 36 insertions(+), 11 deletions(-) diff --git a/isd_cost_estimate.hpp b/isd_cost_estimate.hpp index 232e6ab..79bb293 100644 --- a/isd_cost_estimate.hpp +++ b/isd_cost_estimate.hpp @@ -6,6 +6,16 @@ #include #include +#define SKIP_PRANGE 1 +#define SKIP_LB 1 +#define SKIP_LEON 1 +#define SKIP_STERN 0 +#define SKIP_FS 1 +#define SKIP_BJMM 1 +#define SKIP_MMT 1 +#define SKIP_Q_LB 0 +#define SKIP_Q_STERN 1 + /***************************Classic ISDs***************************************/ double isd_log_cost_classic_BJMM_approx(const uint32_t n, @@ -615,39 +625,52 @@ double get_qc_red_factor_log(const uint32_t qc_order, const uint32_t is_kra) { double c_isd_log_cost(const uint32_t n, const uint32_t k, const uint32_t t, const uint32_t qc_order, const uint32_t is_kra, const bool compute_qc_reduction_factor) { - double min_cost = n, current_cost; - double qc_red_factor = compute_qc_reduction_factor? get_qc_red_factor_log(qc_order, is_kra): 0; + double min_cost, current_cost; + double qc_red_factor = + compute_qc_reduction_factor ? get_qc_red_factor_log(qc_order, is_kra) : 0; + + min_cost = std::numeric_limits::max(); std::cout << "Classic "; +#if SKIP_PRANGE == 0 current_cost = isd_log_cost_classic_Prange(n, k, t) - qc_red_factor; std::cerr << "Classic Prange: " << std::setprecision(5) << current_cost << std::endl; std::cout << current_cost << " "; - min_cost = current_cost; + min_cost = min_cost > current_cost ? 
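/* min_cost starts at the largest representable double (see initialization above), so every enabled classical variant shares this same running-minimum update */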
current_cost : min_cost; +#endif +#if SKIP_LB == 0 current_cost = isd_log_cost_classic_LB(n, k, t) - qc_red_factor; std::cerr << "Classic Lee-Brickell ISD: " << std::setprecision(5) << current_cost << std::endl; std::cout << current_cost << " "; min_cost = min_cost > current_cost ? current_cost : min_cost; +#endif +#if SKIP_LEON == 0 current_cost = isd_log_cost_classic_Leon(n, k, t) - qc_red_factor; std::cerr << "Classic Leon ISD: " << std::setprecision(5) << current_cost << std::endl; std::cout << current_cost << " "; min_cost = min_cost > current_cost ? current_cost : min_cost; +#endif +#if SKIP_STERN == 0 current_cost = isd_log_cost_classic_Stern(n, k, t) - qc_red_factor; std::cerr << "Classic Stern ISD: " << std::setprecision(5) << current_cost << std::endl; std::cout << current_cost << " "; min_cost = min_cost > current_cost ? current_cost : min_cost; +#endif +#if SKIP_FS == 0 current_cost = isd_log_cost_classic_FS(n, k, t) - qc_red_factor; std::cerr << "Classic Fin-Send ISD: " << std::setprecision(5) << current_cost << std::endl; std::cout << current_cost << " "; min_cost = min_cost > current_cost ? current_cost : min_cost; +#endif #if SKIP_MMT == 0 current_cost = isd_log_cost_classic_MMT(n, k, t) - qc_red_factor; @@ -671,25 +694,27 @@ double c_isd_log_cost(const uint32_t n, const uint32_t k, const uint32_t t, double q_isd_log_cost(const uint32_t n, const uint32_t k, const uint32_t t, const uint32_t qc_order, const uint32_t is_kra, const bool compute_qc_reduction_factor) { - double min_cost = n, current_cost; - /* for key recovery attacks the advantage from quasi-cyclicity is p, - * for an ISD, the DOOM advantage is just sqrt(p) */ + double min_cost, current_cost; std::cout << "Quantum "; double qc_red_factor = compute_qc_reduction_factor? get_qc_red_factor_log(qc_order, is_kra): 0; + min_cost = std::numeric_limits::max(); + /* This is just a quick hack since experiments says that p = 1 is * the optimal value at least for the NIST code-based finalists */ +#if SKIP_Q_LB == 0 current_cost = isd_log_cost_quantum_LB(n, k, t, 1) - qc_red_factor; - std::cout << current_cost << " "; - // std::cout << " Q-Lee-Brickell ISD: " << /**/current_cost << std::endl; - min_cost = current_cost; + std::cout << " Q-Lee-Brickell ISD: " << /**/current_cost << std::endl; + min_cost = min_cost > current_cost ? current_cost : min_cost; +#endif +#if SKIP_Q_STERN == 0 current_cost = isd_log_cost_quantum_stern(n, k, t) - qc_red_factor; - std::cout << current_cost << " "; - // std::cout << ", Q-Stern ISD: " << current_cost << std::endl; + std::cout << ", Q-Stern ISD: " << current_cost << std::endl; min_cost = min_cost > current_cost ? 
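/* as in the classical estimate, min_cost is pre-initialized to the maximum double, so this update stays correct even when the Q-Lee-Brickell branch is compiled out */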
current_cost : min_cost; std::cout << std::endl; +#endif return min_cost; } From b826bf6019a6130d85329aeb1cf658058bec8701 Mon Sep 17 00:00:00 2001 From: Simone Perriello <8199216+tigerjack@users.noreply.github.com> Date: Sat, 20 Jul 2024 12:18:53 +0200 Subject: [PATCH 04/55] ADD logging utilities, moved to C++17, solved warnings --- Makefile | 5 +- isd_cost_estimate.hpp | 1064 +++++++++++++------------- logging.hpp | 71 ++ work_factor_computation_parallel.cpp | 12 +- 4 files changed, 612 insertions(+), 540 deletions(-) create mode 100644 logging.hpp diff --git a/Makefile b/Makefile index 86b145e..b26530a 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,7 @@ -CXXFLAGS=-O3 -g3 -std=c++11 -Wall -Wextra -Wno-sign-compare +CXX = g++ +CXXFLAGS=-O3 -g3 -std=c++17 -Wall -Wextra -Wno-sign-compare OMPFLAGS=-fopenmp -LDLIBS= -lntl -lgmp -lm +LDLIBS= -lntl -lgmp -lm -lspdlog -lfmt BIN_DIR=bin TARGETS=constant_weight_encodable_bits enumeration_complexity parameter_generator work_factor_computation work_factor_computation_parallel BIN_TARGETS=$(addprefix $(BIN_DIR)/, $(TARGETS)) diff --git a/isd_cost_estimate.hpp b/isd_cost_estimate.hpp index 79bb293..be49cf0 100644 --- a/isd_cost_estimate.hpp +++ b/isd_cost_estimate.hpp @@ -1,10 +1,12 @@ -#pragma once +#pragma once #include "binomials.hpp" -#include +#include "logging.hpp" #include +#include +#include #include #include -#include +#include #define SKIP_PRANGE 1 #define SKIP_LB 1 @@ -18,497 +20,504 @@ /***************************Classic ISDs***************************************/ -double isd_log_cost_classic_BJMM_approx(const uint32_t n, - const uint32_t k, +double isd_log_cost_classic_BJMM_approx(const uint32_t n, const uint32_t k, const uint32_t t) { - return ((double)t) * - log((1.0 - (double) k / (double) n)) / log(2); + return ((double)t) * -log((1.0 - (double)k / (double)n)) / log(2); } // computes the probability of a random k * k being invertible const NTL::RR log_probability_k_by_k_is_inv(const NTL::RR &k) { - NTL::RR log_pinv = NTL::RR(0.5); - for(long i = 2 ; i <=k ; i++){ - log_pinv = log_pinv * (NTL::RR(1) - NTL::power2_RR(-i)); - } - return NTL::log(log_pinv); + NTL::RR log_pinv = NTL::RR(0.5); + for (long i = 2; i <= k; i++) { + log_pinv = log_pinv * (NTL::RR(1) - NTL::power2_RR(-i)); + } + return NTL::log(log_pinv); } const NTL::RR probability_k_by_k_is_inv(const NTL::RR &k) { - NTL::RR log_pinv = NTL::RR(0.5); - for(long i = 2 ; i <=k ; i++){ - log_pinv = log_pinv * (NTL::RR(1) - NTL::power2_RR(-i)); - } - return log_pinv; + NTL::RR log_pinv = NTL::RR(0.5); + for (long i = 2; i <= k; i++) { + log_pinv = log_pinv * (NTL::RR(1) - NTL::power2_RR(-i)); + } + return log_pinv; } -const NTL::RR classic_rref_red_cost(const NTL::RR &n, const NTL::RR & r){ - /* simple reduced row echelon form transform, as it is not likely to be the - * bottleneck */ - NTL::RR k = n-r; - return r*r*n/NTL::RR(2) + - (n*r)/NTL::RR(2) - - r*r*r / NTL::RR(6) + - r*r + - r / NTL::RR(6) - NTL::RR(1); +const NTL::RR classic_rref_red_cost(const NTL::RR &n, const NTL::RR &r) { + /* simple reduced row echelon form transform, as it is not likely to be the + * bottleneck */ + NTL::RR k = n - r; + return r * r * n / NTL::RR(2) + (n * r) / NTL::RR(2) - + r * r * r / NTL::RR(6) + r * r + r / NTL::RR(6) - NTL::RR(1); } -const NTL::RR classic_IS_candidate_cost(const NTL::RR &n, const NTL::RR & r){ - return classic_rref_red_cost(n,r)/probability_k_by_k_is_inv(r) + r*r; +const NTL::RR classic_IS_candidate_cost(const NTL::RR &n, const NTL::RR &r) { + return classic_rref_red_cost(n, 
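/* dividing by the probability that the sampled r x r minor is invertible gives the expected cost over repeated draws */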
r) / probability_k_by_k_is_inv(r) + r * r; } -const NTL::RR Fin_Send_rref_red_cost(const NTL::RR &n, - const NTL::RR &r, - const NTL::RR l){ - /* reduced size reduced row echelon form transformation, only yields an - * (r-l) sized identity matrix */ - NTL::RR k = n-r; - return - l*l*l / NTL::RR(3) - - l*l*n / NTL::RR(2) - + l*l*r / NTL::RR(2) - - 3*l*l / NTL::RR(2) - - 3*l*n / NTL::RR(2) - + l*r / NTL::RR(2) - - 13*l / NTL::RR(6) - + n*r*r / NTL::RR(2) - + n*r / NTL::RR(2) - - r*r*r / NTL::RR(6) - + r*r - + r / NTL::RR(6) - - NTL::RR(1); +const NTL::RR Fin_Send_rref_red_cost(const NTL::RR &n, const NTL::RR &r, + const NTL::RR l) { + /* reduced size reduced row echelon form transformation, only yields an + * (r-l) sized identity matrix */ + NTL::RR k = n - r; + return -l * l * l / NTL::RR(3) - l * l * n / NTL::RR(2) + + l * l * r / NTL::RR(2) - 3 * l * l / NTL::RR(2) - + 3 * l * n / NTL::RR(2) + l * r / NTL::RR(2) - 13 * l / NTL::RR(6) + + n * r * r / NTL::RR(2) + n * r / NTL::RR(2) - r * r * r / NTL::RR(6) + + r * r + r / NTL::RR(6) - NTL::RR(1); } -const NTL::RR Fin_Send_IS_candidate_cost(const NTL::RR &n, - const NTL::RR &r, - const NTL::RR &l){ - return Fin_Send_rref_red_cost(n,r,l)/probability_k_by_k_is_inv(r-l) + r*r; +const NTL::RR Fin_Send_IS_candidate_cost(const NTL::RR &n, const NTL::RR &r, + const NTL::RR &l) { + return Fin_Send_rref_red_cost(n, r, l) / probability_k_by_k_is_inv(r - l) + + r * r; } -double isd_log_cost_classic_Prange(const uint32_t n, - const uint32_t k, +double isd_log_cost_classic_Prange(const uint32_t n, const uint32_t k, const uint32_t t) { - NTL::RR n_real = NTL::RR(n); - NTL::RR k_real = NTL::RR(k); - NTL::RR t_real = NTL::RR(t); + NTL::RR n_real = NTL::RR(n); + NTL::RR k_real = NTL::RR(k); + NTL::RR t_real = NTL::RR(t); - NTL::RR cost_iter = classic_IS_candidate_cost(n_real,n_real-k_real); - NTL::RR num_iter = NTL::to_RR(binomial_wrapper(n,t)) / - NTL::to_RR(binomial_wrapper(n-k,t)); + NTL::RR cost_iter = classic_IS_candidate_cost(n_real, n_real - k_real); + NTL::RR num_iter = NTL::to_RR(binomial_wrapper(n, t)) / + NTL::to_RR(binomial_wrapper(n - k, t)); - NTL::RR log_cost = log2_RR(num_iter)+ log2_RR(cost_iter); - return NTL::conv( log_cost ); + NTL::RR log_cost = log2_RR(num_iter) + log2_RR(cost_iter); + return NTL::conv(log_cost); } #define P_MAX_LB 20 -double isd_log_cost_classic_LB(const uint32_t n, - const uint32_t k, +double isd_log_cost_classic_LB(const uint32_t n, const uint32_t k, const uint32_t t) { - NTL::RR n_real = NTL::RR(n); - NTL::RR k_real = NTL::RR(k); - NTL::RR t_real = NTL::RR(t); - NTL::RR min_log_cost = n_real; // unreachable upper bound - NTL::RR log_cost; - uint32_t best_p = 1; - uint32_t constrained_max_p = P_MAX_LB > t ? t : P_MAX_LB; - NTL::RR IS_candidate_cost; - IS_candidate_cost = classic_IS_candidate_cost(n_real,n_real-k_real); - for(uint32_t p = 1 ;p < constrained_max_p; p++ ){ - NTL::RR p_real = NTL::RR(p); - NTL::RR cost_iter = IS_candidate_cost + - NTL::to_RR(binomial_wrapper(k,p)*p*(n-k)); - NTL::RR num_iter = NTL::to_RR(binomial_wrapper(n,t)) / - NTL::to_RR( binomial_wrapper(k,p) * - binomial_wrapper(n-k,t-p) ); - log_cost = (NTL::log(num_iter)+NTL::log(cost_iter)) / NTL::log(NTL::RR(2)); - if(min_log_cost > log_cost){ - min_log_cost = log_cost; - best_p=p; - } + NTL::RR n_real = NTL::RR(n); + NTL::RR k_real = NTL::RR(k); + NTL::RR t_real = NTL::RR(t); + NTL::RR min_log_cost = n_real; // unreachable upper bound + NTL::RR log_cost; + uint32_t best_p = 1; + uint32_t constrained_max_p = P_MAX_LB > t ? 
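/* at most t error positions can fall inside the information set, so p is capped at t */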
t : P_MAX_LB; + NTL::RR IS_candidate_cost; + IS_candidate_cost = classic_IS_candidate_cost(n_real, n_real - k_real); + for (uint32_t p = 1; p < constrained_max_p; p++) { + NTL::RR p_real = NTL::RR(p); + NTL::RR cost_iter = + IS_candidate_cost + NTL::to_RR(binomial_wrapper(k, p) * p * (n - k)); + NTL::RR num_iter = + NTL::to_RR(binomial_wrapper(n, t)) / + NTL::to_RR(binomial_wrapper(k, p) * binomial_wrapper(n - k, t - p)); + log_cost = + (NTL::log(num_iter) + NTL::log(cost_iter)) / NTL::log(NTL::RR(2)); + if (min_log_cost > log_cost) { + min_log_cost = log_cost; + best_p = p; } - std::cerr << std::endl << "Lee-Brickell best p: " << best_p << std::endl; - return NTL::conv( min_log_cost ); + } + spdlog::info("Lee-Brickell best p: {}", best_p); + return NTL::conv(min_log_cost); } #define P_MAX_Leon P_MAX_LB #define L_MAX_Leon 200 -double isd_log_cost_classic_Leon(const uint32_t n, - const uint32_t k, +double isd_log_cost_classic_Leon(const uint32_t n, const uint32_t k, const uint32_t t) { - NTL::RR n_real = NTL::RR(n); - NTL::RR k_real = NTL::RR(k); - NTL::RR t_real = NTL::RR(t); - NTL::RR min_log_cost = n_real; // unreachable upper bound - NTL::RR log_cost; - uint32_t best_l=0,best_p=1, constrained_max_l, constrained_max_p; - - NTL::RR IS_candidate_cost; - IS_candidate_cost = classic_IS_candidate_cost(n_real,n_real-k_real); - constrained_max_p = P_MAX_Leon > t ? t : P_MAX_Leon; - for(uint32_t p = 1; p < constrained_max_p; p++ ){ - constrained_max_l = ( L_MAX_Leon > (n-k-(t-p)) ? (n-k-(t-p)) : L_MAX_Leon); - NTL::RR p_real = NTL::RR(p); - for(uint32_t l = 0; l < constrained_max_l; l++){ - NTL::RR KChooseP = NTL::to_RR( binomial_wrapper(k,p) ); - NTL::RR cost_iter = IS_candidate_cost + - KChooseP * p_real * NTL::to_RR(l) + - ( KChooseP / NTL::power2_RR(l))* NTL::RR(p * (n-k - l)); - NTL::RR num_iter = NTL::to_RR(binomial_wrapper(n,t)) / - NTL::to_RR( binomial_wrapper(k,p) * - binomial_wrapper(n-k-l,t-p) ); - log_cost = ( NTL::log(num_iter) + NTL::log(cost_iter) ) / NTL::log(NTL::RR(2)); - if(min_log_cost > log_cost){ - min_log_cost = log_cost; - best_l = l; - best_p = p; - } - } + NTL::RR n_real = NTL::RR(n); + NTL::RR k_real = NTL::RR(k); + NTL::RR t_real = NTL::RR(t); + NTL::RR min_log_cost = n_real; // unreachable upper bound + NTL::RR log_cost; + uint32_t best_l = 0, best_p = 1, constrained_max_l, constrained_max_p; + + NTL::RR IS_candidate_cost; + IS_candidate_cost = classic_IS_candidate_cost(n_real, n_real - k_real); + constrained_max_p = P_MAX_Leon > t ? t : P_MAX_Leon; + for (uint32_t p = 1; p < constrained_max_p; p++) { + constrained_max_l = + (L_MAX_Leon > (n - k - (t - p)) ? 
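/* l is capped at n-k-(t-p) so that binomial(n-k-l, t-p) in the success probability stays non-zero */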
(n - k - (t - p)) : L_MAX_Leon); + NTL::RR p_real = NTL::RR(p); + for (uint32_t l = 0; l < constrained_max_l; l++) { + NTL::RR KChooseP = NTL::to_RR(binomial_wrapper(k, p)); + NTL::RR cost_iter = + IS_candidate_cost + KChooseP * p_real * NTL::to_RR(l) + + (KChooseP / NTL::power2_RR(l)) * NTL::RR(p * (n - k - l)); + NTL::RR num_iter = NTL::to_RR(binomial_wrapper(n, t)) / + NTL::to_RR(binomial_wrapper(k, p) * + binomial_wrapper(n - k - l, t - p)); + log_cost = + (NTL::log(num_iter) + NTL::log(cost_iter)) / NTL::log(NTL::RR(2)); + if (min_log_cost > log_cost) { + min_log_cost = log_cost; + best_l = l; + best_p = p; + } } - std::cerr << std::endl << "Leon Best l: " << best_l << " best p: " << best_p << std::endl; - return NTL::conv( min_log_cost ); + } + spdlog::info("Leon Best l {} best p: {}", best_l, best_p); + return NTL::conv(min_log_cost); } - #define P_MAX_Stern P_MAX_Leon #define L_MAX_Stern L_MAX_Leon -double isd_log_cost_classic_Stern(const uint32_t n, - const uint32_t k, - const uint32_t t) { - NTL::RR n_real = NTL::RR(n); - NTL::RR k_real = NTL::RR(k); - NTL::RR t_real = NTL::RR(t); - NTL::RR min_log_cost = n_real; // unreachable upper bound - NTL::RR log_cost; - uint32_t best_l = 0,best_p = 2, constrained_max_l, constrained_max_p; - - NTL::RR IS_candidate_cost; - IS_candidate_cost = classic_IS_candidate_cost(n_real,n_real-k_real); - - constrained_max_p = P_MAX_Stern > t ? t : P_MAX_Stern; - for(uint32_t p = 2; p < constrained_max_p; p = p+2 ){ - constrained_max_l = ( L_MAX_Stern > (n-k-(t-p)) ? (n-k-(t-p)) : L_MAX_Stern); - NTL::ZZ kHalfChoosePHalf; - for(uint32_t l = 0; l < constrained_max_l; l++){ - NTL::RR p_real = NTL::RR(p); - kHalfChoosePHalf = binomial_wrapper(k/2,p/2); - NTL::RR kHalfChoosePHalf_real = NTL::to_RR(kHalfChoosePHalf); - - NTL::RR cost_iter = IS_candidate_cost + - kHalfChoosePHalf_real * - ( NTL::to_RR(l)*p_real + - (kHalfChoosePHalf_real / NTL::power2_RR(l)) * NTL::RR(p * (n-k - l)) - ); -// #if LOG_COST_CRITERION == 1 - NTL::RR log_stern_list_size = kHalfChoosePHalf_real * - ( p_real/NTL::RR(2) * NTL::log( k_real/NTL::RR(2))/NTL::log(NTL::RR(2) ) +NTL::to_RR(l)); - log_stern_list_size = NTL::log(log_stern_list_size) / NTL::log(NTL::RR(2)); - cost_iter = cost_iter*log_stern_list_size; -// #endif - NTL::RR num_iter = NTL::to_RR(binomial_wrapper(n,t)) / - NTL::to_RR( kHalfChoosePHalf*kHalfChoosePHalf * - binomial_wrapper(n-k-l,t-p) ); - log_cost = log2_RR(num_iter) + log2_RR(cost_iter); - if(min_log_cost > log_cost){ - min_log_cost = log_cost; - best_l = l; - best_p = p; - } - } +double isd_log_cost_classic_Stern(const uint32_t n, const uint32_t k, + const uint32_t t) { + NTL::RR n_real = NTL::RR(n); + NTL::RR k_real = NTL::RR(k); + NTL::RR t_real = NTL::RR(t); + NTL::RR min_log_cost = n_real; // unreachable upper bound + NTL::RR log_cost; + uint32_t best_l = 0, best_p = 2, constrained_max_l, constrained_max_p; + + NTL::RR IS_candidate_cost; + IS_candidate_cost = classic_IS_candidate_cost(n_real, n_real - k_real); + + constrained_max_p = P_MAX_Stern > t ? t : P_MAX_Stern; + for (uint32_t p = 2; p < constrained_max_p; p = p + 2) { + constrained_max_l = + (L_MAX_Stern > (n - k - (t - p)) ? 
(n - k - (t - p)) : L_MAX_Stern); + NTL::ZZ kHalfChoosePHalf; + for (uint32_t l = 0; l < constrained_max_l; l++) { + NTL::RR p_real = NTL::RR(p); + kHalfChoosePHalf = binomial_wrapper(k / 2, p / 2); + NTL::RR kHalfChoosePHalf_real = NTL::to_RR(kHalfChoosePHalf); + + NTL::RR cost_iter = + IS_candidate_cost + + kHalfChoosePHalf_real * (NTL::to_RR(l) * p_real + + (kHalfChoosePHalf_real / NTL::power2_RR(l)) * + NTL::RR(p * (n - k - l))); + // #if LOG_COST_CRITERION == 1 + NTL::RR log_stern_list_size = + kHalfChoosePHalf_real * + (p_real / NTL::RR(2) * NTL::log(k_real / NTL::RR(2)) / + NTL::log(NTL::RR(2)) + + NTL::to_RR(l)); + log_stern_list_size = + NTL::log(log_stern_list_size) / NTL::log(NTL::RR(2)); + cost_iter = cost_iter * log_stern_list_size; + // #endif + NTL::RR num_iter = NTL::to_RR(binomial_wrapper(n, t)) / + NTL::to_RR(kHalfChoosePHalf * kHalfChoosePHalf * + binomial_wrapper(n - k - l, t - p)); + log_cost = log2_RR(num_iter) + log2_RR(cost_iter); + if (min_log_cost > log_cost) { + min_log_cost = log_cost; + best_l = l; + best_p = p; + } } + } - std::cerr << std::endl << "Stern Best l: " << best_l << " best p: " << best_p << std::endl; - return NTL::conv( min_log_cost ); + spdlog::info("Stern Best l {}, best p: {}", best_l, best_p); + return NTL::conv(min_log_cost); } -#define P_MAX_FS P_MAX_Stern -#define L_MAX_FS L_MAX_Stern -double isd_log_cost_classic_FS(const uint32_t n, - const uint32_t k, - const uint32_t t) { - NTL::RR n_real = NTL::RR(n); - NTL::RR k_real = NTL::RR(k); - NTL::RR t_real = NTL::RR(t); - NTL::RR min_log_cost = n_real; // unreachable upper bound - NTL::RR log_cost; - uint32_t best_l = 0, best_p = 2,constrained_max_l, constrained_max_p; - - NTL::RR IS_candidate_cost; - constrained_max_p = P_MAX_Stern > t ? t : P_MAX_Stern; - for(uint32_t p = 2; p < constrained_max_p; p = p+2 ){ - constrained_max_l = ( L_MAX_Stern > (n-k-(t-p)) ? (n-k-(t-p)) : L_MAX_Stern); - NTL::RR p_real = NTL::RR(p); - NTL::ZZ kPlusLHalfChoosePHalf; - for(uint32_t l = 0; l < constrained_max_l; l++){ - IS_candidate_cost = Fin_Send_IS_candidate_cost(n_real,n_real-k_real,NTL::RR(l)); - kPlusLHalfChoosePHalf = binomial_wrapper((k+l)/2,p/2); - NTL::RR kPlusLHalfChoosePHalf_real = NTL::to_RR(kPlusLHalfChoosePHalf); - NTL::RR cost_iter = IS_candidate_cost + - kPlusLHalfChoosePHalf_real * - ( NTL::to_RR(l)*p_real + - ( kPlusLHalfChoosePHalf_real / NTL::power2_RR(l)) * - NTL::RR(p * (n-k - l)) - ); -// #if LOG_COST_CRITERION == 1 - NTL::RR l_real = NTL::to_RR(l); - NTL::RR log_FS_list_size = kPlusLHalfChoosePHalf_real * - ( p_real/NTL::RR(2) * NTL::log( (k_real+l_real)/NTL::RR(2))/NTL::log(NTL::RR(2) ) +l_real); - log_FS_list_size = log2_RR(log_FS_list_size); - cost_iter = cost_iter*log_FS_list_size; -// #endif - NTL::RR num_iter = NTL::to_RR(binomial_wrapper(n,t)) / - NTL::to_RR( kPlusLHalfChoosePHalf * kPlusLHalfChoosePHalf * - binomial_wrapper(n-k-l,t-p) ); - - log_cost = log2_RR(num_iter) + log2_RR(cost_iter); - if(min_log_cost > log_cost){ - min_log_cost = log_cost; - best_l = l; - best_p = p; - } - } +#define P_MAX_FS P_MAX_Stern +#define L_MAX_FS L_MAX_Stern +double isd_log_cost_classic_FS(const uint32_t n, const uint32_t k, + const uint32_t t) { + NTL::RR n_real = NTL::RR(n); + NTL::RR k_real = NTL::RR(k); + NTL::RR t_real = NTL::RR(t); + NTL::RR min_log_cost = n_real; // unreachable upper bound + NTL::RR log_cost; + uint32_t best_l = 0, best_p = 2, constrained_max_l, constrained_max_p; + + NTL::RR IS_candidate_cost; + constrained_max_p = P_MAX_Stern > t ? 
t : P_MAX_Stern; + for (uint32_t p = 2; p < constrained_max_p; p = p + 2) { + constrained_max_l = + (L_MAX_Stern > (n - k - (t - p)) ? (n - k - (t - p)) : L_MAX_Stern); + NTL::RR p_real = NTL::RR(p); + NTL::ZZ kPlusLHalfChoosePHalf; + for (uint32_t l = 0; l < constrained_max_l; l++) { + IS_candidate_cost = + Fin_Send_IS_candidate_cost(n_real, n_real - k_real, NTL::RR(l)); + kPlusLHalfChoosePHalf = binomial_wrapper((k + l) / 2, p / 2); + NTL::RR kPlusLHalfChoosePHalf_real = NTL::to_RR(kPlusLHalfChoosePHalf); + NTL::RR cost_iter = + IS_candidate_cost + + kPlusLHalfChoosePHalf_real * + (NTL::to_RR(l) * p_real + + (kPlusLHalfChoosePHalf_real / NTL::power2_RR(l)) * + NTL::RR(p * (n - k - l))); + // #if LOG_COST_CRITERION == 1 + NTL::RR l_real = NTL::to_RR(l); + NTL::RR log_FS_list_size = + kPlusLHalfChoosePHalf_real * + (p_real / NTL::RR(2) * NTL::log((k_real + l_real) / NTL::RR(2)) / + NTL::log(NTL::RR(2)) + + l_real); + log_FS_list_size = log2_RR(log_FS_list_size); + cost_iter = cost_iter * log_FS_list_size; + // #endif + NTL::RR num_iter = + NTL::to_RR(binomial_wrapper(n, t)) / + NTL::to_RR(kPlusLHalfChoosePHalf * kPlusLHalfChoosePHalf * + binomial_wrapper(n - k - l, t - p)); + + log_cost = log2_RR(num_iter) + log2_RR(cost_iter); + if (min_log_cost > log_cost) { + min_log_cost = log_cost; + best_l = l; + best_p = p; + } } - std::cerr << std::endl << "FS Best l: " << best_l << " best p: " << best_p << std::endl; - return NTL::conv( min_log_cost ); + } + spdlog::info("FS Best l {}, best p: {}", best_l, best_p); + return NTL::conv(min_log_cost); } -#define P_MAX_MMT (P_MAX_FS+25) // P_MAX_MMT -#define L_MAX_MMT 350 //L_MAX_MMT +#define P_MAX_MMT (P_MAX_FS + 25) // P_MAX_MMT +#define L_MAX_MMT 350 // L_MAX_MMT #define L_MIN_MMT 2 -double isd_log_cost_classic_MMT(const uint32_t n, - const uint32_t k, - const uint32_t t) { - uint32_t r = n-k; - NTL::RR n_real = NTL::RR(n); - NTL::RR r_real = NTL::RR(r); - NTL::RR k_real = n_real-r_real; - - - NTL::RR min_log_cost = n_real; // unreachable upper bound - NTL::RR log_cost, log_mem_cost; - uint32_t best_l= L_MIN_MMT, best_l1, best_p = 4, - constrained_max_l = 0, constrained_max_p; - - NTL::RR FS_IS_candidate_cost; - constrained_max_p = P_MAX_MMT > t ? t : P_MAX_MMT; - /* p should be divisible by 4 in MMT */ - for(uint32_t p = 4; p <= constrained_max_p; p = p+4 ){ - constrained_max_l = ( L_MAX_MMT > (n-k-(t-p)) ? (n-k-(t-p)) : L_MAX_MMT ); - for(uint32_t l = L_MIN_MMT; l <= constrained_max_l; l++){ - NTL::RR l_real = NTL::to_RR(l); - NTL::ZZ kPlusLHalfChoosePHalf = binomial_wrapper((k+l)/2,p/2); - NTL::RR num_iter = NTL::to_RR(binomial_wrapper(n,t)) / - NTL::to_RR( kPlusLHalfChoosePHalf * kPlusLHalfChoosePHalf * - binomial_wrapper(n-k-l,t-p) ); - FS_IS_candidate_cost = Fin_Send_IS_candidate_cost(n_real,r_real,l_real); - NTL::ZZ kPlusLHalfChoosePFourths = binomial_wrapper((k+l)/2,p/4); - NTL::RR kPlusLHalfChoosePFourths_real = NTL::to_RR(kPlusLHalfChoosePFourths); - NTL::RR minOperandRight, min; - NTL::RR PChoosePHalf = NTL::to_RR(binomial_wrapper(p,p/2)); - NTL::RR kPlusLChoosePHalf = NTL::to_RR(binomial_wrapper((k+l),p/2)); - minOperandRight = NTL::to_RR(binomial_wrapper((k+l)/2,p/2)) / PChoosePHalf; - min = kPlusLHalfChoosePFourths_real > minOperandRight ? 
minOperandRight : kPlusLHalfChoosePFourths_real; - - /* hoist out anything not depending on l_1/l_2 split*/ +double isd_log_cost_classic_MMT(const uint32_t n, const uint32_t k, + const uint32_t t) { + uint32_t r = n - k; + NTL::RR n_real = NTL::RR(n); + NTL::RR r_real = NTL::RR(r); + NTL::RR k_real = n_real - r_real; + + NTL::RR min_log_cost = n_real; // unreachable upper bound + NTL::RR log_cost, log_mem_cost; + uint32_t best_l = L_MIN_MMT, best_p = 4, constrained_max_l = 0, + constrained_max_p; +#if defined(EXPLORE_REPS) + uint32_t best_l1; +#endif + + NTL::RR FS_IS_candidate_cost; + constrained_max_p = P_MAX_MMT > t ? t : P_MAX_MMT; + /* p should be divisible by 4 in MMT */ + for (uint32_t p = 4; p <= constrained_max_p; p = p + 4) { + constrained_max_l = + (L_MAX_MMT > (n - k - (t - p)) ? (n - k - (t - p)) : L_MAX_MMT); + for (uint32_t l = L_MIN_MMT; l <= constrained_max_l; l++) { + NTL::RR l_real = NTL::to_RR(l); + NTL::ZZ kPlusLHalfChoosePHalf = binomial_wrapper((k + l) / 2, p / 2); + NTL::RR num_iter = + NTL::to_RR(binomial_wrapper(n, t)) / + NTL::to_RR(kPlusLHalfChoosePHalf * kPlusLHalfChoosePHalf * + binomial_wrapper(n - k - l, t - p)); + FS_IS_candidate_cost = Fin_Send_IS_candidate_cost(n_real, r_real, l_real); + NTL::ZZ kPlusLHalfChoosePFourths = binomial_wrapper((k + l) / 2, p / 4); + NTL::RR kPlusLHalfChoosePFourths_real = + NTL::to_RR(kPlusLHalfChoosePFourths); + NTL::RR minOperandRight, min; + NTL::RR PChoosePHalf = NTL::to_RR(binomial_wrapper(p, p / 2)); + NTL::RR kPlusLChoosePHalf = NTL::to_RR(binomial_wrapper((k + l), p / 2)); + minOperandRight = + NTL::to_RR(binomial_wrapper((k + l) / 2, p / 2)) / PChoosePHalf; + min = kPlusLHalfChoosePFourths_real > minOperandRight + ? minOperandRight + : kPlusLHalfChoosePFourths_real; + + /* hoist out anything not depending on l_1/l_2 split*/ #if defined(EXPLORE_REPRS) - for(uint32_t l_1 = 1 ; l_1 <= l ; l_1++){ - uint32_t l_2= l-l_1; + for (l_1 = 1; l_1 <= l; l_1++) { + uint32_t l_2 = l - l_1; #else - uint32_t l_2 = NTL::conv(log2_RR(kPlusLHalfChoosePFourths_real / NTL::to_RR(binomial_wrapper(p,p/2)))); - /*clamp l_2 to a safe value , 0 < l_2 < l*/ - l_2 = l_2 <= 0 ? 1 : l_2; - l_2 = l_2 >= l ? l-1 : l_2; - - uint32_t l_1= l - l_2; + uint32_t l_2 = NTL::conv( + log2_RR(kPlusLHalfChoosePFourths_real / + NTL::to_RR(binomial_wrapper(p, p / 2)))); + /*clamp l_2 to a safe value , 0 < l_2 < l*/ + l_2 = l_2 <= 0 ? 1 : l_2; + l_2 = l_2 >= l ? 
l - 1 : l_2; + + uint32_t l_1 = l - l_2; #endif - NTL::RR interm = kPlusLHalfChoosePFourths_real / NTL::power2_RR(l_2) * - NTL::to_RR(p/2*l_1); - - NTL::RR otherFactor = ( NTL::to_RR(p/4*l_2) + interm ); - NTL::RR cost_iter = FS_IS_candidate_cost + - min*otherFactor + - kPlusLHalfChoosePFourths_real * NTL::to_RR(p/2*l_2); - - NTL::RR lastAddend = otherFactor + - kPlusLHalfChoosePFourths_real * - kPlusLChoosePHalf * PChoosePHalf / - NTL::power2_RR(l) * - NTL::to_RR( p*(r-l) ); - lastAddend = lastAddend * kPlusLHalfChoosePFourths_real; - cost_iter += lastAddend; -// #if 0 - - NTL::RR log_MMT_space = r_real*n_real + - kPlusLHalfChoosePFourths_real * - (NTL::to_RR(p/4)* log2_RR(NTL::to_RR(k+l/2))+ NTL::to_RR(l_2) )+ - NTL::to_RR(min) * (NTL::to_RR(p/2)* log2_RR(NTL::to_RR(k+l))+ NTL::to_RR(l) ); - log_MMT_space = log2_RR(log_MMT_space); - cost_iter = cost_iter*log_MMT_space; -// #endif - log_cost = log2_RR(num_iter) + log2_RR(cost_iter); - if(min_log_cost > log_cost){ - min_log_cost = log_cost; - best_l = l; - best_l1 = l_1; - best_p = p; - log_mem_cost = log_MMT_space; - } + NTL::RR interm = kPlusLHalfChoosePFourths_real / NTL::power2_RR(l_2) * + NTL::to_RR(p / 2 * l_1); + + NTL::RR otherFactor = (NTL::to_RR(p / 4 * l_2) + interm); + NTL::RR cost_iter = + FS_IS_candidate_cost + min * otherFactor + + kPlusLHalfChoosePFourths_real * NTL::to_RR(p / 2 * l_2); + + NTL::RR lastAddend = + otherFactor + kPlusLHalfChoosePFourths_real * kPlusLChoosePHalf * + PChoosePHalf / NTL::power2_RR(l) * + NTL::to_RR(p * (r - l)); + lastAddend = lastAddend * kPlusLHalfChoosePFourths_real; + cost_iter += lastAddend; + // #if 0 + + NTL::RR log_MMT_space = + r_real * n_real + + kPlusLHalfChoosePFourths_real * + (NTL::to_RR(p / 4) * log2_RR(NTL::to_RR(k + l / 2)) + + NTL::to_RR(l_2)) + + NTL::to_RR(min) * (NTL::to_RR(p / 2) * log2_RR(NTL::to_RR(k + l)) + + NTL::to_RR(l)); + log_MMT_space = log2_RR(log_MMT_space); + cost_iter = cost_iter * log_MMT_space; + // #endif + log_cost = log2_RR(num_iter) + log2_RR(cost_iter); + if (min_log_cost > log_cost) { + min_log_cost = log_cost; + best_l = l; #if defined(EXPLORE_REPRS) - } + best_l1 = l_1; +#endif + best_p = p; + log_mem_cost = log_MMT_space; + } +#if defined(EXPLORE_REPRS) + } #endif - } } - std::cerr << std::endl << "MMT Best l: " << best_l - << " best p: " << best_p - << " best l1: " << best_l1 - << std::endl; - if(best_p == constrained_max_p){ - std::cerr << std::endl << "Warning: p on exploration edge! " << std::endl; - } - if(best_l == constrained_max_l){ - std::cerr << std::endl << "Warning: l on exploration edge! 
" << std::endl; - } - //std::cerr << log_mem_cost << " "; - return NTL::conv( min_log_cost ); + } + spdlog::info("MMT Best l {}, best p: {}", best_l, best_p); + if (best_p == constrained_max_p) { + spdlog::warn("Warning: p {p} on exploration edge!"); + } + if (best_l == constrained_max_l) { + spdlog::warn("Warning: l {l} on exploration edge!"); + } + return NTL::conv(min_log_cost); } - #define P_MAX_BJMM 20 // P_MAX_MMT -#define L_MAX_BJMM 90 //L_MAX_MMT +#define L_MAX_BJMM 90 // L_MAX_MMT #define Eps1_MAX_BJMM 4 #define Eps2_MAX_BJMM 4 -double isd_log_cost_classic_BJMM(const uint32_t n, - const uint32_t k, +double isd_log_cost_classic_BJMM(const uint32_t n, const uint32_t k, const uint32_t t) { - NTL::RR n_real = NTL::RR(n); - NTL::RR k_real = NTL::RR(k); - NTL::RR t_real = NTL::RR(t); - uint32_t r = n-k; - NTL::RR r_real = NTL::RR(r); - - NTL::RR min_log_cost = n_real; // unreachable upper bound - NTL::RR log_cost; - uint32_t best_l, best_p, - best_eps_1, best_eps_2, - constrained_max_l, constrained_max_p; - - NTL::RR FS_IS_candidate_cost; - constrained_max_p = P_MAX_BJMM > t ? t : P_MAX_BJMM; - /*p should be divisible by 2 in BJMM */ - for(uint32_t p = 2; p < constrained_max_p; p = p+2 ){ - /* sweep over all the valid eps1 knowing that p/2 + eps1 should be a - * multiple of 4*/ - constrained_max_l = ( L_MAX_BJMM > (n-k-(t-p)) ? (n-k-(t-p)) : L_MAX_BJMM ); - for(uint32_t l = 0; l < constrained_max_l; l++){ - for(uint32_t eps1 = 2+(p%2) ; eps1 < Eps1_MAX_BJMM; eps1 = eps1 + 2) { - uint32_t p_1 = p/2 + eps1; - /* sweep over all the valid eps2 knowing that p_1/2 + eps2 should - * be even */ - for(uint32_t eps2 = (p_1%2) ; eps2 < Eps2_MAX_BJMM; eps2 = eps2 + 2){ - uint32_t p_2 = p_1/2 + eps2; - - - /* Available parameters p, p_1,p_2,p_3, l */ - NTL::RR l_real = NTL::RR(l); - FS_IS_candidate_cost = Fin_Send_IS_candidate_cost(n_real,n_real-k_real,l_real); - uint32_t p_3 = p_2/2; - - NTL::ZZ L3_list_len = binomial_wrapper((k+l)/2,p_3); - NTL::RR L3_list_len_real = NTL::to_RR(L3_list_len); - /* the BJMM number of iterations depends only on L3 parameters - * precompute it */ - NTL::RR num_iter = NTL::to_RR( binomial_wrapper(n,t) ) / - NTL::to_RR( binomial_wrapper((k+l),p) * - binomial_wrapper(r-l,t-p) - ); - NTL::RR P_invalid_splits = NTL::power(L3_list_len_real,2) / - NTL::to_RR( binomial_wrapper(k+l,p_2)); - num_iter = num_iter / NTL::power(P_invalid_splits,4); - - /* lengths of lists 2 to 0 have to be divided by the number of repr.s*/ - NTL::RR L2_list_len = NTL::to_RR(binomial_wrapper(k+l,p_2)) * - NTL::power(P_invalid_splits,1); - NTL::RR L1_list_len = NTL::to_RR(binomial_wrapper(k+l,p_1)) * - NTL::power(P_invalid_splits,2); - /* estimating the range for r_1 and r_2 requires to compute the - * number of representations rho_1 and rho_2 */ - - NTL::ZZ rho_2 = binomial_wrapper(p_1,p_1/2) * - binomial_wrapper(k+l-p_1,eps2); - NTL::ZZ rho_1 = binomial_wrapper(p,p/2) * - binomial_wrapper(k+l-p,eps1); - int min_r2 = NTL::conv(NTL::log(NTL::to_RR(rho_2)) / - NTL::log(NTL::RR(2))); - int max_r1 = NTL::conv(NTL::log(NTL::to_RR(rho_1)) / - NTL::log(NTL::RR(2))); - - /*enumerate r_1 and r_2 over the suggested range - * log(rho_2) < r2 < r_1 < log(rho_1)*/ - /* clamp to safe values */ - min_r2 = min_r2 > 0 ? min_r2 : 1; - max_r1 = max_r1 < (int)l ? 
max_r1 : l-1; - - NTL::RR p_real = NTL::RR(p); - for(int r_2 = min_r2 ; r_2 < max_r1 - 1; r_2++){ - for(int r_1 = r_2+1; r_1 < max_r1 ; r_1++){ - - /*add the cost of building Layer 3 to cost_iter */ - NTL::RR cost_iter = NTL::to_RR(4) * - (k + l + 2*L3_list_len_real + - r_2 + - NTL::power(L3_list_len_real,2)* - NTL::to_RR(2*p_3*r_2)); - - /* add the cost of building Layer 2 */ - cost_iter += 2 * (NTL::power((NTL::to_RR(rho_2) / - (NTL::power2_RR(r_2)))* - NTL::power(L3_list_len_real,2),2) - * 2 * p_2 * (r_1-r_2)); - - /* add the cost of building Layer 1 */ - cost_iter += NTL::power((NTL::to_RR(rho_1) / - NTL::power2_RR(r_1)) * - (NTL::to_RR(rho_2) / - NTL::power2_RR(r_2))* - NTL::power(L3_list_len_real,2),4) * 2 * p_1 * l; - - /* add the cost of building L0 */ - cost_iter += p * (r - l) * - NTL::power((NTL::to_RR(rho_1) / NTL::power2_RR(r_1)) * - (NTL::to_RR(rho_2) / - NTL::power2_RR(r_2))* - NTL::power(L3_list_len_real,2),4) - / NTL::to_RR(l); - - log_cost = log2_RR(num_iter) + log2_RR(cost_iter); - - if(min_log_cost > log_cost){ - min_log_cost = log_cost; - best_l = l; - best_p = p; - best_eps_1 = eps1; - best_eps_2 = eps2; - } - } - } - - } /*end of iteration over l */ - /* to review up to to here */ - } /* end for over eps2 */ - } /* end for over eps1 */ - } /* end for over p*/ - std::cerr << std::endl << "BJMM Best l: " << best_l - << " best p: " << best_p - << " best eps1: " << best_eps_1 - << " best eps2: " << best_eps_2 - << std::endl; - return NTL::conv( min_log_cost ); + NTL::RR n_real = NTL::RR(n); + NTL::RR k_real = NTL::RR(k); + NTL::RR t_real = NTL::RR(t); + uint32_t r = n - k; + NTL::RR r_real = NTL::RR(r); + + NTL::RR min_log_cost = n_real; // unreachable upper bound + NTL::RR log_cost; + std::optional best_p, best_l, best_eps_1, best_eps_2, + constrained_max_l, constrained_max_p; + + NTL::RR FS_IS_candidate_cost; + constrained_max_p = P_MAX_BJMM > t ? t : P_MAX_BJMM; + /*p should be divisible by 2 in BJMM */ + for (uint32_t p = 2; p < constrained_max_p; p = p + 2) { + /* sweep over all the valid eps1 knowing that p/2 + eps1 should be a + * multiple of 4*/ + constrained_max_l = + (L_MAX_BJMM > (n - k - (t - p)) ? 
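/* same cap as in the other variants: keeping l <= n-k-(t-p) keeps binomial(r-l, t-p) non-zero */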
(n - k - (t - p)) : L_MAX_BJMM); + for (uint32_t l = 0; l < constrained_max_l; l++) { + for (uint32_t eps1 = 2 + (p % 2); eps1 < Eps1_MAX_BJMM; eps1 = eps1 + 2) { + uint32_t p_1 = p / 2 + eps1; + /* sweep over all the valid eps2 knowing that p_1/2 + eps2 should + * be even */ + for (uint32_t eps2 = (p_1 % 2); eps2 < Eps2_MAX_BJMM; eps2 = eps2 + 2) { + uint32_t p_2 = p_1 / 2 + eps2; + + /* Available parameters p, p_1,p_2,p_3, l */ + NTL::RR l_real = NTL::RR(l); + FS_IS_candidate_cost = + Fin_Send_IS_candidate_cost(n_real, n_real - k_real, l_real); + uint32_t p_3 = p_2 / 2; + + NTL::ZZ L3_list_len = binomial_wrapper((k + l) / 2, p_3); + NTL::RR L3_list_len_real = NTL::to_RR(L3_list_len); + /* the BJMM number of iterations depends only on L3 parameters + * precompute it */ + NTL::RR num_iter = NTL::to_RR(binomial_wrapper(n, t)) / + NTL::to_RR(binomial_wrapper((k + l), p) * + binomial_wrapper(r - l, t - p)); + NTL::RR P_invalid_splits = NTL::power(L3_list_len_real, 2) / + NTL::to_RR(binomial_wrapper(k + l, p_2)); + num_iter = num_iter / NTL::power(P_invalid_splits, 4); + + /* lengths of lists 2 to 0 have to be divided by the number of + * repr.s*/ + NTL::RR L2_list_len = NTL::to_RR(binomial_wrapper(k + l, p_2)) * + NTL::power(P_invalid_splits, 1); + NTL::RR L1_list_len = NTL::to_RR(binomial_wrapper(k + l, p_1)) * + NTL::power(P_invalid_splits, 2); + /* estimating the range for r_1 and r_2 requires to compute the + * number of representations rho_1 and rho_2 */ + + NTL::ZZ rho_2 = binomial_wrapper(p_1, p_1 / 2) * + binomial_wrapper(k + l - p_1, eps2); + NTL::ZZ rho_1 = + binomial_wrapper(p, p / 2) * binomial_wrapper(k + l - p, eps1); + int min_r2 = NTL::conv(NTL::log(NTL::to_RR(rho_2)) / + NTL::log(NTL::RR(2))); + int max_r1 = NTL::conv(NTL::log(NTL::to_RR(rho_1)) / + NTL::log(NTL::RR(2))); + + /*enumerate r_1 and r_2 over the suggested range + * log(rho_2) < r2 < r_1 < log(rho_1)*/ + /* clamp to safe values */ + min_r2 = min_r2 > 0 ? min_r2 : 1; + max_r1 = max_r1 < (int)l ? 
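/* besides the log(rho_1) bound, r_1 is clamped strictly below l */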
max_r1 : l - 1; + + NTL::RR p_real = NTL::RR(p); + for (int r_2 = min_r2; r_2 < max_r1 - 1; r_2++) { + for (int r_1 = r_2 + 1; r_1 < max_r1; r_1++) { + + /*add the cost of building Layer 3 to cost_iter */ + NTL::RR cost_iter = + NTL::to_RR(4) * + (k + l + 2 * L3_list_len_real + r_2 + + NTL::power(L3_list_len_real, 2) * NTL::to_RR(2 * p_3 * r_2)); + + /* add the cost of building Layer 2 */ + cost_iter += + 2 * (NTL::power((NTL::to_RR(rho_2) / (NTL::power2_RR(r_2))) * + NTL::power(L3_list_len_real, 2), + 2) * + 2 * p_2 * (r_1 - r_2)); + + /* add the cost of building Layer 1 */ + cost_iter += + NTL::power((NTL::to_RR(rho_1) / NTL::power2_RR(r_1)) * + (NTL::to_RR(rho_2) / NTL::power2_RR(r_2)) * + NTL::power(L3_list_len_real, 2), + 4) * + 2 * p_1 * l; + + /* add the cost of building L0 */ + cost_iter += + p * (r - l) * + NTL::power((NTL::to_RR(rho_1) / NTL::power2_RR(r_1)) * + (NTL::to_RR(rho_2) / NTL::power2_RR(r_2)) * + NTL::power(L3_list_len_real, 2), + 4) / + NTL::to_RR(l); + + log_cost = log2_RR(num_iter) + log2_RR(cost_iter); + + if (min_log_cost > log_cost) { + min_log_cost = log_cost; + best_l = l; + best_p = p; + best_eps_1 = eps1; + best_eps_2 = eps2; + } + } + } + + } /*end of iteration over l */ + /* to review up to to here */ + } /* end for over eps2 */ + } /* end for over eps1 */ + } /* end for over p*/ + + if (!best_l || !best_eps_1 || !constrained_max_l || !constrained_max_p) { + spdlog::error("Error: One or more variables are not initialized."); + throw std::runtime_error("One or more variables are not initialized."); + } else { + spdlog::info("BJMM Best l {}, best p: {}, best eps1: {}, best eps2: {}", + optional_to_string(best_l), optional_to_string(best_eps_1), + optional_to_string(constrained_max_l), + optional_to_string(constrained_max_p)); + } + return NTL::conv(min_log_cost); } /***************************Quantum ISDs***************************************/ - -const NTL::RR quantum_gauss_red_cost(const NTL::RR &n, - const NTL::RR & k) { +const NTL::RR quantum_gauss_red_cost(const NTL::RR &n, const NTL::RR &k) { // return 0.5* NTL::power(n-k,3) + k*NTL::power((n-k),2); - return 1.5 * NTL::power(n - k, 2) - 0.5 * (n-k); + return 1.5 * NTL::power(n - k, 2) - 0.5 * (n - k); } double isd_log_cost_quantum_LB(const uint32_t n, const uint32_t k, @@ -526,91 +535,91 @@ double isd_log_cost_quantum_LB(const uint32_t n, const uint32_t k, NTL::RR iteration_cost = quantum_gauss_red_cost(n_real, k_real) + NTL::to_RR(binomial_wrapper(k, p)) * NTL::log(n_real - k_real) / NTL::log(NTL::RR(2)); - NTL::RR log_cost = log_pi_fourths + .5* - (lnBinom(n_real, t_real) - log_pinv - (lnBinom(k_real, p_real) + - lnBinom(n_real - k_real, t_real - p_real))); + NTL::RR log_cost = + log_pi_fourths + .5 * (lnBinom(n_real, t_real) - log_pinv - + (lnBinom(k_real, p_real) + + lnBinom(n_real - k_real, t_real - p_real))); log_cost += NTL::log(iteration_cost); log_cost = log_cost / NTL::log(NTL::RR(2)); return NTL::conv(log_cost); } -#define MAX_M (t/2) +#define MAX_M (t / 2) -double isd_log_cost_quantum_stern(const uint32_t n, - const uint32_t k, +double isd_log_cost_quantum_stern(const uint32_t n, const uint32_t k, const uint32_t t) { - NTL::RR n_real = NTL::RR(n); - NTL::RR k_real = NTL::RR(k); - NTL::RR t_real = NTL::RR(t); - NTL::RR current_complexity, log_p_success, c_it, c_dec; - - // Start computing Stern's parameter invariant portions of complexity - NTL::RR log_pi_fourths = NTL::log(pi*0.25); - // compute the probability of a random k * k being invertible - NTL::RR log_pinv = 
log_probability_k_by_k_is_inv(k_real); - // compute the cost of inverting the matrix, in a quantum execution env. - NTL::RR c_inv = quantum_gauss_red_cost(n_real,k_real); - - // optimize Stern's parameters : - // m : the # of errors in half of the chosen dimensions - // l : the length of the run of zeroes in the not chosen dimensions - // done via exhaustive parameter space search, minimizing the total - // complexity. - // Initial value set to codeword bruteforce to ensure the minimum is found. - NTL::RR min_stern_complexity = NTL::RR(n)*NTL::log(NTL::RR(2)); - - for(long m = 1; m <= MAX_M; m++){ - NTL::RR m_real = NTL::RR(m); - /* previous best complexity as a function of l alone. - * initialize to bruteforce-equivalent, break optimization loop as soon - * as a minimum is found */ - NTL::RR prev_best_complexity = NTL::RR(t); - for(long l = 0; l < (n-k-(t-2*m)); l++ ){ - - NTL::RR l_real = NTL::RR(l); - log_p_success = lnBinom(t_real, 2*m_real) + - lnBinom(n_real-t_real, k_real-2*m_real) + - lnBinom(2*m_real,m_real) + - lnBinom(n_real-k_real-t_real+2*m_real,l_real); - log_p_success = log_p_success - ( m_real*NTL::log(NTL::RR(4)) + - lnBinom(n_real,k_real) + - lnBinom(n_real -k_real, l_real)); - current_complexity = -(log_p_success+log_pinv)*0.5 + log_pi_fourths; - /* to match specifications , the term should be - * (n_real-k_real), as per in deVries, although - * David Hobach thesis mentions it to be - * (n_real-k_real-l_real), and it seems to match. - * amend specs for the typo. */ - c_it = l_real + - (n_real-k_real-l_real)* NTL::to_RR(binomial_wrapper(k/2,m)) / - NTL::power2_RR(-l); - - c_it = c_it * 2*m_real * NTL::to_RR(binomial_wrapper(k/2,m)); + NTL::RR n_real = NTL::RR(n); + NTL::RR k_real = NTL::RR(k); + NTL::RR t_real = NTL::RR(t); + NTL::RR current_complexity, log_p_success, c_it, c_dec; + + // Start computing Stern's parameter invariant portions of complexity + NTL::RR log_pi_fourths = NTL::log(pi * 0.25); + // compute the probability of a random k * k being invertible + NTL::RR log_pinv = log_probability_k_by_k_is_inv(k_real); + // compute the cost of inverting the matrix, in a quantum execution env. + NTL::RR c_inv = quantum_gauss_red_cost(n_real, k_real); + + // optimize Stern's parameters : + // m : the # of errors in half of the chosen dimensions + // l : the length of the run of zeroes in the not chosen dimensions + // done via exhaustive parameter space search, minimizing the total + // complexity. + // Initial value set to codeword bruteforce to ensure the minimum is found. + NTL::RR min_stern_complexity = NTL::RR(n) * NTL::log(NTL::RR(2)); + + for (long m = 1; m <= MAX_M; m++) { + NTL::RR m_real = NTL::RR(m); + /* previous best complexity as a function of l alone. + * initialize to bruteforce-equivalent, break optimization loop as soon + * as a minimum is found */ + NTL::RR prev_best_complexity = NTL::RR(t); + for (long l = 0; l < (n - k - (t - 2 * m)); l++) { + + NTL::RR l_real = NTL::RR(l); + log_p_success = lnBinom(t_real, 2 * m_real) + + lnBinom(n_real - t_real, k_real - 2 * m_real) + + lnBinom(2 * m_real, m_real) + + lnBinom(n_real - k_real - t_real + 2 * m_real, l_real); + log_p_success = log_p_success - + (m_real * NTL::log(NTL::RR(4)) + lnBinom(n_real, k_real) + + lnBinom(n_real - k_real, l_real)); + current_complexity = -(log_p_success + log_pinv) * 0.5 + log_pi_fourths; + /* to match specifications , the term should be + * (n_real-k_real), as per in deVries, although + * David Hobach thesis mentions it to be + * (n_real-k_real-l_real), and it seems to match. 
+ * amend specs for the typo. */ + c_it = l_real + (n_real - k_real - l_real) * + NTL::to_RR(binomial_wrapper(k / 2, m)) / + NTL::power2_RR(-l); + + c_it = c_it * 2 * m_real * NTL::to_RR(binomial_wrapper(k / 2, m)); #if IGNORE_DECODING_COST == 1 - c_dec = 0.0; + c_dec = 0.0; #elif IGNORE_DECODING_COST == 0 - /*cost of decoding estimated as per Golomb CWDEC - * decoding an n-bit vector with weight k is - * CWDEC_cost(k,n)=O(n^2 log_2(n)) and following deVries, where - * c_dec = CWDEC_cost(n-k, n) + k + CWDEC_cost(l,n-k)*/ - c_dec = n_real*n_real*NTL::log(n_real) + k_real + - (n_real-k_real)*(n_real-k_real)*NTL::log((n_real-k_real)); + /*cost of decoding estimated as per Golomb CWDEC + * decoding an n-bit vector with weight k is + * CWDEC_cost(k,n)=O(n^2 log_2(n)) and following deVries, where + * c_dec = CWDEC_cost(n-k, n) + k + CWDEC_cost(l,n-k)*/ + c_dec = + n_real * n_real * NTL::log(n_real) + k_real + + (n_real - k_real) * (n_real - k_real) * NTL::log((n_real - k_real)); #endif - current_complexity = current_complexity + NTL::log(c_it+c_inv+c_dec); - if(current_complexity < prev_best_complexity){ - prev_best_complexity = current_complexity; - } else{ - break; - } - } - if(current_complexity < min_stern_complexity){ - min_stern_complexity = current_complexity; - } + current_complexity = current_complexity + NTL::log(c_it + c_inv + c_dec); + if (current_complexity < prev_best_complexity) { + prev_best_complexity = current_complexity; + } else { + break; + } } - return NTL::conv( min_stern_complexity / NTL::log(NTL::RR(2.0)) ); + if (current_complexity < min_stern_complexity) { + min_stern_complexity = current_complexity; + } + } + return NTL::conv(min_stern_complexity / NTL::log(NTL::RR(2.0))); } - /***************************Aggregation ***************************************/ double get_qc_red_factor_log(const uint32_t qc_order, const uint32_t is_kra) { @@ -621,7 +630,6 @@ double get_qc_red_factor_log(const uint32_t qc_order, const uint32_t is_kra) { return qc_red_factor / logl(2); } - double c_isd_log_cost(const uint32_t n, const uint32_t k, const uint32_t t, const uint32_t qc_order, const uint32_t is_kra, const bool compute_qc_reduction_factor) { @@ -631,72 +639,59 @@ double c_isd_log_cost(const uint32_t n, const uint32_t k, const uint32_t t, min_cost = std::numeric_limits::max(); - std::cout << "Classic "; #if SKIP_PRANGE == 0 current_cost = isd_log_cost_classic_Prange(n, k, t) - qc_red_factor; - std::cerr << "Classic Prange: " << std::setprecision(5) << current_cost - << std::endl; - std::cout << current_cost << " "; + spdlog::info("Classic Prange: {:.5f}", current_cost); min_cost = min_cost > current_cost ? current_cost : min_cost; #endif #if SKIP_LB == 0 current_cost = isd_log_cost_classic_LB(n, k, t) - qc_red_factor; - std::cerr << "Classic Lee-Brickell ISD: " << std::setprecision(5) - << current_cost << std::endl; - std::cout << current_cost << " "; + spdlog::info("Classic Lee-Brickell: {:.5f}", current_cost); min_cost = min_cost > current_cost ? current_cost : min_cost; #endif #if SKIP_LEON == 0 current_cost = isd_log_cost_classic_Leon(n, k, t) - qc_red_factor; - std::cerr << "Classic Leon ISD: " << std::setprecision(5) << current_cost - << std::endl; - std::cout << current_cost << " "; + spdlog::info("Classic Leon: {:.5f}", current_cost); min_cost = min_cost > current_cost ? 
current_cost : min_cost; #endif #if SKIP_STERN == 0 current_cost = isd_log_cost_classic_Stern(n, k, t) - qc_red_factor; - std::cerr << "Classic Stern ISD: " << std::setprecision(5) << current_cost - << std::endl; - std::cout << current_cost << " "; + spdlog::info("Classic Stern: {:.5f}", current_cost); min_cost = min_cost > current_cost ? current_cost : min_cost; #endif #if SKIP_FS == 0 current_cost = isd_log_cost_classic_FS(n, k, t) - qc_red_factor; - std::cerr << "Classic Fin-Send ISD: " << std::setprecision(5) << current_cost - << std::endl; - std::cout << current_cost << " "; + spdlog::info("Classic Fin-Send: {:.5f}", current_cost); min_cost = min_cost > current_cost ? current_cost : min_cost; #endif #if SKIP_MMT == 0 current_cost = isd_log_cost_classic_MMT(n, k, t) - qc_red_factor; - std::cerr << "Classic MMT ISD: " << std::setprecision(5) << current_cost - << std::endl; - std::cout << current_cost << " "; + spdlog::info("Classic MMT: {:.5f}", current_cost); min_cost = min_cost > current_cost ? current_cost : min_cost; #endif #if SKIP_BJMM == 0 current_cost = isd_log_cost_classic_BJMM(n, k, t) - qc_red_factor; - std::cerr << "Classic BJMM ISD: " << std::setprecision(5) << current_cost - << std::endl; - std::cout << current_cost << " "; + spdlog::info("Classic BJMM: {:.5f}", current_cost); min_cost = min_cost > current_cost ? current_cost : min_cost; #endif - std::cout << std::endl; + std::cout << std::endl; - return min_cost; + return min_cost; } double q_isd_log_cost(const uint32_t n, const uint32_t k, const uint32_t t, - const uint32_t qc_order, const uint32_t is_kra, const bool compute_qc_reduction_factor) { + const uint32_t qc_order, const uint32_t is_kra, + const bool compute_qc_reduction_factor) { double min_cost, current_cost; std::cout << "Quantum "; - double qc_red_factor = compute_qc_reduction_factor? get_qc_red_factor_log(qc_order, is_kra): 0; + double qc_red_factor = + compute_qc_reduction_factor ? get_qc_red_factor_log(qc_order, is_kra) : 0; min_cost = std::numeric_limits::max(); @@ -705,15 +700,14 @@ double q_isd_log_cost(const uint32_t n, const uint32_t k, const uint32_t t, */ #if SKIP_Q_LB == 0 current_cost = isd_log_cost_quantum_LB(n, k, t, 1) - qc_red_factor; - std::cout << " Q-Lee-Brickell ISD: " << /**/current_cost << std::endl; + spdlog::info("Quantum Lee-Brickell: {:.5f}", current_cost); min_cost = min_cost > current_cost ? current_cost : min_cost; #endif #if SKIP_Q_STERN == 0 current_cost = isd_log_cost_quantum_stern(n, k, t) - qc_red_factor; - std::cout << ", Q-Stern ISD: " << current_cost << std::endl; + spdlog::info("Quantum Stern: {:.5f}", current_cost); min_cost = min_cost > current_cost ? 
current_cost : min_cost; - std::cout << std::endl; #endif return min_cost; diff --git a/logging.hpp b/logging.hpp new file mode 100644 index 0000000..e6da66f --- /dev/null +++ b/logging.hpp @@ -0,0 +1,71 @@ +#pragma once +#include +#include +#include +#include +#include + +void configure_logger() { + // Initialize the logger + auto logger = spdlog::basic_logger_mt("default_logger", "logs/default.log"); + spdlog::set_default_logger(logger); + + // Retrieve the environment variable for log level + const char *log_level_env = std::getenv("LOG_LEVEL"); + + if (log_level_env) { + std::string log_level_str(log_level_env); + + // Configure the log level based on the environment variable + if (log_level_str == "trace") { + spdlog::set_level(spdlog::level::trace); + } else if (log_level_str == "debug") { + spdlog::set_level(spdlog::level::debug); + } else if (log_level_str == "info") { + spdlog::set_level(spdlog::level::info); + } else if (log_level_str == "warn") { + spdlog::set_level(spdlog::level::warn); + } else if (log_level_str == "err") { + spdlog::set_level(spdlog::level::err); + } else if (log_level_str == "critical") { + spdlog::set_level(spdlog::level::critical); + } else { + spdlog::set_level(spdlog::level::info); // Default level + } + } else { + spdlog::set_level(spdlog::level::info); // Default level if environment + // variable is not set + } +} + +std::string optional_to_string(const std::optional &opt) { + if (opt) { + return std::to_string(*opt); + } else { + return "Not Initialized"; + } +} + +template std::string array_to_string(const T *array, size_t size) { + std::string result = "["; + for (size_t i = 0; i < size; ++i) { + result += std::to_string(array[i]); + if (i < size - 1) { + result += ", "; + } + } + result += "]"; + return result; +} + +template std::string array_to_string(const std::vector &vec) { + std::string result = "["; + for (size_t i = 0; i < vec.size(); ++i) { + result += std::to_string(vec[i]); + if (i < vec.size() - 1) { + result += ", "; + } + } + result += "]"; + return result; +} diff --git a/work_factor_computation_parallel.cpp b/work_factor_computation_parallel.cpp index 252b18a..f572ff1 100644 --- a/work_factor_computation_parallel.cpp +++ b/work_factor_computation_parallel.cpp @@ -14,10 +14,13 @@ #include "binomials.hpp" #include "isd_cost_estimate.hpp" +#include "logging.hpp" #include +int main() { + // Configure the logger + configure_logger(); -int main(int argc, char *argv[]) { std::ifstream file("out/isd_values.json"); // Check if the file is open @@ -34,8 +37,8 @@ int main(int argc, char *argv[]) { NTL::RR::SetPrecision(NUM_BITS_REAL_MANTISSA); pi = NTL::ComputePi_RR(); bool is_kra_values[] = {true, false}; - // Iterate over the list of entries - #pragma omp parallel for +// Iterate over the list of entries +#pragma omp parallel for for (const auto &entry : j) { uint32_t n = entry["n"]; uint32_t r = entry["r"]; @@ -51,6 +54,9 @@ int main(int argc, char *argv[]) { // std::cout << "n: " << n << ", r: " << r << ", t: " << t << std::endl; for (bool is_kra : is_kra_values) { + spdlog::info("Processing n {}, k {}, t {}, qc_block_size {}, is_kra {}, " + "is_red_factor_applied {}", + n, k, t, qc_block_size, is_kra, is_red_factor_applied); double min_c_cost = c_isd_log_cost(n, k, t, qc_block_size, is_kra, is_red_factor_applied); double min_q_cost = From 254dd9e87d99421f45d1493f0f5acc62f3c92c3c Mon Sep 17 00:00:00 2001 From: Simone Perriello <8199216+tigerjack@users.noreply.github.com> Date: Sat, 20 Jul 2024 13:38:55 +0200 Subject: [PATCH 05/55] MOD 
parameter_generator and incs for logging and std::vector --- parameter_generator.cpp | 41 +++++++++----- partitions_permanents.hpp | 111 +++++++++++++++++++------------------- 2 files changed, 84 insertions(+), 68 deletions(-) diff --git a/parameter_generator.cpp b/parameter_generator.cpp index 280c5a5..35b72c3 100644 --- a/parameter_generator.cpp +++ b/parameter_generator.cpp @@ -1,6 +1,7 @@ #include #include #include +#include #define NUM_BITS_REAL_MANTISSA 128 #define IGNORE_DECODING_COST 0 @@ -11,6 +12,7 @@ #include "binomials.hpp" #include "bit_error_probabilities.hpp" #include "isd_cost_estimate.hpp" +#include "logging.hpp" #include "partitions_permanents.hpp" #include "proper_primes.hpp" #include @@ -49,7 +51,8 @@ uint32_t estimate_t_val(const uint32_t c_sec_level, const uint32_t q_sec_level, } int ComputeDvMPartition(const uint64_t d_v_prime, const uint64_t n_0, - uint64_t mpartition[], uint64_t &d_v) { + std::vector &mpartition, + uint64_t &d_v) { d_v = floor(sqrt(d_v_prime)); d_v = (d_v & 0x01) ? d_v : d_v + 1; uint64_t m = ceil((double)d_v_prime / (double)d_v); @@ -65,9 +68,10 @@ int ComputeDvMPartition(const uint64_t d_v_prime, const uint64_t n_0, return partition_ok; } -uint64_t estimate_dv(const uint32_t c_sec_level, // expressed as - const uint32_t q_sec_level, const uint32_t n_0, - const uint32_t p, uint64_t mpartition[]) { +uint64_t +estimate_dv(const uint32_t c_sec_level, // expressed as + const uint32_t q_sec_level, const uint32_t n_0, const uint32_t p, + std::vector &mpartition) { double achieved_c_sec_level = 0.0; double achieved_q_sec_level = 0.0; double achieved_c_enum_sec_level = 0.0; @@ -150,7 +154,7 @@ int main(int argc, char *argv[]) { << " epsilon " << epsilon << std::endl; uint64_t p, p_th, t, d_v_prime, d_v; - uint64_t mpartition[n_0] = {0}; + std::vector mpartition(n_0, 0); int current_prime_pos = 0; while (proper_primes[current_prime_pos] < starting_prime_lower_bound) { @@ -201,7 +205,9 @@ int main(int argc, char *argv[]) { std::cout << "refining parameters" << std::endl; - uint64_t p_ok, t_ok, d_v_ok, mpartition_ok[n_0] = {0}; + std::optional p_ok; + uint64_t t_ok, d_v_ok; + std::vector mpartition_ok(n_0, 0); /* refinement step taking into account possible invalid m partitions */ do { @@ -240,11 +246,20 @@ int main(int argc, char *argv[]) { current_prime_pos--; } while ((p > (1.0 + epsilon) * p_th) && (current_prime_pos > 0)); - std::cout << "parameter set found: p:" << p_ok << " t: " << t_ok; - std::cout << " d_v : " << d_v_ok << " mpartition: [ "; - for (unsigned i = 0; i < n_0; i++) { - std::cout << mpartition_ok[i] << " "; + if (!p_ok || !d_v_ok) { + spdlog::error("Error: One or more variables are not initialized."); + throw std::runtime_error("One or more variables are not initialized."); + } else { + spdlog::info("parameter set found: p={}, t={}, d_v={}, mpartition={}", + optional_to_string(p_ok), t_ok, optional_to_string(d_v_ok), + array_to_string(mpartition_ok)); + } + // std::cout + // << " p:" << p_ok << " t: " << t_ok; + // std::cout << " d_v : " << d_v_ok << " mpartition: [ "; + // for (unsigned i = 0; i < n_0; i++) { + // std::cout << mpartition_ok[i] << " "; + // } + // std::cout << " ]" << std::endl; + return 0; } - std::cout << " ]" << std::endl; - return 0; -} diff --git a/partitions_permanents.hpp b/partitions_permanents.hpp index c9fa709..81f25de 100644 --- a/partitions_permanents.hpp +++ b/partitions_permanents.hpp @@ -1,5 +1,6 @@ #include #include +#include /* Permanent formulas for circulant matrices as obtained via Sage (macsyma) @@ 
-71,61 +72,61 @@ switch(n_0){ return permanent; } -int FindmPartition(const uint64_t m, - uint64_t mpartition[], - const uint64_t n_0){ - // Enumerate partitions of m with length n_0, - // according to TAOCP, Vol 4 Fascicle 3b, Algorithm H - // PRE : m >= n_0 >= 2 - if ( (m < n_0) || (n_0 < 2) ){ - return 0; - } +int FindmPartition(const uint64_t m, + std::vector &mpartition, + const uint64_t n_0) { + // Enumerate partitions of m with length n_0, + // according to TAOCP, Vol 4 Fascicle 3b, Algorithm H + // PRE : m >= n_0 >= 2 + if ((m < n_0) || (n_0 < 2)) { + return 0; + } - int64_t mpartition_selected[n_0]; - int found_good_partition = 0; - int64_t mpartition_tmp[n_0+1]; - mpartition_tmp[0] = m - (n_0 - 1); - for(unsigned i = 1; i < n_0; i++){ - mpartition_tmp[i] = 1; + int64_t mpartition_selected[n_0]; + int found_good_partition = 0; + int64_t mpartition_tmp[n_0 + 1]; + mpartition_tmp[0] = m - (n_0 - 1); + for (unsigned i = 1; i < n_0; i++) { + mpartition_tmp[i] = 1; + } + // theoretically, mpartition_tmp[n_0] = -1 according to knuth + mpartition_tmp[n_0] = -1; + do { + // visit the partition + if (ComputePermanent(mpartition_tmp, n_0) % 2 == 1U) { + for (unsigned i = 0; i < n_0; i++) { + mpartition_selected[i] = (uint64_t)mpartition_tmp[i]; + } + found_good_partition = 1; + } + if (mpartition_tmp[1] < mpartition_tmp[0] - 1) { + // step H3: easy but very common case + mpartition_tmp[0]--; + mpartition_tmp[1]++; + } else { + // step H4 + int j = 2; + int64_t s = mpartition_tmp[0] + mpartition_tmp[1] - 1; + while (mpartition_tmp[j] >= mpartition_tmp[0] - 1) { + s = s + mpartition_tmp[j]; + j++; + } + if (j >= (int)n_0) { // completed enumeration of partitions + for (unsigned i = 0; i < n_0; i++) { + mpartition[i] = (uint64_t)mpartition_selected[i]; + } + return found_good_partition; + } else { + uint64_t x = mpartition_tmp[j] + 1; + mpartition_tmp[j] = x; + j--; + while (j > 0) { + mpartition_tmp[j] = x; + s = s - x; + j--; + } + mpartition_tmp[0] = s; + } } - // theoretically, mpartition_tmp[n_0] = -1 according to knuth - mpartition_tmp[n_0] = -1; - do { - // visit the partition - if (ComputePermanent(mpartition_tmp,n_0) % 2 == 1U){ - for(unsigned i = 0; i < n_0; i++){ - mpartition_selected[i] = (uint64_t) mpartition_tmp[i]; - } - found_good_partition = 1; - } - if (mpartition_tmp[1] < mpartition_tmp[0] - 1){ - // step H3: easy but very common case - mpartition_tmp[0]--; - mpartition_tmp[1]++; - } else { - // step H4 - int j = 2; - int64_t s = mpartition_tmp[0] + mpartition_tmp[1] - 1; - while ( mpartition_tmp[j] >= mpartition_tmp[0] - 1 ){ - s = s + mpartition_tmp[j]; - j++; - } - if( j >= (int)n_0 ){ // completed enumeration of partitions - for(unsigned i = 0; i < n_0; i++){ - mpartition[i] = (uint64_t) mpartition_selected[i]; - } - return found_good_partition; - } else { - uint64_t x = mpartition_tmp[j]+1; - mpartition_tmp[j] = x; - j--; - while (j > 0) { - mpartition_tmp[j] = x; - s = s - x; - j--; - } - mpartition_tmp[0] = s; - } - } - } while (1); + } while (1); } From bcb804ad7fcdd4a831e4a11b74cff90f378cd00d Mon Sep 17 00:00:00 2001 From: Simone Perriello <8199216+tigerjack@users.noreply.github.com> Date: Sat, 20 Jul 2024 13:50:54 +0200 Subject: [PATCH 06/55] Minor --- isd_cost_estimate.hpp | 1 - 1 file changed, 1 deletion(-) diff --git a/isd_cost_estimate.hpp b/isd_cost_estimate.hpp index be49cf0..4c9ba36 100644 --- a/isd_cost_estimate.hpp +++ b/isd_cost_estimate.hpp @@ -689,7 +689,6 @@ double q_isd_log_cost(const uint32_t n, const uint32_t k, const uint32_t t, const uint32_t 
qc_order, const uint32_t is_kra,
                       const bool compute_qc_reduction_factor) {
   double min_cost, current_cost;
-  std::cout << "Quantum ";
   double qc_red_factor =
       compute_qc_reduction_factor ? get_qc_red_factor_log(qc_order, is_kra) : 0;
 

From 54b8a33532154d979e0d0d1b8428c7deda51f188 Mon Sep 17 00:00:00 2001
From: Simone Perriello <8199216+tigerjack@users.noreply.github.com>
Date: Sat, 20 Jul 2024 13:55:08 +0200
Subject: [PATCH 07/55] ADD logs/ dir to gitignore

---
 .gitignore | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/.gitignore b/.gitignore
index f25699b..ee4c7e5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -9,6 +9,9 @@
 !LICENSE
 !Makefile
+# Log output
+logs/
+
 
 # Cache
 .ccls-cache
 # out

From 6846108efad1ff7998e05dce18592218bb87851e Mon Sep 17 00:00:00 2001
From: Simone Perriello <8199216+tigerjack@users.noreply.github.com>
Date: Sat, 20 Jul 2024 13:55:36 +0200
Subject: [PATCH 08/55] ADD bin dir to gitignore, delete dangerous ignore alls

---
 .gitignore | 11 ++---------
 1 file changed, 2 insertions(+), 9 deletions(-)

diff --git a/.gitignore b/.gitignore
index ee4c7e5..ef02118 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,13 +1,6 @@
 # Warning! Ignore all files w/out extensions
-# Ignore all
-*
-# Unignore all with extensions
-!*.*
-# Unignore all dirs
-!*/
-# Unignore other
-!LICENSE
-!Makefile
+# bin dir
+bin/
 # Log output
 logs/
 

From 5834eb35b6332f58f6b36ea92988a08e9a265de8 Mon Sep 17 00:00:00 2001
From: Simone Perriello <8199216+tigerjack@users.noreply.github.com>
Date: Sat, 20 Jul 2024 16:21:49 +0200
Subject: [PATCH 09/55] ADD dir input parameter to logging

---
 logging.hpp                          | 5 +++--
 work_factor_computation_parallel.cpp | 5 +----
 2 files changed, 4 insertions(+), 6 deletions(-)

diff --git a/logging.hpp b/logging.hpp
index e6da66f..99ce090 100644
--- a/logging.hpp
+++ b/logging.hpp
@@ -5,9 +5,10 @@
 #include
 #include
 
-void configure_logger() {
+void configure_logger(const std::optional filename) {
   // Initialize the logger
-  auto logger = spdlog::basic_logger_mt("default_logger", "logs/default.log");
+  const std::string ff = filename.has_value() ?
filename.value(): "logs/default.log"; + auto logger = spdlog::basic_logger_mt("default_logger", ff); spdlog::set_default_logger(logger); // Retrieve the environment variable for log level diff --git a/work_factor_computation_parallel.cpp b/work_factor_computation_parallel.cpp index f572ff1..eed7d4e 100644 --- a/work_factor_computation_parallel.cpp +++ b/work_factor_computation_parallel.cpp @@ -19,7 +19,7 @@ int main() { // Configure the logger - configure_logger(); + configure_logger(std::nullopt); std::ifstream file("out/isd_values.json"); @@ -50,9 +50,6 @@ int main() { // int v = entry["v"]; // int lambd = entry["lambd"]; - // Output the data - // std::cout << "n: " << n << ", r: " << r << ", t: " << t << std::endl; - for (bool is_kra : is_kra_values) { spdlog::info("Processing n {}, k {}, t {}, qc_block_size {}, is_kra {}, " "is_red_factor_applied {}", From a8b8abc9322c7b8611435ac3048f0c18c60ab635 Mon Sep 17 00:00:00 2001 From: Simone Perriello <8199216+tigerjack@users.noreply.github.com> Date: Sat, 20 Jul 2024 17:18:26 +0200 Subject: [PATCH 10/55] MOD change ISD functions result from double to Result struct --- isd_cost_estimate.hpp | 243 ++++++++++++++++++--------- parameter_generator.cpp | 8 +- work_factor_computation.cpp | 4 +- work_factor_computation_parallel.cpp | 22 ++- 4 files changed, 189 insertions(+), 88 deletions(-) diff --git a/isd_cost_estimate.hpp b/isd_cost_estimate.hpp index 4c9ba36..d275a88 100644 --- a/isd_cost_estimate.hpp +++ b/isd_cost_estimate.hpp @@ -4,8 +4,6 @@ #include #include #include -#include -#include #include #define SKIP_PRANGE 1 @@ -18,11 +16,21 @@ #define SKIP_Q_LB 0 #define SKIP_Q_STERN 1 +struct Result { + std::string alg_name; + std::map params; + double value; +}; + /***************************Classic ISDs***************************************/ -double isd_log_cost_classic_BJMM_approx(const uint32_t n, const uint32_t k, +Result isd_log_cost_classic_BJMM_approx(const uint32_t n, const uint32_t k, const uint32_t t) { - return ((double)t) * -log((1.0 - (double)k / (double)n)) / log(2); + Result result; + result.alg_name = "BJMM"; + result.params = {{"approx", true}}; + result.value = ((double)t) * -log((1.0 - (double)k / (double)n)) / log(2); + return result; } // computes the probability of a random k * k being invertible @@ -72,7 +80,7 @@ const NTL::RR Fin_Send_IS_candidate_cost(const NTL::RR &n, const NTL::RR &r, r * r; } -double isd_log_cost_classic_Prange(const uint32_t n, const uint32_t k, +Result isd_log_cost_classic_Prange(const uint32_t n, const uint32_t k, const uint32_t t) { NTL::RR n_real = NTL::RR(n); NTL::RR k_real = NTL::RR(k); @@ -83,11 +91,16 @@ double isd_log_cost_classic_Prange(const uint32_t n, const uint32_t k, NTL::to_RR(binomial_wrapper(n - k, t)); NTL::RR log_cost = log2_RR(num_iter) + log2_RR(cost_iter); - return NTL::conv(log_cost); + + Result res; + res.alg_name = "Prange"; + res.params = {}; + res.value = NTL::conv(log_cost); + return res; } #define P_MAX_LB 20 -double isd_log_cost_classic_LB(const uint32_t n, const uint32_t k, +Result isd_log_cost_classic_LB(const uint32_t n, const uint32_t k, const uint32_t t) { NTL::RR n_real = NTL::RR(n); NTL::RR k_real = NTL::RR(k); @@ -113,12 +126,16 @@ double isd_log_cost_classic_LB(const uint32_t n, const uint32_t k, } } spdlog::info("Lee-Brickell best p: {}", best_p); - return NTL::conv(min_log_cost); + Result res; + res.alg_name = "Lee-Brickell"; + res.params = {{"p", best_p}}; + res.value = NTL::conv(min_log_cost); + return res; } #define P_MAX_Leon P_MAX_LB #define L_MAX_Leon 
200 -double isd_log_cost_classic_Leon(const uint32_t n, const uint32_t k, +Result isd_log_cost_classic_Leon(const uint32_t n, const uint32_t k, const uint32_t t) { NTL::RR n_real = NTL::RR(n); NTL::RR k_real = NTL::RR(k); @@ -152,12 +169,16 @@ double isd_log_cost_classic_Leon(const uint32_t n, const uint32_t k, } } spdlog::info("Leon Best l {} best p: {}", best_l, best_p); - return NTL::conv(min_log_cost); + Result res; + res.alg_name = "Lee-Brickell"; + res.params = {{"p", best_p}, {"l", best_l}}; + res.value = NTL::conv(min_log_cost); + return res; } #define P_MAX_Stern P_MAX_Leon #define L_MAX_Stern L_MAX_Leon -double isd_log_cost_classic_Stern(const uint32_t n, const uint32_t k, +Result isd_log_cost_classic_Stern(const uint32_t n, const uint32_t k, const uint32_t t) { NTL::RR n_real = NTL::RR(n); NTL::RR k_real = NTL::RR(k); @@ -207,12 +228,16 @@ double isd_log_cost_classic_Stern(const uint32_t n, const uint32_t k, } spdlog::info("Stern Best l {}, best p: {}", best_l, best_p); - return NTL::conv(min_log_cost); + Result res; + res.alg_name = "Stern"; + res.params = {{"p", best_p}, {"l", best_l}}; + res.value = NTL::conv(min_log_cost); + return res; } #define P_MAX_FS P_MAX_Stern #define L_MAX_FS L_MAX_Stern -double isd_log_cost_classic_FS(const uint32_t n, const uint32_t k, +Result isd_log_cost_classic_FS(const uint32_t n, const uint32_t k, const uint32_t t) { NTL::RR n_real = NTL::RR(n); NTL::RR k_real = NTL::RR(k); @@ -263,13 +288,17 @@ double isd_log_cost_classic_FS(const uint32_t n, const uint32_t k, } } spdlog::info("FS Best l {}, best p: {}", best_l, best_p); - return NTL::conv(min_log_cost); + Result res; + res.alg_name = "Fin-Send"; + res.params = {{"p", best_p}, {"l", best_l}}; + res.value = NTL::conv(min_log_cost); + return res; } #define P_MAX_MMT (P_MAX_FS + 25) // P_MAX_MMT #define L_MAX_MMT 350 // L_MAX_MMT #define L_MIN_MMT 2 -double isd_log_cost_classic_MMT(const uint32_t n, const uint32_t k, +Result isd_log_cost_classic_MMT(const uint32_t n, const uint32_t k, const uint32_t t) { uint32_t r = n - k; NTL::RR n_real = NTL::RR(n); @@ -372,14 +401,18 @@ double isd_log_cost_classic_MMT(const uint32_t n, const uint32_t k, if (best_l == constrained_max_l) { spdlog::warn("Warning: l {l} on exploration edge!"); } - return NTL::conv(min_log_cost); + Result res; + res.alg_name = "MMT"; + res.params = {{"p", best_p}, {"l", best_l}}; + res.value = NTL::conv(min_log_cost); + return res; } #define P_MAX_BJMM 20 // P_MAX_MMT #define L_MAX_BJMM 90 // L_MAX_MMT #define Eps1_MAX_BJMM 4 #define Eps2_MAX_BJMM 4 -double isd_log_cost_classic_BJMM(const uint32_t n, const uint32_t k, +Result isd_log_cost_classic_BJMM(const uint32_t n, const uint32_t k, const uint32_t t) { NTL::RR n_real = NTL::RR(n); NTL::RR k_real = NTL::RR(k); @@ -389,8 +422,8 @@ double isd_log_cost_classic_BJMM(const uint32_t n, const uint32_t k, NTL::RR min_log_cost = n_real; // unreachable upper bound NTL::RR log_cost; - std::optional best_p, best_l, best_eps_1, best_eps_2, - constrained_max_l, constrained_max_p; + std::optional best_p, best_l, best_eps_1, best_eps_2; + uint32_t constrained_max_l, constrained_max_p; NTL::RR FS_IS_candidate_cost; constrained_max_p = P_MAX_BJMM > t ? 
t : P_MAX_BJMM; @@ -501,16 +534,21 @@ double isd_log_cost_classic_BJMM(const uint32_t n, const uint32_t k, } /* end for over eps1 */ } /* end for over p*/ - if (!best_l || !best_eps_1 || !constrained_max_l || !constrained_max_p) { + if (!best_l || !best_eps_1 || !best_p || !best_eps_2) { spdlog::error("Error: One or more variables are not initialized."); throw std::runtime_error("One or more variables are not initialized."); - } else { - spdlog::info("BJMM Best l {}, best p: {}, best eps1: {}, best eps2: {}", - optional_to_string(best_l), optional_to_string(best_eps_1), - optional_to_string(constrained_max_l), - optional_to_string(constrained_max_p)); } - return NTL::conv(min_log_cost); + spdlog::info("BJMM Best l {}, best p: {}, best eps1: {}, best eps2: {}", + optional_to_string(best_l), optional_to_string(best_p), + optional_to_string(best_eps_1), optional_to_string(best_eps_2)); + Result res; + res.alg_name = "BJMM"; + res.params = {{"p", best_p.value()}, + {"l", best_l.value()}, + {"eps1", best_eps_1.value()}, + {"eps2", best_eps_2.value()}}; + res.value = NTL::conv(min_log_cost); + return res; } /***************************Quantum ISDs***************************************/ @@ -520,33 +558,53 @@ const NTL::RR quantum_gauss_red_cost(const NTL::RR &n, const NTL::RR &k) { return 1.5 * NTL::power(n - k, 2) - 0.5 * (n - k); } -double isd_log_cost_quantum_LB(const uint32_t n, const uint32_t k, - const uint32_t t, const uint32_t p) { +#define P_MAX_Q_LB 3 // P_MAX_MMT +Result isd_log_cost_quantum_LB(const uint32_t n, const uint32_t k, + const uint32_t t) { NTL::RR n_real = NTL::RR(n); NTL::RR k_real = NTL::RR(k); NTL::RR t_real = NTL::RR(t); - NTL::RR p_real = NTL::RR(p); NTL::RR log_pi_fourths = NTL::log(pi * 0.25); NTL::RR log_pinv = log_probability_k_by_k_is_inv(k_real); /* Check https://doi.org/10.1007/978-3-031-61489-7_2 * for the full measures of the lee-brickell quantum attack */ - NTL::RR iteration_cost = quantum_gauss_red_cost(n_real, k_real) + - NTL::to_RR(binomial_wrapper(k, p)) * - NTL::log(n_real - k_real) / NTL::log(NTL::RR(2)); - NTL::RR log_cost = - log_pi_fourths + .5 * (lnBinom(n_real, t_real) - log_pinv - - (lnBinom(k_real, p_real) + - lnBinom(n_real - k_real, t_real - p_real))); - log_cost += NTL::log(iteration_cost); - log_cost = log_cost / NTL::log(NTL::RR(2)); - return NTL::conv(log_cost); + NTL::RR min_log_cost = n_real; // unreachable upper bound + uint32_t p; + std::optional best_p; + for (p = 1; p < P_MAX_Q_LB; p++) { + NTL::RR p_real = NTL::RR(p); + NTL::RR iteration_cost = quantum_gauss_red_cost(n_real, k_real) + + NTL::to_RR(binomial_wrapper(k, p)) * + NTL::log(n_real - k_real) / + NTL::log(NTL::RR(2)); + NTL::RR log_cost = + log_pi_fourths + .5 * (lnBinom(n_real, t_real) - log_pinv - + (lnBinom(k_real, p_real) + + lnBinom(n_real - k_real, t_real - p_real))); + log_cost += NTL::log(iteration_cost); + log_cost = log_cost / NTL::log(NTL::RR(2)); + if (log_cost < min_log_cost) { + min_log_cost = log_cost; + best_p = p; + } + } + if (!best_p) { + spdlog::error("Error: One or more variables are not initialized."); + throw std::runtime_error("One or more variables are not initialized."); + } + + Result res; + res.alg_name = "Quantum Lee-Brickell"; + res.params = {{"p", best_p.value()}}; + res.value = NTL::conv(min_log_cost); + return res; } #define MAX_M (t / 2) -double isd_log_cost_quantum_stern(const uint32_t n, const uint32_t k, +Result isd_log_cost_quantum_stern(const uint32_t n, const uint32_t k, const uint32_t t) { NTL::RR n_real = NTL::RR(n); NTL::RR k_real = 
NTL::RR(k); @@ -617,7 +675,11 @@ double isd_log_cost_quantum_stern(const uint32_t n, const uint32_t k, min_stern_complexity = current_complexity; } } - return NTL::conv(min_stern_complexity / NTL::log(NTL::RR(2.0))); + Result res; + res.alg_name = "Quantum Stern"; + res.params = {}; + res.value = NTL::conv(min_stern_complexity / NTL::log(NTL::RR(2.0))); + return res; } /***************************Aggregation ***************************************/ @@ -630,84 +692,109 @@ double get_qc_red_factor_log(const uint32_t qc_order, const uint32_t is_kra) { return qc_red_factor / logl(2); } -double c_isd_log_cost(const uint32_t n, const uint32_t k, const uint32_t t, +Result c_isd_log_cost(const uint32_t n, const uint32_t k, const uint32_t t, const uint32_t qc_order, const uint32_t is_kra, const bool compute_qc_reduction_factor) { - double min_cost, current_cost; + Result current_res, min_res; double qc_red_factor = compute_qc_reduction_factor ? get_qc_red_factor_log(qc_order, is_kra) : 0; - min_cost = std::numeric_limits::max(); + double min_cost = n; // the cost cannot be greater than 2^n #if SKIP_PRANGE == 0 - current_cost = isd_log_cost_classic_Prange(n, k, t) - qc_red_factor; - spdlog::info("Classic Prange: {:.5f}", current_cost); - min_cost = min_cost > current_cost ? current_cost : min_cost; + current_res = isd_log_cost_classic_Prange(n, k, t); + current_res.value -= qc_red_factor; + if (current_res.value < min_cost) { + min_res = current_res; + min_cost = current_res.value; + } #endif #if SKIP_LB == 0 - current_cost = isd_log_cost_classic_LB(n, k, t) - qc_red_factor; - spdlog::info("Classic Lee-Brickell: {:.5f}", current_cost); - min_cost = min_cost > current_cost ? current_cost : min_cost; + current_res = isd_log_cost_classic_LB(n, k, t); + current_res.value -= qc_red_factor; + if (current_res.value < min_cost) { + min_res = current_res; + min_cost = current_res.value; + } #endif #if SKIP_LEON == 0 - current_cost = isd_log_cost_classic_Leon(n, k, t) - qc_red_factor; - spdlog::info("Classic Leon: {:.5f}", current_cost); - min_cost = min_cost > current_cost ? current_cost : min_cost; + current_res = isd_log_cost_classic_Leon(n, k, t); + current_res.value -= qc_red_factor; + if (current_res.value < min_cost) { + min_res = current_res; + min_cost = current_res.value; + } #endif #if SKIP_STERN == 0 - current_cost = isd_log_cost_classic_Stern(n, k, t) - qc_red_factor; - spdlog::info("Classic Stern: {:.5f}", current_cost); - min_cost = min_cost > current_cost ? current_cost : min_cost; + current_res = isd_log_cost_classic_LB(n, k, t); + current_res.value -= qc_red_factor; + if (current_res.value < min_cost) { + min_res = current_res; + min_cost = current_res.value; + } #endif #if SKIP_FS == 0 - current_cost = isd_log_cost_classic_FS(n, k, t) - qc_red_factor; - spdlog::info("Classic Fin-Send: {:.5f}", current_cost); - min_cost = min_cost > current_cost ? current_cost : min_cost; + current_res = isd_log_cost_classic_FS(n, k, t); + current_res.value -= qc_red_factor; + if (current_res.value < min_cost) { + min_res = current_res; + min_cost = current_res.value; + } #endif #if SKIP_MMT == 0 - current_cost = isd_log_cost_classic_MMT(n, k, t) - qc_red_factor; - spdlog::info("Classic MMT: {:.5f}", current_cost); - min_cost = min_cost > current_cost ? 
current_cost : min_cost; + current_res = isd_log_cost_classic_MMT(n, k, t); + current_res.value -= qc_red_factor; + if (current_res.value < min_cost) { + min_res = current_res; + min_cost = current_res.value; + } #endif #if SKIP_BJMM == 0 - current_cost = isd_log_cost_classic_BJMM(n, k, t) - qc_red_factor; - spdlog::info("Classic BJMM: {:.5f}", current_cost); - min_cost = min_cost > current_cost ? current_cost : min_cost; + current_res = isd_log_cost_classic_LB(n, k, t); + current_res.value -= qc_red_factor; + if (current_res.value < min_cost) { + min_res = current_res; + min_cost = current_res.value; + } #endif - std::cout << std::endl; - return min_cost; + return min_res; } -double q_isd_log_cost(const uint32_t n, const uint32_t k, const uint32_t t, +Result q_isd_log_cost(const uint32_t n, const uint32_t k, const uint32_t t, const uint32_t qc_order, const uint32_t is_kra, const bool compute_qc_reduction_factor) { - double min_cost, current_cost; + Result current_res, min_res; + double min_cost = n; // cannot be greater than n double qc_red_factor = compute_qc_reduction_factor ? get_qc_red_factor_log(qc_order, is_kra) : 0; - min_cost = std::numeric_limits::max(); - /* This is just a quick hack since experiments says that p = 1 is * the optimal value at least for the NIST code-based finalists */ #if SKIP_Q_LB == 0 - current_cost = isd_log_cost_quantum_LB(n, k, t, 1) - qc_red_factor; - spdlog::info("Quantum Lee-Brickell: {:.5f}", current_cost); - min_cost = min_cost > current_cost ? current_cost : min_cost; + current_res = isd_log_cost_quantum_LB(n, k, t); + current_res.value -= qc_red_factor; + if (current_res.value < min_cost) { + min_res = current_res; + min_cost = current_res.value; + } #endif #if SKIP_Q_STERN == 0 - current_cost = isd_log_cost_quantum_stern(n, k, t) - qc_red_factor; - spdlog::info("Quantum Stern: {:.5f}", current_cost); - min_cost = min_cost > current_cost ? 
current_cost : min_cost; + current_res = isd_log_cost_classic_stern(n, k, t); + current_res.value -= qc_red_factor; + if (current_res.value < min_cost) { + min_res = current_res; + min_cost = current_res.value; + } #endif - return min_cost; + return min_res; } diff --git a/parameter_generator.cpp b/parameter_generator.cpp index 35b72c3..a39f6c4 100644 --- a/parameter_generator.cpp +++ b/parameter_generator.cpp @@ -33,9 +33,9 @@ uint32_t estimate_t_val(const uint32_t c_sec_level, const uint32_t q_sec_level, t = (lo + hi) / 2; std::cerr << "testing t " << t << std::endl; achieved_c_sec_level = - c_isd_log_cost(n_0 * p, ((n_0 - 1) * p), t, p, 0, true); + c_isd_log_cost(n_0 * p, ((n_0 - 1) * p), t, p, 0, true).value; achieved_q_sec_level = - q_isd_log_cost(n_0 * p, ((n_0 - 1) * p), t, p, 0, true); + q_isd_log_cost(n_0 * p, ((n_0 - 1) * p), t, p, 0, true).value; if ((achieved_c_sec_level >= c_sec_level) && (achieved_q_sec_level >= q_sec_level)) { hi = t; @@ -108,9 +108,9 @@ estimate_dv(const uint32_t c_sec_level, // expressed as /* last parameter indicates a KRA, reduce margin by p due to quasi cyclicity */ achieved_c_sec_level = - c_isd_log_cost(n_0 * p, p, n_0 * d_v_prime, p, 1, true); + c_isd_log_cost(n_0 * p, p, n_0 * d_v_prime, p, 1, true).value; achieved_q_sec_level = - q_isd_log_cost(n_0 * p, p, n_0 * d_v_prime, p, 1, true); + q_isd_log_cost(n_0 * p, p, n_0 * d_v_prime, p, 1, true).value; } } diff --git a/work_factor_computation.cpp b/work_factor_computation.cpp index aea3704..01f97a3 100644 --- a/work_factor_computation.cpp +++ b/work_factor_computation.cpp @@ -49,10 +49,10 @@ int main(int argc, char *argv[]) { std::cout << "Minimum classic cost :" << c_isd_log_cost(n, k, t, qc_block_size, is_kra, - is_red_factor_applied) + is_red_factor_applied).value << " Minimum quantum cost :" << q_isd_log_cost(n, k, t, qc_block_size, is_kra, - is_red_factor_applied); + is_red_factor_applied).value; if (is_red_factor_applied && qc_block_size != 1) std::cout << " (including qc_effects) "; std::cout << std::endl; diff --git a/work_factor_computation_parallel.cpp b/work_factor_computation_parallel.cpp index eed7d4e..6171326 100644 --- a/work_factor_computation_parallel.cpp +++ b/work_factor_computation_parallel.cpp @@ -17,6 +17,17 @@ #include "logging.hpp" #include +void to_json(nlohmann::json &j, const Result &r) { + j = nlohmann::json{ + {"alg_name", r.alg_name}, {"params", r.params}, {"value", r.value}}; +} + +void from_json(const nlohmann::json &j, Result &r) { + j.at("alg_name").get_to(r.alg_name); + j.at("params").get_to(r.params); + j.at("value").get_to(r.value); +} + int main() { // Configure the logger configure_logger(std::nullopt); @@ -50,17 +61,20 @@ int main() { // int v = entry["v"]; // int lambd = entry["lambd"]; + Result current_c_res; + Result current_q_res; + for (bool is_kra : is_kra_values) { spdlog::info("Processing n {}, k {}, t {}, qc_block_size {}, is_kra {}, " "is_red_factor_applied {}", n, k, t, qc_block_size, is_kra, is_red_factor_applied); - double min_c_cost = + current_c_res = c_isd_log_cost(n, k, t, qc_block_size, is_kra, is_red_factor_applied); - double min_q_cost = + current_q_res = q_isd_log_cost(n, k, t, qc_block_size, is_kra, is_red_factor_applied); nlohmann::json out_values; - out_values["C2"] = min_c_cost; - out_values["Q2"] = min_q_cost; + out_values["C2"] = current_c_res; + out_values["Q2"] = current_q_res; std::ostringstream oss; oss << std::setw(6) << std::setfill('0') << n << "_" << std::setw(6) From 9c2fa047dfe2b9698be3c075023a02fb25ed34a1 Mon Sep 17 00:00:00 
2001 From: Simone Perriello <8199216+tigerjack@users.noreply.github.com> Date: Sat, 20 Jul 2024 18:31:04 +0200 Subject: [PATCH 11/55] MOD Makefile, switch to C++20 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index b26530a..8c884bf 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,5 @@ CXX = g++ -CXXFLAGS=-O3 -g3 -std=c++17 -Wall -Wextra -Wno-sign-compare +CXXFLAGS=-O3 -g3 -std=c++20 -Wall -Wextra -Wno-sign-compare OMPFLAGS=-fopenmp LDLIBS= -lntl -lgmp -lm -lspdlog -lfmt BIN_DIR=bin From 0efdd71611ab4295b603b304e9283e6325d024a7 Mon Sep 17 00:00:00 2001 From: Simone Perriello <8199216+tigerjack@users.noreply.github.com> Date: Sat, 20 Jul 2024 18:32:51 +0200 Subject: [PATCH 12/55] ADD global paths --- globals.hpp | 9 +++++ work_factor_computation_parallel.cpp | 51 ++++++++++++++++++---------- 2 files changed, 42 insertions(+), 18 deletions(-) create mode 100644 globals.hpp diff --git a/globals.hpp b/globals.hpp new file mode 100644 index 0000000..779ae97 --- /dev/null +++ b/globals.hpp @@ -0,0 +1,9 @@ +#pragma once + +#include + +const std::string OUT_DIR_RESULTS = "out/results/json"; +// It seems impossible in C++ +// std::string OUT_FILE_RESULT_FMT_JSON = "{n:06}_{r:06}_{t:03}.json"; +const std::string LOG_DIR = "logs/"; +const std::string LOG_FILE_DEFAULT = LOG_DIR + "default.log"; diff --git a/work_factor_computation_parallel.cpp b/work_factor_computation_parallel.cpp index 6171326..8cbb78b 100644 --- a/work_factor_computation_parallel.cpp +++ b/work_factor_computation_parallel.cpp @@ -8,14 +8,16 @@ #include #include -#define NUM_BITS_REAL_MANTISSA 1024 -#define IGNORE_DECODING_COST 0 -// #define EXPLORE_REPRS - #include "binomials.hpp" #include "isd_cost_estimate.hpp" #include "logging.hpp" +#include "globals.hpp" #include +#include // Requires C++20 + +#define NUM_BITS_REAL_MANTISSA 1024 +#define IGNORE_DECODING_COST 0 +// #define EXPLORE_REPRS void to_json(nlohmann::json &j, const Result &r) { j = nlohmann::json{ @@ -30,11 +32,13 @@ void from_json(const nlohmann::json &j, Result &r) { int main() { // Configure the logger + configure_logger(std::nullopt); + // TODO take from input? std::ifstream file("out/isd_values.json"); - // Check if the file is open + // Check if the file is open if (!file.is_open()) { std::cerr << "Could not open the file!" 
<< std::endl; return 1; @@ -48,6 +52,19 @@ int main() { NTL::RR::SetPrecision(NUM_BITS_REAL_MANTISSA); pi = NTL::ComputePi_RR(); bool is_kra_values[] = {true, false}; + std::filesystem::path dirPath(OUT_DIR_RESULTS); + // Check if the directory exists + if (!std::filesystem::exists(dirPath)) { + // Try to create the directory, including parent directories + if (std::filesystem::create_directories(dirPath)) { + std::cout << "Directory created successfully: " << OUT_DIR_RESULTS + << std::endl; + } else { + std::cerr << "Failed to create directory: " << OUT_DIR_RESULTS + << std::endl; + return 1; // Return an error code + } + } // Iterate over the list of entries #pragma omp parallel for for (const auto &entry : j) { @@ -61,6 +78,11 @@ int main() { // int v = entry["v"]; // int lambd = entry["lambd"]; + std::string filename = + OUT_DIR_RESULTS + fmt::format("/{:06}_{:06}_{:03}.json", n, r, t); + + nlohmann::json out_values; + Result current_c_res; Result current_q_res; @@ -68,21 +90,15 @@ int main() { spdlog::info("Processing n {}, k {}, t {}, qc_block_size {}, is_kra {}, " "is_red_factor_applied {}", n, k, t, qc_block_size, is_kra, is_red_factor_applied); - current_c_res = + current_c_res = c_isd_log_cost(n, k, t, qc_block_size, is_kra, is_red_factor_applied); - current_q_res = + current_q_res = q_isd_log_cost(n, k, t, qc_block_size, is_kra, is_red_factor_applied); - nlohmann::json out_values; - out_values["C2"] = current_c_res; - out_values["Q2"] = current_q_res; - - std::ostringstream oss; - oss << std::setw(6) << std::setfill('0') << n << "_" << std::setw(6) - << std::setfill('0') << r << "_" << std::setw(3) << std::setfill('0') - << t << "_" << std::setw(1) << is_kra; - std::string filename = "out/" + oss.str() + ".json"; + std::string is_kra_name = is_kra ? "KRA": "MRA"; + out_values[is_kra_name]["C"] = current_c_res; + out_values[is_kra_name]["Q"] = current_q_res; + } - // Write the JSON object to the file std::ofstream file(filename); if (file.is_open()) { file << std::fixed << std::setprecision(10) @@ -92,7 +108,6 @@ int main() { } else { std::cerr << "Could not open the file!" 
<< std::endl; } - } } return 0; } From 19635db7d3f16f20e3e8d55e763ef1467c71bcef Mon Sep 17 00:00:00 2001 From: Simone Perriello <8199216+tigerjack@users.noreply.github.com> Date: Sat, 20 Jul 2024 18:33:56 +0200 Subject: [PATCH 13/55] FIX Wrong function calls --- isd_cost_estimate.hpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/isd_cost_estimate.hpp b/isd_cost_estimate.hpp index d275a88..2cb43fa 100644 --- a/isd_cost_estimate.hpp +++ b/isd_cost_estimate.hpp @@ -729,7 +729,7 @@ Result c_isd_log_cost(const uint32_t n, const uint32_t k, const uint32_t t, #endif #if SKIP_STERN == 0 - current_res = isd_log_cost_classic_LB(n, k, t); + current_res = isd_log_cost_classic_Stern(n, k, t); current_res.value -= qc_red_factor; if (current_res.value < min_cost) { min_res = current_res; @@ -756,7 +756,7 @@ Result c_isd_log_cost(const uint32_t n, const uint32_t k, const uint32_t t, #endif #if SKIP_BJMM == 0 - current_res = isd_log_cost_classic_LB(n, k, t); + current_res = isd_log_cost_classic_BJMM(n, k, t); current_res.value -= qc_red_factor; if (current_res.value < min_cost) { min_res = current_res; From da6979f4cf4da9a4cab18575577e6d63806e34cf Mon Sep 17 00:00:00 2001 From: Simone Perriello <8199216+tigerjack@users.noreply.github.com> Date: Sat, 20 Jul 2024 18:43:05 +0200 Subject: [PATCH 14/55] Minors --- globals.hpp | 2 +- work_factor_computation_parallel.cpp | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/globals.hpp b/globals.hpp index 779ae97..3e1dcf1 100644 --- a/globals.hpp +++ b/globals.hpp @@ -2,7 +2,7 @@ #include -const std::string OUT_DIR_RESULTS = "out/results/json"; +const std::string OUT_DIR_RESULTS = "out/results/json/"; // It seems impossible in C++ // std::string OUT_FILE_RESULT_FMT_JSON = "{n:06}_{r:06}_{t:03}.json"; const std::string LOG_DIR = "logs/"; diff --git a/work_factor_computation_parallel.cpp b/work_factor_computation_parallel.cpp index 8cbb78b..44783ad 100644 --- a/work_factor_computation_parallel.cpp +++ b/work_factor_computation_parallel.cpp @@ -35,12 +35,12 @@ int main() { configure_logger(std::nullopt); - // TODO take from input? - std::ifstream file("out/isd_values.json"); + const std::string input_isd_values = "out/isd_values.json"; + std::ifstream file(input_isd_values); // Check if the file is open if (!file.is_open()) { - std::cerr << "Could not open the file!" 
<< std::endl; + std::cerr << "Could not open the input file " << input_isd_values << std::endl; return 1; } @@ -79,7 +79,7 @@ int main() { // int lambd = entry["lambd"]; std::string filename = - OUT_DIR_RESULTS + fmt::format("/{:06}_{:06}_{:03}.json", n, r, t); + OUT_DIR_RESULTS + fmt::format("{:06}_{:06}_{:03}.json", n, r, t); nlohmann::json out_values; From f8fccf49de80142c02e11b70beaa089f1e3f4a26 Mon Sep 17 00:00:00 2001 From: Simone Perriello <8199216+tigerjack@users.noreply.github.com> Date: Mon, 22 Jul 2024 11:42:32 +0200 Subject: [PATCH 15/55] Minor includes updated --- isd_cost_estimate.hpp | 2 ++ logging.hpp | 2 +- work_factor_computation_parallel.cpp | 5 ++--- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/isd_cost_estimate.hpp b/isd_cost_estimate.hpp index 2cb43fa..5c22f79 100644 --- a/isd_cost_estimate.hpp +++ b/isd_cost_estimate.hpp @@ -5,6 +5,8 @@ #include #include #include +#include +#include #define SKIP_PRANGE 1 #define SKIP_LB 1 diff --git a/logging.hpp b/logging.hpp index 99ce090..420386d 100644 --- a/logging.hpp +++ b/logging.hpp @@ -1,9 +1,9 @@ #pragma once -#include #include #include #include #include +#include void configure_logger(const std::optional filename) { // Initialize the logger diff --git a/work_factor_computation_parallel.cpp b/work_factor_computation_parallel.cpp index 44783ad..86a441c 100644 --- a/work_factor_computation_parallel.cpp +++ b/work_factor_computation_parallel.cpp @@ -1,11 +1,10 @@ #include #include -#include #include // For std::setprecision #include +#include #include #include -#include #include #include "binomials.hpp" @@ -13,7 +12,7 @@ #include "logging.hpp" #include "globals.hpp" #include -#include // Requires C++20 +#include #define NUM_BITS_REAL_MANTISSA 1024 #define IGNORE_DECODING_COST 0 From 7dcbff8ba03a4edfb6d7d73098d4c78e20dd3d04 Mon Sep 17 00:00:00 2001 From: Simone Perriello <8199216+tigerjack@users.noreply.github.com> Date: Mon, 22 Jul 2024 12:04:42 +0200 Subject: [PATCH 16/55] ADD cmake file --- .gitignore | 18 +++++++++++++++++- CMakeLists.txt | 46 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 63 insertions(+), 1 deletion(-) create mode 100644 CMakeLists.txt diff --git a/.gitignore b/.gitignore index ef02118..d74b8d7 100644 --- a/.gitignore +++ b/.gitignore @@ -1,10 +1,26 @@ -# Warning! 
Ignore all files w/out extensions +# MINE # bin dir bin/ # Log output logs/ +# CMAKE +CMakeLists.txt.user +CMakeCache.txt +CMakeFiles +CMakeScripts +Testing +Makefile +cmake_install.cmake +install_manifest.txt +compile_commands.json +CTestTestfile.cmake +_deps +CMakeUserPresets.json + + +# C++ # Cache .ccls-cache # out diff --git a/CMakeLists.txt b/CMakeLists.txt new file mode 100644 index 0000000..1e8eb0c --- /dev/null +++ b/CMakeLists.txt @@ -0,0 +1,46 @@ +cmake_minimum_required(VERSION 3.10) + +# Project name and version +project(MyProject VERSION 1.0 LANGUAGES CXX) + +# Specify the C++ standard +set(CMAKE_CXX_STANDARD 20) +set(CMAKE_CXX_STANDARD_REQUIRED True) + +# Compiler options +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3 -g3 -Wall -Wextra -Wno-sign-compare") + +# Define the executable targets +set(TARGETS + constant_weight_encodable_bits + enumeration_complexity + parameter_generator + work_factor_computation +) + +# Include directories (if any) +# include_directories(${CMAKE_SOURCE_DIR}/include) + +# Libraries +find_package(OpenMP) +find_package(spdlog REQUIRED) +find_package(fmt REQUIRED) +find_library(gmp gmp) +find_library(ntl ntl) +find_library(m m) + +# Define the output directory +set(BIN_DIR ${CMAKE_BINARY_DIR}/bin) +file(MAKE_DIRECTORY ${BIN_DIR}) + +foreach(target ${TARGETS}) + add_executable(${target} ${target}.cpp) + set_target_properties(${target} PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${BIN_DIR}) + target_link_libraries(${target} ntl gmp m spdlog::spdlog fmt::fmt) +endforeach() + +# Special case for work_factor_computation_parallel with OpenMP +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fopenmp") +add_executable(work_factor_computation_parallel work_factor_computation_parallel.cpp) +set_target_properties(work_factor_computation_parallel PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${BIN_DIR}) +target_link_libraries(work_factor_computation_parallel ntl gmp m spdlog::spdlog fmt::fmt OpenMP::OpenMP_CXX) From f5473d17ab4fe11a2c38617070b3ffa1e561fdcc Mon Sep 17 00:00:00 2001 From: Simone Perriello <8199216+tigerjack@users.noreply.github.com> Date: Mon, 22 Jul 2024 12:06:54 +0200 Subject: [PATCH 17/55] Untrack Makefile --- Makefile | 23 ----------------------- 1 file changed, 23 deletions(-) delete mode 100644 Makefile diff --git a/Makefile b/Makefile deleted file mode 100644 index 8c884bf..0000000 --- a/Makefile +++ /dev/null @@ -1,23 +0,0 @@ -CXX = g++ -CXXFLAGS=-O3 -g3 -std=c++20 -Wall -Wextra -Wno-sign-compare -OMPFLAGS=-fopenmp -LDLIBS= -lntl -lgmp -lm -lspdlog -lfmt -BIN_DIR=bin -TARGETS=constant_weight_encodable_bits enumeration_complexity parameter_generator work_factor_computation work_factor_computation_parallel -BIN_TARGETS=$(addprefix $(BIN_DIR)/, $(TARGETS)) - -all: $(BIN_DIR) $(BIN_TARGETS) - -$(BIN_DIR): - mkdir -p $(BIN_DIR) - -$(BIN_DIR)/work_factor_computation_parallel: work_factor_computation_parallel.cpp - $(CXX) $(CXXFLAGS) $(OMPFLAGS) $< -o $@ $(LDLIBS) - -$(BIN_DIR)/%: %.cpp - $(CXX) $(CXXFLAGS) $< -o $@ $(LDLIBS) - -clean: - rm -f $(BIN_DIR)/* - rmdir $(BIN_DIR) - From 5bc3c51276c5d495562f33b6e3c0aceedc0394e9 Mon Sep 17 00:00:00 2001 From: Simone Perriello <8199216+tigerjack@users.noreply.github.com> Date: Mon, 22 Jul 2024 13:55:40 +0200 Subject: [PATCH 18/55] MOD CMakeLists - Handle spdlog and fmt not installed on system --- CMakeLists.txt | 38 ++++++++++++++++++++++++++++++++++---- 1 file changed, 34 insertions(+), 4 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 1e8eb0c..334361f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -17,18 +17,47 
@@ set(TARGETS parameter_generator work_factor_computation ) +# Define libraries +set(LIBS ntl gmp m ) # Include directories (if any) # include_directories(${CMAKE_SOURCE_DIR}/include) # Libraries find_package(OpenMP) -find_package(spdlog REQUIRED) -find_package(fmt REQUIRED) find_library(gmp gmp) find_library(ntl ntl) find_library(m m) +find_package(spdlog QUIET) +# If spdlog is not found, add the local include directory +if (NOT spdlog_FOUND) + message(WARNING "spdlog not found, using local include directory at $ENV{HOME}/vc/spdlog/include") + include_directories($ENV{HOME}/vc/spdlog/include) + link_directories($ENV{HOME}/vc/spdlog/build) + list(APPEND LIBS libspdlog.a) +else() + list(APPEND LIBS spdlog) + set(LIBNAMES "spdlog") +endif() + +find_package(fmt QUIET) +if (NOT fmt_FOUND) + message(WARNING "fmt not found, using local include directory at $ENV{HOME}/vc/fmt/include") + include_directories($ENV{HOME}/vc/fmt/include) + link_directories($ENV{HOME}/vc/fmt/build) + list(APPEND LIBS libfmt.a) +else() + message(STATUS "fmt found: ${fmt_DIR}") + list(APPEND LIBS fmt) +endif() + + +# Print include directories to verify +get_directory_property(dirs INCLUDE_DIRECTORIES) +message(STATUS "Include directories: ${dirs}") +message(STATUS "Libraries: ${LIBS}") + # Define the output directory set(BIN_DIR ${CMAKE_BINARY_DIR}/bin) file(MAKE_DIRECTORY ${BIN_DIR}) @@ -36,11 +65,12 @@ file(MAKE_DIRECTORY ${BIN_DIR}) foreach(target ${TARGETS}) add_executable(${target} ${target}.cpp) set_target_properties(${target} PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${BIN_DIR}) - target_link_libraries(${target} ntl gmp m spdlog::spdlog fmt::fmt) + target_link_libraries(${target} ${LIBS}) endforeach() # Special case for work_factor_computation_parallel with OpenMP set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fopenmp") +list(APPEND LIBS OpenMP::OpenMP_CXX) add_executable(work_factor_computation_parallel work_factor_computation_parallel.cpp) set_target_properties(work_factor_computation_parallel PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${BIN_DIR}) -target_link_libraries(work_factor_computation_parallel ntl gmp m spdlog::spdlog fmt::fmt OpenMP::OpenMP_CXX) +target_link_libraries(work_factor_computation_parallel ${LIBS}) From ce3a335537f49da06a52921ee47547f8f08fc71a Mon Sep 17 00:00:00 2001 From: Simone Perriello <8199216+tigerjack@users.noreply.github.com> Date: Mon, 22 Jul 2024 13:59:44 +0200 Subject: [PATCH 19/55] FIX maybe_unitnitialized warnings --- parameter_generator.cpp | 37 +++++++++++++++++-------------------- 1 file changed, 17 insertions(+), 20 deletions(-) diff --git a/parameter_generator.cpp b/parameter_generator.cpp index a39f6c4..d420e27 100644 --- a/parameter_generator.cpp +++ b/parameter_generator.cpp @@ -51,8 +51,7 @@ uint32_t estimate_t_val(const uint32_t c_sec_level, const uint32_t q_sec_level, } int ComputeDvMPartition(const uint64_t d_v_prime, const uint64_t n_0, - std::vector &mpartition, - uint64_t &d_v) { + std::vector &mpartition, uint64_t &d_v) { d_v = floor(sqrt(d_v_prime)); d_v = (d_v & 0x01) ? 
d_v : d_v + 1; uint64_t m = ceil((double)d_v_prime / (double)d_v); @@ -68,10 +67,9 @@ int ComputeDvMPartition(const uint64_t d_v_prime, const uint64_t n_0, return partition_ok; } -uint64_t -estimate_dv(const uint32_t c_sec_level, // expressed as - const uint32_t q_sec_level, const uint32_t n_0, const uint32_t p, - std::vector &mpartition) { +uint64_t estimate_dv(const uint32_t c_sec_level, // expressed as + const uint32_t q_sec_level, const uint32_t n_0, + const uint32_t p, std::vector &mpartition) { double achieved_c_sec_level = 0.0; double achieved_q_sec_level = 0.0; double achieved_c_enum_sec_level = 0.0; @@ -205,8 +203,7 @@ int main(int argc, char *argv[]) { std::cout << "refining parameters" << std::endl; - std::optional p_ok; - uint64_t t_ok, d_v_ok; + std::optional p_ok, t_ok, d_v_ok; std::vector mpartition_ok(n_0, 0); /* refinement step taking into account possible invalid m partitions */ @@ -246,20 +243,20 @@ int main(int argc, char *argv[]) { current_prime_pos--; } while ((p > (1.0 + epsilon) * p_th) && (current_prime_pos > 0)); - if (!p_ok || !d_v_ok) { + if (!p_ok || !d_v_ok || !t_ok) { spdlog::error("Error: One or more variables are not initialized."); throw std::runtime_error("One or more variables are not initialized."); } else { spdlog::info("parameter set found: p={}, t={}, d_v={}, mpartition={}", - optional_to_string(p_ok), t_ok, optional_to_string(d_v_ok), - array_to_string(mpartition_ok)); - } - // std::cout - // << " p:" << p_ok << " t: " << t_ok; - // std::cout << " d_v : " << d_v_ok << " mpartition: [ "; - // for (unsigned i = 0; i < n_0; i++) { - // std::cout << mpartition_ok[i] << " "; - // } - // std::cout << " ]" << std::endl; - return 0; + optional_to_string(p_ok), optional_to_string(t_ok), + optional_to_string(d_v_ok), array_to_string(mpartition_ok)); } + // std::cout + // << " p:" << p_ok << " t: " << t_ok; + // std::cout << " d_v : " << d_v_ok << " mpartition: [ "; + // for (unsigned i = 0; i < n_0; i++) { + // std::cout << mpartition_ok[i] << " "; + // } + // std::cout << " ]" << std::endl; + return 0; +} From 9614f62f864c4162319dda192ce8693e9f228554 Mon Sep 17 00:00:00 2001 From: Simone Perriello <8199216+tigerjack@users.noreply.github.com> Date: Mon, 22 Jul 2024 17:16:27 +0200 Subject: [PATCH 20/55] DEL hard-coded local libraries path from cmake --- CMakeLists.txt | 30 ++++-------------------------- 1 file changed, 4 insertions(+), 26 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 334361f..a9949d7 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -18,40 +18,18 @@ set(TARGETS work_factor_computation ) # Define libraries -set(LIBS ntl gmp m ) +set(LIBS ntl gmp m spdlog fmt) # Include directories (if any) # include_directories(${CMAKE_SOURCE_DIR}/include) # Libraries -find_package(OpenMP) find_library(gmp gmp) find_library(ntl ntl) find_library(m m) - -find_package(spdlog QUIET) -# If spdlog is not found, add the local include directory -if (NOT spdlog_FOUND) - message(WARNING "spdlog not found, using local include directory at $ENV{HOME}/vc/spdlog/include") - include_directories($ENV{HOME}/vc/spdlog/include) - link_directories($ENV{HOME}/vc/spdlog/build) - list(APPEND LIBS libspdlog.a) -else() - list(APPEND LIBS spdlog) - set(LIBNAMES "spdlog") -endif() - -find_package(fmt QUIET) -if (NOT fmt_FOUND) - message(WARNING "fmt not found, using local include directory at $ENV{HOME}/vc/fmt/include") - include_directories($ENV{HOME}/vc/fmt/include) - link_directories($ENV{HOME}/vc/fmt/build) - list(APPEND LIBS libfmt.a) -else() - message(STATUS 
"fmt found: ${fmt_DIR}") - list(APPEND LIBS fmt) -endif() - +find_package(OpenMP REQUIRED) +find_package(spdlog REQUIRED) +find_package(fmt REQUIRED) # Print include directories to verify get_directory_property(dirs INCLUDE_DIRECTORIES) From 34ff15df12402a9edce7293560e978afd886c1a7 Mon Sep 17 00:00:00 2001 From: Simone Perriello <8199216+tigerjack@users.noreply.github.com> Date: Wed, 24 Jul 2024 10:03:50 +0200 Subject: [PATCH 21/55] FIX log2 pinv computation --- isd_cost_estimate.hpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/isd_cost_estimate.hpp b/isd_cost_estimate.hpp index 5c22f79..edf8abc 100644 --- a/isd_cost_estimate.hpp +++ b/isd_cost_estimate.hpp @@ -37,11 +37,11 @@ Result isd_log_cost_classic_BJMM_approx(const uint32_t n, const uint32_t k, // computes the probability of a random k * k being invertible const NTL::RR log_probability_k_by_k_is_inv(const NTL::RR &k) { - NTL::RR log_pinv = NTL::RR(0.5); + NTL::RR log_pinv = NTL::RR(-1); for (long i = 2; i <= k; i++) { - log_pinv = log_pinv * (NTL::RR(1) - NTL::power2_RR(-i)); + log_pinv = log_pinv + NTL::log(NTL::RR(1) - NTL::power2_RR(-i))/NTL::log(2); } - return NTL::log(log_pinv); + return log_pinv; } const NTL::RR probability_k_by_k_is_inv(const NTL::RR &k) { From 907dc34c415ffd145690d72755771e3ee96273a3 Mon Sep 17 00:00:00 2001 From: Simone Perriello <8199216+tigerjack@users.noreply.github.com> Date: Wed, 24 Jul 2024 10:14:09 +0200 Subject: [PATCH 22/55] ADD return fixed value for pinv computation --- isd_cost_estimate.hpp | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/isd_cost_estimate.hpp b/isd_cost_estimate.hpp index edf8abc..2489aed 100644 --- a/isd_cost_estimate.hpp +++ b/isd_cost_estimate.hpp @@ -37,14 +37,19 @@ Result isd_log_cost_classic_BJMM_approx(const uint32_t n, const uint32_t k, // computes the probability of a random k * k being invertible const NTL::RR log_probability_k_by_k_is_inv(const NTL::RR &k) { + if (k >= 100) + return NTL::RR(-1.79191682); NTL::RR log_pinv = NTL::RR(-1); for (long i = 2; i <= k; i++) { - log_pinv = log_pinv + NTL::log(NTL::RR(1) - NTL::power2_RR(-i))/NTL::log(2); + log_pinv = + log_pinv + NTL::log(NTL::RR(1) - NTL::power2_RR(-i)) / NTL::log(2); } return log_pinv; } const NTL::RR probability_k_by_k_is_inv(const NTL::RR &k) { + if (k >= 100) + return NTL::RR(0.288788095); NTL::RR log_pinv = NTL::RR(0.5); for (long i = 2; i <= k; i++) { log_pinv = log_pinv * (NTL::RR(1) - NTL::power2_RR(-i)); From 700239e60367ffe38755d3d08561363bd87728fe Mon Sep 17 00:00:00 2001 From: Simone Perriello <8199216+tigerjack@users.noreply.github.com> Date: Wed, 24 Jul 2024 10:24:18 +0200 Subject: [PATCH 23/55] ADD log(2) magic number --- isd_cost_estimate.hpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/isd_cost_estimate.hpp b/isd_cost_estimate.hpp index 2489aed..5c9a698 100644 --- a/isd_cost_estimate.hpp +++ b/isd_cost_estimate.hpp @@ -39,10 +39,11 @@ Result isd_log_cost_classic_BJMM_approx(const uint32_t n, const uint32_t k, const NTL::RR log_probability_k_by_k_is_inv(const NTL::RR &k) { if (k >= 100) return NTL::RR(-1.79191682); + NTL::RR log_2 = NTL::RR(0.69314718); NTL::RR log_pinv = NTL::RR(-1); for (long i = 2; i <= k; i++) { log_pinv = - log_pinv + NTL::log(NTL::RR(1) - NTL::power2_RR(-i)) / NTL::log(2); + log_pinv + NTL::log(NTL::RR(1) - NTL::power2_RR(-i)) / log_2; } return log_pinv; } From 7e1a4f5c8cf04264f7ef702e8a0fa40eb439836d Mon Sep 17 00:00:00 2001 From: Simone Perriello <8199216+tigerjack@users.noreply.github.com> Date: 
Wed, 24 Jul 2024 17:17:30 +0200 Subject: [PATCH 24/55] MOD use log2_RR function instead of magic number --- isd_cost_estimate.hpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/isd_cost_estimate.hpp b/isd_cost_estimate.hpp index 5c9a698..eafb7dc 100644 --- a/isd_cost_estimate.hpp +++ b/isd_cost_estimate.hpp @@ -39,11 +39,10 @@ Result isd_log_cost_classic_BJMM_approx(const uint32_t n, const uint32_t k, const NTL::RR log_probability_k_by_k_is_inv(const NTL::RR &k) { if (k >= 100) return NTL::RR(-1.79191682); - NTL::RR log_2 = NTL::RR(0.69314718); NTL::RR log_pinv = NTL::RR(-1); for (long i = 2; i <= k; i++) { log_pinv = - log_pinv + NTL::log(NTL::RR(1) - NTL::power2_RR(-i)) / log_2; + log_pinv + log2_RR(NTL::RR(1) - NTL::power2_RR(-i)); } return log_pinv; } From 60c7ad511fab32d5229f7c4f019ab427351273b4 Mon Sep 17 00:00:00 2001 From: Simone Perriello <8199216+tigerjack@users.noreply.github.com> Date: Wed, 24 Jul 2024 17:19:48 +0200 Subject: [PATCH 25/55] MOD result to return also GJE cost --- isd_cost_estimate.hpp | 82 +++++++++++++++++----------- work_factor_computation_parallel.cpp | 3 +- 2 files changed, 52 insertions(+), 33 deletions(-) diff --git a/isd_cost_estimate.hpp b/isd_cost_estimate.hpp index eafb7dc..3117397 100644 --- a/isd_cost_estimate.hpp +++ b/isd_cost_estimate.hpp @@ -17,11 +17,13 @@ #define SKIP_MMT 1 #define SKIP_Q_LB 0 #define SKIP_Q_STERN 1 + struct Result { std::string alg_name; std::map params; double value; + double gje_cost; }; /***************************Classic ISDs***************************************/ @@ -65,9 +67,10 @@ const NTL::RR classic_rref_red_cost(const NTL::RR &n, const NTL::RR &r) { r * r * r / NTL::RR(6) + r * r + r / NTL::RR(6) - NTL::RR(1); } -const NTL::RR classic_IS_candidate_cost(const NTL::RR &n, const NTL::RR &r) { - return classic_rref_red_cost(n, r) / probability_k_by_k_is_inv(r) + r * r; -} +// const NTL::RR classic_IS_candidate_cost(const NTL::RR &n, const NTL::RR &r) { +// NB: r* r should be added only for SDP, and even there it can be omitted since the syndrome can be thought as another column of H +// return classic_rref_red_cost(n, r) / probability_k_by_k_is_inv(r) + r * r; +// } const NTL::RR Fin_Send_rref_red_cost(const NTL::RR &n, const NTL::RR &r, const NTL::RR l) { @@ -81,11 +84,11 @@ const NTL::RR Fin_Send_rref_red_cost(const NTL::RR &n, const NTL::RR &r, r * r + r / NTL::RR(6) - NTL::RR(1); } -const NTL::RR Fin_Send_IS_candidate_cost(const NTL::RR &n, const NTL::RR &r, - const NTL::RR &l) { - return Fin_Send_rref_red_cost(n, r, l) / probability_k_by_k_is_inv(r - l) + - r * r; -} +// const NTL::RR Fin_Send_IS_candidate_cost(const NTL::RR &n, const NTL::RR &r, +// const NTL::RR &l) { +// return Fin_Send_rref_red_cost(n, r, l) / probability_k_by_k_is_inv(r - l) + +// r * r; +// } Result isd_log_cost_classic_Prange(const uint32_t n, const uint32_t k, const uint32_t t) { @@ -93,16 +96,18 @@ Result isd_log_cost_classic_Prange(const uint32_t n, const uint32_t k, NTL::RR k_real = NTL::RR(k); NTL::RR t_real = NTL::RR(t); - NTL::RR cost_iter = classic_IS_candidate_cost(n_real, n_real - k_real); + // NTL::RR cost_iter = classic_IS_candidate_cost(n_real, n_real - k_real); + NTL::RR cost_gje = classic_rref_red_cost(n_real, k_real); NTL::RR num_iter = NTL::to_RR(binomial_wrapper(n, t)) / NTL::to_RR(binomial_wrapper(n - k, t)); - NTL::RR log_cost = log2_RR(num_iter) + log2_RR(cost_iter); + NTL::RR log_cost = log2_RR(num_iter) - log_probability_k_by_k_is_inv(n_real - k_real) + log2_RR(cost_gje); Result res; res.alg_name = 
"Prange"; res.params = {}; res.value = NTL::conv(log_cost); + res.gje_cost = NTL::conv(log2_RR(cost_gje)); return res; } @@ -116,12 +121,14 @@ Result isd_log_cost_classic_LB(const uint32_t n, const uint32_t k, NTL::RR log_cost; uint32_t best_p = 1; uint32_t constrained_max_p = P_MAX_LB > t ? t : P_MAX_LB; - NTL::RR IS_candidate_cost; - IS_candidate_cost = classic_IS_candidate_cost(n_real, n_real - k_real); + + NTL::RR cost_gje = classic_rref_red_cost(n_real, k_real); + // IS_candidate_cost = classic_IS_candidate_cost(n_real, n_real - k_real); + for (uint32_t p = 1; p < constrained_max_p; p++) { NTL::RR p_real = NTL::RR(p); - NTL::RR cost_iter = - IS_candidate_cost + NTL::to_RR(binomial_wrapper(k, p) * p * (n - k)); + NTL::RR cost_iter = cost_gje / probability_k_by_k_is_inv(n_real - k_real) + + NTL::to_RR(binomial_wrapper(k, p) * p * (n - k)); NTL::RR num_iter = NTL::to_RR(binomial_wrapper(n, t)) / NTL::to_RR(binomial_wrapper(k, p) * binomial_wrapper(n - k, t - p)); @@ -137,6 +144,7 @@ Result isd_log_cost_classic_LB(const uint32_t n, const uint32_t k, res.alg_name = "Lee-Brickell"; res.params = {{"p", best_p}}; res.value = NTL::conv(min_log_cost); + res.gje_cost = NTL::conv(log2_RR(cost_gje)); return res; } @@ -151,8 +159,8 @@ Result isd_log_cost_classic_Leon(const uint32_t n, const uint32_t k, NTL::RR log_cost; uint32_t best_l = 0, best_p = 1, constrained_max_l, constrained_max_p; - NTL::RR IS_candidate_cost; - IS_candidate_cost = classic_IS_candidate_cost(n_real, n_real - k_real); + NTL::RR gje_cost = classic_rref_red_cost(n_real, n_real - k_real); + // IS_candidate_cost = classic_IS_candidate_cost(n_real, n_real - k_real); constrained_max_p = P_MAX_Leon > t ? t : P_MAX_Leon; for (uint32_t p = 1; p < constrained_max_p; p++) { constrained_max_l = @@ -161,7 +169,7 @@ Result isd_log_cost_classic_Leon(const uint32_t n, const uint32_t k, for (uint32_t l = 0; l < constrained_max_l; l++) { NTL::RR KChooseP = NTL::to_RR(binomial_wrapper(k, p)); NTL::RR cost_iter = - IS_candidate_cost + KChooseP * p_real * NTL::to_RR(l) + + gje_cost / probability_k_by_k_is_inv(n_real - k_real) + KChooseP * p_real * NTL::to_RR(l) + (KChooseP / NTL::power2_RR(l)) * NTL::RR(p * (n - k - l)); NTL::RR num_iter = NTL::to_RR(binomial_wrapper(n, t)) / NTL::to_RR(binomial_wrapper(k, p) * @@ -180,6 +188,7 @@ Result isd_log_cost_classic_Leon(const uint32_t n, const uint32_t k, res.alg_name = "Lee-Brickell"; res.params = {{"p", best_p}, {"l", best_l}}; res.value = NTL::conv(min_log_cost); + res.gje_cost = NTL::conv(log2_RR(gje_cost)); return res; } @@ -194,8 +203,8 @@ Result isd_log_cost_classic_Stern(const uint32_t n, const uint32_t k, NTL::RR log_cost; uint32_t best_l = 0, best_p = 2, constrained_max_l, constrained_max_p; - NTL::RR IS_candidate_cost; - IS_candidate_cost = classic_IS_candidate_cost(n_real, n_real - k_real); + NTL::RR gje_cost = classic_rref_red_cost(n_real, n_real - k_real); + // IS_candidate_cost = classic_IS_candidate_cost(n_real, n_real - k_real); constrained_max_p = P_MAX_Stern > t ? 
t : P_MAX_Stern; for (uint32_t p = 2; p < constrained_max_p; p = p + 2) { @@ -208,7 +217,7 @@ Result isd_log_cost_classic_Stern(const uint32_t n, const uint32_t k, NTL::RR kHalfChoosePHalf_real = NTL::to_RR(kHalfChoosePHalf); NTL::RR cost_iter = - IS_candidate_cost + + gje_cost/ probability_k_by_k_is_inv(n_real - k_real) + kHalfChoosePHalf_real * (NTL::to_RR(l) * p_real + (kHalfChoosePHalf_real / NTL::power2_RR(l)) * NTL::RR(p * (n - k - l))); @@ -239,6 +248,7 @@ Result isd_log_cost_classic_Stern(const uint32_t n, const uint32_t k, res.alg_name = "Stern"; res.params = {{"p", best_p}, {"l", best_l}}; res.value = NTL::conv(min_log_cost); + res.gje_cost = NTL::conv(log2_RR(gje_cost)); return res; } @@ -253,7 +263,8 @@ Result isd_log_cost_classic_FS(const uint32_t n, const uint32_t k, NTL::RR log_cost; uint32_t best_l = 0, best_p = 2, constrained_max_l, constrained_max_p; - NTL::RR IS_candidate_cost; + NTL::RR cost_gje; +// return Fin_Send_rref_red_cost(n, r, l) / probability_k_by_k_is_inv(r - l) + constrained_max_p = P_MAX_Stern > t ? t : P_MAX_Stern; for (uint32_t p = 2; p < constrained_max_p; p = p + 2) { constrained_max_l = @@ -261,18 +272,18 @@ Result isd_log_cost_classic_FS(const uint32_t n, const uint32_t k, NTL::RR p_real = NTL::RR(p); NTL::ZZ kPlusLHalfChoosePHalf; for (uint32_t l = 0; l < constrained_max_l; l++) { - IS_candidate_cost = - Fin_Send_IS_candidate_cost(n_real, n_real - k_real, NTL::RR(l)); + NTL::RR l_real = NTL::RR(l); + cost_gje = + Fin_Send_rref_red_cost(n_real, n_real - k_real, l_real); kPlusLHalfChoosePHalf = binomial_wrapper((k + l) / 2, p / 2); NTL::RR kPlusLHalfChoosePHalf_real = NTL::to_RR(kPlusLHalfChoosePHalf); NTL::RR cost_iter = - IS_candidate_cost + + cost_gje / probability_k_by_k_is_inv(n_real - k_real - l_real) + kPlusLHalfChoosePHalf_real * (NTL::to_RR(l) * p_real + (kPlusLHalfChoosePHalf_real / NTL::power2_RR(l)) * NTL::RR(p * (n - k - l))); // #if LOG_COST_CRITERION == 1 - NTL::RR l_real = NTL::to_RR(l); NTL::RR log_FS_list_size = kPlusLHalfChoosePHalf_real * (p_real / NTL::RR(2) * NTL::log((k_real + l_real) / NTL::RR(2)) / @@ -299,6 +310,8 @@ Result isd_log_cost_classic_FS(const uint32_t n, const uint32_t k, res.alg_name = "Fin-Send"; res.params = {{"p", best_p}, {"l", best_l}}; res.value = NTL::conv(min_log_cost); + res.gje_cost = NTL::conv(log2_RR(cost_gje)); + //cost_gje not reported return res; } @@ -320,7 +333,7 @@ Result isd_log_cost_classic_MMT(const uint32_t n, const uint32_t k, uint32_t best_l1; #endif - NTL::RR FS_IS_candidate_cost; + NTL::RR cost_gje; constrained_max_p = P_MAX_MMT > t ? 
t : P_MAX_MMT; /* p should be divisible by 4 in MMT */ for (uint32_t p = 4; p <= constrained_max_p; p = p + 4) { @@ -333,7 +346,8 @@ Result isd_log_cost_classic_MMT(const uint32_t n, const uint32_t k, NTL::to_RR(binomial_wrapper(n, t)) / NTL::to_RR(kPlusLHalfChoosePHalf * kPlusLHalfChoosePHalf * binomial_wrapper(n - k - l, t - p)); - FS_IS_candidate_cost = Fin_Send_IS_candidate_cost(n_real, r_real, l_real); + // FS_IS_candidate_cost = Fin_Send_IS_candidate_cost(n_real, r_real, l_real); + cost_gje = Fin_Send_rref_red_cost(n_real, n_real - k_real, l_real); NTL::ZZ kPlusLHalfChoosePFourths = binomial_wrapper((k + l) / 2, p / 4); NTL::RR kPlusLHalfChoosePFourths_real = NTL::to_RR(kPlusLHalfChoosePFourths); @@ -365,7 +379,7 @@ Result isd_log_cost_classic_MMT(const uint32_t n, const uint32_t k, NTL::RR otherFactor = (NTL::to_RR(p / 4 * l_2) + interm); NTL::RR cost_iter = - FS_IS_candidate_cost + min * otherFactor + + cost_gje/probability_k_by_k_is_inv(n_real - k_real - l_real) + min * otherFactor + kPlusLHalfChoosePFourths_real * NTL::to_RR(p / 2 * l_2); NTL::RR lastAddend = @@ -412,6 +426,7 @@ Result isd_log_cost_classic_MMT(const uint32_t n, const uint32_t k, res.alg_name = "MMT"; res.params = {{"p", best_p}, {"l", best_l}}; res.value = NTL::conv(min_log_cost); + res.gje_cost = NTL::conv(log2_RR(cost_gje)); return res; } @@ -432,7 +447,7 @@ Result isd_log_cost_classic_BJMM(const uint32_t n, const uint32_t k, std::optional best_p, best_l, best_eps_1, best_eps_2; uint32_t constrained_max_l, constrained_max_p; - NTL::RR FS_IS_candidate_cost; + NTL::RR cost_gje; constrained_max_p = P_MAX_BJMM > t ? t : P_MAX_BJMM; /*p should be divisible by 2 in BJMM */ for (uint32_t p = 2; p < constrained_max_p; p = p + 2) { @@ -450,8 +465,10 @@ Result isd_log_cost_classic_BJMM(const uint32_t n, const uint32_t k, /* Available parameters p, p_1,p_2,p_3, l */ NTL::RR l_real = NTL::RR(l); - FS_IS_candidate_cost = - Fin_Send_IS_candidate_cost(n_real, n_real - k_real, l_real); + cost_gje = Fin_Send_rref_red_cost(n_real, n_real - k_real, l_real); + // TODO check why this cost (or the rref cost) is never used + // FS_IS_candidate_cost = + // Fin_Send_IS_candidate_cost(n_real, n_real - k_real, l_real); uint32_t p_3 = p_2 / 2; NTL::ZZ L3_list_len = binomial_wrapper((k + l) / 2, p_3); @@ -555,6 +572,7 @@ Result isd_log_cost_classic_BJMM(const uint32_t n, const uint32_t k, {"eps1", best_eps_1.value()}, {"eps2", best_eps_2.value()}}; res.value = NTL::conv(min_log_cost); + res.gje_cost = NTL::conv(log2_RR(cost_gje)); return res; } @@ -795,7 +813,7 @@ Result q_isd_log_cost(const uint32_t n, const uint32_t k, const uint32_t t, #endif #if SKIP_Q_STERN == 0 - current_res = isd_log_cost_classic_stern(n, k, t); + current_res = isd_log_cost_classic_Stern(n, k, t); current_res.value -= qc_red_factor; if (current_res.value < min_cost) { min_res = current_res; diff --git a/work_factor_computation_parallel.cpp b/work_factor_computation_parallel.cpp index 86a441c..3802c1b 100644 --- a/work_factor_computation_parallel.cpp +++ b/work_factor_computation_parallel.cpp @@ -20,13 +20,14 @@ void to_json(nlohmann::json &j, const Result &r) { j = nlohmann::json{ - {"alg_name", r.alg_name}, {"params", r.params}, {"value", r.value}}; + {"alg_name", r.alg_name}, {"params", r.params}, {"value", r.value}, {"gje_cost", r.gje_cost}}; } void from_json(const nlohmann::json &j, Result &r) { j.at("alg_name").get_to(r.alg_name); j.at("params").get_to(r.params); j.at("value").get_to(r.value); + j.at("gje_cost").get_to(r.gje_cost); } int main() { From 
cb5a4bdc4b0e63c97285ad9f29ae1cf2bb817880 Mon Sep 17 00:00:00 2001 From: Simone Perriello <8199216+tigerjack@users.noreply.github.com> Date: Thu, 25 Jul 2024 13:57:00 +0200 Subject: [PATCH 26/55] MOD project structure --- .gitignore | 4 +- CMakeLists.txt | 52 ++++--------------- binomials.hpp => include/binomials.hpp | 0 .../bit_error_probabilities.hpp | 0 globals.hpp => include/globals.hpp | 0 .../isd_cost_estimate.hpp | 0 logging.hpp => include/logging.hpp | 0 .../partitions_permanents.hpp | 0 .../proper_primes.hpp | 0 src/CMakeLists.txt | 32 ++++++++++++ .../constant_weight_encodable_bits.cpp | 0 .../enumeration_complexity.cpp | 0 .../parameter_generator.cpp | 0 .../work_factor_computation.cpp | 0 .../work_factor_computation_parallel.cpp | 0 15 files changed, 43 insertions(+), 45 deletions(-) rename binomials.hpp => include/binomials.hpp (100%) rename bit_error_probabilities.hpp => include/bit_error_probabilities.hpp (100%) rename globals.hpp => include/globals.hpp (100%) rename isd_cost_estimate.hpp => include/isd_cost_estimate.hpp (100%) rename logging.hpp => include/logging.hpp (100%) rename partitions_permanents.hpp => include/partitions_permanents.hpp (100%) rename proper_primes.hpp => include/proper_primes.hpp (100%) create mode 100644 src/CMakeLists.txt rename constant_weight_encodable_bits.cpp => src/constant_weight_encodable_bits.cpp (100%) rename enumeration_complexity.cpp => src/enumeration_complexity.cpp (100%) rename parameter_generator.cpp => src/parameter_generator.cpp (100%) rename work_factor_computation.cpp => src/work_factor_computation.cpp (100%) rename work_factor_computation_parallel.cpp => src/work_factor_computation_parallel.cpp (100%) diff --git a/.gitignore b/.gitignore index d74b8d7..594a653 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,6 @@ # MINE -# bin dir -bin/ +# build dir +build/ # Log output logs/ diff --git a/CMakeLists.txt b/CMakeLists.txt index a9949d7..06de975 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,54 +1,20 @@ cmake_minimum_required(VERSION 3.10) # Project name and version -project(MyProject VERSION 1.0 LANGUAGES CXX) +project(LEDAtools VERSION 1.0 LANGUAGES CXX) # Specify the C++ standard set(CMAKE_CXX_STANDARD 20) set(CMAKE_CXX_STANDARD_REQUIRED True) -# Compiler options -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3 -g3 -Wall -Wextra -Wno-sign-compare") +# Global compiler flags +add_compile_options(-O3 -g3 -Wall -Wextra -Wno-sign-compare) -# Define the executable targets -set(TARGETS - constant_weight_encodable_bits - enumeration_complexity - parameter_generator - work_factor_computation -) -# Define libraries -set(LIBS ntl gmp m spdlog fmt) +# Include directories +include_directories(include) -# Include directories (if any) -# include_directories(${CMAKE_SOURCE_DIR}/include) +# Add the src directory +add_subdirectory(src) -# Libraries -find_library(gmp gmp) -find_library(ntl ntl) -find_library(m m) -find_package(OpenMP REQUIRED) -find_package(spdlog REQUIRED) -find_package(fmt REQUIRED) - -# Print include directories to verify -get_directory_property(dirs INCLUDE_DIRECTORIES) -message(STATUS "Include directories: ${dirs}") -message(STATUS "Libraries: ${LIBS}") - -# Define the output directory -set(BIN_DIR ${CMAKE_BINARY_DIR}/bin) -file(MAKE_DIRECTORY ${BIN_DIR}) - -foreach(target ${TARGETS}) - add_executable(${target} ${target}.cpp) - set_target_properties(${target} PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${BIN_DIR}) - target_link_libraries(${target} ${LIBS}) -endforeach() - -# Special case for work_factor_computation_parallel 
with OpenMP -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fopenmp") -list(APPEND LIBS OpenMP::OpenMP_CXX) -add_executable(work_factor_computation_parallel work_factor_computation_parallel.cpp) -set_target_properties(work_factor_computation_parallel PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${BIN_DIR}) -target_link_libraries(work_factor_computation_parallel ${LIBS}) +# # Add the test directory +# add_subdirectory(test) diff --git a/binomials.hpp b/include/binomials.hpp similarity index 100% rename from binomials.hpp rename to include/binomials.hpp diff --git a/bit_error_probabilities.hpp b/include/bit_error_probabilities.hpp similarity index 100% rename from bit_error_probabilities.hpp rename to include/bit_error_probabilities.hpp diff --git a/globals.hpp b/include/globals.hpp similarity index 100% rename from globals.hpp rename to include/globals.hpp diff --git a/isd_cost_estimate.hpp b/include/isd_cost_estimate.hpp similarity index 100% rename from isd_cost_estimate.hpp rename to include/isd_cost_estimate.hpp diff --git a/logging.hpp b/include/logging.hpp similarity index 100% rename from logging.hpp rename to include/logging.hpp diff --git a/partitions_permanents.hpp b/include/partitions_permanents.hpp similarity index 100% rename from partitions_permanents.hpp rename to include/partitions_permanents.hpp diff --git a/proper_primes.hpp b/include/proper_primes.hpp similarity index 100% rename from proper_primes.hpp rename to include/proper_primes.hpp diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt new file mode 100644 index 0000000..3122f85 --- /dev/null +++ b/src/CMakeLists.txt @@ -0,0 +1,32 @@ +# Define the executable targets +set(TARGETS + constant_weight_encodable_bits + enumeration_complexity + parameter_generator + work_factor_computation + work_factor_computation_parallel +) + +# Libraries +find_library(gmp gmp) +find_library(ntl ntl) +find_library(m m) +# Packages +message(STATUS "m library: ${M_LIBRARIES}") +find_package(OpenMP REQUIRED) +message(STATUS "OpenMP library: ${OpenMP_CXX_LIBRARIES}") +find_package(spdlog REQUIRED) +message(STATUS "spdlog library: ${spdlog_LIBRARIES}") +find_package(fmt REQUIRED) +message(STATUS "fmt library: ${fmt_LIBRARIES}") + +# Define libraries +set(LIBS ntl gmp m spdlog fmt) + +foreach(target ${TARGETS}) + add_executable(${target} ${target}.cpp) + target_link_libraries(${target} PRIVATE ${LIBS}) +endforeach() + +# Special case for work_factor_computation_parallel with OpenMP +target_link_libraries(work_factor_computation_parallel PRIVATE OpenMP::OpenMP_CXX) diff --git a/constant_weight_encodable_bits.cpp b/src/constant_weight_encodable_bits.cpp similarity index 100% rename from constant_weight_encodable_bits.cpp rename to src/constant_weight_encodable_bits.cpp diff --git a/enumeration_complexity.cpp b/src/enumeration_complexity.cpp similarity index 100% rename from enumeration_complexity.cpp rename to src/enumeration_complexity.cpp diff --git a/parameter_generator.cpp b/src/parameter_generator.cpp similarity index 100% rename from parameter_generator.cpp rename to src/parameter_generator.cpp diff --git a/work_factor_computation.cpp b/src/work_factor_computation.cpp similarity index 100% rename from work_factor_computation.cpp rename to src/work_factor_computation.cpp diff --git a/work_factor_computation_parallel.cpp b/src/work_factor_computation_parallel.cpp similarity index 100% rename from work_factor_computation_parallel.cpp rename to src/work_factor_computation_parallel.cpp From 5ae078d234a4fca7c925d1114c10a9d119724171 Mon Sep 17 00:00:00 2001 
From: Simone Perriello <8199216+tigerjack@users.noreply.github.com> Date: Thu, 25 Jul 2024 17:48:25 +0200 Subject: [PATCH 27/55] Minors --- include/isd_cost_estimate.hpp | 3 +-- src/CMakeLists.txt | 21 +++++++++++++-------- src/work_factor_computation_parallel.cpp | 2 +- 3 files changed, 15 insertions(+), 11 deletions(-) diff --git a/include/isd_cost_estimate.hpp b/include/isd_cost_estimate.hpp index 3117397..8c83609 100644 --- a/include/isd_cost_estimate.hpp +++ b/include/isd_cost_estimate.hpp @@ -4,9 +4,9 @@ #include #include #include +#include #include #include -#include #define SKIP_PRANGE 1 #define SKIP_LB 1 @@ -17,7 +17,6 @@ #define SKIP_MMT 1 #define SKIP_Q_LB 0 #define SKIP_Q_STERN 1 - struct Result { std::string alg_name; diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 3122f85..91ed8bb 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -8,20 +8,23 @@ set(TARGETS ) # Libraries -find_library(gmp gmp) -find_library(ntl ntl) -find_library(m m) -# Packages -message(STATUS "m library: ${M_LIBRARIES}") +# Find libraries +find_library(GMP_LIB gmp) +find_library(NTL_LIB ntl) +find_library(M_LIB m) find_package(OpenMP REQUIRED) -message(STATUS "OpenMP library: ${OpenMP_CXX_LIBRARIES}") find_package(spdlog REQUIRED) -message(STATUS "spdlog library: ${spdlog_LIBRARIES}") find_package(fmt REQUIRED) + +message(STATUS "gmp library: ${GMP_LIBRARIES}") +message(STATUS "ntl library: ${NTL_LIBRARIES}") +message(STATUS "m library: ${M_LIBRARIES}") +message(STATUS "OpenMP library: ${OpenMP_CXX_LIBRARIES}") +message(STATUS "spdlog library: ${spdlog_LIBRARIES}") message(STATUS "fmt library: ${fmt_LIBRARIES}") # Define libraries -set(LIBS ntl gmp m spdlog fmt) +set(LIBS ${NTL_LIB} ${GMP_LIB} ${M_LIB} spdlog::spdlog fmt::fmt) foreach(target ${TARGETS}) add_executable(${target} ${target}.cpp) @@ -30,3 +33,5 @@ endforeach() # Special case for work_factor_computation_parallel with OpenMP target_link_libraries(work_factor_computation_parallel PRIVATE OpenMP::OpenMP_CXX) + +# message(STATUS "m library: ${M_LIBRARIES}") diff --git a/src/work_factor_computation_parallel.cpp b/src/work_factor_computation_parallel.cpp index 3802c1b..91dc596 100644 --- a/src/work_factor_computation_parallel.cpp +++ b/src/work_factor_computation_parallel.cpp @@ -2,7 +2,6 @@ #include #include // For std::setprecision #include -#include #include #include #include @@ -13,6 +12,7 @@ #include "globals.hpp" #include #include +#include #define NUM_BITS_REAL_MANTISSA 1024 #define IGNORE_DECODING_COST 0 From 541bbca6fc57bb87a386e1fc65a16b1cd8c724ec Mon Sep 17 00:00:00 2001 From: Simone Perriello <8199216+tigerjack@users.noreply.github.com> Date: Thu, 25 Jul 2024 19:27:24 +0200 Subject: [PATCH 28/55] MOD structure w/ headers and sources separated --- CMakeLists.txt | 3 + include/binomials.hpp | 92 +-- include/bit_error_probabilities.hpp | 325 +-------- include/isd_cost_estimate.hpp | 834 +---------------------- include/logging.hpp | 59 +- include/proper_primes.hpp | 2 +- src/CMakeLists.txt | 32 +- src/binomials.cpp | 85 +++ src/bit_error_probabilities.cpp | 289 ++++++++ src/constant_weight_encodable_bits.cpp | 2 +- src/enumeration_complexity.cpp | 2 +- src/isd_cost_estimate.cpp | 803 ++++++++++++++++++++++ src/logging.cpp | 46 ++ src/parameter_generator.cpp | 7 +- src/work_factor_computation.cpp | 1 + src/work_factor_computation_parallel.cpp | 1 + 16 files changed, 1334 insertions(+), 1249 deletions(-) create mode 100644 src/binomials.cpp create mode 100644 src/bit_error_probabilities.cpp create mode 100644 
src/isd_cost_estimate.cpp create mode 100644 src/logging.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 06de975..10620f1 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -16,5 +16,8 @@ include_directories(include) # Add the src directory add_subdirectory(src) +# # Add the example directory +# add_subdirectory(examples) + # # Add the test directory # add_subdirectory(test) diff --git a/include/binomials.hpp b/include/binomials.hpp index 35c0378..14f69fa 100644 --- a/include/binomials.hpp +++ b/include/binomials.hpp @@ -1,8 +1,9 @@ #pragma once -#include -#include #include +#include #include +#include +#include /* binomials are precomputed up to MAX_N-choose-MAX_T */ #define MAX_N 2000 @@ -11,84 +12,13 @@ #define LOW_K_MAX_N 10000 #define LOW_K_MAX_T 10 -const NTL::RR nat_log_2 = NTL::log(NTL::RR(2)); - -static inline NTL::RR log2_RR(NTL::RR v){ - return NTL::log(v)/nat_log_2; -} - -NTL::Mat binomial_table; /*contains all binomials up to MAX_N-choose-MAX_T */ -NTL::Mat low_k_binomial_table; /*contains all binomials up to LOW_K_MAX_N-choose-LOW_K_MAX_T */ - -NTL::RR pi; -/*NOTE: NTL allows to access matrices as 1- based with Matlab notation */ -void InitBinomials(){ - std::cerr << "Precomputing n-choose-t up to n: " << MAX_N << - " t: " << MAX_T << std::endl; - binomial_table.SetDims(MAX_N+1,MAX_T+1); - binomial_table[0][0] = NTL::ZZ(1); - for (unsigned i = 1 ; i <= MAX_N; i++){ - binomial_table[i][0] = NTL::ZZ(1); - binomial_table[i][1] = NTL::ZZ(i); - for(unsigned j=2 ; (j <= i) && (j <= MAX_T) ; j++){ - binomial_table[i][j] = binomial_table[i][j-1] * NTL::ZZ(i-j+1) / NTL::ZZ(j); - } - } - std::cerr << "Precomputing low n-choose-t up to n: " << LOW_K_MAX_N << - " t: " << LOW_K_MAX_T << std::endl; - low_k_binomial_table.SetDims(LOW_K_MAX_N+1,LOW_K_MAX_T+1); - low_k_binomial_table[0][0] = NTL::ZZ(1); - for (unsigned i = 0 ; i <= LOW_K_MAX_N; i++){ - low_k_binomial_table[i][0] = NTL::ZZ(1); - low_k_binomial_table[i][1] = NTL::ZZ(i); - for(unsigned j=2 ; (j <= i) && (j <= LOW_K_MAX_T) ; j++){ - low_k_binomial_table[i][j] = low_k_binomial_table[i][j-1] * NTL::ZZ(i-j+1) / NTL::ZZ(j); - } - } - std::cerr << "done" << std::endl; - pi = NTL::ComputePi_RR(); -} - - - -NTL::RR lnFactorial(NTL::RR n){ - /* log of Stirling series approximated to the fourth term - * n log(n) - n + 1/2 log(2 \pi n) + log(- 139/(51840 n^3) + - * + 1/(288 n^2) + 1/(12 n) + 1) */ - return n * NTL::log(n) - n + 0.5 * NTL::log(2*pi*n) + - NTL::log( - NTL::RR(139)/(n*n*n * 51840) + - NTL::RR(1)/(n*n*288) + - NTL::RR(1)/(n*12) + - 1); -} - -NTL::RR lnBinom(NTL::RR n, NTL::RR k){ - if ( (k == NTL::RR(0) ) || (k == n) ) { - return NTL::RR(0); - } - return lnFactorial(n) - (lnFactorial(k) + lnFactorial(n-k) ); -} +extern NTL::RR pi; +extern NTL::RR nat_log_2; +void InitConstants(); +void InitBinomials(); -NTL::ZZ binomial_wrapper(long n, long k){ - if(k>n) return NTL::ZZ(0); - /* employ memoized if available */ - if ((n <= MAX_N) && (k < MAX_T)){ - return binomial_table[n][k]; - } - if ((n <= LOW_K_MAX_N) && (k < LOW_K_MAX_T)){ - return low_k_binomial_table[n][k]; - } - - /* shortcut computation for fast cases (k < 10) where - * Stirling may not provide good approximations */ - if (k < 10) { - NTL::ZZ result = NTL::ZZ(1); - for(int i = 1 ; i <= k; i++){ - result = (result * (n+1-i))/i; - } - return result; - } - /*Fall back to Stirling*/ - return NTL::conv( NTL::exp( lnBinom(NTL::RR(n),NTL::RR(k)) )); -} +NTL::RR lnFactorial(NTL::RR n); +NTL::RR lnBinom(NTL::RR n, NTL::RR k); +NTL::ZZ binomial_wrapper(long n, long k); 
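+// NB: binomial_wrapper(n, k) (now implemented in src/binomials.cpp) returns
+// exact values from the precomputed tables for small (n, k), uses an exact
+// product loop for k < 10, and otherwise falls back to the Stirling-series
+// approximation through lnBinom.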
+NTL::RR log2_RR(NTL::RR v); diff --git a/include/bit_error_probabilities.hpp b/include/bit_error_probabilities.hpp index b2145e7..053d053 100644 --- a/include/bit_error_probabilities.hpp +++ b/include/bit_error_probabilities.hpp @@ -1,309 +1,34 @@ #pragma once +#include "binomials.hpp" +#include "proper_primes.hpp" #include #include -#include "proper_primes.hpp" -#include "binomials.hpp" // choice of the approximation praxis for the estimated fraction of an error // to appear in the next iteration of a bit-flipping decoder #define ROUNDING_PRAXIS round -/* Probability that a variable node is correct, and a parity equation involving - * it is satisfied */ -NTL::RR compute_p_cc(const uint64_t d_c, - const uint64_t n, - const uint64_t t){ - NTL::RR result = NTL::RR(0); - uint64_t bound = (d_c - 1) < t ? d_c - 1 : t; - - /* the number of errors falling in the PC equation should be at least - * the amount which cannot be placed in a non checked place */ - uint64_t LowerTHitBound = (n-d_c) < t ? t-(n-d_c) : 0; - /* and it should be even, since the PC equation must be satisfied */ - LowerTHitBound = LowerTHitBound % 2 ? LowerTHitBound + 1 : LowerTHitBound; - - for(uint64_t j = LowerTHitBound; j <= bound; j = j+2 ){ - result += to_RR( binomial_wrapper(d_c-1,j) * binomial_wrapper(n-d_c,t-j) ) / - to_RR( binomial_wrapper(n-1,t) ); - } - return result; -} - -/* Probability that a variable node is correct, and a parity equation involving - * it is *not* satisfied */ -NTL::RR compute_p_ci(const uint64_t d_c, - const uint64_t n, - const uint64_t t){ - NTL::RR result = NTL::RR(0); - uint64_t bound = (d_c - 1) < t ? d_c - 1 : t; - - /* the number of errors falling in the PC equation should be at least - * the amount which cannot be placed in a non checked place */ - uint64_t LowerTHitBound = (n-d_c) < t ? t-(n-d_c) : 1; - /* and it should be odd, since the PC equation must be non satisfied */ - LowerTHitBound = LowerTHitBound % 2 ? LowerTHitBound : LowerTHitBound + 1; - - for(uint64_t j = LowerTHitBound; j <= bound; j = j+2 ){ - result += to_RR( binomial_wrapper(d_c-1,j) * binomial_wrapper(n-d_c,t-j) ) - / to_RR( binomial_wrapper(n-1,t) ); - } - return result; -} - -/* Probability that a variable node is *not* correct, and a parity equation involving - * it is *not* satisfied */ -NTL::RR compute_p_ic(const uint64_t d_c, - const uint64_t n, - const uint64_t t){ - NTL::RR result = NTL::RR(0); - uint64_t UpperTBound = (d_c - 1) < t - 1 ? d_c - 1 : t - 1; - - /* the number of errors falling in the PC equation should be at least - * the amount which cannot be placed in a non checked place */ - uint64_t LowerTHitBound = (n-d_c-1) < (t-1) ? (t-1)-(n-d_c-1) : 0; - /* and it should be even, since the PC equation must be unsatisfied (when - * accounting for the one we are considering as already placed*/ - LowerTHitBound = LowerTHitBound % 2 ? LowerTHitBound + 1 : LowerTHitBound; - - for(uint64_t j = LowerTHitBound; j <= UpperTBound; j = j+2 ){ - result += NTL::to_RR( binomial_wrapper(d_c-1,j) * binomial_wrapper(n-d_c,t-j-1) ) - / to_RR( binomial_wrapper(n-1,t-1) ); - } - return result; -} - -/* Probability that a variable node is *not* correct, and a parity equation involving - * it is satisfied */ -NTL::RR compute_p_ii(const uint64_t d_c, - const uint64_t n, - const uint64_t t){ - - NTL::RR result = NTL::RR(0); - uint64_t bound = (d_c - 1) < t - 1 ? 
d_c - 1 : t - 1; - - /* the number of errors falling in the PC equation should be at least - * the amount which cannot be placed in a non checked place */ - uint64_t LowerTHitBound = (n-d_c) < (t-1) ? (t-1)-(n-d_c) : 1; - /* and it should be odd, since the PC equation must be satisfied (when - * accounting for the one we are considering as already placed)*/ - LowerTHitBound = LowerTHitBound % 2 ? LowerTHitBound : LowerTHitBound +1; - for(uint64_t j = LowerTHitBound; j <= bound; j = j+2 ){ - result += NTL::to_RR( binomial_wrapper(d_c-1,j) * binomial_wrapper(n-d_c,t-j-1) ) - / to_RR( binomial_wrapper(n-1,t-1) ); - } - return result; -} - -/* note p_cc + p_ci = 1 */ -/* note p_ic + p_ii = 1 */ - -/* Probability that a given erroneous variable is deemed as such, and is thus - * corrected, given a threshold for the amount of unsatisfied parity check - * equations. Called P_ic in most texts */ -NTL::RR ComputePrBitCorrection( const NTL::RR p_ic, - const uint64_t d_v, - // const uint64_t t, - const uint64_t threshold ){ -// Pic=0; /* p_correct */ -// for (j=b,dv, -// term=binomial(dv,j)*(p_ic^j)*(1-p_ic)^(dv-j); -// Pic=Pic+term; -// ); - NTL::RR result = NTL::RR(0), success, failure; - for (uint64_t j = threshold; j <= d_v; j++){ - NTL::pow(success, p_ic, NTL::to_RR(j)); - NTL::pow(failure, NTL::RR(1)-p_ic, NTL::to_RR(d_v-j)); - result += NTL::to_RR(binomial_wrapper(d_v,j)) * success * failure; - } - return result; -} - -/* Probability that a given correct variable is not deemed as such, and is thus - * fault-induced, given a threshold for the amount of unsatisfied parity check - * equations. Called P_ci in most texts, p_induce in official comment */ -NTL::RR ComputePrBitFaultInduction( const NTL::RR p_ci, - const uint64_t d_v, - // const uint64_t t, /* unused */ - const uint64_t threshold ){ - - NTL::RR result= NTL::RR(0), success, failure; - for (uint64_t j = threshold; j <= d_v; j++){ - NTL::pow(success, p_ci, NTL::to_RR(j)); - NTL::pow(failure, NTL::RR(1)-p_ci, NTL::to_RR(d_v-j)); - result += NTL::to_RR(binomial_wrapper(d_v,j)) * success * failure; - } - return result; -} - -/* computes the probability that toCorrect bits are corrected - * known as P{N_ic = toCorrect} */ -NTL::RR ComputePrBitCorrectionMulti( const NTL::RR p_ic, - const uint64_t d_v, - const uint64_t t, +NTL::RR compute_p_cc(const uint64_t d_c, const uint64_t n, const uint64_t t); +NTL::RR compute_p_ci(const uint64_t d_c, const uint64_t n, const uint64_t t); +NTL::RR compute_p_ic(const uint64_t d_c, const uint64_t n, const uint64_t t); +NTL::RR compute_p_ii(const uint64_t d_c, const uint64_t n, const uint64_t t); +NTL::RR ComputePrBitCorrection(const NTL::RR p_ic, const uint64_t d_v, + const uint64_t threshold); +NTL::RR ComputePrBitFaultInduction(const NTL::RR p_ci, const uint64_t d_v, + const uint64_t threshold); +NTL::RR ComputePrBitCorrectionMulti(const NTL::RR p_ic, const uint64_t d_v, + const uint64_t t, const uint64_t threshold, + const uint64_t toCorrect); +NTL::RR ComputePrBitInduceMulti(const NTL::RR p_ci, const uint64_t d_v, + const uint64_t t, const uint64_t n, const uint64_t threshold, - const uint64_t toCorrect){ - NTL::RR ProbCorrectOne = ComputePrBitCorrection(p_ic,d_v,threshold); - return NTL::to_RR(binomial_wrapper(t,toCorrect)) * - NTL::pow(ProbCorrectOne,NTL::RR(toCorrect)) * - NTL::pow(1-ProbCorrectOne,NTL::RR(t-toCorrect)); -} - -/* computes the probability that toInduce faults are induced - * known as P{N_ci = toInduce} or Pr{f_wrong = to_induce} */ -NTL::RR ComputePrBitInduceMulti(const NTL::RR p_ci, - 
const uint64_t d_v, - const uint64_t t, - const uint64_t n, - const uint64_t threshold, - const uint64_t toInduce){ -// if(toInduce <= 1 ){ -// return NTL::RR(0); -// } - NTL::RR ProbInduceOne = ComputePrBitFaultInduction(p_ci,d_v,threshold); - return NTL::to_RR(binomial_wrapper(n-t,toInduce)) * - NTL::pow(ProbInduceOne,NTL::RR(toInduce)) * - NTL::pow(1-ProbInduceOne,NTL::RR(n-t-toInduce)); -} - -uint64_t FindNextNumErrors(const uint64_t n_0, - const uint64_t p, - const uint64_t d_v, - const uint64_t t){ - NTL::RR p_ci, p_ic; - p_ci = compute_p_ci(n_0*d_v,n_0*p,t); - p_ic = compute_p_ic(n_0*d_v,n_0*p,t); - uint64_t t_next=t; -// uint64_t best_threshold = (d_v - 1)/2; - for(uint64_t i = (d_v - 1)/2; i <= d_v - 1; i++){ - NTL::RR t_approx= t - - t * ComputePrBitCorrection(p_ic, d_v, i) + - (n_0*p - t) * ComputePrBitFaultInduction(p_ci, d_v, i); - unsigned long int t_curr = NTL::conv(NTL::ROUNDING_PRAXIS(t_approx)) ; - /*Note : we increase the threshold only if it improves strictly on the - * predicted error correction. */ - if (t_curr < t_next){ - t_next = t_curr; -// best_threshold = i; - } - } - /* considering that any code will correct a single bit error, if - * t_next == 1, we save a computation iteration and shortcut to t_next == 0*/ - if (t_next == 1) { - t_next = 0; - } - return t_next; -} - -/* computes the exact 1-iteration DFR and the best threshold on the number of - * upcs to achieve it */ -std::pair Find1IterDFR(const uint64_t n_0, - const uint64_t p, - const uint64_t d_v, - const uint64_t t){ - NTL::RR p_ci, p_ic, P_correct, P_induce; - NTL::RR DFR, best_DFR = NTL::RR(1); - p_ci = compute_p_ci(n_0*d_v,n_0*p,t); - p_ic = compute_p_ic(n_0*d_v,n_0*p,t); - uint64_t best_threshold = (d_v - 1)/2; - for(uint64_t b = best_threshold; b <= d_v - 1; b++){ - DFR = NTL::RR(1) - ComputePrBitCorrectionMulti(p_ic, d_v, t, b, t) * ComputePrBitInduceMulti(p_ci,d_v,t,n_0*p,b,0); - /*Note : we increase the threshold only if it improves strictly on the - * predicted error correction. */ - if (DFR < best_DFR){ - best_DFR = DFR; - best_threshold = b; - } - } -// std::cout << best_threshold << std::endl; - return std::make_pair(best_DFR,best_threshold); -} - - -/* computes the exact 1-iteration probability of leaving at most t_leftover - * uncorrected errors out of t. 
*/ -std::pair Find1IterTLeftoverPr(const uint64_t n_0, - const uint64_t p, - const uint64_t d_v, - const uint64_t t, - const uint64_t t_leftover){ - NTL::RR p_ci, p_ic; - NTL::RR DFR, best_DFR = NTL::RR(1); - p_ci = compute_p_ci(n_0*d_v,n_0*p,t); - p_ic = compute_p_ic(n_0*d_v,n_0*p,t); - int n= p*n_0; - uint64_t best_threshold = (d_v + 1)/2; - - for(uint64_t b = best_threshold; b <= d_v ; b++){ - DFR = NTL::RR(0); - NTL::RR P_correct = ComputePrBitCorrection(p_ic, d_v,b); - NTL::RR P_induce = ComputePrBitFaultInduction(p_ci,d_v,b); - for(int tau = 0 ; tau <= t_leftover; tau++){ - for(int n_to_induce = 0 ; n_to_induce <= t_leftover; n_to_induce++) { - NTL::RR prob_induce_n = NTL::to_RR(binomial_wrapper(n-t,n_to_induce)) * - NTL::pow(P_induce,NTL::to_RR(n_to_induce)) * - NTL::pow(NTL::RR(1)-P_induce,NTL::to_RR(n-t-n_to_induce)); - int n_to_correct = (int)t + n_to_induce - tau; - NTL::RR prob_correct_n = NTL::to_RR(binomial_wrapper(t,n_to_correct)); - prob_correct_n *= NTL::pow(P_correct,NTL::to_RR(n_to_correct)); - - prob_correct_n *= NTL::pow(NTL::RR(1)-P_correct,NTL::to_RR((int)t-n_to_correct)); /*unsigned exp?*/ - DFR += prob_correct_n*prob_induce_n; - } - } - DFR = NTL::RR(1) - DFR; - if (DFR < best_DFR){ - best_DFR = DFR; - best_threshold = b; - } - } - return std::make_pair(best_DFR,best_threshold); -} - -// find minimum p which, asymptotically, corrects all errors -// search performed via binary search as the DFR is decreasing monot. -// in of p -uint64_t Findpth(const uint64_t n_0, - const uint64_t d_v_prime, - const uint64_t t){ - - unsigned int prime_idx = 0, prime_idx_prec; - uint64_t p = proper_primes[prime_idx]; - while(p < d_v_prime || p < t ){ - prime_idx++; - p=proper_primes[prime_idx]; - } - - uint64_t hi, lo; - lo = prime_idx; - hi = PRIMES_NO; - prime_idx_prec = lo; - - uint64_t limit_error_num = t; - while(hi-lo > 1){ - prime_idx_prec = prime_idx; - prime_idx = (lo+hi)/2; - p = proper_primes[prime_idx]; - // compute number of remaining errors after +infty iters - limit_error_num = t; - uint64_t current_error_num; -// std::cout << "using p:"<< p << ", errors dropping as "; - do { - current_error_num = limit_error_num; - limit_error_num = FindNextNumErrors(n_0, p, d_v_prime, current_error_num); -// std::cout << limit_error_num << " "; - } while ( - (limit_error_num != current_error_num) && - (limit_error_num != 0) - ); -// std::cout << std::endl; - if (limit_error_num > 0){ - lo = prime_idx; - } else { - hi = prime_idx; - } - } - if(limit_error_num == 0) { - return proper_primes[prime_idx]; - } - return proper_primes[prime_idx_prec]; -} + const uint64_t toInduce); +uint64_t FindNextNumErrors(const uint64_t n_0, const uint64_t p, + const uint64_t d_v, const uint64_t t); +std::pair Find1IterDFR(const uint64_t n_0, const uint64_t p, + const uint64_t d_v, const uint64_t t); +std::pair +Find1IterTLeftoverPr(const uint64_t n_0, const uint64_t p, const uint64_t d_v, + const uint64_t t, const uint64_t t_leftover); +uint64_t Findpth(const uint64_t n_0, const uint64_t d_v_prime, + const uint64_t t); diff --git a/include/isd_cost_estimate.hpp b/include/isd_cost_estimate.hpp index 8c83609..6832713 100644 --- a/include/isd_cost_estimate.hpp +++ b/include/isd_cost_estimate.hpp @@ -1,824 +1,54 @@ #pragma once -#include "binomials.hpp" -#include "logging.hpp" #include #include -#include #include #include #include -#define SKIP_PRANGE 1 -#define SKIP_LB 1 -#define SKIP_LEON 1 +#define SKIP_PRANGE 0 +#define SKIP_LB 0 +#define SKIP_LEON 0 #define SKIP_STERN 0 -#define SKIP_FS 1 -#define 
SKIP_BJMM 1 -#define SKIP_MMT 1 +#define SKIP_FS 0 +#define SKIP_BJMM 0 +#define SKIP_MMT 0 #define SKIP_Q_LB 0 -#define SKIP_Q_STERN 1 +#define SKIP_Q_STERN 0 struct Result { - std::string alg_name; - std::map params; - double value; - double gje_cost; + std::string alg_name; + std::map params; + double value; + double gje_cost; }; /***************************Classic ISDs***************************************/ -Result isd_log_cost_classic_BJMM_approx(const uint32_t n, const uint32_t k, - const uint32_t t) { - Result result; - result.alg_name = "BJMM"; - result.params = {{"approx", true}}; - result.value = ((double)t) * -log((1.0 - (double)k / (double)n)) / log(2); - return result; -} +const NTL::RR log_probability_k_by_k_is_inv(const NTL::RR &k); +const NTL::RR probability_k_by_k_is_inv(const NTL::RR &k); +const NTL::RR classic_rref_red_cost(const NTL::RR &n, const NTL::RR &r); -// computes the probability of a random k * k being invertible -const NTL::RR log_probability_k_by_k_is_inv(const NTL::RR &k) { - if (k >= 100) - return NTL::RR(-1.79191682); - NTL::RR log_pinv = NTL::RR(-1); - for (long i = 2; i <= k; i++) { - log_pinv = - log_pinv + log2_RR(NTL::RR(1) - NTL::power2_RR(-i)); - } - return log_pinv; -} - -const NTL::RR probability_k_by_k_is_inv(const NTL::RR &k) { - if (k >= 100) - return NTL::RR(0.288788095); - NTL::RR log_pinv = NTL::RR(0.5); - for (long i = 2; i <= k; i++) { - log_pinv = log_pinv * (NTL::RR(1) - NTL::power2_RR(-i)); - } - return log_pinv; -} - -const NTL::RR classic_rref_red_cost(const NTL::RR &n, const NTL::RR &r) { - /* simple reduced row echelon form transform, as it is not likely to be the - * bottleneck */ - NTL::RR k = n - r; - return r * r * n / NTL::RR(2) + (n * r) / NTL::RR(2) - - r * r * r / NTL::RR(6) + r * r + r / NTL::RR(6) - NTL::RR(1); -} - -// const NTL::RR classic_IS_candidate_cost(const NTL::RR &n, const NTL::RR &r) { -// NB: r* r should be added only for SDP, and even there it can be omitted since the syndrome can be thought as another column of H -// return classic_rref_red_cost(n, r) / probability_k_by_k_is_inv(r) + r * r; -// } - -const NTL::RR Fin_Send_rref_red_cost(const NTL::RR &n, const NTL::RR &r, - const NTL::RR l) { - /* reduced size reduced row echelon form transformation, only yields an - * (r-l) sized identity matrix */ - NTL::RR k = n - r; - return -l * l * l / NTL::RR(3) - l * l * n / NTL::RR(2) + - l * l * r / NTL::RR(2) - 3 * l * l / NTL::RR(2) - - 3 * l * n / NTL::RR(2) + l * r / NTL::RR(2) - 13 * l / NTL::RR(6) + - n * r * r / NTL::RR(2) + n * r / NTL::RR(2) - r * r * r / NTL::RR(6) + - r * r + r / NTL::RR(6) - NTL::RR(1); -} - -// const NTL::RR Fin_Send_IS_candidate_cost(const NTL::RR &n, const NTL::RR &r, -// const NTL::RR &l) { -// return Fin_Send_rref_red_cost(n, r, l) / probability_k_by_k_is_inv(r - l) + -// r * r; -// } - -Result isd_log_cost_classic_Prange(const uint32_t n, const uint32_t k, - const uint32_t t) { - NTL::RR n_real = NTL::RR(n); - NTL::RR k_real = NTL::RR(k); - NTL::RR t_real = NTL::RR(t); - - // NTL::RR cost_iter = classic_IS_candidate_cost(n_real, n_real - k_real); - NTL::RR cost_gje = classic_rref_red_cost(n_real, k_real); - NTL::RR num_iter = NTL::to_RR(binomial_wrapper(n, t)) / - NTL::to_RR(binomial_wrapper(n - k, t)); - - NTL::RR log_cost = log2_RR(num_iter) - log_probability_k_by_k_is_inv(n_real - k_real) + log2_RR(cost_gje); - - Result res; - res.alg_name = "Prange"; - res.params = {}; - res.value = NTL::conv(log_cost); - res.gje_cost = NTL::conv(log2_RR(cost_gje)); - return res; -} - -#define 
P_MAX_LB 20 -Result isd_log_cost_classic_LB(const uint32_t n, const uint32_t k, - const uint32_t t) { - NTL::RR n_real = NTL::RR(n); - NTL::RR k_real = NTL::RR(k); - NTL::RR t_real = NTL::RR(t); - NTL::RR min_log_cost = n_real; // unreachable upper bound - NTL::RR log_cost; - uint32_t best_p = 1; - uint32_t constrained_max_p = P_MAX_LB > t ? t : P_MAX_LB; - - NTL::RR cost_gje = classic_rref_red_cost(n_real, k_real); - // IS_candidate_cost = classic_IS_candidate_cost(n_real, n_real - k_real); - - for (uint32_t p = 1; p < constrained_max_p; p++) { - NTL::RR p_real = NTL::RR(p); - NTL::RR cost_iter = cost_gje / probability_k_by_k_is_inv(n_real - k_real) + - NTL::to_RR(binomial_wrapper(k, p) * p * (n - k)); - NTL::RR num_iter = - NTL::to_RR(binomial_wrapper(n, t)) / - NTL::to_RR(binomial_wrapper(k, p) * binomial_wrapper(n - k, t - p)); - log_cost = - (NTL::log(num_iter) + NTL::log(cost_iter)) / NTL::log(NTL::RR(2)); - if (min_log_cost > log_cost) { - min_log_cost = log_cost; - best_p = p; - } - } - spdlog::info("Lee-Brickell best p: {}", best_p); - Result res; - res.alg_name = "Lee-Brickell"; - res.params = {{"p", best_p}}; - res.value = NTL::conv(min_log_cost); - res.gje_cost = NTL::conv(log2_RR(cost_gje)); - return res; -} - -#define P_MAX_Leon P_MAX_LB -#define L_MAX_Leon 200 -Result isd_log_cost_classic_Leon(const uint32_t n, const uint32_t k, - const uint32_t t) { - NTL::RR n_real = NTL::RR(n); - NTL::RR k_real = NTL::RR(k); - NTL::RR t_real = NTL::RR(t); - NTL::RR min_log_cost = n_real; // unreachable upper bound - NTL::RR log_cost; - uint32_t best_l = 0, best_p = 1, constrained_max_l, constrained_max_p; - - NTL::RR gje_cost = classic_rref_red_cost(n_real, n_real - k_real); - // IS_candidate_cost = classic_IS_candidate_cost(n_real, n_real - k_real); - constrained_max_p = P_MAX_Leon > t ? t : P_MAX_Leon; - for (uint32_t p = 1; p < constrained_max_p; p++) { - constrained_max_l = - (L_MAX_Leon > (n - k - (t - p)) ? (n - k - (t - p)) : L_MAX_Leon); - NTL::RR p_real = NTL::RR(p); - for (uint32_t l = 0; l < constrained_max_l; l++) { - NTL::RR KChooseP = NTL::to_RR(binomial_wrapper(k, p)); - NTL::RR cost_iter = - gje_cost / probability_k_by_k_is_inv(n_real - k_real) + KChooseP * p_real * NTL::to_RR(l) + - (KChooseP / NTL::power2_RR(l)) * NTL::RR(p * (n - k - l)); - NTL::RR num_iter = NTL::to_RR(binomial_wrapper(n, t)) / - NTL::to_RR(binomial_wrapper(k, p) * - binomial_wrapper(n - k - l, t - p)); - log_cost = - (NTL::log(num_iter) + NTL::log(cost_iter)) / NTL::log(NTL::RR(2)); - if (min_log_cost > log_cost) { - min_log_cost = log_cost; - best_l = l; - best_p = p; - } - } - } - spdlog::info("Leon Best l {} best p: {}", best_l, best_p); - Result res; - res.alg_name = "Lee-Brickell"; - res.params = {{"p", best_p}, {"l", best_l}}; - res.value = NTL::conv(min_log_cost); - res.gje_cost = NTL::conv(log2_RR(gje_cost)); - return res; -} - -#define P_MAX_Stern P_MAX_Leon -#define L_MAX_Stern L_MAX_Leon -Result isd_log_cost_classic_Stern(const uint32_t n, const uint32_t k, - const uint32_t t) { - NTL::RR n_real = NTL::RR(n); - NTL::RR k_real = NTL::RR(k); - NTL::RR t_real = NTL::RR(t); - NTL::RR min_log_cost = n_real; // unreachable upper bound - NTL::RR log_cost; - uint32_t best_l = 0, best_p = 2, constrained_max_l, constrained_max_p; - - NTL::RR gje_cost = classic_rref_red_cost(n_real, n_real - k_real); - // IS_candidate_cost = classic_IS_candidate_cost(n_real, n_real - k_real); - - constrained_max_p = P_MAX_Stern > t ? 
t : P_MAX_Stern; - for (uint32_t p = 2; p < constrained_max_p; p = p + 2) { - constrained_max_l = - (L_MAX_Stern > (n - k - (t - p)) ? (n - k - (t - p)) : L_MAX_Stern); - NTL::ZZ kHalfChoosePHalf; - for (uint32_t l = 0; l < constrained_max_l; l++) { - NTL::RR p_real = NTL::RR(p); - kHalfChoosePHalf = binomial_wrapper(k / 2, p / 2); - NTL::RR kHalfChoosePHalf_real = NTL::to_RR(kHalfChoosePHalf); - - NTL::RR cost_iter = - gje_cost/ probability_k_by_k_is_inv(n_real - k_real) + - kHalfChoosePHalf_real * (NTL::to_RR(l) * p_real + - (kHalfChoosePHalf_real / NTL::power2_RR(l)) * - NTL::RR(p * (n - k - l))); - // #if LOG_COST_CRITERION == 1 - NTL::RR log_stern_list_size = - kHalfChoosePHalf_real * - (p_real / NTL::RR(2) * NTL::log(k_real / NTL::RR(2)) / - NTL::log(NTL::RR(2)) + - NTL::to_RR(l)); - log_stern_list_size = - NTL::log(log_stern_list_size) / NTL::log(NTL::RR(2)); - cost_iter = cost_iter * log_stern_list_size; - // #endif - NTL::RR num_iter = NTL::to_RR(binomial_wrapper(n, t)) / - NTL::to_RR(kHalfChoosePHalf * kHalfChoosePHalf * - binomial_wrapper(n - k - l, t - p)); - log_cost = log2_RR(num_iter) + log2_RR(cost_iter); - if (min_log_cost > log_cost) { - min_log_cost = log_cost; - best_l = l; - best_p = p; - } - } - } - - spdlog::info("Stern Best l {}, best p: {}", best_l, best_p); - Result res; - res.alg_name = "Stern"; - res.params = {{"p", best_p}, {"l", best_l}}; - res.value = NTL::conv(min_log_cost); - res.gje_cost = NTL::conv(log2_RR(gje_cost)); - return res; -} - -#define P_MAX_FS P_MAX_Stern -#define L_MAX_FS L_MAX_Stern -Result isd_log_cost_classic_FS(const uint32_t n, const uint32_t k, - const uint32_t t) { - NTL::RR n_real = NTL::RR(n); - NTL::RR k_real = NTL::RR(k); - NTL::RR t_real = NTL::RR(t); - NTL::RR min_log_cost = n_real; // unreachable upper bound - NTL::RR log_cost; - uint32_t best_l = 0, best_p = 2, constrained_max_l, constrained_max_p; - - NTL::RR cost_gje; -// return Fin_Send_rref_red_cost(n, r, l) / probability_k_by_k_is_inv(r - l) + - constrained_max_p = P_MAX_Stern > t ? t : P_MAX_Stern; - for (uint32_t p = 2; p < constrained_max_p; p = p + 2) { - constrained_max_l = - (L_MAX_Stern > (n - k - (t - p)) ? 
(n - k - (t - p)) : L_MAX_Stern); - NTL::RR p_real = NTL::RR(p); - NTL::ZZ kPlusLHalfChoosePHalf; - for (uint32_t l = 0; l < constrained_max_l; l++) { - NTL::RR l_real = NTL::RR(l); - cost_gje = - Fin_Send_rref_red_cost(n_real, n_real - k_real, l_real); - kPlusLHalfChoosePHalf = binomial_wrapper((k + l) / 2, p / 2); - NTL::RR kPlusLHalfChoosePHalf_real = NTL::to_RR(kPlusLHalfChoosePHalf); - NTL::RR cost_iter = - cost_gje / probability_k_by_k_is_inv(n_real - k_real - l_real) + - kPlusLHalfChoosePHalf_real * - (NTL::to_RR(l) * p_real + - (kPlusLHalfChoosePHalf_real / NTL::power2_RR(l)) * - NTL::RR(p * (n - k - l))); - // #if LOG_COST_CRITERION == 1 - NTL::RR log_FS_list_size = - kPlusLHalfChoosePHalf_real * - (p_real / NTL::RR(2) * NTL::log((k_real + l_real) / NTL::RR(2)) / - NTL::log(NTL::RR(2)) + - l_real); - log_FS_list_size = log2_RR(log_FS_list_size); - cost_iter = cost_iter * log_FS_list_size; - // #endif - NTL::RR num_iter = - NTL::to_RR(binomial_wrapper(n, t)) / - NTL::to_RR(kPlusLHalfChoosePHalf * kPlusLHalfChoosePHalf * - binomial_wrapper(n - k - l, t - p)); - - log_cost = log2_RR(num_iter) + log2_RR(cost_iter); - if (min_log_cost > log_cost) { - min_log_cost = log_cost; - best_l = l; - best_p = p; - } - } - } - spdlog::info("FS Best l {}, best p: {}", best_l, best_p); - Result res; - res.alg_name = "Fin-Send"; - res.params = {{"p", best_p}, {"l", best_l}}; - res.value = NTL::conv(min_log_cost); - res.gje_cost = NTL::conv(log2_RR(cost_gje)); - //cost_gje not reported - return res; -} - -#define P_MAX_MMT (P_MAX_FS + 25) // P_MAX_MMT -#define L_MAX_MMT 350 // L_MAX_MMT -#define L_MIN_MMT 2 -Result isd_log_cost_classic_MMT(const uint32_t n, const uint32_t k, - const uint32_t t) { - uint32_t r = n - k; - NTL::RR n_real = NTL::RR(n); - NTL::RR r_real = NTL::RR(r); - NTL::RR k_real = n_real - r_real; - - NTL::RR min_log_cost = n_real; // unreachable upper bound - NTL::RR log_cost, log_mem_cost; - uint32_t best_l = L_MIN_MMT, best_p = 4, constrained_max_l = 0, - constrained_max_p; -#if defined(EXPLORE_REPS) - uint32_t best_l1; -#endif - - NTL::RR cost_gje; - constrained_max_p = P_MAX_MMT > t ? t : P_MAX_MMT; - /* p should be divisible by 4 in MMT */ - for (uint32_t p = 4; p <= constrained_max_p; p = p + 4) { - constrained_max_l = - (L_MAX_MMT > (n - k - (t - p)) ? (n - k - (t - p)) : L_MAX_MMT); - for (uint32_t l = L_MIN_MMT; l <= constrained_max_l; l++) { - NTL::RR l_real = NTL::to_RR(l); - NTL::ZZ kPlusLHalfChoosePHalf = binomial_wrapper((k + l) / 2, p / 2); - NTL::RR num_iter = - NTL::to_RR(binomial_wrapper(n, t)) / - NTL::to_RR(kPlusLHalfChoosePHalf * kPlusLHalfChoosePHalf * - binomial_wrapper(n - k - l, t - p)); - // FS_IS_candidate_cost = Fin_Send_IS_candidate_cost(n_real, r_real, l_real); - cost_gje = Fin_Send_rref_red_cost(n_real, n_real - k_real, l_real); - NTL::ZZ kPlusLHalfChoosePFourths = binomial_wrapper((k + l) / 2, p / 4); - NTL::RR kPlusLHalfChoosePFourths_real = - NTL::to_RR(kPlusLHalfChoosePFourths); - NTL::RR minOperandRight, min; - NTL::RR PChoosePHalf = NTL::to_RR(binomial_wrapper(p, p / 2)); - NTL::RR kPlusLChoosePHalf = NTL::to_RR(binomial_wrapper((k + l), p / 2)); - minOperandRight = - NTL::to_RR(binomial_wrapper((k + l) / 2, p / 2)) / PChoosePHalf; - min = kPlusLHalfChoosePFourths_real > minOperandRight - ? 
minOperandRight - : kPlusLHalfChoosePFourths_real; - - /* hoist out anything not depending on l_1/l_2 split*/ -#if defined(EXPLORE_REPRS) - for (l_1 = 1; l_1 <= l; l_1++) { - uint32_t l_2 = l - l_1; -#else - uint32_t l_2 = NTL::conv( - log2_RR(kPlusLHalfChoosePFourths_real / - NTL::to_RR(binomial_wrapper(p, p / 2)))); - /*clamp l_2 to a safe value , 0 < l_2 < l*/ - l_2 = l_2 <= 0 ? 1 : l_2; - l_2 = l_2 >= l ? l - 1 : l_2; - - uint32_t l_1 = l - l_2; -#endif - NTL::RR interm = kPlusLHalfChoosePFourths_real / NTL::power2_RR(l_2) * - NTL::to_RR(p / 2 * l_1); - - NTL::RR otherFactor = (NTL::to_RR(p / 4 * l_2) + interm); - NTL::RR cost_iter = - cost_gje/probability_k_by_k_is_inv(n_real - k_real - l_real) + min * otherFactor + - kPlusLHalfChoosePFourths_real * NTL::to_RR(p / 2 * l_2); - - NTL::RR lastAddend = - otherFactor + kPlusLHalfChoosePFourths_real * kPlusLChoosePHalf * - PChoosePHalf / NTL::power2_RR(l) * - NTL::to_RR(p * (r - l)); - lastAddend = lastAddend * kPlusLHalfChoosePFourths_real; - cost_iter += lastAddend; - // #if 0 - - NTL::RR log_MMT_space = - r_real * n_real + - kPlusLHalfChoosePFourths_real * - (NTL::to_RR(p / 4) * log2_RR(NTL::to_RR(k + l / 2)) + - NTL::to_RR(l_2)) + - NTL::to_RR(min) * (NTL::to_RR(p / 2) * log2_RR(NTL::to_RR(k + l)) + - NTL::to_RR(l)); - log_MMT_space = log2_RR(log_MMT_space); - cost_iter = cost_iter * log_MMT_space; - // #endif - log_cost = log2_RR(num_iter) + log2_RR(cost_iter); - if (min_log_cost > log_cost) { - min_log_cost = log_cost; - best_l = l; -#if defined(EXPLORE_REPRS) - best_l1 = l_1; -#endif - best_p = p; - log_mem_cost = log_MMT_space; - } -#if defined(EXPLORE_REPRS) - } -#endif - } - } - spdlog::info("MMT Best l {}, best p: {}", best_l, best_p); - if (best_p == constrained_max_p) { - spdlog::warn("Warning: p {p} on exploration edge!"); - } - if (best_l == constrained_max_l) { - spdlog::warn("Warning: l {l} on exploration edge!"); - } - Result res; - res.alg_name = "MMT"; - res.params = {{"p", best_p}, {"l", best_l}}; - res.value = NTL::conv(min_log_cost); - res.gje_cost = NTL::conv(log2_RR(cost_gje)); - return res; -} - -#define P_MAX_BJMM 20 // P_MAX_MMT -#define L_MAX_BJMM 90 // L_MAX_MMT -#define Eps1_MAX_BJMM 4 -#define Eps2_MAX_BJMM 4 -Result isd_log_cost_classic_BJMM(const uint32_t n, const uint32_t k, - const uint32_t t) { - NTL::RR n_real = NTL::RR(n); - NTL::RR k_real = NTL::RR(k); - NTL::RR t_real = NTL::RR(t); - uint32_t r = n - k; - NTL::RR r_real = NTL::RR(r); - - NTL::RR min_log_cost = n_real; // unreachable upper bound - NTL::RR log_cost; - std::optional best_p, best_l, best_eps_1, best_eps_2; - uint32_t constrained_max_l, constrained_max_p; - - NTL::RR cost_gje; - constrained_max_p = P_MAX_BJMM > t ? t : P_MAX_BJMM; - /*p should be divisible by 2 in BJMM */ - for (uint32_t p = 2; p < constrained_max_p; p = p + 2) { - /* sweep over all the valid eps1 knowing that p/2 + eps1 should be a - * multiple of 4*/ - constrained_max_l = - (L_MAX_BJMM > (n - k - (t - p)) ? 
(n - k - (t - p)) : L_MAX_BJMM); - for (uint32_t l = 0; l < constrained_max_l; l++) { - for (uint32_t eps1 = 2 + (p % 2); eps1 < Eps1_MAX_BJMM; eps1 = eps1 + 2) { - uint32_t p_1 = p / 2 + eps1; - /* sweep over all the valid eps2 knowing that p_1/2 + eps2 should - * be even */ - for (uint32_t eps2 = (p_1 % 2); eps2 < Eps2_MAX_BJMM; eps2 = eps2 + 2) { - uint32_t p_2 = p_1 / 2 + eps2; - - /* Available parameters p, p_1,p_2,p_3, l */ - NTL::RR l_real = NTL::RR(l); - cost_gje = Fin_Send_rref_red_cost(n_real, n_real - k_real, l_real); - // TODO check why this cost (or the rref cost) is never used - // FS_IS_candidate_cost = - // Fin_Send_IS_candidate_cost(n_real, n_real - k_real, l_real); - uint32_t p_3 = p_2 / 2; - - NTL::ZZ L3_list_len = binomial_wrapper((k + l) / 2, p_3); - NTL::RR L3_list_len_real = NTL::to_RR(L3_list_len); - /* the BJMM number of iterations depends only on L3 parameters - * precompute it */ - NTL::RR num_iter = NTL::to_RR(binomial_wrapper(n, t)) / - NTL::to_RR(binomial_wrapper((k + l), p) * - binomial_wrapper(r - l, t - p)); - NTL::RR P_invalid_splits = NTL::power(L3_list_len_real, 2) / - NTL::to_RR(binomial_wrapper(k + l, p_2)); - num_iter = num_iter / NTL::power(P_invalid_splits, 4); - - /* lengths of lists 2 to 0 have to be divided by the number of - * repr.s*/ - NTL::RR L2_list_len = NTL::to_RR(binomial_wrapper(k + l, p_2)) * - NTL::power(P_invalid_splits, 1); - NTL::RR L1_list_len = NTL::to_RR(binomial_wrapper(k + l, p_1)) * - NTL::power(P_invalid_splits, 2); - /* estimating the range for r_1 and r_2 requires to compute the - * number of representations rho_1 and rho_2 */ - - NTL::ZZ rho_2 = binomial_wrapper(p_1, p_1 / 2) * - binomial_wrapper(k + l - p_1, eps2); - NTL::ZZ rho_1 = - binomial_wrapper(p, p / 2) * binomial_wrapper(k + l - p, eps1); - int min_r2 = NTL::conv(NTL::log(NTL::to_RR(rho_2)) / - NTL::log(NTL::RR(2))); - int max_r1 = NTL::conv(NTL::log(NTL::to_RR(rho_1)) / - NTL::log(NTL::RR(2))); - - /*enumerate r_1 and r_2 over the suggested range - * log(rho_2) < r2 < r_1 < log(rho_1)*/ - /* clamp to safe values */ - min_r2 = min_r2 > 0 ? min_r2 : 1; - max_r1 = max_r1 < (int)l ? 
max_r1 : l - 1; - - NTL::RR p_real = NTL::RR(p); - for (int r_2 = min_r2; r_2 < max_r1 - 1; r_2++) { - for (int r_1 = r_2 + 1; r_1 < max_r1; r_1++) { - - /*add the cost of building Layer 3 to cost_iter */ - NTL::RR cost_iter = - NTL::to_RR(4) * - (k + l + 2 * L3_list_len_real + r_2 + - NTL::power(L3_list_len_real, 2) * NTL::to_RR(2 * p_3 * r_2)); - - /* add the cost of building Layer 2 */ - cost_iter += - 2 * (NTL::power((NTL::to_RR(rho_2) / (NTL::power2_RR(r_2))) * - NTL::power(L3_list_len_real, 2), - 2) * - 2 * p_2 * (r_1 - r_2)); - - /* add the cost of building Layer 1 */ - cost_iter += - NTL::power((NTL::to_RR(rho_1) / NTL::power2_RR(r_1)) * - (NTL::to_RR(rho_2) / NTL::power2_RR(r_2)) * - NTL::power(L3_list_len_real, 2), - 4) * - 2 * p_1 * l; - - /* add the cost of building L0 */ - cost_iter += - p * (r - l) * - NTL::power((NTL::to_RR(rho_1) / NTL::power2_RR(r_1)) * - (NTL::to_RR(rho_2) / NTL::power2_RR(r_2)) * - NTL::power(L3_list_len_real, 2), - 4) / - NTL::to_RR(l); - - log_cost = log2_RR(num_iter) + log2_RR(cost_iter); - - if (min_log_cost > log_cost) { - min_log_cost = log_cost; - best_l = l; - best_p = p; - best_eps_1 = eps1; - best_eps_2 = eps2; - } - } - } - - } /*end of iteration over l */ - /* to review up to to here */ - } /* end for over eps2 */ - } /* end for over eps1 */ - } /* end for over p*/ - - if (!best_l || !best_eps_1 || !best_p || !best_eps_2) { - spdlog::error("Error: One or more variables are not initialized."); - throw std::runtime_error("One or more variables are not initialized."); - } - spdlog::info("BJMM Best l {}, best p: {}, best eps1: {}, best eps2: {}", - optional_to_string(best_l), optional_to_string(best_p), - optional_to_string(best_eps_1), optional_to_string(best_eps_2)); - Result res; - res.alg_name = "BJMM"; - res.params = {{"p", best_p.value()}, - {"l", best_l.value()}, - {"eps1", best_eps_1.value()}, - {"eps2", best_eps_2.value()}}; - res.value = NTL::conv(min_log_cost); - res.gje_cost = NTL::conv(log2_RR(cost_gje)); - return res; -} - -/***************************Quantum ISDs***************************************/ - -const NTL::RR quantum_gauss_red_cost(const NTL::RR &n, const NTL::RR &k) { - // return 0.5* NTL::power(n-k,3) + k*NTL::power((n-k),2); - return 1.5 * NTL::power(n - k, 2) - 0.5 * (n - k); -} - -#define P_MAX_Q_LB 3 // P_MAX_MMT -Result isd_log_cost_quantum_LB(const uint32_t n, const uint32_t k, - const uint32_t t) { - NTL::RR n_real = NTL::RR(n); - NTL::RR k_real = NTL::RR(k); - NTL::RR t_real = NTL::RR(t); - NTL::RR log_pi_fourths = NTL::log(pi * 0.25); - NTL::RR log_pinv = log_probability_k_by_k_is_inv(k_real); - - /* Check https://doi.org/10.1007/978-3-031-61489-7_2 - * for the full measures of the lee-brickell quantum attack - */ - NTL::RR min_log_cost = n_real; // unreachable upper bound - uint32_t p; - std::optional best_p; - for (p = 1; p < P_MAX_Q_LB; p++) { - NTL::RR p_real = NTL::RR(p); - NTL::RR iteration_cost = quantum_gauss_red_cost(n_real, k_real) + - NTL::to_RR(binomial_wrapper(k, p)) * - NTL::log(n_real - k_real) / - NTL::log(NTL::RR(2)); - NTL::RR log_cost = - log_pi_fourths + .5 * (lnBinom(n_real, t_real) - log_pinv - - (lnBinom(k_real, p_real) + - lnBinom(n_real - k_real, t_real - p_real))); - log_cost += NTL::log(iteration_cost); - log_cost = log_cost / NTL::log(NTL::RR(2)); - if (log_cost < min_log_cost) { - min_log_cost = log_cost; - best_p = p; - } - } - if (!best_p) { - spdlog::error("Error: One or more variables are not initialized."); - throw std::runtime_error("One or more variables are not 
initialized."); - } - - Result res; - res.alg_name = "Quantum Lee-Brickell"; - res.params = {{"p", best_p.value()}}; - res.value = NTL::conv(min_log_cost); - return res; -} - -#define MAX_M (t / 2) - -Result isd_log_cost_quantum_stern(const uint32_t n, const uint32_t k, - const uint32_t t) { - NTL::RR n_real = NTL::RR(n); - NTL::RR k_real = NTL::RR(k); - NTL::RR t_real = NTL::RR(t); - NTL::RR current_complexity, log_p_success, c_it, c_dec; - - // Start computing Stern's parameter invariant portions of complexity - NTL::RR log_pi_fourths = NTL::log(pi * 0.25); - // compute the probability of a random k * k being invertible - NTL::RR log_pinv = log_probability_k_by_k_is_inv(k_real); - // compute the cost of inverting the matrix, in a quantum execution env. - NTL::RR c_inv = quantum_gauss_red_cost(n_real, k_real); - - // optimize Stern's parameters : - // m : the # of errors in half of the chosen dimensions - // l : the length of the run of zeroes in the not chosen dimensions - // done via exhaustive parameter space search, minimizing the total - // complexity. - // Initial value set to codeword bruteforce to ensure the minimum is found. - NTL::RR min_stern_complexity = NTL::RR(n) * NTL::log(NTL::RR(2)); - - for (long m = 1; m <= MAX_M; m++) { - NTL::RR m_real = NTL::RR(m); - /* previous best complexity as a function of l alone. - * initialize to bruteforce-equivalent, break optimization loop as soon - * as a minimum is found */ - NTL::RR prev_best_complexity = NTL::RR(t); - for (long l = 0; l < (n - k - (t - 2 * m)); l++) { - - NTL::RR l_real = NTL::RR(l); - log_p_success = lnBinom(t_real, 2 * m_real) + - lnBinom(n_real - t_real, k_real - 2 * m_real) + - lnBinom(2 * m_real, m_real) + - lnBinom(n_real - k_real - t_real + 2 * m_real, l_real); - log_p_success = log_p_success - - (m_real * NTL::log(NTL::RR(4)) + lnBinom(n_real, k_real) + - lnBinom(n_real - k_real, l_real)); - current_complexity = -(log_p_success + log_pinv) * 0.5 + log_pi_fourths; - /* to match specifications , the term should be - * (n_real-k_real), as per in deVries, although - * David Hobach thesis mentions it to be - * (n_real-k_real-l_real), and it seems to match. - * amend specs for the typo. */ - c_it = l_real + (n_real - k_real - l_real) * - NTL::to_RR(binomial_wrapper(k / 2, m)) / - NTL::power2_RR(-l); - - c_it = c_it * 2 * m_real * NTL::to_RR(binomial_wrapper(k / 2, m)); -#if IGNORE_DECODING_COST == 1 - c_dec = 0.0; -#elif IGNORE_DECODING_COST == 0 - /*cost of decoding estimated as per Golomb CWDEC - * decoding an n-bit vector with weight k is - * CWDEC_cost(k,n)=O(n^2 log_2(n)) and following deVries, where - * c_dec = CWDEC_cost(n-k, n) + k + CWDEC_cost(l,n-k)*/ - c_dec = - n_real * n_real * NTL::log(n_real) + k_real + - (n_real - k_real) * (n_real - k_real) * NTL::log((n_real - k_real)); -#endif - current_complexity = current_complexity + NTL::log(c_it + c_inv + c_dec); - if (current_complexity < prev_best_complexity) { - prev_best_complexity = current_complexity; - } else { - break; - } - } - if (current_complexity < min_stern_complexity) { - min_stern_complexity = current_complexity; - } - } - Result res; - res.alg_name = "Quantum Stern"; - res.params = {}; - res.value = NTL::conv(min_stern_complexity / NTL::log(NTL::RR(2.0))); - return res; -} - -/***************************Aggregation ***************************************/ - -double get_qc_red_factor_log(const uint32_t qc_order, const uint32_t is_kra) { - /* For key recovery attacks (CFP) the advantage from quasi-cyclicity is p. 
For - * a message recovery (SDP), the DOOM advantage is sqrt(p). - */ - double qc_red_factor = is_kra ? logl(qc_order) : logl(qc_order) / 2.0; - return qc_red_factor / logl(2); -} +// Classic Result c_isd_log_cost(const uint32_t n, const uint32_t k, const uint32_t t, const uint32_t qc_order, const uint32_t is_kra, - const bool compute_qc_reduction_factor) { - Result current_res, min_res; - double qc_red_factor = - compute_qc_reduction_factor ? get_qc_red_factor_log(qc_order, is_kra) : 0; - - double min_cost = n; // the cost cannot be greater than 2^n - -#if SKIP_PRANGE == 0 - current_res = isd_log_cost_classic_Prange(n, k, t); - current_res.value -= qc_red_factor; - if (current_res.value < min_cost) { - min_res = current_res; - min_cost = current_res.value; - } -#endif - -#if SKIP_LB == 0 - current_res = isd_log_cost_classic_LB(n, k, t); - current_res.value -= qc_red_factor; - if (current_res.value < min_cost) { - min_res = current_res; - min_cost = current_res.value; - } -#endif - -#if SKIP_LEON == 0 - current_res = isd_log_cost_classic_Leon(n, k, t); - current_res.value -= qc_red_factor; - if (current_res.value < min_cost) { - min_res = current_res; - min_cost = current_res.value; - } -#endif - -#if SKIP_STERN == 0 - current_res = isd_log_cost_classic_Stern(n, k, t); - current_res.value -= qc_red_factor; - if (current_res.value < min_cost) { - min_res = current_res; - min_cost = current_res.value; - } -#endif - -#if SKIP_FS == 0 - current_res = isd_log_cost_classic_FS(n, k, t); - current_res.value -= qc_red_factor; - if (current_res.value < min_cost) { - min_res = current_res; - min_cost = current_res.value; - } -#endif - -#if SKIP_MMT == 0 - current_res = isd_log_cost_classic_MMT(n, k, t); - current_res.value -= qc_red_factor; - if (current_res.value < min_cost) { - min_res = current_res; - min_cost = current_res.value; - } -#endif - -#if SKIP_BJMM == 0 - current_res = isd_log_cost_classic_BJMM(n, k, t); - current_res.value -= qc_red_factor; - if (current_res.value < min_cost) { - min_res = current_res; - min_cost = current_res.value; - } -#endif - - return min_res; -} - + const bool compute_qc_reduction_factor); + +Result isd_log_cost_classic_Prange(const uint32_t n, const uint32_t k, const uint32_t t); +Result isd_log_cost_classic_LB(const uint32_t n, const uint32_t k, const uint32_t t); +Result isd_log_cost_classic_Leon(const uint32_t n, const uint32_t k, const uint32_t t); +Result isd_log_cost_classic_Stern(const uint32_t n, const uint32_t k, const uint32_t t); +Result isd_log_cost_classic_FS(const uint32_t n, const uint32_t k, const uint32_t t); +Result isd_log_cost_classic_MMT(const uint32_t n, const uint32_t k, const uint32_t t); +Result isd_log_cost_classic_BJMM_approx(const uint32_t n, const uint32_t k, const uint32_t t); +Result isd_log_cost_classic_BJMM(const uint32_t n, const uint32_t k, const uint32_t t); + +// Quantum Result q_isd_log_cost(const uint32_t n, const uint32_t k, const uint32_t t, const uint32_t qc_order, const uint32_t is_kra, - const bool compute_qc_reduction_factor) { - Result current_res, min_res; - double min_cost = n; // cannot be greater than n - double qc_red_factor = - compute_qc_reduction_factor ? 
get_qc_red_factor_log(qc_order, is_kra) : 0; - - /* This is just a quick hack since experiments says that p = 1 is - * the optimal value at least for the NIST code-based finalists - */ -#if SKIP_Q_LB == 0 - current_res = isd_log_cost_quantum_LB(n, k, t); - current_res.value -= qc_red_factor; - if (current_res.value < min_cost) { - min_res = current_res; - min_cost = current_res.value; - } -#endif + const bool compute_qc_reduction_factor); -#if SKIP_Q_STERN == 0 - current_res = isd_log_cost_classic_Stern(n, k, t); - current_res.value -= qc_red_factor; - if (current_res.value < min_cost) { - min_res = current_res; - min_cost = current_res.value; - } -#endif - - return min_res; -} +Result isd_log_cost_quantum_LB(const uint32_t n, const uint32_t k, + const uint32_t t); +Result isd_log_cost_quantum_Stern(const uint32_t n, const uint32_t k, + const uint32_t t); diff --git a/include/logging.hpp b/include/logging.hpp index 420386d..1e16c53 100644 --- a/include/logging.hpp +++ b/include/logging.hpp @@ -1,57 +1,18 @@ #pragma once #include -#include #include #include #include -void configure_logger(const std::optional filename) { - // Initialize the logger - const std::string ff = filename.has_value() ? filename.value(): "logs/default.log"; - auto logger = spdlog::basic_logger_mt("default_logger", ff); - spdlog::set_default_logger(logger); +void configure_logger(const std::optional filename); - // Retrieve the environment variable for log level - const char *log_level_env = std::getenv("LOG_LEVEL"); +std::string optional_to_string(const std::optional &opt); - if (log_level_env) { - std::string log_level_str(log_level_env); - - // Configure the log level based on the environment variable - if (log_level_str == "trace") { - spdlog::set_level(spdlog::level::trace); - } else if (log_level_str == "debug") { - spdlog::set_level(spdlog::level::debug); - } else if (log_level_str == "info") { - spdlog::set_level(spdlog::level::info); - } else if (log_level_str == "warn") { - spdlog::set_level(spdlog::level::warn); - } else if (log_level_str == "err") { - spdlog::set_level(spdlog::level::err); - } else if (log_level_str == "critical") { - spdlog::set_level(spdlog::level::critical); - } else { - spdlog::set_level(spdlog::level::info); // Default level - } - } else { - spdlog::set_level(spdlog::level::info); // Default level if environment - // variable is not set - } -} - -std::string optional_to_string(const std::optional &opt) { - if (opt) { - return std::to_string(*opt); - } else { - return "Not Initialized"; - } -} - -template std::string array_to_string(const T *array, size_t size) { +template std::string array_to_string(const std::vector &vec) { std::string result = "["; - for (size_t i = 0; i < size; ++i) { - result += std::to_string(array[i]); - if (i < size - 1) { + for (size_t i = 0; i < vec.size(); ++i) { + result += std::to_string(vec[i]); + if (i < vec.size() - 1) { result += ", "; } } @@ -59,11 +20,11 @@ template std::string array_to_string(const T *array, size_t size) { return result; } -template std::string array_to_string(const std::vector &vec) { +template std::string array_to_string(const T *array, size_t size) { std::string result = "["; - for (size_t i = 0; i < vec.size(); ++i) { - result += std::to_string(vec[i]); - if (i < vec.size() - 1) { + for (size_t i = 0; i < size; ++i) { + result += std::to_string(array[i]); + if (i < size - 1) { result += ", "; } } diff --git a/include/proper_primes.hpp b/include/proper_primes.hpp index 38f54e6..8c09d39 100644 --- a/include/proper_primes.hpp +++ 
b/include/proper_primes.hpp @@ -2,7 +2,7 @@ #include #define PRIMES_NO 5483 -uint32_t proper_primes[5483] = { +const uint32_t proper_primes[5483] = { 3, 5, 11, diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 91ed8bb..d7e013b 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -1,10 +1,10 @@ # Define the executable targets set(TARGETS constant_weight_encodable_bits - enumeration_complexity - parameter_generator - work_factor_computation - work_factor_computation_parallel + # enumeration_complexity + # parameter_generator + # work_factor_computation + # work_factor_computation_parallel ) # Libraries @@ -26,12 +26,22 @@ message(STATUS "fmt library: ${fmt_LIBRARIES}") # Define libraries set(LIBS ${NTL_LIB} ${GMP_LIB} ${M_LIB} spdlog::spdlog fmt::fmt) -foreach(target ${TARGETS}) - add_executable(${target} ${target}.cpp) - target_link_libraries(${target} PRIVATE ${LIBS}) -endforeach() +set(target constant_weight_encodable_bits) +add_executable(${target} ${target}.cpp binomials.cpp) +target_link_libraries(${target} PRIVATE ${LIBS}) -# Special case for work_factor_computation_parallel with OpenMP -target_link_libraries(work_factor_computation_parallel PRIVATE OpenMP::OpenMP_CXX) +set(target enumeration_complexity) +add_executable(${target} ${target}.cpp binomials.cpp) +target_link_libraries(${target} PRIVATE ${LIBS}) -# message(STATUS "m library: ${M_LIBRARIES}") +set(target parameter_generator) +add_executable(${target} ${target}.cpp binomials.cpp logging.cpp bit_error_probabilities.cpp isd_cost_estimate.cpp) +target_link_libraries(${target} PRIVATE ${LIBS}) + +set(target work_factor_computation) +add_executable(${target} ${target}.cpp binomials.cpp logging.cpp isd_cost_estimate.cpp) +target_link_libraries(${target} PRIVATE ${LIBS}) + +set(target work_factor_computation_parallel) +add_executable(${target} ${target}.cpp binomials.cpp logging.cpp isd_cost_estimate.cpp) +target_link_libraries(${target} PRIVATE ${LIBS} OpenMP::OpenMP_CXX) diff --git a/src/binomials.cpp b/src/binomials.cpp new file mode 100644 index 0000000..39c7389 --- /dev/null +++ b/src/binomials.cpp @@ -0,0 +1,85 @@ +#include "binomials.hpp" + +NTL::RR pi; +NTL::RR nat_log_2; +NTL::Mat binomial_table; +NTL::Mat low_k_binomial_table; + +NTL::RR log2_RR(NTL::RR v){ return NTL::log(v) / nat_log_2; } + +void InitConstants(){ + nat_log_2 = NTL::log(NTL::RR(2)); + pi = NTL::ComputePi_RR(); +} + +/*NOTE: NTL allows to access matrices as 1- based with Matlab notation */ +void InitBinomials() { + std::cerr << "Precomputing n-choose-t up to n: " << MAX_N << " t: " << MAX_T + << std::endl; + binomial_table.SetDims(MAX_N + 1, MAX_T + 1); + binomial_table[0][0] = NTL::ZZ(1); + for (unsigned i = 1; i <= MAX_N; i++) { + binomial_table[i][0] = NTL::ZZ(1); + binomial_table[i][1] = NTL::ZZ(i); + for (unsigned j = 2; (j <= i) && (j <= MAX_T); j++) { + binomial_table[i][j] = + binomial_table[i][j - 1] * NTL::ZZ(i - j + 1) / NTL::ZZ(j); + } + } + + std::cerr << "Precomputing low n-choose-t up to n: " << LOW_K_MAX_N + << " t: " << LOW_K_MAX_T << std::endl; + low_k_binomial_table.SetDims(LOW_K_MAX_N + 1, LOW_K_MAX_T + 1); + low_k_binomial_table[0][0] = NTL::ZZ(1); + for (unsigned i = 0; i <= LOW_K_MAX_N; i++) { + low_k_binomial_table[i][0] = NTL::ZZ(1); + low_k_binomial_table[i][1] = NTL::ZZ(i); + for (unsigned j = 2; (j <= i) && (j <= LOW_K_MAX_T); j++) { + low_k_binomial_table[i][j] = + low_k_binomial_table[i][j - 1] * NTL::ZZ(i - j + 1) / NTL::ZZ(j); + } + } + std::cerr << "done" << std::endl; +} + +NTL::RR lnFactorial(NTL::RR n){ 
+ /* log of Stirling series approximated to the fourth term + * n log(n) - n + 1/2 log(2 \pi n) + log(- 139/(51840 n^3) + + * + 1/(288 n^2) + 1/(12 n) + 1) */ + return n * NTL::log(n) - n + 0.5 * NTL::log(2*pi*n) + + NTL::log( - NTL::RR(139)/(n*n*n * 51840) + + NTL::RR(1)/(n*n*288) + + NTL::RR(1)/(n*12) + + 1); +} + +NTL::RR lnBinom(NTL::RR n, NTL::RR k){ + if ( (k == NTL::RR(0) ) || (k == n) ) { + return NTL::RR(0); + } + return lnFactorial(n) - (lnFactorial(k) + lnFactorial(n-k) ); +} + + +NTL::ZZ binomial_wrapper(long n, long k){ + if(k>n) return NTL::ZZ(0); + /* employ memoized if available */ + if ((n <= MAX_N) && (k < MAX_T)){ + return binomial_table[n][k]; + } + if ((n <= LOW_K_MAX_N) && (k < LOW_K_MAX_T)){ + return low_k_binomial_table[n][k]; + } + + /* shortcut computation for fast cases (k < 10) where + * Stirling may not provide good approximations */ + if (k < 10) { + NTL::ZZ result = NTL::ZZ(1); + for(int i = 1 ; i <= k; i++){ + result = (result * (n+1-i))/i; + } + return result; + } + /*Fall back to Stirling*/ + return NTL::conv( NTL::exp( lnBinom(NTL::RR(n),NTL::RR(k)) )); +} diff --git a/src/bit_error_probabilities.cpp b/src/bit_error_probabilities.cpp new file mode 100644 index 0000000..bafd880 --- /dev/null +++ b/src/bit_error_probabilities.cpp @@ -0,0 +1,289 @@ +#include "bit_error_probabilities.hpp" + +// choice of the approximation praxis for the estimated fraction of an error +// to appear in the next iteration of a bit-flipping decoder + +/* Probability that a variable node is correct, and a parity equation involving + * it is satisfied */ +NTL::RR compute_p_cc(const uint64_t d_c, const uint64_t n, const uint64_t t) { + NTL::RR result = NTL::RR(0); + uint64_t bound = (d_c - 1) < t ? d_c - 1 : t; + + /* the number of errors falling in the PC equation should be at least + * the amount which cannot be placed in a non checked place */ + uint64_t LowerTHitBound = (n - d_c) < t ? t - (n - d_c) : 0; + /* and it should be even, since the PC equation must be satisfied */ + LowerTHitBound = LowerTHitBound % 2 ? LowerTHitBound + 1 : LowerTHitBound; + + for (uint64_t j = LowerTHitBound; j <= bound; j = j + 2) { + result += + to_RR(binomial_wrapper(d_c - 1, j) * binomial_wrapper(n - d_c, t - j)) / + to_RR(binomial_wrapper(n - 1, t)); + } + return result; +} + +/* Probability that a variable node is correct, and a parity equation involving + * it is *not* satisfied */ +NTL::RR compute_p_ci(const uint64_t d_c, const uint64_t n, const uint64_t t) { + NTL::RR result = NTL::RR(0); + uint64_t bound = (d_c - 1) < t ? d_c - 1 : t; + + /* the number of errors falling in the PC equation should be at least + * the amount which cannot be placed in a non checked place */ + uint64_t LowerTHitBound = (n - d_c) < t ? t - (n - d_c) : 1; + /* and it should be odd, since the PC equation must be non satisfied */ + LowerTHitBound = LowerTHitBound % 2 ? LowerTHitBound : LowerTHitBound + 1; + + for (uint64_t j = LowerTHitBound; j <= bound; j = j + 2) { + result += + to_RR(binomial_wrapper(d_c - 1, j) * binomial_wrapper(n - d_c, t - j)) / + to_RR(binomial_wrapper(n - 1, t)); + } + return result; +} + +/* Probability that a variable node is *not* correct, and a parity equation + * involving it is *not* satisfied */ +NTL::RR compute_p_ic(const uint64_t d_c, const uint64_t n, const uint64_t t) { + NTL::RR result = NTL::RR(0); + uint64_t UpperTBound = (d_c - 1) < t - 1 ? 
d_c - 1 : t - 1; + + /* the number of errors falling in the PC equation should be at least + * the amount which cannot be placed in a non checked place */ + uint64_t LowerTHitBound = + (n - d_c - 1) < (t - 1) ? (t - 1) - (n - d_c - 1) : 0; + /* and it should be even, since the PC equation must be unsatisfied (when + * accounting for the one we are considering as already placed*/ + LowerTHitBound = LowerTHitBound % 2 ? LowerTHitBound + 1 : LowerTHitBound; + + for (uint64_t j = LowerTHitBound; j <= UpperTBound; j = j + 2) { + result += NTL::to_RR(binomial_wrapper(d_c - 1, j) * + binomial_wrapper(n - d_c, t - j - 1)) / + to_RR(binomial_wrapper(n - 1, t - 1)); + } + return result; +} + +/* Probability that a variable node is *not* correct, and a parity equation + * involving it is satisfied */ +NTL::RR compute_p_ii(const uint64_t d_c, const uint64_t n, const uint64_t t) { + + NTL::RR result = NTL::RR(0); + uint64_t bound = (d_c - 1) < t - 1 ? d_c - 1 : t - 1; + + /* the number of errors falling in the PC equation should be at least + * the amount which cannot be placed in a non checked place */ + uint64_t LowerTHitBound = (n - d_c) < (t - 1) ? (t - 1) - (n - d_c) : 1; + /* and it should be odd, since the PC equation must be satisfied (when + * accounting for the one we are considering as already placed)*/ + LowerTHitBound = LowerTHitBound % 2 ? LowerTHitBound : LowerTHitBound + 1; + for (uint64_t j = LowerTHitBound; j <= bound; j = j + 2) { + result += NTL::to_RR(binomial_wrapper(d_c - 1, j) * + binomial_wrapper(n - d_c, t - j - 1)) / + to_RR(binomial_wrapper(n - 1, t - 1)); + } + return result; +} + +/* note p_cc + p_ci = 1 */ +/* note p_ic + p_ii = 1 */ + +/* Probability that a given erroneous variable is deemed as such, and is thus + * corrected, given a threshold for the amount of unsatisfied parity check + * equations. Called P_ic in most texts */ +NTL::RR ComputePrBitCorrection(const NTL::RR p_ic, const uint64_t d_v, + // const uint64_t t, + const uint64_t threshold) { + // Pic=0; /* p_correct */ + // for (j=b,dv, + // term=binomial(dv,j)*(p_ic^j)*(1-p_ic)^(dv-j); + // Pic=Pic+term; + // ); + NTL::RR result = NTL::RR(0), success, failure; + for (uint64_t j = threshold; j <= d_v; j++) { + NTL::pow(success, p_ic, NTL::to_RR(j)); + NTL::pow(failure, NTL::RR(1) - p_ic, NTL::to_RR(d_v - j)); + result += NTL::to_RR(binomial_wrapper(d_v, j)) * success * failure; + } + return result; +} + +/* Probability that a given correct variable is not deemed as such, and is thus + * fault-induced, given a threshold for the amount of unsatisfied parity check + * equations. 
Called P_ci in most texts, p_induce in official comment */ +NTL::RR ComputePrBitFaultInduction(const NTL::RR p_ci, const uint64_t d_v, + // const uint64_t t, /* unused */ + const uint64_t threshold) { + + NTL::RR result = NTL::RR(0), success, failure; + for (uint64_t j = threshold; j <= d_v; j++) { + NTL::pow(success, p_ci, NTL::to_RR(j)); + NTL::pow(failure, NTL::RR(1) - p_ci, NTL::to_RR(d_v - j)); + result += NTL::to_RR(binomial_wrapper(d_v, j)) * success * failure; + } + return result; +} + +/* computes the probability that toCorrect bits are corrected + * known as P{N_ic = toCorrect} */ +NTL::RR ComputePrBitCorrectionMulti(const NTL::RR p_ic, const uint64_t d_v, + const uint64_t t, const uint64_t threshold, + const uint64_t toCorrect) { + NTL::RR ProbCorrectOne = ComputePrBitCorrection(p_ic, d_v, threshold); + return NTL::to_RR(binomial_wrapper(t, toCorrect)) * + NTL::pow(ProbCorrectOne, NTL::RR(toCorrect)) * + NTL::pow(1 - ProbCorrectOne, NTL::RR(t - toCorrect)); +} + +/* computes the probability that toInduce faults are induced + * known as P{N_ci = toInduce} or Pr{f_wrong = to_induce} */ +NTL::RR ComputePrBitInduceMulti(const NTL::RR p_ci, const uint64_t d_v, + const uint64_t t, const uint64_t n, + const uint64_t threshold, + const uint64_t toInduce) { + // if(toInduce <= 1 ){ + // return NTL::RR(0); + // } + NTL::RR ProbInduceOne = ComputePrBitFaultInduction(p_ci, d_v, threshold); + return NTL::to_RR(binomial_wrapper(n - t, toInduce)) * + NTL::pow(ProbInduceOne, NTL::RR(toInduce)) * + NTL::pow(1 - ProbInduceOne, NTL::RR(n - t - toInduce)); +} + +uint64_t FindNextNumErrors(const uint64_t n_0, const uint64_t p, + const uint64_t d_v, const uint64_t t) { + NTL::RR p_ci, p_ic; + p_ci = compute_p_ci(n_0 * d_v, n_0 * p, t); + p_ic = compute_p_ic(n_0 * d_v, n_0 * p, t); + uint64_t t_next = t; + // uint64_t best_threshold = (d_v - 1)/2; + for (uint64_t i = (d_v - 1) / 2; i <= d_v - 1; i++) { + NTL::RR t_approx = t - t * ComputePrBitCorrection(p_ic, d_v, i) + + (n_0 * p - t) * ComputePrBitFaultInduction(p_ci, d_v, i); + unsigned long int t_curr = + NTL::conv(NTL::ROUNDING_PRAXIS(t_approx)); + /*Note : we increase the threshold only if it improves strictly on the + * predicted error correction. */ + if (t_curr < t_next) { + t_next = t_curr; + // best_threshold = i; + } + } + /* considering that any code will correct a single bit error, if + * t_next == 1, we save a computation iteration and shortcut to t_next == 0*/ + if (t_next == 1) { + t_next = 0; + } + return t_next; +} + +/* computes the exact 1-iteration DFR and the best threshold on the number of + * upcs to achieve it */ +std::pair Find1IterDFR(const uint64_t n_0, const uint64_t p, + const uint64_t d_v, + const uint64_t t) { + NTL::RR p_ci, p_ic, P_correct, P_induce; + NTL::RR DFR, best_DFR = NTL::RR(1); + p_ci = compute_p_ci(n_0 * d_v, n_0 * p, t); + p_ic = compute_p_ic(n_0 * d_v, n_0 * p, t); + uint64_t best_threshold = (d_v - 1) / 2; + for (uint64_t b = best_threshold; b <= d_v - 1; b++) { + DFR = NTL::RR(1) - ComputePrBitCorrectionMulti(p_ic, d_v, t, b, t) * + ComputePrBitInduceMulti(p_ci, d_v, t, n_0 * p, b, 0); + /*Note : we increase the threshold only if it improves strictly on the + * predicted error correction. */ + if (DFR < best_DFR) { + best_DFR = DFR; + best_threshold = b; + } + } + // std::cout << best_threshold << std::endl; + return std::make_pair(best_DFR, best_threshold); +} + +/* computes the exact 1-iteration probability of leaving at most t_leftover + * uncorrected errors out of t. 
*/ +std::pair +Find1IterTLeftoverPr(const uint64_t n_0, const uint64_t p, const uint64_t d_v, + const uint64_t t, const uint64_t t_leftover) { + NTL::RR p_ci, p_ic; + NTL::RR DFR, best_DFR = NTL::RR(1); + p_ci = compute_p_ci(n_0 * d_v, n_0 * p, t); + p_ic = compute_p_ic(n_0 * d_v, n_0 * p, t); + int n = p * n_0; + uint64_t best_threshold = (d_v + 1) / 2; + + for (uint64_t b = best_threshold; b <= d_v; b++) { + DFR = NTL::RR(0); + NTL::RR P_correct = ComputePrBitCorrection(p_ic, d_v, b); + NTL::RR P_induce = ComputePrBitFaultInduction(p_ci, d_v, b); + for (int tau = 0; tau <= t_leftover; tau++) { + for (int n_to_induce = 0; n_to_induce <= t_leftover; n_to_induce++) { + NTL::RR prob_induce_n = + NTL::to_RR(binomial_wrapper(n - t, n_to_induce)) * + NTL::pow(P_induce, NTL::to_RR(n_to_induce)) * + NTL::pow(NTL::RR(1) - P_induce, NTL::to_RR(n - t - n_to_induce)); + int n_to_correct = (int)t + n_to_induce - tau; + NTL::RR prob_correct_n = NTL::to_RR(binomial_wrapper(t, n_to_correct)); + prob_correct_n *= NTL::pow(P_correct, NTL::to_RR(n_to_correct)); + + prob_correct_n *= + NTL::pow(NTL::RR(1) - P_correct, + NTL::to_RR((int)t - n_to_correct)); /*unsigned exp?*/ + DFR += prob_correct_n * prob_induce_n; + } + } + DFR = NTL::RR(1) - DFR; + if (DFR < best_DFR) { + best_DFR = DFR; + best_threshold = b; + } + } + return std::make_pair(best_DFR, best_threshold); +} + +// find minimum p which, asymptotically, corrects all errors +// search performed via binary search as the DFR is decreasing monot. +// in of p +uint64_t Findpth(const uint64_t n_0, const uint64_t d_v_prime, + const uint64_t t){ + +unsigned int prime_idx = 0, prime_idx_prec; +uint64_t p = proper_primes[prime_idx]; +while (p < d_v_prime || p < t) { + prime_idx++; + p = proper_primes[prime_idx]; + } + + uint64_t hi, lo; + lo = prime_idx; + hi = PRIMES_NO; + prime_idx_prec = lo; + + uint64_t limit_error_num = t; + while (hi - lo > 1) { + prime_idx_prec = prime_idx; + prime_idx = (lo + hi) / 2; + p = proper_primes[prime_idx]; + // compute number of remaining errors after +infty iters + limit_error_num = t; + uint64_t current_error_num; + // std::cout << "using p:"<< p << ", errors dropping as "; + do { + current_error_num = limit_error_num; + limit_error_num = FindNextNumErrors(n_0, p, d_v_prime, current_error_num); + // std::cout << limit_error_num << " "; + } while ((limit_error_num != current_error_num) && (limit_error_num != 0)); + // std::cout << std::endl; + if (limit_error_num > 0) { + lo = prime_idx; + } else { + hi = prime_idx; + } + } + if (limit_error_num == 0) { + return proper_primes[prime_idx]; + } + return proper_primes[prime_idx_prec]; +} diff --git a/src/constant_weight_encodable_bits.cpp b/src/constant_weight_encodable_bits.cpp index b0d80dd..7627bda 100644 --- a/src/constant_weight_encodable_bits.cpp +++ b/src/constant_weight_encodable_bits.cpp @@ -12,9 +12,9 @@ int main(int argc, char* argv[]){ return -1; } + InitConstants(); InitBinomials(); NTL::RR::SetPrecision(NUM_BITS_REAL_MANTISSA); - pi = NTL::ComputePi_RR(); uint32_t n = atoi(argv[1]); uint32_t t = atoi(argv[2]); /* reduce by a factor matching the QC block size */ diff --git a/src/enumeration_complexity.cpp b/src/enumeration_complexity.cpp index 7bf0a0b..6de529e 100644 --- a/src/enumeration_complexity.cpp +++ b/src/enumeration_complexity.cpp @@ -15,9 +15,9 @@ int main(int argc, char* argv[]){ return -1; } + InitConstants(); InitBinomials(); NTL::RR::SetPrecision(NUM_BITS_REAL_MANTISSA); - pi = NTL::ComputePi_RR(); uint32_t p = atoi(argv[1]); uint32_t d_v = 
atoi(argv[2]); uint32_t n_0 = atoi(argv[3]); diff --git a/src/isd_cost_estimate.cpp b/src/isd_cost_estimate.cpp new file mode 100644 index 0000000..b9363d2 --- /dev/null +++ b/src/isd_cost_estimate.cpp @@ -0,0 +1,803 @@ +#include "isd_cost_estimate.hpp" +#include "binomials.hpp" +#include "logging.hpp" +#include +#include + +/***************************Classic ISDs***************************************/ + +Result isd_log_cost_classic_BJMM_approx(const uint32_t n, const uint32_t k, + const uint32_t t) { + Result result; + result.alg_name = "BJMM"; + result.params = {{"approx", true}}; + result.value = ((double)t) * -log((1.0 - (double)k / (double)n)) / log(2); + return result; +} + +// computes the probability of a random k * k being invertible +const NTL::RR log_probability_k_by_k_is_inv(const NTL::RR &k) { + if (k >= 100) + return NTL::RR(-1.79191682); + NTL::RR log_pinv = NTL::RR(-1); + for (long i = 2; i <= k; i++) { + log_pinv = + log_pinv + log2_RR(NTL::RR(1) - NTL::power2_RR(-i)); + } + return log_pinv; +} + +const NTL::RR probability_k_by_k_is_inv(const NTL::RR &k) { + if (k >= 100) + return NTL::RR(0.288788095); + NTL::RR log_pinv = NTL::RR(0.5); + for (long i = 2; i <= k; i++) { + log_pinv = log_pinv * (NTL::RR(1) - NTL::power2_RR(-i)); + } + return log_pinv; +} + +const NTL::RR classic_rref_red_cost(const NTL::RR &n, const NTL::RR &r) { + /* simple reduced row echelon form transform, as it is not likely to be the + * bottleneck */ + NTL::RR k = n - r; + return r * r * n / NTL::RR(2) + (n * r) / NTL::RR(2) - + r * r * r / NTL::RR(6) + r * r + r / NTL::RR(6) - NTL::RR(1); +} + +// const NTL::RR classic_IS_candidate_cost(const NTL::RR &n, const NTL::RR &r) { +// NB: r* r should be added only for SDP, and even there it can be omitted since the syndrome can be thought as another column of H +// return classic_rref_red_cost(n, r) / probability_k_by_k_is_inv(r) + r * r; +// } + +const NTL::RR Fin_Send_rref_red_cost(const NTL::RR &n, const NTL::RR &r, + const NTL::RR l) { + /* reduced size reduced row echelon form transformation, only yields an + * (r-l) sized identity matrix */ + NTL::RR k = n - r; + return -l * l * l / NTL::RR(3) - l * l * n / NTL::RR(2) + + l * l * r / NTL::RR(2) - 3 * l * l / NTL::RR(2) - + 3 * l * n / NTL::RR(2) + l * r / NTL::RR(2) - 13 * l / NTL::RR(6) + + n * r * r / NTL::RR(2) + n * r / NTL::RR(2) - r * r * r / NTL::RR(6) + + r * r + r / NTL::RR(6) - NTL::RR(1); +} + +// const NTL::RR Fin_Send_IS_candidate_cost(const NTL::RR &n, const NTL::RR &r, +// const NTL::RR &l) { +// return Fin_Send_rref_red_cost(n, r, l) / probability_k_by_k_is_inv(r - l) + +// r * r; +// } + +Result isd_log_cost_classic_Prange(const uint32_t n, const uint32_t k, + const uint32_t t) { + NTL::RR n_real = NTL::RR(n); + NTL::RR k_real = NTL::RR(k); + NTL::RR t_real = NTL::RR(t); + + // NTL::RR cost_iter = classic_IS_candidate_cost(n_real, n_real - k_real); + NTL::RR cost_gje = classic_rref_red_cost(n_real, k_real); + NTL::RR num_iter = NTL::to_RR(binomial_wrapper(n, t)) / + NTL::to_RR(binomial_wrapper(n - k, t)); + + NTL::RR log_cost = log2_RR(num_iter) - log_probability_k_by_k_is_inv(n_real - k_real) + log2_RR(cost_gje); + + Result res; + res.alg_name = "Prange"; + res.params = {}; + res.value = NTL::conv(log_cost); + res.gje_cost = NTL::conv(log2_RR(cost_gje)); + return res; +} + +#define P_MAX_LB 20 +Result isd_log_cost_classic_LB(const uint32_t n, const uint32_t k, + const uint32_t t) { + NTL::RR n_real = NTL::RR(n); + NTL::RR k_real = NTL::RR(k); + NTL::RR t_real = NTL::RR(t); + 
NTL::RR min_log_cost = n_real; // unreachable upper bound + NTL::RR log_cost; + uint32_t best_p = 1; + uint32_t constrained_max_p = P_MAX_LB > t ? t : P_MAX_LB; + + NTL::RR cost_gje = classic_rref_red_cost(n_real, k_real); + // IS_candidate_cost = classic_IS_candidate_cost(n_real, n_real - k_real); + + for (uint32_t p = 1; p < constrained_max_p; p++) { + NTL::RR p_real = NTL::RR(p); + NTL::RR cost_iter = cost_gje / probability_k_by_k_is_inv(n_real - k_real) + + NTL::to_RR(binomial_wrapper(k, p) * p * (n - k)); + NTL::RR num_iter = + NTL::to_RR(binomial_wrapper(n, t)) / + NTL::to_RR(binomial_wrapper(k, p) * binomial_wrapper(n - k, t - p)); + log_cost = + (NTL::log(num_iter) + NTL::log(cost_iter)) / NTL::log(NTL::RR(2)); + if (min_log_cost > log_cost) { + min_log_cost = log_cost; + best_p = p; + } + } + spdlog::info("Lee-Brickell best p: {}", best_p); + Result res; + res.alg_name = "Lee-Brickell"; + res.params = {{"p", best_p}}; + res.value = NTL::conv(min_log_cost); + res.gje_cost = NTL::conv(log2_RR(cost_gje)); + return res; +} + +#define P_MAX_Leon P_MAX_LB +#define L_MAX_Leon 200 +Result isd_log_cost_classic_Leon(const uint32_t n, const uint32_t k, + const uint32_t t) { + NTL::RR n_real = NTL::RR(n); + NTL::RR k_real = NTL::RR(k); + NTL::RR t_real = NTL::RR(t); + NTL::RR min_log_cost = n_real; // unreachable upper bound + NTL::RR log_cost; + uint32_t best_l = 0, best_p = 1, constrained_max_l, constrained_max_p; + + NTL::RR gje_cost = classic_rref_red_cost(n_real, n_real - k_real); + // IS_candidate_cost = classic_IS_candidate_cost(n_real, n_real - k_real); + constrained_max_p = P_MAX_Leon > t ? t : P_MAX_Leon; + for (uint32_t p = 1; p < constrained_max_p; p++) { + constrained_max_l = + (L_MAX_Leon > (n - k - (t - p)) ? (n - k - (t - p)) : L_MAX_Leon); + NTL::RR p_real = NTL::RR(p); + for (uint32_t l = 0; l < constrained_max_l; l++) { + NTL::RR KChooseP = NTL::to_RR(binomial_wrapper(k, p)); + NTL::RR cost_iter = + gje_cost / probability_k_by_k_is_inv(n_real - k_real) + KChooseP * p_real * NTL::to_RR(l) + + (KChooseP / NTL::power2_RR(l)) * NTL::RR(p * (n - k - l)); + NTL::RR num_iter = NTL::to_RR(binomial_wrapper(n, t)) / + NTL::to_RR(binomial_wrapper(k, p) * + binomial_wrapper(n - k - l, t - p)); + log_cost = + (NTL::log(num_iter) + NTL::log(cost_iter)) / NTL::log(NTL::RR(2)); + if (min_log_cost > log_cost) { + min_log_cost = log_cost; + best_l = l; + best_p = p; + } + } + } + spdlog::info("Leon Best l {} best p: {}", best_l, best_p); + Result res; + res.alg_name = "Lee-Brickell"; + res.params = {{"p", best_p}, {"l", best_l}}; + res.value = NTL::conv(min_log_cost); + res.gje_cost = NTL::conv(log2_RR(gje_cost)); + return res; +} + +#define P_MAX_Stern P_MAX_Leon +#define L_MAX_Stern L_MAX_Leon +Result isd_log_cost_classic_Stern(const uint32_t n, const uint32_t k, + const uint32_t t) { + NTL::RR n_real = NTL::RR(n); + NTL::RR k_real = NTL::RR(k); + NTL::RR t_real = NTL::RR(t); + NTL::RR min_log_cost = n_real; // unreachable upper bound + NTL::RR log_cost; + uint32_t best_l = 0, best_p = 2, constrained_max_l, constrained_max_p; + + NTL::RR gje_cost = classic_rref_red_cost(n_real, n_real - k_real); + // IS_candidate_cost = classic_IS_candidate_cost(n_real, n_real - k_real); + + constrained_max_p = P_MAX_Stern > t ? t : P_MAX_Stern; + for (uint32_t p = 2; p < constrained_max_p; p = p + 2) { + constrained_max_l = + (L_MAX_Stern > (n - k - (t - p)) ? 
(n - k - (t - p)) : L_MAX_Stern); + NTL::ZZ kHalfChoosePHalf; + for (uint32_t l = 0; l < constrained_max_l; l++) { + NTL::RR p_real = NTL::RR(p); + kHalfChoosePHalf = binomial_wrapper(k / 2, p / 2); + NTL::RR kHalfChoosePHalf_real = NTL::to_RR(kHalfChoosePHalf); + + NTL::RR cost_iter = + gje_cost/ probability_k_by_k_is_inv(n_real - k_real) + + kHalfChoosePHalf_real * (NTL::to_RR(l) * p_real + + (kHalfChoosePHalf_real / NTL::power2_RR(l)) * + NTL::RR(p * (n - k - l))); + // #if LOG_COST_CRITERION == 1 + NTL::RR log_stern_list_size = + kHalfChoosePHalf_real * + (p_real / NTL::RR(2) * NTL::log(k_real / NTL::RR(2)) / + NTL::log(NTL::RR(2)) + + NTL::to_RR(l)); + log_stern_list_size = + NTL::log(log_stern_list_size) / NTL::log(NTL::RR(2)); + cost_iter = cost_iter * log_stern_list_size; + // #endif + NTL::RR num_iter = NTL::to_RR(binomial_wrapper(n, t)) / + NTL::to_RR(kHalfChoosePHalf * kHalfChoosePHalf * + binomial_wrapper(n - k - l, t - p)); + log_cost = log2_RR(num_iter) + log2_RR(cost_iter); + if (min_log_cost > log_cost) { + min_log_cost = log_cost; + best_l = l; + best_p = p; + } + } + } + + spdlog::info("Stern Best l {}, best p: {}", best_l, best_p); + Result res; + res.alg_name = "Stern"; + res.params = {{"p", best_p}, {"l", best_l}}; + res.value = NTL::conv(min_log_cost); + res.gje_cost = NTL::conv(log2_RR(gje_cost)); + return res; +} + +#define P_MAX_FS P_MAX_Stern +#define L_MAX_FS L_MAX_Stern +Result isd_log_cost_classic_FS(const uint32_t n, const uint32_t k, + const uint32_t t) { + NTL::RR n_real = NTL::RR(n); + NTL::RR k_real = NTL::RR(k); + NTL::RR t_real = NTL::RR(t); + NTL::RR min_log_cost = n_real; // unreachable upper bound + NTL::RR log_cost; + uint32_t best_l = 0, best_p = 2, constrained_max_l, constrained_max_p; + + NTL::RR cost_gje; +// return Fin_Send_rref_red_cost(n, r, l) / probability_k_by_k_is_inv(r - l) + + constrained_max_p = P_MAX_Stern > t ? t : P_MAX_Stern; + for (uint32_t p = 2; p < constrained_max_p; p = p + 2) { + constrained_max_l = + (L_MAX_Stern > (n - k - (t - p)) ? 
(n - k - (t - p)) : L_MAX_Stern); + NTL::RR p_real = NTL::RR(p); + NTL::ZZ kPlusLHalfChoosePHalf; + for (uint32_t l = 0; l < constrained_max_l; l++) { + NTL::RR l_real = NTL::RR(l); + cost_gje = + Fin_Send_rref_red_cost(n_real, n_real - k_real, l_real); + kPlusLHalfChoosePHalf = binomial_wrapper((k + l) / 2, p / 2); + NTL::RR kPlusLHalfChoosePHalf_real = NTL::to_RR(kPlusLHalfChoosePHalf); + NTL::RR cost_iter = + cost_gje / probability_k_by_k_is_inv(n_real - k_real - l_real) + + kPlusLHalfChoosePHalf_real * + (NTL::to_RR(l) * p_real + + (kPlusLHalfChoosePHalf_real / NTL::power2_RR(l)) * + NTL::RR(p * (n - k - l))); + // #if LOG_COST_CRITERION == 1 + NTL::RR log_FS_list_size = + kPlusLHalfChoosePHalf_real * + (p_real / NTL::RR(2) * NTL::log((k_real + l_real) / NTL::RR(2)) / + NTL::log(NTL::RR(2)) + + l_real); + log_FS_list_size = log2_RR(log_FS_list_size); + cost_iter = cost_iter * log_FS_list_size; + // #endif + NTL::RR num_iter = + NTL::to_RR(binomial_wrapper(n, t)) / + NTL::to_RR(kPlusLHalfChoosePHalf * kPlusLHalfChoosePHalf * + binomial_wrapper(n - k - l, t - p)); + + log_cost = log2_RR(num_iter) + log2_RR(cost_iter); + if (min_log_cost > log_cost) { + min_log_cost = log_cost; + best_l = l; + best_p = p; + } + } + } + spdlog::info("FS Best l {}, best p: {}", best_l, best_p); + Result res; + res.alg_name = "Fin-Send"; + res.params = {{"p", best_p}, {"l", best_l}}; + res.value = NTL::conv(min_log_cost); + res.gje_cost = NTL::conv(log2_RR(cost_gje)); + //cost_gje not reported + return res; +} + +#define P_MAX_MMT (P_MAX_FS + 25) // P_MAX_MMT +#define L_MAX_MMT 350 // L_MAX_MMT +#define L_MIN_MMT 2 +Result isd_log_cost_classic_MMT(const uint32_t n, const uint32_t k, + const uint32_t t) { + uint32_t r = n - k; + NTL::RR n_real = NTL::RR(n); + NTL::RR r_real = NTL::RR(r); + NTL::RR k_real = n_real - r_real; + + NTL::RR min_log_cost = n_real; // unreachable upper bound + NTL::RR log_cost, log_mem_cost; + uint32_t best_l = L_MIN_MMT, best_p = 4, constrained_max_l = 0, + constrained_max_p; +#if defined(EXPLORE_REPS) + uint32_t best_l1; +#endif + + NTL::RR cost_gje; + constrained_max_p = P_MAX_MMT > t ? t : P_MAX_MMT; + /* p should be divisible by 4 in MMT */ + for (uint32_t p = 4; p <= constrained_max_p; p = p + 4) { + constrained_max_l = + (L_MAX_MMT > (n - k - (t - p)) ? (n - k - (t - p)) : L_MAX_MMT); + for (uint32_t l = L_MIN_MMT; l <= constrained_max_l; l++) { + NTL::RR l_real = NTL::to_RR(l); + NTL::ZZ kPlusLHalfChoosePHalf = binomial_wrapper((k + l) / 2, p / 2); + NTL::RR num_iter = + NTL::to_RR(binomial_wrapper(n, t)) / + NTL::to_RR(kPlusLHalfChoosePHalf * kPlusLHalfChoosePHalf * + binomial_wrapper(n - k - l, t - p)); + // FS_IS_candidate_cost = Fin_Send_IS_candidate_cost(n_real, r_real, l_real); + cost_gje = Fin_Send_rref_red_cost(n_real, n_real - k_real, l_real); + NTL::ZZ kPlusLHalfChoosePFourths = binomial_wrapper((k + l) / 2, p / 4); + NTL::RR kPlusLHalfChoosePFourths_real = + NTL::to_RR(kPlusLHalfChoosePFourths); + NTL::RR minOperandRight, min; + NTL::RR PChoosePHalf = NTL::to_RR(binomial_wrapper(p, p / 2)); + NTL::RR kPlusLChoosePHalf = NTL::to_RR(binomial_wrapper((k + l), p / 2)); + minOperandRight = + NTL::to_RR(binomial_wrapper((k + l) / 2, p / 2)) / PChoosePHalf; + min = kPlusLHalfChoosePFourths_real > minOperandRight + ? 
minOperandRight + : kPlusLHalfChoosePFourths_real; + + /* hoist out anything not depending on l_1/l_2 split*/ +#if defined(EXPLORE_REPRS) + for (l_1 = 1; l_1 <= l; l_1++) { + uint32_t l_2 = l - l_1; +#else + uint32_t l_2 = NTL::conv( + log2_RR(kPlusLHalfChoosePFourths_real / + NTL::to_RR(binomial_wrapper(p, p / 2)))); + /*clamp l_2 to a safe value , 0 < l_2 < l*/ + l_2 = l_2 <= 0 ? 1 : l_2; + l_2 = l_2 >= l ? l - 1 : l_2; + + uint32_t l_1 = l - l_2; +#endif + NTL::RR interm = kPlusLHalfChoosePFourths_real / NTL::power2_RR(l_2) * + NTL::to_RR(p / 2 * l_1); + + NTL::RR otherFactor = (NTL::to_RR(p / 4 * l_2) + interm); + NTL::RR cost_iter = + cost_gje/probability_k_by_k_is_inv(n_real - k_real - l_real) + min * otherFactor + + kPlusLHalfChoosePFourths_real * NTL::to_RR(p / 2 * l_2); + + NTL::RR lastAddend = + otherFactor + kPlusLHalfChoosePFourths_real * kPlusLChoosePHalf * + PChoosePHalf / NTL::power2_RR(l) * + NTL::to_RR(p * (r - l)); + lastAddend = lastAddend * kPlusLHalfChoosePFourths_real; + cost_iter += lastAddend; + // #if 0 + + NTL::RR log_MMT_space = + r_real * n_real + + kPlusLHalfChoosePFourths_real * + (NTL::to_RR(p / 4) * log2_RR(NTL::to_RR(k + l / 2)) + + NTL::to_RR(l_2)) + + NTL::to_RR(min) * (NTL::to_RR(p / 2) * log2_RR(NTL::to_RR(k + l)) + + NTL::to_RR(l)); + log_MMT_space = log2_RR(log_MMT_space); + cost_iter = cost_iter * log_MMT_space; + // #endif + log_cost = log2_RR(num_iter) + log2_RR(cost_iter); + if (min_log_cost > log_cost) { + min_log_cost = log_cost; + best_l = l; +#if defined(EXPLORE_REPRS) + best_l1 = l_1; +#endif + best_p = p; + log_mem_cost = log_MMT_space; + } +#if defined(EXPLORE_REPRS) + } +#endif + } + } + spdlog::info("MMT Best l {}, best p: {}", best_l, best_p); + if (best_p == constrained_max_p) { + spdlog::warn("Warning: p {p} on exploration edge!"); + } + if (best_l == constrained_max_l) { + spdlog::warn("Warning: l {l} on exploration edge!"); + } + Result res; + res.alg_name = "MMT"; + res.params = {{"p", best_p}, {"l", best_l}}; + res.value = NTL::conv(min_log_cost); + res.gje_cost = NTL::conv(log2_RR(cost_gje)); + return res; +} + +#define P_MAX_BJMM 20 // P_MAX_MMT +#define L_MAX_BJMM 90 // L_MAX_MMT +#define Eps1_MAX_BJMM 4 +#define Eps2_MAX_BJMM 4 +Result isd_log_cost_classic_BJMM(const uint32_t n, const uint32_t k, + const uint32_t t) { + NTL::RR n_real = NTL::RR(n); + NTL::RR k_real = NTL::RR(k); + NTL::RR t_real = NTL::RR(t); + uint32_t r = n - k; + NTL::RR r_real = NTL::RR(r); + + NTL::RR min_log_cost = n_real; // unreachable upper bound + NTL::RR log_cost; + std::optional best_p, best_l, best_eps_1, best_eps_2; + uint32_t constrained_max_l, constrained_max_p; + + NTL::RR cost_gje; + constrained_max_p = P_MAX_BJMM > t ? t : P_MAX_BJMM; + /*p should be divisible by 2 in BJMM */ + for (uint32_t p = 2; p < constrained_max_p; p = p + 2) { + /* sweep over all the valid eps1 knowing that p/2 + eps1 should be a + * multiple of 4*/ + constrained_max_l = + (L_MAX_BJMM > (n - k - (t - p)) ? 
(n - k - (t - p)) : L_MAX_BJMM); + for (uint32_t l = 0; l < constrained_max_l; l++) { + for (uint32_t eps1 = 2 + (p % 2); eps1 < Eps1_MAX_BJMM; eps1 = eps1 + 2) { + uint32_t p_1 = p / 2 + eps1; + /* sweep over all the valid eps2 knowing that p_1/2 + eps2 should + * be even */ + for (uint32_t eps2 = (p_1 % 2); eps2 < Eps2_MAX_BJMM; eps2 = eps2 + 2) { + uint32_t p_2 = p_1 / 2 + eps2; + + /* Available parameters p, p_1,p_2,p_3, l */ + NTL::RR l_real = NTL::RR(l); + cost_gje = Fin_Send_rref_red_cost(n_real, n_real - k_real, l_real); + // TODO check why this cost (or the rref cost) is never used + // FS_IS_candidate_cost = + // Fin_Send_IS_candidate_cost(n_real, n_real - k_real, l_real); + uint32_t p_3 = p_2 / 2; + + NTL::ZZ L3_list_len = binomial_wrapper((k + l) / 2, p_3); + NTL::RR L3_list_len_real = NTL::to_RR(L3_list_len); + /* the BJMM number of iterations depends only on L3 parameters + * precompute it */ + NTL::RR num_iter = NTL::to_RR(binomial_wrapper(n, t)) / + NTL::to_RR(binomial_wrapper((k + l), p) * + binomial_wrapper(r - l, t - p)); + NTL::RR P_invalid_splits = NTL::power(L3_list_len_real, 2) / + NTL::to_RR(binomial_wrapper(k + l, p_2)); + num_iter = num_iter / NTL::power(P_invalid_splits, 4); + + /* lengths of lists 2 to 0 have to be divided by the number of + * repr.s*/ + NTL::RR L2_list_len = NTL::to_RR(binomial_wrapper(k + l, p_2)) * + NTL::power(P_invalid_splits, 1); + NTL::RR L1_list_len = NTL::to_RR(binomial_wrapper(k + l, p_1)) * + NTL::power(P_invalid_splits, 2); + /* estimating the range for r_1 and r_2 requires to compute the + * number of representations rho_1 and rho_2 */ + + NTL::ZZ rho_2 = binomial_wrapper(p_1, p_1 / 2) * + binomial_wrapper(k + l - p_1, eps2); + NTL::ZZ rho_1 = + binomial_wrapper(p, p / 2) * binomial_wrapper(k + l - p, eps1); + int min_r2 = NTL::conv(NTL::log(NTL::to_RR(rho_2)) / + NTL::log(NTL::RR(2))); + int max_r1 = NTL::conv(NTL::log(NTL::to_RR(rho_1)) / + NTL::log(NTL::RR(2))); + + /*enumerate r_1 and r_2 over the suggested range + * log(rho_2) < r2 < r_1 < log(rho_1)*/ + /* clamp to safe values */ + min_r2 = min_r2 > 0 ? min_r2 : 1; + max_r1 = max_r1 < (int)l ? 
max_r1 : l - 1; + + NTL::RR p_real = NTL::RR(p); + for (int r_2 = min_r2; r_2 < max_r1 - 1; r_2++) { + for (int r_1 = r_2 + 1; r_1 < max_r1; r_1++) { + + /*add the cost of building Layer 3 to cost_iter */ + NTL::RR cost_iter = + NTL::to_RR(4) * + (k + l + 2 * L3_list_len_real + r_2 + + NTL::power(L3_list_len_real, 2) * NTL::to_RR(2 * p_3 * r_2)); + + /* add the cost of building Layer 2 */ + cost_iter += + 2 * (NTL::power((NTL::to_RR(rho_2) / (NTL::power2_RR(r_2))) * + NTL::power(L3_list_len_real, 2), + 2) * + 2 * p_2 * (r_1 - r_2)); + + /* add the cost of building Layer 1 */ + cost_iter += + NTL::power((NTL::to_RR(rho_1) / NTL::power2_RR(r_1)) * + (NTL::to_RR(rho_2) / NTL::power2_RR(r_2)) * + NTL::power(L3_list_len_real, 2), + 4) * + 2 * p_1 * l; + + /* add the cost of building L0 */ + cost_iter += + p * (r - l) * + NTL::power((NTL::to_RR(rho_1) / NTL::power2_RR(r_1)) * + (NTL::to_RR(rho_2) / NTL::power2_RR(r_2)) * + NTL::power(L3_list_len_real, 2), + 4) / + NTL::to_RR(l); + + log_cost = log2_RR(num_iter) + log2_RR(cost_iter); + + if (min_log_cost > log_cost) { + min_log_cost = log_cost; + best_l = l; + best_p = p; + best_eps_1 = eps1; + best_eps_2 = eps2; + } + } + } + + } /*end of iteration over l */ + /* to review up to to here */ + } /* end for over eps2 */ + } /* end for over eps1 */ + } /* end for over p*/ + + if (!best_l || !best_eps_1 || !best_p || !best_eps_2) { + spdlog::error("Error: One or more variables are not initialized."); + throw std::runtime_error("One or more variables are not initialized."); + } + spdlog::info("BJMM Best l {}, best p: {}, best eps1: {}, best eps2: {}", + optional_to_string(best_l), optional_to_string(best_p), + optional_to_string(best_eps_1), optional_to_string(best_eps_2)); + Result res; + res.alg_name = "BJMM"; + res.params = {{"p", best_p.value()}, + {"l", best_l.value()}, + {"eps1", best_eps_1.value()}, + {"eps2", best_eps_2.value()}}; + res.value = NTL::conv(min_log_cost); + res.gje_cost = NTL::conv(log2_RR(cost_gje)); + return res; +} + +/***************************Quantum ISDs***************************************/ + +const NTL::RR quantum_gauss_red_cost(const NTL::RR &n, const NTL::RR &k) { + // return 0.5* NTL::power(n-k,3) + k*NTL::power((n-k),2); + return 1.5 * NTL::power(n - k, 2) - 0.5 * (n - k); +} + +#define P_MAX_Q_LB 3 // P_MAX_MMT +Result isd_log_cost_quantum_LB(const uint32_t n, const uint32_t k, + const uint32_t t) { + NTL::RR n_real = NTL::RR(n); + NTL::RR k_real = NTL::RR(k); + NTL::RR t_real = NTL::RR(t); + NTL::RR log_pi_fourths = NTL::log(pi * 0.25); + NTL::RR log_pinv = log_probability_k_by_k_is_inv(k_real); + + /* Check https://doi.org/10.1007/978-3-031-61489-7_2 + * for the full measures of the lee-brickell quantum attack + */ + NTL::RR min_log_cost = n_real; // unreachable upper bound + uint32_t p; + std::optional best_p; + for (p = 1; p < P_MAX_Q_LB; p++) { + NTL::RR p_real = NTL::RR(p); + NTL::RR iteration_cost = quantum_gauss_red_cost(n_real, k_real) + + NTL::to_RR(binomial_wrapper(k, p)) * + NTL::log(n_real - k_real) / + NTL::log(NTL::RR(2)); + NTL::RR log_cost = + log_pi_fourths + .5 * (lnBinom(n_real, t_real) - log_pinv - + (lnBinom(k_real, p_real) + + lnBinom(n_real - k_real, t_real - p_real))); + log_cost += NTL::log(iteration_cost); + log_cost = log_cost / NTL::log(NTL::RR(2)); + if (log_cost < min_log_cost) { + min_log_cost = log_cost; + best_p = p; + } + } + if (!best_p) { + spdlog::error("Error: One or more variables are not initialized."); + throw std::runtime_error("One or more variables are not 
initialized."); + } + + Result res; + res.alg_name = "Quantum Lee-Brickell"; + res.params = {{"p", best_p.value()}}; + res.value = NTL::conv(min_log_cost); + return res; +} + +#define MAX_M (t / 2) + +Result isd_log_cost_quantum_stern(const uint32_t n, const uint32_t k, + const uint32_t t) { + NTL::RR n_real = NTL::RR(n); + NTL::RR k_real = NTL::RR(k); + NTL::RR t_real = NTL::RR(t); + NTL::RR current_complexity, log_p_success, c_it, c_dec; + + // Start computing Stern's parameter invariant portions of complexity + NTL::RR log_pi_fourths = NTL::log(pi * 0.25); + // compute the probability of a random k * k being invertible + NTL::RR log_pinv = log_probability_k_by_k_is_inv(k_real); + // compute the cost of inverting the matrix, in a quantum execution env. + NTL::RR c_inv = quantum_gauss_red_cost(n_real, k_real); + + // optimize Stern's parameters : + // m : the # of errors in half of the chosen dimensions + // l : the length of the run of zeroes in the not chosen dimensions + // done via exhaustive parameter space search, minimizing the total + // complexity. + // Initial value set to codeword bruteforce to ensure the minimum is found. + NTL::RR min_stern_complexity = NTL::RR(n) * NTL::log(NTL::RR(2)); + + for (long m = 1; m <= MAX_M; m++) { + NTL::RR m_real = NTL::RR(m); + /* previous best complexity as a function of l alone. + * initialize to bruteforce-equivalent, break optimization loop as soon + * as a minimum is found */ + NTL::RR prev_best_complexity = NTL::RR(t); + for (long l = 0; l < (n - k - (t - 2 * m)); l++) { + + NTL::RR l_real = NTL::RR(l); + log_p_success = lnBinom(t_real, 2 * m_real) + + lnBinom(n_real - t_real, k_real - 2 * m_real) + + lnBinom(2 * m_real, m_real) + + lnBinom(n_real - k_real - t_real + 2 * m_real, l_real); + log_p_success = log_p_success - + (m_real * NTL::log(NTL::RR(4)) + lnBinom(n_real, k_real) + + lnBinom(n_real - k_real, l_real)); + current_complexity = -(log_p_success + log_pinv) * 0.5 + log_pi_fourths; + /* to match specifications , the term should be + * (n_real-k_real), as per in deVries, although + * David Hobach thesis mentions it to be + * (n_real-k_real-l_real), and it seems to match. + * amend specs for the typo. */ + c_it = l_real + (n_real - k_real - l_real) * + NTL::to_RR(binomial_wrapper(k / 2, m)) / + NTL::power2_RR(-l); + + c_it = c_it * 2 * m_real * NTL::to_RR(binomial_wrapper(k / 2, m)); +#if IGNORE_DECODING_COST == 1 + c_dec = 0.0; +#elif IGNORE_DECODING_COST == 0 + /*cost of decoding estimated as per Golomb CWDEC + * decoding an n-bit vector with weight k is + * CWDEC_cost(k,n)=O(n^2 log_2(n)) and following deVries, where + * c_dec = CWDEC_cost(n-k, n) + k + CWDEC_cost(l,n-k)*/ + c_dec = + n_real * n_real * NTL::log(n_real) + k_real + + (n_real - k_real) * (n_real - k_real) * NTL::log((n_real - k_real)); +#endif + current_complexity = current_complexity + NTL::log(c_it + c_inv + c_dec); + if (current_complexity < prev_best_complexity) { + prev_best_complexity = current_complexity; + } else { + break; + } + } + if (current_complexity < min_stern_complexity) { + min_stern_complexity = current_complexity; + } + } + Result res; + res.alg_name = "Quantum Stern"; + res.params = {}; + res.value = NTL::conv(min_stern_complexity / NTL::log(NTL::RR(2.0))); + return res; +} + +/***************************Aggregation ***************************************/ + +double get_qc_red_factor_log(const uint32_t qc_order, const uint32_t is_kra) { + /* For key recovery attacks (CFP) the advantage from quasi-cyclicity is p. 
For + * a message recovery (SDP), the DOOM advantage is sqrt(p). + */ + double qc_red_factor = is_kra ? logl(qc_order) : logl(qc_order) / 2.0; + return qc_red_factor / logl(2); +} + +Result c_isd_log_cost(const uint32_t n, const uint32_t k, const uint32_t t, + const uint32_t qc_order, const uint32_t is_kra, + const bool compute_qc_reduction_factor) { + Result current_res, min_res; + double qc_red_factor = + compute_qc_reduction_factor ? get_qc_red_factor_log(qc_order, is_kra) : 0; + + double min_cost = n; // the cost cannot be greater than 2^n + +#if SKIP_PRANGE == 0 + current_res = isd_log_cost_classic_Prange(n, k, t); + current_res.value -= qc_red_factor; + if (current_res.value < min_cost) { + min_res = current_res; + min_cost = current_res.value; + } +#endif + +#if SKIP_LB == 0 + current_res = isd_log_cost_classic_LB(n, k, t); + current_res.value -= qc_red_factor; + if (current_res.value < min_cost) { + min_res = current_res; + min_cost = current_res.value; + } +#endif + +#if SKIP_LEON == 0 + current_res = isd_log_cost_classic_Leon(n, k, t); + current_res.value -= qc_red_factor; + if (current_res.value < min_cost) { + min_res = current_res; + min_cost = current_res.value; + } +#endif + +#if SKIP_STERN == 0 + current_res = isd_log_cost_classic_Stern(n, k, t); + current_res.value -= qc_red_factor; + if (current_res.value < min_cost) { + min_res = current_res; + min_cost = current_res.value; + } +#endif + +#if SKIP_FS == 0 + current_res = isd_log_cost_classic_FS(n, k, t); + current_res.value -= qc_red_factor; + if (current_res.value < min_cost) { + min_res = current_res; + min_cost = current_res.value; + } +#endif + +#if SKIP_MMT == 0 + current_res = isd_log_cost_classic_MMT(n, k, t); + current_res.value -= qc_red_factor; + if (current_res.value < min_cost) { + min_res = current_res; + min_cost = current_res.value; + } +#endif + +#if SKIP_BJMM == 0 + current_res = isd_log_cost_classic_BJMM(n, k, t); + current_res.value -= qc_red_factor; + if (current_res.value < min_cost) { + min_res = current_res; + min_cost = current_res.value; + } +#endif + + return min_res; +} + +Result q_isd_log_cost(const uint32_t n, const uint32_t k, const uint32_t t, + const uint32_t qc_order, const uint32_t is_kra, + const bool compute_qc_reduction_factor) { + Result current_res, min_res; + double min_cost = n; // cannot be greater than n + double qc_red_factor = + compute_qc_reduction_factor ? get_qc_red_factor_log(qc_order, is_kra) : 0; + + /* This is just a quick hack since experiments says that p = 1 is + * the optimal value at least for the NIST code-based finalists + */ +#if SKIP_Q_LB == 0 + current_res = isd_log_cost_quantum_LB(n, k, t); + current_res.value -= qc_red_factor; + if (current_res.value < min_cost) { + min_res = current_res; + min_cost = current_res.value; + } +#endif + +#if SKIP_Q_STERN == 0 + current_res = isd_log_cost_classic_Stern(n, k, t); + current_res.value -= qc_red_factor; + if (current_res.value < min_cost) { + min_res = current_res; + min_cost = current_res.value; + } +#endif + + return min_res; +} diff --git a/src/logging.cpp b/src/logging.cpp new file mode 100644 index 0000000..9e60f60 --- /dev/null +++ b/src/logging.cpp @@ -0,0 +1,46 @@ +#include "logging.hpp" +#include +#include + +void configure_logger(const std::optional filename) { + // Initialize the logger + const std::string ff = + filename.has_value() ? 
filename.value() : "logs/default.log"; + auto logger = spdlog::basic_logger_mt("default_logger", ff); + spdlog::set_default_logger(logger); + + // Retrieve the environment variable for log level + const char *log_level_env = std::getenv("LOG_LEVEL"); + + if (log_level_env) { + std::string log_level_str(log_level_env); + + // Configure the log level based on the environment variable + if (log_level_str == "trace") { + spdlog::set_level(spdlog::level::trace); + } else if (log_level_str == "debug") { + spdlog::set_level(spdlog::level::debug); + } else if (log_level_str == "info") { + spdlog::set_level(spdlog::level::info); + } else if (log_level_str == "warn") { + spdlog::set_level(spdlog::level::warn); + } else if (log_level_str == "err") { + spdlog::set_level(spdlog::level::err); + } else if (log_level_str == "critical") { + spdlog::set_level(spdlog::level::critical); + } else { + spdlog::set_level(spdlog::level::info); // Default level + } + } else { + spdlog::set_level(spdlog::level::info); // Default level if environment + // variable is not set + } +} + +std::string optional_to_string(const std::optional &opt) { + if (opt) { + return std::to_string(*opt); + } else { + return "Not Initialized"; + } +} diff --git a/src/parameter_generator.cpp b/src/parameter_generator.cpp index d420e27..f9202e1 100644 --- a/src/parameter_generator.cpp +++ b/src/parameter_generator.cpp @@ -5,9 +5,9 @@ #define NUM_BITS_REAL_MANTISSA 128 #define IGNORE_DECODING_COST 0 -#define SKIP_BJMM 1 -#define SKIP_MMT 1 -#define LOG_COST_CRITERION 1 +// #define SKIP_BJMM 1 +// #define SKIP_MMT 1 +// #define LOG_COST_CRITERION 1 #include "binomials.hpp" #include "bit_error_probabilities.hpp" @@ -160,6 +160,7 @@ int main(int argc, char *argv[]) { } p_th = proper_primes[current_prime_pos]; + InitConstants(); InitBinomials(); NTL::RR::SetPrecision(NUM_BITS_REAL_MANTISSA); pi = NTL::ComputePi_RR(); diff --git a/src/work_factor_computation.cpp b/src/work_factor_computation.cpp index 01f97a3..05a2070 100644 --- a/src/work_factor_computation.cpp +++ b/src/work_factor_computation.cpp @@ -29,6 +29,7 @@ int main(int argc, char *argv[]) { /* reduce by a factor matching the QC block size */ + InitConstants(); InitBinomials(); NTL::RR::SetPrecision(NUM_BITS_REAL_MANTISSA); pi = NTL::ComputePi_RR(); diff --git a/src/work_factor_computation_parallel.cpp b/src/work_factor_computation_parallel.cpp index 91dc596..5dd973c 100644 --- a/src/work_factor_computation_parallel.cpp +++ b/src/work_factor_computation_parallel.cpp @@ -48,6 +48,7 @@ int main() { nlohmann::json j; file >> j; + InitConstants(); InitBinomials(); NTL::RR::SetPrecision(NUM_BITS_REAL_MANTISSA); pi = NTL::ComputePi_RR(); From 86f652cf0a61bdd3b3d3025cba3632af8a8ed713 Mon Sep 17 00:00:00 2001 From: Simone Perriello <8199216+tigerjack@users.noreply.github.com> Date: Thu, 25 Jul 2024 20:14:48 +0200 Subject: [PATCH 29/55] MOD logging default value --- src/logging.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/logging.cpp b/src/logging.cpp index 9e60f60..920d7d2 100644 --- a/src/logging.cpp +++ b/src/logging.cpp @@ -32,7 +32,7 @@ void configure_logger(const std::optional filename) { spdlog::set_level(spdlog::level::info); // Default level } } else { - spdlog::set_level(spdlog::level::info); // Default level if environment + spdlog::set_level(spdlog::level::err); // Default level if environment // variable is not set } } From c8af174eb8b408c9686f331e0508addf77e9f39b Mon Sep 17 00:00:00 2001 From: Simone Perriello 
<8199216+tigerjack@users.noreply.github.com> Date: Thu, 25 Jul 2024 20:15:31 +0200 Subject: [PATCH 30/55] MOD CMakeLists to export library --- src/CMakeLists.txt | 23 ++++++++--------------- 1 file changed, 8 insertions(+), 15 deletions(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index d7e013b..1424ef3 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -1,12 +1,3 @@ -# Define the executable targets -set(TARGETS - constant_weight_encodable_bits - # enumeration_complexity - # parameter_generator - # work_factor_computation - # work_factor_computation_parallel -) - # Libraries # Find libraries find_library(GMP_LIB gmp) @@ -16,6 +7,8 @@ find_package(OpenMP REQUIRED) find_package(spdlog REQUIRED) find_package(fmt REQUIRED) +add_library(leda_tools binomials.cpp logging.cpp bit_error_probabilities.cpp isd_cost_estimate.cpp) + message(STATUS "gmp library: ${GMP_LIBRARIES}") message(STATUS "ntl library: ${NTL_LIBRARIES}") message(STATUS "m library: ${M_LIBRARIES}") @@ -24,24 +17,24 @@ message(STATUS "spdlog library: ${spdlog_LIBRARIES}") message(STATUS "fmt library: ${fmt_LIBRARIES}") # Define libraries -set(LIBS ${NTL_LIB} ${GMP_LIB} ${M_LIB} spdlog::spdlog fmt::fmt) +set(LIBS leda_tools ${NTL_LIB} ${GMP_LIB} ${M_LIB} spdlog::spdlog fmt::fmt) set(target constant_weight_encodable_bits) -add_executable(${target} ${target}.cpp binomials.cpp) +add_executable(${target} ${target}.cpp) target_link_libraries(${target} PRIVATE ${LIBS}) set(target enumeration_complexity) -add_executable(${target} ${target}.cpp binomials.cpp) +add_executable(${target} ${target}.cpp) target_link_libraries(${target} PRIVATE ${LIBS}) set(target parameter_generator) -add_executable(${target} ${target}.cpp binomials.cpp logging.cpp bit_error_probabilities.cpp isd_cost_estimate.cpp) +add_executable(${target} ${target}.cpp) target_link_libraries(${target} PRIVATE ${LIBS}) set(target work_factor_computation) -add_executable(${target} ${target}.cpp binomials.cpp logging.cpp isd_cost_estimate.cpp) +add_executable(${target} ${target}.cpp) target_link_libraries(${target} PRIVATE ${LIBS}) set(target work_factor_computation_parallel) -add_executable(${target} ${target}.cpp binomials.cpp logging.cpp isd_cost_estimate.cpp) +add_executable(${target} ${target}.cpp) target_link_libraries(${target} PRIVATE ${LIBS} OpenMP::OpenMP_CXX) From 5eff602dabf25a1fc49b954ed7a19b86fc9b2333 Mon Sep 17 00:00:00 2001 From: Simone Perriello <8199216+tigerjack@users.noreply.github.com> Date: Thu, 25 Jul 2024 20:16:11 +0200 Subject: [PATCH 31/55] ADD example directory --- CMakeLists.txt | 7 +-- examples/CMakeLists.txt | 4 ++ examples/isd_cost_estimate_ex.cpp | 86 +++++++++++++++++++++++++++++++ 3 files changed, 91 insertions(+), 6 deletions(-) create mode 100644 examples/CMakeLists.txt create mode 100644 examples/isd_cost_estimate_ex.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 10620f1..30b0891 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -13,11 +13,6 @@ add_compile_options(-O3 -g3 -Wall -Wextra -Wno-sign-compare) # Include directories include_directories(include) -# Add the src directory add_subdirectory(src) - -# # Add the example directory -# add_subdirectory(examples) - -# # Add the test directory +add_subdirectory(examples) # add_subdirectory(test) diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt new file mode 100644 index 0000000..175c624 --- /dev/null +++ b/examples/CMakeLists.txt @@ -0,0 +1,4 @@ +add_executable(isd_cost_estimate_ex isd_cost_estimate_ex.cpp) +target_link_libraries(isd_cost_estimate_ex 
PRIVATE leda_tools) +# Optionally, specify include directories for the examples +target_include_directories(isd_cost_estimate_ex PRIVATE ${CMAKE_SOURCE_DIR}/include) diff --git a/examples/isd_cost_estimate_ex.cpp b/examples/isd_cost_estimate_ex.cpp new file mode 100644 index 0000000..60ee463 --- /dev/null +++ b/examples/isd_cost_estimate_ex.cpp @@ -0,0 +1,86 @@ +#include "isd_cost_estimate.hpp" +#include // for uint32_t +#include +#include +#include +#include + +struct Cost { + std::string algorithm; + std::string type; // CFP1, CFP2, CFP3, SDP + bool is_quantum; + double time_complexity; + double space_complexity; +}; + +struct Value { + uint32_t n0; + uint32_t prime; + uint32_t v; + uint32_t t; + std::vector costs; +}; + +void displayValues(const std::vector &values) { + // Optional: Display the values + for (const auto &value : values) { + std::cout << "n0: " << value.n0 << ", prime: " << value.prime << "\n"; + for (const auto &cost : value.costs) { + std::cout << " Algorithm: " << cost.algorithm << ", Type: " << cost.type + << ", Quantum: " << (cost.is_quantum ? "Yes" : "No") + << ", Time Complexity: " << cost.time_complexity + << ", Space Complexity: " << cost.space_complexity << "\n"; + } + } +} + +int main() { + std::cout << "Hello world\n"; + // Expected values taken from LEDA specs, Table 4.1 + std::vector values; + Value val = {2, + 23371, + 71, + 130, + { + { + "Prange", + "CFP1", + false, + 144.2, + 0.0 + }, + { + "Prange", + "CFP1", + false, + 144.2, + 0.0 + }, + + }}; + + // Value value1 = { + // 10, // n0 + // 7, // prime + // 5, // v + // 20, // t + // {{"Algorithm1", "CFP1", true, 0.5, 1.0}, + // {"Algorithm2", "SDP", false, 1.0, 2.0}} // costs + // }; + // values.push_back(value1); + // , + // { + // 15, // n0 + // 11, // prime + // 6, // v + // 30, // t + // {{"Algorithm3", "CFP2", true, 0.7, 1.5}, + // {"Algorithm4", "CFP3", false, 0.8, 1.8}} // costs + // }}; + + // Call the function to display the values + // displayValues(values); + + // return 0; +} From 11af98b6e57d120fd51f91c291c31773fa40cd33 Mon Sep 17 00:00:00 2001 From: Simone Perriello <8199216+tigerjack@users.noreply.github.com> Date: Fri, 26 Jul 2024 14:12:12 +0200 Subject: [PATCH 32/55] MOD structure --- .gitignore | 2 +- CMakeLists.txt | 6 ++- README_new.md | 40 +++++++++++++++++ examples/CMakeLists.txt | 6 +-- include/{ => utils}/binomials.hpp | 0 .../{ => utils}/bit_error_probabilities.hpp | 0 include/{ => utils}/isd_cost_estimate.hpp | 35 +++++++++------ include/{ => utils}/logging.hpp | 0 src/CMakeLists.txt | 42 +----------------- src/constant_weight_encodable_bits.cpp | 30 ------------- src/tools/CMakeLists.txt | 43 +++++++++++++++++++ src/tools/constant_weight_encodable_bits.cpp | 30 +++++++++++++ src/{ => tools}/enumeration_complexity.cpp | 0 src/{ => tools}/parameter_generator.cpp | 0 src/{ => tools}/work_factor_computation.cpp | 0 .../work_factor_computation_parallel.cpp | 0 src/utils/CMakeLists.txt | 16 +++++++ src/{ => utils}/binomials.cpp | 0 src/{ => utils}/bit_error_probabilities.cpp | 0 src/{ => utils}/isd_cost_estimate.cpp | 12 +++--- src/{ => utils}/logging.cpp | 0 21 files changed, 169 insertions(+), 93 deletions(-) create mode 100644 README_new.md rename include/{ => utils}/binomials.hpp (100%) rename include/{ => utils}/bit_error_probabilities.hpp (100%) rename include/{ => utils}/isd_cost_estimate.hpp (75%) rename include/{ => utils}/logging.hpp (100%) create mode 100644 src/tools/CMakeLists.txt create mode 100644 src/tools/constant_weight_encodable_bits.cpp rename src/{ => 
tools}/enumeration_complexity.cpp (100%) rename src/{ => tools}/parameter_generator.cpp (100%) rename src/{ => tools}/work_factor_computation.cpp (100%) rename src/{ => tools}/work_factor_computation_parallel.cpp (100%) create mode 100644 src/utils/CMakeLists.txt rename src/{ => utils}/binomials.cpp (100%) rename src/{ => utils}/bit_error_probabilities.cpp (100%) rename src/{ => utils}/isd_cost_estimate.cpp (98%) rename src/{ => utils}/logging.cpp (100%) diff --git a/.gitignore b/.gitignore index 594a653..f28bd8c 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,5 @@ # MINE -# build dir +bin/ build/ # Log output diff --git a/CMakeLists.txt b/CMakeLists.txt index 30b0891..17a0be0 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -11,8 +11,12 @@ set(CMAKE_CXX_STANDARD_REQUIRED True) add_compile_options(-O3 -g3 -Wall -Wextra -Wno-sign-compare) # Include directories -include_directories(include) +include_directories(${PROJECT_SOURCE_DIR}/include) add_subdirectory(src) add_subdirectory(examples) # add_subdirectory(test) + +# Installation directories +set(EXECUTABLE_OUTPUT_PATH ${CMAKE_BINARY_DIR}/bin) +set(LIBRARY_OUTPUT_PATH ${CMAKE_BINARY_DIR}/lib) diff --git a/README_new.md b/README_new.md new file mode 100644 index 0000000..9363e34 --- /dev/null +++ b/README_new.md @@ -0,0 +1,40 @@ +* Added +- CMakeLists + +Dependencies: +- spdlog to log +- fmt (come with spdlog) + +Executables +- work_factor_computation_parallel +To spwan threads that compute the work_factor + +* Structure +- include/utils +All the hpp headers +- src/utils +Al the cpp corresponding to the prev headers +- src/tools +All the output tools, that is, executables to use + +* Compile + +```sh +mkdir build && cd build +cmake .. +make -j +``` + +** To create the binaries (inside the local path) +Inside `build` + +```sh +cmake -DCMAKE_INSTALL_PREFIX=/your/custom/path .. +make -j +make install +``` + +Then, you can execute the files as ./bin/ + +* TODOs +The tools should not hardcode their paths (see f.e. 
work_factor_computation_parallel) diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index 175c624..6ddec7f 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -1,4 +1,4 @@ add_executable(isd_cost_estimate_ex isd_cost_estimate_ex.cpp) -target_link_libraries(isd_cost_estimate_ex PRIVATE leda_tools) -# Optionally, specify include directories for the examples -target_include_directories(isd_cost_estimate_ex PRIVATE ${CMAKE_SOURCE_DIR}/include) +target_link_libraries(isd_cost_estimate_ex PRIVATE ledautils) +# # Optionally, specify include directories for the examples +# target_include_directories(isd_cost_estimate_ex PRIVATE ${CMAKE_SOURCE_DIR}/include) diff --git a/include/binomials.hpp b/include/utils/binomials.hpp similarity index 100% rename from include/binomials.hpp rename to include/utils/binomials.hpp diff --git a/include/bit_error_probabilities.hpp b/include/utils/bit_error_probabilities.hpp similarity index 100% rename from include/bit_error_probabilities.hpp rename to include/utils/bit_error_probabilities.hpp diff --git a/include/isd_cost_estimate.hpp b/include/utils/isd_cost_estimate.hpp similarity index 75% rename from include/isd_cost_estimate.hpp rename to include/utils/isd_cost_estimate.hpp index 6832713..dfc1378 100644 --- a/include/isd_cost_estimate.hpp +++ b/include/utils/isd_cost_estimate.hpp @@ -16,10 +16,11 @@ #define SKIP_Q_STERN 0 struct Result { - std::string alg_name; - std::map params; - double value; - double gje_cost; + std::string alg_name; + std::map params; + double value; + double gje_cost; + double list_size; }; /***************************Classic ISDs***************************************/ @@ -34,14 +35,22 @@ Result c_isd_log_cost(const uint32_t n, const uint32_t k, const uint32_t t, const uint32_t qc_order, const uint32_t is_kra, const bool compute_qc_reduction_factor); -Result isd_log_cost_classic_Prange(const uint32_t n, const uint32_t k, const uint32_t t); -Result isd_log_cost_classic_LB(const uint32_t n, const uint32_t k, const uint32_t t); -Result isd_log_cost_classic_Leon(const uint32_t n, const uint32_t k, const uint32_t t); -Result isd_log_cost_classic_Stern(const uint32_t n, const uint32_t k, const uint32_t t); -Result isd_log_cost_classic_FS(const uint32_t n, const uint32_t k, const uint32_t t); -Result isd_log_cost_classic_MMT(const uint32_t n, const uint32_t k, const uint32_t t); -Result isd_log_cost_classic_BJMM_approx(const uint32_t n, const uint32_t k, const uint32_t t); -Result isd_log_cost_classic_BJMM(const uint32_t n, const uint32_t k, const uint32_t t); +Result isd_log_cost_classic_Prange(const uint32_t n, const uint32_t k, + const uint32_t t); +Result isd_log_cost_classic_LB(const uint32_t n, const uint32_t k, + const uint32_t t); +Result isd_log_cost_classic_Leon(const uint32_t n, const uint32_t k, + const uint32_t t); +Result isd_log_cost_classic_Stern(const uint32_t n, const uint32_t k, + const uint32_t t); +Result isd_log_cost_classic_FS(const uint32_t n, const uint32_t k, + const uint32_t t); +Result isd_log_cost_classic_MMT(const uint32_t n, const uint32_t k, + const uint32_t t); +Result isd_log_cost_classic_BJMM_approx(const uint32_t n, const uint32_t k, + const uint32_t t); +Result isd_log_cost_classic_BJMM(const uint32_t n, const uint32_t k, + const uint32_t t); // Quantum Result q_isd_log_cost(const uint32_t n, const uint32_t k, const uint32_t t, @@ -51,4 +60,4 @@ Result q_isd_log_cost(const uint32_t n, const uint32_t k, const uint32_t t, Result isd_log_cost_quantum_LB(const uint32_t n, const 
uint32_t k, const uint32_t t); Result isd_log_cost_quantum_Stern(const uint32_t n, const uint32_t k, - const uint32_t t); + const uint32_t t); diff --git a/include/logging.hpp b/include/utils/logging.hpp similarity index 100% rename from include/logging.hpp rename to include/utils/logging.hpp diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 1424ef3..dcb7b69 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -1,40 +1,2 @@ -# Libraries -# Find libraries -find_library(GMP_LIB gmp) -find_library(NTL_LIB ntl) -find_library(M_LIB m) -find_package(OpenMP REQUIRED) -find_package(spdlog REQUIRED) -find_package(fmt REQUIRED) - -add_library(leda_tools binomials.cpp logging.cpp bit_error_probabilities.cpp isd_cost_estimate.cpp) - -message(STATUS "gmp library: ${GMP_LIBRARIES}") -message(STATUS "ntl library: ${NTL_LIBRARIES}") -message(STATUS "m library: ${M_LIBRARIES}") -message(STATUS "OpenMP library: ${OpenMP_CXX_LIBRARIES}") -message(STATUS "spdlog library: ${spdlog_LIBRARIES}") -message(STATUS "fmt library: ${fmt_LIBRARIES}") - -# Define libraries -set(LIBS leda_tools ${NTL_LIB} ${GMP_LIB} ${M_LIB} spdlog::spdlog fmt::fmt) - -set(target constant_weight_encodable_bits) -add_executable(${target} ${target}.cpp) -target_link_libraries(${target} PRIVATE ${LIBS}) - -set(target enumeration_complexity) -add_executable(${target} ${target}.cpp) -target_link_libraries(${target} PRIVATE ${LIBS}) - -set(target parameter_generator) -add_executable(${target} ${target}.cpp) -target_link_libraries(${target} PRIVATE ${LIBS}) - -set(target work_factor_computation) -add_executable(${target} ${target}.cpp) -target_link_libraries(${target} PRIVATE ${LIBS}) - -set(target work_factor_computation_parallel) -add_executable(${target} ${target}.cpp) -target_link_libraries(${target} PRIVATE ${LIBS} OpenMP::OpenMP_CXX) +add_subdirectory(utils) +add_subdirectory(tools) diff --git a/src/constant_weight_encodable_bits.cpp b/src/constant_weight_encodable_bits.cpp index 7627bda..e69de29 100644 --- a/src/constant_weight_encodable_bits.cpp +++ b/src/constant_weight_encodable_bits.cpp @@ -1,30 +0,0 @@ -#define NUM_BITS_REAL_MANTISSA 128 -#include -#include -#include - -#include "binomials.hpp" - -int main(int argc, char* argv[]){ - if(argc != 3){ - std::cout << "Calculator to derive the length of the encodable bit string via CW-enc" << std::endl << " Usage " - << argv[0] << " " << std::endl; - return -1; - } - - InitConstants(); - InitBinomials(); - NTL::RR::SetPrecision(NUM_BITS_REAL_MANTISSA); - uint32_t n = atoi(argv[1]); - uint32_t t = atoi(argv[2]); - /* reduce by a factor matching the QC block size */ - NTL::RR encodable_length; - encodable_length = lnBinom(NTL::to_RR(n), NTL::to_RR(t))/NTL::log(NTL::RR(2)); - - NTL::RR d = NTL::to_RR( 0.69315 * ((double)n - ( (double)t - 1.0)/2.0) /((double) t) ); - - std::cout << "Maximum safely encoded: " << t*NTL::conv(NTL::floor(NTL::log(d)/NTL::log(NTL::RR(2))+1)) << std::endl; - std::cout << "#define MAX_ENCODABLE_BIT_SIZE_CW_ENCODING (" << NTL::conv(encodable_length) << ")" ; - std::cout << std::endl; - return 0; -} diff --git a/src/tools/CMakeLists.txt b/src/tools/CMakeLists.txt new file mode 100644 index 0000000..5bfe920 --- /dev/null +++ b/src/tools/CMakeLists.txt @@ -0,0 +1,43 @@ + +find_package(OpenMP REQUIRED) +message(STATUS "OpenMP library: ${OpenMP_CXX_LIBRARIES}") +find_library(GMP_LIB gmp) +find_library(NTL_LIB ntl) +find_library(M_LIB m) +find_package(spdlog REQUIRED) +find_package(fmt REQUIRED) + +# Define libraries +set(LIBS ledautils ${NTL_LIB} 
${GMP_LIB} ${M_LIB} spdlog::spdlog fmt::fmt) + +message(STATUS "gmp library: ${GMP_LIBRARIES}") +message(STATUS "ntl library: ${NTL_LIBRARIES}") +message(STATUS "m library: ${M_LIBRARIES}") +message(STATUS "spdlog library: ${spdlog_LIBRARIES}") +message(STATUS "fmt library: ${fmt_LIBRARIES}") + +set(target constant_weight_encodable_bits) +add_executable(${target} ${target}.cpp) +target_link_libraries(${target} PRIVATE ${LIBS}) + +set(target enumeration_complexity) +add_executable(${target} ${target}.cpp) +target_link_libraries(${target} PRIVATE ${LIBS}) + +set(target parameter_generator) +add_executable(${target} ${target}.cpp) +target_link_libraries(${target} PRIVATE ${LIBS}) + +set(target work_factor_computation) +add_executable(${target} ${target}.cpp) +target_link_libraries(${target} PRIVATE ${LIBS}) + +set(target work_factor_computation_parallel) +add_executable(${target} ${target}.cpp) +target_link_libraries(${target} PRIVATE ${LIBS} OpenMP::OpenMP_CXX) + +install(TARGETS constant_weight_encodable_bits DESTINATION bin) +install(TARGETS enumeration_complexity DESTINATION bin) +install(TARGETS parameter_generator DESTINATION bin) +install(TARGETS work_factor_computation DESTINATION bin) +install(TARGETS work_factor_computation_parallel DESTINATION bin) diff --git a/src/tools/constant_weight_encodable_bits.cpp b/src/tools/constant_weight_encodable_bits.cpp new file mode 100644 index 0000000..09998e0 --- /dev/null +++ b/src/tools/constant_weight_encodable_bits.cpp @@ -0,0 +1,30 @@ +#define NUM_BITS_REAL_MANTISSA 128 +#include +#include +#include + +#include "utils/binomials.hpp" + +int main(int argc, char* argv[]){ + if(argc != 3){ + std::cout << "Calculator to derive the length of the encodable bit string via CW-enc" << std::endl << " Usage " + << argv[0] << " " << std::endl; + return -1; + } + + InitConstants(); + InitBinomials(); + NTL::RR::SetPrecision(NUM_BITS_REAL_MANTISSA); + uint32_t n = atoi(argv[1]); + uint32_t t = atoi(argv[2]); + /* reduce by a factor matching the QC block size */ + NTL::RR encodable_length; + encodable_length = lnBinom(NTL::to_RR(n), NTL::to_RR(t))/NTL::log(NTL::RR(2)); + + NTL::RR d = NTL::to_RR( 0.69315 * ((double)n - ( (double)t - 1.0)/2.0) /((double) t) ); + + std::cout << "Maximum safely encoded: " << t*NTL::conv(NTL::floor(NTL::log(d)/NTL::log(NTL::RR(2))+1)) << std::endl; + std::cout << "#define MAX_ENCODABLE_BIT_SIZE_CW_ENCODING (" << NTL::conv(encodable_length) << ")" ; + std::cout << std::endl; + return 0; +} diff --git a/src/enumeration_complexity.cpp b/src/tools/enumeration_complexity.cpp similarity index 100% rename from src/enumeration_complexity.cpp rename to src/tools/enumeration_complexity.cpp diff --git a/src/parameter_generator.cpp b/src/tools/parameter_generator.cpp similarity index 100% rename from src/parameter_generator.cpp rename to src/tools/parameter_generator.cpp diff --git a/src/work_factor_computation.cpp b/src/tools/work_factor_computation.cpp similarity index 100% rename from src/work_factor_computation.cpp rename to src/tools/work_factor_computation.cpp diff --git a/src/work_factor_computation_parallel.cpp b/src/tools/work_factor_computation_parallel.cpp similarity index 100% rename from src/work_factor_computation_parallel.cpp rename to src/tools/work_factor_computation_parallel.cpp diff --git a/src/utils/CMakeLists.txt b/src/utils/CMakeLists.txt new file mode 100644 index 0000000..9e14b4e --- /dev/null +++ b/src/utils/CMakeLists.txt @@ -0,0 +1,16 @@ +add_library(ledautils binomials.cpp logging.cpp bit_error_probabilities.cpp 
isd_cost_estimate.cpp) + +find_library(GMP_LIB gmp) +find_library(NTL_LIB ntl) +find_library(M_LIB m) +find_package(spdlog REQUIRED) +find_package(fmt REQUIRED) + +message(STATUS "gmp library: ${GMP_LIBRARIES}") +message(STATUS "ntl library: ${NTL_LIBRARIES}") +message(STATUS "m library: ${M_LIBRARIES}") +message(STATUS "spdlog library: ${spdlog_LIBRARIES}") +message(STATUS "fmt library: ${fmt_LIBRARIES}") + +# Specify include directories for this module +target_include_directories(ledautils PUBLIC ${PROJECT_SOURCE_DIR}/include/utils) diff --git a/src/binomials.cpp b/src/utils/binomials.cpp similarity index 100% rename from src/binomials.cpp rename to src/utils/binomials.cpp diff --git a/src/bit_error_probabilities.cpp b/src/utils/bit_error_probabilities.cpp similarity index 100% rename from src/bit_error_probabilities.cpp rename to src/utils/bit_error_probabilities.cpp diff --git a/src/isd_cost_estimate.cpp b/src/utils/isd_cost_estimate.cpp similarity index 98% rename from src/isd_cost_estimate.cpp rename to src/utils/isd_cost_estimate.cpp index b9363d2..d58b547 100644 --- a/src/isd_cost_estimate.cpp +++ b/src/utils/isd_cost_estimate.cpp @@ -182,9 +182,10 @@ Result isd_log_cost_classic_Stern(const uint32_t n, const uint32_t k, uint32_t best_l = 0, best_p = 2, constrained_max_l, constrained_max_p; NTL::RR gje_cost = classic_rref_red_cost(n_real, n_real - k_real); - // IS_candidate_cost = classic_IS_candidate_cost(n_real, n_real - k_real); + NTL::RR log_stern_list_size; + // IS_candidate_cost = classic_IS_candidate_cost(n_real, n_real - k_real); - constrained_max_p = P_MAX_Stern > t ? t : P_MAX_Stern; + constrained_max_p = P_MAX_Stern > t ? t : P_MAX_Stern; for (uint32_t p = 2; p < constrained_max_p; p = p + 2) { constrained_max_l = (L_MAX_Stern > (n - k - (t - p)) ? 
(n - k - (t - p)) : L_MAX_Stern); @@ -200,13 +201,13 @@ Result isd_log_cost_classic_Stern(const uint32_t n, const uint32_t k, (kHalfChoosePHalf_real / NTL::power2_RR(l)) * NTL::RR(p * (n - k - l))); // #if LOG_COST_CRITERION == 1 - NTL::RR log_stern_list_size = + log_stern_list_size = kHalfChoosePHalf_real * (p_real / NTL::RR(2) * NTL::log(k_real / NTL::RR(2)) / NTL::log(NTL::RR(2)) + NTL::to_RR(l)); - log_stern_list_size = - NTL::log(log_stern_list_size) / NTL::log(NTL::RR(2)); + log_stern_list_size = log2_RR(log_stern_list_size); + // NTL::log(log_stern_list_size) / NTL::log(NTL::RR(2)); cost_iter = cost_iter * log_stern_list_size; // #endif NTL::RR num_iter = NTL::to_RR(binomial_wrapper(n, t)) / @@ -227,6 +228,7 @@ Result isd_log_cost_classic_Stern(const uint32_t n, const uint32_t k, res.params = {{"p", best_p}, {"l", best_l}}; res.value = NTL::conv(min_log_cost); res.gje_cost = NTL::conv(log2_RR(gje_cost)); + res.list_size = NTL::conv(log_stern_list_size); return res; } diff --git a/src/logging.cpp b/src/utils/logging.cpp similarity index 100% rename from src/logging.cpp rename to src/utils/logging.cpp From f48b3a2c610544a9e51f32669abb70fcf85a5d1b Mon Sep 17 00:00:00 2001 From: Simone Perriello <8199216+tigerjack@users.noreply.github.com> Date: Fri, 26 Jul 2024 14:48:09 +0200 Subject: [PATCH 33/55] ADD partitions_permanents to ledautils library --- examples/isd_cost_estimate_ex.cpp | 95 ++++++++----------- include/utils/partitions_permanents.hpp | 23 +++++ src/utils/CMakeLists.txt | 3 +- .../utils/partitions_permanents.cpp | 4 +- 4 files changed, 65 insertions(+), 60 deletions(-) create mode 100644 include/utils/partitions_permanents.hpp rename include/partitions_permanents.hpp => src/utils/partitions_permanents.cpp (99%) diff --git a/examples/isd_cost_estimate_ex.cpp b/examples/isd_cost_estimate_ex.cpp index 60ee463..21f28f0 100644 --- a/examples/isd_cost_estimate_ex.cpp +++ b/examples/isd_cost_estimate_ex.cpp @@ -1,4 +1,4 @@ -#include "isd_cost_estimate.hpp" +#include #include // for uint32_t #include #include @@ -14,23 +14,35 @@ struct Cost { }; struct Value { - uint32_t n0; - uint32_t prime; - uint32_t v; - uint32_t t; + uint32_t codeword_size; + uint32_t code_dimension; + uint32_t number_of_errors; + uint32_t qc_block_size; + bool is_kra; std::vector costs; }; -void displayValues(const std::vector &values) { - // Optional: Display the values - for (const auto &value : values) { - std::cout << "n0: " << value.n0 << ", prime: " << value.prime << "\n"; - for (const auto &cost : value.costs) { - std::cout << " Algorithm: " << cost.algorithm << ", Type: " << cost.type - << ", Quantum: " << (cost.is_quantum ? "Yes" : "No") - << ", Time Complexity: " << cost.time_complexity - << ", Space Complexity: " << cost.space_complexity << "\n"; - } +void displayCost(const Cost &cost) { + std::cout << " Algorithm: " << cost.algorithm << '\n'; + std::cout << " Type: " << cost.type << '\n'; + std::cout << " Is Quantum: " << (cost.is_quantum ? 
"Yes" : "No") << '\n'; + std::cout << " Time Complexity: " << cost.time_complexity << '\n'; + std::cout << " Space Complexity: " << cost.space_complexity << '\n'; +} + +// Function to display a Value object +void displayValue(const Value &value) { + std::cout << "Value:\n"; + std::cout << " Codeword Size: " << value.codeword_size << '\n'; + std::cout << " Code Dimension: " << value.code_dimension << '\n'; + std::cout << " Number of Errors: " << value.number_of_errors << '\n'; + std::cout << " QC Block Size: " << value.qc_block_size << '\n'; + std::cout << " Is KRA: " << (value.is_kra ? "Yes" : "No") << '\n'; + + std::cout << "Costs:\n"; + for (const auto &cost : value.costs) { + displayCost(cost); + std::cout << "-----\n"; } } @@ -38,49 +50,18 @@ int main() { std::cout << "Hello world\n"; // Expected values taken from LEDA specs, Table 4.1 std::vector values; - Value val = {2, - 23371, - 71, - 130, + Value val = {24646, + 12323, + 142, + 12323, + true, { - { - "Prange", - "CFP1", - false, - 144.2, - 0.0 - }, - { - "Prange", - "CFP1", - false, - 144.2, - 0.0 - }, - + {"Prange", "", false, 171.3, 0.0}, + {"Lee-Brickell", "", false, 158.4, 0.0}, + {"Leon", "", false, 154.4, 0.0}, + {"Stern", "", false, 147.4, 0.0}, + {"Fin-Send", "", false, 147.4, 0.0}, }}; - // Value value1 = { - // 10, // n0 - // 7, // prime - // 5, // v - // 20, // t - // {{"Algorithm1", "CFP1", true, 0.5, 1.0}, - // {"Algorithm2", "SDP", false, 1.0, 2.0}} // costs - // }; - // values.push_back(value1); - // , - // { - // 15, // n0 - // 11, // prime - // 6, // v - // 30, // t - // {{"Algorithm3", "CFP2", true, 0.7, 1.5}, - // {"Algorithm4", "CFP3", false, 0.8, 1.8}} // costs - // }}; - - // Call the function to display the values - // displayValues(values); - - // return 0; + displayValue(val); } diff --git a/include/utils/partitions_permanents.hpp b/include/utils/partitions_permanents.hpp new file mode 100644 index 0000000..4f00f01 --- /dev/null +++ b/include/utils/partitions_permanents.hpp @@ -0,0 +1,23 @@ +#include +#include +#include + +/** + * @brief Computes the permanent of a circulant matrix. + * + * @param mpartition Array of integers representing the partition. + * @param n_0 Size of the partition. + * @return uint64_t Permanent of the circulant matrix. + */ +uint64_t ComputePermanent(int64_t mpartition[], const uint64_t n_0); + +/** + * @brief Finds a partition of m with length n_0. + * + * @param m The integer to partition. + * @param mpartition Vector to store the resulting partition. + * @param n_0 Length of the partition. + * @return int 1 if a good partition is found, 0 otherwise. 
+ */ +int FindmPartition(const uint64_t m, std::vector &mpartition, + const uint64_t n_0); diff --git a/src/utils/CMakeLists.txt b/src/utils/CMakeLists.txt index 9e14b4e..7b8baf9 100644 --- a/src/utils/CMakeLists.txt +++ b/src/utils/CMakeLists.txt @@ -1,4 +1,5 @@ -add_library(ledautils binomials.cpp logging.cpp bit_error_probabilities.cpp isd_cost_estimate.cpp) +# add_library(ledautils binomials.cpp logging.cpp bit_error_probabilities.cpp isd_cost_estimate.cpp) +add_library(ledautils binomials.cpp logging.cpp bit_error_probabilities.cpp isd_cost_estimate.cpp partitions_permanents.cpp) find_library(GMP_LIB gmp) find_library(NTL_LIB ntl) diff --git a/include/partitions_permanents.hpp b/src/utils/partitions_permanents.cpp similarity index 99% rename from include/partitions_permanents.hpp rename to src/utils/partitions_permanents.cpp index 81f25de..e2203d9 100644 --- a/include/partitions_permanents.hpp +++ b/src/utils/partitions_permanents.cpp @@ -1,6 +1,6 @@ #include -#include -#include + +#include "partitions_permanents.hpp" /* Permanent formulas for circulant matrices as obtained via Sage (macsyma) From e9b8918271acc3e10e43a0611b5f222517219836 Mon Sep 17 00:00:00 2001 From: Simone Perriello <8199216+tigerjack@users.noreply.github.com> Date: Fri, 26 Jul 2024 14:48:55 +0200 Subject: [PATCH 34/55] ADD install targets --- examples/CMakeLists.txt | 2 ++ src/utils/CMakeLists.txt | 3 +++ 2 files changed, 5 insertions(+) diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index 6ddec7f..87c3e15 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -2,3 +2,5 @@ add_executable(isd_cost_estimate_ex isd_cost_estimate_ex.cpp) target_link_libraries(isd_cost_estimate_ex PRIVATE ledautils) # # Optionally, specify include directories for the examples # target_include_directories(isd_cost_estimate_ex PRIVATE ${CMAKE_SOURCE_DIR}/include) + +install(TARGETS isd_cost_estimate_ex DESTINATION bin) diff --git a/src/utils/CMakeLists.txt b/src/utils/CMakeLists.txt index 7b8baf9..7e6ec76 100644 --- a/src/utils/CMakeLists.txt +++ b/src/utils/CMakeLists.txt @@ -15,3 +15,6 @@ message(STATUS "fmt library: ${fmt_LIBRARIES}") # Specify include directories for this module target_include_directories(ledautils PUBLIC ${PROJECT_SOURCE_DIR}/include/utils) + +# Install the library +install(TARGETS ledautils DESTINATION lib) From d0d2cc9be653d13b06afc9c5d78a413c0f04e655 Mon Sep 17 00:00:00 2001 From: Simone Perriello <8199216+tigerjack@users.noreply.github.com> Date: Fri, 26 Jul 2024 14:49:13 +0200 Subject: [PATCH 35/55] MOD parallel computation --- src/tools/work_factor_computation_parallel.cpp | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/tools/work_factor_computation_parallel.cpp b/src/tools/work_factor_computation_parallel.cpp index 5dd973c..782d647 100644 --- a/src/tools/work_factor_computation_parallel.cpp +++ b/src/tools/work_factor_computation_parallel.cpp @@ -5,14 +5,14 @@ #include #include #include - -#include "binomials.hpp" -#include "isd_cost_estimate.hpp" -#include "logging.hpp" -#include "globals.hpp" -#include +#include #include #include +#include +#include +#include + +#include "globals.hpp" #define NUM_BITS_REAL_MANTISSA 1024 #define IGNORE_DECODING_COST 0 @@ -20,7 +20,7 @@ void to_json(nlohmann::json &j, const Result &r) { j = nlohmann::json{ - {"alg_name", r.alg_name}, {"params", r.params}, {"value", r.value}, {"gje_cost", r.gje_cost}}; + {"alg_name", r.alg_name}, {"params", r.params}, {"value", r.value}, {"gje_cost", r.gje_cost}, {"list_size", 
r.list_size}}; } void from_json(const nlohmann::json &j, Result &r) { From 1bdcd2b66be2c848562f0608eb89bade997fd303 Mon Sep 17 00:00:00 2001 From: Simone Perriello <8199216+tigerjack@users.noreply.github.com> Date: Fri, 26 Jul 2024 16:38:36 +0200 Subject: [PATCH 36/55] DEL #defines to enable specific algorithms --- examples/isd_cost_estimate_ex.cpp | 12 ++ include/utils/isd_cost_estimate.hpp | 31 ++-- src/tools/parameter_generator.cpp | 23 ++- src/tools/work_factor_computation.cpp | 13 +- .../work_factor_computation_parallel.cpp | 9 +- src/utils/isd_cost_estimate.cpp | 149 ++++++++---------- 6 files changed, 131 insertions(+), 106 deletions(-) diff --git a/examples/isd_cost_estimate_ex.cpp b/examples/isd_cost_estimate_ex.cpp index 21f28f0..eee0724 100644 --- a/examples/isd_cost_estimate_ex.cpp +++ b/examples/isd_cost_estimate_ex.cpp @@ -19,6 +19,7 @@ struct Value { uint32_t number_of_errors; uint32_t qc_block_size; bool is_kra; + bool is_red_fac; std::vector costs; }; @@ -38,6 +39,7 @@ void displayValue(const Value &value) { std::cout << " Number of Errors: " << value.number_of_errors << '\n'; std::cout << " QC Block Size: " << value.qc_block_size << '\n'; std::cout << " Is KRA: " << (value.is_kra ? "Yes" : "No") << '\n'; + std::cout << " Is Reduction factor applied: " << (value.is_red_fac ? "Yes" : "No") << '\n'; std::cout << "Costs:\n"; for (const auto &cost : value.costs) { @@ -55,6 +57,7 @@ int main() { 142, 12323, true, + true, { {"Prange", "", false, 171.3, 0.0}, {"Lee-Brickell", "", false, 158.4, 0.0}, @@ -64,4 +67,13 @@ int main() { }}; displayValue(val); + double c_cost = + c_isd_log_cost( + val.codeword_size, val.code_dimension, val.number_of_errors, + val.qc_block_size, val.is_kra, val.is_red_fac, + std::unordered_set{Prange, Lee_Brickell, Leon, Stern, + Finiasz_Sendrier} + ) + .value; + std::cout << c_cost << std::endl; } diff --git a/include/utils/isd_cost_estimate.hpp b/include/utils/isd_cost_estimate.hpp index dfc1378..4e7bd21 100644 --- a/include/utils/isd_cost_estimate.hpp +++ b/include/utils/isd_cost_estimate.hpp @@ -4,16 +4,7 @@ #include #include #include - -#define SKIP_PRANGE 0 -#define SKIP_LB 0 -#define SKIP_LEON 0 -#define SKIP_STERN 0 -#define SKIP_FS 0 -#define SKIP_BJMM 0 -#define SKIP_MMT 0 -#define SKIP_Q_LB 0 -#define SKIP_Q_STERN 0 +#include struct Result { std::string alg_name; @@ -23,6 +14,21 @@ struct Result { double list_size; }; +enum Algorithm { + Prange, + Lee_Brickell, + Leon, + Stern, + Finiasz_Sendrier, + MMT, + BJMM, + // Add more algorithms here +}; +enum QuantumAlgorithm { + Q_Lee_Brickell, + Q_Stern, // NOTE no circuit available +}; + /***************************Classic ISDs***************************************/ const NTL::RR log_probability_k_by_k_is_inv(const NTL::RR &k); @@ -33,7 +39,7 @@ const NTL::RR classic_rref_red_cost(const NTL::RR &n, const NTL::RR &r); Result c_isd_log_cost(const uint32_t n, const uint32_t k, const uint32_t t, const uint32_t qc_order, const uint32_t is_kra, - const bool compute_qc_reduction_factor); + const bool compute_qc_reduction_factor, std::unordered_set algs); Result isd_log_cost_classic_Prange(const uint32_t n, const uint32_t k, const uint32_t t); @@ -55,7 +61,8 @@ Result isd_log_cost_classic_BJMM(const uint32_t n, const uint32_t k, // Quantum Result q_isd_log_cost(const uint32_t n, const uint32_t k, const uint32_t t, const uint32_t qc_order, const uint32_t is_kra, - const bool compute_qc_reduction_factor); + const bool compute_qc_reduction_factor, + std::unordered_set algs); Result 
isd_log_cost_quantum_LB(const uint32_t n, const uint32_t k, const uint32_t t); diff --git a/src/tools/parameter_generator.cpp b/src/tools/parameter_generator.cpp index f9202e1..268b640 100644 --- a/src/tools/parameter_generator.cpp +++ b/src/tools/parameter_generator.cpp @@ -33,9 +33,16 @@ uint32_t estimate_t_val(const uint32_t c_sec_level, const uint32_t q_sec_level, t = (lo + hi) / 2; std::cerr << "testing t " << t << std::endl; achieved_c_sec_level = - c_isd_log_cost(n_0 * p, ((n_0 - 1) * p), t, p, 0, true).value; + c_isd_log_cost(n_0 * p, ((n_0 - 1) * p), t, p, 0, true, + std::unordered_set{Prange, Lee_Brickell, Leon, + Stern, Finiasz_Sendrier, + MMT, BJMM}) + .value; achieved_q_sec_level = - q_isd_log_cost(n_0 * p, ((n_0 - 1) * p), t, p, 0, true).value; + q_isd_log_cost( + n_0 * p, ((n_0 - 1) * p), t, p, 0, true, + std::unordered_set{Q_Lee_Brickell, Q_Stern}) + .value; if ((achieved_c_sec_level >= c_sec_level) && (achieved_q_sec_level >= q_sec_level)) { hi = t; @@ -106,9 +113,17 @@ uint64_t estimate_dv(const uint32_t c_sec_level, // expressed as /* last parameter indicates a KRA, reduce margin by p due to quasi cyclicity */ achieved_c_sec_level = - c_isd_log_cost(n_0 * p, p, n_0 * d_v_prime, p, 1, true).value; + c_isd_log_cost(n_0 * p, p, n_0 * d_v_prime, p, 1, true, + std::unordered_set{Prange, Lee_Brickell, Leon, + Stern, Finiasz_Sendrier, MMT, + BJMM} + ).value; achieved_q_sec_level = - q_isd_log_cost(n_0 * p, p, n_0 * d_v_prime, p, 1, true).value; + q_isd_log_cost( + n_0 * p, p, n_0 * d_v_prime, p, 1, true, + std::unordered_set{Q_Lee_Brickell, Q_Stern} + ) + .value; } } diff --git a/src/tools/work_factor_computation.cpp b/src/tools/work_factor_computation.cpp index 05a2070..1f27caf 100644 --- a/src/tools/work_factor_computation.cpp +++ b/src/tools/work_factor_computation.cpp @@ -49,11 +49,18 @@ int main(int argc, char *argv[]) { << "- : " << is_red_factor_applied << std::endl; std::cout << "Minimum classic cost :" - << c_isd_log_cost(n, k, t, qc_block_size, is_kra, - is_red_factor_applied).value + << c_isd_log_cost( + n, k, t, qc_block_size, is_kra, is_red_factor_applied, + std::unordered_set{Prange, Lee_Brickell, Leon, + Stern, Finiasz_Sendrier, MMT, + BJMM}) + .value << " Minimum quantum cost :" << q_isd_log_cost(n, k, t, qc_block_size, is_kra, - is_red_factor_applied).value; + is_red_factor_applied, + std::unordered_set{ + Q_Lee_Brickell, Q_Stern}) + .value; if (is_red_factor_applied && qc_block_size != 1) std::cout << " (including qc_effects) "; std::cout << std::endl; diff --git a/src/tools/work_factor_computation_parallel.cpp b/src/tools/work_factor_computation_parallel.cpp index 782d647..2f6961b 100644 --- a/src/tools/work_factor_computation_parallel.cpp +++ b/src/tools/work_factor_computation_parallel.cpp @@ -11,6 +11,7 @@ #include #include #include +#include #include "globals.hpp" @@ -48,9 +49,9 @@ int main() { nlohmann::json j; file >> j; + NTL::RR::SetPrecision(NUM_BITS_REAL_MANTISSA); InitConstants(); InitBinomials(); - NTL::RR::SetPrecision(NUM_BITS_REAL_MANTISSA); pi = NTL::ComputePi_RR(); bool is_kra_values[] = {true, false}; std::filesystem::path dirPath(OUT_DIR_RESULTS); @@ -92,9 +93,11 @@ int main() { "is_red_factor_applied {}", n, k, t, qc_block_size, is_kra, is_red_factor_applied); current_c_res = - c_isd_log_cost(n, k, t, qc_block_size, is_kra, is_red_factor_applied); + c_isd_log_cost(n, k, t, qc_block_size, is_kra, is_red_factor_applied, + std::unordered_set{Prange, Lee_Brickell}); current_q_res = - q_isd_log_cost(n, k, t, qc_block_size, is_kra, 
is_red_factor_applied); + q_isd_log_cost(n, k, t, qc_block_size, is_kra, is_red_factor_applied, + std::unordered_set{Q_Lee_Brickell}); std::string is_kra_name = is_kra ? "KRA": "MRA"; out_values[is_kra_name]["C"] = current_c_res; out_values[is_kra_name]["Q"] = current_q_res; diff --git a/src/utils/isd_cost_estimate.cpp b/src/utils/isd_cost_estimate.cpp index d58b547..80555a3 100644 --- a/src/utils/isd_cost_estimate.cpp +++ b/src/utils/isd_cost_estimate.cpp @@ -3,6 +3,7 @@ #include "logging.hpp" #include #include +#include /***************************Classic ISDs***************************************/ @@ -118,6 +119,7 @@ Result isd_log_cost_classic_LB(const uint32_t n, const uint32_t k, } } spdlog::info("Lee-Brickell best p: {}", best_p); + spdlog::info("Lee-Brickell time: {}", NTL::conv(min_log_cost)); Result res; res.alg_name = "Lee-Brickell"; res.params = {{"p", best_p}}; @@ -162,6 +164,7 @@ Result isd_log_cost_classic_Leon(const uint32_t n, const uint32_t k, } } spdlog::info("Leon Best l {} best p: {}", best_l, best_p); + spdlog::info("Leon time: {}", NTL::conv(min_log_cost)); Result res; res.alg_name = "Lee-Brickell"; res.params = {{"p", best_p}, {"l", best_l}}; @@ -223,6 +226,7 @@ Result isd_log_cost_classic_Stern(const uint32_t n, const uint32_t k, } spdlog::info("Stern Best l {}, best p: {}", best_l, best_p); + spdlog::info("Stern time: {}", NTL::conv(min_log_cost)); Result res; res.alg_name = "Stern"; res.params = {{"p", best_p}, {"l", best_l}}; @@ -286,6 +290,7 @@ Result isd_log_cost_classic_FS(const uint32_t n, const uint32_t k, } } spdlog::info("FS Best l {}, best p: {}", best_l, best_p); + spdlog::info("FS time: {}", NTL::conv(min_log_cost)); Result res; res.alg_name = "Fin-Send"; res.params = {{"p", best_p}, {"l", best_l}}; @@ -402,6 +407,8 @@ Result isd_log_cost_classic_MMT(const uint32_t n, const uint32_t k, if (best_l == constrained_max_l) { spdlog::warn("Warning: l {l} on exploration edge!"); } + + spdlog::info("MMT time: {}", NTL::conv(min_log_cost)); Result res; res.alg_name = "MMT"; res.params = {{"p", best_p}, {"l", best_l}}; @@ -545,6 +552,7 @@ Result isd_log_cost_classic_BJMM(const uint32_t n, const uint32_t k, spdlog::info("BJMM Best l {}, best p: {}, best eps1: {}, best eps2: {}", optional_to_string(best_l), optional_to_string(best_p), optional_to_string(best_eps_1), optional_to_string(best_eps_2)); + spdlog::info("BJMM time: {}", NTL::conv(min_log_cost)); Result res; res.alg_name = "BJMM"; res.params = {{"p", best_p.value()}, @@ -600,6 +608,7 @@ Result isd_log_cost_quantum_LB(const uint32_t n, const uint32_t k, throw std::runtime_error("One or more variables are not initialized."); } + spdlog::info("Quantum LB time: {}", NTL::conv(min_log_cost)); Result res; res.alg_name = "Quantum Lee-Brickell"; res.params = {{"p", best_p.value()}}; @@ -609,7 +618,7 @@ Result isd_log_cost_quantum_LB(const uint32_t n, const uint32_t k, #define MAX_M (t / 2) -Result isd_log_cost_quantum_stern(const uint32_t n, const uint32_t k, +Result isd_log_cost_quantum_Stern(const uint32_t n, const uint32_t k, const uint32_t t) { NTL::RR n_real = NTL::RR(n); NTL::RR k_real = NTL::RR(k); @@ -687,9 +696,11 @@ Result isd_log_cost_quantum_stern(const uint32_t n, const uint32_t k, return res; } + /***************************Aggregation ***************************************/ -double get_qc_red_factor_log(const uint32_t qc_order, const uint32_t is_kra) { +double +get_qc_red_factor_log(const uint32_t qc_order, const uint32_t is_kra) { /* For key recovery attacks (CFP) the advantage from 
quasi-cyclicity is p. For * a message recovery (SDP), the DOOM advantage is sqrt(p). */ @@ -699,107 +710,77 @@ double get_qc_red_factor_log(const uint32_t qc_order, const uint32_t is_kra) { Result c_isd_log_cost(const uint32_t n, const uint32_t k, const uint32_t t, const uint32_t qc_order, const uint32_t is_kra, - const bool compute_qc_reduction_factor) { + const bool compute_qc_reduction_factor, std::unordered_set algs) { Result current_res, min_res; double qc_red_factor = compute_qc_reduction_factor ? get_qc_red_factor_log(qc_order, is_kra) : 0; double min_cost = n; // the cost cannot be greater than 2^n -#if SKIP_PRANGE == 0 - current_res = isd_log_cost_classic_Prange(n, k, t); - current_res.value -= qc_red_factor; - if (current_res.value < min_cost) { - min_res = current_res; - min_cost = current_res.value; - } -#endif - -#if SKIP_LB == 0 - current_res = isd_log_cost_classic_LB(n, k, t); - current_res.value -= qc_red_factor; - if (current_res.value < min_cost) { - min_res = current_res; - min_cost = current_res.value; - } -#endif - -#if SKIP_LEON == 0 - current_res = isd_log_cost_classic_Leon(n, k, t); - current_res.value -= qc_red_factor; - if (current_res.value < min_cost) { - min_res = current_res; - min_cost = current_res.value; - } -#endif - -#if SKIP_STERN == 0 - current_res = isd_log_cost_classic_Stern(n, k, t); - current_res.value -= qc_red_factor; - if (current_res.value < min_cost) { - min_res = current_res; - min_cost = current_res.value; - } -#endif - -#if SKIP_FS == 0 - current_res = isd_log_cost_classic_FS(n, k, t); - current_res.value -= qc_red_factor; - if (current_res.value < min_cost) { - min_res = current_res; - min_cost = current_res.value; - } -#endif - -#if SKIP_MMT == 0 - current_res = isd_log_cost_classic_MMT(n, k, t); - current_res.value -= qc_red_factor; - if (current_res.value < min_cost) { - min_res = current_res; - min_cost = current_res.value; - } -#endif - -#if SKIP_BJMM == 0 - current_res = isd_log_cost_classic_BJMM(n, k, t); - current_res.value -= qc_red_factor; - if (current_res.value < min_cost) { - min_res = current_res; - min_cost = current_res.value; + for (const auto &algo : algs) { + switch (algo) { + case Prange: + current_res = isd_log_cost_classic_Prange(n,k,t); + break; + case Lee_Brickell: + current_res = isd_log_cost_classic_LB(n,k,t); + break; + case Leon: + current_res = isd_log_cost_classic_Leon(n,k,t); + break; + case Stern: + current_res = isd_log_cost_classic_Stern(n,k,t); + break; + case Finiasz_Sendrier: + current_res = isd_log_cost_classic_FS(n, k, t); + break; + case MMT: + current_res = isd_log_cost_classic_MMT(n, k, t); + break; + case BJMM: + current_res = isd_log_cost_classic_BJMM(n,k,t); + break; + default: + std::cerr << "Unknown algorithm\n"; + break; + } + current_res.value -= qc_red_factor; + if (current_res.value < min_cost) { + min_res = current_res; + min_cost = current_res.value; + } } -#endif return min_res; } Result q_isd_log_cost(const uint32_t n, const uint32_t k, const uint32_t t, const uint32_t qc_order, const uint32_t is_kra, - const bool compute_qc_reduction_factor) { + const bool compute_qc_reduction_factor, + std::unordered_set algs) { Result current_res, min_res; double min_cost = n; // cannot be greater than n double qc_red_factor = compute_qc_reduction_factor ? 
get_qc_red_factor_log(qc_order, is_kra) : 0; - /* This is just a quick hack since experiments says that p = 1 is - * the optimal value at least for the NIST code-based finalists - */ -#if SKIP_Q_LB == 0 - current_res = isd_log_cost_quantum_LB(n, k, t); - current_res.value -= qc_red_factor; - if (current_res.value < min_cost) { - min_res = current_res; - min_cost = current_res.value; - } -#endif - -#if SKIP_Q_STERN == 0 - current_res = isd_log_cost_classic_Stern(n, k, t); - current_res.value -= qc_red_factor; - if (current_res.value < min_cost) { - min_res = current_res; - min_cost = current_res.value; + for (const auto &algo : algs) { + switch (algo) { + case Q_Lee_Brickell: + current_res = isd_log_cost_quantum_LB(n,k,t); + break; + case Q_Stern: + current_res = isd_log_cost_quantum_Stern(n,k,t); + break; + default: + std::cerr << "Unknown quantum algorithm\n"; + break; + } + current_res.value -= qc_red_factor; + if (current_res.value < min_cost) { + min_res = current_res; + min_cost = current_res.value; + } } -#endif return min_res; } From b91ed483ed17553361cf4b0ca80fa157ffd51287 Mon Sep 17 00:00:00 2001 From: Simone Perriello <8199216+tigerjack@users.noreply.github.com> Date: Fri, 26 Jul 2024 16:39:03 +0200 Subject: [PATCH 37/55] MOD CMakeLists --- examples/CMakeLists.txt | 10 +++++++++- src/tools/CMakeLists.txt | 1 - 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index 87c3e15..cf73c45 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -1,5 +1,13 @@ +find_library(ledautils ledautils) +message(STATUS "ledautils library: ${ledautils_LIBRARIES}") + +find_package(spdlog REQUIRED) +find_package(fmt REQUIRED) + add_executable(isd_cost_estimate_ex isd_cost_estimate_ex.cpp) -target_link_libraries(isd_cost_estimate_ex PRIVATE ledautils) +set(LIBS ledautils ${NTL_LIB} ${GMP_LIB} ${M_LIB} spdlog::spdlog fmt::fmt) +target_link_libraries(isd_cost_estimate_ex PRIVATE ${LIBS}) + # # Optionally, specify include directories for the examples # target_include_directories(isd_cost_estimate_ex PRIVATE ${CMAKE_SOURCE_DIR}/include) diff --git a/src/tools/CMakeLists.txt b/src/tools/CMakeLists.txt index 5bfe920..41078f5 100644 --- a/src/tools/CMakeLists.txt +++ b/src/tools/CMakeLists.txt @@ -1,4 +1,3 @@ - find_package(OpenMP REQUIRED) message(STATUS "OpenMP library: ${OpenMP_CXX_LIBRARIES}") find_library(GMP_LIB gmp) From 73cc5cd2ba7a4ad0b04b3073adff2502f8af50ae Mon Sep 17 00:00:00 2001 From: Simone Perriello <8199216+tigerjack@users.noreply.github.com> Date: Sat, 27 Jul 2024 11:10:08 +0200 Subject: [PATCH 38/55] FIX binomials for uninitialized data; DEL InitConstants --- include/utils/binomials.hpp | 5 +- src/tools/constant_weight_encodable_bits.cpp | 2 +- src/tools/enumeration_complexity.cpp | 2 +- src/tools/parameter_generator.cpp | 2 +- src/tools/work_factor_computation.cpp | 2 +- .../work_factor_computation_parallel.cpp | 2 +- src/utils/binomials.cpp | 90 ++++++++++--------- 7 files changed, 56 insertions(+), 49 deletions(-) diff --git a/include/utils/binomials.hpp b/include/utils/binomials.hpp index 14f69fa..940f3bd 100644 --- a/include/utils/binomials.hpp +++ b/include/utils/binomials.hpp @@ -15,7 +15,10 @@ extern NTL::RR pi; extern NTL::RR nat_log_2; -void InitConstants(); +extern NTL::Mat binomial_table; +extern NTL::Mat low_k_binomial_table; +extern bool is_data_initialized; + void InitBinomials(); NTL::RR lnFactorial(NTL::RR n); diff --git a/src/tools/constant_weight_encodable_bits.cpp 
b/src/tools/constant_weight_encodable_bits.cpp index 09998e0..3016c44 100644 --- a/src/tools/constant_weight_encodable_bits.cpp +++ b/src/tools/constant_weight_encodable_bits.cpp @@ -12,7 +12,7 @@ int main(int argc, char* argv[]){ return -1; } - InitConstants(); + InitBinomials(); NTL::RR::SetPrecision(NUM_BITS_REAL_MANTISSA); uint32_t n = atoi(argv[1]); diff --git a/src/tools/enumeration_complexity.cpp b/src/tools/enumeration_complexity.cpp index 6de529e..5dedbe7 100644 --- a/src/tools/enumeration_complexity.cpp +++ b/src/tools/enumeration_complexity.cpp @@ -15,7 +15,7 @@ int main(int argc, char* argv[]){ return -1; } - InitConstants(); + InitBinomials(); NTL::RR::SetPrecision(NUM_BITS_REAL_MANTISSA); uint32_t p = atoi(argv[1]); diff --git a/src/tools/parameter_generator.cpp b/src/tools/parameter_generator.cpp index 268b640..0767977 100644 --- a/src/tools/parameter_generator.cpp +++ b/src/tools/parameter_generator.cpp @@ -175,7 +175,7 @@ int main(int argc, char *argv[]) { } p_th = proper_primes[current_prime_pos]; - InitConstants(); + InitBinomials(); NTL::RR::SetPrecision(NUM_BITS_REAL_MANTISSA); pi = NTL::ComputePi_RR(); diff --git a/src/tools/work_factor_computation.cpp b/src/tools/work_factor_computation.cpp index 1f27caf..a5aac7b 100644 --- a/src/tools/work_factor_computation.cpp +++ b/src/tools/work_factor_computation.cpp @@ -29,7 +29,7 @@ int main(int argc, char *argv[]) { /* reduce by a factor matching the QC block size */ - InitConstants(); + InitBinomials(); NTL::RR::SetPrecision(NUM_BITS_REAL_MANTISSA); pi = NTL::ComputePi_RR(); diff --git a/src/tools/work_factor_computation_parallel.cpp b/src/tools/work_factor_computation_parallel.cpp index 2f6961b..5ef7410 100644 --- a/src/tools/work_factor_computation_parallel.cpp +++ b/src/tools/work_factor_computation_parallel.cpp @@ -50,7 +50,7 @@ int main() { file >> j; NTL::RR::SetPrecision(NUM_BITS_REAL_MANTISSA); - InitConstants(); + InitBinomials(); pi = NTL::ComputePi_RR(); bool is_kra_values[] = {true, false}; diff --git a/src/utils/binomials.cpp b/src/utils/binomials.cpp index 39c7389..f9647ae 100644 --- a/src/utils/binomials.cpp +++ b/src/utils/binomials.cpp @@ -1,16 +1,16 @@ #include "binomials.hpp" +#include "logging.hpp" +#include +#include -NTL::RR pi; -NTL::RR nat_log_2; NTL::Mat binomial_table; NTL::Mat low_k_binomial_table; +bool is_data_initialized = false; -NTL::RR log2_RR(NTL::RR v){ return NTL::log(v) / nat_log_2; } +NTL::RR nat_log_2 = NTL::log(NTL::RR(2)); +NTL::RR pi = NTL::ComputePi_RR(); -void InitConstants(){ - nat_log_2 = NTL::log(NTL::RR(2)); - pi = NTL::ComputePi_RR(); -} +NTL::RR log2_RR(NTL::RR v) { return NTL::log(v) / nat_log_2; } /*NOTE: NTL allows to access matrices as 1- based with Matlab notation */ void InitBinomials() { @@ -27,8 +27,8 @@ void InitBinomials() { } } - std::cerr << "Precomputing low n-choose-t up to n: " << LOW_K_MAX_N - << " t: " << LOW_K_MAX_T << std::endl; + // std::cerr << "Precomputing low n-choose-t up to n: " << LOW_K_MAX_N + // << " t: " << LOW_K_MAX_T << std::endl; low_k_binomial_table.SetDims(LOW_K_MAX_N + 1, LOW_K_MAX_T + 1); low_k_binomial_table[0][0] = NTL::ZZ(1); for (unsigned i = 0; i <= LOW_K_MAX_N; i++) { @@ -39,47 +39,51 @@ void InitBinomials() { low_k_binomial_table[i][j - 1] * NTL::ZZ(i - j + 1) / NTL::ZZ(j); } } - std::cerr << "done" << std::endl; + is_data_initialized = true; + // std::cerr << "done" << std::endl; } -NTL::RR lnFactorial(NTL::RR n){ - /* log of Stirling series approximated to the fourth term - * n log(n) - n + 1/2 log(2 \pi n) + log(- 139/(51840 
n^3) + - * + 1/(288 n^2) + 1/(12 n) + 1) */ - return n * NTL::log(n) - n + 0.5 * NTL::log(2*pi*n) + - NTL::log( - NTL::RR(139)/(n*n*n * 51840) + - NTL::RR(1)/(n*n*288) + - NTL::RR(1)/(n*12) + - 1); +NTL::RR lnFactorial(NTL::RR n) { + /* log of Stirling series approximated to the fourth term + * n log(n) - n + 1/2 log(2 \pi n) + log(- 139/(51840 n^3) + + * + 1/(288 n^2) + 1/(12 n) + 1) */ + return n * NTL::log(n) - n + 0.5 * NTL::log(2 * pi * n) + + NTL::log(-NTL::RR(139) / (n * n * n * 51840) + + NTL::RR(1) / (n * n * 288) + NTL::RR(1) / (n * 12) + 1); } -NTL::RR lnBinom(NTL::RR n, NTL::RR k){ - if ( (k == NTL::RR(0) ) || (k == n) ) { - return NTL::RR(0); - } - return lnFactorial(n) - (lnFactorial(k) + lnFactorial(n-k) ); +NTL::RR lnBinom(NTL::RR n, NTL::RR k) { + if ((k == NTL::RR(0)) || (k == n)) { + return NTL::RR(0); + } + return lnFactorial(n) - (lnFactorial(k) + lnFactorial(n - k)); } - -NTL::ZZ binomial_wrapper(long n, long k){ - if(k>n) return NTL::ZZ(0); - /* employ memoized if available */ - if ((n <= MAX_N) && (k < MAX_T)){ - return binomial_table[n][k]; +NTL::ZZ binomial_wrapper(long n, long k) { + if (k > n) + return NTL::ZZ(0); + /* employ memoized if available */ + if (is_data_initialized) { + if ((n <= MAX_N) && (k < MAX_T)) { + return binomial_table[n][k]; } - if ((n <= LOW_K_MAX_N) && (k < LOW_K_MAX_T)){ - return low_k_binomial_table[n][k]; + if ((n <= LOW_K_MAX_N) && (k < LOW_K_MAX_T)) { + return low_k_binomial_table[n][k]; + } else { + spdlog::info( + "Binomial table not initizialed, resorting to standard computation"); } - - /* shortcut computation for fast cases (k < 10) where - * Stirling may not provide good approximations */ - if (k < 10) { - NTL::ZZ result = NTL::ZZ(1); - for(int i = 1 ; i <= k; i++){ - result = (result * (n+1-i))/i; - } - return result; + } + + /* shortcut computation for fast cases (k < 10) where + * Stirling may not provide good approximations */ + if (k < 10) { + NTL::ZZ result = NTL::ZZ(1); + for (int i = 1; i <= k; i++) { + result = (result * (n + 1 - i)) / i; } - /*Fall back to Stirling*/ - return NTL::conv( NTL::exp( lnBinom(NTL::RR(n),NTL::RR(k)) )); + return result; + } + /*Fall back to Stirling*/ + return NTL::conv(NTL::exp(lnBinom(NTL::RR(n), NTL::RR(k)))); } From 9597a4e0d8628e8c3cb6ab91988b94b17885d190 Mon Sep 17 00:00:00 2001 From: Simone Perriello <8199216+tigerjack@users.noreply.github.com> Date: Sat, 27 Jul 2024 11:36:06 +0200 Subject: [PATCH 39/55] Minors --- include/utils/isd_cost_estimate.hpp | 5 ++++- src/tools/work_factor_computation_parallel.cpp | 2 +- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/include/utils/isd_cost_estimate.hpp b/include/utils/isd_cost_estimate.hpp index 4e7bd21..8f05325 100644 --- a/include/utils/isd_cost_estimate.hpp +++ b/include/utils/isd_cost_estimate.hpp @@ -39,7 +39,10 @@ const NTL::RR classic_rref_red_cost(const NTL::RR &n, const NTL::RR &r); Result c_isd_log_cost(const uint32_t n, const uint32_t k, const uint32_t t, const uint32_t qc_order, const uint32_t is_kra, - const bool compute_qc_reduction_factor, std::unordered_set algs); + const bool compute_qc_reduction_factor, + std::unordered_set algs); + +double get_qc_red_factor_log(const uint32_t qc_order, const uint32_t is_kra); Result isd_log_cost_classic_Prange(const uint32_t n, const uint32_t k, const uint32_t t); diff --git a/src/tools/work_factor_computation_parallel.cpp b/src/tools/work_factor_computation_parallel.cpp index 5ef7410..2abb5e6 100644 --- a/src/tools/work_factor_computation_parallel.cpp +++ 
b/src/tools/work_factor_computation_parallel.cpp @@ -94,7 +94,7 @@ int main() { n, k, t, qc_block_size, is_kra, is_red_factor_applied); current_c_res = c_isd_log_cost(n, k, t, qc_block_size, is_kra, is_red_factor_applied, - std::unordered_set{Prange, Lee_Brickell}); + std::unordered_set{Stern}); current_q_res = q_isd_log_cost(n, k, t, qc_block_size, is_kra, is_red_factor_applied, std::unordered_set{Q_Lee_Brickell}); From a5c4c2fb1c74958b45c484e56cf53ba3823d9f76 Mon Sep 17 00:00:00 2001 From: Simone Perriello <8199216+tigerjack@users.noreply.github.com> Date: Sat, 27 Jul 2024 11:36:31 +0200 Subject: [PATCH 40/55] MOD isd estimate example --- examples/isd_cost_estimate_ex.cpp | 113 +++++++++++++++++++++--------- 1 file changed, 81 insertions(+), 32 deletions(-) diff --git a/examples/isd_cost_estimate_ex.cpp b/examples/isd_cost_estimate_ex.cpp index eee0724..aecb9f8 100644 --- a/examples/isd_cost_estimate_ex.cpp +++ b/examples/isd_cost_estimate_ex.cpp @@ -1,9 +1,11 @@ -#include #include // for uint32_t -#include -#include -#include +#include #include +#include +#include +#include +#include +#include struct Cost { std::string algorithm; @@ -20,7 +22,7 @@ struct Value { uint32_t qc_block_size; bool is_kra; bool is_red_fac; - std::vector costs; + std::map costs; }; void displayCost(const Cost &cost) { @@ -39,41 +41,88 @@ void displayValue(const Value &value) { std::cout << " Number of Errors: " << value.number_of_errors << '\n'; std::cout << " QC Block Size: " << value.qc_block_size << '\n'; std::cout << " Is KRA: " << (value.is_kra ? "Yes" : "No") << '\n'; - std::cout << " Is Reduction factor applied: " << (value.is_red_fac ? "Yes" : "No") << '\n'; + std::cout << " Is Reduction factor applied: " + << (value.is_red_fac ? "Yes" : "No") << '\n'; std::cout << "Costs:\n"; - for (const auto &cost : value.costs) { - displayCost(cost); + for (const auto &costPair : value.costs) { + displayCost(costPair.second); std::cout << "-----\n"; } } int main() { - std::cout << "Hello world\n"; // Expected values taken from LEDA specs, Table 4.1 + std::vector values; - Value val = {24646, - 12323, - 142, - 12323, - true, - true, - { - {"Prange", "", false, 171.3, 0.0}, - {"Lee-Brickell", "", false, 158.4, 0.0}, - {"Leon", "", false, 154.4, 0.0}, - {"Stern", "", false, 147.4, 0.0}, - {"Fin-Send", "", false, 147.4, 0.0}, - }}; + Cost pra = {"Prange", "", false, 171.3, 0.0}; + Cost lbr = {"Lee-Brickell", "", false, 158.4, 0.0}; + Cost leo = {"Leon", "", false, 154.4, 0.0}; + Cost ste = {"Stern", "", false, 147.4, 0.0}; + Cost fis = {"Fin-Send", "", false, 147.4, 0.0}; + + std::map costs = {{"Prange", pra}, + {"Lee-Brickell", lbr}, + {"Leon", leo}, + {"Stern", ste}, + {"Fin-Send", fis}}; + + Value val = {24646, 12323, 142, 12323, true, true, costs}; + + // displayValue(val); + + Result current_res, min_res; + uint32_t n = val.codeword_size; + uint32_t k = val.code_dimension; + uint32_t t = val.number_of_errors; + double qc_red_fac = get_qc_red_factor_log(val.qc_block_size, true); + double diff; + std::string name; + std::cout << "qc_red_fac " << qc_red_fac << std::endl; + for (const auto &algo : std::unordered_set{ + Prange, Lee_Brickell, Leon, Stern, Finiasz_Sendrier}) { + switch (algo) { + case Prange: + current_res = isd_log_cost_classic_Prange(n, k, t); + name = "Prange"; + break; + case Lee_Brickell: + current_res = isd_log_cost_classic_LB(n, k, t); + name = "Lee-Brickell"; + break; + case Leon: + current_res = isd_log_cost_classic_Leon(n, k, t); + name = "Leon"; + break; + case Stern: + current_res = 
isd_log_cost_classic_Stern(n, k, t); + name = "Stern"; + break; + case Finiasz_Sendrier: + current_res = isd_log_cost_classic_FS(n, k, t); + name = "Fin-Send"; + break; + case MMT: + current_res = isd_log_cost_classic_MMT(n, k, t); + name = "MMT "; + break; + case BJMM: + current_res = isd_log_cost_classic_BJMM(n, k, t); + name = "BJMM "; + break; + default: + std::cerr << "Unknown algorithm\n"; + break; + } + current_res.value -= qc_red_fac; + diff = std::abs(costs[name].time_complexity - current_res.value); + std::cout << name << "Obtained: " << current_res.value + << " Expected: " << costs[name].time_complexity + << " Diff: " << diff << std::endl; + if (diff >= 1.0) { + std::cerr << "WARNING: huge diff"; + } + } - displayValue(val); - double c_cost = - c_isd_log_cost( - val.codeword_size, val.code_dimension, val.number_of_errors, - val.qc_block_size, val.is_kra, val.is_red_fac, - std::unordered_set{Prange, Lee_Brickell, Leon, Stern, - Finiasz_Sendrier} - ) - .value; - std::cout << c_cost << std::endl; + return 0; } From ab00a62373679fd4a2efb668f2d8d3ec78b9e3a4 Mon Sep 17 00:00:00 2001 From: Simone Perriello <8199216+tigerjack@users.noreply.github.com> Date: Sat, 27 Jul 2024 16:42:11 +0200 Subject: [PATCH 41/55] DEL logging utilities --- examples/isd_cost_estimate_ex.cpp | 7 +- include/utils/logging.hpp | 88 ++++++++++++++----- src/tools/parameter_generator.cpp | 38 ++++---- src/tools/work_factor_computation.cpp | 18 ++-- .../work_factor_computation_parallel.cpp | 15 ++-- src/utils/binomials.cpp | 22 +++-- src/utils/isd_cost_estimate.cpp | 43 ++++----- src/utils/logging.cpp | 77 +++++++++------- 8 files changed, 196 insertions(+), 112 deletions(-) diff --git a/examples/isd_cost_estimate_ex.cpp b/examples/isd_cost_estimate_ex.cpp index aecb9f8..3fc67f1 100644 --- a/examples/isd_cost_estimate_ex.cpp +++ b/examples/isd_cost_estimate_ex.cpp @@ -4,9 +4,12 @@ #include #include #include +#include #include #include +// #include "logging.hpp" + struct Cost { std::string algorithm; std::string type; // CFP1, CFP2, CFP3, SDP @@ -52,7 +55,7 @@ void displayValue(const Value &value) { } int main() { - // Expected values taken from LEDA specs, Table 4.1 + std::cout<< "logger setted up" << std::endl; std::vector values; Cost pra = {"Prange", "", false, 171.3, 0.0}; @@ -116,7 +119,7 @@ int main() { } current_res.value -= qc_red_fac; diff = std::abs(costs[name].time_complexity - current_res.value); - std::cout << name << "Obtained: " << current_res.value + std::cout << name << ". 
Obtained: " << current_res.value << " Expected: " << costs[name].time_complexity << " Diff: " << diff << std::endl; if (diff >= 1.0) { diff --git a/include/utils/logging.hpp b/include/utils/logging.hpp index 1e16c53..7ccac17 100644 --- a/include/utils/logging.hpp +++ b/include/utils/logging.hpp @@ -1,33 +1,81 @@ #pragma once -#include + +#include +#include +#include +#include #include #include #include +#include + +namespace Logger { + class LoggerManager { + public: + static LoggerManager& getInstance(); -void configure_logger(const std::optional filename); + void setup_logger(const std::string& logger_name, + spdlog::level::level_enum console_level, + spdlog::level::level_enum file_level, + const std::string& pattern = "[%Y-%m-%d %H:%M:%S.%e] [%n] [%l] [%s:%#] %v"); + std::shared_ptr get_logger(const std::string& logger_name); -std::string optional_to_string(const std::optional &opt); + template + std::string optional_to_string(const std::optional& opt); + + template + std::string array_to_string(const std::vector& vec); + + template + std::string array_to_string(const T* array, size_t size); -template std::string array_to_string(const std::vector &vec) { - std::string result = "["; - for (size_t i = 0; i < vec.size(); ++i) { - result += std::to_string(vec[i]); - if (i < vec.size() - 1) { - result += ", "; + private: + LoggerManager() = default; + std::map> loggers; + }; + + template + inline std::string LoggerManager::optional_to_string(const std::optional& opt) { + if (opt) { + return std::to_string(*opt); // Convert the value to string if present + } else { + return "None"; // Represent the absence of value } } - result += "]"; - return result; -} -template std::string array_to_string(const T *array, size_t size) { - std::string result = "["; - for (size_t i = 0; i < size; ++i) { - result += std::to_string(array[i]); - if (i < size - 1) { - result += ", "; + template <> + inline std::string LoggerManager::optional_to_string(const std::optional& opt) { + if (opt) { + return *opt; // Return the string directly if present + } else { + return "None"; // Represent the absence of value } } - result += "]"; - return result; + + template + inline std::string LoggerManager::array_to_string(const std::vector& vec) { + std::string result = "["; + for (size_t i = 0; i < vec.size(); ++i) { + result += std::to_string(vec[i]); + if (i < vec.size() - 1) { + result += ", "; + } + } + result += "]"; + return result; + } + + template + inline std::string LoggerManager::array_to_string(const T* array, size_t size) { + std::string result = "["; + for (size_t i = 0; i < size; ++i) { + result += std::to_string(array[i]); + if (i < size - 1) { + result += ", "; + } + } + result += "]"; + return result; + } } + diff --git a/src/tools/parameter_generator.cpp b/src/tools/parameter_generator.cpp index 0767977..7552515 100644 --- a/src/tools/parameter_generator.cpp +++ b/src/tools/parameter_generator.cpp @@ -3,12 +3,6 @@ #include #include -#define NUM_BITS_REAL_MANTISSA 128 -#define IGNORE_DECODING_COST 0 -// #define SKIP_BJMM 1 -// #define SKIP_MMT 1 -// #define LOG_COST_CRITERION 1 - #include "binomials.hpp" #include "bit_error_probabilities.hpp" #include "isd_cost_estimate.hpp" @@ -18,6 +12,15 @@ #include #include #include +#include + +#define NUM_BITS_REAL_MANTISSA 128 +#define IGNORE_DECODING_COST 0 +// #define SKIP_BJMM 1 +// #define SKIP_MMT 1 +// #define LOG_COST_CRITERION 1 +// static auto LOGGER = +// Logger::LoggerManager::getInstance().get_logger("isd_cost_estimate"); uint32_t estimate_t_val(const 
uint32_t c_sec_level, const uint32_t q_sec_level, const uint32_t n_0, const uint32_t p) { @@ -113,16 +116,15 @@ uint64_t estimate_dv(const uint32_t c_sec_level, // expressed as /* last parameter indicates a KRA, reduce margin by p due to quasi cyclicity */ achieved_c_sec_level = - c_isd_log_cost(n_0 * p, p, n_0 * d_v_prime, p, 1, true, - std::unordered_set{Prange, Lee_Brickell, Leon, - Stern, Finiasz_Sendrier, MMT, - BJMM} - ).value; + c_isd_log_cost( + n_0 * p, p, n_0 * d_v_prime, p, 1, true, + std::unordered_set{Prange, Lee_Brickell, Leon, Stern, + Finiasz_Sendrier, MMT, BJMM}) + .value; achieved_q_sec_level = q_isd_log_cost( n_0 * p, p, n_0 * d_v_prime, p, 1, true, - std::unordered_set{Q_Lee_Brickell, Q_Stern} - ) + std::unordered_set{Q_Lee_Brickell, Q_Stern}) .value; } } @@ -175,7 +177,6 @@ int main(int argc, char *argv[]) { } p_th = proper_primes[current_prime_pos]; - InitBinomials(); NTL::RR::SetPrecision(NUM_BITS_REAL_MANTISSA); pi = NTL::ComputePi_RR(); @@ -263,9 +264,12 @@ int main(int argc, char *argv[]) { spdlog::error("Error: One or more variables are not initialized."); throw std::runtime_error("One or more variables are not initialized."); } else { - spdlog::info("parameter set found: p={}, t={}, d_v={}, mpartition={}", - optional_to_string(p_ok), optional_to_string(t_ok), - optional_to_string(d_v_ok), array_to_string(mpartition_ok)); + spdlog::info( + "parameter set found: p={}, t={}, d_v={}, mpartition={}", + Logger::LoggerManager::getInstance().optional_to_string(p_ok), + Logger::LoggerManager::getInstance().optional_to_string(t_ok), + Logger::LoggerManager::getInstance().optional_to_string(d_v_ok), + Logger::LoggerManager::getInstance().array_to_string(mpartition_ok)); } // std::cout // << " p:" << p_ok << " t: " << t_ok; diff --git a/src/tools/work_factor_computation.cpp b/src/tools/work_factor_computation.cpp index a5aac7b..0ea4a71 100644 --- a/src/tools/work_factor_computation.cpp +++ b/src/tools/work_factor_computation.cpp @@ -1,5 +1,6 @@ #include #include +// #include #define NUM_BITS_REAL_MANTISSA 1024 #define IGNORE_DECODING_COST 0 @@ -27,9 +28,13 @@ int main(int argc, char *argv[]) { return -1; } + // Logger::LoggerManager::getInstance().setup_logger( + // "binomials", spdlog::level::err, spdlog::level::err); + // Logger::LoggerManager::getInstance().setup_logger( + // "isd_cost_estimate", spdlog::level::err, spdlog::level::err); + /* reduce by a factor matching the QC block size */ - InitBinomials(); NTL::RR::SetPrecision(NUM_BITS_REAL_MANTISSA); pi = NTL::ComputePi_RR(); @@ -46,14 +51,17 @@ int main(int argc, char *argv[]) { << "- : " << t << std::endl << "- : " << qc_block_size << std::endl << "- : " << is_kra << std::endl - << "- : " << is_red_factor_applied << std::endl; + << "- : " << is_red_factor_applied + << std::endl; std::cout << "Minimum classic cost :" << c_isd_log_cost( n, k, t, qc_block_size, is_kra, is_red_factor_applied, - std::unordered_set{Prange, Lee_Brickell, Leon, - Stern, Finiasz_Sendrier, MMT, - BJMM}) + std::unordered_set{ + Prange, Lee_Brickell, Leon, Stern, + // Finiasz_Sendrier, // + // MMT, BJMM // + }) .value << " Minimum quantum cost :" << q_isd_log_cost(n, k, t, qc_block_size, is_kra, diff --git a/src/tools/work_factor_computation_parallel.cpp b/src/tools/work_factor_computation_parallel.cpp index 2abb5e6..399abec 100644 --- a/src/tools/work_factor_computation_parallel.cpp +++ b/src/tools/work_factor_computation_parallel.cpp @@ -10,8 +10,9 @@ #include #include #include -#include +// #include #include +#include #include "globals.hpp" 
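For reference, a minimal sketch of how the Logger::LoggerManager from include/utils/logging.hpp above could be wired back into one of the tools if the commented-out calls are re-enabled. The logger name and levels are illustrative choices, and the file sink writes under the hard-coded logs/ directory, which is assumed to exist:

    #include "logging.hpp"

    int main() {
      auto &mgr = Logger::LoggerManager::getInstance();
      // Console sink at info, file sink (logs/isd_cost_estimate.log) at debug.
      mgr.setup_logger("isd_cost_estimate", spdlog::level::info, spdlog::level::debug);
      auto logger = mgr.get_logger("isd_cost_estimate");
      logger->info("Stern cost for n={}, k={}, t={}", 24646, 12323, 142);
      return 0;
    }
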
@@ -32,9 +33,11 @@ void from_json(const nlohmann::json &j, Result &r) { } int main() { - // Configure the logger - configure_logger(std::nullopt); + // Logger::LoggerManager::getInstance().setup_logger( + // "binomials", spdlog::level::info, spdlog::level::debug); + // Logger::LoggerManager::getInstance().setup_logger( + // "isd_cost_estimate", spdlog::level::info, spdlog::level::debug); const std::string input_isd_values = "out/isd_values.json"; std::ifstream file(input_isd_values); @@ -89,9 +92,9 @@ int main() { Result current_q_res; for (bool is_kra : is_kra_values) { - spdlog::info("Processing n {}, k {}, t {}, qc_block_size {}, is_kra {}, " - "is_red_factor_applied {}", - n, k, t, qc_block_size, is_kra, is_red_factor_applied); + // spdlog::info("Processing n {}, k {}, t {}, qc_block_size {}, is_kra {}, " + // "is_red_factor_applied {}", + // n, k, t, qc_block_size, is_kra, is_red_factor_applied); current_c_res = c_isd_log_cost(n, k, t, qc_block_size, is_kra, is_red_factor_applied, std::unordered_set{Stern}); diff --git a/src/utils/binomials.cpp b/src/utils/binomials.cpp index f9647ae..07b0620 100644 --- a/src/utils/binomials.cpp +++ b/src/utils/binomials.cpp @@ -1,5 +1,5 @@ #include "binomials.hpp" -#include "logging.hpp" +// #include "logging.hpp" #include #include @@ -12,10 +12,13 @@ NTL::RR pi = NTL::ComputePi_RR(); NTL::RR log2_RR(NTL::RR v) { return NTL::log(v) / nat_log_2; } +// static auto LOGGER = +// Logger::LoggerManager::getInstance().get_logger("binomials"); + /*NOTE: NTL allows to access matrices as 1- based with Matlab notation */ void InitBinomials() { - std::cerr << "Precomputing n-choose-t up to n: " << MAX_N << " t: " << MAX_T - << std::endl; + // LOGGER + // ->info("Precomputing n-choose-t up to n: {}, t: {}", MAX_N, MAX_T); binomial_table.SetDims(MAX_N + 1, MAX_T + 1); binomial_table[0][0] = NTL::ZZ(1); for (unsigned i = 1; i <= MAX_N; i++) { @@ -26,9 +29,10 @@ void InitBinomials() { binomial_table[i][j - 1] * NTL::ZZ(i - j + 1) / NTL::ZZ(j); } } + binomial_table.SetDims(MAX_N + 1, MAX_T + 1); - // std::cerr << "Precomputing low n-choose-t up to n: " << LOW_K_MAX_N - // << " t: " << LOW_K_MAX_T << std::endl; + // LOGGER->info("Precomputing low n-choose-t up to n: {}, t: {}", LOW_K_MAX_N, + // LOW_K_MAX_T); low_k_binomial_table.SetDims(LOW_K_MAX_N + 1, LOW_K_MAX_T + 1); low_k_binomial_table[0][0] = NTL::ZZ(1); for (unsigned i = 0; i <= LOW_K_MAX_N; i++) { @@ -40,7 +44,7 @@ void InitBinomials() { } } is_data_initialized = true; - // std::cerr << "done" << std::endl; + // LOGGER->info("Done"); } NTL::RR lnFactorial(NTL::RR n) { @@ -69,10 +73,10 @@ NTL::ZZ binomial_wrapper(long n, long k) { } if ((n <= LOW_K_MAX_N) && (k < LOW_K_MAX_T)) { return low_k_binomial_table[n][k]; - } else { - spdlog::info( - "Binomial table not initizialed, resorting to standard computation"); } + } else { + // LOGGER->info( + // "Binomial table not initizialed, resorting to standard computation"); } /* shortcut computation for fast cases (k < 10) where diff --git a/src/utils/isd_cost_estimate.cpp b/src/utils/isd_cost_estimate.cpp index 80555a3..2257e3c 100644 --- a/src/utils/isd_cost_estimate.cpp +++ b/src/utils/isd_cost_estimate.cpp @@ -1,10 +1,12 @@ #include "isd_cost_estimate.hpp" #include "binomials.hpp" -#include "logging.hpp" +// #include "logging.hpp" #include #include #include +// static auto LOGGER = +// Logger::LoggerManager::getInstance().get_logger("isd_cost_estimate"); /***************************Classic ISDs***************************************/ Result 
isd_log_cost_classic_BJMM_approx(const uint32_t n, const uint32_t k, @@ -94,6 +96,7 @@ Result isd_log_cost_classic_Prange(const uint32_t n, const uint32_t k, Result isd_log_cost_classic_LB(const uint32_t n, const uint32_t k, const uint32_t t) { NTL::RR n_real = NTL::RR(n); + NTL::RR k_real = NTL::RR(k); NTL::RR t_real = NTL::RR(t); NTL::RR min_log_cost = n_real; // unreachable upper bound @@ -118,8 +121,8 @@ Result isd_log_cost_classic_LB(const uint32_t n, const uint32_t k, best_p = p; } } - spdlog::info("Lee-Brickell best p: {}", best_p); - spdlog::info("Lee-Brickell time: {}", NTL::conv(min_log_cost)); + // LOGGER->info("Lee-Brickell best p: {}", best_p); + // LOGGER->info("Lee-Brickell time: {}", NTL::conv(min_log_cost)); Result res; res.alg_name = "Lee-Brickell"; res.params = {{"p", best_p}}; @@ -163,8 +166,8 @@ Result isd_log_cost_classic_Leon(const uint32_t n, const uint32_t k, } } } - spdlog::info("Leon Best l {} best p: {}", best_l, best_p); - spdlog::info("Leon time: {}", NTL::conv(min_log_cost)); + // LOGGER->info("Leon Best l {} best p: {}", best_l, best_p); + // LOGGER->info("Leon time: {}", NTL::conv(min_log_cost)); Result res; res.alg_name = "Lee-Brickell"; res.params = {{"p", best_p}, {"l", best_l}}; @@ -225,8 +228,8 @@ Result isd_log_cost_classic_Stern(const uint32_t n, const uint32_t k, } } - spdlog::info("Stern Best l {}, best p: {}", best_l, best_p); - spdlog::info("Stern time: {}", NTL::conv(min_log_cost)); + // LOGGER->info("Stern Best l {}, best p: {}", best_l, best_p); + // LOGGER->info("Stern time: {}", NTL::conv(min_log_cost)); Result res; res.alg_name = "Stern"; res.params = {{"p", best_p}, {"l", best_l}}; @@ -289,8 +292,8 @@ Result isd_log_cost_classic_FS(const uint32_t n, const uint32_t k, } } } - spdlog::info("FS Best l {}, best p: {}", best_l, best_p); - spdlog::info("FS time: {}", NTL::conv(min_log_cost)); + // LOGGER->info("FS Best l {}, best p: {}", best_l, best_p); + // LOGGER->info("FS time: {}", NTL::conv(min_log_cost)); Result res; res.alg_name = "Fin-Send"; res.params = {{"p", best_p}, {"l", best_l}}; @@ -400,15 +403,15 @@ Result isd_log_cost_classic_MMT(const uint32_t n, const uint32_t k, #endif } } - spdlog::info("MMT Best l {}, best p: {}", best_l, best_p); + // LOGGER->info("MMT Best l {}, best p: {}", best_l, best_p); if (best_p == constrained_max_p) { - spdlog::warn("Warning: p {p} on exploration edge!"); + // LOGGER->warn("Warning: p {p} on exploration edge!"); } if (best_l == constrained_max_l) { - spdlog::warn("Warning: l {l} on exploration edge!"); + // LOGGER->warn("Warning: l {l} on exploration edge!"); } - spdlog::info("MMT time: {}", NTL::conv(min_log_cost)); + // LOGGER->info("MMT time: {}", NTL::conv(min_log_cost)); Result res; res.alg_name = "MMT"; res.params = {{"p", best_p}, {"l", best_l}}; @@ -546,13 +549,13 @@ Result isd_log_cost_classic_BJMM(const uint32_t n, const uint32_t k, } /* end for over p*/ if (!best_l || !best_eps_1 || !best_p || !best_eps_2) { - spdlog::error("Error: One or more variables are not initialized."); + // LOGGER->error("Error: One or more variables are not initialized."); throw std::runtime_error("One or more variables are not initialized."); } - spdlog::info("BJMM Best l {}, best p: {}, best eps1: {}, best eps2: {}", - optional_to_string(best_l), optional_to_string(best_p), - optional_to_string(best_eps_1), optional_to_string(best_eps_2)); - spdlog::info("BJMM time: {}", NTL::conv(min_log_cost)); + // LOGGER->info("BJMM Best l {}, best p: {}, best eps1: {}, best eps2: {}", + // 
Logger::LoggerManager::getInstance().optional_to_string(best_l), Logger::LoggerManager::getInstance().optional_to_string(best_p), + // Logger::LoggerManager::getInstance().optional_to_string(best_eps_1), Logger::LoggerManager::getInstance().optional_to_string(best_eps_2)); + // LOGGER->info("BJMM time: {}", NTL::conv(min_log_cost)); Result res; res.alg_name = "BJMM"; res.params = {{"p", best_p.value()}, @@ -604,11 +607,11 @@ Result isd_log_cost_quantum_LB(const uint32_t n, const uint32_t k, } } if (!best_p) { - spdlog::error("Error: One or more variables are not initialized."); + // LOGGER->error("Error: One or more variables are not initialized."); throw std::runtime_error("One or more variables are not initialized."); } - spdlog::info("Quantum LB time: {}", NTL::conv(min_log_cost)); + // LOGGER->info("Quantum LB time: {}", NTL::conv(min_log_cost)); Result res; res.alg_name = "Quantum Lee-Brickell"; res.params = {{"p", best_p.value()}}; diff --git a/src/utils/logging.cpp b/src/utils/logging.cpp index 920d7d2..ff595e1 100644 --- a/src/utils/logging.cpp +++ b/src/utils/logging.cpp @@ -1,46 +1,57 @@ #include "logging.hpp" #include +#include #include +#include +#include -void configure_logger(const std::optional filename) { - // Initialize the logger - const std::string ff = - filename.has_value() ? filename.value() : "logs/default.log"; - auto logger = spdlog::basic_logger_mt("default_logger", ff); - spdlog::set_default_logger(logger); +Logger::LoggerManager &Logger::LoggerManager::getInstance() { + static LoggerManager instance; + return instance; +} + +void Logger::LoggerManager::setup_logger( + const std::string &logger_name, spdlog::level::level_enum console_level, + spdlog::level::level_enum file_level, const std::string &pattern) { + // Creating sinks, console + auto console_sink = std::make_shared(); + console_sink->set_level(console_level); + console_sink->set_pattern(pattern); - // Retrieve the environment variable for log level - const char *log_level_env = std::getenv("LOG_LEVEL"); + // ... 
and file + // TODO change hard-coded dir + auto file_sink = std::make_shared( + "logs/" + logger_name + ".log", true); + file_sink->set_level(file_level); + file_sink->set_pattern(pattern); - if (log_level_env) { - std::string log_level_str(log_level_env); + std::vector sinks{console_sink, file_sink}; - // Configure the log level based on the environment variable - if (log_level_str == "trace") { - spdlog::set_level(spdlog::level::trace); - } else if (log_level_str == "debug") { - spdlog::set_level(spdlog::level::debug); - } else if (log_level_str == "info") { - spdlog::set_level(spdlog::level::info); - } else if (log_level_str == "warn") { - spdlog::set_level(spdlog::level::warn); - } else if (log_level_str == "err") { - spdlog::set_level(spdlog::level::err); - } else if (log_level_str == "critical") { - spdlog::set_level(spdlog::level::critical); - } else { - spdlog::set_level(spdlog::level::info); // Default level - } + std::shared_ptr logger; + auto it = loggers.find(logger_name); + if (it != loggers.end()) { + std::cout << "Logger already present " << logger_name << std::endl; + logger = it->second; } else { - spdlog::set_level(spdlog::level::err); // Default level if environment - // variable is not set + std::cout << "Creating logger " << logger_name << std::endl; + logger = std::make_shared(logger_name, sinks.begin(), + sinks.end()); + loggers[logger_name] = logger; + spdlog::register_logger(logger); } + // logger->set_level(spdlog::level::trace); // Set to the most verbose level + // logger->flush_on(spdlog::level::err); } -std::string optional_to_string(const std::optional &opt) { - if (opt) { - return std::to_string(*opt); - } else { - return "Not Initialized"; +std::shared_ptr +Logger::LoggerManager::get_logger(const std::string &logger_name) { + auto it = loggers.find(logger_name); + if (it != loggers.end()) { + return it->second; } + // Logger not found, so set it up + setup_logger(logger_name, spdlog::level::info, + spdlog::level::info); // Default levels + + return loggers[logger_name]; } From 4f57509d416aa90e8db448b6fb432310477cf7de Mon Sep 17 00:00:00 2001 From: Simone Perriello <8199216+tigerjack@users.noreply.github.com> Date: Sat, 27 Jul 2024 16:48:51 +0200 Subject: [PATCH 42/55] Minors --- CMakeLists.txt | 3 ++- src/tools/CMakeLists.txt | 9 ++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 17a0be0..1880127 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -8,7 +8,8 @@ set(CMAKE_CXX_STANDARD 20) set(CMAKE_CXX_STANDARD_REQUIRED True) # Global compiler flags -add_compile_options(-O3 -g3 -Wall -Wextra -Wno-sign-compare) +add_compile_options(-O3 -Wall -Wextra -Wno-sign-compare) +# add_compile_options(-O0 -g3 -Wall -Wextra -Wno-sign-compare) # Include directories include_directories(${PROJECT_SOURCE_DIR}/include) diff --git a/src/tools/CMakeLists.txt b/src/tools/CMakeLists.txt index 41078f5..d4d82f9 100644 --- a/src/tools/CMakeLists.txt +++ b/src/tools/CMakeLists.txt @@ -18,25 +18,24 @@ message(STATUS "fmt library: ${fmt_LIBRARIES}") set(target constant_weight_encodable_bits) add_executable(${target} ${target}.cpp) target_link_libraries(${target} PRIVATE ${LIBS}) +install(TARGETS constant_weight_encodable_bits DESTINATION bin) set(target enumeration_complexity) add_executable(${target} ${target}.cpp) target_link_libraries(${target} PRIVATE ${LIBS}) +install(TARGETS enumeration_complexity DESTINATION bin) set(target parameter_generator) add_executable(${target} ${target}.cpp) target_link_libraries(${target} 
PRIVATE ${LIBS}) +install(TARGETS parameter_generator DESTINATION bin) set(target work_factor_computation) add_executable(${target} ${target}.cpp) target_link_libraries(${target} PRIVATE ${LIBS}) +install(TARGETS work_factor_computation DESTINATION bin) set(target work_factor_computation_parallel) add_executable(${target} ${target}.cpp) target_link_libraries(${target} PRIVATE ${LIBS} OpenMP::OpenMP_CXX) - -install(TARGETS constant_weight_encodable_bits DESTINATION bin) -install(TARGETS enumeration_complexity DESTINATION bin) -install(TARGETS parameter_generator DESTINATION bin) -install(TARGETS work_factor_computation DESTINATION bin) install(TARGETS work_factor_computation_parallel DESTINATION bin) From 94f377cc75045dd2a1d7e4a4178d21e47e0ffb69 Mon Sep 17 00:00:00 2001 From: Simone Perriello <8199216+tigerjack@users.noreply.github.com> Date: Sat, 27 Jul 2024 19:00:22 +0200 Subject: [PATCH 43/55] ADD different attack costs to parallel computation --- examples/isd_cost_estimate_ex.cpp | 19 +-- include/utils/isd_cost_estimate.hpp | 16 ++- src/tools/CMakeLists.txt | 8 +- src/tools/parameter_generator.cpp | 31 +++-- src/tools/work_factor_computation.cpp | 18 ++- .../work_factor_computation_parallel.cpp | 89 +++++++------ src/utils/isd_cost_estimate.cpp | 123 +++++++++++------- 7 files changed, 177 insertions(+), 127 deletions(-) diff --git a/examples/isd_cost_estimate_ex.cpp b/examples/isd_cost_estimate_ex.cpp index 3fc67f1..241df88 100644 --- a/examples/isd_cost_estimate_ex.cpp +++ b/examples/isd_cost_estimate_ex.cpp @@ -78,38 +78,39 @@ int main() { uint32_t n = val.codeword_size; uint32_t k = val.code_dimension; uint32_t t = val.number_of_errors; - double qc_red_fac = get_qc_red_factor_log(val.qc_block_size, true); + double qc_red_fac = get_qc_red_factor_log(val.qc_block_size, n-k, QCAttackType::KRA3); double diff; std::string name; std::cout << "qc_red_fac " << qc_red_fac << std::endl; for (const auto &algo : std::unordered_set{ - Prange, Lee_Brickell, Leon, Stern, Finiasz_Sendrier}) { + Algorithm::Prange, Algorithm::Lee_Brickell, Algorithm::Leon, + Algorithm::Stern, Algorithm::Finiasz_Sendrier}) { switch (algo) { - case Prange: + case Algorithm::Prange: current_res = isd_log_cost_classic_Prange(n, k, t); name = "Prange"; break; - case Lee_Brickell: + case Algorithm::Lee_Brickell: current_res = isd_log_cost_classic_LB(n, k, t); name = "Lee-Brickell"; break; - case Leon: + case Algorithm::Leon: current_res = isd_log_cost_classic_Leon(n, k, t); name = "Leon"; break; - case Stern: + case Algorithm::Stern: current_res = isd_log_cost_classic_Stern(n, k, t); name = "Stern"; break; - case Finiasz_Sendrier: + case Algorithm::Finiasz_Sendrier: current_res = isd_log_cost_classic_FS(n, k, t); name = "Fin-Send"; break; - case MMT: + case Algorithm::MMT: current_res = isd_log_cost_classic_MMT(n, k, t); name = "MMT "; break; - case BJMM: + case Algorithm::BJMM: current_res = isd_log_cost_classic_BJMM(n, k, t); name = "BJMM "; break; diff --git a/include/utils/isd_cost_estimate.hpp b/include/utils/isd_cost_estimate.hpp index 8f05325..b8e0b24 100644 --- a/include/utils/isd_cost_estimate.hpp +++ b/include/utils/isd_cost_estimate.hpp @@ -14,7 +14,10 @@ struct Result { double list_size; }; -enum Algorithm { +// Plain does not apply qc reductions +enum class QCAttackType { KRA1, KRA2, KRA3, MRA, Plain, Count}; + +enum class Algorithm { Prange, Lee_Brickell, Leon, @@ -23,8 +26,9 @@ enum Algorithm { MMT, BJMM, // Add more algorithms here + Count, }; -enum QuantumAlgorithm { +enum class QuantumAlgorithm { 
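  // Count-style sentinels (see Algorithm::Count above) let callers enumerate
  // every algorithm without hard-coding the list. A minimal sketch, where
  // run_classic() is a hypothetical helper:
  //   for (int i = 0; i < static_cast<int>(Algorithm::Count); ++i)
  //     run_classic(static_cast<Algorithm>(i));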
Q_Lee_Brickell, Q_Stern, // NOTE no circuit available }; @@ -36,13 +40,13 @@ const NTL::RR probability_k_by_k_is_inv(const NTL::RR &k); const NTL::RR classic_rref_red_cost(const NTL::RR &n, const NTL::RR &r); // Classic - Result c_isd_log_cost(const uint32_t n, const uint32_t k, const uint32_t t, - const uint32_t qc_order, const uint32_t is_kra, + const uint32_t qc_order, QCAttackType attack, const bool compute_qc_reduction_factor, std::unordered_set algs); -double get_qc_red_factor_log(const uint32_t qc_order, const uint32_t is_kra); +double get_qc_red_factor_log(const uint32_t qc_order, const uint32_t n0, + QCAttackType attack); Result isd_log_cost_classic_Prange(const uint32_t n, const uint32_t k, const uint32_t t); @@ -63,7 +67,7 @@ Result isd_log_cost_classic_BJMM(const uint32_t n, const uint32_t k, // Quantum Result q_isd_log_cost(const uint32_t n, const uint32_t k, const uint32_t t, - const uint32_t qc_order, const uint32_t is_kra, + const uint32_t qc_order, QCAttackType attack, const bool compute_qc_reduction_factor, std::unordered_set algs); diff --git a/src/tools/CMakeLists.txt b/src/tools/CMakeLists.txt index d4d82f9..f076e85 100644 --- a/src/tools/CMakeLists.txt +++ b/src/tools/CMakeLists.txt @@ -30,10 +30,10 @@ add_executable(${target} ${target}.cpp) target_link_libraries(${target} PRIVATE ${LIBS}) install(TARGETS parameter_generator DESTINATION bin) -set(target work_factor_computation) -add_executable(${target} ${target}.cpp) -target_link_libraries(${target} PRIVATE ${LIBS}) -install(TARGETS work_factor_computation DESTINATION bin) +# set(target work_factor_computation) +# add_executable(${target} ${target}.cpp) +# target_link_libraries(${target} PRIVATE ${LIBS}) +# install(TARGETS work_factor_computation DESTINATION bin) set(target work_factor_computation_parallel) add_executable(${target} ${target}.cpp) diff --git a/src/tools/parameter_generator.cpp b/src/tools/parameter_generator.cpp index 7552515..37e8658 100644 --- a/src/tools/parameter_generator.cpp +++ b/src/tools/parameter_generator.cpp @@ -36,15 +36,17 @@ uint32_t estimate_t_val(const uint32_t c_sec_level, const uint32_t q_sec_level, t = (lo + hi) / 2; std::cerr << "testing t " << t << std::endl; achieved_c_sec_level = - c_isd_log_cost(n_0 * p, ((n_0 - 1) * p), t, p, 0, true, - std::unordered_set{Prange, Lee_Brickell, Leon, - Stern, Finiasz_Sendrier, - MMT, BJMM}) + c_isd_log_cost(n_0 * p, ((n_0 - 1) * p), t, p, QCAttackType::MRA, true, + std::unordered_set{ + Algorithm::Prange, Algorithm::Lee_Brickell, + Algorithm::Leon, Algorithm::Stern, + Algorithm::Finiasz_Sendrier, Algorithm::MMT, + Algorithm::BJMM}) .value; achieved_q_sec_level = - q_isd_log_cost( - n_0 * p, ((n_0 - 1) * p), t, p, 0, true, - std::unordered_set{Q_Lee_Brickell, Q_Stern}) + q_isd_log_cost(n_0 * p, ((n_0 - 1) * p), t, p, QCAttackType::MRA, true, + std::unordered_set{ + QuantumAlgorithm::Q_Lee_Brickell, QuantumAlgorithm::Q_Stern}) .value; if ((achieved_c_sec_level >= c_sec_level) && (achieved_q_sec_level >= q_sec_level)) { @@ -117,14 +119,17 @@ uint64_t estimate_dv(const uint32_t c_sec_level, // expressed as quasi cyclicity */ achieved_c_sec_level = c_isd_log_cost( - n_0 * p, p, n_0 * d_v_prime, p, 1, true, - std::unordered_set{Prange, Lee_Brickell, Leon, Stern, - Finiasz_Sendrier, MMT, BJMM}) + n_0 * p, p, n_0 * d_v_prime, p, QCAttackType::KRA3, true, + std::unordered_set{ + Algorithm::Prange, Algorithm::Lee_Brickell, Algorithm::Leon, + Algorithm::Stern, Algorithm::Finiasz_Sendrier, + Algorithm::MMT, Algorithm::BJMM}) .value; achieved_q_sec_level = 
- q_isd_log_cost( - n_0 * p, p, n_0 * d_v_prime, p, 1, true, - std::unordered_set{Q_Lee_Brickell, Q_Stern}) + q_isd_log_cost(n_0 * p, p, n_0 * d_v_prime, p, QCAttackType::KRA3, true, + std::unordered_set{ + QuantumAlgorithm::Q_Lee_Brickell, + QuantumAlgorithm::Q_Stern}) .value; } } diff --git a/src/tools/work_factor_computation.cpp b/src/tools/work_factor_computation.cpp index 0ea4a71..e89b1d9 100644 --- a/src/tools/work_factor_computation.cpp +++ b/src/tools/work_factor_computation.cpp @@ -54,14 +54,18 @@ int main(int argc, char *argv[]) { << "- : " << is_red_factor_applied << std::endl; + for (int i = 0; i < static_cast(QCAttackType::Count); ++i) { + QCAttackType attack = static_cast(i); + printColor(color); + } std::cout << "Minimum classic cost :" - << c_isd_log_cost( - n, k, t, qc_block_size, is_kra, is_red_factor_applied, - std::unordered_set{ - Prange, Lee_Brickell, Leon, Stern, - // Finiasz_Sendrier, // - // MMT, BJMM // - }) + << c_isd_log_cost(n, k, t, qc_block_size, is_kra, + is_red_factor_applied, + std::unordered_set{ + Prange, Lee_Brickell, Leon, Stern, + // Finiasz_Sendrier, // + // MMT, BJMM // + }) .value << " Minimum quantum cost :" << q_isd_log_cost(n, k, t, qc_block_size, is_kra, diff --git a/src/tools/work_factor_computation_parallel.cpp b/src/tools/work_factor_computation_parallel.cpp index 399abec..32e6a05 100644 --- a/src/tools/work_factor_computation_parallel.cpp +++ b/src/tools/work_factor_computation_parallel.cpp @@ -1,18 +1,17 @@ #include +#include #include +#include +#include #include // For std::setprecision #include +#include #include #include #include -#include -#include -#include -#include -#include // #include -#include #include +#include #include "globals.hpp" @@ -21,8 +20,11 @@ // #define EXPLORE_REPRS void to_json(nlohmann::json &j, const Result &r) { - j = nlohmann::json{ - {"alg_name", r.alg_name}, {"params", r.params}, {"value", r.value}, {"gje_cost", r.gje_cost}, {"list_size", r.list_size}}; + j = nlohmann::json{{"alg_name", r.alg_name}, + {"params", r.params}, + {"value", r.value}, + {"gje_cost", r.gje_cost}, + {"list_size", r.list_size}}; } void from_json(const nlohmann::json &j, Result &r) { @@ -42,9 +44,10 @@ int main() { const std::string input_isd_values = "out/isd_values.json"; std::ifstream file(input_isd_values); - // Check if the file is open + // Check if the file is open if (!file.is_open()) { - std::cerr << "Could not open the input file " << input_isd_values << std::endl; + std::cerr << "Could not open the input file " << input_isd_values + << std::endl; return 1; } @@ -53,10 +56,9 @@ int main() { file >> j; NTL::RR::SetPrecision(NUM_BITS_REAL_MANTISSA); - + InitBinomials(); pi = NTL::ComputePi_RR(); - bool is_kra_values[] = {true, false}; std::filesystem::path dirPath(OUT_DIR_RESULTS); // Check if the directory exists if (!std::filesystem::exists(dirPath)) { @@ -78,43 +80,50 @@ int main() { uint32_t k = n - r; uint32_t t = entry["t"]; uint32_t qc_block_size = entry["prime"]; - bool is_red_factor_applied = true; // int n0 = entry["n0"]; // int v = entry["v"]; // int lambd = entry["lambd"]; - - std::string filename = - OUT_DIR_RESULTS + fmt::format("{:06}_{:06}_{:03}.json", n, r, t); - nlohmann::json out_values; Result current_c_res; Result current_q_res; - for (bool is_kra : is_kra_values) { - // spdlog::info("Processing n {}, k {}, t {}, qc_block_size {}, is_kra {}, " - // "is_red_factor_applied {}", - // n, k, t, qc_block_size, is_kra, is_red_factor_applied); - current_c_res = - c_isd_log_cost(n, k, t, qc_block_size, is_kra, 
is_red_factor_applied, - std::unordered_set{Stern}); - current_q_res = - q_isd_log_cost(n, k, t, qc_block_size, is_kra, is_red_factor_applied, - std::unordered_set{Q_Lee_Brickell}); - std::string is_kra_name = is_kra ? "KRA": "MRA"; - out_values[is_kra_name]["C"] = current_c_res; - out_values[is_kra_name]["Q"] = current_q_res; - } + current_c_res = c_isd_log_cost(n, k, t, qc_block_size, QCAttackType::Plain, + false, std::unordered_set{Algorithm::Stern}); + + current_q_res = + q_isd_log_cost(n, k, t, qc_block_size, QCAttackType::Plain, false, + std::unordered_set{QuantumAlgorithm::Q_Lee_Brickell}); + + std::string attack_type; + out_values["Classic"]["Plain"] = current_c_res; + out_values["Quantum"]["Plain"] = current_q_res; - std::ofstream file(filename); - if (file.is_open()) { - file << std::fixed << std::setprecision(10) - << out_values.dump(4); // Format JSON with indentation - file.close(); - std::cout << "Data written to " << filename << std::endl; - } else { - std::cerr << "Could not open the file!" << std::endl; - } + // Post-apply reduction factors + double red_fac = + get_qc_red_factor_log(qc_block_size, n - k, QCAttackType::MRA); + out_values["Classic"]["MRA"] = current_c_res.value - red_fac; + out_values["Quantum"]["MRA"] = current_c_res.value - red_fac; + red_fac = get_qc_red_factor_log(qc_block_size, n - k, QCAttackType::KRA1); + out_values["Classic"]["KRA1"] = current_c_res.value - red_fac; + red_fac = get_qc_red_factor_log(qc_block_size, n - k, QCAttackType::KRA2); + out_values["Classic"]["KRA2"] = current_c_res.value - red_fac; + red_fac = get_qc_red_factor_log(qc_block_size, n - k, QCAttackType::KRA2); + out_values["Classic"]["KRA3"] = current_c_res.value - red_fac; + + std::string filename = + OUT_DIR_RESULTS + fmt::format("{:06}_{:06}_{:03}.json", n, k, t); + + std::ofstream file(filename); + if (file.is_open()) { + file << std::fixed << std::setprecision(10) + << out_values.dump(4); // Format JSON with indentation + file.close(); + std::cout << "Data written to " << filename << std::endl; + } else { + std::cerr << "Could not open the file!" 
<< std::endl; + } } + return 0; } diff --git a/src/utils/isd_cost_estimate.cpp b/src/utils/isd_cost_estimate.cpp index 2257e3c..a50c219 100644 --- a/src/utils/isd_cost_estimate.cpp +++ b/src/utils/isd_cost_estimate.cpp @@ -1,8 +1,8 @@ #include "isd_cost_estimate.hpp" #include "binomials.hpp" // #include "logging.hpp" -#include #include +#include #include // static auto LOGGER = @@ -24,8 +24,7 @@ const NTL::RR log_probability_k_by_k_is_inv(const NTL::RR &k) { return NTL::RR(-1.79191682); NTL::RR log_pinv = NTL::RR(-1); for (long i = 2; i <= k; i++) { - log_pinv = - log_pinv + log2_RR(NTL::RR(1) - NTL::power2_RR(-i)); + log_pinv = log_pinv + log2_RR(NTL::RR(1) - NTL::power2_RR(-i)); } return log_pinv; } @@ -49,7 +48,8 @@ const NTL::RR classic_rref_red_cost(const NTL::RR &n, const NTL::RR &r) { } // const NTL::RR classic_IS_candidate_cost(const NTL::RR &n, const NTL::RR &r) { -// NB: r* r should be added only for SDP, and even there it can be omitted since the syndrome can be thought as another column of H +// NB: r* r should be added only for SDP, and even there it can be omitted since +// the syndrome can be thought as another column of H // return classic_rref_red_cost(n, r) / probability_k_by_k_is_inv(r) + r * r; // } @@ -82,7 +82,9 @@ Result isd_log_cost_classic_Prange(const uint32_t n, const uint32_t k, NTL::RR num_iter = NTL::to_RR(binomial_wrapper(n, t)) / NTL::to_RR(binomial_wrapper(n - k, t)); - NTL::RR log_cost = log2_RR(num_iter) - log_probability_k_by_k_is_inv(n_real - k_real) + log2_RR(cost_gje); + NTL::RR log_cost = log2_RR(num_iter) - + log_probability_k_by_k_is_inv(n_real - k_real) + + log2_RR(cost_gje); Result res; res.alg_name = "Prange"; @@ -152,7 +154,8 @@ Result isd_log_cost_classic_Leon(const uint32_t n, const uint32_t k, for (uint32_t l = 0; l < constrained_max_l; l++) { NTL::RR KChooseP = NTL::to_RR(binomial_wrapper(k, p)); NTL::RR cost_iter = - gje_cost / probability_k_by_k_is_inv(n_real - k_real) + KChooseP * p_real * NTL::to_RR(l) + + gje_cost / probability_k_by_k_is_inv(n_real - k_real) + + KChooseP * p_real * NTL::to_RR(l) + (KChooseP / NTL::power2_RR(l)) * NTL::RR(p * (n - k - l)); NTL::RR num_iter = NTL::to_RR(binomial_wrapper(n, t)) / NTL::to_RR(binomial_wrapper(k, p) * @@ -189,9 +192,9 @@ Result isd_log_cost_classic_Stern(const uint32_t n, const uint32_t k, NTL::RR gje_cost = classic_rref_red_cost(n_real, n_real - k_real); NTL::RR log_stern_list_size; - // IS_candidate_cost = classic_IS_candidate_cost(n_real, n_real - k_real); + // IS_candidate_cost = classic_IS_candidate_cost(n_real, n_real - k_real); - constrained_max_p = P_MAX_Stern > t ? t : P_MAX_Stern; + constrained_max_p = P_MAX_Stern > t ? t : P_MAX_Stern; for (uint32_t p = 2; p < constrained_max_p; p = p + 2) { constrained_max_l = (L_MAX_Stern > (n - k - (t - p)) ? 
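    // l is capped at n - k - (t - p): the t - p errors placed outside the
    // small-weight guess must fit in the remaining n - k - l positions,
    // otherwise the C(n - k - l, t - p) factor in the per-iteration success
    // probability vanishes.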
(n - k - (t - p)) : L_MAX_Stern); @@ -202,18 +205,18 @@ Result isd_log_cost_classic_Stern(const uint32_t n, const uint32_t k, NTL::RR kHalfChoosePHalf_real = NTL::to_RR(kHalfChoosePHalf); NTL::RR cost_iter = - gje_cost/ probability_k_by_k_is_inv(n_real - k_real) + + gje_cost / probability_k_by_k_is_inv(n_real - k_real) + kHalfChoosePHalf_real * (NTL::to_RR(l) * p_real + (kHalfChoosePHalf_real / NTL::power2_RR(l)) * NTL::RR(p * (n - k - l))); // #if LOG_COST_CRITERION == 1 - log_stern_list_size = + log_stern_list_size = kHalfChoosePHalf_real * (p_real / NTL::RR(2) * NTL::log(k_real / NTL::RR(2)) / NTL::log(NTL::RR(2)) + NTL::to_RR(l)); log_stern_list_size = log2_RR(log_stern_list_size); - // NTL::log(log_stern_list_size) / NTL::log(NTL::RR(2)); + // NTL::log(log_stern_list_size) / NTL::log(NTL::RR(2)); cost_iter = cost_iter * log_stern_list_size; // #endif NTL::RR num_iter = NTL::to_RR(binomial_wrapper(n, t)) / @@ -251,7 +254,8 @@ Result isd_log_cost_classic_FS(const uint32_t n, const uint32_t k, uint32_t best_l = 0, best_p = 2, constrained_max_l, constrained_max_p; NTL::RR cost_gje; -// return Fin_Send_rref_red_cost(n, r, l) / probability_k_by_k_is_inv(r - l) + + // return Fin_Send_rref_red_cost(n, r, l) / probability_k_by_k_is_inv(r - l) + // + constrained_max_p = P_MAX_Stern > t ? t : P_MAX_Stern; for (uint32_t p = 2; p < constrained_max_p; p = p + 2) { constrained_max_l = @@ -260,12 +264,11 @@ Result isd_log_cost_classic_FS(const uint32_t n, const uint32_t k, NTL::ZZ kPlusLHalfChoosePHalf; for (uint32_t l = 0; l < constrained_max_l; l++) { NTL::RR l_real = NTL::RR(l); - cost_gje = - Fin_Send_rref_red_cost(n_real, n_real - k_real, l_real); + cost_gje = Fin_Send_rref_red_cost(n_real, n_real - k_real, l_real); kPlusLHalfChoosePHalf = binomial_wrapper((k + l) / 2, p / 2); NTL::RR kPlusLHalfChoosePHalf_real = NTL::to_RR(kPlusLHalfChoosePHalf); NTL::RR cost_iter = - cost_gje / probability_k_by_k_is_inv(n_real - k_real - l_real) + + cost_gje / probability_k_by_k_is_inv(n_real - k_real - l_real) + kPlusLHalfChoosePHalf_real * (NTL::to_RR(l) * p_real + (kPlusLHalfChoosePHalf_real / NTL::power2_RR(l)) * @@ -299,7 +302,7 @@ Result isd_log_cost_classic_FS(const uint32_t n, const uint32_t k, res.params = {{"p", best_p}, {"l", best_l}}; res.value = NTL::conv(min_log_cost); res.gje_cost = NTL::conv(log2_RR(cost_gje)); - //cost_gje not reported + // cost_gje not reported return res; } @@ -334,7 +337,8 @@ Result isd_log_cost_classic_MMT(const uint32_t n, const uint32_t k, NTL::to_RR(binomial_wrapper(n, t)) / NTL::to_RR(kPlusLHalfChoosePHalf * kPlusLHalfChoosePHalf * binomial_wrapper(n - k - l, t - p)); - // FS_IS_candidate_cost = Fin_Send_IS_candidate_cost(n_real, r_real, l_real); + // FS_IS_candidate_cost = Fin_Send_IS_candidate_cost(n_real, r_real, + // l_real); cost_gje = Fin_Send_rref_red_cost(n_real, n_real - k_real, l_real); NTL::ZZ kPlusLHalfChoosePFourths = binomial_wrapper((k + l) / 2, p / 4); NTL::RR kPlusLHalfChoosePFourths_real = @@ -367,7 +371,8 @@ Result isd_log_cost_classic_MMT(const uint32_t n, const uint32_t k, NTL::RR otherFactor = (NTL::to_RR(p / 4 * l_2) + interm); NTL::RR cost_iter = - cost_gje/probability_k_by_k_is_inv(n_real - k_real - l_real) + min * otherFactor + + cost_gje / probability_k_by_k_is_inv(n_real - k_real - l_real) + + min * otherFactor + kPlusLHalfChoosePFourths_real * NTL::to_RR(p / 2 * l_2); NTL::RR lastAddend = @@ -553,8 +558,10 @@ Result isd_log_cost_classic_BJMM(const uint32_t n, const uint32_t k, throw std::runtime_error("One or more variables are not 
initialized."); } // LOGGER->info("BJMM Best l {}, best p: {}, best eps1: {}, best eps2: {}", - // Logger::LoggerManager::getInstance().optional_to_string(best_l), Logger::LoggerManager::getInstance().optional_to_string(best_p), - // Logger::LoggerManager::getInstance().optional_to_string(best_eps_1), Logger::LoggerManager::getInstance().optional_to_string(best_eps_2)); + // Logger::LoggerManager::getInstance().optional_to_string(best_l), + // Logger::LoggerManager::getInstance().optional_to_string(best_p), + // Logger::LoggerManager::getInstance().optional_to_string(best_eps_1), + // Logger::LoggerManager::getInstance().optional_to_string(best_eps_2)); // LOGGER->info("BJMM time: {}", NTL::conv(min_log_cost)); Result res; res.alg_name = "BJMM"; @@ -699,49 +706,68 @@ Result isd_log_cost_quantum_Stern(const uint32_t n, const uint32_t k, return res; } - /***************************Aggregation ***************************************/ -double -get_qc_red_factor_log(const uint32_t qc_order, const uint32_t is_kra) { +double get_qc_red_factor_log(const uint32_t qc_order, const uint32_t n0, + QCAttackType attack) { /* For key recovery attacks (CFP) the advantage from quasi-cyclicity is p. For * a message recovery (SDP), the DOOM advantage is sqrt(p). + * + * Additionally, for key recovery attacks, there is a speedup depending on the + * different kind of attacks (check LEDA specs). */ - double qc_red_factor = is_kra ? logl(qc_order) : logl(qc_order) / 2.0; - return qc_red_factor / logl(2); + + switch (attack) { + case QCAttackType::KRA1: + return log2(qc_order) + log2(NTL::conv(binomial_wrapper(n0, 2))); + case QCAttackType::KRA2: + return log2(qc_order) + log2(n0); + case QCAttackType::KRA3: + return log2(qc_order); + case QCAttackType::MRA: + return log2(qc_order) / 2; + case QCAttackType::Plain: + return 0; + default: + throw std::runtime_error("Wrong attack type"); + } } Result c_isd_log_cost(const uint32_t n, const uint32_t k, const uint32_t t, - const uint32_t qc_order, const uint32_t is_kra, - const bool compute_qc_reduction_factor, std::unordered_set algs) { + const uint32_t qc_order, QCAttackType attack, + const bool compute_qc_reduction_factor, + std::unordered_set algs) { + // attack is useless if compute_qc_reduction_factor is false + Result current_res, min_res; - double qc_red_factor = - compute_qc_reduction_factor ? get_qc_red_factor_log(qc_order, is_kra) : 0; + double qc_red_factor = compute_qc_reduction_factor + ? 
get_qc_red_factor_log(qc_order, n - k, attack) + : 0; double min_cost = n; // the cost cannot be greater than 2^n for (const auto &algo : algs) { switch (algo) { - case Prange: - current_res = isd_log_cost_classic_Prange(n,k,t); + case Algorithm::Prange: + current_res = isd_log_cost_classic_Prange(n, k, t); break; - case Lee_Brickell: - current_res = isd_log_cost_classic_LB(n,k,t); + case Algorithm::Lee_Brickell: + current_res = isd_log_cost_classic_LB(n, k, t); break; - case Leon: - current_res = isd_log_cost_classic_Leon(n,k,t); + case Algorithm::Leon: + current_res = isd_log_cost_classic_Leon(n, k, t); break; - case Stern: - current_res = isd_log_cost_classic_Stern(n,k,t); + case Algorithm::Stern: + current_res = isd_log_cost_classic_Stern(n, k, t); break; - case Finiasz_Sendrier: + case Algorithm::Finiasz_Sendrier: current_res = isd_log_cost_classic_FS(n, k, t); break; - case MMT: + case Algorithm::MMT: current_res = isd_log_cost_classic_MMT(n, k, t); break; - case BJMM: - current_res = isd_log_cost_classic_BJMM(n,k,t); + case Algorithm::BJMM: + current_res = isd_log_cost_classic_BJMM(n, k, t); break; default: std::cerr << "Unknown algorithm\n"; @@ -758,21 +784,22 @@ Result c_isd_log_cost(const uint32_t n, const uint32_t k, const uint32_t t, } Result q_isd_log_cost(const uint32_t n, const uint32_t k, const uint32_t t, - const uint32_t qc_order, const uint32_t is_kra, + const uint32_t qc_order, QCAttackType attack, const bool compute_qc_reduction_factor, std::unordered_set algs) { Result current_res, min_res; double min_cost = n; // cannot be greater than n - double qc_red_factor = - compute_qc_reduction_factor ? get_qc_red_factor_log(qc_order, is_kra) : 0; + double qc_red_factor = compute_qc_reduction_factor + ? get_qc_red_factor_log(qc_order, n - k, attack) + : 0; for (const auto &algo : algs) { switch (algo) { - case Q_Lee_Brickell: - current_res = isd_log_cost_quantum_LB(n,k,t); + case QuantumAlgorithm::Q_Lee_Brickell: + current_res = isd_log_cost_quantum_LB(n, k, t); break; - case Q_Stern: - current_res = isd_log_cost_quantum_Stern(n,k,t); + case QuantumAlgorithm::Q_Stern: + current_res = isd_log_cost_quantum_Stern(n, k, t); break; default: std::cerr << "Unknown quantum algorithm\n"; From bb35ead1e4e9dae0945abd669cb6f8a802e64225 Mon Sep 17 00:00:00 2001 From: Simone Perriello <8199216+tigerjack@users.noreply.github.com> Date: Mon, 29 Jul 2024 11:05:59 +0200 Subject: [PATCH 44/55] ADD classic/quantum functions for reduction factors --- examples/isd_cost_estimate_ex.cpp | 2 +- include/utils/isd_cost_estimate.hpp | 5 +++- .../work_factor_computation_parallel.cpp | 14 ++++++----- src/utils/isd_cost_estimate.cpp | 25 ++++++++++++++++--- 4 files changed, 35 insertions(+), 11 deletions(-) diff --git a/examples/isd_cost_estimate_ex.cpp b/examples/isd_cost_estimate_ex.cpp index 241df88..a623a08 100644 --- a/examples/isd_cost_estimate_ex.cpp +++ b/examples/isd_cost_estimate_ex.cpp @@ -78,7 +78,7 @@ int main() { uint32_t n = val.codeword_size; uint32_t k = val.code_dimension; uint32_t t = val.number_of_errors; - double qc_red_fac = get_qc_red_factor_log(val.qc_block_size, n-k, QCAttackType::KRA3); + double qc_red_fac = get_qc_red_factor_classic_log(val.qc_block_size, n-k, QCAttackType::KRA3); double diff; std::string name; std::cout << "qc_red_fac " << qc_red_fac << std::endl; diff --git a/include/utils/isd_cost_estimate.hpp b/include/utils/isd_cost_estimate.hpp index b8e0b24..453a3df 100644 --- a/include/utils/isd_cost_estimate.hpp +++ b/include/utils/isd_cost_estimate.hpp @@ -45,7 +45,7 @@ 
Result c_isd_log_cost(const uint32_t n, const uint32_t k, const uint32_t t, const bool compute_qc_reduction_factor, std::unordered_set algs); -double get_qc_red_factor_log(const uint32_t qc_order, const uint32_t n0, +double get_qc_red_factor_classic_log(const uint32_t qc_order, const uint32_t n0, QCAttackType attack); Result isd_log_cost_classic_Prange(const uint32_t n, const uint32_t k, @@ -71,6 +71,9 @@ Result q_isd_log_cost(const uint32_t n, const uint32_t k, const uint32_t t, const bool compute_qc_reduction_factor, std::unordered_set algs); +double get_qc_red_factor_quantum_log(const uint32_t qc_order, const uint32_t n0, + QCAttackType attack); + Result isd_log_cost_quantum_LB(const uint32_t n, const uint32_t k, const uint32_t t); Result isd_log_cost_quantum_Stern(const uint32_t n, const uint32_t k, diff --git a/src/tools/work_factor_computation_parallel.cpp b/src/tools/work_factor_computation_parallel.cpp index 32e6a05..adc3e12 100644 --- a/src/tools/work_factor_computation_parallel.cpp +++ b/src/tools/work_factor_computation_parallel.cpp @@ -101,14 +101,16 @@ int main() { // Post-apply reduction factors double red_fac = - get_qc_red_factor_log(qc_block_size, n - k, QCAttackType::MRA); - out_values["Classic"]["MRA"] = current_c_res.value - red_fac; - out_values["Quantum"]["MRA"] = current_c_res.value - red_fac; - red_fac = get_qc_red_factor_log(qc_block_size, n - k, QCAttackType::KRA1); + get_qc_red_factor_quantum_log(qc_block_size, n - k, QCAttackType::MRA); + out_values["Classic"]["MRA"] = current_q_res.value - red_fac; + red_fac = + get_qc_red_factor_classic_log(qc_block_size, n - k, QCAttackType::MRA); + out_values["Quantum"]["MRA"] = current_q_res.value - red_fac; + red_fac = get_qc_red_factor_classic_log(qc_block_size, n - k, QCAttackType::KRA1); out_values["Classic"]["KRA1"] = current_c_res.value - red_fac; - red_fac = get_qc_red_factor_log(qc_block_size, n - k, QCAttackType::KRA2); + red_fac = get_qc_red_factor_classic_log(qc_block_size, n - k, QCAttackType::KRA2); out_values["Classic"]["KRA2"] = current_c_res.value - red_fac; - red_fac = get_qc_red_factor_log(qc_block_size, n - k, QCAttackType::KRA2); + red_fac = get_qc_red_factor_classic_log(qc_block_size, n - k, QCAttackType::KRA2); out_values["Classic"]["KRA3"] = current_c_res.value - red_fac; std::string filename = diff --git a/src/utils/isd_cost_estimate.cpp b/src/utils/isd_cost_estimate.cpp index a50c219..14b5f2c 100644 --- a/src/utils/isd_cost_estimate.cpp +++ b/src/utils/isd_cost_estimate.cpp @@ -708,7 +708,7 @@ Result isd_log_cost_quantum_Stern(const uint32_t n, const uint32_t k, /***************************Aggregation ***************************************/ -double get_qc_red_factor_log(const uint32_t qc_order, const uint32_t n0, +double get_qc_red_factor_classic_log(const uint32_t qc_order, const uint32_t n0, QCAttackType attack) { /* For key recovery attacks (CFP) the advantage from quasi-cyclicity is p. For * a message recovery (SDP), the DOOM advantage is sqrt(p). @@ -741,7 +741,7 @@ Result c_isd_log_cost(const uint32_t n, const uint32_t k, const uint32_t t, Result current_res, min_res; double qc_red_factor = compute_qc_reduction_factor - ? get_qc_red_factor_log(qc_order, n - k, attack) + ? 
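  // Note: get_qc_red_factor_quantum_log, added further down in this diff, only
  // covers MRA (log2(qc_order) / 2) and Plain; the KRA cases fall through to
  // the runtime_error, so key-recovery reductions stay classical-only here.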
get_qc_red_factor_classic_log(qc_order, n - k, attack) : 0; double min_cost = n; // the cost cannot be greater than 2^n @@ -783,6 +783,25 @@ Result c_isd_log_cost(const uint32_t n, const uint32_t k, const uint32_t t, return min_res; } +double get_qc_red_factor_quantum_log(const uint32_t qc_order, const uint32_t n0, + QCAttackType attack) { + /* For key recovery attacks (CFP) the advantage from quasi-cyclicity is p. For + * a message recovery (SDP), the DOOM advantage is sqrt(p). + * + * Additionally, for key recovery attacks, there is a speedup depending on the + * different kind of attacks (check LEDA specs). + */ + + switch (attack) { + case QCAttackType::MRA: + return log2(qc_order) / 2; + case QCAttackType::Plain: + return 0; + default: + throw std::runtime_error("Wrong attack type"); + } +} + Result q_isd_log_cost(const uint32_t n, const uint32_t k, const uint32_t t, const uint32_t qc_order, QCAttackType attack, const bool compute_qc_reduction_factor, @@ -790,7 +809,7 @@ Result q_isd_log_cost(const uint32_t n, const uint32_t k, const uint32_t t, Result current_res, min_res; double min_cost = n; // cannot be greater than n double qc_red_factor = compute_qc_reduction_factor - ? get_qc_red_factor_log(qc_order, n - k, attack) + ? get_qc_red_factor_quantum_log(qc_order, n - k, attack) : 0; for (const auto &algo : algs) { From 6c386867a62293bf28e5bc5050c1c5649fb75e21 Mon Sep 17 00:00:00 2001 From: Simone Perriello <8199216+tigerjack@users.noreply.github.com> Date: Mon, 29 Jul 2024 12:16:22 +0200 Subject: [PATCH 45/55] ADD handling of both json and plain input to work_factor_computation --- README_new.md | 4 +- include/utils/isd_cost_estimate.hpp | 12 + src/tools/work_factor_computation.cpp | 262 ++++++++++++++---- src/tools/work_factor_computation_old.cpp | 80 ++++++ .../work_factor_computation_parallel.cpp | 131 --------- src/utils/isd_cost_estimate.cpp | 71 +++++ 6 files changed, 366 insertions(+), 194 deletions(-) create mode 100644 src/tools/work_factor_computation_old.cpp delete mode 100644 src/tools/work_factor_computation_parallel.cpp diff --git a/README_new.md b/README_new.md index 9363e34..39651ca 100644 --- a/README_new.md +++ b/README_new.md @@ -25,11 +25,11 @@ cmake .. make -j ``` -** To create the binaries (inside the local path) +** To create the binaries (inside the local `bin` directory) Inside `build` ```sh -cmake -DCMAKE_INSTALL_PREFIX=/your/custom/path .. +cmake -DCMAKE_INSTALL_PREFIX=.. .. 
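# With the install prefix pointing at the repository root, the `make install`
# step below copies the tools into the local bin/ directory
# (install(TARGETS ... DESTINATION bin) in src/tools/CMakeLists.txt).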
make -j make install ``` diff --git a/include/utils/isd_cost_estimate.hpp b/include/utils/isd_cost_estimate.hpp index 453a3df..e5aa859 100644 --- a/include/utils/isd_cost_estimate.hpp +++ b/include/utils/isd_cost_estimate.hpp @@ -14,9 +14,13 @@ struct Result { double list_size; }; +std::string result_to_string(const Result &result); + // Plain does not apply qc reductions enum class QCAttackType { KRA1, KRA2, KRA3, MRA, Plain, Count}; +std::string qc_attack_type_to_string(QCAttackType type); + enum class Algorithm { Prange, Lee_Brickell, @@ -28,11 +32,19 @@ enum class Algorithm { // Add more algorithms here Count, }; + +std::string algorithm_to_string(Algorithm algo); + + enum class QuantumAlgorithm { Q_Lee_Brickell, Q_Stern, // NOTE no circuit available + // Add more algorithms here + Count, }; +std::string quantum_algorithm_to_string(QuantumAlgorithm algo); + /***************************Classic ISDs***************************************/ const NTL::RR log_probability_k_by_k_is_inv(const NTL::RR &k); diff --git a/src/tools/work_factor_computation.cpp b/src/tools/work_factor_computation.cpp index e89b1d9..accb4ad 100644 --- a/src/tools/work_factor_computation.cpp +++ b/src/tools/work_factor_computation.cpp @@ -1,80 +1,220 @@ #include +#include #include +#include +#include +#include // For std::setprecision +#include +#include +#include +#include +#include // #include +#include +#include + +#include "globals.hpp" #define NUM_BITS_REAL_MANTISSA 1024 #define IGNORE_DECODING_COST 0 // #define EXPLORE_REPRS -#include "binomials.hpp" -#include "isd_cost_estimate.hpp" -#include -#include +void to_json(nlohmann::json &j, const Result &r) { + j = nlohmann::json{{"alg_name", r.alg_name}, + {"params", r.params}, + {"value", r.value}, + {"gje_cost", r.gje_cost}, + {"list_size", r.list_size}}; +} -int main(int argc, char *argv[]) { - if (argc != 7) { - std::cout - << "Work factor computation for ISD" << std::endl - << " Usage " << argv[0] - << " " - " " - << std::endl - << " = 1 implies a non QC code " << std::endl - << " = the attack is a key recovery attack on a QC-[L|M]DPC " - << std::endl - << " = if the quasi-cyclic reduction factor " - "cost should be applied" - << std::endl; - return -1; +void from_json(const nlohmann::json &j, Result &r) { + j.at("alg_name").get_to(r.alg_name); + j.at("params").get_to(r.params); + j.at("value").get_to(r.value); + j.at("gje_cost").get_to(r.gje_cost); +} + +int handle_plain(const std::string args) { + std::istringstream argStream(args); + std::string token; + std::vector values; + while (std::getline(argStream, token, ',')) { + values.push_back(std::stoi(token)); + } + + if (values.size() != 4) { + std::cerr << "Expected 4 comma-separated values, but got " << values.size() + << std::endl; + return 1; + } + + int n = values[0]; + int k = values[1]; + int t = values[2]; + bool qc_block_size = values[3]; + + for (int i = 0; i < static_cast(Algorithm::Count); i++) { + Algorithm algo = static_cast(i); + std::cout << "Algorithm " << algorithm_to_string(algo) << std::endl; + Result current_c_res = c_isd_log_cost(n, k, t, qc_block_size, + QCAttackType::Plain, false, {algo}); + std::cout << "Plain " << std::endl; + std::cout << result_to_string(current_c_res) << std::endl; + + double red_fac; + red_fac = + get_qc_red_factor_classic_log(qc_block_size, n - k, QCAttackType::MRA); + std::cout << "Classic MRA: " << current_c_res.value - red_fac << std::endl; + red_fac = + get_qc_red_factor_classic_log(qc_block_size, n - k, QCAttackType::KRA1); + std::cout << "Classic KRA1: " 
<< current_c_res.value - red_fac << std::endl; + red_fac = + get_qc_red_factor_classic_log(qc_block_size, n - k, QCAttackType::KRA2); + std::cout << "Classic KRA2: " << current_c_res.value - red_fac << std::endl; + red_fac = + get_qc_red_factor_classic_log(qc_block_size, n - k, QCAttackType::KRA3); + std::cout << "Classic KRA3: " << current_c_res.value - red_fac << std::endl; + + std::cout << "**********" << std::endl; + } + + for (int i = 0; i < static_cast(QuantumAlgorithm::Count); i++) { + QuantumAlgorithm algo = static_cast(i); + + std::cout << "Algorithm: " << quantum_algorithm_to_string(algo) << std::endl; + Result current_q_res = q_isd_log_cost( + n, k, t, qc_block_size, QCAttackType::Plain, false, + std::unordered_set{algo}); + std::cout << "Plain " << std::endl; + std::cout << result_to_string(current_q_res) << std::endl; + + double red_fac; + red_fac = + get_qc_red_factor_quantum_log(qc_block_size, n - k, QCAttackType::MRA); + std::cout << "Quantum MRA: " << current_q_res.value - red_fac << std::endl; + } + return 0; +} + +int handle_json(std::string json_filename) { + + // const std::string input_isd_values = "out/isd_values.json"; + std::ifstream file(json_filename); + + // Check if the file is open + if (!file.is_open()) { + std::cerr << "Could not open the input file " << json_filename << std::endl; + return 1; + } + + // Parse the JSON content + nlohmann::json j; + file >> j; + std::filesystem::path dirPath(OUT_DIR_RESULTS); + // Check if the directory exists + if (!std::filesystem::exists(dirPath)) { + // Try to create the directory, including parent directories + if (std::filesystem::create_directories(dirPath)) { + std::cout << "Directory created successfully: " << OUT_DIR_RESULTS + << std::endl; + } else { + std::cerr << "Failed to create directory: " << OUT_DIR_RESULTS + << std::endl; + return 1; // Return an error code + } } +// Iterate over the list of entries +#pragma omp parallel for + for (const auto &entry : j) { + uint32_t n = entry["n"]; + uint32_t r = entry["r"]; + uint32_t k = n - r; + uint32_t t = entry["t"]; + uint32_t qc_block_size = entry["prime"]; + // int n0 = entry["n0"]; + // int v = entry["v"]; + // int lambd = entry["lambd"]; + nlohmann::json out_values; + + Result current_c_res; + Result current_q_res; + + current_c_res = + c_isd_log_cost(n, k, t, qc_block_size, QCAttackType::Plain, false, + std::unordered_set{Algorithm::Stern}); + + current_q_res = q_isd_log_cost( + n, k, t, qc_block_size, QCAttackType::Plain, false, + std::unordered_set{QuantumAlgorithm::Q_Lee_Brickell}); + std::string attack_type; + out_values["Classic"]["Plain"] = current_c_res; + out_values["Quantum"]["Plain"] = current_q_res; + + // Post-apply reduction factors + double red_fac = + get_qc_red_factor_quantum_log(qc_block_size, n - k, QCAttackType::MRA); + out_values["Quantum"]["MRA"] = current_q_res.value - red_fac; + + red_fac = + get_qc_red_factor_classic_log(qc_block_size, n - k, QCAttackType::MRA); + out_values["Classic"]["MRA"] = current_c_res.value - red_fac; + red_fac = + get_qc_red_factor_classic_log(qc_block_size, n - k, QCAttackType::KRA1); + out_values["Classic"]["KRA1"] = current_c_res.value - red_fac; + red_fac = + get_qc_red_factor_classic_log(qc_block_size, n - k, QCAttackType::KRA2); + out_values["Classic"]["KRA2"] = current_c_res.value - red_fac; + red_fac = + get_qc_red_factor_classic_log(qc_block_size, n - k, QCAttackType::KRA3); + out_values["Classic"]["KRA3"] = current_c_res.value - red_fac; + + std::string filename = + OUT_DIR_RESULTS + 
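    // Hypothetical invocation of the --plain mode, reusing the LEDA example
    // parameters n,k,t,p from examples/isd_cost_estimate_ex.cpp:
    //   ./work_factor_computation --plain 24646,12323,142,12323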
fmt::format("{:06}_{:06}_{:03}.json", n, k, t); + + std::ofstream file(filename); + if (file.is_open()) { + file << std::fixed << std::setprecision(10) + << out_values.dump(4); // Format JSON with indentation + file.close(); + std::cout << "Data written to " << filename << std::endl; + } else { + std::cerr << "Could not open the file!" << std::endl; + } + } + return 0; +} + +int main(int argc, char *argv[]) { // Logger::LoggerManager::getInstance().setup_logger( - // "binomials", spdlog::level::err, spdlog::level::err); + // "binomials", spdlog::level::info, spdlog::level::debug); // Logger::LoggerManager::getInstance().setup_logger( - // "isd_cost_estimate", spdlog::level::err, spdlog::level::err); - - /* reduce by a factor matching the QC block size */ + // "isd_cost_estimate", spdlog::level::info, spdlog::level::debug); + if (argc != 3) { + std::cerr << "Usage: " << argv[0] << " --json [filename] | --plain [args]" + << std::endl; + return 1; + } - InitBinomials(); NTL::RR::SetPrecision(NUM_BITS_REAL_MANTISSA); + InitBinomials(); pi = NTL::ComputePi_RR(); - uint32_t n = atoi(argv[1]); - uint32_t k = atoi(argv[2]); - uint32_t t = atoi(argv[3]); - uint32_t qc_block_size = atoi(argv[4]); - uint32_t is_kra = atoi(argv[5]); - uint32_t is_red_factor_applied = atoi(argv[6]); - - std::cout << " Input params: " << std::endl - << "- : " << n << std::endl - << "- : " << k << std::endl - << "- : " << t << std::endl - << "- : " << qc_block_size << std::endl - << "- : " << is_kra << std::endl - << "- : " << is_red_factor_applied - << std::endl; - - for (int i = 0; i < static_cast(QCAttackType::Count); ++i) { - QCAttackType attack = static_cast(i); - printColor(color); + + if (strcmp(argv[1], "--json") == 0) { + + std::string json_filename = argv[2]; + handle_json(json_filename); + } else if (strcmp(argv[1], "--plain") == 0) { + std::string plainArgs = argv[2]; + handle_plain(plainArgs); + } else { + + std::cerr << "Unknown argument: " << argv[1] << std::endl; + std::cerr << "Usage: " << argv[0] + << " --json [filename]" // "| --csv [filename]" + << std::endl; + return 1; } - std::cout << "Minimum classic cost :" - << c_isd_log_cost(n, k, t, qc_block_size, is_kra, - is_red_factor_applied, - std::unordered_set{ - Prange, Lee_Brickell, Leon, Stern, - // Finiasz_Sendrier, // - // MMT, BJMM // - }) - .value - << " Minimum quantum cost :" - << q_isd_log_cost(n, k, t, qc_block_size, is_kra, - is_red_factor_applied, - std::unordered_set{ - Q_Lee_Brickell, Q_Stern}) - .value; - if (is_red_factor_applied && qc_block_size != 1) - std::cout << " (including qc_effects) "; - std::cout << std::endl; + return 0; } diff --git a/src/tools/work_factor_computation_old.cpp b/src/tools/work_factor_computation_old.cpp new file mode 100644 index 0000000..e89b1d9 --- /dev/null +++ b/src/tools/work_factor_computation_old.cpp @@ -0,0 +1,80 @@ +#include +#include +// #include + +#define NUM_BITS_REAL_MANTISSA 1024 +#define IGNORE_DECODING_COST 0 +// #define EXPLORE_REPRS + +#include "binomials.hpp" +#include "isd_cost_estimate.hpp" +#include +#include + +int main(int argc, char *argv[]) { + if (argc != 7) { + std::cout + << "Work factor computation for ISD" << std::endl + << " Usage " << argv[0] + << " " + " " + << std::endl + << " = 1 implies a non QC code " << std::endl + << " = the attack is a key recovery attack on a QC-[L|M]DPC " + << std::endl + << " = if the quasi-cyclic reduction factor " + "cost should be applied" + << std::endl; + return -1; + } + + // Logger::LoggerManager::getInstance().setup_logger( + // 
"binomials", spdlog::level::err, spdlog::level::err); + // Logger::LoggerManager::getInstance().setup_logger( + // "isd_cost_estimate", spdlog::level::err, spdlog::level::err); + + /* reduce by a factor matching the QC block size */ + + InitBinomials(); + NTL::RR::SetPrecision(NUM_BITS_REAL_MANTISSA); + pi = NTL::ComputePi_RR(); + uint32_t n = atoi(argv[1]); + uint32_t k = atoi(argv[2]); + uint32_t t = atoi(argv[3]); + uint32_t qc_block_size = atoi(argv[4]); + uint32_t is_kra = atoi(argv[5]); + uint32_t is_red_factor_applied = atoi(argv[6]); + + std::cout << " Input params: " << std::endl + << "- : " << n << std::endl + << "- : " << k << std::endl + << "- : " << t << std::endl + << "- : " << qc_block_size << std::endl + << "- : " << is_kra << std::endl + << "- : " << is_red_factor_applied + << std::endl; + + for (int i = 0; i < static_cast(QCAttackType::Count); ++i) { + QCAttackType attack = static_cast(i); + printColor(color); + } + std::cout << "Minimum classic cost :" + << c_isd_log_cost(n, k, t, qc_block_size, is_kra, + is_red_factor_applied, + std::unordered_set{ + Prange, Lee_Brickell, Leon, Stern, + // Finiasz_Sendrier, // + // MMT, BJMM // + }) + .value + << " Minimum quantum cost :" + << q_isd_log_cost(n, k, t, qc_block_size, is_kra, + is_red_factor_applied, + std::unordered_set{ + Q_Lee_Brickell, Q_Stern}) + .value; + if (is_red_factor_applied && qc_block_size != 1) + std::cout << " (including qc_effects) "; + std::cout << std::endl; + return 0; +} diff --git a/src/tools/work_factor_computation_parallel.cpp b/src/tools/work_factor_computation_parallel.cpp deleted file mode 100644 index adc3e12..0000000 --- a/src/tools/work_factor_computation_parallel.cpp +++ /dev/null @@ -1,131 +0,0 @@ -#include -#include -#include -#include -#include -#include // For std::setprecision -#include -#include -#include -#include -#include -// #include -#include -#include - -#include "globals.hpp" - -#define NUM_BITS_REAL_MANTISSA 1024 -#define IGNORE_DECODING_COST 0 -// #define EXPLORE_REPRS - -void to_json(nlohmann::json &j, const Result &r) { - j = nlohmann::json{{"alg_name", r.alg_name}, - {"params", r.params}, - {"value", r.value}, - {"gje_cost", r.gje_cost}, - {"list_size", r.list_size}}; -} - -void from_json(const nlohmann::json &j, Result &r) { - j.at("alg_name").get_to(r.alg_name); - j.at("params").get_to(r.params); - j.at("value").get_to(r.value); - j.at("gje_cost").get_to(r.gje_cost); -} - -int main() { - - // Logger::LoggerManager::getInstance().setup_logger( - // "binomials", spdlog::level::info, spdlog::level::debug); - // Logger::LoggerManager::getInstance().setup_logger( - // "isd_cost_estimate", spdlog::level::info, spdlog::level::debug); - - const std::string input_isd_values = "out/isd_values.json"; - std::ifstream file(input_isd_values); - - // Check if the file is open - if (!file.is_open()) { - std::cerr << "Could not open the input file " << input_isd_values - << std::endl; - return 1; - } - - // Parse the JSON content - nlohmann::json j; - file >> j; - - NTL::RR::SetPrecision(NUM_BITS_REAL_MANTISSA); - - InitBinomials(); - pi = NTL::ComputePi_RR(); - std::filesystem::path dirPath(OUT_DIR_RESULTS); - // Check if the directory exists - if (!std::filesystem::exists(dirPath)) { - // Try to create the directory, including parent directories - if (std::filesystem::create_directories(dirPath)) { - std::cout << "Directory created successfully: " << OUT_DIR_RESULTS - << std::endl; - } else { - std::cerr << "Failed to create directory: " << OUT_DIR_RESULTS - << std::endl; - return 1; // 
Return an error code - } - } -// Iterate over the list of entries -#pragma omp parallel for - for (const auto &entry : j) { - uint32_t n = entry["n"]; - uint32_t r = entry["r"]; - uint32_t k = n - r; - uint32_t t = entry["t"]; - uint32_t qc_block_size = entry["prime"]; - // int n0 = entry["n0"]; - // int v = entry["v"]; - // int lambd = entry["lambd"]; - nlohmann::json out_values; - - Result current_c_res; - Result current_q_res; - - current_c_res = c_isd_log_cost(n, k, t, qc_block_size, QCAttackType::Plain, - false, std::unordered_set{Algorithm::Stern}); - - current_q_res = - q_isd_log_cost(n, k, t, qc_block_size, QCAttackType::Plain, false, - std::unordered_set{QuantumAlgorithm::Q_Lee_Brickell}); - - std::string attack_type; - out_values["Classic"]["Plain"] = current_c_res; - out_values["Quantum"]["Plain"] = current_q_res; - - // Post-apply reduction factors - double red_fac = - get_qc_red_factor_quantum_log(qc_block_size, n - k, QCAttackType::MRA); - out_values["Classic"]["MRA"] = current_q_res.value - red_fac; - red_fac = - get_qc_red_factor_classic_log(qc_block_size, n - k, QCAttackType::MRA); - out_values["Quantum"]["MRA"] = current_q_res.value - red_fac; - red_fac = get_qc_red_factor_classic_log(qc_block_size, n - k, QCAttackType::KRA1); - out_values["Classic"]["KRA1"] = current_c_res.value - red_fac; - red_fac = get_qc_red_factor_classic_log(qc_block_size, n - k, QCAttackType::KRA2); - out_values["Classic"]["KRA2"] = current_c_res.value - red_fac; - red_fac = get_qc_red_factor_classic_log(qc_block_size, n - k, QCAttackType::KRA2); - out_values["Classic"]["KRA3"] = current_c_res.value - red_fac; - - std::string filename = - OUT_DIR_RESULTS + fmt::format("{:06}_{:06}_{:03}.json", n, k, t); - - std::ofstream file(filename); - if (file.is_open()) { - file << std::fixed << std::setprecision(10) - << out_values.dump(4); // Format JSON with indentation - file.close(); - std::cout << "Data written to " << filename << std::endl; - } else { - std::cerr << "Could not open the file!" 
<< std::endl; - } - } - - return 0; -} diff --git a/src/utils/isd_cost_estimate.cpp b/src/utils/isd_cost_estimate.cpp index 14b5f2c..c1115e7 100644 --- a/src/utils/isd_cost_estimate.cpp +++ b/src/utils/isd_cost_estimate.cpp @@ -5,6 +5,77 @@ #include #include +#include +#include + +// string'g + +std::string qc_attack_type_to_string(QCAttackType type) { + switch(type) { + case QCAttackType::KRA1: + return "KRA-1"; + case QCAttackType::KRA2: + return "KRA-2"; + case QCAttackType::KRA3: + return "KRA-3"; + case QCAttackType::MRA: + return "MRA"; + default: + return "Unknown attack type"; + } +} + +std::string quantum_algorithm_to_string(QuantumAlgorithm algo) { + switch (algo) { + case QuantumAlgorithm::Q_Lee_Brickell: return "Quantum Lee-Brickell"; + case QuantumAlgorithm::Q_Stern: return "Quantum Stern"; + default: + return "Unknown Algorithm"; + } +} + +std::string algorithm_to_string(Algorithm algo) { + switch (algo) { + case Algorithm::Prange: + return "Prange"; + case Algorithm::Lee_Brickell: + return "Lee_Brickell"; + case Algorithm::Leon: + return "Leon"; + case Algorithm::Stern: + return "Stern"; + case Algorithm::Finiasz_Sendrier: + return "Finiasz_Sendrier"; + case Algorithm::MMT: + return "MMT"; + case Algorithm::BJMM: + return "BJMM"; + // Add more algorithms here as needed + default: + return "Unknown Algorithm"; + } +} + +std::string result_to_string(const Result &result) { + std::ostringstream oss; + + // Add algorithm name + oss << "Algorithm: " << result.alg_name << "\n"; + + // Add parameters + oss << "Parameters:\n"; + for (const auto ¶m : result.params) { + oss << " " << param.first << ": " << param.second << "\n"; + } + + // Add other fields + oss << "Value: " << result.value << "\n"; + oss << "GJE Cost: " << result.gje_cost << "\n"; + oss << "List Size: " << result.list_size << "\n"; + + return oss.str(); +} + // static auto LOGGER = // Logger::LoggerManager::getInstance().get_logger("isd_cost_estimate"); /***************************Classic ISDs***************************************/ From 7075398dbf4dd5d4c1f061b664a180be48f7f440 Mon Sep 17 00:00:00 2001 From: Simone Perriello <8199216+tigerjack@users.noreply.github.com> Date: Wed, 31 Jul 2024 11:55:21 +0200 Subject: [PATCH 46/55] MOD CMakeLists to exclude parallel --- src/tools/CMakeLists.txt | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/src/tools/CMakeLists.txt b/src/tools/CMakeLists.txt index f076e85..e02872a 100644 --- a/src/tools/CMakeLists.txt +++ b/src/tools/CMakeLists.txt @@ -30,12 +30,7 @@ add_executable(${target} ${target}.cpp) target_link_libraries(${target} PRIVATE ${LIBS}) install(TARGETS parameter_generator DESTINATION bin) -# set(target work_factor_computation) -# add_executable(${target} ${target}.cpp) -# target_link_libraries(${target} PRIVATE ${LIBS}) -# install(TARGETS work_factor_computation DESTINATION bin) - -set(target work_factor_computation_parallel) +set(target work_factor_computation) add_executable(${target} ${target}.cpp) target_link_libraries(${target} PRIVATE ${LIBS} OpenMP::OpenMP_CXX) -install(TARGETS work_factor_computation_parallel DESTINATION bin) +install(TARGETS work_factor_computation DESTINATION bin) From c3b7495e2bf98f17e317a2a2ac530fcae2788005 Mon Sep 17 00:00:00 2001 From: Simone Perriello <8199216+tigerjack@users.noreply.github.com> Date: Wed, 31 Jul 2024 11:56:54 +0200 Subject: [PATCH 47/55] MOD prime computation --- src/tools/work_factor_computation.cpp | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git 
a/src/tools/work_factor_computation.cpp b/src/tools/work_factor_computation.cpp index accb4ad..f41199d 100644 --- a/src/tools/work_factor_computation.cpp +++ b/src/tools/work_factor_computation.cpp @@ -81,10 +81,11 @@ int handle_plain(const std::string args) { for (int i = 0; i < static_cast(QuantumAlgorithm::Count); i++) { QuantumAlgorithm algo = static_cast(i); - std::cout << "Algorithm: " << quantum_algorithm_to_string(algo) << std::endl; - Result current_q_res = q_isd_log_cost( - n, k, t, qc_block_size, QCAttackType::Plain, false, - std::unordered_set{algo}); + std::cout << "Algorithm: " << quantum_algorithm_to_string(algo) + << std::endl; + Result current_q_res = + q_isd_log_cost(n, k, t, qc_block_size, QCAttackType::Plain, false, + std::unordered_set{algo}); std::cout << "Plain " << std::endl; std::cout << result_to_string(current_q_res) << std::endl; @@ -97,7 +98,6 @@ int handle_plain(const std::string args) { } int handle_json(std::string json_filename) { - // const std::string input_isd_values = "out/isd_values.json"; std::ifstream file(json_filename); @@ -123,14 +123,15 @@ int handle_json(std::string json_filename) { return 1; // Return an error code } } -// Iterate over the list of entries + // Iterate over the list of entries #pragma omp parallel for for (const auto &entry : j) { uint32_t n = entry["n"]; uint32_t r = entry["r"]; uint32_t k = n - r; uint32_t t = entry["t"]; - uint32_t qc_block_size = entry["prime"]; + // uint32_t qc_block_size = entry["prime"]; + uint32_t qc_block_size = r; // int n0 = entry["n0"]; // int v = entry["v"]; // int lambd = entry["lambd"]; @@ -139,9 +140,9 @@ int handle_json(std::string json_filename) { Result current_c_res; Result current_q_res; - current_c_res = - c_isd_log_cost(n, k, t, qc_block_size, QCAttackType::Plain, false, - std::unordered_set{Algorithm::Stern}); + current_c_res = c_isd_log_cost( + n, k, t, qc_block_size, QCAttackType::Plain, false, + std::unordered_set{Algorithm::Prange, Algorithm::Stern}); current_q_res = q_isd_log_cost( n, k, t, qc_block_size, QCAttackType::Plain, false, @@ -201,14 +202,12 @@ int main(int argc, char *argv[]) { pi = NTL::ComputePi_RR(); if (strcmp(argv[1], "--json") == 0) { - std::string json_filename = argv[2]; handle_json(json_filename); } else if (strcmp(argv[1], "--plain") == 0) { std::string plainArgs = argv[2]; handle_plain(plainArgs); } else { - std::cerr << "Unknown argument: " << argv[1] << std::endl; std::cerr << "Usage: " << argv[0] << " --json [filename]" // "| --csv [filename]" From c68bad441ff57f0bd993510190570dfcfbb2246d Mon Sep 17 00:00:00 2001 From: Simone Perriello <8199216+tigerjack@users.noreply.github.com> Date: Wed, 31 Jul 2024 13:02:09 +0200 Subject: [PATCH 48/55] MOD work_factor_computation, skip if existing --- src/tools/work_factor_computation.cpp | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/src/tools/work_factor_computation.cpp b/src/tools/work_factor_computation.cpp index f41199d..605e9a5 100644 --- a/src/tools/work_factor_computation.cpp +++ b/src/tools/work_factor_computation.cpp @@ -2,6 +2,7 @@ #include #include #include +#include #include #include // For std::setprecision #include @@ -130,11 +131,18 @@ int handle_json(std::string json_filename) { uint32_t r = entry["r"]; uint32_t k = n - r; uint32_t t = entry["t"]; + + std::string filename = + OUT_DIR_RESULTS + fmt::format("{:06}_{:06}_{:03}.json", n, k, t); + // Check if the generated file exists + if (std::filesystem::exists(filename)) { + // std::cout << "Generated file exists: " << 
filename << std::endl + // << ". Skipping."; + continue; + } // uint32_t qc_block_size = entry["prime"]; uint32_t qc_block_size = r; - // int n0 = entry["n0"]; - // int v = entry["v"]; - // int lambd = entry["lambd"]; + nlohmann::json out_values; Result current_c_res; @@ -170,9 +178,6 @@ int handle_json(std::string json_filename) { get_qc_red_factor_classic_log(qc_block_size, n - k, QCAttackType::KRA3); out_values["Classic"]["KRA3"] = current_c_res.value - red_fac; - std::string filename = - OUT_DIR_RESULTS + fmt::format("{:06}_{:06}_{:03}.json", n, k, t); - std::ofstream file(filename); if (file.is_open()) { file << std::fixed << std::setprecision(10) From daf4243c592eeea9d97529e1b4e9ae3adfe671e4 Mon Sep 17 00:00:00 2001 From: Simone Perriello <8199216+tigerjack@users.noreply.github.com> Date: Wed, 31 Jul 2024 13:06:52 +0200 Subject: [PATCH 49/55] ADD dynamic scheduling for OMP_NUM_THREADS --- src/tools/work_factor_computation.cpp | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/tools/work_factor_computation.cpp b/src/tools/work_factor_computation.cpp index 605e9a5..7c385b7 100644 --- a/src/tools/work_factor_computation.cpp +++ b/src/tools/work_factor_computation.cpp @@ -124,8 +124,10 @@ int handle_json(std::string json_filename) { return 1; // Return an error code } } - // Iterate over the list of entries -#pragma omp parallel for + // Iterate over the list of entries. With schedule(dynamic) loop iterations + // are divided into chunks, and threads dynamically grab chunks as they + // complete their previous work. +#pragma omp parallel for schedule(dynamic) for (const auto &entry : j) { uint32_t n = entry["n"]; uint32_t r = entry["r"]; From 65e66dc71d80a6a1ab1bb32ad07ede127cfdc30d Mon Sep 17 00:00:00 2001 From: Simone Perriello <8199216+tigerjack@users.noreply.github.com> Date: Thu, 1 Aug 2024 12:00:30 +0200 Subject: [PATCH 50/55] ADD no. of processed element to work_factor_computation --- src/tools/work_factor_computation.cpp | 31 +++++++++++++++++++++++++-- 1 file changed, 29 insertions(+), 2 deletions(-) diff --git a/src/tools/work_factor_computation.cpp b/src/tools/work_factor_computation.cpp index 7c385b7..6e549e3 100644 --- a/src/tools/work_factor_computation.cpp +++ b/src/tools/work_factor_computation.cpp @@ -1,4 +1,5 @@ #include +#include #include #include #include @@ -111,6 +112,9 @@ int handle_json(std::string json_filename) { // Parse the JSON content nlohmann::json j; file >> j; + + int no_values = j.size(); + std::cout << "Number of values in the JSON: " << no_values << std::endl; std::filesystem::path dirPath(OUT_DIR_RESULTS); // Check if the directory exists if (!std::filesystem::exists(dirPath)) { @@ -124,11 +128,27 @@ int handle_json(std::string json_filename) { return 1; // Return an error code } } + + // Define an atomic counter for processed entries + std::atomic processed_count(0); + std::atomic error_count(0); + std::atomic skipped_count(0); + // Iterate over the list of entries. With schedule(dynamic) loop iterations // are divided into chunks, and threads dynamically grab chunks as they // complete their previous work. 
#pragma omp parallel for schedule(dynamic) for (const auto &entry : j) { + + if (processed_count % 1000 == 0) { +#pragma omp critical + { + std::cout << "\rProcessed: " << processed_count << " / " << no_values + << "; Skipped:" << skipped_count + << "; Errors: " << error_count << std::flush; + } + } + uint32_t n = entry["n"]; uint32_t r = entry["r"]; uint32_t k = n - r; @@ -141,6 +161,7 @@ int handle_json(std::string json_filename) { // std::cout << "Generated file exists: " << filename << std::endl // << ". Skipping."; continue; + ++skipped_count; } // uint32_t qc_block_size = entry["prime"]; uint32_t qc_block_size = r; @@ -185,13 +206,19 @@ int handle_json(std::string json_filename) { file << std::fixed << std::setprecision(10) << out_values.dump(4); // Format JSON with indentation file.close(); - std::cout << "Data written to " << filename << std::endl; + ++processed_count; + // std::cout << "Data written to " << filename << std::endl; } else { std::cerr << "Could not open the file!" << std::endl; + ++error_count; + } + if (processed_count % 1000 == 0) { +#pragma omp critical + { std::cout << processed_count << " / " << no_values << std::endl; } } } return 0; -} + } int main(int argc, char *argv[]) { // Logger::LoggerManager::getInstance().setup_logger( From e906b5cfcdc9004caa6fbe846677cdf21bba8bdb Mon Sep 17 00:00:00 2001 From: Simone Perriello <8199216+tigerjack@users.noreply.github.com> Date: Thu, 1 Aug 2024 14:07:44 +0200 Subject: [PATCH 51/55] MOD printings --- src/tools/work_factor_computation.cpp | 39 ++++++++++++++++++--------- 1 file changed, 26 insertions(+), 13 deletions(-) diff --git a/src/tools/work_factor_computation.cpp b/src/tools/work_factor_computation.cpp index 6e549e3..3ecf925 100644 --- a/src/tools/work_factor_computation.cpp +++ b/src/tools/work_factor_computation.cpp @@ -4,7 +4,6 @@ #include #include #include -#include #include // For std::setprecision #include #include @@ -16,6 +15,9 @@ #include #include "globals.hpp" +#include +#include +#include #define NUM_BITS_REAL_MANTISSA 1024 #define IGNORE_DECODING_COST 0 @@ -133,19 +135,26 @@ int handle_json(std::string json_filename) { std::atomic processed_count(0); std::atomic error_count(0); std::atomic skipped_count(0); + std::atomic iteration(0); // Iterate over the list of entries. With schedule(dynamic) loop iterations // are divided into chunks, and threads dynamically grab chunks as they // complete their previous work. #pragma omp parallel for schedule(dynamic) for (const auto &entry : j) { - - if (processed_count % 1000 == 0) { + ++iteration; + if (iteration % 1234 == 0) { #pragma omp critical { - std::cout << "\rProcessed: " << processed_count << " / " << no_values - << "; Skipped:" << skipped_count - << "; Errors: " << error_count << std::flush; + std::cout + << "\rProcessed: " << std::setw(8) << std::setfill(' ') + << processed_count << "; Skipped: " << std::setw(8) + << std::setfill(' ') << skipped_count + << "; Errors: " << std::setw(8) << std::setfill(' ') + << error_count << "; Remaining: " << std::setw(8) + << std::setfill(' ') + << (no_values - skipped_count - processed_count - error_count) + << " / " << no_values << std::flush; } } @@ -160,8 +169,8 @@ int handle_json(std::string json_filename) { if (std::filesystem::exists(filename)) { // std::cout << "Generated file exists: " << filename << std::endl // << ". 
Skipping."; - continue; ++skipped_count; + continue; } // uint32_t qc_block_size = entry["prime"]; uint32_t qc_block_size = r; @@ -212,13 +221,18 @@ int handle_json(std::string json_filename) { std::cerr << "Could not open the file!" << std::endl; ++error_count; } - if (processed_count % 1000 == 0) { -#pragma omp critical - { std::cout << processed_count << " / " << no_values << std::endl; } - } + + std::cout << "\rProcessed: " << std::setw(8) << std::setfill(' ') + << processed_count << "; Skipped: " << std::setw(8) + << std::setfill(' ') << skipped_count + << "; Errors: " << std::setw(8) << std::setfill(' ') + << error_count << "; Remaining: " << std::setw(8) + << std::setfill(' ') + << (no_values - skipped_count - processed_count - error_count) + << " / " << no_values << std::endl; } return 0; - } +} int main(int argc, char *argv[]) { // Logger::LoggerManager::getInstance().setup_logger( @@ -248,6 +262,5 @@ int main(int argc, char *argv[]) { << std::endl; return 1; } - return 0; } From 5243e310d565a1894a693e583104fb460b2319cd Mon Sep 17 00:00:00 2001 From: Simone Perriello <8199216+tigerjack@users.noreply.github.com> Date: Fri, 2 Aug 2024 11:20:39 +0200 Subject: [PATCH 52/55] FIX reduction factor; DEL reduction factor recomputation --- src/tools/work_factor_computation.cpp | 65 +++++++++++++++------------ 1 file changed, 36 insertions(+), 29 deletions(-) diff --git a/src/tools/work_factor_computation.cpp b/src/tools/work_factor_computation.cpp index 3ecf925..1abd57d 100644 --- a/src/tools/work_factor_computation.cpp +++ b/src/tools/work_factor_computation.cpp @@ -4,6 +4,7 @@ #include #include #include +#include #include // For std::setprecision #include #include @@ -15,9 +16,9 @@ #include #include "globals.hpp" -#include -#include #include +#include +#include #define NUM_BITS_REAL_MANTISSA 1024 #define IGNORE_DECODING_COST 0 @@ -146,8 +147,7 @@ int handle_json(std::string json_filename) { if (iteration % 1234 == 0) { #pragma omp critical { - std::cout - << "\rProcessed: " << std::setw(8) << std::setfill(' ') + std::cout << "\rProcessed: " << std::setw(8) << std::setfill(' ') << processed_count << "; Skipped: " << std::setw(8) << std::setfill(' ') << skipped_count << "; Errors: " << std::setw(8) << std::setfill(' ') @@ -172,6 +172,8 @@ int handle_json(std::string json_filename) { ++skipped_count; continue; } +#pragma omp critical + std::cout << "Processing " << filename << std::endl; // uint32_t qc_block_size = entry["prime"]; uint32_t qc_block_size = r; @@ -193,22 +195,27 @@ int handle_json(std::string json_filename) { out_values["Quantum"]["Plain"] = current_q_res; // Post-apply reduction factors - double red_fac = - get_qc_red_factor_quantum_log(qc_block_size, n - k, QCAttackType::MRA); - out_values["Quantum"]["MRA"] = current_q_res.value - red_fac; - - red_fac = - get_qc_red_factor_classic_log(qc_block_size, n - k, QCAttackType::MRA); - out_values["Classic"]["MRA"] = current_c_res.value - red_fac; - red_fac = - get_qc_red_factor_classic_log(qc_block_size, n - k, QCAttackType::KRA1); - out_values["Classic"]["KRA1"] = current_c_res.value - red_fac; - red_fac = - get_qc_red_factor_classic_log(qc_block_size, n - k, QCAttackType::KRA2); - out_values["Classic"]["KRA2"] = current_c_res.value - red_fac; - red_fac = - get_qc_red_factor_classic_log(qc_block_size, n - k, QCAttackType::KRA3); - out_values["Classic"]["KRA3"] = current_c_res.value - red_fac; + // uint32_t n0 = n / r; + // if (n0 == 0) { + // // It's a value with rate < .5; it happens for KRA2 attacks + // double red_fac = + // 
get_qc_red_factor_quantum_log(qc_block_size, n0, QCAttackType::MRA); + // out_values["Quantum"]["MRA"] = current_q_res.value - red_fac; + + // red_fac = + // get_qc_red_factor_classic_log(qc_block_size, n0, QCAttackType::MRA); + // out_values["Classic"]["MRA"] = current_c_res.value - red_fac; + + // red_fac = + // get_qc_red_factor_classic_log(qc_block_size, n0, QCAttackType::KRA1); + // out_values["Classic"]["KRA1"] = current_c_res.value - red_fac; + // red_fac = + // get_qc_red_factor_classic_log(qc_block_size, n0, QCAttackType::KRA2); + // out_values["Classic"]["KRA2"] = current_c_res.value - red_fac; + // red_fac = + // get_qc_red_factor_classic_log(qc_block_size, n0, QCAttackType::KRA3); + // out_values["Classic"]["KRA3"] = current_c_res.value - red_fac; + // } std::ofstream file(filename); if (file.is_open()) { @@ -221,16 +228,16 @@ int handle_json(std::string json_filename) { std::cerr << "Could not open the file!" << std::endl; ++error_count; } - - std::cout << "\rProcessed: " << std::setw(8) << std::setfill(' ') - << processed_count << "; Skipped: " << std::setw(8) - << std::setfill(' ') << skipped_count - << "; Errors: " << std::setw(8) << std::setfill(' ') - << error_count << "; Remaining: " << std::setw(8) - << std::setfill(' ') - << (no_values - skipped_count - processed_count - error_count) - << " / " << no_values << std::endl; } + + std::cout << "\rProcessed: " << std::setw(8) << std::setfill(' ') + << processed_count << "; Skipped: " << std::setw(8) + << std::setfill(' ') << skipped_count + << "; Errors: " << std::setw(8) << std::setfill(' ') << error_count + << "; Remaining: " << std::setw(8) << std::setfill(' ') + << (no_values - skipped_count - processed_count - error_count) + << " / " << no_values << std::endl; + return 0; } From 77d57e81317ac03d102adb2f3f8f534db7e65c3a Mon Sep 17 00:00:00 2001 From: Simone Perriello <8199216+tigerjack@users.noreply.github.com> Date: Tue, 6 Aug 2024 08:20:54 +0200 Subject: [PATCH 53/55] DEL old work factor --- src/tools/work_factor_computation_old.cpp | 80 ----------------------- 1 file changed, 80 deletions(-) delete mode 100644 src/tools/work_factor_computation_old.cpp diff --git a/src/tools/work_factor_computation_old.cpp b/src/tools/work_factor_computation_old.cpp deleted file mode 100644 index e89b1d9..0000000 --- a/src/tools/work_factor_computation_old.cpp +++ /dev/null @@ -1,80 +0,0 @@ -#include -#include -// #include - -#define NUM_BITS_REAL_MANTISSA 1024 -#define IGNORE_DECODING_COST 0 -// #define EXPLORE_REPRS - -#include "binomials.hpp" -#include "isd_cost_estimate.hpp" -#include -#include - -int main(int argc, char *argv[]) { - if (argc != 7) { - std::cout - << "Work factor computation for ISD" << std::endl - << " Usage " << argv[0] - << " " - " " - << std::endl - << " = 1 implies a non QC code " << std::endl - << " = the attack is a key recovery attack on a QC-[L|M]DPC " - << std::endl - << " = if the quasi-cyclic reduction factor " - "cost should be applied" - << std::endl; - return -1; - } - - // Logger::LoggerManager::getInstance().setup_logger( - // "binomials", spdlog::level::err, spdlog::level::err); - // Logger::LoggerManager::getInstance().setup_logger( - // "isd_cost_estimate", spdlog::level::err, spdlog::level::err); - - /* reduce by a factor matching the QC block size */ - - InitBinomials(); - NTL::RR::SetPrecision(NUM_BITS_REAL_MANTISSA); - pi = NTL::ComputePi_RR(); - uint32_t n = atoi(argv[1]); - uint32_t k = atoi(argv[2]); - uint32_t t = atoi(argv[3]); - uint32_t qc_block_size = atoi(argv[4]); - uint32_t 
is_kra = atoi(argv[5]); - uint32_t is_red_factor_applied = atoi(argv[6]); - - std::cout << " Input params: " << std::endl - << "- : " << n << std::endl - << "- : " << k << std::endl - << "- : " << t << std::endl - << "- : " << qc_block_size << std::endl - << "- : " << is_kra << std::endl - << "- : " << is_red_factor_applied - << std::endl; - - for (int i = 0; i < static_cast(QCAttackType::Count); ++i) { - QCAttackType attack = static_cast(i); - printColor(color); - } - std::cout << "Minimum classic cost :" - << c_isd_log_cost(n, k, t, qc_block_size, is_kra, - is_red_factor_applied, - std::unordered_set{ - Prange, Lee_Brickell, Leon, Stern, - // Finiasz_Sendrier, // - // MMT, BJMM // - }) - .value - << " Minimum quantum cost :" - << q_isd_log_cost(n, k, t, qc_block_size, is_kra, - is_red_factor_applied, - std::unordered_set{ - Q_Lee_Brickell, Q_Stern}) - .value; - if (is_red_factor_applied && qc_block_size != 1) - std::cout << " (including qc_effects) "; - std::cout << std::endl; - return 0; -} From 6cc25f766b97c5585fef5cb4cd882bcb1c75bdcd Mon Sep 17 00:00:00 2001 From: Simone Perriello <8199216+tigerjack@users.noreply.github.com> Date: Tue, 6 Aug 2024 08:22:06 +0200 Subject: [PATCH 54/55] MOD new README --- README_new.md | 19 ++++--------------- 1 file changed, 4 insertions(+), 15 deletions(-) diff --git a/README_new.md b/README_new.md index 39651ca..a4b4e98 100644 --- a/README_new.md +++ b/README_new.md @@ -6,8 +6,8 @@ Dependencies: - fmt (come with spdlog) Executables -- work_factor_computation_parallel -To spwan threads that compute the work_factor +- work_factor_computation +Compute the work factor. It accepts either a json or a plain set of parameters * Structure - include/utils @@ -18,23 +18,12 @@ Al the cpp corresponding to the prev headers All the output tools, that is, executables to use * Compile - -```sh -mkdir build && cd build -cmake .. -make -j -``` - -** To create the binaries (inside the local `bin` directory) +To create the binaries (inside the local `bin` directory) Inside `build` ```sh cmake -DCMAKE_INSTALL_PREFIX=.. .. -make -j -make install +make install -j ``` Then, you can execute the files as ./bin/ - -* TODOs -The tools should not hardcode their paths (see f.e. 
work_factor_computation_parallel) From c743b6282825e5e7c443a2a74344e50ef7691461 Mon Sep 17 00:00:00 2001 From: Simone Perriello <8199216+tigerjack@users.noreply.github.com> Date: Tue, 6 Aug 2024 11:58:45 +0200 Subject: [PATCH 55/55] DEL last logging utils --- src/tools/parameter_generator.cpp | 16 ++++++++-------- src/utils/CMakeLists.txt | 2 +- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/src/tools/parameter_generator.cpp b/src/tools/parameter_generator.cpp index 37e8658..2236c30 100644 --- a/src/tools/parameter_generator.cpp +++ b/src/tools/parameter_generator.cpp @@ -6,7 +6,7 @@ #include "binomials.hpp" #include "bit_error_probabilities.hpp" #include "isd_cost_estimate.hpp" -#include "logging.hpp" +// #include "logging.hpp" #include "partitions_permanents.hpp" #include "proper_primes.hpp" #include @@ -266,15 +266,15 @@ int main(int argc, char *argv[]) { } while ((p > (1.0 + epsilon) * p_th) && (current_prime_pos > 0)); if (!p_ok || !d_v_ok || !t_ok) { - spdlog::error("Error: One or more variables are not initialized."); + // spdlog::error("Error: One or more variables are not initialized."); throw std::runtime_error("One or more variables are not initialized."); } else { - spdlog::info( - "parameter set found: p={}, t={}, d_v={}, mpartition={}", - Logger::LoggerManager::getInstance().optional_to_string(p_ok), - Logger::LoggerManager::getInstance().optional_to_string(t_ok), - Logger::LoggerManager::getInstance().optional_to_string(d_v_ok), - Logger::LoggerManager::getInstance().array_to_string(mpartition_ok)); + // spdlog::info( + // "parameter set found: p={}, t={}, d_v={}, mpartition={}", + // Logger::LoggerManager::getInstance().optional_to_string(p_ok), + // Logger::LoggerManager::getInstance().optional_to_string(t_ok), + // Logger::LoggerManager::getInstance().optional_to_string(d_v_ok), + // Logger::LoggerManager::getInstance().array_to_string(mpartition_ok)); } // std::cout // << " p:" << p_ok << " t: " << t_ok; diff --git a/src/utils/CMakeLists.txt b/src/utils/CMakeLists.txt index 7e6ec76..b893053 100644 --- a/src/utils/CMakeLists.txt +++ b/src/utils/CMakeLists.txt @@ -1,5 +1,5 @@ # add_library(ledautils binomials.cpp logging.cpp bit_error_probabilities.cpp isd_cost_estimate.cpp) -add_library(ledautils binomials.cpp logging.cpp bit_error_probabilities.cpp isd_cost_estimate.cpp partitions_permanents.cpp) +add_library(ledautils binomials.cpp bit_error_probabilities.cpp isd_cost_estimate.cpp partitions_permanents.cpp) find_library(GMP_LIB gmp) find_library(NTL_LIB ntl)
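
A short usage sketch of the `work_factor_computation` tool as it stands at the end of this series. The `--json` path and the comma-separated `n,k,t,qc_block_size` argument order are taken from the tool's own argument parsing in the patches above; the concrete `--plain` parameter values below are illustrative placeholders only.

```sh
# Build and install the binaries into the local ./bin directory (see README_new.md above);
# run these from inside the build directory.
cmake -DCMAKE_INSTALL_PREFIX=.. ..
make install -j

# Plain mode: four comma-separated values n,k,t,qc_block_size.
# The numbers here are an example, not parameters from this series;
# qc_block_size = 1 denotes a non-QC code, as in the old tool's usage text.
./bin/work_factor_computation --plain "1000,500,50,1"

# JSON mode: iterates over the entries of the given file and writes one
# result file per entry into the configured OUT_DIR_RESULTS directory.
./bin/work_factor_computation --json out/isd_values.json
```

Each entry of the input JSON is expected to provide at least the integer fields `n`, `r`, and `t`; after patch 47 the `prime` field is no longer read and the QC block size is taken to be `r`.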