From d30ae2f88a5baab1c856ea0d0daa4cd5a195585e Mon Sep 17 00:00:00 2001 From: papp-pal-andras Date: Thu, 22 Jan 2026 09:07:18 +0100 Subject: [PATCH 1/3] partitioning test suite, draft v1 --- apps/CMakeLists.txt | 1 + .../partitioning_test_suite_config.json | 30 +++ apps/ilp_partitioning_test_suite.cpp | 27 +++ .../AbstractTestSuiteRunner.hpp | 2 +- .../PartitioningTestSuiteRunner.hpp | 177 ++++++++++++++++++ .../StringToScheduler/run_partitioner.hpp | 75 ++++++++ 6 files changed, 311 insertions(+), 1 deletion(-) create mode 100644 apps/config/partitioning_test_suite_config.json create mode 100644 apps/ilp_partitioning_test_suite.cpp create mode 100644 apps/test_suite_runner/PartitioningTestSuiteRunner.hpp create mode 100644 apps/test_suite_runner/StringToScheduler/run_partitioner.hpp diff --git a/apps/CMakeLists.txt b/apps/CMakeLists.txt index 97935a92..48f65c5e 100644 --- a/apps/CMakeLists.txt +++ b/apps/CMakeLists.txt @@ -51,6 +51,7 @@ endif() if (COPT_FOUND) _add_executable( ilp_bsp_scheduler ) _add_executable( ilp_hypergraph_partitioner ) +_add_executable( ilp_partitioning_test_suite ) endif() endif() diff --git a/apps/config/partitioning_test_suite_config.json b/apps/config/partitioning_test_suite_config.json new file mode 100644 index 00000000..5edd80e3 --- /dev/null +++ b/apps/config/partitioning_test_suite_config.json @@ -0,0 +1,30 @@ +{ + "globalParameters": { + "timeLimit": 3600, + "graphDirectory": "graphs/", + "archDirectory": "machines/", + "outputStatsFile": "all_run_stats.csv", + "outputLogFile": "log.txt", + "outputSchedule": false + }, + "algorithms": [ + { + "name": "ILP", + "id" : "ILP", + "run" : true, + "parameters": {} + }, + { + "name": "ILP_dupl", + "id" : "ILP_dupl", + "run" : true, + "parameters": {} + }, + { + "name": "ILP_repl", + "id" : "ILP_repl", + "run" : true, + "parameters": {} + } + ] +} \ No newline at end of file diff --git a/apps/ilp_partitioning_test_suite.cpp b/apps/ilp_partitioning_test_suite.cpp new file mode 100644 index 00000000..4a311189 --- /dev/null +++ b/apps/ilp_partitioning_test_suite.cpp @@ -0,0 +1,27 @@ +/* +Copyright 2024 Huawei Technologies Co., Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +@author Toni Boehnlein, Benjamin Lozes, Pal Andras Papp, Raphael S. 
Steiner
+*/
+
+#include "osp/graph_implementations/adj_list_impl/computational_dag_edge_idx_vector_impl.hpp"
+#include "test_suite_runner/PartitioningTestSuiteRunner.hpp"
+
+using GraphT = osp::ComputationalDagEdgeIdxVectorImplDefIntT;
+
+int main(int argc, char *argv[]) {
+    osp::PartitioningTestSuiteRunner<GraphT> runner;
+    return runner.Run(argc, argv);
+}
\ No newline at end of file
diff --git a/apps/test_suite_runner/AbstractTestSuiteRunner.hpp b/apps/test_suite_runner/AbstractTestSuiteRunner.hpp
index 7b4f249c..0914033f 100644
--- a/apps/test_suite_runner/AbstractTestSuiteRunner.hpp
+++ b/apps/test_suite_runner/AbstractTestSuiteRunner.hpp
@@ -189,7 +189,7 @@ class AbstractTestSuiteRunner {
         }
     }
 
-    int Run(int argc, char *argv[]) {
+    virtual int Run(int argc, char *argv[]) {
         try {
             parser_.ParseArgs(argc, argv);
         } catch (const std::exception &e) {
diff --git a/apps/test_suite_runner/PartitioningTestSuiteRunner.hpp b/apps/test_suite_runner/PartitioningTestSuiteRunner.hpp
new file mode 100644
index 00000000..9bdef844
--- /dev/null
+++ b/apps/test_suite_runner/PartitioningTestSuiteRunner.hpp
@@ -0,0 +1,177 @@
+/*
+Copyright 2024 Huawei Technologies Co., Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+@author Toni Boehnlein, Benjamin Lozes, Pal Andras Papp, Raphael S. Steiner
+*/
+
+#pragma once
+
+#include "AbstractTestSuiteRunner.hpp"
+#include "StringToScheduler/run_partitioner.hpp"
+#include "osp/graph_implementations/adj_list_impl/computational_dag_vector_impl.hpp"
+#include "osp/partitioning/model/partitioning.hpp"
+#include "osp/partitioning/model/partitioning_replication.hpp"
+#include "osp/bsp/model/BspSchedule.hpp"
+
+namespace osp {
+
+class PartitioningStatsModule : public IStatisticModule> {
+  public:
+    std::vector<std::string> GetMetricHeaders() const override { return {"Cost", "CutNet"}; }
+
+    std::map<std::string, std::string> RecordStatistics(const Partitioning &partitioning,
+                                                        std::ofstream & /*log_stream*/) const override {
+        std::map<std::string, std::string> stats;
+        stats["Cost"] = std::to_string(partitioning.ComputeConnectivityCost());
+        stats["CutNet"] = std::to_string(partitioning.ComputeCutNetCost());
+        return stats;
+    }
+};
+
+template <typename GraphType>
+class PartitioningTestSuiteRunner : public AbstractTestSuiteRunner, GraphType> {
+  private:
+
+  protected:
+    ReturnStatus ComputeTargetObjectImpl(const BspInstance &instance,
+                                         std::unique_ptr > &targetObject,
+                                         const pt::ptree &algoConfig,
+                                         long long &computationTimeMs) override {
+        return ReturnStatus::ERROR; // unused
+    }
+
+    void CreateAndRegisterStatisticModules(const std::string &moduleName) override {
+        if (moduleName == "PartitioningStats") {
+            this->activeStatsModules_.push_back(std::make_unique<PartitioningStatsModule>());
+        }
+    }
+
+  public:
+    PartitioningTestSuiteRunner() : AbstractTestSuiteRunner, GraphType>() {}
+
+    virtual int Run(int argc, char *argv[]) override;
+};
+
+template <typename GraphType>
+int PartitioningTestSuiteRunner<GraphType>::Run(int argc, char *argv[]) {
+    using HypergraphT = HypergraphDefT;
+    try {
+        this->parser_.ParseArgs(argc, argv);
+    } catch (const std::exception &e) {
+        std::cerr << "Error parsing command line arguments: " << e.what() << std::endl;
+        return 1;
+    }
+
+    if (!this->ParseCommonConfig()) {
+        return 1;
+    }
+
+    this->SetupLogFile();
+
+    CreateAndRegisterStatisticModules("PartitioningStats");
+    this->SetupStatisticsFile();
+
+    for (const auto &machineEntry : std::filesystem::recursive_directory_iterator(this->machineDirPath_)) {
+        if (std::filesystem::is_directory(machineEntry)) {
+            this->logStream_ << "Skipping directory " << machineEntry.path().string() << std::endl;
+            continue;
+        }
+        std::string filenameMachine = machineEntry.path().string();
+        std::string nameMachine = filenameMachine.substr(filenameMachine.rfind('/') + 1);
+        if (nameMachine.rfind('.') != std::string::npos) {
+            nameMachine = nameMachine.substr(0, nameMachine.rfind('.'));
+        }
+
+        // Temporary hack. Until there is a separate file format for partitioning problem parameters, we abuse
+        // bsp arch files: 1st number is number of parts, 2nd is imbalance allowed (percentage), rest is ignored
+        BspArchitecture arch;
+        if (!file_reader::ReadBspArchitecture(filenameMachine, arch)) {
+            this->logStream_ << "Reading architecture file " << filenameMachine << " failed." << std::endl;
+            continue;
+        }
+        this->logStream_ << "Start Machine: " + filenameMachine + "\n";
+
+
+        for (const auto &graphEntry : std::filesystem::recursive_directory_iterator(this->graphDirPath_)) {
+            if (std::filesystem::is_directory(graphEntry)) {
+                this->logStream_ << "Skipping directory " << graphEntry.path().string() << std::endl;
+                continue;
+            }
+            std::string filenameGraph = graphEntry.path().string();
+            std::string nameGraph = filenameGraph.substr(filenameGraph.rfind('/') + 1);
+            if (nameGraph.rfind('.') != std::string::npos) {
+                nameGraph = nameGraph.substr(0, nameGraph.rfind('.'));
+            }
+            this->logStream_ << "Start Hypergraph: " + filenameGraph + "\n";
+
+            bool graphStatus = false;
+            GraphType dag;
+            graphStatus = file_reader::ReadGraph(filenameGraph, dag);
+
+            if (!graphStatus) {
+                this->logStream_ << "Reading graph file " << filenameGraph << " failed." << std::endl;
+                continue;
+            }
+
+            PartitioningProblem instance(ConvertFromCdagAsHyperdag(dag), arch.NumberOfProcessors());
+            instance.SetMaxWorkWeightViaImbalanceFactor(static_cast<double>(arch.CommunicationCosts()) / 100.0);
+
+            for (auto &algorithmConfigPair : this->parser_.scheduler_) {
+                const pt::ptree &algoConfig = algorithmConfigPair.second;
+
+                std::string currentAlgoName = algoConfig.get_child("name").get_value<std::string>();
+                this->logStream_ << "Start Algorithm " + currentAlgoName + "\n";
+
+                long long computationTimeMs;
+                const auto startTime = std::chrono::high_resolution_clock::now();
+
+                std::pair cost;
+                ReturnStatus execStatus = RunPartitioner(this->parser_, algoConfig, instance, cost);
+
+                const auto finishTime = std::chrono::high_resolution_clock::now();
+                computationTimeMs = std::chrono::duration_cast<std::chrono::milliseconds>(finishTime - startTime).count();
+
+                if (execStatus != ReturnStatus::OSP_SUCCESS && execStatus != ReturnStatus::BEST_FOUND) {
+                    if (execStatus == ReturnStatus::ERROR) {
+                        this->logStream_ << "Error computing with " << currentAlgoName << "." << std::endl;
+                    } else if (execStatus == ReturnStatus::TIMEOUT) {
+                        this->logStream_ << "Partitioner " << currentAlgoName << " timed out." << std::endl;
+                    }
+                    continue;
+                }
+
+                // currently not writing output to file
+
+                if (this->statsOutStream_.is_open()) {
+                    std::map<std::string, std::string> currentRowValues;
+                    currentRowValues["Graph"] = nameGraph;
+                    currentRowValues["Machine"] = nameMachine;
+                    currentRowValues["Algorithm"] = currentAlgoName;
+                    currentRowValues["TimeToCompute(ms)"] = std::to_string(computationTimeMs);
+                    currentRowValues["Cost"] = std::to_string(cost.first);
+                    currentRowValues["CutNet"] = std::to_string(cost.second);
+
+                    for (size_t i = 0; i < this->allCsvHeaders_.size(); ++i) {
+                        this->statsOutStream_ << currentRowValues[this->allCsvHeaders_[i]] << (i == this->allCsvHeaders_.size() - 1 ? "" : ",");
+                    }
+                    this->statsOutStream_ << "\n";
+                }
+            }
+        }
+    }
+    return 0;
+}
+
+} // namespace osp
diff --git a/apps/test_suite_runner/StringToScheduler/run_partitioner.hpp b/apps/test_suite_runner/StringToScheduler/run_partitioner.hpp
new file mode 100644
index 00000000..e5932977
--- /dev/null
+++ b/apps/test_suite_runner/StringToScheduler/run_partitioner.hpp
@@ -0,0 +1,75 @@
+/*
+Copyright 2024 Huawei Technologies Co., Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+@author Toni Boehnlein, Benjamin Lozes, Pal Andras Papp, Raphael S. Steiner
+*/
+
+#pragma once
+
+#include <iostream>
+#include <set>
+#include <stdexcept>
+#include <string>
+#include <utility>
+
+#include "../ConfigParser.hpp"
+#include "osp/partitioning/partitioners/partitioning_ILP.hpp"
+#include "osp/partitioning/partitioners/partitioning_ILP_replication.hpp"
+
+namespace osp {
+
+const std::set<std::string> GetAvailablePartitionerNames() { return {"ILP", "ILP_dupl", "ILP_repl"}; }
+
+
+template
+ReturnStatus RunPartitioner(const ConfigParser &parser,
+                            const boost::property_tree::ptree &algorithm,
+                            const PartitioningProblem > &instance,
+                            std::pair &cost) {
+    using Hgraph = Hypergraph;
+
+    std::cout << "Running algorithm: " << algorithm.get_child("name").get_value<std::string>() << std::endl;
+
+    if (algorithm.get_child("name").get_value<std::string>() == "ILP") {
+        HypergraphPartitioningILP partitioner;
+        Partitioning solution(instance);
+
+        const unsigned timeLimit = parser.globalParams_.get_child("timeLimit").get_value<unsigned>();
+        partitioner.SetTimeLimitSeconds(timeLimit);
+        ReturnStatus status = partitioner.ComputePartitioning(solution);
+        cost = {solution.ComputeConnectivityCost(), solution.ComputeCutNetCost()};
+        return status;
+
+    } else if (algorithm.get_child("name").get_value<std::string>() == "ILP_dupl"
+               || algorithm.get_child("name").get_value<std::string>() == "ILP_repl") {
+        HypergraphPartitioningILPWithReplication partitioner;
+        PartitioningWithReplication solution(instance);
+
+        const unsigned timeLimit = parser.globalParams_.get_child("timeLimit").get_value<unsigned>();
+        partitioner.SetTimeLimitSeconds(timeLimit);
+        if (algorithm.get_child("name").get_value<std::string>() == "ILP_repl") {
+            partitioner.SetReplicationModel(HypergraphPartitioningILPWithReplication::ReplicationModelInIlp::GENERAL);
+        }
+
+        ReturnStatus status = partitioner.ComputePartitioning(solution);
+        cost = {solution.ComputeConnectivityCost(), solution.ComputeCutNetCost()};
+        return status;
+
+    } else {
+        throw
std::invalid_argument("Parameter error: Unknown algorithm.\n"); + } +} + +} // namespace osp \ No newline at end of file From 1327e6cbfbf7165737b10ca9a16572a4764968ab Mon Sep 17 00:00:00 2001 From: papp-pal-andras Date: Thu, 22 Jan 2026 12:44:03 +0100 Subject: [PATCH 2/3] matrix->hypergraph: row-net reader --- .../io/mtx_hypergraph_file_reader.hpp | 60 ++++++++++++++----- tests/hypergraph_and_partition.cpp | 7 +++ 2 files changed, 51 insertions(+), 16 deletions(-) diff --git a/include/osp/auxiliary/io/mtx_hypergraph_file_reader.hpp b/include/osp/auxiliary/io/mtx_hypergraph_file_reader.hpp index 42acbd8d..8334a518 100644 --- a/include/osp/auxiliary/io/mtx_hypergraph_file_reader.hpp +++ b/include/osp/auxiliary/io/mtx_hypergraph_file_reader.hpp @@ -32,9 +32,14 @@ limitations under the License. namespace osp { namespace file_reader { -// reads a matrix into Hypergraph format, where nonzeros are vertices, and rows/columns are hyperedges +enum class MatrixToHypergraphFormat { FINE_GRAINED, ROW_NET }; + +// reads a matrix into Hypergraph format, covering two different formats: +// - fine-grained: nonzeros are vertices, and rows/columns are hyperedges +// - row-net: columns are vertices, rows are hyperedges template -bool ReadHypergraphMartixMarketFormat(std::ifstream &infile, Hypergraph &hgraph) { +bool ReadHypergraphMartixMarketFormat(std::ifstream &infile, Hypergraph &hgraph, + MatrixToHypergraphFormat format = MatrixToHypergraphFormat::FINE_GRAINED) { std::string line; // Skip comments or empty lines (robustly) @@ -69,16 +74,20 @@ bool ReadHypergraphMartixMarketFormat(std::ifstream &infile, Hypergraph(nEntries); + const IndexType numNodes = (format == MatrixToHypergraphFormat::FINE_GRAINED) + ? static_cast(nEntries) + : static_cast(mCol); hgraph.Reset(numNodes, 0); - for (IndexType node = 0; node < numNodes; ++node) { - hgraph.SetVertexWorkWeight(node, static_cast(1)); - hgraph.SetVertexMemoryWeight(node, static_cast(1)); - } std::vector> rowHyperedges(static_cast(mRow)); - std::vector> columnHyperedges(static_cast(mCol)); + std::vector> columnHyperedges; + std::vector nrNonZerosInColumn; + if (format == MatrixToHypergraphFormat::FINE_GRAINED) { + columnHyperedges.resize(static_cast(mCol)); + } else { + nrNonZerosInColumn.resize(static_cast(mCol), 0); + } int entriesRead = 0; while (entriesRead < nEntries && std::getline(infile, line)) { @@ -107,13 +116,20 @@ bool ReadHypergraphMartixMarketFormat(std::ifstream &infile, Hypergraph(row) >= numNodes || static_cast(col) >= numNodes) { + IndexType rowFormatLimit = (format == MatrixToHypergraphFormat::FINE_GRAINED) ? 
numNodes : static_cast(mRow); + if (static_cast(row) >= rowFormatLimit || static_cast(col) >= numNodes) { std::cerr << "Error: Index exceeds vertex type limit.\n"; return false; } - rowHyperedges[static_cast(row)].push_back(static_cast(entriesRead)); - columnHyperedges[static_cast(col)].push_back(static_cast(entriesRead)); + if (format == MatrixToHypergraphFormat::FINE_GRAINED) + { + rowHyperedges[static_cast(row)].push_back(static_cast(entriesRead)); + columnHyperedges[static_cast(col)].push_back(static_cast(entriesRead)); + } else { + rowHyperedges[static_cast(row)].push_back(static_cast(col)); + ++nrNonZerosInColumn[static_cast(col)]; + } ++entriesRead; } @@ -136,9 +152,20 @@ bool ReadHypergraphMartixMarketFormat(std::ifstream &infile, Hypergraph(mCol); ++col) { - if (!columnHyperedges[col].empty()) { - hgraph.AddHyperedge(columnHyperedges[col]); + if (format == MatrixToHypergraphFormat::FINE_GRAINED) { + for (IndexType col = 0; col < static_cast(mCol); ++col) { + if (!columnHyperedges[col].empty()) { + hgraph.AddHyperedge(columnHyperedges[col]); + } + } + for (IndexType node = 0; node < numNodes; ++node) { + hgraph.SetVertexWorkWeight(node, static_cast(1)); + hgraph.SetVertexMemoryWeight(node, static_cast(1)); + } + } else { + for (IndexType node = 0; node < numNodes; ++node) { + hgraph.SetVertexWorkWeight(node, nrNonZerosInColumn[node]); + hgraph.SetVertexMemoryWeight(node, static_cast(1)); } } @@ -146,7 +173,8 @@ bool ReadHypergraphMartixMarketFormat(std::ifstream &infile, Hypergraph -bool ReadHypergraphMartixMarketFormat(const std::string &filename, Hypergraph &hgraph) { +bool ReadHypergraphMartixMarketFormat(const std::string &filename, Hypergraph &hgraph, + MatrixToHypergraphFormat format = MatrixToHypergraphFormat::FINE_GRAINED) { // Ensure the file is .mtx format if (std::filesystem::path(filename).extension() != ".mtx") { std::cerr << "Error: Only .mtx files are accepted.\n"; @@ -174,7 +202,7 @@ bool ReadHypergraphMartixMarketFormat(const std::string &filename, Hypergraph(dag); BOOST_CHECK_EQUAL(dag.NumVertices(), hgraph.NumVertices()); From 21b14be2fcd0918b05158eceba0c3f0357c6187c Mon Sep 17 00:00:00 2001 From: tonibohnlein Date: Thu, 22 Jan 2026 12:59:03 +0100 Subject: [PATCH 3/3] enable logtoconsole partitiong ilps --- .../osp/partitioning/partitioners/partitioning_ILP_base.hpp | 2 +- .../pebblers/pebblingILP/MultiProcessorPebbling.hpp | 6 +++--- .../pebblingILP/partialILP/AcyclicPartitioningILP.hpp | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/include/osp/partitioning/partitioners/partitioning_ILP_base.hpp b/include/osp/partitioning/partitioners/partitioning_ILP_base.hpp index afc25af4..f7701a99 100644 --- a/include/osp/partitioning/partitioners/partitioning_ILP_base.hpp +++ b/include/osp/partitioning/partitioners/partitioning_ILP_base.hpp @@ -55,7 +55,7 @@ class HypergraphPartitioningILPBase { template void HypergraphPartitioningILPBase::SolveIlp(Model &model) { - model.SetIntParam(COPT_INTPARAM_LOGTOCONSOLE, 0); + // model.SetIntParam(COPT_INTPARAM_LOGTOCONSOLE, 0); model.SetDblParam(COPT_DBLPARAM_TIMELIMIT, timeLimitSeconds_); model.SetIntParam(COPT_INTPARAM_THREADS, 128); diff --git a/include/osp/pebbling/pebblers/pebblingILP/MultiProcessorPebbling.hpp b/include/osp/pebbling/pebblers/pebblingILP/MultiProcessorPebbling.hpp index b6b7948b..72ceabab 100644 --- a/include/osp/pebbling/pebblers/pebblingILP/MultiProcessorPebbling.hpp +++ b/include/osp/pebbling/pebblers/pebblingILP/MultiProcessorPebbling.hpp @@ -239,9 +239,9 @@ class 
MultiProcessorPebbling : public Scheduler { template void MultiProcessorPebbling::SolveIlp() { - if (!verbose_) { - model_.SetIntParam(COPT_INTPARAM_LOGTOCONSOLE, 0); - } + // if (!verbose_) { + // model_.SetIntParam(COPT_INTPARAM_LOGTOCONSOLE, 0); + // } model_.SetDblParam(COPT_DBLPARAM_TIMELIMIT, timeLimitSeconds_); model_.SetIntParam(COPT_INTPARAM_THREADS, 128); diff --git a/include/osp/pebbling/pebblers/pebblingILP/partialILP/AcyclicPartitioningILP.hpp b/include/osp/pebbling/pebblers/pebblingILP/partialILP/AcyclicPartitioningILP.hpp index e2bd1268..7c3088eb 100644 --- a/include/osp/pebbling/pebblers/pebblingILP/partialILP/AcyclicPartitioningILP.hpp +++ b/include/osp/pebbling/pebblers/pebblingILP/partialILP/AcyclicPartitioningILP.hpp @@ -156,7 +156,7 @@ class AcyclicPartitioningILP { template void AcyclicPartitioningILP::SolveIlp() { - model_.SetIntParam(COPT_INTPARAM_LOGTOCONSOLE, 0); + // model_.SetIntParam(COPT_INTPARAM_LOGTOCONSOLE, 0); model_.SetDblParam(COPT_DBLPARAM_TIMELIMIT, timeLimitSeconds_); model_.SetIntParam(COPT_INTPARAM_THREADS, 128);
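
The following is a minimal standalone sketch of the row-net mapping that the reader added in PATCH 2/3 implements, shown on plain std containers rather than the library's Hypergraph class; the struct and function names are illustrative only and not part of the codebase. Columns become vertices whose work weight is their nonzero count, and every row with at least one nonzero becomes a hyperedge over the columns it touches.

#include <cstddef>
#include <utility>
#include <vector>

// Illustrative only: a stripped-down hypergraph holder for the row-net model.
struct RowNetHypergraph {
    std::vector<std::size_t> vertexWorkWeight;         // one weight per matrix column
    std::vector<std::vector<std::size_t>> hyperedges;  // one pin list per non-empty row
};

// Builds the row-net hypergraph from 0-based (row, col) nonzero coordinates.
RowNetHypergraph BuildRowNetHypergraph(std::size_t nRows, std::size_t nCols,
                                       const std::vector<std::pair<std::size_t, std::size_t>> &nonzeros) {
    RowNetHypergraph hg;
    hg.vertexWorkWeight.assign(nCols, 0);
    std::vector<std::vector<std::size_t>> pins(nRows);
    for (const auto &[row, col] : nonzeros) {
        pins[row].push_back(col);    // the row's hyperedge collects the columns of its nonzeros
        ++hg.vertexWorkWeight[col];  // vertex weight = number of nonzeros in the column
    }
    for (auto &edge : pins) {
        if (!edge.empty()) {         // like the reader above, drop empty hyperedges
            hg.hyperedges.push_back(std::move(edge));
        }
    }
    return hg;
}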