From cd4e00f57119918451829a1b88a92c7a56c9d891 Mon Sep 17 00:00:00 2001 From: ppapp Date: Thu, 9 Oct 2025 09:23:17 +0200 Subject: [PATCH 1/9] hypergraph first draft --- include/osp/partitioning/model/hypergraph.hpp | 177 ++++++++++++++++++ tests/CMakeLists.txt | 3 + tests/hypergraph_and_partition.cpp | 60 ++++++ 3 files changed, 240 insertions(+) create mode 100644 include/osp/partitioning/model/hypergraph.hpp create mode 100644 tests/hypergraph_and_partition.cpp diff --git a/include/osp/partitioning/model/hypergraph.hpp b/include/osp/partitioning/model/hypergraph.hpp new file mode 100644 index 00000000..dc756e4b --- /dev/null +++ b/include/osp/partitioning/model/hypergraph.hpp @@ -0,0 +1,177 @@ +/* +Copyright 2024 Huawei Technologies Co., Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +@author Toni Boehnlein, Benjamin Lozes, Pal Andras Papp, Raphael S. Steiner +*/ +#pragma once + +#include +#include +#include "osp/concepts/computational_dag_concept.hpp" + +namespace osp { + +class hypergraph { + + public: + + hypergraph() = default; + + hypergraph(unsigned num_vertices_, unsigned num_hyperedges_) + : Num_vertices(num_vertices_), Num_hyperedges(num_hyperedges_), vertex_weights(num_vertices_, 0), + incident_hyperedges_to_vertex(num_vertices_), vertices_in_hyperedge(num_hyperedges_){} + + hypergraph(const hypergraph &other) = default; + hypergraph &operator=(const hypergraph &other) = default; + + virtual ~hypergraph() = default; + + inline unsigned num_vertices() const { return Num_vertices; } + inline unsigned num_hyperedges() const { return Num_hyperedges; } + inline unsigned num_pins() const { return Num_pins; } + + void add_pin(unsigned vertex_idx, unsigned hyperedge_idx); + void add_vertex(unsigned weight = 0); + void add_empty_hyperedge(); + void add_hyperedge(const std::vector& pins); + void set_vertex_weight(unsigned vertex_idx, int weight); + + void clear(); + void reset(unsigned num_vertices_, unsigned num_hyperedges_); + + inline const std::vector &get_incident_hyperedges(unsigned vertex) const { return incident_hyperedges_to_vertex[vertex]; } + inline const std::vector &get_vertices_in_hyperedge(unsigned hyperedge) const { return vertices_in_hyperedge[hyperedge]; } + + template + void convert_from_cdag_as_dag(const Graph_t& dag); + + template + void convert_from_cdag_as_hyperdag(const Graph_t& dag); + + void read_spmv_from_matrixmarket(); + + private: + unsigned Num_vertices = 0, Num_hyperedges = 0, Num_pins = 0; + + std::vector vertex_weights; + + std::vector> incident_hyperedges_to_vertex; + std::vector> vertices_in_hyperedge; +}; + +void hypergraph::add_pin(unsigned vertex_idx, unsigned hyperedge_idx) +{ + if(vertex_idx >= Num_vertices) + { + throw std::invalid_argument("Invalid Argument while adding pin: vertex index out of range."); + } + else if(hyperedge_idx >= Num_hyperedges) + { + throw std::invalid_argument("Invalid Argument while adding pin: hyperedge index out of range."); + } + else{ + incident_hyperedges_to_vertex[vertex_idx].push_back(hyperedge_idx); + 
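+        // the pin is recorded in both incidence structures, keeping the
+        // vertex-to-hyperedge and hyperedge-to-vertex lookups consistent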
vertices_in_hyperedge[hyperedge_idx].push_back(vertex_idx); + ++Num_pins; + } +} + +void hypergraph::add_vertex(unsigned weight = 0) +{ + vertex_weights.push_back(weight); + incident_hyperedges_to_vertex.emplace_back(); + ++Num_vertices; +} + +void hypergraph::add_empty_hyperedge() +{ + vertices_in_hyperedge.emplace_back(); + ++Num_hyperedges; +} + +void hypergraph::add_hyperedge(const std::vector& pins) +{ + vertices_in_hyperedge.emplace_back(pins); + for(unsigned vertex : pins) + incident_hyperedges_to_vertex[vertex].push_back(Num_hyperedges); + ++Num_hyperedges; + Num_pins += pins.size(); +} + +void hypergraph::set_vertex_weight(unsigned vertex_idx, int weight) +{ + if(vertex_idx >= Num_vertices) + throw std::invalid_argument("Invalid Argument while setting vertex weight: vertex index out of range."); + else + vertex_weights[vertex_idx] = weight; +} + +void hypergraph::clear() +{ + Num_vertices = 0; + Num_hyperedges = 0; + Num_pins = 0; + + vertex_weights.clear(); + incident_hyperedges_to_vertex.clear(); + vertices_in_hyperedge.clear(); +} + +void hypergraph::reset(unsigned num_vertices_, unsigned num_hyperedges_) +{ + clear(); + + Num_vertices = num_vertices_; + Num_hyperedges = num_hyperedges_; + + vertex_weights.resize(num_vertices_, 0); + incident_hyperedges_to_vertex.resize(num_vertices_); + vertices_in_hyperedge.resize(num_hyperedges_); +} + +template +void convert_from_cdag_as_dag(const Graph_t& dag) +{ + reset(dag.num_vertices(), dag.num_edges()); + for(const auto &node : dag.vertices()) + { + set_vertex_weight(node, dag.vertex_work_weight(node)); + for (const auto &child : dag.children(node)) + add_hyperedge({node, child}); + } +} + +template +void convert_from_cdag_as_hyperdag(const Graph_t& dag) +{ + unsigned nr_of_non_sinks = 0; + for(const auto &node : dag.vertices()) + if(dag.out_degree(node) > 0) + ++ nr_of_non_sinks; + + reset(dag.num_vertices(), nr_of_non_sinks); + for(const auto &node : dag.vertices()) + { + set_vertex_weight(node, dag.vertex_work_weight(node)); + if(dag.out_degree(node) == 0) + continue; + std::vector new_hyperedge({node}); + for (const auto &child : dag.children(node)) + new_hyperedge.push_back(child); + add_hyperedge(new_hyperedge); + } +} + +} // namespace osp \ No newline at end of file diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index d817b7bf..9b2232dd 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -90,6 +90,9 @@ _add_test( bsp_greedy_recomputer ) ## pebbling model _add_test( pebbling_schedule_class ) +## partitioning model +_add_test( hypergraph_and_partition ) + ## auxiliary _add_test( intpower ) diff --git a/tests/hypergraph_and_partition.cpp b/tests/hypergraph_and_partition.cpp new file mode 100644 index 00000000..4d00acb3 --- /dev/null +++ b/tests/hypergraph_and_partition.cpp @@ -0,0 +1,60 @@ +/* +Copyright 2024 Huawei Technologies Co., Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +@author Toni Boehnlein, Benjamin Lozes, Pal Andras Papp, Raphael S. 
Steiner +*/ + +#define BOOST_TEST_MODULE HYPERGRAPH_AND_PARTITION +#include + +#include +#include +#include + +#include "osp/partitioning/model/hypergraph.hpp" +#include "osp/graph_implementations/boost_graphs/boost_graph.hpp" +#include "osp/auxiliary/io/hdag_graph_file_reader.hpp" + +using namespace osp; + +BOOST_AUTO_TEST_CASE(Hypergraph_and_Partition_test) { + + using graph = boost_graph_uint_t; + + // Getting root git directory + std::filesystem::path cwd = std::filesystem::current_path(); + std::cout << cwd << std::endl; + while ((!cwd.empty()) && (cwd.filename() != "OneStopParallel")) { + cwd = cwd.parent_path(); + std::cout << cwd << std::endl; + } + + graph DAG; + + bool status = file_reader::readComputationalDagHyperdagFormatDB( + (cwd / "data/spaa/tiny/instance_bicgstab.hdag").string(), DAG); + + BOOST_CHECK(status); + + hypergraph Hgraph; + + Hgraph.convert_from_cdag_as_dag(DAG); + BOOST_CHECK_EQUAL(DAG.num_vertices(), Hgraph.num_vertices()); + BOOST_CHECK_EQUAL(DAG.num_edges(), Hgraph.num_hyperedges()); + BOOST_CHECK_EQUAL(DAG.num_edges()*2, Hgraph.num_pins()); + + Hgraph.convert_from_cdag_as_hyperdag(DAG); + +}; \ No newline at end of file From 4fc5cc7069d7786d8dec40d958154298293689a2 Mon Sep 17 00:00:00 2001 From: ppapp Date: Mon, 13 Oct 2025 14:36:10 +0200 Subject: [PATCH 2/9] hypergraphs and partitioning, next version --- .../io/mtx_hypergraph_file_reader.hpp | 179 +++++++++++++ .../auxiliary/io/partitioning_file_writer.hpp | 59 +++++ include/osp/partitioning/model/hypergraph.hpp | 91 ++++--- .../osp/partitioning/model/partitioning.hpp | 168 ++++++++++++ .../model/partitioning_problem.hpp | 79 ++++++ .../model/partitioning_replication.hpp | 210 +++++++++++++++ .../partitioning/partitioners/generic_FM.hpp | 239 ++++++++++++++++++ .../partitioners/partitioning_ILP.hpp | 137 ++++++++++ .../partitioners/partitioning_ILP_base.hpp | 151 +++++++++++ .../partitioning_ILP_replication.hpp | 182 +++++++++++++ tests/CMakeLists.txt | 2 + tests/hypergraph_and_partition.cpp | 136 +++++++++- tests/ilp_hypergraph_partitioning.cpp | 130 ++++++++++ 13 files changed, 1723 insertions(+), 40 deletions(-) create mode 100644 include/osp/auxiliary/io/mtx_hypergraph_file_reader.hpp create mode 100644 include/osp/auxiliary/io/partitioning_file_writer.hpp create mode 100644 include/osp/partitioning/model/partitioning.hpp create mode 100644 include/osp/partitioning/model/partitioning_problem.hpp create mode 100644 include/osp/partitioning/model/partitioning_replication.hpp create mode 100644 include/osp/partitioning/partitioners/generic_FM.hpp create mode 100644 include/osp/partitioning/partitioners/partitioning_ILP.hpp create mode 100644 include/osp/partitioning/partitioners/partitioning_ILP_base.hpp create mode 100644 include/osp/partitioning/partitioners/partitioning_ILP_replication.hpp create mode 100644 tests/ilp_hypergraph_partitioning.cpp diff --git a/include/osp/auxiliary/io/mtx_hypergraph_file_reader.hpp b/include/osp/auxiliary/io/mtx_hypergraph_file_reader.hpp new file mode 100644 index 00000000..d112e5fe --- /dev/null +++ b/include/osp/auxiliary/io/mtx_hypergraph_file_reader.hpp @@ -0,0 +1,179 @@ +/* +Copyright 2024 Huawei Technologies Co., Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +@author Toni Boehnlein, Christos Matzoros, Benjamin Lozes, Pal Andras Papp, Raphael S. Steiner +*/ + +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include "osp/partitioning/model/hypergraph.hpp" +#include "osp/auxiliary/io/filepath_checker.hpp" + +namespace osp { +namespace file_reader { + +// reads a matrix into Hypergraph format, where nonzeros are vertices, and rows/columns are hyperedges +bool readHypergraphMartixMarketFormat(std::ifstream& infile, Hypergraph& hgraph) { + + std::string line; + + // Skip comments or empty lines (robustly) + while (std::getline(infile, line)) { + if (line.empty() || line[0] == '%') continue; + + // Null byte check + if (line.find('\0') != std::string::npos) { + std::cerr << "Error: Null byte detected in header line.\n"; + return false; + } + + if (line.size() > MAX_LINE_LENGTH) { + std::cerr << "Error: Line too long, possible malformed or malicious file.\n"; + return false; + } + break; // We found the actual header line + } + + if (infile.eof()) { + std::cerr << "Error: Unexpected end of file while reading header.\n"; + return false; + } + + int M_row = 0, M_col = 0, nEntries = 0; + + std::istringstream header_stream(line); + if (!(header_stream >> M_row >> M_col >> nEntries) || + M_row <= 0 || M_col <= 0) { + std::cerr << "Error: Invalid header.\n"; + return false; + } + + const unsigned num_nodes = static_cast(nEntries); + if (num_nodes > std::numeric_limits::max()) { + std::cerr << "Error: Matrix dimension too large for vertex type.\n"; + return false; + } + + std::vector node_work_wts(num_nodes, 0); + std::vector node_comm_wts(num_nodes, 1); + + hgraph.reset(num_nodes, 0); + for (unsigned node = 0; node < num_nodes; ++node) { + hgraph.set_vertex_weight(node, 1); + } + + std::vector> row_hyperedges(static_cast(M_row)); + std::vector> column_hyperedges(static_cast(M_col)); + + int entries_read = 0; + while (entries_read < nEntries && std::getline(infile, line)) { + if (line.empty() || line[0] == '%') continue; + if (line.size() > MAX_LINE_LENGTH) { + std::cerr << "Error: Line too long.\n"; + return false; + } + + std::istringstream entry_stream(line); + int row = -1, col = -1; + double val = 0.0; + + if (!(entry_stream >> row >> col >> val)) { + std::cerr << "Error: Malformed matrix entry.\n"; + return false; + } + + row -= 1; col -= 1; // Convert to 0-based + + if (row < 0 || col < 0 || row >= M_row || col >= M_col) { + std::cerr << "Error: Matrix entry out of bounds.\n"; + return false; + } + + if (static_cast(row) >= num_nodes || static_cast(col) >= num_nodes) { + std::cerr << "Error: Index exceeds vertex type limit.\n"; + return false; + } + + row_hyperedges[static_cast(row)].push_back(static_cast(entries_read)); + column_hyperedges[static_cast(col)].push_back(static_cast(entries_read)); + + ++entries_read; + } + + if (entries_read != nEntries) { + std::cerr << "Error: Incomplete matrix entries.\n"; + return false; + } + + while (std::getline(infile, line)) { + if (!line.empty() && line[0] != '%') { + std::cerr << "Error: Extra data after matrix content.\n"; + return false; + } + } + + for(unsigned row = 0; 
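+        /* one hyperedge is emitted per non-empty row (and, below, per non-empty column) */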
row < static_cast<unsigned>(M_row); ++row)
+        if(!row_hyperedges[row].empty())
+            hgraph.add_hyperedge(row_hyperedges[row]);
+
+    for(unsigned col = 0; col < static_cast<unsigned>(M_col); ++col)
+        if(!column_hyperedges[col].empty())
+            hgraph.add_hyperedge(column_hyperedges[col]);
+
+    return true;
+}
+
+bool readHypergraphMartixMarketFormat(const std::string& filename, Hypergraph& hgraph) {
+    // Ensure the file is .mtx format
+    if (std::filesystem::path(filename).extension() != ".mtx") {
+        std::cerr << "Error: Only .mtx files are accepted.\n";
+        return false;
+    }
+
+    if (!isPathSafe(filename)) {
+        std::cerr << "Error: Unsafe file path (potential traversal attack).\n";
+        return false;
+    }
+
+    if (std::filesystem::is_symlink(filename)) {
+        std::cerr << "Error: Symbolic links are not allowed.\n";
+        return false;
+    }
+
+    if (!std::filesystem::is_regular_file(filename)) {
+        std::cerr << "Error: Input is not a regular file.\n";
+        return false;
+    }
+
+    std::ifstream infile(filename);
+    if (!infile.is_open()) {
+        std::cerr << "Error: Failed to open file.\n";
+        return false;
+    }
+
+    return readHypergraphMartixMarketFormat(infile, hgraph);
+}
+
+} // namespace file_reader
+
+} // namespace osp
\ No newline at end of file
diff --git a/include/osp/auxiliary/io/partitioning_file_writer.hpp b/include/osp/auxiliary/io/partitioning_file_writer.hpp
new file mode 100644
index 00000000..60f836f5
--- /dev/null
+++ b/include/osp/auxiliary/io/partitioning_file_writer.hpp
@@ -0,0 +1,59 @@
+/*
+Copyright 2024 Huawei Technologies Co., Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+@author Toni Boehnlein, Benjamin Lozes, Pal Andras Papp, Raphael S. Steiner
+*/
+
+#pragma once
+
+#include "osp/partitioning/model/partitioning.hpp"
+#include "osp/partitioning/model/partitioning_replication.hpp"
+#include <fstream>
+#include <ostream>
+
+namespace osp { namespace file_writer {
+
+void write_txt(std::ostream &os, const Partitioning &partition) {
+
+    os << "%% Partitioning for " << partition.getInstance().getNumberOfPartitions() << " parts." << std::endl;
+
+    for(unsigned node = 0; node < partition.getInstance().getHypergraph().num_vertices(); ++node)
+        os << node << " " << partition.assignedPartition(node) << std::endl;
+}
+
+void write_txt(const std::string &filename, const Partitioning &partition) {
+    std::ofstream os(filename);
+    write_txt(os, partition);
+}
+
+void write_txt(std::ostream &os, const PartitioningWithReplication &partition) {
+
+    os << "%% Partitioning for " << partition.getInstance().getNumberOfPartitions() << " parts with replication."
<< std::endl; + + for(unsigned node = 0; node < partition.getInstance().getHypergraph().num_vertices(); ++node) + { + os << node; + for(unsigned part : partition.assignedPartitions(node)) + os << " " << part; + os << std::endl; + } +} + +void write_txt(const std::string &filename, const PartitioningWithReplication &partition) { + std::ofstream os(filename); + write_txt(os, partition); +} + +}} // namespace osp::file_writer \ No newline at end of file diff --git a/include/osp/partitioning/model/hypergraph.hpp b/include/osp/partitioning/model/hypergraph.hpp index dc756e4b..1f9fce40 100644 --- a/include/osp/partitioning/model/hypergraph.hpp +++ b/include/osp/partitioning/model/hypergraph.hpp @@ -23,30 +23,35 @@ limitations under the License. namespace osp { -class hypergraph { +class Hypergraph { public: - hypergraph() = default; + Hypergraph() = default; - hypergraph(unsigned num_vertices_, unsigned num_hyperedges_) - : Num_vertices(num_vertices_), Num_hyperedges(num_hyperedges_), vertex_weights(num_vertices_, 0), + Hypergraph(unsigned num_vertices_, unsigned num_hyperedges_) + : Num_vertices(num_vertices_), Num_hyperedges(num_hyperedges_), vertex_weights(num_vertices_, 1), hyperedge_weights(num_hyperedges_, 1), incident_hyperedges_to_vertex(num_vertices_), vertices_in_hyperedge(num_hyperedges_){} - hypergraph(const hypergraph &other) = default; - hypergraph &operator=(const hypergraph &other) = default; + Hypergraph(const Hypergraph &other) = default; + Hypergraph &operator=(const Hypergraph &other) = default; - virtual ~hypergraph() = default; + virtual ~Hypergraph() = default; inline unsigned num_vertices() const { return Num_vertices; } inline unsigned num_hyperedges() const { return Num_hyperedges; } inline unsigned num_pins() const { return Num_pins; } + inline int get_vertex_weight(unsigned node) const { return vertex_weights[node]; } + inline int get_hyperedge_weight(unsigned hyperedge) const { return hyperedge_weights[hyperedge]; } void add_pin(unsigned vertex_idx, unsigned hyperedge_idx); - void add_vertex(unsigned weight = 0); - void add_empty_hyperedge(); - void add_hyperedge(const std::vector& pins); + void add_vertex(int weight = 1); + void add_empty_hyperedge(int weight = 1); + void add_hyperedge(const std::vector& pins, int weight = 1); void set_vertex_weight(unsigned vertex_idx, int weight); + void set_hyperedge_weight(unsigned hyperedge_idx, int weight); + + int compute_total_vertex_weight() const; void clear(); void reset(unsigned num_vertices_, unsigned num_hyperedges_); @@ -60,18 +65,17 @@ class hypergraph { template void convert_from_cdag_as_hyperdag(const Graph_t& dag); - void read_spmv_from_matrixmarket(); - private: unsigned Num_vertices = 0, Num_hyperedges = 0, Num_pins = 0; std::vector vertex_weights; + std::vector hyperedge_weights; std::vector> incident_hyperedges_to_vertex; std::vector> vertices_in_hyperedge; }; -void hypergraph::add_pin(unsigned vertex_idx, unsigned hyperedge_idx) +void Hypergraph::add_pin(unsigned vertex_idx, unsigned hyperedge_idx) { if(vertex_idx >= Num_vertices) { @@ -88,29 +92,31 @@ void hypergraph::add_pin(unsigned vertex_idx, unsigned hyperedge_idx) } } -void hypergraph::add_vertex(unsigned weight = 0) +void Hypergraph::add_vertex(int weight) { vertex_weights.push_back(weight); incident_hyperedges_to_vertex.emplace_back(); ++Num_vertices; } -void hypergraph::add_empty_hyperedge() +void Hypergraph::add_empty_hyperedge(int weight) { vertices_in_hyperedge.emplace_back(); + hyperedge_weights.push_back(weight); ++Num_hyperedges; } -void 
hypergraph::add_hyperedge(const std::vector& pins) +void Hypergraph::add_hyperedge(const std::vector& pins, int weight) { vertices_in_hyperedge.emplace_back(pins); + hyperedge_weights.push_back(weight); for(unsigned vertex : pins) incident_hyperedges_to_vertex[vertex].push_back(Num_hyperedges); ++Num_hyperedges; - Num_pins += pins.size(); + Num_pins += static_cast(pins.size()); } -void hypergraph::set_vertex_weight(unsigned vertex_idx, int weight) +void Hypergraph::set_vertex_weight(unsigned vertex_idx, int weight) { if(vertex_idx >= Num_vertices) throw std::invalid_argument("Invalid Argument while setting vertex weight: vertex index out of range."); @@ -118,59 +124,72 @@ void hypergraph::set_vertex_weight(unsigned vertex_idx, int weight) vertex_weights[vertex_idx] = weight; } -void hypergraph::clear() +void Hypergraph::set_hyperedge_weight(unsigned hyperedge_idx, int weight) +{ + if(hyperedge_idx >= Num_hyperedges) + throw std::invalid_argument("Invalid Argument while setting hyperedge weight: hyepredge index out of range."); + else + hyperedge_weights[hyperedge_idx] = weight; +} + +int Hypergraph::compute_total_vertex_weight() const +{ + int total = 0; + for(unsigned node = 0; node < Num_vertices; ++node) + total += vertex_weights[node]; + return total; +} + +void Hypergraph::clear() { Num_vertices = 0; Num_hyperedges = 0; Num_pins = 0; vertex_weights.clear(); + hyperedge_weights.clear(); incident_hyperedges_to_vertex.clear(); vertices_in_hyperedge.clear(); } -void hypergraph::reset(unsigned num_vertices_, unsigned num_hyperedges_) +void Hypergraph::reset(unsigned num_vertices_, unsigned num_hyperedges_) { clear(); Num_vertices = num_vertices_; Num_hyperedges = num_hyperedges_; - vertex_weights.resize(num_vertices_, 0); + vertex_weights.resize(num_vertices_, 1); + hyperedge_weights.resize(num_hyperedges_, 1); incident_hyperedges_to_vertex.resize(num_vertices_); vertices_in_hyperedge.resize(num_hyperedges_); } template -void convert_from_cdag_as_dag(const Graph_t& dag) +void Hypergraph::convert_from_cdag_as_dag(const Graph_t& dag) { - reset(dag.num_vertices(), dag.num_edges()); + reset(static_cast(dag.num_vertices()), 0); for(const auto &node : dag.vertices()) { - set_vertex_weight(node, dag.vertex_work_weight(node)); + set_vertex_weight(static_cast(node), static_cast(dag.vertex_work_weight(node))); for (const auto &child : dag.children(node)) - add_hyperedge({node, child}); + add_hyperedge({static_cast(node), static_cast(child)}); // TODO add edge weights if present } } template -void convert_from_cdag_as_hyperdag(const Graph_t& dag) +void Hypergraph::convert_from_cdag_as_hyperdag(const Graph_t& dag) { - unsigned nr_of_non_sinks = 0; - for(const auto &node : dag.vertices()) - if(dag.out_degree(node) > 0) - ++ nr_of_non_sinks; - - reset(dag.num_vertices(), nr_of_non_sinks); + reset(static_cast(dag.num_vertices()), 0); for(const auto &node : dag.vertices()) { - set_vertex_weight(node, dag.vertex_work_weight(node)); + set_vertex_weight(static_cast(node), static_cast(dag.vertex_work_weight(node))); if(dag.out_degree(node) == 0) continue; - std::vector new_hyperedge({node}); + std::vector new_hyperedge({static_cast(node)}); for (const auto &child : dag.children(node)) - new_hyperedge.push_back(child); - add_hyperedge(new_hyperedge); + new_hyperedge.push_back(static_cast(child)); + add_hyperedge(new_hyperedge, static_cast(dag.vertex_comm_weight(node))); } } diff --git a/include/osp/partitioning/model/partitioning.hpp b/include/osp/partitioning/model/partitioning.hpp new file mode 100644 index 
00000000..0f99781e --- /dev/null +++ b/include/osp/partitioning/model/partitioning.hpp @@ -0,0 +1,168 @@ +/* +Copyright 2024 Huawei Technologies Co., Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +@author Toni Boehnlein, Benjamin Lozes, Pal Andras Papp, Raphael S. Steiner +*/ + +#pragma once + +#include + +#include "osp/partitioning/model/partitioning_problem.hpp" + +namespace osp { + +// Represents a partitioning where each vertex of a hypergraph is assigned to a specifc partition + +class Partitioning { + + private: + + const PartitioningProblem *instance; + + std::vector node_to_partition_assignment; + + public: + + Partitioning() = delete; + + Partitioning(const PartitioningProblem &inst) + : instance(&inst), node_to_partition_assignment(std::vector(inst.getHypergraph().num_vertices(), 0)) {} + + Partitioning(const PartitioningProblem &inst, const std::vector &partition_assignment_) + : instance(&inst), node_to_partition_assignment(partition_assignment_) {} + + Partitioning(const Partitioning &partitioning_) = default; + Partitioning(Partitioning &&partitioning_) = default; + + Partitioning &operator=(const Partitioning &partitioning_) = default; + + virtual ~Partitioning() = default; + + + // getters and setters + + inline const PartitioningProblem &getInstance() const { return *instance; } + + inline unsigned assignedPartition(unsigned node) const { return node_to_partition_assignment[node]; } + inline const std::vector &assignedPartitions() const { return node_to_partition_assignment; } + inline std::vector &assignedPartitions() { return node_to_partition_assignment; } + + inline void setAssignedPartition(unsigned node, unsigned part) { node_to_partition_assignment.at(node) = part; } + void setAssignedPartitions(const std::vector &vec) { + + if (vec.size() == static_cast(instance->getHypergraph().num_vertices()) ) { + node_to_partition_assignment = vec; + } else { + throw std::invalid_argument( + "Invalid Argument while assigning processors: size does not match number of nodes."); + } + } + void setAssignedPartitions(std::vector &&vec) { + + if (vec.size() == static_cast(instance->getHypergraph().num_vertices()) ) { + node_to_partition_assignment = vec; + } else { + throw std::invalid_argument( + "Invalid Argument while assigning processors: size does not match number of nodes."); + } + } + + std::vector getPartitionContent(unsigned part) const { + + std::vector content; + for (unsigned node = 0; node < node_to_partition_assignment.size(); ++node) { + + if (node_to_partition_assignment[node] == part) { + content.push_back(node); + } + } + + return content; + } + + void resetPartition() { + node_to_partition_assignment.clear(); + node_to_partition_assignment.resize(instance->getHypergraph().num_vertices(), 0); + } + + // costs and validity + + std::vector computeLambdaForHyperedges() const; + int computeConnectivityCost() const; + int computeCutNetCost() const; + + bool satisfiesBalanceConstraint() const; + +}; + +std::vector Partitioning::computeLambdaForHyperedges() const +{ + std::vector 
<unsigned> lambda(instance->getHypergraph().num_hyperedges(), 0);
+    for(unsigned edge_idx = 0; edge_idx < instance->getHypergraph().num_hyperedges(); ++edge_idx)
+    {
+        const std::vector<unsigned> &hyperedge = instance->getHypergraph().get_vertices_in_hyperedge(edge_idx);
+        if(hyperedge.empty())
+            continue;
+        std::vector<bool> intersects_part(instance->getNumberOfPartitions(), false);
+        for(const unsigned& node : hyperedge)
+            intersects_part[node_to_partition_assignment[node]] = true;
+        for(unsigned part = 0; part < instance->getNumberOfPartitions(); ++part)
+            if(intersects_part[part])
+                ++lambda[edge_idx];
+    }
+    return lambda;
+}
+
+int Partitioning::computeConnectivityCost() const {
+
+    int total = 0;
+    std::vector<unsigned> lambda = computeLambdaForHyperedges();
+
+    for(unsigned edge_idx = 0; edge_idx < instance->getHypergraph().num_hyperedges(); ++edge_idx)
+        if(lambda[edge_idx] >= 1)
+            total += (static_cast<int>(lambda[edge_idx]) - 1) * instance->getHypergraph().get_hyperedge_weight(edge_idx);
+
+    return total;
+}
+
+int Partitioning::computeCutNetCost() const {
+
+    int total = 0;
+    std::vector<unsigned> lambda = computeLambdaForHyperedges();
+    for(unsigned edge_idx = 0; edge_idx < instance->getHypergraph().num_hyperedges(); ++edge_idx)
+        if(lambda[edge_idx] > 1)
+            total += instance->getHypergraph().get_hyperedge_weight(edge_idx);
+
+    return total;
+}
+
+bool Partitioning::satisfiesBalanceConstraint() const {
+    std::vector<int> weight(instance->getNumberOfPartitions(), 0);
+    for (unsigned node = 0; node < node_to_partition_assignment.size(); ++node) {
+        if (node_to_partition_assignment[node] >= instance->getNumberOfPartitions())
+            throw std::invalid_argument("Invalid Argument while checking balance constraint: partition ID out of range.");
+        else
+            weight[node_to_partition_assignment[node]] += instance->getHypergraph().get_vertex_weight(node);
+    }
+
+    for(unsigned part = 0; part < instance->getNumberOfPartitions(); ++part)
+        if(weight[part] > instance->getMaxWeightPerPartition())
+            return false;
+
+    return true;
+}
+
+} // namespace osp
\ No newline at end of file
diff --git a/include/osp/partitioning/model/partitioning_problem.hpp b/include/osp/partitioning/model/partitioning_problem.hpp
new file mode 100644
index 00000000..33e8dc1e
--- /dev/null
+++ b/include/osp/partitioning/model/partitioning_problem.hpp
@@ -0,0 +1,79 @@
+/*
+Copyright 2024 Huawei Technologies Co., Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+@author Toni Boehnlein, Benjamin Lozes, Pal Andras Papp, Raphael S.
Steiner +*/ + +#pragma once + +#include +#include + +#include "osp/partitioning/model/hypergraph.hpp" + +namespace osp { + +// represents a hypergraph partitioning problem into a fixed number of parts with a balance constraint +class PartitioningProblem { + + private: + Hypergraph hgraph; + + unsigned nr_of_partitions; + int max_weight_per_partition; + + bool allows_replication = false; + + public: + + PartitioningProblem() = default; + + PartitioningProblem(const Hypergraph &hgraph_, unsigned nr_parts_ = 2, int max_weight_ = std::numeric_limits::max()) + : hgraph(hgraph_), nr_of_partitions(nr_parts_), max_weight_per_partition(max_weight_) {} + + PartitioningProblem(const Hypergraph &&hgraph_, unsigned nr_parts_ = 2, int max_weight_ = std::numeric_limits::max()) + : hgraph(hgraph_), nr_of_partitions(nr_parts_), max_weight_per_partition(max_weight_) {} + + PartitioningProblem(const PartitioningProblem &other) = default; + PartitioningProblem(PartitioningProblem &&other) = default; + + PartitioningProblem &operator=(const PartitioningProblem &other) = default; + PartitioningProblem &operator=(PartitioningProblem &&other) = default; + + // getters + inline const Hypergraph &getHypergraph() const { return hgraph; } + inline Hypergraph &getHypergraph() { return hgraph; } + + inline unsigned getNumberOfPartitions() const { return nr_of_partitions; } + inline int getMaxWeightPerPartition() const { return max_weight_per_partition; } + inline bool getAllowsReplication() const { return allows_replication; } + + // setters + inline void setHypergraph(const Hypergraph &hgraph_) { hgraph = hgraph_; } + + inline void setNumberOfPartitions(unsigned nr_parts_) { nr_of_partitions = nr_parts_; } + inline void setAllowsReplication(bool allowed_) { allows_replication = allowed_; } + + inline void setMaxWeightExplicitly(int max_weight_) { max_weight_per_partition = max_weight_; } + void setMaxWeightViaImbalanceFactor(double imbalance){ + if(imbalance < 0 ) + throw std::invalid_argument("Invalid Argument while setting imbalance parameter: parameter is negative."); + else + max_weight_per_partition = static_cast(ceil(hgraph.compute_total_vertex_weight()/ static_cast(nr_of_partitions) * (1.0+imbalance))); + } +}; + + +} // namespace osp \ No newline at end of file diff --git a/include/osp/partitioning/model/partitioning_replication.hpp b/include/osp/partitioning/model/partitioning_replication.hpp new file mode 100644 index 00000000..4db8a5be --- /dev/null +++ b/include/osp/partitioning/model/partitioning_replication.hpp @@ -0,0 +1,210 @@ +/* +Copyright 2024 Huawei Technologies Co., Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +@author Toni Boehnlein, Benjamin Lozes, Pal Andras Papp, Raphael S. 
Steiner +*/ + +#pragma once + +#include + +#include "osp/partitioning/model/partitioning_problem.hpp" + +namespace osp { + +// Represents a partitioning where each vertex of a hypergraph can be assinged to one or more partitions + +class PartitioningWithReplication { + + private: + + const PartitioningProblem *instance; + + std::vector > node_to_partitions_assignment; + + public: + + PartitioningWithReplication() = delete; + + PartitioningWithReplication(const PartitioningProblem &inst) + : instance(&inst), node_to_partitions_assignment(std::vector>(inst.getHypergraph().num_vertices(), {0})) {} + + PartitioningWithReplication(const PartitioningProblem &inst, const std::vector > &partition_assignment_) + : instance(&inst), node_to_partitions_assignment(partition_assignment_) {} + + PartitioningWithReplication(const PartitioningWithReplication &partitioning_) = default; + PartitioningWithReplication(PartitioningWithReplication &&partitioning_) = default; + + PartitioningWithReplication &operator=(const PartitioningWithReplication &partitioning_) = default; + + virtual ~PartitioningWithReplication() = default; + + + // getters and setters + + inline const PartitioningProblem &getInstance() const { return *instance; } + + inline std::vector assignedPartitions(unsigned node) const { return node_to_partitions_assignment[node]; } + inline const std::vector > &assignedPartitions() const { return node_to_partitions_assignment; } + inline std::vector > &assignedPartitions() { return node_to_partitions_assignment; } + + inline void setAssignedPartitions(unsigned node, const std::vector& parts) { node_to_partitions_assignment.at(node) = parts; } + void setAssignedPartitionVectors(const std::vector > &vec) { + + if (vec.size() == static_cast(instance->getHypergraph().num_vertices()) ) { + node_to_partitions_assignment = vec; + } else { + throw std::invalid_argument( + "Invalid Argument while assigning processors: size does not match number of nodes."); + } + } + void setAssignedPartitionVectors(std::vector > &&vec) { + + if (vec.size() == static_cast(instance->getHypergraph().num_vertices()) ) { + node_to_partitions_assignment = vec; + } else { + throw std::invalid_argument( + "Invalid Argument while assigning processors: size does not match number of nodes."); + } + } + + std::vector > getPartitionContents() const { + + std::vector > content(instance->getNumberOfPartitions()); + for (unsigned node = 0; node < node_to_partitions_assignment.size(); ++node) + for(unsigned part : node_to_partitions_assignment[node]) + content[part].push_back(node); + + return content; + } + + void resetPartition() { + node_to_partitions_assignment.clear(); + node_to_partitions_assignment.resize(instance->getHypergraph().num_vertices(), {0}); + } + + // costs and validity + + int computeConnectivityCost() const; + int computeCutNetCost() const; + + bool satisfiesBalanceConstraint() const; + +}; + +int PartitioningWithReplication::computeConnectivityCost() const { + + // naive implementation. 
in the worst case this is exponential in the number of parts
+    if(instance->getNumberOfPartitions() > 16)
+        throw std::invalid_argument("Computing connectivity cost is not supported for more than 16 partitions.");
+
+    int total = 0;
+    std::vector<bool> part_used(instance->getNumberOfPartitions(), false);
+    for(unsigned edge_idx = 0; edge_idx < instance->getHypergraph().num_hyperedges(); ++edge_idx)
+    {
+        const std::vector<unsigned> &hyperedge = instance->getHypergraph().get_vertices_in_hyperedge(edge_idx);
+        if(hyperedge.empty())
+            continue;
+
+        // parts holding the only copy of some pin must appear in every cover
+        unsigned long mask = 0UL;
+        for(const unsigned& node : hyperedge)
+            if(node_to_partitions_assignment[node].size() == 1)
+                mask = mask | (1UL << node_to_partitions_assignment[node].front());
+
+        unsigned min_parts_to_cover = instance->getNumberOfPartitions();
+        unsigned long mask_limit = 1UL << instance->getNumberOfPartitions();
+        for(unsigned long subset_mask = 1UL; subset_mask < mask_limit; ++subset_mask)
+        {
+            if((subset_mask & mask) != mask)
+                continue;
+
+            unsigned nr_parts_used = 0;
+            for(unsigned part = 0; part < instance->getNumberOfPartitions(); ++part)
+            {
+                part_used[part] = (((1UL << part) & subset_mask) > 0);
+                nr_parts_used += static_cast<unsigned>(part_used[part]);
+            }
+
+            bool all_nodes_covered = true;
+            for(const unsigned& node : hyperedge)
+            {
+                bool node_covered = false;
+                for(unsigned part : node_to_partitions_assignment[node])
+                    if(part_used[part])
+                    {
+                        node_covered = true;
+                        break;
+                    }
+                if(!node_covered)
+                {
+                    all_nodes_covered = false;
+                    break;
+                }
+            }
+            if(all_nodes_covered)
+                min_parts_to_cover = std::min(min_parts_to_cover, nr_parts_used);
+        }
+
+        total += static_cast<int>(min_parts_to_cover - 1) * instance->getHypergraph().get_hyperedge_weight(edge_idx);
+    }
+
+    return total;
+}
+
+int PartitioningWithReplication::computeCutNetCost() const {
+
+    int total = 0;
+    for(unsigned edge_idx = 0; edge_idx < instance->getHypergraph().num_hyperedges(); ++edge_idx)
+    {
+        const std::vector<unsigned> &hyperedge = instance->getHypergraph().get_vertices_in_hyperedge(edge_idx);
+        if(hyperedge.empty())
+            continue;
+        std::vector<unsigned> nr_nodes_covered_by_part(instance->getNumberOfPartitions(), 0);
+        for(const unsigned& node : hyperedge)
+            for(unsigned part : node_to_partitions_assignment[node])
+                ++nr_nodes_covered_by_part[part];
+
+        bool covers_all = false;
+        for(unsigned part = 0; part < instance->getNumberOfPartitions(); ++part)
+            if(nr_nodes_covered_by_part[part] == hyperedge.size())
+                covers_all = true;
+
+        if(!covers_all)
+            total += instance->getHypergraph().get_hyperedge_weight(edge_idx);
+    }
+
+    return total;
+}
+
+bool PartitioningWithReplication::satisfiesBalanceConstraint() const {
+    std::vector<int> weight(instance->getNumberOfPartitions(), 0);
+    for (unsigned node = 0; node < node_to_partitions_assignment.size(); ++node)
+        for(unsigned part : node_to_partitions_assignment[node]){
+            if (part >= instance->getNumberOfPartitions())
+                throw std::invalid_argument("Invalid Argument while checking balance constraint: partition ID out of range.");
+            else
+                weight[part] += instance->getHypergraph().get_vertex_weight(node);
+        }
+
+    for(unsigned part = 0; part < instance->getNumberOfPartitions(); ++part)
+        if(weight[part] > instance->getMaxWeightPerPartition())
+            return false;
+
+    return true;
+}
+
+} // namespace osp
\ No newline at end of file
diff --git a/include/osp/partitioning/partitioners/generic_FM.hpp b/include/osp/partitioning/partitioners/generic_FM.hpp
new file mode 100644
index 00000000..c41ed880
---
/dev/null +++ b/include/osp/partitioning/partitioners/generic_FM.hpp @@ -0,0 +1,239 @@ +/* +Copyright 2024 Huawei Technologies Co., Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +@author Toni Boehnlein, Benjamin Lozes, Pal Andras Papp, Raphael S. Steiner +*/ + +#pragma once + +#include "osp/partitioning/model/partitioning.hpp" +#include +#include + +namespace osp{ + +class GenericFM { + + protected: + unsigned max_number_of_passes = 10; + unsigned max_nodes_in_part = 0; + + public: + + void ImprovePartitioning(Partitioning& partition); + + inline unsigned getMaxNumberOfPasses() const { return max_number_of_passes; } + inline void setMaxNumberOfPasses(unsigned passes_) { max_number_of_passes = passes_; } + inline unsigned getMaxNodesInPart() const { return max_nodes_in_part; } + inline void setMaxNodesInPart(unsigned max_nodes_) { max_nodes_in_part = max_nodes_; } +}; + +void GenericFM::ImprovePartitioning(Partitioning& partition) +{ + // Note: this algorithm disregards hyperedge weights, in order to keep the size of the gain bucket array bounded! + + if(partition.getInstance().getNumberOfPartitions() != 2) + { + std::cout << "Error: FM can only be used for 2 partitions." << std::endl; + return; + } + + if(!partition.satisfiesBalanceConstraint()) + { + std::cout << "Error: initial partition to FM does not satisfy balance constraint." 
<< std::endl; + return; + } + + const Hypergraph& Hgraph = partition.getInstance().getHypergraph(); + + unsigned max_degree = 0; + for(unsigned node = 0; node < Hgraph.num_vertices(); ++node) + max_degree = std::max(max_degree, static_cast(Hgraph.get_incident_hyperedges(node).size())); + + if(max_nodes_in_part == 0) // if not initialized + max_nodes_in_part = static_cast(ceil(static_cast(Hgraph.num_vertices()) * static_cast(partition.getInstance().getMaxWeightPerPartition()) + / static_cast(Hgraph.compute_total_vertex_weight()) )); + + for(unsigned pass_idx = 0; pass_idx < max_number_of_passes; ++pass_idx) + { + std::vector node_to_new_part = partition.assignedPartitions(); + std::vector locked(Hgraph.num_vertices(), false); + std::vector gain(Hgraph.num_vertices(), 0); + std::vector > nr_nodes_in_hyperedge_on_side(Hgraph.num_hyperedges(), std::vector(2, 0)); + int cost = 0; + + unsigned left_side = 0; + for(unsigned node = 0; node < Hgraph.num_vertices(); ++node) + if(partition.assignedPartition(node) == 0) + ++left_side; + + // Initialize gain values + for(unsigned hyperedge = 0; hyperedge < Hgraph.num_hyperedges(); ++hyperedge) + { + for(unsigned node : Hgraph.get_vertices_in_hyperedge(hyperedge)) + ++nr_nodes_in_hyperedge_on_side[hyperedge][partition.assignedPartition(node)]; + + if(Hgraph.get_vertices_in_hyperedge(hyperedge).size() < 2) + continue; + + for(unsigned part = 0; part < 2; ++part) + { + if(nr_nodes_in_hyperedge_on_side[hyperedge][part] == 1) + for(unsigned node : Hgraph.get_vertices_in_hyperedge(hyperedge)) + if(partition.assignedPartition(node) == part) + ++gain[node]; + } + } + + // build gain bucket array + std::vector max_gain(2, -static_cast(max_degree)-1); + std::vector > > gain_bucket_array(2, std::vector >(2*max_degree+1)); + for(unsigned node = 0; node < Hgraph.num_vertices(); ++node) + { + const unsigned& part = partition.assignedPartition(node); + gain_bucket_array[part][static_cast(gain[node] + static_cast(max_degree))].push_back(node); + max_gain[part] = std::max(max_gain[part], gain[node]); + } + + unsigned best_index = 0; + int best_cost = 0; + std::vector moved_nodes; + + // the pass itself: make moves + while(moved_nodes.size() < Hgraph.num_vertices()) + { + // select move + unsigned to_move = std::numeric_limits::max(); + unsigned chosen_part = std::numeric_limits::max(); + + unsigned gain_index = static_cast(std::max(max_gain[0], max_gain[1]) + static_cast(max_degree)); + while(gain_index < std::numeric_limits::max()) + { + bool can_choose_left = (Hgraph.num_vertices() - left_side < max_nodes_in_part) && !gain_bucket_array[0][gain_index].empty(); + bool can_choose_right = (left_side < max_nodes_in_part) && !gain_bucket_array[1][gain_index].empty(); + + if(can_choose_left && can_choose_right) + chosen_part = (left_side >= Hgraph.num_vertices() / 2) ? 
1 : 0; + else if(can_choose_left) + chosen_part = 0; + else if(can_choose_right) + chosen_part = 1; + + if(chosen_part < 2) + { + to_move = gain_bucket_array[chosen_part][gain_index].back(); + gain_bucket_array[chosen_part][gain_index].pop_back(); + break; + } + --gain_index; + } + + if(to_move == std::numeric_limits::max()) + break; + + // make move + + moved_nodes.push_back(to_move); + cost -= gain[to_move]; + if(cost < best_cost) + { + best_cost = cost; + best_index = static_cast(moved_nodes.size()) + 1; + } + locked[to_move] = true; + node_to_new_part[to_move] = 1 - node_to_new_part[to_move]; + + if(chosen_part == 0) + --left_side; + else + ++left_side; + + unsigned other_part = 1-chosen_part; + + // update gain values + for(unsigned hyperedge : Hgraph.get_incident_hyperedges(to_move)) + { + if(nr_nodes_in_hyperedge_on_side[hyperedge][chosen_part] == 1) + { + for(unsigned node : Hgraph.get_vertices_in_hyperedge(hyperedge)) + { + if(locked[node]) + continue; + + std::vector& vec = gain_bucket_array[other_part][static_cast(gain[node] + static_cast(max_degree))]; + vec.erase(std::remove(vec.begin(), vec.end(), node), vec.end()); + --gain[node]; + gain_bucket_array[other_part][static_cast(gain[node] + static_cast(max_degree))].push_back(node); + } + } + else if(nr_nodes_in_hyperedge_on_side[hyperedge][chosen_part] == 2) + { + for(unsigned node : Hgraph.get_vertices_in_hyperedge(hyperedge)) + { + if(node_to_new_part[node] == chosen_part && !locked[node]) + { + std::vector& vec = gain_bucket_array[chosen_part][static_cast(gain[node] + static_cast(max_degree))]; + vec.erase(std::remove(vec.begin(), vec.end(), node), vec.end()); + ++gain[node]; + gain_bucket_array[chosen_part][static_cast(gain[node] + static_cast(max_degree))].push_back(node); + max_gain[chosen_part] = std::max(max_gain[chosen_part], gain[node]); + break; + } + } + } + if(nr_nodes_in_hyperedge_on_side[hyperedge][other_part] == 1) + { + for(unsigned node : Hgraph.get_vertices_in_hyperedge(hyperedge)) + { + if(node_to_new_part[node] == other_part && !locked[node]) + { + std::vector& vec = gain_bucket_array[other_part][static_cast(gain[node] + static_cast(max_degree))]; + vec.erase(std::remove(vec.begin(), vec.end(), node), vec.end()); + --gain[node]; + gain_bucket_array[other_part][static_cast(gain[node] + static_cast(max_degree))].push_back(node); + break; + } + } + } + else if(nr_nodes_in_hyperedge_on_side[hyperedge][other_part] == 0) + { + for(unsigned node : Hgraph.get_vertices_in_hyperedge(hyperedge)) + { + if(locked[node]) + continue; + + std::vector& vec = gain_bucket_array[chosen_part][static_cast(gain[node] + static_cast(max_degree))]; + vec.erase(std::remove(vec.begin(), vec.end(), node), vec.end()); + ++gain[node]; + gain_bucket_array[chosen_part][static_cast(gain[node] + static_cast(max_degree))].push_back(node); + max_gain[chosen_part] = std::max(max_gain[chosen_part], gain[node]); + } + } + --nr_nodes_in_hyperedge_on_side[hyperedge][chosen_part]; + ++nr_nodes_in_hyperedge_on_side[hyperedge][other_part]; + } + } + + // apply best configuration seen + if(best_index == 0) + break; + + for(unsigned node_idx = 0; node_idx < best_index; ++node_idx) + partition.setAssignedPartition(moved_nodes[node_idx], 1U-partition.assignedPartition(moved_nodes[node_idx])); + + } +} + +} // namespace osp \ No newline at end of file diff --git a/include/osp/partitioning/partitioners/partitioning_ILP.hpp b/include/osp/partitioning/partitioners/partitioning_ILP.hpp new file mode 100644 index 00000000..7f6af471 --- /dev/null +++ 
b/include/osp/partitioning/partitioners/partitioning_ILP.hpp @@ -0,0 +1,137 @@ +/* +Copyright 2024 Huawei Technologies Co., Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +@author Toni Boehnlein, Benjamin Lozes, Pal Andras Papp, Raphael S. Steiner +*/ + +#pragma once + +#include +#include + +#include "osp/partitioning/partitioners/partitioning_ILP_base.hpp" +#include "osp/partitioning/model/partitioning.hpp" + +namespace osp{ + +class HypergraphPartitioningILP : public HypergraphPartitioningILPBase { + + protected: + std::vector readCoptAssignment(const PartitioningProblem &instance, Model& model); + + void setupExtraVariablesConstraints(const PartitioningProblem &instance, Model& model); + + void setInitialSolution(const Partitioning &partition, Model& model); + + public: + + virtual ~HypergraphPartitioningILP() = default; + + RETURN_STATUS computePartitioning(Partitioning& result); + + virtual std::string getAlgorithmName() const override { return "HypergraphPartitioningILP"; } +}; + +RETURN_STATUS HypergraphPartitioningILP::computePartitioning(Partitioning& result) +{ + Envr env; + Model model = env.CreateModel("HypergraphPart"); + + setupFundamentalVariablesConstraintsObjective(result.getInstance(), model); + setupExtraVariablesConstraints(result.getInstance(), model); + + if(use_initial_solution) + setInitialSolution(result, model); + + solveILP(model); + + if (model.GetIntAttr(COPT_INTATTR_MIPSTATUS) == COPT_MIPSTATUS_OPTIMAL) { + + result.setAssignedPartitions(readCoptAssignment(result.getInstance(), model)); + return RETURN_STATUS::OSP_SUCCESS; + + } else if (model.GetIntAttr(COPT_INTATTR_MIPSTATUS) == COPT_MIPSTATUS_INF_OR_UNB) { + + return RETURN_STATUS::ERROR; + + } else { + + if (model.GetIntAttr(COPT_INTATTR_HASMIPSOL)) { + + result.setAssignedPartitions(readCoptAssignment(result.getInstance(), model)); + return RETURN_STATUS::OSP_SUCCESS; + + } else { + return RETURN_STATUS::ERROR; + } + } +} + +void HypergraphPartitioningILP::setupExtraVariablesConstraints(const PartitioningProblem &instance, Model& model) { + + const unsigned numberOfParts = instance.getNumberOfPartitions(); + const unsigned numberOfVertices = instance.getHypergraph().num_vertices(); + + // Constraints + + // each node assigned to exactly one partition + for (unsigned node = 0; node < numberOfVertices; node++) { + + Expr expr; + for (unsigned part = 0; part < numberOfParts; part++) + expr += node_in_partition[node][static_cast(part)]; + + model.AddConstr(expr == 1); + } + + // hyperedge indicators match node variables + for (unsigned part = 0; part < numberOfParts; part++) + for (unsigned node = 0; node < numberOfVertices; node++) + for (const unsigned& hyperedge : instance.getHypergraph().get_incident_hyperedges(node)) + model.AddConstr(hyperedge_uses_partition[hyperedge][static_cast(part)] >= node_in_partition[node][static_cast(part)]); + +}; + +// convert generic one-to-many assingment (of base class function) to one-to-one +std::vector HypergraphPartitioningILP::readCoptAssignment(const PartitioningProblem &instance, 
Model& model) +{ + std::vector node_to_partition(instance.getHypergraph().num_vertices(), UINT_MAX); + std::vector > assignmentsGenericForm = readAllCoptAssignments(instance, model); + + for (unsigned node = 0; node < instance.getHypergraph().num_vertices(); node++) + node_to_partition[node] = assignmentsGenericForm[node].front(); + + return node_to_partition; +} + +void HypergraphPartitioningILP::setInitialSolution(const Partitioning &partition, Model& model) +{ + const std::vector& assignment = partition.assignedPartitions(); + const unsigned& numPartitions = partition.getInstance().getNumberOfPartitions(); + if(assignment.size() != partition.getInstance().getHypergraph().num_vertices()) + return; + + for(unsigned node = 0; node < assignment.size(); ++node) + { + if(assignment[node] >= numPartitions) + continue; + + for(unsigned part = 0; part < numPartitions; ++part) + model.SetMipStart(node_in_partition[node][static_cast(part)], static_cast(assignment[node] == part)); + } + model.LoadMipStart(); +} + +} // namespace osp \ No newline at end of file diff --git a/include/osp/partitioning/partitioners/partitioning_ILP_base.hpp b/include/osp/partitioning/partitioners/partitioning_ILP_base.hpp new file mode 100644 index 00000000..a6e4fcc2 --- /dev/null +++ b/include/osp/partitioning/partitioners/partitioning_ILP_base.hpp @@ -0,0 +1,151 @@ +/* +Copyright 2024 Huawei Technologies Co., Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +@author Toni Boehnlein, Benjamin Lozes, Pal Andras Papp, Raphael S. Steiner +*/ + +#pragma once + +#include +#include + +#include "osp/partitioning/model/partitioning_problem.hpp" +#include "osp/bsp/model/BspInstance.hpp" // for return statuses (stati?) 
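+
+// Base class for the COPT-based hypergraph partitioning ILPs. The derived solvers
+// (with and without vertex replication) share the variable setup and objective in
+// setupFundamentalVariablesConstraintsObjective(), the solver configuration in
+// solveILP(), and the solution read-back in readAllCoptAssignments().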
+ +namespace osp{ + +class HypergraphPartitioningILPBase { + + protected: + std::vector node_in_partition; + std::vector hyperedge_uses_partition; + + unsigned time_limit_seconds = 3600; + bool use_initial_solution = false; + + std::vector > readAllCoptAssignments(const PartitioningProblem &instance, Model& model); + + void setupFundamentalVariablesConstraintsObjective(const PartitioningProblem &instance, Model& model); + + void solveILP(Model& model); + + public: + + virtual std::string getAlgorithmName() const = 0; + + inline unsigned getTimeLimitSeconds() const { return time_limit_seconds; } + inline void setTimeLimitSeconds(unsigned limit_) { time_limit_seconds = limit_; } + inline void setUseInitialSolution(bool use_) { use_initial_solution = use_; } +}; + +void HypergraphPartitioningILPBase::solveILP(Model& model) { + + model.SetIntParam(COPT_INTPARAM_LOGTOCONSOLE, 0); + + model.SetDblParam(COPT_DBLPARAM_TIMELIMIT, time_limit_seconds); + model.SetIntParam(COPT_INTPARAM_THREADS, 128); + + model.SetIntParam(COPT_INTPARAM_STRONGBRANCHING, 1); + model.SetIntParam(COPT_INTPARAM_LPMETHOD, 1); + model.SetIntParam(COPT_INTPARAM_ROUNDINGHEURLEVEL, 1); + + model.SetIntParam(COPT_INTPARAM_SUBMIPHEURLEVEL, 1); + // model.SetIntParam(COPT_INTPARAM_PRESOLVE, 1); + // model.SetIntParam(COPT_INTPARAM_CUTLEVEL, 0); + model.SetIntParam(COPT_INTPARAM_TREECUTLEVEL, 2); + // model.SetIntParam(COPT_INTPARAM_DIVINGHEURLEVEL, 2); + + model.Solve(); +} + +void HypergraphPartitioningILPBase::setupFundamentalVariablesConstraintsObjective(const PartitioningProblem &instance, Model& model) { + + const unsigned numberOfParts = instance.getNumberOfPartitions(); + const unsigned numberOfVertices = instance.getHypergraph().num_vertices(); + const unsigned numberOfHyperedges = instance.getHypergraph().num_hyperedges(); + + // Variables + + node_in_partition = std::vector(numberOfVertices); + + for (unsigned node = 0; node < numberOfVertices; node++) + node_in_partition[node] = model.AddVars(static_cast(numberOfParts), COPT_BINARY, "node_in_partition"); + + hyperedge_uses_partition = std::vector(numberOfHyperedges); + + for (unsigned hyperedge = 0; hyperedge < numberOfHyperedges; hyperedge++) + hyperedge_uses_partition[hyperedge] = model.AddVars(static_cast(numberOfParts), COPT_BINARY, "hyperedge_uses_partition"); + + // partition size constraints + for (unsigned part = 0; part < numberOfParts; part++) + { + Expr expr; + for (unsigned node = 0; node < numberOfVertices; node++) + expr += instance.getHypergraph().get_vertex_weight(node) * node_in_partition[node][static_cast(part)]; + + model.AddConstr(expr <= instance.getMaxWeightPerPartition()); + } + + // set objective + Expr expr; + for (unsigned hyperedge = 0; hyperedge < numberOfHyperedges; hyperedge++) + { + expr -= instance.getHypergraph().get_hyperedge_weight(hyperedge); + for (unsigned part = 0; part < numberOfParts; part++) + expr += instance.getHypergraph().get_hyperedge_weight(hyperedge) * hyperedge_uses_partition[hyperedge][static_cast(part)]; + } + + model.SetObjective(expr, COPT_MINIMIZE); + +}; + +std::vector > HypergraphPartitioningILPBase::readAllCoptAssignments(const PartitioningProblem &instance, Model& model) +{ + std::vector > node_to_partitions(instance.getHypergraph().num_vertices()); + + std::set nonempty_partition_ids; + for (unsigned node = 0; node < instance.getHypergraph().num_vertices(); node++) + for(unsigned part = 0; part < instance.getNumberOfPartitions(); part++) + if(node_in_partition[node][static_cast(part)].Get(COPT_DBLINFO_VALUE) 
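+            /* the indicator variables are binary in theory, but COPT reports doubles;
+               comparing against 0.99 absorbs numerical round-off */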
+std::vector<std::vector<unsigned>> HypergraphPartitioningILPBase::readAllCoptAssignments(const PartitioningProblem &instance, Model& model)
+{
+    std::vector<std::vector<unsigned>> node_to_partitions(instance.getHypergraph().num_vertices());
+
+    std::set<unsigned> nonempty_partition_ids;
+    for (unsigned node = 0; node < instance.getHypergraph().num_vertices(); node++)
+        for(unsigned part = 0; part < instance.getNumberOfPartitions(); part++)
+            if(node_in_partition[node][static_cast<int>(part)].Get(COPT_DBLINFO_VALUE) >= .99)
+            {
+                node_to_partitions[node].push_back(part);
+                nonempty_partition_ids.insert(part);
+            }
+
+    for(std::vector<unsigned>& chosen_partitions : node_to_partitions)
+        if(chosen_partitions.empty())
+        {
+            std::cout<<"Error: partitioning returned by ILP seems incomplete!"<<std::endl;
+        }
+
+    unsigned current_index = 0;
+    std::map<unsigned, unsigned> new_part_index;
+    for(unsigned part_index : nonempty_partition_ids)
+    {
+        new_part_index[part_index] = current_index;
+        ++current_index;
+    }
+
+    for(unsigned node = 0; node < instance.getHypergraph().num_vertices(); node++)
+        for(unsigned entry_idx = 0; entry_idx < node_to_partitions[node].size(); ++entry_idx)
+            node_to_partitions[node][entry_idx] = new_part_index[node_to_partitions[node][entry_idx]];
+
+    std::cout<<"Hypergraph partitioning ILP best solution value: "<<model.GetDblAttr(COPT_DBLATTR_BESTOBJ)<<std::endl;
+
+    return node_to_partitions;
+}
+
+} // namespace osp
\ No newline at end of file
diff --git a/include/osp/partitioning/partitioners/partitioning_ILP_replication.hpp b/include/osp/partitioning/partitioners/partitioning_ILP_replication.hpp
new file mode 100644
--- /dev/null
+++ b/include/osp/partitioning/partitioners/partitioning_ILP_replication.hpp
+/*
+Copyright 2024 Huawei Technologies Co., Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+@author Toni Boehnlein, Benjamin Lozes, Pal Andras Papp, Raphael S. Steiner
+*/
+
+#pragma once
+
+#include
+
+#include "osp/partitioning/partitioners/partitioning_ILP_base.hpp"
+#include "osp/partitioning/model/partitioning_replication.hpp"
+
+namespace osp{
+
+class HypergraphPartitioningILPWithReplication : public HypergraphPartitioningILPBase {
+
+  public:
+    enum class REPLICATION_MODEL_IN_ILP { ONLY_TWICE, GENERAL };
+
+  protected:
+    std::vector<unsigned> readCoptAssignment(const PartitioningProblem &instance, Model& model);
+
+    void setupExtraVariablesConstraints(const PartitioningProblem &instance, Model& model);
+
+    void setInitialSolution(const PartitioningWithReplication &partition, Model& model);
+
+    REPLICATION_MODEL_IN_ILP replication_model = REPLICATION_MODEL_IN_ILP::ONLY_TWICE;
+
+  public:
+
+    virtual ~HypergraphPartitioningILPWithReplication() = default;
+
+    RETURN_STATUS computePartitioning(PartitioningWithReplication& result);
+
+    virtual std::string getAlgorithmName() const override { return "HypergraphPartitioningILPWithReplication"; }
+
+    void setReplicationModel(REPLICATION_MODEL_IN_ILP replication_model_) { replication_model = replication_model_; }
+};
+
+RETURN_STATUS HypergraphPartitioningILPWithReplication::computePartitioning(PartitioningWithReplication& result)
+{
+    Envr env;
+    Model model = env.CreateModel("HypergraphPartRepl");
+
+    setupFundamentalVariablesConstraintsObjective(result.getInstance(), model);
+    setupExtraVariablesConstraints(result.getInstance(), model);
+
+    if(use_initial_solution)
+        setInitialSolution(result, model);
+
+    solveILP(model);
+
+    if (model.GetIntAttr(COPT_INTATTR_MIPSTATUS) == COPT_MIPSTATUS_OPTIMAL) {
+
+        result.setAssignedPartitionVectors(readAllCoptAssignments(result.getInstance(), model));
+        return RETURN_STATUS::OSP_SUCCESS;
+
+    } else if (model.GetIntAttr(COPT_INTATTR_MIPSTATUS) == COPT_MIPSTATUS_INF_OR_UNB) {
+
+        return RETURN_STATUS::ERROR;
+
+    } else {
+
+        if (model.GetIntAttr(COPT_INTATTR_HASMIPSOL)) {
+
+            result.setAssignedPartitionVectors(readAllCoptAssignments(result.getInstance(), model));
+            return RETURN_STATUS::OSP_SUCCESS;
+
+        } else {
+            return RETURN_STATUS::ERROR;
+        }
+    }
+}
+
+void HypergraphPartitioningILPWithReplication::setupExtraVariablesConstraints(const PartitioningProblem &instance, Model& model) {
+
+    const unsigned numberOfParts = instance.getNumberOfPartitions();
+    const unsigned numberOfVertices = instance.getHypergraph().num_vertices();
+
+    if(replication_model == REPLICATION_MODEL_IN_ILP::GENERAL)
+    {
+        // create variables for each pin+partition combination
+        std::map<std::pair<unsigned, unsigned>, unsigned> pin_ID_map;
+        unsigned nr_of_pins = 0;
+        for (unsigned node = 0; node < numberOfVertices; node++)
+            for (const unsigned& hyperedge : instance.getHypergraph().get_incident_hyperedges(node))
+                pin_ID_map[std::make_pair(node, hyperedge)] = nr_of_pins++;
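+        // Note on the GENERAL model (illustration only): a vertex may be
+        // placed on any number of partitions, and every pin (v, e) must be
+        // covered by exactly one copy of v. Covering pin (v, e) on partition
+        // p requires node_in_partition[v][p] = 1 and forces
+        // hyperedge_uses_partition[e][p] = 1, so a hyperedge only pays for
+        // the partitions that actually serve at least one of its pins: if v
+        // sits on partitions {0, 1}, each of its pins may be charged to
+        // either copy.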
+        std::vector<VarArray> pin_covered_by_partition = std::vector<VarArray>(nr_of_pins);
+
+        for (unsigned pin = 0; pin < nr_of_pins; pin++)
+            pin_covered_by_partition[pin] = model.AddVars(static_cast<int>(numberOfParts), COPT_BINARY, "pin_covered_by_partition");
+
+        // each pin covered exactly once
+        for (unsigned pin = 0; pin < nr_of_pins; pin++) {
+
+            Expr expr;
+            for (unsigned part = 0; part < numberOfParts; part++)
+                expr += pin_covered_by_partition[pin][static_cast<int>(part)];
+
+            model.AddConstr(expr == 1);
+        }
+
+        // pin covering requires node assignment
+        for (unsigned part = 0; part < numberOfParts; part++)
+            for (unsigned node = 0; node < numberOfVertices; node++)
+                for (const unsigned& hyperedge : instance.getHypergraph().get_incident_hyperedges(node))
+                    model.AddConstr(node_in_partition[node][static_cast<int>(part)] >= pin_covered_by_partition[pin_ID_map[std::make_pair(node, hyperedge)]][static_cast<int>(part)]);
+
+        // pin covering requires hyperedge use
+        for (unsigned part = 0; part < numberOfParts; part++)
+            for (unsigned node = 0; node < numberOfVertices; node++)
+                for (const unsigned& hyperedge : instance.getHypergraph().get_incident_hyperedges(node))
+                    model.AddConstr(hyperedge_uses_partition[hyperedge][static_cast<int>(part)] >= pin_covered_by_partition[pin_ID_map[std::make_pair(node, hyperedge)]][static_cast<int>(part)]);
+
+    }
+    else if(replication_model == REPLICATION_MODEL_IN_ILP::ONLY_TWICE)
+    {
+        // each node has one or two copies
+        VarArray node_replicated = model.AddVars(static_cast<int>(numberOfVertices), COPT_BINARY, "node_replicated");
+
+        for (unsigned node = 0; node < numberOfVertices; node++) {
+
+            Expr expr = -1;
+            for (unsigned part = 0; part < numberOfParts; part++)
+                expr += node_in_partition[node][static_cast<int>(part)];
+
+            model.AddConstr(expr == node_replicated[static_cast<int>(node)]);
+        }
+
+        // hyperedge indicators if node is not replicated
+        for (unsigned part = 0; part < numberOfParts; part++)
+            for (unsigned node = 0; node < numberOfVertices; node++)
+                for (const unsigned& hyperedge : instance.getHypergraph().get_incident_hyperedges(node))
+                    model.AddConstr(hyperedge_uses_partition[hyperedge][static_cast<int>(part)] >= node_in_partition[node][static_cast<int>(part)] - node_replicated[static_cast<int>(node)]);
+
+        // hyperedge indicators if node is replicated
+        for (unsigned node = 0; node < numberOfVertices; node++)
+            for (const unsigned& hyperedge : instance.getHypergraph().get_incident_hyperedges(node))
+                for (unsigned part1 = 0; part1 < numberOfParts; part1++)
+                    for (unsigned part2 = part1+1; part2 < numberOfParts; part2++)
+                        model.AddConstr(hyperedge_uses_partition[hyperedge][static_cast<int>(part1)] + hyperedge_uses_partition[hyperedge][static_cast<int>(part2)] >=
+                                        node_in_partition[node][static_cast<int>(part1)] + node_in_partition[node][static_cast<int>(part2)] - 1);
+    }
+}
+
+void HypergraphPartitioningILPWithReplication::setInitialSolution(const PartitioningWithReplication &partition, Model& model)
+{
+    const std::vector<std::vector<unsigned>>& assignments = partition.assignedPartitions();
+    const unsigned& numPartitions = partition.getInstance().getNumberOfPartitions();
+    if(assignments.size() != partition.getInstance().getHypergraph().num_vertices())
+        return;
+
+    for(unsigned node = 0; node < assignments.size(); ++node)
+    {
+        std::vector<bool> assignedToPart(numPartitions, false);
+        for(unsigned part : assignments[node])
+            if(part < numPartitions)
+                assignedToPart[part] = true;
+
+        for(unsigned part = 0; part < numPartitions; ++part)
+            model.SetMipStart(node_in_partition[node][static_cast<int>(part)], static_cast<double>(assignedToPart[part]));
+    }
+    model.LoadMipStart();
+}
+
+} // namespace osp
\ No newline at end of file
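A minimal usage sketch of the two partitioners above, assuming a working COPT installation; the toy hypergraph and all numbers are made up for illustration:

    #include <iostream>

    #include "osp/partitioning/partitioners/partitioning_ILP.hpp"
    #include "osp/partitioning/partitioners/partitioning_ILP_replication.hpp"

    using namespace osp;

    int main() {
        Hypergraph hg;
        for (unsigned i = 0; i < 4; ++i)
            hg.add_vertex(1);            // four vertices of unit weight
        hg.add_hyperedge({0, 1, 2});     // two toy hyperedges of unit weight
        hg.add_hyperedge({2, 3});

        PartitioningProblem instance(hg, 2, 2); // 2 parts, max weight 2 per part

        // exact partitioning without replication
        Partitioning partition(instance);
        HypergraphPartitioningILP partitioner;
        partitioner.setTimeLimitSeconds(60);
        if (partitioner.computePartitioning(partition) == RETURN_STATUS::OSP_SUCCESS)
            std::cout << "connectivity cost: " << partition.computeConnectivityCost() << std::endl;

        // the same instance, but vertices may be replicated
        PartitioningWithReplication partition_rep(instance);
        HypergraphPartitioningILPWithReplication partitioner_rep;
        partitioner_rep.setReplicationModel(
            HypergraphPartitioningILPWithReplication::REPLICATION_MODEL_IN_ILP::GENERAL);
        partitioner_rep.computePartitioning(partition_rep);
        return 0;
    }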
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 9b2232dd..359feed3 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -121,6 +121,8 @@ if (COPT_FOUND)
 
 #_add_test( ilp_bsp_scheduler )
 
+#_add_test( ilp_hypergraph_partitioning )
+
 endif()
 
 _add_test( bsp_schedulers )
diff --git a/tests/hypergraph_and_partition.cpp b/tests/hypergraph_and_partition.cpp
index 4d00acb3..3670128a 100644
--- a/tests/hypergraph_and_partition.cpp
+++ b/tests/hypergraph_and_partition.cpp
@@ -23,9 +23,14 @@ limitations under the License.
 
 #include
 #include
 
-#include "osp/partitioning/model/hypergraph.hpp"
+#include "osp/partitioning/model/partitioning.hpp"
+#include "osp/partitioning/model/partitioning_replication.hpp"
+#include "osp/partitioning/partitioners/generic_FM.hpp"
 #include "osp/graph_implementations/boost_graphs/boost_graph.hpp"
 #include "osp/auxiliary/io/hdag_graph_file_reader.hpp"
+#include "osp/auxiliary/io/mtx_hypergraph_file_reader.hpp"
+#include "osp/auxiliary/io/partitioning_file_writer.hpp"
+
 
 using namespace osp;
 
@@ -39,7 +44,7 @@ BOOST_AUTO_TEST_CASE(Hypergraph_and_Partition_test) {
     while ((!cwd.empty()) && (cwd.filename() != "OneStopParallel")) {
         cwd = cwd.parent_path();
         std::cout << cwd << std::endl;
-    } 
+    }
 
     graph DAG;
 
@@ -48,13 +53,136 @@ BOOST_AUTO_TEST_CASE(Hypergraph_and_Partition_test) {
 
     BOOST_CHECK(status);
 
-    hypergraph Hgraph;
-
+    Hypergraph Hgraph;
+
+    // Matrix format, one hyperedge for each row/column
+    status = file_reader::readHypergraphMartixMarketFormat((cwd / "data/mtx_tests/ErdosRenyi_8_19_A.mtx").string(), Hgraph);
+    BOOST_CHECK(status);
+    BOOST_CHECK_EQUAL(Hgraph.num_vertices(), 27);
+    BOOST_CHECK_EQUAL(Hgraph.num_hyperedges(), 16);
+
+    // DAG format, all hyperedges have size 2
     Hgraph.convert_from_cdag_as_dag(DAG);
     BOOST_CHECK_EQUAL(DAG.num_vertices(), Hgraph.num_vertices());
     BOOST_CHECK_EQUAL(DAG.num_edges(), Hgraph.num_hyperedges());
     BOOST_CHECK_EQUAL(DAG.num_edges()*2, Hgraph.num_pins());
 
+    // HyperDAG format, one hyperedge for each non-sink node
+    unsigned nr_of_non_sinks = 0;
+    for(const auto &node : DAG.vertices())
+        if(DAG.out_degree(node) > 0)
+            ++ nr_of_non_sinks;
+    Hgraph.convert_from_cdag_as_hyperdag(DAG);
+    BOOST_CHECK_EQUAL(DAG.num_vertices(), Hgraph.num_vertices());
+    BOOST_CHECK_EQUAL(nr_of_non_sinks, Hgraph.num_hyperedges());
+    BOOST_CHECK_EQUAL(DAG.num_edges() + nr_of_non_sinks, Hgraph.num_pins());
+
+
+    // Dummy partitioning
+
+    PartitioningProblem instance(Hgraph, 3, 30);
+
+    Partitioning partition(instance);
+    for(unsigned node = 0; node < Hgraph.num_vertices(); ++node)
+        partition.setAssignedPartition(node, node % 3);
+
+    BOOST_CHECK(partition.satisfiesBalanceConstraint());
+    int cutNetCost = partition.computeCutNetCost();
+    int connectivityCost = partition.computeConnectivityCost();
+    BOOST_CHECK(connectivityCost >= cutNetCost);
+
+    for(unsigned node = 0; node < Hgraph.num_vertices(); ++node)
+        instance.getHypergraph().set_vertex_weight(node, 1);
+
+    instance.setMaxWeightViaImbalanceFactor(0);
+    BOOST_CHECK(partition.satisfiesBalanceConstraint());
+
+    instance.setNumberOfPartitions(5);
+    instance.setMaxWeightViaImbalanceFactor(0);
+    BOOST_CHECK(!partition.satisfiesBalanceConstraint());
+
+    for(unsigned node = 0; node < Hgraph.num_vertices(); ++node)
+        partition.setAssignedPartition(node, node % 5);
+
+    BOOST_CHECK(partition.satisfiesBalanceConstraint());
+    BOOST_CHECK(partition.computeConnectivityCost() >= partition.computeCutNetCost());
+
+    file_writer::write_txt(std::cout, partition);
+
+
+    // Dummy
partitioning with replication + + instance.getHypergraph().convert_from_cdag_as_hyperdag(DAG); + instance.setNumberOfPartitions(3); + instance.setMaxWeightExplicitly(30); + PartitioningWithReplication partition_with_rep(instance); + for(unsigned node = 0; node < Hgraph.num_vertices(); ++node) + partition_with_rep.setAssignedPartitions(node, {node % 3}); + + BOOST_CHECK(partition_with_rep.satisfiesBalanceConstraint()); + BOOST_CHECK(partition_with_rep.computeCutNetCost() == cutNetCost); + BOOST_CHECK(partition_with_rep.computeConnectivityCost() == connectivityCost); + + instance.setMaxWeightExplicitly(60); + for(unsigned node = 0; node < Hgraph.num_vertices(); ++node) + partition_with_rep.setAssignedPartitions(node, {node % 3, (node+1)%3}); + + BOOST_CHECK(partition_with_rep.satisfiesBalanceConstraint()); + BOOST_CHECK(partition_with_rep.computeConnectivityCost() >= partition_with_rep.computeCutNetCost()); + + instance.setMaxWeightExplicitly(Hgraph.compute_total_vertex_weight()); + for(unsigned node = 0; node < Hgraph.num_vertices(); ++node) + partition_with_rep.setAssignedPartitions(node, {0, 1, 2}); + + BOOST_CHECK(partition_with_rep.satisfiesBalanceConstraint()); + BOOST_CHECK(partition_with_rep.computeConnectivityCost() == 0); + BOOST_CHECK(partition_with_rep.computeCutNetCost() == 0); + + file_writer::write_txt(std::cout, partition_with_rep); + + + // Generic FM + + instance.setNumberOfPartitions(2); + instance.setMaxWeightExplicitly(/*35*/ 4000); + for(unsigned node = 0; node < instance.getHypergraph().num_vertices(); ++node) + instance.getHypergraph().set_vertex_weight(node, 1); + + Partitioning partition_to_improve(instance); + for(unsigned node = 0; node < instance.getHypergraph().num_vertices(); ++node) + partition_to_improve.setAssignedPartition(node, node % 2); + + int original_cost = partition_to_improve.computeConnectivityCost(); + + GenericFM fm; + fm.ImprovePartitioning(partition_to_improve); + int new_cost = partition_to_improve.computeConnectivityCost(); + + BOOST_CHECK(partition_to_improve.satisfiesBalanceConstraint()); + BOOST_CHECK(new_cost <= original_cost); + std::cout< "< "< + +#include +#include "osp/partitioning/partitioners/partitioning_ILP.hpp" +#include "osp/partitioning/partitioners/partitioning_ILP_replication.hpp" +#include "osp/graph_implementations/boost_graphs/boost_graph.hpp" +#include "osp/auxiliary/io/hdag_graph_file_reader.hpp" + + +using namespace osp; + +BOOST_AUTO_TEST_CASE(test_full) { + + using graph = boost_graph_uint_t; + + // Getting root git directory + std::filesystem::path cwd = std::filesystem::current_path(); + std::cout << cwd << std::endl; + while ((!cwd.empty()) && (cwd.filename() != "OneStopParallel")) { + cwd = cwd.parent_path(); + std::cout << cwd << std::endl; + } + + graph DAG; + + bool status = file_reader::readComputationalDagHyperdagFormatDB( + (cwd / "data/spaa/tiny/instance_bicgstab.hdag").string(), DAG); + + BOOST_CHECK(status); + + Hypergraph Hgraph; + + Hgraph.convert_from_cdag_as_hyperdag(DAG); + BOOST_CHECK_EQUAL(DAG.num_vertices(), Hgraph.num_vertices()); + + PartitioningProblem instance(Hgraph, 3, 35); + Partitioning partition(instance); + + + // ILP without replication + + HypergraphPartitioningILP partitioner; + partitioner.setTimeLimitSeconds(60); + partitioner.computePartitioning(partition); + + BOOST_CHECK(partition.satisfiesBalanceConstraint()); + BOOST_CHECK(partition.computeConnectivityCost() >= partition.computeCutNetCost()); + + for(unsigned node = 0; node < Hgraph.num_vertices(); ++node) + 
partition.setAssignedPartition(node, node % 3); + + partitioner.setUseInitialSolution(true); + partitioner.computePartitioning(partition); + + BOOST_CHECK(partition.satisfiesBalanceConstraint()); + BOOST_CHECK(partition.computeConnectivityCost() >= partition.computeCutNetCost()); + + + // ILP with replication + + HypergraphPartitioningILPWithReplication partitioner_rep; + PartitioningWithReplication partition_rep(instance); + + partitioner_rep.setTimeLimitSeconds(60); + partitioner_rep.computePartitioning(partition_rep); + + BOOST_CHECK(partition_rep.satisfiesBalanceConstraint()); + BOOST_CHECK(partition_rep.computeConnectivityCost() == 0); + + partitioner_rep.setUseInitialSolution(true); + for(unsigned node = 0; node < Hgraph.num_vertices(); ++node) + partition_rep.setAssignedPartitions(node, {node % 3}); + + partitioner_rep.computePartitioning(partition_rep); + BOOST_CHECK(partition_rep.satisfiesBalanceConstraint()); + BOOST_CHECK(partition_rep.computeConnectivityCost() == 0); + + instance.setMaxWeightExplicitly(60); + for(unsigned node = 0; node < Hgraph.num_vertices(); ++node) + partition_rep.setAssignedPartitions(node, {node % 3, (node+1)%3}); + + partitioner_rep.computePartitioning(partition_rep); + BOOST_CHECK(partition_rep.satisfiesBalanceConstraint()); + BOOST_CHECK(partition_rep.computeConnectivityCost() == 0); + + // same tests with other replicaiton formulation + instance.setMaxWeightExplicitly(35); + partitioner_rep.setReplicationModel(HypergraphPartitioningILPWithReplication::REPLICATION_MODEL_IN_ILP::GENERAL); + partitioner_rep.setUseInitialSolution(false); + partitioner_rep.computePartitioning(partition_rep); + + BOOST_CHECK(partition_rep.satisfiesBalanceConstraint()); + BOOST_CHECK(partition_rep.computeConnectivityCost() == 0); + + partitioner_rep.setUseInitialSolution(true); + for(unsigned node = 0; node < Hgraph.num_vertices(); ++node) + partition_rep.setAssignedPartitions(node, {node % 3}); + + partitioner_rep.computePartitioning(partition_rep); + BOOST_CHECK(partition_rep.satisfiesBalanceConstraint()); + BOOST_CHECK(partition_rep.computeConnectivityCost() == 0); + + instance.setMaxWeightExplicitly(60); + for(unsigned node = 0; node < Hgraph.num_vertices(); ++node) + partition_rep.setAssignedPartitions(node, {node % 3, (node+1)%3}); + + partitioner_rep.computePartitioning(partition_rep); + BOOST_CHECK(partition_rep.satisfiesBalanceConstraint()); + BOOST_CHECK(partition_rep.computeConnectivityCost() == 0); + +}; \ No newline at end of file From da686e2556b0cad06f2fc7f9b3d33e0e8da71740 Mon Sep 17 00:00:00 2001 From: ppapp Date: Mon, 13 Oct 2025 15:20:12 +0200 Subject: [PATCH 3/9] two kinds of vertex weights on hypergraphs --- .../io/mtx_hypergraph_file_reader.hpp | 3 +- include/osp/partitioning/model/hypergraph.hpp | 58 ++++++++++++++----- .../osp/partitioning/model/partitioning.hpp | 14 ++++- .../model/partitioning_problem.hpp | 29 +++++++--- .../model/partitioning_replication.hpp | 14 ++++- .../partitioning/partitioners/generic_FM.hpp | 4 +- .../partitioners/partitioning_ILP_base.hpp | 24 ++++++-- tests/hypergraph_and_partition.cpp | 26 +++++---- tests/ilp_hypergraph_partitioning.cpp | 17 ++++-- 9 files changed, 135 insertions(+), 54 deletions(-) diff --git a/include/osp/auxiliary/io/mtx_hypergraph_file_reader.hpp b/include/osp/auxiliary/io/mtx_hypergraph_file_reader.hpp index d112e5fe..c409c04a 100644 --- a/include/osp/auxiliary/io/mtx_hypergraph_file_reader.hpp +++ b/include/osp/auxiliary/io/mtx_hypergraph_file_reader.hpp @@ -79,7 +79,8 @@ bool 
readHypergraphMartixMarketFormat(std::ifstream& infile, Hypergraph& hgraph) hgraph.reset(num_nodes, 0); for (unsigned node = 0; node < num_nodes; ++node) { - hgraph.set_vertex_weight(node, 1); + hgraph.set_vertex_work_weight(node, 1); + hgraph.set_vertex_memory_weight(node, 1); } std::vector> row_hyperedges(static_cast(M_row)); diff --git a/include/osp/partitioning/model/hypergraph.hpp b/include/osp/partitioning/model/hypergraph.hpp index 1f9fce40..421f2d8c 100644 --- a/include/osp/partitioning/model/hypergraph.hpp +++ b/include/osp/partitioning/model/hypergraph.hpp @@ -30,7 +30,8 @@ class Hypergraph { Hypergraph() = default; Hypergraph(unsigned num_vertices_, unsigned num_hyperedges_) - : Num_vertices(num_vertices_), Num_hyperedges(num_hyperedges_), vertex_weights(num_vertices_, 1), hyperedge_weights(num_hyperedges_, 1), + : Num_vertices(num_vertices_), Num_hyperedges(num_hyperedges_), vertex_work_weights(num_vertices_, 1), + vertex_memory_weights(num_vertices_, 1), hyperedge_weights(num_hyperedges_, 1), incident_hyperedges_to_vertex(num_vertices_), vertices_in_hyperedge(num_hyperedges_){} Hypergraph(const Hypergraph &other) = default; @@ -41,17 +42,20 @@ class Hypergraph { inline unsigned num_vertices() const { return Num_vertices; } inline unsigned num_hyperedges() const { return Num_hyperedges; } inline unsigned num_pins() const { return Num_pins; } - inline int get_vertex_weight(unsigned node) const { return vertex_weights[node]; } + inline int get_vertex_work_weight(unsigned node) const { return vertex_work_weights[node]; } + inline int get_vertex_memory_weight(unsigned node) const { return vertex_memory_weights[node]; } inline int get_hyperedge_weight(unsigned hyperedge) const { return hyperedge_weights[hyperedge]; } void add_pin(unsigned vertex_idx, unsigned hyperedge_idx); - void add_vertex(int weight = 1); + void add_vertex(int work_weight = 1, int memory_weight = 1); void add_empty_hyperedge(int weight = 1); void add_hyperedge(const std::vector& pins, int weight = 1); - void set_vertex_weight(unsigned vertex_idx, int weight); + void set_vertex_work_weight(unsigned vertex_idx, int weight); + void set_vertex_memory_weight(unsigned vertex_idx, int weight); void set_hyperedge_weight(unsigned hyperedge_idx, int weight); - int compute_total_vertex_weight() const; + int compute_total_vertex_work_weight() const; + int compute_total_vertex_memory_weight() const; void clear(); void reset(unsigned num_vertices_, unsigned num_hyperedges_); @@ -68,7 +72,8 @@ class Hypergraph { private: unsigned Num_vertices = 0, Num_hyperedges = 0, Num_pins = 0; - std::vector vertex_weights; + std::vector vertex_work_weights; + std::vector vertex_memory_weights; std::vector hyperedge_weights; std::vector> incident_hyperedges_to_vertex; @@ -92,9 +97,10 @@ void Hypergraph::add_pin(unsigned vertex_idx, unsigned hyperedge_idx) } } -void Hypergraph::add_vertex(int weight) +void Hypergraph::add_vertex(int work_weight, int memory_weight) { - vertex_weights.push_back(weight); + vertex_work_weights.push_back(work_weight); + vertex_memory_weights.push_back(memory_weight); incident_hyperedges_to_vertex.emplace_back(); ++Num_vertices; } @@ -116,12 +122,20 @@ void Hypergraph::add_hyperedge(const std::vector& pins, int weight) Num_pins += static_cast(pins.size()); } -void Hypergraph::set_vertex_weight(unsigned vertex_idx, int weight) +void Hypergraph::set_vertex_work_weight(unsigned vertex_idx, int weight) { if(vertex_idx >= Num_vertices) throw std::invalid_argument("Invalid Argument while setting vertex weight: vertex 
index out of range."); else - vertex_weights[vertex_idx] = weight; + vertex_work_weights[vertex_idx] = weight; +} + +void Hypergraph::set_vertex_memory_weight(unsigned vertex_idx, int weight) +{ + if(vertex_idx >= Num_vertices) + throw std::invalid_argument("Invalid Argument while setting vertex weight: vertex index out of range."); + else + vertex_memory_weights[vertex_idx] = weight; } void Hypergraph::set_hyperedge_weight(unsigned hyperedge_idx, int weight) @@ -132,11 +146,19 @@ void Hypergraph::set_hyperedge_weight(unsigned hyperedge_idx, int weight) hyperedge_weights[hyperedge_idx] = weight; } -int Hypergraph::compute_total_vertex_weight() const +int Hypergraph::compute_total_vertex_work_weight() const +{ + int total = 0; + for(unsigned node = 0; node < Num_vertices; ++node) + total += vertex_work_weights[node]; + return total; +} + +int Hypergraph::compute_total_vertex_memory_weight() const { int total = 0; for(unsigned node = 0; node < Num_vertices; ++node) - total += vertex_weights[node]; + total += vertex_memory_weights[node]; return total; } @@ -146,7 +168,8 @@ void Hypergraph::clear() Num_hyperedges = 0; Num_pins = 0; - vertex_weights.clear(); + vertex_work_weights.clear(); + vertex_memory_weights.clear(); hyperedge_weights.clear(); incident_hyperedges_to_vertex.clear(); vertices_in_hyperedge.clear(); @@ -159,7 +182,8 @@ void Hypergraph::reset(unsigned num_vertices_, unsigned num_hyperedges_) Num_vertices = num_vertices_; Num_hyperedges = num_hyperedges_; - vertex_weights.resize(num_vertices_, 1); + vertex_work_weights.resize(num_vertices_, 1); + vertex_memory_weights.resize(num_vertices_, 1); hyperedge_weights.resize(num_hyperedges_, 1); incident_hyperedges_to_vertex.resize(num_vertices_); vertices_in_hyperedge.resize(num_hyperedges_); @@ -171,7 +195,8 @@ void Hypergraph::convert_from_cdag_as_dag(const Graph_t& dag) reset(static_cast(dag.num_vertices()), 0); for(const auto &node : dag.vertices()) { - set_vertex_weight(static_cast(node), static_cast(dag.vertex_work_weight(node))); + set_vertex_work_weight(static_cast(node), static_cast(dag.vertex_work_weight(node))); + set_vertex_memory_weight(static_cast(node), static_cast(dag.vertex_mem_weight(node))); for (const auto &child : dag.children(node)) add_hyperedge({static_cast(node), static_cast(child)}); // TODO add edge weights if present } @@ -183,7 +208,8 @@ void Hypergraph::convert_from_cdag_as_hyperdag(const Graph_t& dag) reset(static_cast(dag.num_vertices()), 0); for(const auto &node : dag.vertices()) { - set_vertex_weight(static_cast(node), static_cast(dag.vertex_work_weight(node))); + set_vertex_work_weight(static_cast(node), static_cast(dag.vertex_work_weight(node))); + set_vertex_memory_weight(static_cast(node), static_cast(dag.vertex_mem_weight(node))); if(dag.out_degree(node) == 0) continue; std::vector new_hyperedge({static_cast(node)}); diff --git a/include/osp/partitioning/model/partitioning.hpp b/include/osp/partitioning/model/partitioning.hpp index 0f99781e..8e670784 100644 --- a/include/osp/partitioning/model/partitioning.hpp +++ b/include/osp/partitioning/model/partitioning.hpp @@ -150,17 +150,25 @@ int Partitioning::computeCutNetCost() const { } bool Partitioning::satisfiesBalanceConstraint() const { - std::vector weight(instance->getNumberOfPartitions(), 0); + std::vector work_weight(instance->getNumberOfPartitions(), 0); + std::vector memory_weight(instance->getNumberOfPartitions(), 0); for (unsigned node = 0; node < node_to_partition_assignment.size(); ++node) { if (node_to_partition_assignment[node] > 
instance->getNumberOfPartitions()) throw std::invalid_argument("Invalid Argument while checking balance constraint: partition ID out of range."); else - weight[node_to_partition_assignment[node]] += instance->getHypergraph().get_vertex_weight(node); + { + work_weight[node_to_partition_assignment[node]] += instance->getHypergraph().get_vertex_work_weight(node); + memory_weight[node_to_partition_assignment[node]] += instance->getHypergraph().get_vertex_memory_weight(node); + } } for(unsigned part = 0; part < instance->getNumberOfPartitions(); ++part) - if(weight[part] > instance->getMaxWeightPerPartition()) + { + if(work_weight[part] > instance->getMaxWorkWeightPerPartition()) return false; + if(memory_weight[part] > instance->getMaxMemoryWeightPerPartition()) + return false; + } return true; }; diff --git a/include/osp/partitioning/model/partitioning_problem.hpp b/include/osp/partitioning/model/partitioning_problem.hpp index 33e8dc1e..4bc8710c 100644 --- a/include/osp/partitioning/model/partitioning_problem.hpp +++ b/include/osp/partitioning/model/partitioning_problem.hpp @@ -32,7 +32,8 @@ class PartitioningProblem { Hypergraph hgraph; unsigned nr_of_partitions; - int max_weight_per_partition; + int max_work_weight_per_partition; + int max_memory_weight_per_partition; bool allows_replication = false; @@ -40,11 +41,13 @@ class PartitioningProblem { PartitioningProblem() = default; - PartitioningProblem(const Hypergraph &hgraph_, unsigned nr_parts_ = 2, int max_weight_ = std::numeric_limits::max()) - : hgraph(hgraph_), nr_of_partitions(nr_parts_), max_weight_per_partition(max_weight_) {} + PartitioningProblem(const Hypergraph &hgraph_, unsigned nr_parts_ = 2, int max_work_weight_ = std::numeric_limits::max(), + int max_memory_weight_ = std::numeric_limits::max()) : hgraph(hgraph_), nr_of_partitions(nr_parts_), + max_work_weight_per_partition(max_work_weight_), max_memory_weight_per_partition(max_memory_weight_) {} - PartitioningProblem(const Hypergraph &&hgraph_, unsigned nr_parts_ = 2, int max_weight_ = std::numeric_limits::max()) - : hgraph(hgraph_), nr_of_partitions(nr_parts_), max_weight_per_partition(max_weight_) {} + PartitioningProblem(const Hypergraph &&hgraph_, unsigned nr_parts_ = 2, int max_work_weight_ = std::numeric_limits::max(), + int max_memory_weight_ = std::numeric_limits::max()) : hgraph(hgraph_), nr_of_partitions(nr_parts_), + max_work_weight_per_partition(max_work_weight_), max_memory_weight_per_partition(max_memory_weight_) {} PartitioningProblem(const PartitioningProblem &other) = default; PartitioningProblem(PartitioningProblem &&other) = default; @@ -57,7 +60,8 @@ class PartitioningProblem { inline Hypergraph &getHypergraph() { return hgraph; } inline unsigned getNumberOfPartitions() const { return nr_of_partitions; } - inline int getMaxWeightPerPartition() const { return max_weight_per_partition; } + inline int getMaxWorkWeightPerPartition() const { return max_work_weight_per_partition; } + inline int getMaxMemoryWeightPerPartition() const { return max_memory_weight_per_partition; } inline bool getAllowsReplication() const { return allows_replication; } // setters @@ -66,12 +70,19 @@ class PartitioningProblem { inline void setNumberOfPartitions(unsigned nr_parts_) { nr_of_partitions = nr_parts_; } inline void setAllowsReplication(bool allowed_) { allows_replication = allowed_; } - inline void setMaxWeightExplicitly(int max_weight_) { max_weight_per_partition = max_weight_; } - void setMaxWeightViaImbalanceFactor(double imbalance){ + inline void 
setMaxWorkWeightExplicitly(int max_weight_) { max_work_weight_per_partition = max_weight_; } + void setMaxWorkWeightViaImbalanceFactor(double imbalance){ if(imbalance < 0 ) throw std::invalid_argument("Invalid Argument while setting imbalance parameter: parameter is negative."); else - max_weight_per_partition = static_cast(ceil(hgraph.compute_total_vertex_weight()/ static_cast(nr_of_partitions) * (1.0+imbalance))); + max_work_weight_per_partition = static_cast(ceil(hgraph.compute_total_vertex_work_weight()/ static_cast(nr_of_partitions) * (1.0+imbalance))); + } + inline void setMaxMemoryWeightExplicitly(int max_weight_) { max_memory_weight_per_partition = max_weight_; } + void setMaxMemoryWeightViaImbalanceFactor(double imbalance){ + if(imbalance < 0 ) + throw std::invalid_argument("Invalid Argument while setting imbalance parameter: parameter is negative."); + else + max_memory_weight_per_partition = static_cast(ceil(hgraph.compute_total_vertex_memory_weight()/ static_cast(nr_of_partitions) * (1.0+imbalance))); } }; diff --git a/include/osp/partitioning/model/partitioning_replication.hpp b/include/osp/partitioning/model/partitioning_replication.hpp index 4db8a5be..03450e30 100644 --- a/include/osp/partitioning/model/partitioning_replication.hpp +++ b/include/osp/partitioning/model/partitioning_replication.hpp @@ -191,18 +191,26 @@ int PartitioningWithReplication::computeCutNetCost() const { } bool PartitioningWithReplication::satisfiesBalanceConstraint() const { - std::vector weight(instance->getNumberOfPartitions(), 0); + std::vector work_weight(instance->getNumberOfPartitions(), 0); + std::vector memory_weight(instance->getNumberOfPartitions(), 0); for (unsigned node = 0; node < node_to_partitions_assignment.size(); ++node) for(unsigned part : node_to_partitions_assignment[node]){ if (part > instance->getNumberOfPartitions()) throw std::invalid_argument("Invalid Argument while checking balance constraint: partition ID out of range."); else - weight[part] += instance->getHypergraph().get_vertex_weight(node); + { + work_weight[part] += instance->getHypergraph().get_vertex_work_weight(node); + memory_weight[part] += instance->getHypergraph().get_vertex_memory_weight(node); + } } for(unsigned part = 0; part < instance->getNumberOfPartitions(); ++part) - if(weight[part] > instance->getMaxWeightPerPartition()) + { + if(work_weight[part] > instance->getMaxWorkWeightPerPartition()) return false; + if(memory_weight[part] > instance->getMaxMemoryWeightPerPartition()) + return false; + } return true; }; diff --git a/include/osp/partitioning/partitioners/generic_FM.hpp b/include/osp/partitioning/partitioners/generic_FM.hpp index c41ed880..ebca500d 100644 --- a/include/osp/partitioning/partitioners/generic_FM.hpp +++ b/include/osp/partitioning/partitioners/generic_FM.hpp @@ -63,8 +63,8 @@ void GenericFM::ImprovePartitioning(Partitioning& partition) max_degree = std::max(max_degree, static_cast(Hgraph.get_incident_hyperedges(node).size())); if(max_nodes_in_part == 0) // if not initialized - max_nodes_in_part = static_cast(ceil(static_cast(Hgraph.num_vertices()) * static_cast(partition.getInstance().getMaxWeightPerPartition()) - / static_cast(Hgraph.compute_total_vertex_weight()) )); + max_nodes_in_part = static_cast(ceil(static_cast(Hgraph.num_vertices()) * static_cast(partition.getInstance().getMaxWorkWeightPerPartition()) + / static_cast(Hgraph.compute_total_vertex_work_weight()) )); for(unsigned pass_idx = 0; pass_idx < max_number_of_passes; ++pass_idx) { diff --git 
a/include/osp/partitioning/partitioners/partitioning_ILP_base.hpp b/include/osp/partitioning/partitioners/partitioning_ILP_base.hpp index a6e4fcc2..aa339a32 100644 --- a/include/osp/partitioning/partitioners/partitioning_ILP_base.hpp +++ b/include/osp/partitioning/partitioners/partitioning_ILP_base.hpp @@ -89,14 +89,28 @@ void HypergraphPartitioningILPBase::setupFundamentalVariablesConstraintsObjectiv hyperedge_uses_partition[hyperedge] = model.AddVars(static_cast(numberOfParts), COPT_BINARY, "hyperedge_uses_partition"); // partition size constraints - for (unsigned part = 0; part < numberOfParts; part++) + if(instance.getMaxWorkWeightPerPartition() < std::numeric_limits::max()) { - Expr expr; - for (unsigned node = 0; node < numberOfVertices; node++) - expr += instance.getHypergraph().get_vertex_weight(node) * node_in_partition[node][static_cast(part)]; + for (unsigned part = 0; part < numberOfParts; part++) + { + Expr expr; + for (unsigned node = 0; node < numberOfVertices; node++) + expr += instance.getHypergraph().get_vertex_work_weight(node) * node_in_partition[node][static_cast(part)]; - model.AddConstr(expr <= instance.getMaxWeightPerPartition()); + model.AddConstr(expr <= instance.getMaxWorkWeightPerPartition()); + } } + if(instance.getMaxMemoryWeightPerPartition() < std::numeric_limits::max()) + { + for (unsigned part = 0; part < numberOfParts; part++) + { + Expr expr; + for (unsigned node = 0; node < numberOfVertices; node++) + expr += instance.getHypergraph().get_vertex_memory_weight(node) * node_in_partition[node][static_cast(part)]; + + model.AddConstr(expr <= instance.getMaxMemoryWeightPerPartition()); + } + } // set objective Expr expr; diff --git a/tests/hypergraph_and_partition.cpp b/tests/hypergraph_and_partition.cpp index 3670128a..bd9b661b 100644 --- a/tests/hypergraph_and_partition.cpp +++ b/tests/hypergraph_and_partition.cpp @@ -93,13 +93,13 @@ BOOST_AUTO_TEST_CASE(Hypergraph_and_Partition_test) { BOOST_CHECK(connectivityCost >= cutNetCost); for(unsigned node = 0; node < Hgraph.num_vertices(); ++node) - instance.getHypergraph().set_vertex_weight(node, 1); + instance.getHypergraph().set_vertex_work_weight(node, 1); - instance.setMaxWeightViaImbalanceFactor(0); + instance.setMaxWorkWeightViaImbalanceFactor(0); BOOST_CHECK(partition.satisfiesBalanceConstraint()); instance.setNumberOfPartitions(5); - instance.setMaxWeightViaImbalanceFactor(0); + instance.setMaxWorkWeightViaImbalanceFactor(0); BOOST_CHECK(!partition.satisfiesBalanceConstraint()); for(unsigned node = 0; node < Hgraph.num_vertices(); ++node) @@ -108,6 +108,12 @@ BOOST_AUTO_TEST_CASE(Hypergraph_and_Partition_test) { BOOST_CHECK(partition.satisfiesBalanceConstraint()); BOOST_CHECK(partition.computeConnectivityCost() >= partition.computeCutNetCost()); + for(unsigned node = 0; node < Hgraph.num_vertices(); ++node) + instance.getHypergraph().set_vertex_memory_weight(node, 1); + instance.setMaxMemoryWeightExplicitly(10); + BOOST_CHECK(partition.satisfiesBalanceConstraint() == false); + instance.setMaxMemoryWeightExplicitly(std::numeric_limits::max()); + file_writer::write_txt(std::cout, partition); @@ -115,7 +121,7 @@ BOOST_AUTO_TEST_CASE(Hypergraph_and_Partition_test) { instance.getHypergraph().convert_from_cdag_as_hyperdag(DAG); instance.setNumberOfPartitions(3); - instance.setMaxWeightExplicitly(30); + instance.setMaxWorkWeightExplicitly(30); PartitioningWithReplication partition_with_rep(instance); for(unsigned node = 0; node < Hgraph.num_vertices(); ++node) partition_with_rep.setAssignedPartitions(node, {node 
% 3}); @@ -124,14 +130,14 @@ BOOST_AUTO_TEST_CASE(Hypergraph_and_Partition_test) { BOOST_CHECK(partition_with_rep.computeCutNetCost() == cutNetCost); BOOST_CHECK(partition_with_rep.computeConnectivityCost() == connectivityCost); - instance.setMaxWeightExplicitly(60); + instance.setMaxWorkWeightExplicitly(60); for(unsigned node = 0; node < Hgraph.num_vertices(); ++node) partition_with_rep.setAssignedPartitions(node, {node % 3, (node+1)%3}); BOOST_CHECK(partition_with_rep.satisfiesBalanceConstraint()); BOOST_CHECK(partition_with_rep.computeConnectivityCost() >= partition_with_rep.computeCutNetCost()); - instance.setMaxWeightExplicitly(Hgraph.compute_total_vertex_weight()); + instance.setMaxWorkWeightExplicitly(Hgraph.compute_total_vertex_work_weight()); for(unsigned node = 0; node < Hgraph.num_vertices(); ++node) partition_with_rep.setAssignedPartitions(node, {0, 1, 2}); @@ -145,9 +151,9 @@ BOOST_AUTO_TEST_CASE(Hypergraph_and_Partition_test) { // Generic FM instance.setNumberOfPartitions(2); - instance.setMaxWeightExplicitly(/*35*/ 4000); + instance.setMaxWorkWeightExplicitly(/*35*/ 4000); for(unsigned node = 0; node < instance.getHypergraph().num_vertices(); ++node) - instance.getHypergraph().set_vertex_weight(node, 1); + instance.getHypergraph().set_vertex_work_weight(node, 1); Partitioning partition_to_improve(instance); for(unsigned node = 0; node < instance.getHypergraph().num_vertices(); ++node) @@ -168,9 +174,9 @@ BOOST_AUTO_TEST_CASE(Hypergraph_and_Partition_test) { (cwd / "data/spaa/large/instance_CG_N24_K22_nzP0d2.hdag").string(), larger_DAG); instance.getHypergraph().convert_from_cdag_as_hyperdag(larger_DAG); - instance.setMaxWeightExplicitly(4000); + instance.setMaxWorkWeightExplicitly(4000); for(unsigned node = 0; node < instance.getHypergraph().num_vertices(); ++node) - instance.getHypergraph().set_vertex_weight(node, 1); + instance.getHypergraph().set_vertex_work_weight(node, 1); partition_to_improve.resetPartition(); for(unsigned node = 0; node < instance.getHypergraph().num_vertices(); ++node) diff --git a/tests/ilp_hypergraph_partitioning.cpp b/tests/ilp_hypergraph_partitioning.cpp index 37d41595..fcdba56f 100644 --- a/tests/ilp_hypergraph_partitioning.cpp +++ b/tests/ilp_hypergraph_partitioning.cpp @@ -72,8 +72,15 @@ BOOST_AUTO_TEST_CASE(test_full) { partitioner.computePartitioning(partition); BOOST_CHECK(partition.satisfiesBalanceConstraint()); - BOOST_CHECK(partition.computeConnectivityCost() >= partition.computeCutNetCost()); + int cutNetCost = partition.computeCutNetCost(), connectivityCost = partition.computeConnectivityCost(); + BOOST_CHECK(connectivityCost >= cutNetCost); + instance.setMaxMemoryWeightExplicitly(37); + partitioner.computePartitioning(partition); + BOOST_CHECK(partition.satisfiesBalanceConstraint()); + BOOST_CHECK(cutNetCost == partition.computeCutNetCost()); + BOOST_CHECK(connectivityCost == partition.computeConnectivityCost()); + instance.setMaxMemoryWeightExplicitly(std::numeric_limits::max()); // ILP with replication @@ -94,7 +101,7 @@ BOOST_AUTO_TEST_CASE(test_full) { BOOST_CHECK(partition_rep.satisfiesBalanceConstraint()); BOOST_CHECK(partition_rep.computeConnectivityCost() == 0); - instance.setMaxWeightExplicitly(60); + instance.setMaxWorkWeightExplicitly(60); for(unsigned node = 0; node < Hgraph.num_vertices(); ++node) partition_rep.setAssignedPartitions(node, {node % 3, (node+1)%3}); @@ -102,8 +109,8 @@ BOOST_AUTO_TEST_CASE(test_full) { BOOST_CHECK(partition_rep.satisfiesBalanceConstraint()); 
BOOST_CHECK(partition_rep.computeConnectivityCost() == 0); - // same tests with other replicaiton formulation - instance.setMaxWeightExplicitly(35); + // same tests with other replication formulation + instance.setMaxWorkWeightExplicitly(35); partitioner_rep.setReplicationModel(HypergraphPartitioningILPWithReplication::REPLICATION_MODEL_IN_ILP::GENERAL); partitioner_rep.setUseInitialSolution(false); partitioner_rep.computePartitioning(partition_rep); @@ -119,7 +126,7 @@ BOOST_AUTO_TEST_CASE(test_full) { BOOST_CHECK(partition_rep.satisfiesBalanceConstraint()); BOOST_CHECK(partition_rep.computeConnectivityCost() == 0); - instance.setMaxWeightExplicitly(60); + instance.setMaxWorkWeightExplicitly(60); for(unsigned node = 0; node < Hgraph.num_vertices(); ++node) partition_rep.setAssignedPartitions(node, {node % 3, (node+1)%3}); From d901d2609e9f82cb0650cb77cac1caa19e7ae49a Mon Sep 17 00:00:00 2001 From: ppapp Date: Tue, 14 Oct 2025 11:15:15 +0200 Subject: [PATCH 4/9] templating the partitioning part --- .../io/mtx_hypergraph_file_reader.hpp | 35 ++--- .../auxiliary/io/partitioning_file_writer.hpp | 16 +- include/osp/partitioning/model/hypergraph.hpp | 140 +++++++++++------- .../osp/partitioning/model/partitioning.hpp | 63 ++++---- .../model/partitioning_problem.hpp | 45 +++--- .../model/partitioning_replication.hpp | 68 +++++---- .../partitioning/partitioners/generic_FM.hpp | 68 ++++----- .../partitioners/partitioning_ILP.hpp | 51 ++++--- .../partitioners/partitioning_ILP_base.hpp | 38 ++--- .../partitioning_ILP_replication.hpp | 80 +++++----- tests/hypergraph_and_partition.cpp | 6 +- tests/ilp_hypergraph_partitioning.cpp | 6 +- 12 files changed, 335 insertions(+), 281 deletions(-) diff --git a/include/osp/auxiliary/io/mtx_hypergraph_file_reader.hpp b/include/osp/auxiliary/io/mtx_hypergraph_file_reader.hpp index c409c04a..7d0bbe8f 100644 --- a/include/osp/auxiliary/io/mtx_hypergraph_file_reader.hpp +++ b/include/osp/auxiliary/io/mtx_hypergraph_file_reader.hpp @@ -33,7 +33,8 @@ namespace osp { namespace file_reader { // reads a matrix into Hypergraph format, where nonzeros are vertices, and rows/columns are hyperedges -bool readHypergraphMartixMarketFormat(std::ifstream& infile, Hypergraph& hgraph) { +template +bool readHypergraphMartixMarketFormat(std::ifstream& infile, Hypergraph& hgraph) { std::string line; @@ -68,23 +69,16 @@ bool readHypergraphMartixMarketFormat(std::ifstream& infile, Hypergraph& hgraph) return false; } - const unsigned num_nodes = static_cast(nEntries); - if (num_nodes > std::numeric_limits::max()) { - std::cerr << "Error: Matrix dimension too large for vertex type.\n"; - return false; - } - - std::vector node_work_wts(num_nodes, 0); - std::vector node_comm_wts(num_nodes, 1); + const index_type num_nodes = static_cast(nEntries); hgraph.reset(num_nodes, 0); - for (unsigned node = 0; node < num_nodes; ++node) { - hgraph.set_vertex_work_weight(node, 1); - hgraph.set_vertex_memory_weight(node, 1); + for (index_type node = 0; node < num_nodes; ++node) { + hgraph.set_vertex_work_weight(node, static_cast(1)); + hgraph.set_vertex_memory_weight(node, static_cast(1)); } - std::vector> row_hyperedges(static_cast(M_row)); - std::vector> column_hyperedges(static_cast(M_col)); + std::vector> row_hyperedges(static_cast(M_row)); + std::vector> column_hyperedges(static_cast(M_col)); int entries_read = 0; while (entries_read < nEntries && std::getline(infile, line)) { @@ -110,13 +104,13 @@ bool readHypergraphMartixMarketFormat(std::ifstream& infile, Hypergraph& hgraph) return false; } - if 
(static_cast(row) >= num_nodes || static_cast(col) >= num_nodes) { + if (static_cast(row) >= num_nodes || static_cast(col) >= num_nodes) { std::cerr << "Error: Index exceeds vertex type limit.\n"; return false; } - row_hyperedges[static_cast(row)].push_back(static_cast(entries_read)); - column_hyperedges[static_cast(col)].push_back(static_cast(entries_read)); + row_hyperedges[static_cast(row)].push_back(static_cast(entries_read)); + column_hyperedges[static_cast(col)].push_back(static_cast(entries_read)); ++entries_read; } @@ -133,18 +127,19 @@ bool readHypergraphMartixMarketFormat(std::ifstream& infile, Hypergraph& hgraph) } } - for(unsigned row = 0; row < static_cast(M_row); ++row) + for(index_type row = 0; row < static_cast(M_row); ++row) if(!row_hyperedges[row].empty()) hgraph.add_hyperedge(row_hyperedges[row]); - for(unsigned col = 0; col < static_cast(M_col); ++col) + for(index_type col = 0; col < static_cast(M_col); ++col) if(!column_hyperedges[col].empty()) hgraph.add_hyperedge(column_hyperedges[col]); return true; } -bool readHypergraphMartixMarketFormat(const std::string& filename, Hypergraph& hgraph) { +template +bool readHypergraphMartixMarketFormat(const std::string& filename, Hypergraph& hgraph) { // Ensure the file is .mtx format if (std::filesystem::path(filename).extension() != ".mtx") { std::cerr << "Error: Only .mtx files are accepted.\n"; diff --git a/include/osp/auxiliary/io/partitioning_file_writer.hpp b/include/osp/auxiliary/io/partitioning_file_writer.hpp index 60f836f5..5bf71810 100644 --- a/include/osp/auxiliary/io/partitioning_file_writer.hpp +++ b/include/osp/auxiliary/io/partitioning_file_writer.hpp @@ -25,24 +25,27 @@ limitations under the License. namespace osp { namespace file_writer { -void write_txt(std::ostream &os, const Partitioning &partition) { +template +void write_txt(std::ostream &os, const Partitioning &partition) { os << "\%\% Partitioning for " << partition.getInstance().getNumberOfPartitions() << " parts." << std::endl; - for(unsigned node = 0; node < partition.getInstance().getHypergraph().num_vertices(); ++node) + for(index_type node = 0; node < partition.getInstance().getHypergraph().num_vertices(); ++node) os << node << " " << partition.assignedPartition(node) << std::endl; } -void write_txt(const std::string &filename, const Partitioning &partition) { +template +void write_txt(const std::string &filename, const Partitioning &partition) { std::ofstream os(filename); write_txt(os, partition); } -void write_txt(std::ostream &os, const PartitioningWithReplication &partition) { +template +void write_txt(std::ostream &os, const PartitioningWithReplication &partition) { os << "\%\% Partitioning for " << partition.getInstance().getNumberOfPartitions() << " parts with replication." 
<< std::endl; - for(unsigned node = 0; node < partition.getInstance().getHypergraph().num_vertices(); ++node) + for(index_type node = 0; node < partition.getInstance().getHypergraph().num_vertices(); ++node) { os << node; for(unsigned part : partition.assignedPartitions(node)) @@ -51,7 +54,8 @@ void write_txt(std::ostream &os, const PartitioningWithReplication &partition) { } } -void write_txt(const std::string &filename, const PartitioningWithReplication &partition) { +template +void write_txt(const std::string &filename, const PartitioningWithReplication &partition) { std::ofstream os(filename); write_txt(os, partition); } diff --git a/include/osp/partitioning/model/hypergraph.hpp b/include/osp/partitioning/model/hypergraph.hpp index 421f2d8c..0aafe973 100644 --- a/include/osp/partitioning/model/hypergraph.hpp +++ b/include/osp/partitioning/model/hypergraph.hpp @@ -20,48 +20,50 @@ limitations under the License. #include #include #include "osp/concepts/computational_dag_concept.hpp" +#include "osp/graph_algorithms/directed_graph_edge_desc_util.hpp" namespace osp { +template class Hypergraph { public: Hypergraph() = default; - Hypergraph(unsigned num_vertices_, unsigned num_hyperedges_) + Hypergraph(index_type num_vertices_, index_type num_hyperedges_) : Num_vertices(num_vertices_), Num_hyperedges(num_hyperedges_), vertex_work_weights(num_vertices_, 1), vertex_memory_weights(num_vertices_, 1), hyperedge_weights(num_hyperedges_, 1), incident_hyperedges_to_vertex(num_vertices_), vertices_in_hyperedge(num_hyperedges_){} - Hypergraph(const Hypergraph &other) = default; - Hypergraph &operator=(const Hypergraph &other) = default; + Hypergraph(const Hypergraph &other) = default; + Hypergraph &operator=(const Hypergraph &other) = default; virtual ~Hypergraph() = default; - inline unsigned num_vertices() const { return Num_vertices; } - inline unsigned num_hyperedges() const { return Num_hyperedges; } - inline unsigned num_pins() const { return Num_pins; } - inline int get_vertex_work_weight(unsigned node) const { return vertex_work_weights[node]; } - inline int get_vertex_memory_weight(unsigned node) const { return vertex_memory_weights[node]; } - inline int get_hyperedge_weight(unsigned hyperedge) const { return hyperedge_weights[hyperedge]; } + inline index_type num_vertices() const { return Num_vertices; } + inline index_type num_hyperedges() const { return Num_hyperedges; } + inline index_type num_pins() const { return Num_pins; } + inline workw_type get_vertex_work_weight(index_type node) const { return vertex_work_weights[node]; } + inline memw_type get_vertex_memory_weight(index_type node) const { return vertex_memory_weights[node]; } + inline commw_type get_hyperedge_weight(index_type hyperedge) const { return hyperedge_weights[hyperedge]; } - void add_pin(unsigned vertex_idx, unsigned hyperedge_idx); - void add_vertex(int work_weight = 1, int memory_weight = 1); - void add_empty_hyperedge(int weight = 1); - void add_hyperedge(const std::vector& pins, int weight = 1); - void set_vertex_work_weight(unsigned vertex_idx, int weight); - void set_vertex_memory_weight(unsigned vertex_idx, int weight); - void set_hyperedge_weight(unsigned hyperedge_idx, int weight); + void add_pin(index_type vertex_idx, index_type hyperedge_idx); + void add_vertex(workw_type work_weight = 1, memw_type memory_weight = 1); + void add_empty_hyperedge(commw_type weight = 1); + void add_hyperedge(const std::vector& pins, commw_type weight = 1); + void set_vertex_work_weight(index_type vertex_idx, workw_type weight); + 
void set_vertex_memory_weight(index_type vertex_idx, memw_type weight); + void set_hyperedge_weight(index_type hyperedge_idx, commw_type weight); - int compute_total_vertex_work_weight() const; - int compute_total_vertex_memory_weight() const; + workw_type compute_total_vertex_work_weight() const; + memw_type compute_total_vertex_memory_weight() const; void clear(); - void reset(unsigned num_vertices_, unsigned num_hyperedges_); + void reset(index_type num_vertices_, index_type num_hyperedges_); - inline const std::vector &get_incident_hyperedges(unsigned vertex) const { return incident_hyperedges_to_vertex[vertex]; } - inline const std::vector &get_vertices_in_hyperedge(unsigned hyperedge) const { return vertices_in_hyperedge[hyperedge]; } + inline const std::vector &get_incident_hyperedges(index_type vertex) const { return incident_hyperedges_to_vertex[vertex]; } + inline const std::vector &get_vertices_in_hyperedge(index_type hyperedge) const { return vertices_in_hyperedge[hyperedge]; } template void convert_from_cdag_as_dag(const Graph_t& dag); @@ -70,17 +72,18 @@ class Hypergraph { void convert_from_cdag_as_hyperdag(const Graph_t& dag); private: - unsigned Num_vertices = 0, Num_hyperedges = 0, Num_pins = 0; + index_type Num_vertices = 0, Num_hyperedges = 0, Num_pins = 0; - std::vector vertex_work_weights; - std::vector vertex_memory_weights; - std::vector hyperedge_weights; + std::vector vertex_work_weights; + std::vector vertex_memory_weights; + std::vector hyperedge_weights; - std::vector> incident_hyperedges_to_vertex; - std::vector> vertices_in_hyperedge; + std::vector> incident_hyperedges_to_vertex; + std::vector> vertices_in_hyperedge; }; -void Hypergraph::add_pin(unsigned vertex_idx, unsigned hyperedge_idx) +template +void Hypergraph::add_pin(index_type vertex_idx, index_type hyperedge_idx) { if(vertex_idx >= Num_vertices) { @@ -97,7 +100,8 @@ void Hypergraph::add_pin(unsigned vertex_idx, unsigned hyperedge_idx) } } -void Hypergraph::add_vertex(int work_weight, int memory_weight) +template +void Hypergraph::add_vertex(workw_type work_weight, memw_type memory_weight) { vertex_work_weights.push_back(work_weight); vertex_memory_weights.push_back(memory_weight); @@ -105,24 +109,27 @@ void Hypergraph::add_vertex(int work_weight, int memory_weight) ++Num_vertices; } -void Hypergraph::add_empty_hyperedge(int weight) +template +void Hypergraph::add_empty_hyperedge(commw_type weight) { vertices_in_hyperedge.emplace_back(); hyperedge_weights.push_back(weight); ++Num_hyperedges; } -void Hypergraph::add_hyperedge(const std::vector& pins, int weight) +template +void Hypergraph::add_hyperedge(const std::vector& pins, commw_type weight) { vertices_in_hyperedge.emplace_back(pins); hyperedge_weights.push_back(weight); - for(unsigned vertex : pins) + for(index_type vertex : pins) incident_hyperedges_to_vertex[vertex].push_back(Num_hyperedges); ++Num_hyperedges; - Num_pins += static_cast(pins.size()); + Num_pins += static_cast(pins.size()); } -void Hypergraph::set_vertex_work_weight(unsigned vertex_idx, int weight) +template +void Hypergraph::set_vertex_work_weight(index_type vertex_idx, workw_type weight) { if(vertex_idx >= Num_vertices) throw std::invalid_argument("Invalid Argument while setting vertex weight: vertex index out of range."); @@ -130,7 +137,8 @@ void Hypergraph::set_vertex_work_weight(unsigned vertex_idx, int weight) vertex_work_weights[vertex_idx] = weight; } -void Hypergraph::set_vertex_memory_weight(unsigned vertex_idx, int weight) +template +void 
Hypergraph::set_vertex_memory_weight(index_type vertex_idx, memw_type weight) { if(vertex_idx >= Num_vertices) throw std::invalid_argument("Invalid Argument while setting vertex weight: vertex index out of range."); @@ -138,7 +146,8 @@ void Hypergraph::set_vertex_memory_weight(unsigned vertex_idx, int weight) vertex_memory_weights[vertex_idx] = weight; } -void Hypergraph::set_hyperedge_weight(unsigned hyperedge_idx, int weight) +template +void Hypergraph::set_hyperedge_weight(index_type hyperedge_idx, commw_type weight) { if(hyperedge_idx >= Num_hyperedges) throw std::invalid_argument("Invalid Argument while setting hyperedge weight: hyepredge index out of range."); @@ -146,23 +155,26 @@ void Hypergraph::set_hyperedge_weight(unsigned hyperedge_idx, int weight) hyperedge_weights[hyperedge_idx] = weight; } -int Hypergraph::compute_total_vertex_work_weight() const +template +workw_type Hypergraph::compute_total_vertex_work_weight() const { - int total = 0; - for(unsigned node = 0; node < Num_vertices; ++node) + workw_type total = 0; + for(index_type node = 0; node < Num_vertices; ++node) total += vertex_work_weights[node]; return total; } -int Hypergraph::compute_total_vertex_memory_weight() const +template +memw_type Hypergraph::compute_total_vertex_memory_weight() const { - int total = 0; - for(unsigned node = 0; node < Num_vertices; ++node) + memw_type total = 0; + for(index_type node = 0; node < Num_vertices; ++node) total += vertex_memory_weights[node]; return total; } -void Hypergraph::clear() +template +void Hypergraph::clear() { Num_vertices = 0; Num_hyperedges = 0; @@ -175,7 +187,8 @@ void Hypergraph::clear() vertices_in_hyperedge.clear(); } -void Hypergraph::reset(unsigned num_vertices_, unsigned num_hyperedges_) +template +void Hypergraph::reset(index_type num_vertices_, index_type num_hyperedges_) { clear(); @@ -189,33 +202,48 @@ void Hypergraph::reset(unsigned num_vertices_, unsigned num_hyperedges_) vertices_in_hyperedge.resize(num_hyperedges_); } +template template -void Hypergraph::convert_from_cdag_as_dag(const Graph_t& dag) +void Hypergraph::convert_from_cdag_as_dag(const Graph_t& dag) { - reset(static_cast(dag.num_vertices()), 0); + static_assert(std::is_same_v, index_type>, "Index type mismatch, cannot convert DAG to hypergraph."); + static_assert(std::is_same_v, workw_type>, "Work weight type mismatch, cannot convert DAG to hypergraph."); + static_assert(std::is_same_v, memw_type>, "Memory weight type mismatch, cannot convert DAG to hypergraph."); + static_assert(!has_edge_weights_v || std::is_same_v, commw_type>, "Communication weight type mismatch, cannot convert DAG to hypergraph."); + + reset(dag.num_vertices(), 0); for(const auto &node : dag.vertices()) { - set_vertex_work_weight(static_cast(node), static_cast(dag.vertex_work_weight(node))); - set_vertex_memory_weight(static_cast(node), static_cast(dag.vertex_mem_weight(node))); + set_vertex_work_weight(node, dag.vertex_work_weight(node)); + set_vertex_memory_weight(node, dag.vertex_mem_weight(node)); for (const auto &child : dag.children(node)) - add_hyperedge({static_cast(node), static_cast(child)}); // TODO add edge weights if present + if constexpr(has_edge_weights_v) + add_hyperedge({node, child}, dag.edge_comm_weight(edge_desc(node, child, dag).first)); + else + add_hyperedge({node, child}); } } +template template -void Hypergraph::convert_from_cdag_as_hyperdag(const Graph_t& dag) +void Hypergraph::convert_from_cdag_as_hyperdag(const Graph_t& dag) { - reset(static_cast(dag.num_vertices()), 0); + 
static_assert(std::is_same_v, index_type>, "Index type mismatch, cannot convert DAG to hypergraph."); + static_assert(std::is_same_v, workw_type>, "Work weight type mismatch, cannot convert DAG to hypergraph."); + static_assert(std::is_same_v, memw_type>, "Memory weight type mismatch, cannot convert DAG to hypergraph."); + static_assert(std::is_same_v, commw_type>, "Communication weight type mismatch, cannot convert DAG to hypergraph."); + + reset(dag.num_vertices(), 0); for(const auto &node : dag.vertices()) { - set_vertex_work_weight(static_cast(node), static_cast(dag.vertex_work_weight(node))); - set_vertex_memory_weight(static_cast(node), static_cast(dag.vertex_mem_weight(node))); + set_vertex_work_weight(node, dag.vertex_work_weight(node)); + set_vertex_memory_weight(node, dag.vertex_mem_weight(node)); if(dag.out_degree(node) == 0) continue; - std::vector new_hyperedge({static_cast(node)}); + std::vector new_hyperedge({node}); for (const auto &child : dag.children(node)) - new_hyperedge.push_back(static_cast(child)); - add_hyperedge(new_hyperedge, static_cast(dag.vertex_comm_weight(node))); + new_hyperedge.push_back(child); + add_hyperedge(new_hyperedge, dag.vertex_comm_weight(node)); } } diff --git a/include/osp/partitioning/model/partitioning.hpp b/include/osp/partitioning/model/partitioning.hpp index 8e670784..e724deed 100644 --- a/include/osp/partitioning/model/partitioning.hpp +++ b/include/osp/partitioning/model/partitioning.hpp @@ -26,11 +26,12 @@ namespace osp { // Represents a partitioning where each vertex of a hypergraph is assigned to a specifc partition +template class Partitioning { private: - const PartitioningProblem *instance; + const PartitioningProblem *instance; std::vector node_to_partition_assignment; @@ -38,29 +39,29 @@ class Partitioning { Partitioning() = delete; - Partitioning(const PartitioningProblem &inst) + Partitioning(const PartitioningProblem &inst) : instance(&inst), node_to_partition_assignment(std::vector(inst.getHypergraph().num_vertices(), 0)) {} - Partitioning(const PartitioningProblem &inst, const std::vector &partition_assignment_) + Partitioning(const PartitioningProblem &inst, const std::vector &partition_assignment_) : instance(&inst), node_to_partition_assignment(partition_assignment_) {} - Partitioning(const Partitioning &partitioning_) = default; - Partitioning(Partitioning &&partitioning_) = default; + Partitioning(const Partitioning &partitioning_) = default; + Partitioning(Partitioning &&partitioning_) = default; - Partitioning &operator=(const Partitioning &partitioning_) = default; + Partitioning &operator=(const Partitioning &partitioning_) = default; virtual ~Partitioning() = default; // getters and setters - inline const PartitioningProblem &getInstance() const { return *instance; } + inline const PartitioningProblem &getInstance() const { return *instance; } - inline unsigned assignedPartition(unsigned node) const { return node_to_partition_assignment[node]; } + inline unsigned assignedPartition(index_type node) const { return node_to_partition_assignment[node]; } inline const std::vector &assignedPartitions() const { return node_to_partition_assignment; } inline std::vector &assignedPartitions() { return node_to_partition_assignment; } - inline void setAssignedPartition(unsigned node, unsigned part) { node_to_partition_assignment.at(node) = part; } + inline void setAssignedPartition(index_type node, unsigned part) { node_to_partition_assignment.at(node) = part; } void setAssignedPartitions(const std::vector &vec) { if 
(vec.size() == static_cast(instance->getHypergraph().num_vertices()) ) { @@ -80,10 +81,10 @@ class Partitioning { } } - std::vector getPartitionContent(unsigned part) const { + std::vector getPartitionContent(unsigned part) const { - std::vector content; - for (unsigned node = 0; node < node_to_partition_assignment.size(); ++node) { + std::vector content; + for (index_type node = 0; node < node_to_partition_assignment.size(); ++node) { if (node_to_partition_assignment[node] == part) { content.push_back(node); @@ -101,23 +102,24 @@ class Partitioning { // costs and validity std::vector computeLambdaForHyperedges() const; - int computeConnectivityCost() const; - int computeCutNetCost() const; + commw_type computeConnectivityCost() const; + commw_type computeCutNetCost() const; bool satisfiesBalanceConstraint() const; }; -std::vector Partitioning::computeLambdaForHyperedges() const +template +std::vector Partitioning::computeLambdaForHyperedges() const { std::vector lambda(instance->getHypergraph().num_hyperedges(), 0); - for(unsigned edge_idx = 0; edge_idx < instance->getHypergraph().num_hyperedges(); ++edge_idx) + for(index_type edge_idx = 0; edge_idx < instance->getHypergraph().num_hyperedges(); ++edge_idx) { - const std::vector &hyperedge = instance->getHypergraph().get_vertices_in_hyperedge(edge_idx); + const std::vector &hyperedge = instance->getHypergraph().get_vertices_in_hyperedge(edge_idx); if(hyperedge.empty()) continue; std::vector intersects_part(instance->getNumberOfPartitions(), false); - for(const unsigned& node : hyperedge) + for(const index_type& node : hyperedge) intersects_part[node_to_partition_assignment[node]] = true; for(unsigned part = 0; part < instance->getNumberOfPartitions(); ++part) if(intersects_part[part]) @@ -126,33 +128,36 @@ std::vector Partitioning::computeLambdaForHyperedges() const return lambda; } -int Partitioning::computeConnectivityCost() const { +template +commw_type Partitioning::computeConnectivityCost() const { - int total = 0; + commw_type total = 0; std::vector lambda = computeLambdaForHyperedges(); - for(unsigned edge_idx = 0; edge_idx < instance->getHypergraph().num_hyperedges(); ++edge_idx) + for(index_type edge_idx = 0; edge_idx < instance->getHypergraph().num_hyperedges(); ++edge_idx) if(lambda[edge_idx] >= 1) - total += (static_cast(lambda[edge_idx])-1) * instance->getHypergraph().get_hyperedge_weight(edge_idx); + total += (static_cast(lambda[edge_idx])-1) * instance->getHypergraph().get_hyperedge_weight(edge_idx); return total; } -int Partitioning::computeCutNetCost() const { +template +commw_type Partitioning::computeCutNetCost() const { - int total = 0; + commw_type total = 0; std::vector lambda = computeLambdaForHyperedges(); - for(unsigned edge_idx = 0; edge_idx < instance->getHypergraph().num_hyperedges(); ++edge_idx) + for(index_type edge_idx = 0; edge_idx < instance->getHypergraph().num_hyperedges(); ++edge_idx) if(lambda[edge_idx] > 1) total += instance->getHypergraph().get_hyperedge_weight(edge_idx); return total; } -bool Partitioning::satisfiesBalanceConstraint() const { - std::vector work_weight(instance->getNumberOfPartitions(), 0); - std::vector memory_weight(instance->getNumberOfPartitions(), 0); - for (unsigned node = 0; node < node_to_partition_assignment.size(); ++node) { +template +bool Partitioning::satisfiesBalanceConstraint() const { + std::vector work_weight(instance->getNumberOfPartitions(), 0); + std::vector memory_weight(instance->getNumberOfPartitions(), 0); + for (index_type node = 0; node < 
node_to_partition_assignment.size(); ++node) { if (node_to_partition_assignment[node] >= instance->getNumberOfPartitions()) throw std::invalid_argument("Invalid Argument while checking balance constraint: partition ID out of range."); else diff --git a/include/osp/partitioning/model/partitioning_problem.hpp b/include/osp/partitioning/model/partitioning_problem.hpp index 4bc8710c..2996cefa 100644 --- a/include/osp/partitioning/model/partitioning_problem.hpp +++ b/include/osp/partitioning/model/partitioning_problem.hpp @@ -26,14 +26,15 @@ limitations under the License. namespace osp { // represents a hypergraph partitioning problem into a fixed number of parts with a balance constraint +template class PartitioningProblem { private: - Hypergraph hgraph; + Hypergraph hgraph; unsigned nr_of_partitions; - int max_work_weight_per_partition; - int max_memory_weight_per_partition; + workw_type max_work_weight_per_partition; + memw_type max_memory_weight_per_partition; bool allows_replication = false; @@ -41,48 +42,52 @@ class PartitioningProblem { PartitioningProblem() = default; - PartitioningProblem(const Hypergraph &hgraph_, unsigned nr_parts_ = 2, int max_work_weight_ = std::numeric_limits::max(), - int max_memory_weight_ = std::numeric_limits::max()) : hgraph(hgraph_), nr_of_partitions(nr_parts_), + PartitioningProblem(const Hypergraph &hgraph_, unsigned nr_parts_ = 2, + workw_type max_work_weight_ = std::numeric_limits::max(), + memw_type max_memory_weight_ = std::numeric_limits::max()) : + hgraph(hgraph_), nr_of_partitions(nr_parts_), max_work_weight_per_partition(max_work_weight_), max_memory_weight_per_partition(max_memory_weight_) {} - PartitioningProblem(const Hypergraph &&hgraph_, unsigned nr_parts_ = 2, int max_work_weight_ = std::numeric_limits::max(), - int max_memory_weight_ = std::numeric_limits::max()) : hgraph(hgraph_), nr_of_partitions(nr_parts_), + PartitioningProblem(const Hypergraph &&hgraph_, unsigned nr_parts_ = 2, + workw_type max_work_weight_ = std::numeric_limits::max(), + memw_type max_memory_weight_ = std::numeric_limits::max()) : + hgraph(hgraph_), nr_of_partitions(nr_parts_), max_work_weight_per_partition(max_work_weight_), max_memory_weight_per_partition(max_memory_weight_) {} - PartitioningProblem(const PartitioningProblem &other) = default; - PartitioningProblem(PartitioningProblem &&other) = default; + PartitioningProblem(const PartitioningProblem &other) = default; + PartitioningProblem(PartitioningProblem &&other) = default; - PartitioningProblem &operator=(const PartitioningProblem &other) = default; - PartitioningProblem &operator=(PartitioningProblem &&other) = default; + PartitioningProblem &operator=(const PartitioningProblem &other) = default; + PartitioningProblem &operator=(PartitioningProblem &&other) = default; // getters - inline const Hypergraph &getHypergraph() const { return hgraph; } - inline Hypergraph &getHypergraph() { return hgraph; } + inline const Hypergraph &getHypergraph() const { return hgraph; } + inline Hypergraph &getHypergraph() { return hgraph; } inline unsigned getNumberOfPartitions() const { return nr_of_partitions; } - inline int getMaxWorkWeightPerPartition() const { return max_work_weight_per_partition; } - inline int getMaxMemoryWeightPerPartition() const { return max_memory_weight_per_partition; } + inline workw_type getMaxWorkWeightPerPartition() const { return max_work_weight_per_partition; } + inline memw_type getMaxMemoryWeightPerPartition() const { return max_memory_weight_per_partition; } inline bool
getAllowsReplication() const { return allows_replication; } // setters - inline void setHypergraph(const Hypergraph &hgraph_) { hgraph = hgraph_; } + inline void setHypergraph(const Hypergraph &hgraph_) { hgraph = hgraph_; } inline void setNumberOfPartitions(unsigned nr_parts_) { nr_of_partitions = nr_parts_; } inline void setAllowsReplication(bool allowed_) { allows_replication = allowed_; } - inline void setMaxWorkWeightExplicitly(int max_weight_) { max_work_weight_per_partition = max_weight_; } + inline void setMaxWorkWeightExplicitly(workw_type max_weight_) { max_work_weight_per_partition = max_weight_; } void setMaxWorkWeightViaImbalanceFactor(double imbalance){ if(imbalance < 0 ) throw std::invalid_argument("Invalid Argument while setting imbalance parameter: parameter is negative."); else - max_work_weight_per_partition = static_cast(ceil(hgraph.compute_total_vertex_work_weight()/ static_cast(nr_of_partitions) * (1.0+imbalance))); + max_work_weight_per_partition = static_cast(ceil(hgraph.compute_total_vertex_work_weight()/ static_cast(nr_of_partitions) * (1.0+imbalance))); } - inline void setMaxMemoryWeightExplicitly(int max_weight_) { max_memory_weight_per_partition = max_weight_; } + inline void setMaxMemoryWeightExplicitly(memw_type max_weight_) { max_memory_weight_per_partition = max_weight_; } void setMaxMemoryWeightViaImbalanceFactor(double imbalance){ if(imbalance < 0 ) throw std::invalid_argument("Invalid Argument while setting imbalance parameter: parameter is negative."); else - max_memory_weight_per_partition = static_cast(ceil(hgraph.compute_total_vertex_memory_weight()/ static_cast(nr_of_partitions) * (1.0+imbalance))); + max_memory_weight_per_partition = static_cast(ceil(hgraph.compute_total_vertex_memory_weight()/ static_cast(nr_of_partitions) * (1.0+imbalance))); } }; diff --git a/include/osp/partitioning/model/partitioning_replication.hpp b/include/osp/partitioning/model/partitioning_replication.hpp index 03450e30..70c3a886 100644 --- a/include/osp/partitioning/model/partitioning_replication.hpp +++ b/include/osp/partitioning/model/partitioning_replication.hpp @@ -26,11 +26,12 @@ namespace osp { // Represents a partitioning where each vertex of a hypergraph can be assigned to one or more partitions +template class PartitioningWithReplication { private: - const PartitioningProblem *instance; + const PartitioningProblem *instance; std::vector > node_to_partitions_assignment; @@ -38,29 +39,29 @@ class PartitioningWithReplication { PartitioningWithReplication() = delete; - PartitioningWithReplication(const PartitioningProblem &inst) + PartitioningWithReplication(const PartitioningProblem &inst) : instance(&inst), node_to_partitions_assignment(std::vector>(inst.getHypergraph().num_vertices(), {0})) {} - PartitioningWithReplication(const PartitioningProblem &inst, const std::vector > &partition_assignment_) + PartitioningWithReplication(const PartitioningProblem &inst, const std::vector > &partition_assignment_) : instance(&inst), node_to_partitions_assignment(partition_assignment_) {} - PartitioningWithReplication(const PartitioningWithReplication &partitioning_) = default; - PartitioningWithReplication(PartitioningWithReplication &&partitioning_) = default; + PartitioningWithReplication(const PartitioningWithReplication &partitioning_) = default; + PartitioningWithReplication(PartitioningWithReplication &&partitioning_) = default; - PartitioningWithReplication &operator=(const PartitioningWithReplication &partitioning_) = default; + PartitioningWithReplication
&operator=(const PartitioningWithReplication &partitioning_) = default; virtual ~PartitioningWithReplication() = default; // getters and setters - inline const PartitioningProblem &getInstance() const { return *instance; } + inline const PartitioningProblem &getInstance() const { return *instance; } - inline std::vector assignedPartitions(unsigned node) const { return node_to_partitions_assignment[node]; } + inline std::vector assignedPartitions(index_type node) const { return node_to_partitions_assignment[node]; } inline const std::vector > &assignedPartitions() const { return node_to_partitions_assignment; } inline std::vector > &assignedPartitions() { return node_to_partitions_assignment; } - inline void setAssignedPartitions(unsigned node, const std::vector& parts) { node_to_partitions_assignment.at(node) = parts; } + inline void setAssignedPartitions(index_type node, const std::vector& parts) { node_to_partitions_assignment.at(node) = parts; } void setAssignedPartitionVectors(const std::vector > &vec) { if (vec.size() == static_cast(instance->getHypergraph().num_vertices()) ) { @@ -80,10 +81,10 @@ class PartitioningWithReplication { } } - std::vector > getPartitionContents() const { + std::vector > getPartitionContents() const { - std::vector > content(instance->getNumberOfPartitions()); - for (unsigned node = 0; node < node_to_partitions_assignment.size(); ++node) + std::vector > content(instance->getNumberOfPartitions()); + for (index_type node = 0; node < node_to_partitions_assignment.size(); ++node) for(unsigned part : node_to_partitions_assignment[node]) content[part].push_back(node); @@ -97,31 +98,32 @@ class PartitioningWithReplication { // costs and validity - int computeConnectivityCost() const; - int computeCutNetCost() const; + commw_type computeConnectivityCost() const; + commw_type computeCutNetCost() const; bool satisfiesBalanceConstraint() const; }; -int PartitioningWithReplication::computeConnectivityCost() const { +template +commw_type PartitioningWithReplication::computeConnectivityCost() const { // naive implementation. 
in the worst-case this is exponential in the number of parts if(instance->getNumberOfPartitions() > 16) throw std::invalid_argument("Computing connectivity cost is not supported for more than 16 partitions."); - int total = 0; + commw_type total = 0; std::vector part_used(instance->getNumberOfPartitions(), false); - for(unsigned edge_idx = 0; edge_idx < instance->getHypergraph().num_hyperedges(); ++edge_idx) + for(index_type edge_idx = 0; edge_idx < instance->getHypergraph().num_hyperedges(); ++edge_idx) { - const std::vector &hyperedge = instance->getHypergraph().get_vertices_in_hyperedge(edge_idx); + const std::vector &hyperedge = instance->getHypergraph().get_vertices_in_hyperedge(edge_idx); if(hyperedge.empty()) continue; unsigned long mask = 0UL; - std::vector nr_nodes_covered_by_part(instance->getNumberOfPartitions(), 0); - for(const unsigned& node : hyperedge) + std::vector nr_nodes_covered_by_part(instance->getNumberOfPartitions(), 0); + for(const index_type& node : hyperedge) if(node_to_partitions_assignment[node].size() == 1) mask = mask | (1UL << node_to_partitions_assignment[node].front()); @@ -140,7 +142,7 @@ int PartitioningWithReplication::computeConnectivityCost() const { } bool all_nodes_covered = true; - for(const unsigned& node : hyperedge) + for(const index_type& node : hyperedge) { bool node_covered=false; for(unsigned part : node_to_partitions_assignment[node]) @@ -159,22 +161,23 @@ int PartitioningWithReplication::computeConnectivityCost() const { min_parts_to_cover = std::min(min_parts_to_cover, nr_parts_used); } - total += static_cast(min_parts_to_cover-1) * instance->getHypergraph().get_hyperedge_weight(edge_idx); + total += static_cast(min_parts_to_cover-1) * instance->getHypergraph().get_hyperedge_weight(edge_idx); } return total; } -int PartitioningWithReplication::computeCutNetCost() const { +template +commw_type PartitioningWithReplication::computeCutNetCost() const { - int total = 0; - for(unsigned edge_idx = 0; edge_idx < instance->getHypergraph().num_hyperedges(); ++edge_idx) + commw_type total = 0; + for(index_type edge_idx = 0; edge_idx < instance->getHypergraph().num_hyperedges(); ++edge_idx) { - const std::vector &hyperedge = instance->getHypergraph().get_vertices_in_hyperedge(edge_idx); + const std::vector &hyperedge = instance->getHypergraph().get_vertices_in_hyperedge(edge_idx); if(hyperedge.empty()) continue; - std::vector nr_nodes_covered_by_part(instance->getNumberOfPartitions(), 0); - for(const unsigned& node : hyperedge) + std::vector nr_nodes_covered_by_part(instance->getNumberOfPartitions(), 0); + for(const index_type& node : hyperedge) for(unsigned part : node_to_partitions_assignment[node]) ++nr_nodes_covered_by_part[part]; @@ -190,10 +193,11 @@ int PartitioningWithReplication::computeCutNetCost() const { return total; } -bool PartitioningWithReplication::satisfiesBalanceConstraint() const { - std::vector work_weight(instance->getNumberOfPartitions(), 0); - std::vector memory_weight(instance->getNumberOfPartitions(), 0); - for (unsigned node = 0; node < node_to_partitions_assignment.size(); ++node) +template +bool PartitioningWithReplication::satisfiesBalanceConstraint() const { + std::vector work_weight(instance->getNumberOfPartitions(), 0); + std::vector memory_weight(instance->getNumberOfPartitions(), 0); + for (index_type node = 0; node < node_to_partitions_assignment.size(); ++node) for(unsigned part : node_to_partitions_assignment[node]){ if (part >= instance->getNumberOfPartitions()) throw std::invalid_argument("Invalid Argument while 
checking balance constraint: partition ID out of range."); diff --git a/include/osp/partitioning/partitioners/generic_FM.hpp b/include/osp/partitioning/partitioners/generic_FM.hpp index ebca500d..9dfe6eaf 100644 --- a/include/osp/partitioning/partitioners/generic_FM.hpp +++ b/include/osp/partitioning/partitioners/generic_FM.hpp @@ -24,23 +24,25 @@ limitations under the License. namespace osp{ +template class GenericFM { protected: unsigned max_number_of_passes = 10; - unsigned max_nodes_in_part = 0; + index_type max_nodes_in_part = 0; public: - void ImprovePartitioning(Partitioning& partition); + void ImprovePartitioning(Partitioning& partition); inline unsigned getMaxNumberOfPasses() const { return max_number_of_passes; } inline void setMaxNumberOfPasses(unsigned passes_) { max_number_of_passes = passes_; } - inline unsigned getMaxNodesInPart() const { return max_nodes_in_part; } - inline void setMaxNodesInPart(unsigned max_nodes_) { max_nodes_in_part = max_nodes_; } + inline index_type getMaxNodesInPart() const { return max_nodes_in_part; } + inline void setMaxNodesInPart(index_type max_nodes_) { max_nodes_in_part = max_nodes_; } }; -void GenericFM::ImprovePartitioning(Partitioning& partition) +template +void GenericFM::ImprovePartitioning(Partitioning& partition) { // Note: this algorithm disregards hyperedge weights, in order to keep the size of the gain bucket array bounded! @@ -56,14 +58,14 @@ void GenericFM::ImprovePartitioning(Partitioning& partition) return; } - const Hypergraph& Hgraph = partition.getInstance().getHypergraph(); + const Hypergraph& Hgraph = partition.getInstance().getHypergraph(); - unsigned max_degree = 0; - for(unsigned node = 0; node < Hgraph.num_vertices(); ++node) - max_degree = std::max(max_degree, static_cast(Hgraph.get_incident_hyperedges(node).size())); + index_type max_degree = 0; + for(index_type node = 0; node < Hgraph.num_vertices(); ++node) + max_degree = std::max(max_degree, static_cast(Hgraph.get_incident_hyperedges(node).size())); if(max_nodes_in_part == 0) // if not initialized - max_nodes_in_part = static_cast(ceil(static_cast(Hgraph.num_vertices()) * static_cast(partition.getInstance().getMaxWorkWeightPerPartition()) + max_nodes_in_part = static_cast(ceil(static_cast(Hgraph.num_vertices()) * static_cast(partition.getInstance().getMaxWorkWeightPerPartition()) / static_cast(Hgraph.compute_total_vertex_work_weight()) )); for(unsigned pass_idx = 0; pass_idx < max_number_of_passes; ++pass_idx) @@ -71,18 +73,18 @@ void GenericFM::ImprovePartitioning(Partitioning& partition) std::vector node_to_new_part = partition.assignedPartitions(); std::vector locked(Hgraph.num_vertices(), false); std::vector gain(Hgraph.num_vertices(), 0); - std::vector > nr_nodes_in_hyperedge_on_side(Hgraph.num_hyperedges(), std::vector(2, 0)); + std::vector > nr_nodes_in_hyperedge_on_side(Hgraph.num_hyperedges(), std::vector(2, 0)); int cost = 0; - unsigned left_side = 0; - for(unsigned node = 0; node < Hgraph.num_vertices(); ++node) + index_type left_side = 0; + for(index_type node = 0; node < Hgraph.num_vertices(); ++node) if(partition.assignedPartition(node) == 0) ++left_side; // Initialize gain values - for(unsigned hyperedge = 0; hyperedge < Hgraph.num_hyperedges(); ++hyperedge) + for(index_type hyperedge = 0; hyperedge < Hgraph.num_hyperedges(); ++hyperedge) { - for(unsigned node : Hgraph.get_vertices_in_hyperedge(hyperedge)) + for(index_type node : Hgraph.get_vertices_in_hyperedge(hyperedge)) ++nr_nodes_in_hyperedge_on_side[hyperedge][partition.assignedPartition(node)]; 
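// FM gain bookkeeping (descriptive note): gain[node] tracks the net number of
// hyperedges that would leave the cut if node switched sides, so it always lies
// in [-degree(node), +degree(node)] and indexes into the gain bucket array
// built below; a hyperedge contributes +1 to a node that is alone on its side.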
if(Hgraph.get_vertices_in_hyperedge(hyperedge).size() < 2) @@ -91,7 +93,7 @@ void GenericFM::ImprovePartitioning(Partitioning& partition) for(unsigned part = 0; part < 2; ++part) { if(nr_nodes_in_hyperedge_on_side[hyperedge][part] == 1) - for(unsigned node : Hgraph.get_vertices_in_hyperedge(hyperedge)) + for(index_type node : Hgraph.get_vertices_in_hyperedge(hyperedge)) if(partition.assignedPartition(node) == part) ++gain[node]; } @@ -99,23 +101,23 @@ void GenericFM::ImprovePartitioning(Partitioning& partition) // build gain bucket array std::vector max_gain(2, -static_cast(max_degree)-1); - std::vector > > gain_bucket_array(2, std::vector >(2*max_degree+1)); - for(unsigned node = 0; node < Hgraph.num_vertices(); ++node) + std::vector > > gain_bucket_array(2, std::vector >(2*max_degree+1)); + for(index_type node = 0; node < Hgraph.num_vertices(); ++node) { const unsigned& part = partition.assignedPartition(node); gain_bucket_array[part][static_cast(gain[node] + static_cast(max_degree))].push_back(node); max_gain[part] = std::max(max_gain[part], gain[node]); } - unsigned best_index = 0; + index_type best_index = 0; int best_cost = 0; - std::vector moved_nodes; + std::vector moved_nodes; // the pass itself: make moves while(moved_nodes.size() < Hgraph.num_vertices()) { // select move - unsigned to_move = std::numeric_limits::max(); + index_type to_move = std::numeric_limits::max(); unsigned chosen_part = std::numeric_limits::max(); unsigned gain_index = static_cast(std::max(max_gain[0], max_gain[1]) + static_cast(max_degree)); @@ -140,7 +142,7 @@ void GenericFM::ImprovePartitioning(Partitioning& partition) --gain_index; } - if(to_move == std::numeric_limits::max()) + if(to_move == std::numeric_limits::max()) break; // make move @@ -150,7 +152,7 @@ void GenericFM::ImprovePartitioning(Partitioning& partition) if(cost < best_cost) { best_cost = cost; - best_index = static_cast(moved_nodes.size()) + 1; + best_index = static_cast(moved_nodes.size()) + 1; } locked[to_move] = true; node_to_new_part[to_move] = 1 - node_to_new_part[to_move]; @@ -163,16 +165,16 @@ void GenericFM::ImprovePartitioning(Partitioning& partition) unsigned other_part = 1-chosen_part; // update gain values - for(unsigned hyperedge : Hgraph.get_incident_hyperedges(to_move)) + for(index_type hyperedge : Hgraph.get_incident_hyperedges(to_move)) { if(nr_nodes_in_hyperedge_on_side[hyperedge][chosen_part] == 1) { - for(unsigned node : Hgraph.get_vertices_in_hyperedge(hyperedge)) + for(index_type node : Hgraph.get_vertices_in_hyperedge(hyperedge)) { if(locked[node]) continue; - std::vector& vec = gain_bucket_array[other_part][static_cast(gain[node] + static_cast(max_degree))]; + std::vector& vec = gain_bucket_array[other_part][static_cast(gain[node] + static_cast(max_degree))]; vec.erase(std::remove(vec.begin(), vec.end(), node), vec.end()); --gain[node]; gain_bucket_array[other_part][static_cast(gain[node] + static_cast(max_degree))].push_back(node); @@ -180,11 +182,11 @@ void GenericFM::ImprovePartitioning(Partitioning& partition) } else if(nr_nodes_in_hyperedge_on_side[hyperedge][chosen_part] == 2) { - for(unsigned node : Hgraph.get_vertices_in_hyperedge(hyperedge)) + for(index_type node : Hgraph.get_vertices_in_hyperedge(hyperedge)) { if(node_to_new_part[node] == chosen_part && !locked[node]) { - std::vector& vec = gain_bucket_array[chosen_part][static_cast(gain[node] + static_cast(max_degree))]; + std::vector& vec = gain_bucket_array[chosen_part][static_cast(gain[node] + static_cast(max_degree))]; 
vec.erase(std::remove(vec.begin(), vec.end(), node), vec.end()); ++gain[node]; gain_bucket_array[chosen_part][static_cast(gain[node] + static_cast(max_degree))].push_back(node); @@ -195,11 +197,11 @@ void GenericFM::ImprovePartitioning(Partitioning& partition) } if(nr_nodes_in_hyperedge_on_side[hyperedge][other_part] == 1) { - for(unsigned node : Hgraph.get_vertices_in_hyperedge(hyperedge)) + for(index_type node : Hgraph.get_vertices_in_hyperedge(hyperedge)) { if(node_to_new_part[node] == other_part && !locked[node]) { - std::vector& vec = gain_bucket_array[other_part][static_cast(gain[node] + static_cast(max_degree))]; + std::vector& vec = gain_bucket_array[other_part][static_cast(gain[node] + static_cast(max_degree))]; vec.erase(std::remove(vec.begin(), vec.end(), node), vec.end()); --gain[node]; gain_bucket_array[other_part][static_cast(gain[node] + static_cast(max_degree))].push_back(node); @@ -209,12 +211,12 @@ void GenericFM::ImprovePartitioning(Partitioning& partition) } else if(nr_nodes_in_hyperedge_on_side[hyperedge][other_part] == 0) { - for(unsigned node : Hgraph.get_vertices_in_hyperedge(hyperedge)) + for(index_type node : Hgraph.get_vertices_in_hyperedge(hyperedge)) { if(locked[node]) continue; - std::vector& vec = gain_bucket_array[chosen_part][static_cast(gain[node] + static_cast(max_degree))]; + std::vector& vec = gain_bucket_array[chosen_part][static_cast(gain[node] + static_cast(max_degree))]; vec.erase(std::remove(vec.begin(), vec.end(), node), vec.end()); ++gain[node]; gain_bucket_array[chosen_part][static_cast(gain[node] + static_cast(max_degree))].push_back(node); @@ -230,7 +232,7 @@ void GenericFM::ImprovePartitioning(Partitioning& partition) if(best_index == 0) break; - for(unsigned node_idx = 0; node_idx < best_index; ++node_idx) + for(index_type node_idx = 0; node_idx < best_index; ++node_idx) partition.setAssignedPartition(moved_nodes[node_idx], 1U-partition.assignedPartition(moved_nodes[node_idx])); } diff --git a/include/osp/partitioning/partitioners/partitioning_ILP.hpp b/include/osp/partitioning/partitioners/partitioning_ILP.hpp index 7f6af471..db7123a3 100644 --- a/include/osp/partitioning/partitioners/partitioning_ILP.hpp +++ b/include/osp/partitioning/partitioners/partitioning_ILP.hpp @@ -26,36 +26,38 @@ limitations under the License. 
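// Objective encoded below (a descriptive sketch matching the base-class code):
// with binaries x[v][p] = "node v in part p" and y[e][p] = "hyperedge e touches
// part p", the ILP minimizes
//
//     sum_e w(e) * ( sum_p y[e][p] - 1 ),
//
// which equals the connectivity cost sum_e w(e) * (lambda(e) - 1) once the
// constraints force y[e][p] >= x[v][p] for every pin (v, e) of each hyperedge e.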
namespace osp{ -class HypergraphPartitioningILP : public HypergraphPartitioningILPBase { +template +class HypergraphPartitioningILP : public HypergraphPartitioningILPBase { protected: - std::vector readCoptAssignment(const PartitioningProblem &instance, Model& model); + std::vector readCoptAssignment(const PartitioningProblem &instance, Model& model); - void setupExtraVariablesConstraints(const PartitioningProblem &instance, Model& model); + void setupExtraVariablesConstraints(const PartitioningProblem &instance, Model& model); - void setInitialSolution(const Partitioning &partition, Model& model); + void setInitialSolution(const Partitioning &partition, Model& model); public: virtual ~HypergraphPartitioningILP() = default; - RETURN_STATUS computePartitioning(Partitioning& result); + RETURN_STATUS computePartitioning(Partitioning& result); virtual std::string getAlgorithmName() const override { return "HypergraphPartitioningILP"; } }; -RETURN_STATUS HypergraphPartitioningILP::computePartitioning(Partitioning& result) +template +RETURN_STATUS HypergraphPartitioningILP::computePartitioning(Partitioning& result) { Envr env; Model model = env.CreateModel("HypergraphPart"); - setupFundamentalVariablesConstraintsObjective(result.getInstance(), model); + this->setupFundamentalVariablesConstraintsObjective(result.getInstance(), model); setupExtraVariablesConstraints(result.getInstance(), model); - if(use_initial_solution) + if(this->use_initial_solution) setInitialSolution(result, model); - solveILP(model); + this->solveILP(model); if (model.GetIntAttr(COPT_INTATTR_MIPSTATUS) == COPT_MIPSTATUS_OPTIMAL) { @@ -79,57 +81,60 @@ RETURN_STATUS HypergraphPartitioningILP::computePartitioning(Partitioning& resul } } -void HypergraphPartitioningILP::setupExtraVariablesConstraints(const PartitioningProblem &instance, Model& model) { +template +void HypergraphPartitioningILP::setupExtraVariablesConstraints(const PartitioningProblem &instance, Model& model) { - const unsigned numberOfParts = instance.getNumberOfPartitions(); - const unsigned numberOfVertices = instance.getHypergraph().num_vertices(); + const index_type numberOfParts = instance.getNumberOfPartitions(); + const index_type numberOfVertices = instance.getHypergraph().num_vertices(); // Constraints // each node assigned to exactly one partition - for (unsigned node = 0; node < numberOfVertices; node++) { + for (index_type node = 0; node < numberOfVertices; node++) { Expr expr; for (unsigned part = 0; part < numberOfParts; part++) - expr += node_in_partition[node][static_cast(part)]; + expr += this->node_in_partition[node][static_cast(part)]; model.AddConstr(expr == 1); } // hyperedge indicators match node variables for (unsigned part = 0; part < numberOfParts; part++) - for (unsigned node = 0; node < numberOfVertices; node++) - for (const unsigned& hyperedge : instance.getHypergraph().get_incident_hyperedges(node)) - model.AddConstr(hyperedge_uses_partition[hyperedge][static_cast(part)] >= node_in_partition[node][static_cast(part)]); + for (index_type node = 0; node < numberOfVertices; node++) + for (const index_type& hyperedge : instance.getHypergraph().get_incident_hyperedges(node)) + model.AddConstr(this->hyperedge_uses_partition[hyperedge][static_cast(part)] >= this->node_in_partition[node][static_cast(part)]); }; // convert generic one-to-many assignment (of base class function) to one-to-one -std::vector HypergraphPartitioningILP::readCoptAssignment(const PartitioningProblem &instance, Model& model) +template +std::vector 
HypergraphPartitioningILP::readCoptAssignment(const PartitioningProblem &instance, Model& model) { std::vector node_to_partition(instance.getHypergraph().num_vertices(), UINT_MAX); - std::vector > assignmentsGenericForm = readAllCoptAssignments(instance, model); + std::vector > assignmentsGenericForm = this->readAllCoptAssignments(instance, model); - for (unsigned node = 0; node < instance.getHypergraph().num_vertices(); node++) + for (index_type node = 0; node < instance.getHypergraph().num_vertices(); node++) node_to_partition[node] = assignmentsGenericForm[node].front(); return node_to_partition; } -void HypergraphPartitioningILP::setInitialSolution(const Partitioning &partition, Model& model) +template +void HypergraphPartitioningILP::setInitialSolution(const Partitioning &partition, Model& model) { const std::vector& assignment = partition.assignedPartitions(); const unsigned& numPartitions = partition.getInstance().getNumberOfPartitions(); if(assignment.size() != partition.getInstance().getHypergraph().num_vertices()) return; - for(unsigned node = 0; node < assignment.size(); ++node) + for(index_type node = 0; node < assignment.size(); ++node) { if(assignment[node] >= numPartitions) continue; for(unsigned part = 0; part < numPartitions; ++part) - model.SetMipStart(node_in_partition[node][static_cast(part)], static_cast(assignment[node] == part)); + model.SetMipStart(this->node_in_partition[node][static_cast(part)], static_cast(assignment[node] == part)); } model.LoadMipStart(); } diff --git a/include/osp/partitioning/partitioners/partitioning_ILP_base.hpp b/include/osp/partitioning/partitioners/partitioning_ILP_base.hpp index aa339a32..206d7f45 100644 --- a/include/osp/partitioning/partitioners/partitioning_ILP_base.hpp +++ b/include/osp/partitioning/partitioners/partitioning_ILP_base.hpp @@ -26,6 +26,7 @@ limitations under the License. 
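// The base class below owns what both ILP variants share: the node_in_partition
// and hyperedge_uses_partition variable arrays, the per-part work and memory
// capacity constraints, the connectivity objective, and the COPT solve and
// solution-readback helpers; derived classes only add the assignment
// constraints (exactly-one per node without replication, pin covering with it).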
namespace osp{ +template class HypergraphPartitioningILPBase { protected: @@ -35,9 +36,9 @@ class HypergraphPartitioningILPBase { unsigned time_limit_seconds = 3600; bool use_initial_solution = false; - std::vector > readAllCoptAssignments(const PartitioningProblem &instance, Model& model); + std::vector > readAllCoptAssignments(const PartitioningProblem &instance, Model& model); - void setupFundamentalVariablesConstraintsObjective(const PartitioningProblem &instance, Model& model); + void setupFundamentalVariablesConstraintsObjective(const PartitioningProblem &instance, Model& model); void solveILP(Model& model); @@ -50,7 +51,8 @@ class HypergraphPartitioningILPBase { inline void setUseInitialSolution(bool use_) { use_initial_solution = use_; } }; -void HypergraphPartitioningILPBase::solveILP(Model& model) { +template +void HypergraphPartitioningILPBase::solveILP(Model& model) { model.SetIntParam(COPT_INTPARAM_LOGTOCONSOLE, 0); @@ -70,42 +72,43 @@ void HypergraphPartitioningILPBase::solveILP(Model& model) { model.Solve(); } -void HypergraphPartitioningILPBase::setupFundamentalVariablesConstraintsObjective(const PartitioningProblem &instance, Model& model) { +template +void HypergraphPartitioningILPBase::setupFundamentalVariablesConstraintsObjective(const PartitioningProblem &instance, Model& model) { - const unsigned numberOfParts = instance.getNumberOfPartitions(); - const unsigned numberOfVertices = instance.getHypergraph().num_vertices(); - const unsigned numberOfHyperedges = instance.getHypergraph().num_hyperedges(); + const index_type numberOfParts = instance.getNumberOfPartitions(); + const index_type numberOfVertices = instance.getHypergraph().num_vertices(); + const index_type numberOfHyperedges = instance.getHypergraph().num_hyperedges(); // Variables node_in_partition = std::vector(numberOfVertices); - for (unsigned node = 0; node < numberOfVertices; node++) + for (index_type node = 0; node < numberOfVertices; node++) node_in_partition[node] = model.AddVars(static_cast(numberOfParts), COPT_BINARY, "node_in_partition"); hyperedge_uses_partition = std::vector(numberOfHyperedges); - for (unsigned hyperedge = 0; hyperedge < numberOfHyperedges; hyperedge++) + for (index_type hyperedge = 0; hyperedge < numberOfHyperedges; hyperedge++) hyperedge_uses_partition[hyperedge] = model.AddVars(static_cast(numberOfParts), COPT_BINARY, "hyperedge_uses_partition"); // partition size constraints - if(instance.getMaxWorkWeightPerPartition() < std::numeric_limits::max()) + if(instance.getMaxWorkWeightPerPartition() < std::numeric_limits::max()) { for (unsigned part = 0; part < numberOfParts; part++) { Expr expr; - for (unsigned node = 0; node < numberOfVertices; node++) + for (index_type node = 0; node < numberOfVertices; node++) expr += instance.getHypergraph().get_vertex_work_weight(node) * node_in_partition[node][static_cast(part)]; model.AddConstr(expr <= instance.getMaxWorkWeightPerPartition()); } } - if(instance.getMaxMemoryWeightPerPartition() < std::numeric_limits::max()) + if(instance.getMaxMemoryWeightPerPartition() < std::numeric_limits::max()) { for (unsigned part = 0; part < numberOfParts; part++) { Expr expr; - for (unsigned node = 0; node < numberOfVertices; node++) + for (index_type node = 0; node < numberOfVertices; node++) expr += instance.getHypergraph().get_vertex_memory_weight(node) * node_in_partition[node][static_cast(part)]; model.AddConstr(expr <= instance.getMaxMemoryWeightPerPartition()); @@ -114,7 +117,7 @@ void 
HypergraphPartitioningILPBase::setupFundamentalVariablesConstraintsObjectiv // set objective Expr expr; - for (unsigned hyperedge = 0; hyperedge < numberOfHyperedges; hyperedge++) + for (index_type hyperedge = 0; hyperedge < numberOfHyperedges; hyperedge++) { expr -= instance.getHypergraph().get_hyperedge_weight(hyperedge); for (unsigned part = 0; part < numberOfParts; part++) @@ -125,12 +128,13 @@ void HypergraphPartitioningILPBase::setupFundamentalVariablesConstraintsObjectiv }; -std::vector > HypergraphPartitioningILPBase::readAllCoptAssignments(const PartitioningProblem &instance, Model& model) +template +std::vector > HypergraphPartitioningILPBase::readAllCoptAssignments(const PartitioningProblem &instance, Model& model) { std::vector > node_to_partitions(instance.getHypergraph().num_vertices()); std::set nonempty_partition_ids; - for (unsigned node = 0; node < instance.getHypergraph().num_vertices(); node++) + for (index_type node = 0; node < instance.getHypergraph().num_vertices(); node++) for(unsigned part = 0; part < instance.getNumberOfPartitions(); part++) if(node_in_partition[node][static_cast(part)].Get(COPT_DBLINFO_VALUE) >= .99) { @@ -153,7 +157,7 @@ std::vector > HypergraphPartitioningILPBase::readAllCoptAs ++current_index; } - for(unsigned node = 0; node < instance.getHypergraph().num_vertices(); node++) + for(index_type node = 0; node < instance.getHypergraph().num_vertices(); node++) for(unsigned entry_idx = 0; entry_idx < node_to_partitions[node].size(); ++entry_idx) node_to_partitions[node][entry_idx] = new_part_index[node_to_partitions[node][entry_idx]]; diff --git a/include/osp/partitioning/partitioners/partitioning_ILP_replication.hpp b/include/osp/partitioning/partitioners/partitioning_ILP_replication.hpp index 6711cbe8..9e4754bb 100644 --- a/include/osp/partitioning/partitioners/partitioning_ILP_replication.hpp +++ b/include/osp/partitioning/partitioners/partitioning_ILP_replication.hpp @@ -26,17 +26,16 @@ limitations under the License. 
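// The two replication models below trade model size against generality:
// ONLY_TWICE adds a single binary node_replicated[v], allowing one or two copies
// per node and linking the hyperedge indicators through it, which keeps the
// model compact; GENERAL adds one binary per (pin, part) and requires every pin
// to be covered by a part holding both its node and its hyperedge, which models
// arbitrary replication at the price of many more variables and constraints.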
namespace osp{ -class HypergraphPartitioningILPWithReplication : public HypergraphPartitioningILPBase { +template +class HypergraphPartitioningILPWithReplication : public HypergraphPartitioningILPBase { public: enum class REPLICATION_MODEL_IN_ILP { ONLY_TWICE, GENERAL }; protected: - std::vector readCoptAssignment(const PartitioningProblem &instance, Model& model); + void setupExtraVariablesConstraints(const PartitioningProblem &instance, Model& model); - void setupExtraVariablesConstraints(const PartitioningProblem &instance, Model& model); - - void setInitialSolution(const PartitioningWithReplication &partition, Model& model); + void setInitialSolution(const PartitioningWithReplication &partition, Model& model); REPLICATION_MODEL_IN_ILP replication_model = REPLICATION_MODEL_IN_ILP::ONLY_TWICE; @@ -44,29 +43,30 @@ class HypergraphPartitioningILPWithReplication : public HypergraphPartitioningIL virtual ~HypergraphPartitioningILPWithReplication() = default; - RETURN_STATUS computePartitioning(PartitioningWithReplication& result); + RETURN_STATUS computePartitioning(PartitioningWithReplication& result); virtual std::string getAlgorithmName() const override { return "HypergraphPartitioningILPWithReplication"; } void setReplicationModel(REPLICATION_MODEL_IN_ILP replication_model_) { replication_model = replication_model_; } }; -RETURN_STATUS HypergraphPartitioningILPWithReplication::computePartitioning(PartitioningWithReplication& result) +template +RETURN_STATUS HypergraphPartitioningILPWithReplication::computePartitioning(PartitioningWithReplication& result) { Envr env; Model model = env.CreateModel("HypergraphPartRepl"); - setupFundamentalVariablesConstraintsObjective(result.getInstance(), model); + this->setupFundamentalVariablesConstraintsObjective(result.getInstance(), model); setupExtraVariablesConstraints(result.getInstance(), model); - if(use_initial_solution) + if(this->use_initial_solution) setInitialSolution(result, model); - solveILP(model); + this->solveILP(model); if (model.GetIntAttr(COPT_INTATTR_MIPSTATUS) == COPT_MIPSTATUS_OPTIMAL) { - result.setAssignedPartitionVectors(readAllCoptAssignments(result.getInstance(), model)); + result.setAssignedPartitionVectors(this->readAllCoptAssignments(result.getInstance(), model)); return RETURN_STATUS::OSP_SUCCESS; } else if (model.GetIntAttr(COPT_INTATTR_MIPSTATUS) == COPT_MIPSTATUS_INF_OR_UNB) { @@ -77,7 +77,7 @@ RETURN_STATUS HypergraphPartitioningILPWithReplication::computePartitioning(Part if (model.GetIntAttr(COPT_INTATTR_HASMIPSOL)) { - result.setAssignedPartitionVectors(readAllCoptAssignments(result.getInstance(), model)); + result.setAssignedPartitionVectors(this->readAllCoptAssignments(result.getInstance(), model)); return RETURN_STATUS::OSP_SUCCESS; } else { @@ -86,27 +86,28 @@ RETURN_STATUS HypergraphPartitioningILPWithReplication::computePartitioning(Part } } -void HypergraphPartitioningILPWithReplication::setupExtraVariablesConstraints(const PartitioningProblem &instance, Model& model) { +template +void HypergraphPartitioningILPWithReplication::setupExtraVariablesConstraints(const PartitioningProblem &instance, Model& model) { - const unsigned numberOfParts = instance.getNumberOfPartitions(); - const unsigned numberOfVertices = instance.getHypergraph().num_vertices(); + const index_type numberOfParts = instance.getNumberOfPartitions(); + const index_type numberOfVertices = instance.getHypergraph().num_vertices(); if(replication_model == REPLICATION_MODEL_IN_ILP::GENERAL) { // create variables for each pin+partition 
combination - std::map, unsigned> pin_ID_map; - unsigned nr_of_pins = 0; - for (unsigned node = 0; node < numberOfVertices; node++) - for (const unsigned& hyperedge : instance.getHypergraph().get_incident_hyperedges(node)) + std::map, index_type> pin_ID_map; + index_type nr_of_pins = 0; + for (index_type node = 0; node < numberOfVertices; node++) + for (const index_type& hyperedge : instance.getHypergraph().get_incident_hyperedges(node)) pin_ID_map[std::make_pair(node, hyperedge)] = nr_of_pins++; std::vector pin_covered_by_partition = std::vector(nr_of_pins); - for (unsigned pin = 0; pin < nr_of_pins; pin++) + for (index_type pin = 0; pin < nr_of_pins; pin++) pin_covered_by_partition[pin] = model.AddVars(static_cast(numberOfParts), COPT_BINARY, "pin_covered_by_partition"); // each pin covered exactly once - for (unsigned pin = 0; pin < nr_of_pins; pin++) { + for (index_type pin = 0; pin < nr_of_pins; pin++) { Expr expr; for (unsigned part = 0; part < numberOfParts; part++) @@ -117,15 +118,15 @@ void HypergraphPartitioningILPWithReplication::setupExtraVariablesConstraints(co // pin covering requires node assignment for (unsigned part = 0; part < numberOfParts; part++) - for (unsigned node = 0; node < numberOfVertices; node++) - for (const unsigned& hyperedge : instance.getHypergraph().get_incident_hyperedges(node)) - model.AddConstr(node_in_partition[node][static_cast(part)] >= pin_covered_by_partition[pin_ID_map[std::make_pair(node, hyperedge)]][static_cast(part)]); + for (index_type node = 0; node < numberOfVertices; node++) + for (const index_type& hyperedge : instance.getHypergraph().get_incident_hyperedges(node)) + model.AddConstr(this->node_in_partition[node][static_cast(part)] >= pin_covered_by_partition[pin_ID_map[std::make_pair(node, hyperedge)]][static_cast(part)]); // pin covering requires hyperedge use for (unsigned part = 0; part < numberOfParts; part++) - for (unsigned node = 0; node < numberOfVertices; node++) - for (const unsigned& hyperedge : instance.getHypergraph().get_incident_hyperedges(node)) - model.AddConstr(hyperedge_uses_partition[hyperedge][static_cast(part)] >= pin_covered_by_partition[pin_ID_map[std::make_pair(node, hyperedge)]][static_cast(part)]); + for (index_type node = 0; node < numberOfVertices; node++) + for (const index_type& hyperedge : instance.getHypergraph().get_incident_hyperedges(node)) + model.AddConstr(this->hyperedge_uses_partition[hyperedge][static_cast(part)] >= pin_covered_by_partition[pin_ID_map[std::make_pair(node, hyperedge)]][static_cast(part)]); } else if(replication_model == REPLICATION_MODEL_IN_ILP::ONLY_TWICE) @@ -133,40 +134,41 @@ void HypergraphPartitioningILPWithReplication::setupExtraVariablesConstraints(co // each node has one or two copies VarArray node_replicated = model.AddVars(static_cast(numberOfVertices), COPT_BINARY, "node_replicated"); - for (unsigned node = 0; node < numberOfVertices; node++) { + for (index_type node = 0; node < numberOfVertices; node++) { Expr expr = -1; for (unsigned part = 0; part < numberOfParts; part++) - expr += node_in_partition[node][static_cast(part)]; + expr += this->node_in_partition[node][static_cast(part)]; model.AddConstr(expr == node_replicated[static_cast(node)]); } // hyperedge indicators if node is not replicated for (unsigned part = 0; part < numberOfParts; part++) - for (unsigned node = 0; node < numberOfVertices; node++) - for (const unsigned& hyperedge : instance.getHypergraph().get_incident_hyperedges(node)) - model.AddConstr(hyperedge_uses_partition[hyperedge][static_cast(part)] 
>= node_in_partition[node][static_cast(part)] - node_replicated[static_cast(node)]); + for (index_type node = 0; node < numberOfVertices; node++) + for (const index_type& hyperedge : instance.getHypergraph().get_incident_hyperedges(node)) + model.AddConstr(this->hyperedge_uses_partition[hyperedge][static_cast(part)] >= this->node_in_partition[node][static_cast(part)] - node_replicated[static_cast(node)]); // hyperedge indicators if node is replicated - for (unsigned node = 0; node < numberOfVertices; node++) - for (const unsigned& hyperedge : instance.getHypergraph().get_incident_hyperedges(node)) + for (index_type node = 0; node < numberOfVertices; node++) + for (const index_type& hyperedge : instance.getHypergraph().get_incident_hyperedges(node)) for (unsigned part1 = 0; part1 < numberOfParts; part1++) for (unsigned part2 = part1+1; part2 < numberOfParts; part2++) - model.AddConstr(hyperedge_uses_partition[hyperedge][static_cast(part1)] + hyperedge_uses_partition[hyperedge][static_cast(part2)] >= - node_in_partition[node][static_cast(part1)] + node_in_partition[node][static_cast(part2)] - 1); + model.AddConstr(this->hyperedge_uses_partition[hyperedge][static_cast(part1)] + this->hyperedge_uses_partition[hyperedge][static_cast(part2)] >= + this->node_in_partition[node][static_cast(part1)] + this->node_in_partition[node][static_cast(part2)] - 1); } }; -void HypergraphPartitioningILPWithReplication::setInitialSolution(const PartitioningWithReplication &partition, Model& model) +template +void HypergraphPartitioningILPWithReplication::setInitialSolution(const PartitioningWithReplication &partition, Model& model) { const std::vector >& assignments = partition.assignedPartitions(); const unsigned& numPartitions = partition.getInstance().getNumberOfPartitions(); if(assignments.size() != partition.getInstance().getHypergraph().num_vertices()) return; - for(unsigned node = 0; node < assignments.size(); ++node) + for(index_type node = 0; node < assignments.size(); ++node) { std::vector assignedToPart(numPartitions, false); for(unsigned part : assignments[node]) @@ -174,7 +176,7 @@ void HypergraphPartitioningILPWithReplication::setInitialSolution(const Partitio assignedToPart[part] = true; for(unsigned part = 0; part < numPartitions; ++part) - model.SetMipStart(node_in_partition[node][static_cast(part)], static_cast(assignedToPart[part])); + model.SetMipStart(this->node_in_partition[node][static_cast(part)], static_cast(assignedToPart[part])); } model.LoadMipStart(); } diff --git a/tests/hypergraph_and_partition.cpp b/tests/hypergraph_and_partition.cpp index bd9b661b..44a8d282 100644 --- a/tests/hypergraph_and_partition.cpp +++ b/tests/hypergraph_and_partition.cpp @@ -26,7 +26,7 @@ limitations under the License.
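// A minimal usage sketch of the pieces this test exercises (illustrative only:
// the tiny 4-cycle hypergraph and the default template arguments are
// assumptions, not taken from the test below):
//
//   Hypergraph<> hgraph(4, 0);                    // 4 vertices, no hyperedges yet
//   hgraph.add_hyperedge({0, 1});
//   hgraph.add_hyperedge({1, 2});
//   hgraph.add_hyperedge({2, 3});
//   hgraph.add_hyperedge({3, 0});
//   for (unsigned v = 0; v < 4; ++v)
//       hgraph.set_vertex_work_weight(v, 1);      // unit work weights
//   PartitioningProblem<> instance(hgraph, 2);    // bipartition
//   instance.setMaxWorkWeightViaImbalanceFactor(0.5);
//   Partitioning<> partition(instance);
//   for (unsigned v = 0; v < 4; ++v)              // seed a balanced starting split
//       partition.setAssignedPartition(v, v % 2);
//   GenericFM<> fm;
//   fm.ImprovePartitioning(partition);            // 2-way FM refinement
//   auto cost = partition.computeConnectivityCost();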
#include "osp/partitioning/model/partitioning.hpp" #include "osp/partitioning/model/partitioning_replication.hpp" #include "osp/partitioning/partitioners/generic_FM.hpp" -#include "osp/graph_implementations/boost_graphs/boost_graph.hpp" +#include "osp/graph_implementations/adj_list_impl/computational_dag_vector_impl.hpp" #include "osp/auxiliary/io/hdag_graph_file_reader.hpp" #include "osp/auxiliary/io/mtx_hypergraph_file_reader.hpp" #include "osp/auxiliary/io/partitioning_file_writer.hpp" @@ -36,7 +36,7 @@ using namespace osp; BOOST_AUTO_TEST_CASE(Hypergraph_and_Partition_test) { - using graph = boost_graph_uint_t; + using graph = computational_dag_vector_impl_def_int_t; // Getting root git directory std::filesystem::path cwd = std::filesystem::current_path(); @@ -151,7 +151,7 @@ BOOST_AUTO_TEST_CASE(Hypergraph_and_Partition_test) { // Generic FM instance.setNumberOfPartitions(2); - instance.setMaxWorkWeightExplicitly(/*35*/ 4000); + instance.setMaxWorkWeightExplicitly(35); for(unsigned node = 0; node < instance.getHypergraph().num_vertices(); ++node) instance.getHypergraph().set_vertex_work_weight(node, 1); diff --git a/tests/ilp_hypergraph_partitioning.cpp b/tests/ilp_hypergraph_partitioning.cpp index fcdba56f..0d95887d 100644 --- a/tests/ilp_hypergraph_partitioning.cpp +++ b/tests/ilp_hypergraph_partitioning.cpp @@ -22,7 +22,7 @@ limitations under the License. #include #include "osp/partitioning/partitioners/partitioning_ILP.hpp" #include "osp/partitioning/partitioners/partitioning_ILP_replication.hpp" -#include "osp/graph_implementations/boost_graphs/boost_graph.hpp" +#include "osp/graph_implementations/adj_list_impl/computational_dag_vector_impl.hpp" #include "osp/auxiliary/io/hdag_graph_file_reader.hpp" @@ -30,7 +30,7 @@ using namespace osp; BOOST_AUTO_TEST_CASE(test_full) { - using graph = boost_graph_uint_t; + using graph = computational_dag_vector_impl_def_int_t; // Getting root git directory std::filesystem::path cwd = std::filesystem::current_path(); @@ -111,7 +111,7 @@ BOOST_AUTO_TEST_CASE(test_full) { // same tests with other replication formulation instance.setMaxWorkWeightExplicitly(35); - partitioner_rep.setReplicationModel(HypergraphPartitioningILPWithReplication::REPLICATION_MODEL_IN_ILP::GENERAL); + partitioner_rep.setReplicationModel(HypergraphPartitioningILPWithReplication<>::REPLICATION_MODEL_IN_ILP::GENERAL); partitioner_rep.setUseInitialSolution(false); partitioner_rep.computePartitioning(partition_rep); From 65c80f11e905e1d09cb90a199c573fe45e5b7df0 Mon Sep 17 00:00:00 2001 From: ppapp Date: Tue, 14 Oct 2025 16:26:56 +0200 Subject: [PATCH 5/9] hypergraph-partitioning-ILP app added --- apps/CMakeLists.txt | 1 + apps/ilp_hypergraph_partitioner.cpp | 153 ++++++++++++++++++ .../partitioners/partitioning_ILP.hpp | 2 +- .../partitioners/partitioning_ILP_base.hpp | 2 +- 4 files changed, 156 insertions(+), 2 deletions(-) create mode 100644 apps/ilp_hypergraph_partitioner.cpp diff --git a/apps/CMakeLists.txt b/apps/CMakeLists.txt index 05aac548..97935a92 100644 --- a/apps/CMakeLists.txt +++ b/apps/CMakeLists.txt @@ -50,6 +50,7 @@ endif() if (COPT_FOUND) _add_executable( ilp_bsp_scheduler ) +_add_executable( ilp_hypergraph_partitioner ) endif() endif() diff --git a/apps/ilp_hypergraph_partitioner.cpp b/apps/ilp_hypergraph_partitioner.cpp new file mode 100644 index 00000000..89739749 --- /dev/null +++ b/apps/ilp_hypergraph_partitioner.cpp @@ -0,0 +1,153 @@ +/* +Copyright 2024 Huawei Technologies Co., Ltd. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +@author Toni Boehnlein, Benjamin Lozes, Pal Andras Papp, Raphael S. Steiner +*/ + +#include +#include +#include +#include +#include +#include + +#include "osp/auxiliary/misc.hpp" +#include "osp/graph_algorithms/directed_graph_path_util.hpp" +#include "osp/auxiliary/io/general_file_reader.hpp" +#include "osp/partitioning/partitioners/partitioning_ILP.hpp" +#include "osp/partitioning/partitioners/partitioning_ILP_replication.hpp" +#include "osp/graph_implementations/adj_list_impl/computational_dag_vector_impl.hpp" +#include "osp/auxiliary/io/hdag_graph_file_reader.hpp" +#include "osp/auxiliary/io/mtx_hypergraph_file_reader.hpp" +#include "osp/auxiliary/io/partitioning_file_writer.hpp" + + +using namespace osp; + +using graph = computational_dag_vector_impl_def_int_t; + +int main(int argc, char *argv[]) { + if (argc < 4) { + std::cerr << "Usage: " << argv[0] << " <hypergraph_file> <nr_of_partitions> <imbalance> [part_repl|full_repl]" + << std::endl; + return 1; + } + + std::string filename_hgraph = argv[1]; + std::string name_hgraph = filename_hgraph.substr(0, filename_hgraph.rfind(".")); + std::string file_ending = filename_hgraph.substr(filename_hgraph.rfind(".") + 1); + if (!file_reader::isPathSafe(filename_hgraph)) { + std::cerr << "Error: Unsafe file path (possible traversal or invalid type).\n"; + return 1; + } + + std::cout << name_hgraph << std::endl; + + int nr_parts = std::stoi(argv[2]); + if (nr_parts < 2 || nr_parts > 32) { + std::cerr << "Argument nr_parts must be an integer between 2 and 32: " << nr_parts << std::endl; + return 1; + } + + float imbalance = std::stof(argv[3]); + if (imbalance < 0.01 || imbalance > .99) { + std::cerr << "Argument imbalance must be a float between 0.01 and 0.99: " << imbalance << std::endl; + return 1; + } + + unsigned replicate = 0; + + if (argc > 4 && std::string(argv[4]) == "part_repl") { + replicate = 1; + } else if (argc > 4 && std::string(argv[4]) == "full_repl") { + replicate = 2; + } else if (argc > 4) { + std::cerr << "Unknown argument: " << argv[4] << ". Expected 'part_repl' or 'full_repl' for replication." << std::endl; + return 1; + } + + Hypergraph hgraph; + + bool file_status = true; + if (file_ending == "hdag") { + graph dag; + file_status = file_reader::readComputationalDagHyperdagFormatDB(filename_hgraph, dag); + if(file_status) + hgraph.convert_from_cdag_as_hyperdag(dag); + } else if (file_ending == "mtx") { + file_status = file_reader::readHypergraphMartixMarketFormat(filename_hgraph, hgraph); + } else { + std::cout << "Unknown file extension." << std::endl; + return 1; + } + if (!file_status) { + + std::cout << "Reading input file failed." 
<< std::endl; + return 1; + } + + PartitioningProblem instance(hgraph, static_cast(nr_parts)); + instance.setMaxWorkWeightViaImbalanceFactor(imbalance); + + if (replicate > 0) { + + PartitioningWithReplication partition(instance); + HypergraphPartitioningILPWithReplication partitioner; + + for(size_t node = 0; node < hgraph.num_vertices(); ++node) + partition.setAssignedPartitions(node, {static_cast(node % static_cast(nr_parts))}); + if(partition.satisfiesBalanceConstraint()) + partitioner.setUseInitialSolution(true); + + partitioner.setTimeLimitSeconds(600); + if(replicate == 2) + partitioner.setReplicationModel(HypergraphPartitioningILPWithReplication<>::REPLICATION_MODEL_IN_ILP::GENERAL); + + auto solve_status = partitioner.computePartitioning(partition); + + if (solve_status == RETURN_STATUS::OSP_SUCCESS || solve_status == RETURN_STATUS::BEST_FOUND) { + file_writer::write_txt(name_hgraph + "_" + std::to_string(nr_parts) + "_" + std::to_string(imbalance) + + "_ILP_rep" + std::to_string(replicate) + ".txt", partition); + std::cout << "Partitioning (with replication) computed with costs: " << partition.computeConnectivityCost() << std::endl; + } else { + std::cout << "Computing partition failed." << std::endl; + return 1; + } + + } else { + + Partitioning partition(instance); + HypergraphPartitioningILP partitioner; + + for(size_t node = 0; node < hgraph.num_vertices(); ++node) + partition.setAssignedPartition(node, static_cast(node % static_cast(nr_parts))); + if(partition.satisfiesBalanceConstraint()) + partitioner.setUseInitialSolution(true); + + partitioner.setTimeLimitSeconds(600); + + auto solve_status = partitioner.computePartitioning(partition); + + if (solve_status == RETURN_STATUS::OSP_SUCCESS || solve_status == RETURN_STATUS::BEST_FOUND) { + file_writer::write_txt(name_hgraph + "_" + std::to_string(nr_parts) + "_" + std::to_string(imbalance) + + "_ILP_rep" + std::to_string(replicate) + ".txt", partition); + std::cout << "Partitioning computed with costs: " << partition.computeConnectivityCost() << std::endl; + } else { + std::cout << "Computing partition failed." 
<< std::endl; + return 1; + } + } + return 0; +} diff --git a/include/osp/partitioning/partitioners/partitioning_ILP.hpp b/include/osp/partitioning/partitioners/partitioning_ILP.hpp index db7123a3..2c74fd21 100644 --- a/include/osp/partitioning/partitioners/partitioning_ILP.hpp +++ b/include/osp/partitioning/partitioners/partitioning_ILP.hpp @@ -111,7 +111,7 @@ void HypergraphPartitioningILP::s template std::vector HypergraphPartitioningILP::readCoptAssignment(const PartitioningProblem &instance, Model& model) { - std::vector node_to_partition(instance.getHypergraph().num_vertices(), UINT_MAX); + std::vector node_to_partition(instance.getHypergraph().num_vertices(), std::numeric_limits::max()); std::vector > assignmentsGenericForm = this->readAllCoptAssignments(instance, model); for (index_type node = 0; node < instance.getHypergraph().num_vertices(); node++) diff --git a/include/osp/partitioning/partitioners/partitioning_ILP_base.hpp b/include/osp/partitioning/partitioners/partitioning_ILP_base.hpp index 206d7f45..558d1472 100644 --- a/include/osp/partitioning/partitioners/partitioning_ILP_base.hpp +++ b/include/osp/partitioning/partitioners/partitioning_ILP_base.hpp @@ -146,7 +146,7 @@ std::vector > HypergraphPartitioningILPBase::max()); } unsigned current_index = 0; From da0a0e8f15d460d77340bedcaa21194ef0622a7e Mon Sep 17 00:00:00 2001 From: ppapp Date: Wed, 15 Oct 2025 15:10:36 +0200 Subject: [PATCH 6/9] FM fixes and recursive FM --- include/osp/partitioning/model/hypergraph.hpp | 39 ++++++ .../partitioning/partitioners/generic_FM.hpp | 129 +++++++++++++++++- tests/hypergraph_and_partition.cpp | 19 +++ 3 files changed, 186 insertions(+), 1 deletion(-) diff --git a/include/osp/partitioning/model/hypergraph.hpp b/include/osp/partitioning/model/hypergraph.hpp index 0aafe973..5d886062 100644 --- a/include/osp/partitioning/model/hypergraph.hpp +++ b/include/osp/partitioning/model/hypergraph.hpp @@ -71,6 +71,8 @@ class Hypergraph { template void convert_from_cdag_as_hyperdag(const Graph_t& dag); + Hypergraph create_induced_hypergraph(const std::vector& include) const; + private: index_type Num_vertices = 0, Num_hyperedges = 0, Num_pins = 0; @@ -247,4 +249,41 @@ void Hypergraph::convert_from_cda } } +template +Hypergraph Hypergraph::create_induced_hypergraph(const std::vector& include) const +{ + if(include.size() != Num_vertices) + throw std::invalid_argument("Invalid Argument while extracting induced hypergraph: input bool array has incorrect size."); + + std::vector new_index(Num_vertices); + unsigned current_index = 0; + for(index_type node = 0; node < Num_vertices; ++node) + if(include[node]) + new_index[node] = current_index++; + + Hypergraph hgraph(current_index, 0); + for(index_type node = 0; node < Num_vertices; ++node) + if(include[node]) + { + hgraph.set_vertex_work_weight(new_index[node], vertex_work_weights[node]); + hgraph.set_vertex_memory_weight(new_index[node], vertex_memory_weights[node]); + } + + for(index_type hyperedge = 0; hyperedge < Num_hyperedges; ++hyperedge) + { + unsigned nr_induced_pins = 0; + std::vector induced_hyperedge; + for(index_type node : vertices_in_hyperedge[hyperedge]) + if(include[node]) + { + induced_hyperedge.push_back(new_index[node]); + ++nr_induced_pins; + } + + if(nr_induced_pins >= 2) + hgraph.add_hyperedge(induced_hyperedge, hyperedge_weights[hyperedge]); + } + return hgraph; +} + } // namespace osp \ No newline at end of file diff --git a/include/osp/partitioning/partitioners/generic_FM.hpp 
b/include/osp/partitioning/partitioners/generic_FM.hpp index 9dfe6eaf..8e759b5f 100644 --- a/include/osp/partitioning/partitioners/generic_FM.hpp +++ b/include/osp/partitioning/partitioners/generic_FM.hpp @@ -31,10 +31,15 @@ class GenericFM { unsigned max_number_of_passes = 10; index_type max_nodes_in_part = 0; + // auxiliary for RecursiveFM + std::vector getMaxNodesOnLevel(index_type nr_nodes, unsigned nr_parts) const; + public: void ImprovePartitioning(Partitioning& partition); + void RecursiveFM(Partitioning& partition); + inline unsigned getMaxNumberOfPasses() const { return max_number_of_passes; } inline void setMaxNumberOfPasses(unsigned passes_) { max_number_of_passes = passes_; } inline index_type getMaxNodesInPart() const { return max_nodes_in_part; } @@ -81,6 +86,20 @@ void GenericFM::ImprovePartitioni if(partition.assignedPartition(node) == 0) ++left_side; + if(left_side > max_nodes_in_part || Hgraph.num_vertices() - left_side > max_nodes_in_part) + { + if(pass_idx == 0) + { + std::cout<<"Error: initial partitioning of FM is not balanced."<::ImprovePartitioni for(index_type node : Hgraph.get_vertices_in_hyperedge(hyperedge)) if(partition.assignedPartition(node) == part) ++gain[node]; + + if(nr_nodes_in_hyperedge_on_side[hyperedge][part] == 0) + for(index_type node : Hgraph.get_vertices_in_hyperedge(hyperedge)) + if(partition.assignedPartition(node) != part) + --gain[node]; } } @@ -232,10 +256,113 @@ void GenericFM::ImprovePartitioni if(best_index == 0) break; - for(index_type node_idx = 0; node_idx < best_index; ++node_idx) + for(index_type node_idx = 0; node_idx < best_index && node_idx < static_cast(moved_nodes.size()); ++node_idx) partition.setAssignedPartition(moved_nodes[node_idx], 1U-partition.assignedPartition(moved_nodes[node_idx])); } } +template +void GenericFM::RecursiveFM(Partitioning& partition) +{ + const unsigned& nr_parts = partition.getInstance().getNumberOfPartitions(); + const index_type& nr_nodes = partition.getInstance().getHypergraph().num_vertices(); + + using Hgraph = Hypergraph; + + // Note: this is just a simple recursive heuristic for the case when the partitions are a small power of 2 + if(nr_parts != 4 && nr_parts != 8 && nr_parts != 16 && nr_parts != 32) + { + std::cout << "Error: Recursive FM can only be used for 4, 8, 16 or 32 partitions currently." 
<< std::endl; + return; + } + + for(index_type node = 0; node < nr_nodes; ++node) + partition.setAssignedPartition(node, static_cast(node % 2)); + + if(max_nodes_in_part == 0) // if not initialized + max_nodes_in_part = static_cast(ceil(static_cast(nr_nodes) * static_cast(partition.getInstance().getMaxWorkWeightPerPartition()) + / static_cast(partition.getInstance().getHypergraph().compute_total_vertex_work_weight()) )); + + const std::vector max_nodes_on_level = getMaxNodesOnLevel(nr_nodes, nr_parts); + + unsigned parts = 1; + unsigned level = 0; + std::vector sub_hgraphs({partition.getInstance().getHypergraph()}); + unsigned start_index = 0; + + std::map > node_to_new_hgraph_and_id; + std::map, index_type> hgraph_and_id_to_old_idx; + for(index_type node = 0; node < nr_nodes; ++node) + { + node_to_new_hgraph_and_id[node] = std::make_pair(0, node); + hgraph_and_id_to_old_idx[std::make_pair(0, node)] = node; + } + + while(parts < nr_parts) + { + unsigned end_idx = static_cast(sub_hgraphs.size()); + for(unsigned sub_hgraph_index = start_index; sub_hgraph_index < end_idx; ++sub_hgraph_index) + { + const Hgraph& hgraph = sub_hgraphs[sub_hgraph_index]; + PartitioningProblem instance(hgraph, 2); + Partitioning sub_partition(instance); + for(index_type node = 0; node < hgraph.num_vertices(); ++node) + sub_partition.setAssignedPartition(node, node%2); + + GenericFM sub_fm; + sub_fm.setMaxNodesInPart(max_nodes_on_level[level]); + //std::cout<<"Hgraph of size "< current_idx(2, 0); + std::vector > part_indicator(2, std::vector(hgraph.num_vertices(), false)); + for(index_type node = 0; node < hgraph.num_vertices(); ++node) + { + const unsigned part_id = sub_partition.assignedPartition(node); + const index_type original_id = hgraph_and_id_to_old_idx[std::make_pair(sub_hgraph_index, node)]; + node_to_new_hgraph_and_id[original_id] = std::make_pair(sub_hgraphs.size()+part_id, current_idx[part_id]); + hgraph_and_id_to_old_idx[std::make_pair(sub_hgraphs.size()+part_id, current_idx[part_id])] = original_id; + ++current_idx[part_id]; + part_indicator[part_id][node] = true; + } + + for(unsigned part = 0; part < 2; ++part) + sub_hgraphs.push_back(sub_hgraphs[sub_hgraph_index].create_induced_hypergraph(part_indicator[part])); + + ++start_index; + } + + parts *= 2; + ++level; + } + + for(index_type node = 0; node < nr_nodes; ++node) + partition.setAssignedPartition(node, node_to_new_hgraph_and_id[node].first - (static_cast(sub_hgraphs.size())-nr_parts)); + +} + +template +std::vector GenericFM::getMaxNodesOnLevel(index_type nr_nodes, unsigned nr_parts) const +{ + std::vector max_nodes_on_level; + std::vector limit_per_level({static_cast(ceil(static_cast(nr_nodes) / 2.0))}); + for(unsigned parts = nr_parts / 4; parts > 0; parts /= 2) + limit_per_level.push_back(static_cast(ceil(static_cast(limit_per_level.back()) / 2.0))); + + max_nodes_on_level.push_back(max_nodes_in_part); + for(unsigned parts = 2; parts < nr_parts; parts *= 2) + { + index_type next_limit = max_nodes_on_level.back()*2; + if(next_limit > limit_per_level.back()) + --next_limit; + + limit_per_level.pop_back(); + max_nodes_on_level.push_back(next_limit); + } + + std::reverse(max_nodes_on_level.begin(),max_nodes_on_level.end()); + return max_nodes_on_level; +} + } // namespace osp \ No newline at end of file diff --git a/tests/hypergraph_and_partition.cpp b/tests/hypergraph_and_partition.cpp index 44a8d282..9d47a338 100644 --- a/tests/hypergraph_and_partition.cpp +++ b/tests/hypergraph_and_partition.cpp @@ -184,6 +184,7 @@ 
BOOST_AUTO_TEST_CASE(Hypergraph_and_Partition_test) { original_cost = partition_to_improve.computeConnectivityCost(); + fm.setMaxNodesInPart(0); fm.ImprovePartitioning(partition_to_improve); new_cost = partition_to_improve.computeConnectivityCost(); @@ -191,4 +192,22 @@ BOOST_AUTO_TEST_CASE(Hypergraph_and_Partition_test) { BOOST_CHECK(new_cost <= original_cost); std::cout< "< "< Date: Wed, 15 Oct 2025 15:41:16 +0200 Subject: [PATCH 7/9] initialization in Hgraph partitioning app --- apps/ilp_hypergraph_partitioner.cpp | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/apps/ilp_hypergraph_partitioner.cpp b/apps/ilp_hypergraph_partitioner.cpp index 89739749..73e162fe 100644 --- a/apps/ilp_hypergraph_partitioner.cpp +++ b/apps/ilp_hypergraph_partitioner.cpp @@ -26,6 +26,7 @@ limitations under the License. #include "osp/auxiliary/misc.hpp" #include "osp/graph_algorithms/directed_graph_path_util.hpp" #include "osp/auxiliary/io/general_file_reader.hpp" +#include "osp/partitioning/partitioners/generic_FM.hpp" #include "osp/partitioning/partitioners/partitioning_ILP.hpp" #include "osp/partitioning/partitioners/partitioning_ILP_replication.hpp" #include "osp/graph_implementations/adj_list_impl/computational_dag_vector_impl.hpp" @@ -101,13 +102,22 @@ int main(int argc, char *argv[]) { PartitioningProblem instance(hgraph, static_cast(nr_parts)); instance.setMaxWorkWeightViaImbalanceFactor(imbalance); + Partitioning initial_partition(instance); + GenericFM fm; + for(size_t node = 0; node < hgraph.num_vertices(); ++node) + initial_partition.setAssignedPartition(node, static_cast(node % static_cast(nr_parts))); + if(nr_parts == 2) + fm.ImprovePartitioning(initial_partition); + if(nr_parts == 4 || nr_parts == 8 || nr_parts == 16 || nr_parts == 32) + fm.RecursiveFM(initial_partition); + if (replicate > 0) { PartitioningWithReplication partition(instance); HypergraphPartitioningILPWithReplication partitioner; for(size_t node = 0; node < hgraph.num_vertices(); ++node) - partition.setAssignedPartitions(node, {static_cast(node % static_cast(nr_parts))}); + partition.setAssignedPartitions(node, {initial_partition.assignedPartition(node)}); if(partition.satisfiesBalanceConstraint()) partitioner.setUseInitialSolution(true); @@ -132,7 +142,7 @@ int main(int argc, char *argv[]) { HypergraphPartitioningILP partitioner; for(size_t node = 0; node < hgraph.num_vertices(); ++node) - partition.setAssignedPartition(node, static_cast(node % static_cast(nr_parts))); + partition.setAssignedPartition(node, initial_partition.assignedPartition(node)); if(partition.satisfiesBalanceConstraint()) partitioner.setUseInitialSolution(true); From 55200c980aa895cf62d4c71b2e8db29de09282df Mon Sep 17 00:00:00 2001 From: papp-pal-andras Date: Thu, 23 Oct 2025 11:38:51 +0200 Subject: [PATCH 8/9] moving hypergraph utility functions into separate file --- apps/ilp_hypergraph_partitioner.cpp | 3 +- include/osp/partitioning/model/hypergraph.hpp | 107 ------------- .../partitioning/model/hypergraph_utility.hpp | 145 ++++++++++++++++++ .../model/partitioning_problem.hpp | 6 +- .../partitioning/partitioners/generic_FM.hpp | 6 +- tests/hypergraph_and_partition.cpp | 11 +- tests/ilp_hypergraph_partitioning.cpp | 5 +- 7 files changed, 161 insertions(+), 122 deletions(-) create mode 100644 include/osp/partitioning/model/hypergraph_utility.hpp diff --git a/apps/ilp_hypergraph_partitioner.cpp b/apps/ilp_hypergraph_partitioner.cpp index 73e162fe..bd271930 100644 --- a/apps/ilp_hypergraph_partitioner.cpp +++ 
b/apps/ilp_hypergraph_partitioner.cpp @@ -26,6 +26,7 @@ limitations under the License. #include "osp/auxiliary/misc.hpp" #include "osp/graph_algorithms/directed_graph_path_util.hpp" #include "osp/auxiliary/io/general_file_reader.hpp" +#include "osp/partitioning/model/hypergraph_utility.hpp" #include "osp/partitioning/partitioners/generic_FM.hpp" #include "osp/partitioning/partitioners/partitioning_ILP.hpp" #include "osp/partitioning/partitioners/partitioning_ILP_replication.hpp" @@ -86,7 +87,7 @@ int main(int argc, char *argv[]) { graph dag; file_status = file_reader::readComputationalDagHyperdagFormatDB(filename_hgraph, dag); if(file_status) - hgraph.convert_from_cdag_as_hyperdag(dag); + hgraph = convert_from_cdag_as_hyperdag(dag); } else if (file_ending == "mtx") { file_status = file_reader::readHypergraphMartixMarketFormat(filename_hgraph, hgraph); } else { diff --git a/include/osp/partitioning/model/hypergraph.hpp b/include/osp/partitioning/model/hypergraph.hpp index 5d886062..ec16635a 100644 --- a/include/osp/partitioning/model/hypergraph.hpp +++ b/include/osp/partitioning/model/hypergraph.hpp @@ -56,8 +56,6 @@ class Hypergraph { void set_vertex_memory_weight(index_type vertex_idx, memw_type weight); void set_hyperedge_weight(index_type hyperedge_idx, commw_type weight); - workw_type compute_total_vertex_work_weight() const; - memw_type compute_total_vertex_memory_weight() const; void clear(); void reset(index_type num_vertices_, index_type num_hyperedges_); @@ -65,13 +63,6 @@ class Hypergraph { inline const std::vector &get_incident_hyperedges(index_type vertex) const { return incident_hyperedges_to_vertex[vertex]; } inline const std::vector &get_vertices_in_hyperedge(index_type hyperedge) const { return vertices_in_hyperedge[hyperedge]; } - template - void convert_from_cdag_as_dag(const Graph_t& dag); - - template - void convert_from_cdag_as_hyperdag(const Graph_t& dag); - - Hypergraph create_induced_hypergraph(const std::vector& include) const; private: index_type Num_vertices = 0, Num_hyperedges = 0, Num_pins = 0; @@ -157,23 +148,6 @@ void Hypergraph::set_hyperedge_we hyperedge_weights[hyperedge_idx] = weight; } -template -workw_type Hypergraph::compute_total_vertex_work_weight() const -{ - workw_type total = 0; - for(index_type node = 0; node < Num_vertices; ++node) - total += vertex_work_weights[node]; - return total; -} - -template -memw_type Hypergraph::compute_total_vertex_memory_weight() const -{ - memw_type total = 0; - for(index_type node = 0; node < Num_vertices; ++node) - total += vertex_memory_weights[node]; - return total; -} template void Hypergraph::clear() @@ -204,86 +178,5 @@ void Hypergraph::reset(index_type vertices_in_hyperedge.resize(num_hyperedges_); } -template -template -void Hypergraph::convert_from_cdag_as_dag(const Graph_t& dag) -{ - static_assert(std::is_same_v, index_type>, "Index type mismatch, cannot convert DAG to hypergraph."); - static_assert(std::is_same_v, workw_type>, "Work weight type mismatch, cannot convert DAG to hypergraph."); - static_assert(std::is_same_v, memw_type>, "Memory weight type mismatch, cannot convert DAG to hypergraph."); - static_assert(!has_edge_weights_v || std::is_same_v, commw_type>, "Communication weight type mismatch, cannot convert DAG to hypergraph."); - - reset(dag.num_vertices(), 0); - for(const auto &node : dag.vertices()) - { - set_vertex_work_weight(node, dag.vertex_work_weight(node)); - set_vertex_memory_weight(node, dag.vertex_mem_weight(node)); - for (const auto &child : dag.children(node)) - if 
constexpr(has_edge_weights_v) - add_hyperedge({node, child}, dag.edge_comm_weight(edge_desc(node, child, dag).first)); - else - add_hyperedge({node, child}); - } -} - -template -template -void Hypergraph::convert_from_cdag_as_hyperdag(const Graph_t& dag) -{ - static_assert(std::is_same_v, index_type>, "Index type mismatch, cannot convert DAG to hypergraph."); - static_assert(std::is_same_v, workw_type>, "Work weight type mismatch, cannot convert DAG to hypergraph."); - static_assert(std::is_same_v, memw_type>, "Memory weight type mismatch, cannot convert DAG to hypergraph."); - static_assert(std::is_same_v, commw_type>, "Communication weight type mismatch, cannot convert DAG to hypergraph."); - - reset(dag.num_vertices(), 0); - for(const auto &node : dag.vertices()) - { - set_vertex_work_weight(node, dag.vertex_work_weight(node)); - set_vertex_memory_weight(node, dag.vertex_mem_weight(node)); - if(dag.out_degree(node) == 0) - continue; - std::vector new_hyperedge({node}); - for (const auto &child : dag.children(node)) - new_hyperedge.push_back(child); - add_hyperedge(new_hyperedge, dag.vertex_comm_weight(node)); - } -} - -template -Hypergraph Hypergraph::create_induced_hypergraph(const std::vector& include) const -{ - if(include.size() != Num_vertices) - throw std::invalid_argument("Invalid Argument while extracting induced hypergraph: input bool array has incorrect size."); - - std::vector new_index(Num_vertices); - unsigned current_index = 0; - for(index_type node = 0; node < Num_vertices; ++node) - if(include[node]) - new_index[node] = current_index++; - - Hypergraph hgraph(current_index, 0); - for(index_type node = 0; node < Num_vertices; ++node) - if(include[node]) - { - hgraph.set_vertex_work_weight(new_index[node], vertex_work_weights[node]); - hgraph.set_vertex_memory_weight(new_index[node], vertex_memory_weights[node]); - } - - for(index_type hyperedge = 0; hyperedge < Num_hyperedges; ++hyperedge) - { - unsigned nr_induced_pins = 0; - std::vector induced_hyperedge; - for(index_type node : vertices_in_hyperedge[hyperedge]) - if(include[node]) - { - induced_hyperedge.push_back(new_index[node]); - ++nr_induced_pins; - } - - if(nr_induced_pins >= 2) - hgraph.add_hyperedge(induced_hyperedge, hyperedge_weights[hyperedge]); - } - return hgraph; -} } // namespace osp \ No newline at end of file diff --git a/include/osp/partitioning/model/hypergraph_utility.hpp b/include/osp/partitioning/model/hypergraph_utility.hpp new file mode 100644 index 00000000..77af4516 --- /dev/null +++ b/include/osp/partitioning/model/hypergraph_utility.hpp @@ -0,0 +1,145 @@ +/* +Copyright 2024 Huawei Technologies Co., Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +@author Toni Boehnlein, Benjamin Lozes, Pal Andras Papp, Raphael S. Steiner +*/ + +#pragma once + +#include +#include +#include + +#include "osp/partitioning/model/hypergraph.hpp" + +/** + * @file hypergraph_utility.hpp + * @brief Utility functions and classes for working with hypergraphs.
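+ *
+ * A short usage sketch (illustrative only: it assumes a fully instantiated
+ * Hypergraph type, and that add_hyperedge's weight parameter defaults to 1):
+ * @code
+ * Hypergraph hg(3, 0);         // 3 vertices, no hyperedges yet
+ * hg.add_hyperedge({0, 1, 2}); // one net spanning all three vertices
+ * auto work = compute_total_vertex_work_weight(hg); // sums the per-vertex work weights
+ * @endcode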
+ * + * This file provides a collection of simple utility functions for the hypergraph class. + */ + +namespace osp { + + +// summing up weights + +template +workw_type compute_total_vertex_work_weight(const Hypergraph& hgraph) +{ + workw_type total = 0; + for(index_type node = 0; node < hgraph.num_vertices(); ++node) + total += hgraph.get_vertex_work_weight(node); + return total; +} + +template +memw_type compute_total_vertex_memory_weight(const Hypergraph& hgraph) +{ + memw_type total = 0; + for(index_type node = 0; node < hgraph.num_vertices(); ++node) + total += hgraph.get_vertex_memory_weight(node); + return total; +} + + +// get induced subhypergraph + +template +Hypergraph create_induced_hypergraph(const Hypergraph& hgraph, const std::vector& include) +{ + if(include.size() != hgraph.num_vertices()) + throw std::invalid_argument("Invalid Argument while extracting induced hypergraph: input bool array has incorrect size."); + + std::vector new_index(hgraph.num_vertices()); + unsigned current_index = 0; + for(index_type node = 0; node < hgraph.num_vertices(); ++node) + if(include[node]) + new_index[node] = current_index++; + + Hypergraph new_hgraph(current_index, 0); + for(index_type node = 0; node < hgraph.num_vertices(); ++node) + if(include[node]) + { + new_hgraph.set_vertex_work_weight(new_index[node], hgraph.get_vertex_work_weight(node)); + new_hgraph.set_vertex_memory_weight(new_index[node], hgraph.get_vertex_memory_weight(node)); + } + + for(index_type hyperedge = 0; hyperedge < hgraph.num_hyperedges(); ++hyperedge) + { + unsigned nr_induced_pins = 0; + std::vector induced_hyperedge; + for(index_type node : hgraph.get_vertices_in_hyperedge(hyperedge)) + if(include[node]) + { + induced_hyperedge.push_back(new_index[node]); + ++nr_induced_pins; + } + + if(nr_induced_pins >= 2) + new_hgraph.add_hyperedge(induced_hyperedge, hgraph.get_hyperedge_weight(hyperedge)); + } + return new_hgraph; +} + + +// conversion + +template +Hypergraph convert_from_cdag_as_dag(const Graph_t& dag) +{ + static_assert(std::is_same_v, index_type>, "Index type mismatch, cannot convert DAG to hypergraph."); + static_assert(std::is_same_v, workw_type>, "Work weight type mismatch, cannot convert DAG to hypergraph."); + static_assert(std::is_same_v, memw_type>, "Memory weight type mismatch, cannot convert DAG to hypergraph."); + static_assert(!has_edge_weights_v || std::is_same_v, commw_type>, "Communication weight type mismatch, cannot convert DAG to hypergraph."); + + Hypergraph hgraph(dag.num_vertices(), 0); + for(const auto &node : dag.vertices()) + { + hgraph.set_vertex_work_weight(node, dag.vertex_work_weight(node)); + hgraph.set_vertex_memory_weight(node, dag.vertex_mem_weight(node)); + for (const auto &child : dag.children(node)) + if constexpr(has_edge_weights_v) + hgraph.add_hyperedge({node, child}, dag.edge_comm_weight(edge_desc(node, child, dag).first)); + else + hgraph.add_hyperedge({node, child}); + } + return hgraph; +} + +template +Hypergraph convert_from_cdag_as_hyperdag(const Graph_t& dag) +{ + static_assert(std::is_same_v, index_type>, "Index type mismatch, cannot convert DAG to hypergraph."); + static_assert(std::is_same_v, workw_type>, "Work weight type mismatch, cannot convert DAG to hypergraph."); + static_assert(std::is_same_v, memw_type>, "Memory weight type mismatch, cannot convert DAG to hypergraph."); + static_assert(std::is_same_v, commw_type>, "Communication weight type mismatch, cannot convert DAG to hypergraph."); + + Hypergraph hgraph(dag.num_vertices(), 0); + for(const auto 
&node : dag.vertices()) + { + hgraph.set_vertex_work_weight(node, dag.vertex_work_weight(node)); + hgraph.set_vertex_memory_weight(node, dag.vertex_mem_weight(node)); + if(dag.out_degree(node) == 0) + continue; + std::vector new_hyperedge({node}); + for (const auto &child : dag.children(node)) + new_hyperedge.push_back(child); + hgraph.add_hyperedge(new_hyperedge, dag.vertex_comm_weight(node)); + } + return hgraph; +} + +} // namespace osp \ No newline at end of file diff --git a/include/osp/partitioning/model/partitioning_problem.hpp b/include/osp/partitioning/model/partitioning_problem.hpp index 2996cefa..a2fe1313 100644 --- a/include/osp/partitioning/model/partitioning_problem.hpp +++ b/include/osp/partitioning/model/partitioning_problem.hpp @@ -21,7 +21,7 @@ limitations under the License. #include #include -#include "osp/partitioning/model/hypergraph.hpp" +#include "osp/partitioning/model/hypergraph_utility.hpp" namespace osp { @@ -80,14 +80,14 @@ class PartitioningProblem { if(imbalance < 0 ) throw std::invalid_argument("Invalid Argument while setting imbalance parameter: parameter is negative."); else - max_work_weight_per_partition = static_cast(ceil(hgraph.compute_total_vertex_work_weight()/ static_cast(nr_of_partitions) * (1.0+imbalance))); + max_work_weight_per_partition = static_cast(ceil(compute_total_vertex_work_weight(hgraph)/ static_cast(nr_of_partitions) * (1.0+imbalance))); } inline void setMaxMemoryWeightExplicitly(memw_type max_weight_) { max_memory_weight_per_partition = max_weight_; } void setMaxMemoryWeightViaImbalanceFactor(double imbalance){ if(imbalance < 0 ) throw std::invalid_argument("Invalid Argument while setting imbalance parameter: parameter is negative."); else - max_memory_weight_per_partition = static_cast(ceil(hgraph.compute_total_vertex_memory_weight()/ static_cast(nr_of_partitions) * (1.0+imbalance))); + max_memory_weight_per_partition = static_cast(ceil(compute_total_vertex_memory_weight(hgraph)/ static_cast(nr_of_partitions) * (1.0+imbalance))); } }; diff --git a/include/osp/partitioning/partitioners/generic_FM.hpp b/include/osp/partitioning/partitioners/generic_FM.hpp index 8e759b5f..6b24af2d 100644 --- a/include/osp/partitioning/partitioners/generic_FM.hpp +++ b/include/osp/partitioning/partitioners/generic_FM.hpp @@ -71,7 +71,7 @@ void GenericFM::ImprovePartitioni if(max_nodes_in_part == 0) // if not initialized max_nodes_in_part = static_cast(ceil(static_cast(Hgraph.num_vertices()) * static_cast(partition.getInstance().getMaxWorkWeightPerPartition()) - / static_cast(Hgraph.compute_total_vertex_work_weight()) )); + / static_cast(compute_total_vertex_work_weight(Hgraph)) )); for(unsigned pass_idx = 0; pass_idx < max_number_of_passes; ++pass_idx) { @@ -282,7 +282,7 @@ void GenericFM::RecursiveFM(Parti if(max_nodes_in_part == 0) // if not initialized max_nodes_in_part = static_cast(ceil(static_cast(nr_nodes) * static_cast(partition.getInstance().getMaxWorkWeightPerPartition()) - / static_cast(partition.getInstance().getHypergraph().compute_total_vertex_work_weight()) )); + / static_cast(compute_total_vertex_work_weight(partition.getInstance().getHypergraph())) )); const std::vector max_nodes_on_level = getMaxNodesOnLevel(nr_nodes, nr_parts); @@ -328,7 +328,7 @@ void GenericFM::RecursiveFM(Parti } for(unsigned part = 0; part < 2; ++part) - sub_hgraphs.push_back(sub_hgraphs[sub_hgraph_index].create_induced_hypergraph(part_indicator[part])); + sub_hgraphs.push_back(create_induced_hypergraph(sub_hgraphs[sub_hgraph_index], part_indicator[part])); 
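// note: each bisection appends its two induced halves to the back of sub_hgraphs; advancing start_index below retires the sub-hypergraph that was just split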
++start_index; } diff --git a/tests/hypergraph_and_partition.cpp b/tests/hypergraph_and_partition.cpp index 9d47a338..2c06f5b5 100644 --- a/tests/hypergraph_and_partition.cpp +++ b/tests/hypergraph_and_partition.cpp @@ -25,6 +25,7 @@ limitations under the License. #include "osp/partitioning/model/partitioning.hpp" #include "osp/partitioning/model/partitioning_replication.hpp" +#include "osp/partitioning/model/hypergraph_utility.hpp" #include "osp/partitioning/partitioners/generic_FM.hpp" #include "osp/graph_implementations/adj_list_impl/computational_dag_vector_impl.hpp" #include "osp/auxiliary/io/hdag_graph_file_reader.hpp" @@ -62,7 +63,7 @@ BOOST_AUTO_TEST_CASE(Hypergraph_and_Partition_test) { BOOST_CHECK_EQUAL(Hgraph.num_hyperedges(), 16); // DAG format, all hyperedges have size 2 - Hgraph.convert_from_cdag_as_dag(DAG); + Hgraph = convert_from_cdag_as_dag(DAG); BOOST_CHECK_EQUAL(DAG.num_vertices(), Hgraph.num_vertices()); BOOST_CHECK_EQUAL(DAG.num_edges(), Hgraph.num_hyperedges()); BOOST_CHECK_EQUAL(DAG.num_edges()*2, Hgraph.num_pins()); @@ -73,7 +74,7 @@ BOOST_AUTO_TEST_CASE(Hypergraph_and_Partition_test) { if(DAG.out_degree(node) > 0) ++ nr_of_non_sinks; - Hgraph.convert_from_cdag_as_hyperdag(DAG); + Hgraph = convert_from_cdag_as_hyperdag(DAG); BOOST_CHECK_EQUAL(DAG.num_vertices(), Hgraph.num_vertices()); BOOST_CHECK_EQUAL(nr_of_non_sinks, Hgraph.num_hyperedges()); BOOST_CHECK_EQUAL(DAG.num_edges() + nr_of_non_sinks, Hgraph.num_pins()); @@ -119,7 +120,7 @@ BOOST_AUTO_TEST_CASE(Hypergraph_and_Partition_test) { // Dummy partitioning with replication - instance.getHypergraph().convert_from_cdag_as_hyperdag(DAG); + instance.setHypergraph(convert_from_cdag_as_hyperdag(DAG)); instance.setNumberOfPartitions(3); instance.setMaxWorkWeightExplicitly(30); PartitioningWithReplication partition_with_rep(instance); @@ -137,7 +138,7 @@ BOOST_AUTO_TEST_CASE(Hypergraph_and_Partition_test) { BOOST_CHECK(partition_with_rep.satisfiesBalanceConstraint()); BOOST_CHECK(partition_with_rep.computeConnectivityCost() >= partition_with_rep.computeCutNetCost()); - instance.setMaxWorkWeightExplicitly(Hgraph.compute_total_vertex_work_weight()); + instance.setMaxWorkWeightExplicitly(compute_total_vertex_work_weight(Hgraph)); for(unsigned node = 0; node < Hgraph.num_vertices(); ++node) partition_with_rep.setAssignedPartitions(node, {0, 1, 2}); @@ -172,7 +173,7 @@ BOOST_AUTO_TEST_CASE(Hypergraph_and_Partition_test) { graph larger_DAG; file_reader::readComputationalDagHyperdagFormatDB( (cwd / "data/spaa/large/instance_CG_N24_K22_nzP0d2.hdag").string(), larger_DAG); - instance.getHypergraph().convert_from_cdag_as_hyperdag(larger_DAG); + instance.setHypergraph(convert_from_cdag_as_hyperdag(larger_DAG)); instance.setMaxWorkWeightExplicitly(4000); for(unsigned node = 0; node < instance.getHypergraph().num_vertices(); ++node) diff --git a/tests/ilp_hypergraph_partitioning.cpp b/tests/ilp_hypergraph_partitioning.cpp index 0d95887d..f98797da 100644 --- a/tests/ilp_hypergraph_partitioning.cpp +++ b/tests/ilp_hypergraph_partitioning.cpp @@ -20,6 +20,7 @@ limitations under the License. 
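The tests above show the pattern for every call site after this refactor: conversion, weight totals and induced subhypergraphs are now free functions taking the hypergraph as their first argument. A condensed sketch (assuming an instantiated Hypergraph type aliased as hypergraph, as the next patch introduces, and a DAG dag already read from file):

    hypergraph hg = convert_from_cdag_as_hyperdag<hypergraph>(dag); // one hyperedge per non-sink vertex
    auto total_work = compute_total_vertex_work_weight(hg);         // free function, no longer a member
    std::vector<bool> keep(hg.num_vertices(), false);
    keep[0] = keep[1] = true;                                       // keep only vertices 0 and 1
    hypergraph sub = create_induced_hypergraph(hg, keep);           // nets with fewer than 2 surviving pins are dropped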
#include #include +#include "osp/partitioning/model/hypergraph_utility.hpp" #include "osp/partitioning/partitioners/partitioning_ILP.hpp" #include "osp/partitioning/partitioners/partitioning_ILP_replication.hpp" #include "osp/graph_implementations/adj_list_impl/computational_dag_vector_impl.hpp" @@ -47,9 +48,7 @@ BOOST_AUTO_TEST_CASE(test_full) { BOOST_CHECK(status); - Hypergraph Hgraph; - - Hgraph.convert_from_cdag_as_hyperdag(DAG); + Hypergraph Hgraph = convert_from_cdag_as_hyperdag(DAG); BOOST_CHECK_EQUAL(DAG.num_vertices(), Hgraph.num_vertices()); PartitioningProblem instance(Hgraph, 3, 35); From 1a4fad6c27e342087712a430329010655f260ac7 Mon Sep 17 00:00:00 2001 From: tonibohnlein Date: Fri, 24 Oct 2025 12:59:38 +0200 Subject: [PATCH 9/9] added hyperdag template arguments --- apps/ilp_hypergraph_partitioner.cpp | 29 ++++++------ .../auxiliary/io/partitioning_file_writer.hpp | 20 +++++---- include/osp/partitioning/model/hypergraph.hpp | 15 ++++++- .../partitioning/model/hypergraph_utility.hpp | 45 +++++++++++++------ .../osp/partitioning/model/partitioning.hpp | 37 ++++++++------- .../model/partitioning_problem.hpp | 30 ++++++++----- .../model/partitioning_replication.hpp | 34 ++++++++------ .../partitioning/partitioners/generic_FM.hpp | 25 +++++++---- .../partitioners/partitioning_ILP.hpp | 34 ++++++++------ .../partitioners/partitioning_ILP_base.hpp | 25 +++++++---- .../partitioning_ILP_replication.hpp | 27 ++++++----- tests/hypergraph_and_partition.cpp | 14 +++--- 12 files changed, 207 insertions(+), 128 deletions(-) diff --git a/apps/ilp_hypergraph_partitioner.cpp b/apps/ilp_hypergraph_partitioner.cpp index bd271930..fd184358 100644 --- a/apps/ilp_hypergraph_partitioner.cpp +++ b/apps/ilp_hypergraph_partitioner.cpp @@ -39,6 +39,7 @@ limitations under the License. using namespace osp; using graph = computational_dag_vector_impl_def_int_t; +using hypergraph = Hypergraph_def_t; int main(int argc, char *argv[]) { if (argc < 4) { @@ -80,16 +81,16 @@ int main(int argc, char *argv[]) { return 1; } - Hypergraph hgraph; + PartitioningProblem instance; bool file_status = true; if (file_ending == "hdag") { graph dag; file_status = file_reader::readComputationalDagHyperdagFormatDB(filename_hgraph, dag); if(file_status) - hgraph = convert_from_cdag_as_hyperdag(dag); + instance.getHypergraph() = convert_from_cdag_as_hyperdag(dag); } else if (file_ending == "mtx") { - file_status = file_reader::readHypergraphMartixMarketFormat(filename_hgraph, hgraph); + file_status = file_reader::readHypergraphMartixMarketFormat(filename_hgraph, instance.getHypergraph()); } else { std::cout << "Unknown file extension." 
<< std::endl; return 1; @@ -100,12 +101,12 @@ int main(int argc, char *argv[]) { return 1; } - PartitioningProblem instance(hgraph, static_cast(nr_parts)); + instance.setNumberOfPartitions(static_cast(nr_parts)); instance.setMaxWorkWeightViaImbalanceFactor(imbalance); - Partitioning initial_partition(instance); - GenericFM fm; - for(size_t node = 0; node < hgraph.num_vertices(); ++node) + Partitioning initial_partition(instance); + GenericFM fm; + for(size_t node = 0; node < instance.getHypergraph().num_vertices(); ++node) initial_partition.setAssignedPartition(node, static_cast(node % static_cast(nr_parts))); if(nr_parts == 2) fm.ImprovePartitioning(initial_partition); @@ -114,17 +115,17 @@ int main(int argc, char *argv[]) { if (replicate > 0) { - PartitioningWithReplication partition(instance); - HypergraphPartitioningILPWithReplication partitioner; + PartitioningWithReplication partition(instance); + HypergraphPartitioningILPWithReplication partitioner; - for(size_t node = 0; node < hgraph.num_vertices(); ++node) + for(size_t node = 0; node < instance.getHypergraph().num_vertices(); ++node) partition.setAssignedPartitions(node, {initial_partition.assignedPartition(node)}); if(partition.satisfiesBalanceConstraint()) partitioner.setUseInitialSolution(true); partitioner.setTimeLimitSeconds(600); if(replicate == 2) - partitioner.setReplicationModel(HypergraphPartitioningILPWithReplication<>::REPLICATION_MODEL_IN_ILP::GENERAL); + partitioner.setReplicationModel(HypergraphPartitioningILPWithReplication::REPLICATION_MODEL_IN_ILP::GENERAL); auto solve_status = partitioner.computePartitioning(partition); @@ -139,10 +140,10 @@ int main(int argc, char *argv[]) { } else { - Partitioning partition(instance); - HypergraphPartitioningILP partitioner; + Partitioning partition(instance); + HypergraphPartitioningILP partitioner; - for(size_t node = 0; node < hgraph.num_vertices(); ++node) + for(size_t node = 0; node < instance.getHypergraph().num_vertices(); ++node) partition.setAssignedPartition(node, initial_partition.assignedPartition(node)); if(partition.satisfiesBalanceConstraint()) partitioner.setUseInitialSolution(true); diff --git a/include/osp/auxiliary/io/partitioning_file_writer.hpp b/include/osp/auxiliary/io/partitioning_file_writer.hpp index 5bf71810..782eac31 100644 --- a/include/osp/auxiliary/io/partitioning_file_writer.hpp +++ b/include/osp/auxiliary/io/partitioning_file_writer.hpp @@ -25,8 +25,10 @@ limitations under the License. namespace osp { namespace file_writer { -template -void write_txt(std::ostream &os, const Partitioning &partition) { +template +void write_txt(std::ostream &os, const Partitioning &partition) { + + using index_type = typename hypergraph_t::vertex_idx; os << "\%\% Partitioning for " << partition.getInstance().getNumberOfPartitions() << " parts." << std::endl; @@ -34,14 +36,16 @@ void write_txt(std::ostream &os, const Partitioning -void write_txt(const std::string &filename, const Partitioning &partition) { +template +void write_txt(const std::string &filename, const Partitioning &partition) { std::ofstream os(filename); write_txt(os, partition); } -template -void write_txt(std::ostream &os, const PartitioningWithReplication &partition) { +template +void write_txt(std::ostream &os, const PartitioningWithReplication &partition) { + + using index_type = typename hypergraph_t::vertex_idx; os << "\%\% Partitioning for " << partition.getInstance().getNumberOfPartitions() << " parts with replication." 
<< std::endl; @@ -54,8 +58,8 @@ void write_txt(std::ostream &os, const PartitioningWithReplication -void write_txt(const std::string &filename, const PartitioningWithReplication &partition) { +template +void write_txt(const std::string &filename, const PartitioningWithReplication &partition) { std::ofstream os(filename); write_txt(os, partition); } diff --git a/include/osp/partitioning/model/hypergraph.hpp b/include/osp/partitioning/model/hypergraph.hpp index ec16635a..715f6ac8 100644 --- a/include/osp/partitioning/model/hypergraph.hpp +++ b/include/osp/partitioning/model/hypergraph.hpp @@ -27,8 +27,15 @@ namespace osp { template class Hypergraph { + using this_t = Hypergraph; + public: + using vertex_idx = index_type; + using vertex_work_weight_type = workw_type; + using vertex_mem_weight_type = memw_type; + using vertex_comm_weight_type = commw_type; + Hypergraph() = default; Hypergraph(index_type num_vertices_, index_type num_hyperedges_) @@ -36,8 +43,8 @@ class Hypergraph { vertex_memory_weights(num_vertices_, 1), hyperedge_weights(num_hyperedges_, 1), incident_hyperedges_to_vertex(num_vertices_), vertices_in_hyperedge(num_hyperedges_){} - Hypergraph(const Hypergraph &other) = default; - Hypergraph &operator=(const Hypergraph &other) = default; + Hypergraph(const this_t &other) = default; + Hypergraph &operator=(const this_t &other) = default; virtual ~Hypergraph() = default; @@ -75,6 +82,8 @@ class Hypergraph { std::vector> vertices_in_hyperedge; }; +using Hypergraph_def_t = Hypergraph; + template void Hypergraph::add_pin(index_type vertex_idx, index_type hyperedge_idx) { @@ -179,4 +188,6 @@ void Hypergraph::reset(index_type } + + } // namespace osp \ No newline at end of file diff --git a/include/osp/partitioning/model/hypergraph_utility.hpp b/include/osp/partitioning/model/hypergraph_utility.hpp index 77af4516..46e698b4 100644 --- a/include/osp/partitioning/model/hypergraph_utility.hpp +++ b/include/osp/partitioning/model/hypergraph_utility.hpp @@ -36,18 +36,24 @@ namespace osp { // summing up weights -template -workw_type compute_total_vertex_work_weight(const Hypergraph& hgraph) +template +typename hypergraph_t::vertex_work_weight_type compute_total_vertex_work_weight(const hypergraph_t& hgraph) { + using index_type = typename hypergraph_t::vertex_idx; + using workw_type = typename hypergraph_t::vertex_work_weight_type; + workw_type total = 0; for(index_type node = 0; node < hgraph.num_vertices(); ++node) total += hgraph.get_vertex_work_weight(node); return total; } -template -memw_type compute_total_vertex_memory_weight(const Hypergraph& hgraph) +template +typename hypergraph_t::vertex_mem_weight_type compute_total_vertex_memory_weight(const hypergraph_t& hgraph) { + using index_type = typename hypergraph_t::vertex_idx; + using memw_type = typename hypergraph_t::vertex_mem_weight_type; + memw_type total = 0; for(index_type node = 0; node < hgraph.num_vertices(); ++node) total += hgraph.get_vertex_memory_weight(node); @@ -57,19 +63,22 @@ memw_type compute_total_vertex_memory_weight(const Hypergraph -Hypergraph create_induced_hypergraph(const Hypergraph& hgraph, const std::vector& include) +template +hypergraph_t create_induced_hypergraph(const hypergraph_t& hgraph, const std::vector& include) { if(include.size() != hgraph.num_vertices()) throw std::invalid_argument("Invalid Argument while extracting induced hypergraph: input bool array has incorrect size."); + using index_type = typename hypergraph_t::vertex_idx; + + std::vector new_index(hgraph.num_vertices()); unsigned 
current_index = 0; for(index_type node = 0; node < hgraph.num_vertices(); ++node) if(include[node]) new_index[node] = current_index++; - Hypergraph new_hgraph(current_index, 0); + hypergraph_t new_hgraph(current_index, 0); for(index_type node = 0; node < hgraph.num_vertices(); ++node) if(include[node]) { @@ -97,15 +106,20 @@ Hypergraph create_induced_hypergr // conversion -template -Hypergraph convert_from_cdag_as_dag(const Graph_t& dag) +template +hypergraph_t convert_from_cdag_as_dag(const Graph_t& dag) { + using index_type = typename hypergraph_t::vertex_idx; + using workw_type = typename hypergraph_t::vertex_work_weight_type; + using memw_type = typename hypergraph_t::vertex_mem_weight_type; + using commw_type = typename hypergraph_t::vertex_comm_weight_type; + static_assert(std::is_same_v, index_type>, "Index type mismatch, cannot convert DAG to hypergraph."); static_assert(std::is_same_v, workw_type>, "Work weight type mismatch, cannot convert DAG to hypergraph."); static_assert(std::is_same_v, memw_type>, "Memory weight type mismatch, cannot convert DAG to hypergraph."); static_assert(!has_edge_weights_v || std::is_same_v, commw_type>, "Communication weight type mismatch, cannot convert DAG to hypergraph."); - Hypergraph hgraph(dag.num_vertices(), 0); + hypergraph_t hgraph(dag.num_vertices(), 0); for(const auto &node : dag.vertices()) { hgraph.set_vertex_work_weight(node, dag.vertex_work_weight(node)); @@ -119,15 +133,20 @@ Hypergraph convert_from_cdag_as_d return hgraph; } -template -Hypergraph convert_from_cdag_as_hyperdag(const Graph_t& dag) +template +hypergraph_t convert_from_cdag_as_hyperdag(const Graph_t& dag) { + using index_type = typename hypergraph_t::vertex_idx; + using workw_type = typename hypergraph_t::vertex_work_weight_type; + using memw_type = typename hypergraph_t::vertex_mem_weight_type; + using commw_type = typename hypergraph_t::vertex_comm_weight_type; + static_assert(std::is_same_v, index_type>, "Index type mismatch, cannot convert DAG to hypergraph."); static_assert(std::is_same_v, workw_type>, "Work weight type mismatch, cannot convert DAG to hypergraph."); static_assert(std::is_same_v, memw_type>, "Memory weight type mismatch, cannot convert DAG to hypergraph."); static_assert(std::is_same_v, commw_type>, "Communication weight type mismatch, cannot convert DAG to hypergraph."); - Hypergraph hgraph(dag.num_vertices(), 0); + hypergraph_t hgraph(dag.num_vertices(), 0); for(const auto &node : dag.vertices()) { hgraph.set_vertex_work_weight(node, dag.vertex_work_weight(node)); hgraph.set_vertex_memory_weight(node, dag.vertex_mem_weight(node)); diff --git a/include/osp/partitioning/model/partitioning.hpp b/include/osp/partitioning/model/partitioning.hpp index e724deed..d25a28b9 100644 --- a/include/osp/partitioning/model/partitioning.hpp +++ b/include/osp/partitioning/model/partitioning.hpp @@ -26,12 +26,17 @@ namespace osp { // Represents a partitioning where each vertex of a hypergraph is assigned to a specific partition -template +template class Partitioning { private: - const PartitioningProblem *instance; + using index_type = typename hypergraph_t::vertex_idx; + using workw_type = typename 
node_to_partition_assignment(std::vector(inst.getHypergraph().num_vertices(), 0)) {} - Partitioning(const PartitioningProblem &inst, const std::vector &partition_assignment_) + Partitioning(const PartitioningProblem &inst, const std::vector &partition_assignment_) : instance(&inst), node_to_partition_assignment(partition_assignment_) {} - Partitioning(const Partitioning &partitioning_) = default; - Partitioning(Partitioning &&partitioning_) = default; + Partitioning(const Partitioning &partitioning_) = default; + Partitioning(Partitioning &&partitioning_) = default; - Partitioning &operator=(const Partitioning &partitioning_) = default; + Partitioning &operator=(const Partitioning &partitioning_) = default; virtual ~Partitioning() = default; // getters and setters - inline const PartitioningProblem &getInstance() const { return *instance; } + inline const PartitioningProblem &getInstance() const { return *instance; } inline unsigned assignedPartition(index_type node) const { return node_to_partition_assignment[node]; } inline const std::vector &assignedPartitions() const { return node_to_partition_assignment; } @@ -109,8 +114,8 @@ class Partitioning { }; -template -std::vector Partitioning::computeLambdaForHyperedges() const +template +std::vector Partitioning::computeLambdaForHyperedges() const { std::vector lambda(instance->getHypergraph().num_hyperedges(), 0); for(index_type edge_idx = 0; edge_idx < instance->getHypergraph().num_hyperedges(); ++edge_idx) @@ -128,8 +133,8 @@ std::vector Partitioning -commw_type Partitioning::computeConnectivityCost() const { +template +typename hypergraph_t::vertex_comm_weight_type Partitioning::computeConnectivityCost() const { commw_type total = 0; std::vector lambda = computeLambdaForHyperedges(); @@ -141,8 +146,8 @@ commw_type Partitioning::computeC return total; } -template -commw_type Partitioning::computeCutNetCost() const { +template +typename hypergraph_t::vertex_comm_weight_type Partitioning::computeCutNetCost() const { commw_type total = 0; std::vector lambda = computeLambdaForHyperedges(); @@ -153,8 +158,8 @@ commw_type Partitioning::computeC return total; } -template -bool Partitioning::satisfiesBalanceConstraint() const { +template +bool Partitioning::satisfiesBalanceConstraint() const { std::vector work_weight(instance->getNumberOfPartitions(), 0); std::vector memory_weight(instance->getNumberOfPartitions(), 0); for (index_type node = 0; node < node_to_partition_assignment.size(); ++node) { diff --git a/include/osp/partitioning/model/partitioning_problem.hpp b/include/osp/partitioning/model/partitioning_problem.hpp index a2fe1313..b121ddd9 100644 --- a/include/osp/partitioning/model/partitioning_problem.hpp +++ b/include/osp/partitioning/model/partitioning_problem.hpp @@ -26,11 +26,19 @@ limitations under the License. 
namespace osp { // represents a hypergraph partitioning problem into a fixed number of parts with a balance constraint -template +template class PartitioningProblem { private: - Hypergraph hgraph; + + using this_t = PartitioningProblem; + + using index_type = typename hypergraph_t::vertex_idx; + using workw_type = typename hypergraph_t::vertex_work_weight_type; + using memw_type = typename hypergraph_t::vertex_mem_weight_type; + using commw_type = typename hypergraph_t::vertex_comm_weight_type; + + hypergraph_t hgraph; unsigned nr_of_partitions; workw_type max_work_weight_per_partition; @@ -42,27 +50,27 @@ class PartitioningProblem { PartitioningProblem() = default; - PartitioningProblem(const Hypergraph &hgraph_, unsigned nr_parts_ = 2, + PartitioningProblem(const hypergraph_t &hgraph_, unsigned nr_parts_ = 2, workw_type max_work_weight_ = std::numeric_limits::max(), memw_type max_memory_weight_ = std::numeric_limits::max()) : hgraph(hgraph_), nr_of_partitions(nr_parts_), max_work_weight_per_partition(max_work_weight_), max_memory_weight_per_partition(max_memory_weight_) {} - PartitioningProblem(const Hypergraph &&hgraph_, unsigned nr_parts_ = 2, + PartitioningProblem(const hypergraph_t &&hgraph_, unsigned nr_parts_ = 2, workw_type max_work_weight_ = std::numeric_limits::max(), memw_type max_memory_weight_ = std::numeric_limits::max()) : hgraph(hgraph_), nr_of_partitions(nr_parts_), max_work_weight_per_partition(max_work_weight_), max_memory_weight_per_partition(max_memory_weight_) {} - PartitioningProblem(const PartitioningProblem &other) = default; - PartitioningProblem(PartitioningProblem &&other) = default; + PartitioningProblem(const this_t &other) = default; + PartitioningProblem(this_t &&other) = default; - PartitioningProblem &operator=(const PartitioningProblem &other) = default; - PartitioningProblem &operator=(PartitioningProblem &&other) = default; + PartitioningProblem &operator=(const this_t &other) = default; + PartitioningProblem &operator=(this_t &&other) = default; // getters - inline const Hypergraph &getHypergraph() const { return hgraph; } - inline Hypergraph &getHypergraph() { return hgraph; } + inline const hypergraph_t &getHypergraph() const { return hgraph; } + inline hypergraph_t &getHypergraph() { return hgraph; } inline unsigned getNumberOfPartitions() const { return nr_of_partitions; } inline workw_type getMaxWorkWeightPerPartition() const { return max_work_weight_per_partition; } @@ -70,7 +78,7 @@ class PartitioningProblem { inline bool getAllowsReplication() const { return allows_replication; } // setters - inline void setHypergraph(const Hypergraph &hgraph_) { hgraph = hgraph_; } + inline void setHypergraph(const hypergraph_t &hgraph_) { hgraph = hgraph_; } inline void setNumberOfPartitions(unsigned nr_parts_) { nr_of_partitions = nr_parts_; } inline void setAllowsReplication(bool allowed_) { allows_replication = allowed_; } diff --git a/include/osp/partitioning/model/partitioning_replication.hpp b/include/osp/partitioning/model/partitioning_replication.hpp index 70c3a886..0efe577c 100644 --- a/include/osp/partitioning/model/partitioning_replication.hpp +++ b/include/osp/partitioning/model/partitioning_replication.hpp @@ -26,12 +26,18 @@ namespace osp { // Represents a partitioning where each vertex of a hypergraph can be assigned to one or more partitions -template +template class PartitioningWithReplication { private: - const PartitioningProblem *instance; + using index_type = typename hypergraph_t::vertex_idx; + using workw_type = typename 
hypergraph_t::vertex_work_weight_type; + using memw_type = typename hypergraph_t::vertex_mem_weight_type; + using commw_type = typename hypergraph_t::vertex_comm_weight_type; + + + const PartitioningProblem *instance; std::vector > node_to_partitions_assignment; @@ -39,23 +45,23 @@ class PartitioningWithReplication { PartitioningWithReplication() = delete; - PartitioningWithReplication(const PartitioningProblem &inst) + PartitioningWithReplication(const PartitioningProblem &inst) : instance(&inst), node_to_partitions_assignment(std::vector>(inst.getHypergraph().num_vertices(), {0})) {} - PartitioningWithReplication(const PartitioningProblem &inst, const std::vector > &partition_assignment_) + PartitioningWithReplication(const PartitioningProblem &inst, const std::vector > &partition_assignment_) : instance(&inst), node_to_partitions_assignment(partition_assignment_) {} - PartitioningWithReplication(const PartitioningWithReplication &partitioning_) = default; - PartitioningWithReplication(PartitioningWithReplication &&partitioning_) = default; + PartitioningWithReplication(const PartitioningWithReplication &partitioning_) = default; + PartitioningWithReplication(PartitioningWithReplication &&partitioning_) = default; - PartitioningWithReplication &operator=(const PartitioningWithReplication &partitioning_) = default; + PartitioningWithReplication &operator=(const PartitioningWithReplication &partitioning_) = default; virtual ~PartitioningWithReplication() = default; // getters and setters - inline const PartitioningProblem &getInstance() const { return *instance; } + inline const PartitioningProblem &getInstance() const { return *instance; } inline std::vector assignedPartitions(index_type node) const { return node_to_partitions_assignment[node]; } inline const std::vector > &assignedPartitions() const { return node_to_partitions_assignment; } @@ -105,8 +111,8 @@ class PartitioningWithReplication { }; -template -commw_type PartitioningWithReplication::computeConnectivityCost() const { +template +typename hypergraph_t::vertex_comm_weight_type PartitioningWithReplication::computeConnectivityCost() const { // naive implementation. in the worst-case this is exponential in the number of parts if(instance->getNumberOfPartitions() > 16) @@ -167,8 +173,8 @@ commw_type PartitioningWithReplication -commw_type PartitioningWithReplication::computeCutNetCost() const { +template +typename hypergraph_t::vertex_comm_weight_type PartitioningWithReplication::computeCutNetCost() const { commw_type total = 0; for(index_type edge_idx = 0; edge_idx < instance->getHypergraph().num_hyperedges(); ++edge_idx) @@ -193,8 +199,8 @@ commw_type PartitioningWithReplication -bool PartitioningWithReplication::satisfiesBalanceConstraint() const { +template +bool PartitioningWithReplication::satisfiesBalanceConstraint() const { std::vector work_weight(instance->getNumberOfPartitions(), 0); std::vector memory_weight(instance->getNumberOfPartitions(), 0); for (index_type node = 0; node < node_to_partitions_assignment.size(); ++node) diff --git a/include/osp/partitioning/partitioners/generic_FM.hpp b/include/osp/partitioning/partitioners/generic_FM.hpp index 6b24af2d..a7df5bec 100644 --- a/include/osp/partitioning/partitioners/generic_FM.hpp +++ b/include/osp/partitioning/partitioners/generic_FM.hpp @@ -24,9 +24,16 @@ limitations under the License. 
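With replication, a vertex may hold copies on several parts, so evaluating a net's contribution means picking the best replica combination; the code's own comment calls its computeConnectivityCost() a naive, worst-case exponential implementation, hence the guard on more than 16 partitions. A usage sketch (types fixed elsewhere; the replica sets are illustrative):

    PartitioningWithReplication rep(instance); // instance with a small number of parts
    rep.setAssignedPartitions(0, {0, 1});      // vertex 0 replicated on parts 0 and 1
    auto cost = rep.computeConnectivityCost(); // exponential in the number of parts in the worst case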
namespace osp{ -template +template class GenericFM { + using index_type = typename hypergraph_t::vertex_idx; + using workw_type = typename hypergraph_t::vertex_work_weight_type; + using memw_type = typename hypergraph_t::vertex_mem_weight_type; + using commw_type = typename hypergraph_t::vertex_comm_weight_type; + + + protected: unsigned max_number_of_passes = 10; index_type max_nodes_in_part = 0; @@ -36,9 +43,9 @@ class GenericFM { public: - void ImprovePartitioning(Partitioning& partition); + void ImprovePartitioning(Partitioning& partition); - void RecursiveFM(Partitioning& partition); + void RecursiveFM(Partitioning& partition); inline unsigned getMaxNumberOfPasses() const { return max_number_of_passes; } inline void setMaxNumberOfPasses(unsigned passes_) { max_number_of_passes = passes_; } @@ -46,8 +53,8 @@ class GenericFM { inline void setMaxNodesInPart(index_type max_nodes_) { max_nodes_in_part = max_nodes_; } }; -template -void GenericFM::ImprovePartitioning(Partitioning& partition) +template +void GenericFM::ImprovePartitioning(Partitioning& partition) { // Note: this algorithm disregards hyperedge weights, in order to keep the size of the gain bucket array bounded! @@ -262,8 +269,8 @@ void GenericFM::ImprovePartitioni } } -template -void GenericFM::RecursiveFM(Partitioning& partition) +template +void GenericFM::RecursiveFM(Partitioning& partition) { const unsigned& nr_parts = partition.getInstance().getNumberOfPartitions(); const index_type& nr_nodes = partition.getInstance().getHypergraph().num_vertices(); @@ -342,8 +349,8 @@ void GenericFM::RecursiveFM(Parti } -template -std::vector GenericFM::getMaxNodesOnLevel(index_type nr_nodes, unsigned nr_parts) const +template +std::vector GenericFM::getMaxNodesOnLevel(typename hypergraph_t::vertex_idx nr_nodes, unsigned nr_parts) const { std::vector max_nodes_on_level; std::vector limit_per_level({static_cast(ceil(static_cast(nr_nodes) / 2.0))}); diff --git a/include/osp/partitioning/partitioners/partitioning_ILP.hpp b/include/osp/partitioning/partitioners/partitioning_ILP.hpp index 2c74fd21..6b799c7f 100644 --- a/include/osp/partitioning/partitioners/partitioning_ILP.hpp +++ b/include/osp/partitioning/partitioners/partitioning_ILP.hpp @@ -26,27 +26,27 @@ limitations under the License. 
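With GenericFM now parameterized on the hypergraph type alone, a refinement run looks as follows (a sketch; hypergraph stands in for the instantiated type):

    GenericFM<hypergraph> fm;
    fm.setMaxNumberOfPasses(10);        // the default, spelled out
    fm.setMaxNodesInPart(0);            // 0 lets FM derive the limit from the balance constraint
    fm.ImprovePartitioning(partition);  // strictly 2-way refinement
    fm.RecursiveFM(partition_multiway); // 4, 8, 16 or 32 parts via recursive bisection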
namespace osp{ -template -class HypergraphPartitioningILP : public HypergraphPartitioningILPBase { +template +class HypergraphPartitioningILP : public HypergraphPartitioningILPBase { protected: - std::vector readCoptAssignment(const PartitioningProblem &instance, Model& model); + std::vector readCoptAssignment(const PartitioningProblem &instance, Model& model); - void setupExtraVariablesConstraints(const PartitioningProblem &instance, Model& model); + void setupExtraVariablesConstraints(const PartitioningProblem &instance, Model& model); - void setInitialSolution(const Partitioning &partition, Model& model); + void setInitialSolution(const Partitioning &partition, Model& model); public: virtual ~HypergraphPartitioningILP() = default; - RETURN_STATUS computePartitioning(Partitioning& result); + RETURN_STATUS computePartitioning(Partitioning& result); virtual std::string getAlgorithmName() const override { return "HypergraphPartitioningILP"; } }; -template -RETURN_STATUS HypergraphPartitioningILP::computePartitioning(Partitioning& result) +template +RETURN_STATUS HypergraphPartitioningILP::computePartitioning(Partitioning& result) { Envr env; Model model = env.CreateModel("HypergraphPart"); @@ -81,8 +81,10 @@ RETURN_STATUS HypergraphPartitioningILP -void HypergraphPartitioningILP::setupExtraVariablesConstraints(const PartitioningProblem &instance, Model& model) { +template +void HypergraphPartitioningILP::setupExtraVariablesConstraints(const PartitioningProblem &instance, Model& model) { + + using index_type = typename hypergraph_t::vertex_idx; const index_type numberOfParts = instance.getNumberOfPartitions(); const index_type numberOfVertices = instance.getHypergraph().num_vertices(); @@ -108,9 +110,11 @@ void HypergraphPartitioningILP::s }; // convert generic one-to-many assignment (of base class function) to one-to-one -template -std::vector HypergraphPartitioningILP::readCoptAssignment(const PartitioningProblem &instance, Model& model) +template +std::vector HypergraphPartitioningILP::readCoptAssignment(const PartitioningProblem &instance, Model& model) { + using index_type = typename hypergraph_t::vertex_idx; + std::vector node_to_partition(instance.getHypergraph().num_vertices(), std::numeric_limits::max()); std::vector > assignmentsGenericForm = this->readAllCoptAssignments(instance, model); @@ -120,9 +124,11 @@ std::vector HypergraphPartitioningILP -void HypergraphPartitioningILP::setInitialSolution(const Partitioning &partition, Model& model) +template +void HypergraphPartitioningILP::setInitialSolution(const Partitioning &partition, Model& model) { + using index_type = typename hypergraph_t::vertex_idx; + const std::vector& assignment = partition.assignedPartitions(); const unsigned& numPartitions = partition.getInstance().getNumberOfPartitions(); if(assignment.size() != partition.getInstance().getHypergraph().num_vertices()) diff --git a/include/osp/partitioning/partitioners/partitioning_ILP_base.hpp b/include/osp/partitioning/partitioners/partitioning_ILP_base.hpp index 558d1472..60e7378c 100644 --- a/include/osp/partitioning/partitioners/partitioning_ILP_base.hpp +++ b/include/osp/partitioning/partitioners/partitioning_ILP_base.hpp @@ -26,7 +26,7 @@ limitations under the License.
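Driving the COPT-backed solver end to end mirrors the application earlier in this series; condensed, with template arguments omitted:

    HypergraphPartitioningILP partitioner;
    partitioner.setTimeLimitSeconds(600);
    if (partition.satisfiesBalanceConstraint())
        partitioner.setUseInitialSolution(true); // warm start only from a feasible assignment
    auto status = partitioner.computePartitioning(partition);
    if (status == RETURN_STATUS::OSP_SUCCESS || status == RETURN_STATUS::BEST_FOUND)
        std::cout << partition.computeConnectivityCost() << std::endl;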
namespace osp{ -template +template class HypergraphPartitioningILPBase { protected: @@ -36,9 +36,9 @@ class HypergraphPartitioningILPBase { unsigned time_limit_seconds = 3600; bool use_initial_solution = false; - std::vector > readAllCoptAssignments(const PartitioningProblem &instance, Model& model); + std::vector > readAllCoptAssignments(const PartitioningProblem &instance, Model& model); - void setupFundamentalVariablesConstraintsObjective(const PartitioningProblem &instance, Model& model); + void setupFundamentalVariablesConstraintsObjective(const PartitioningProblem &instance, Model& model); void solveILP(Model& model); @@ -51,8 +51,8 @@ class HypergraphPartitioningILPBase { inline void setUseInitialSolution(bool use_) { use_initial_solution = use_; } }; -template -void HypergraphPartitioningILPBase::solveILP(Model& model) { +template +void HypergraphPartitioningILPBase::solveILP(Model& model) { model.SetIntParam(COPT_INTPARAM_LOGTOCONSOLE, 0); @@ -72,8 +72,12 @@ void HypergraphPartitioningILPBase -void HypergraphPartitioningILPBase::setupFundamentalVariablesConstraintsObjective(const PartitioningProblem &instance, Model& model) { +template +void HypergraphPartitioningILPBase::setupFundamentalVariablesConstraintsObjective(const PartitioningProblem &instance, Model& model) { + + using index_type = typename hypergraph_t::vertex_idx; + using workw_type = typename hypergraph_t::vertex_work_weight_type; + using memw_type = typename hypergraph_t::vertex_mem_weight_type; const index_type numberOfParts = instance.getNumberOfPartitions(); const index_type numberOfVertices = instance.getHypergraph().num_vertices(); @@ -128,9 +132,12 @@ void HypergraphPartitioningILPBase -std::vector > HypergraphPartitioningILPBase::readAllCoptAssignments(const PartitioningProblem &instance, Model& model) +template +std::vector > HypergraphPartitioningILPBase::readAllCoptAssignments(const PartitioningProblem &instance, Model& model) { + using index_type = typename hypergraph_t::vertex_idx; + + std::vector > node_to_partitions(instance.getHypergraph().num_vertices()); std::set nonempty_partition_ids; diff --git a/include/osp/partitioning/partitioners/partitioning_ILP_replication.hpp b/include/osp/partitioning/partitioners/partitioning_ILP_replication.hpp index 9e4754bb..cda1d0ec 100644 --- a/include/osp/partitioning/partitioners/partitioning_ILP_replication.hpp +++ b/include/osp/partitioning/partitioners/partitioning_ILP_replication.hpp @@ -26,16 +26,16 @@ limitations under the License. 
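The fundamental model assembled by the base class is the standard connectivity ILP; in outline (x and y are this sketch's names, not the identifiers used in the solver code):

    // x[v][p] = 1 iff vertex v is placed on part p; y[e][p] = 1 iff net e has a pin on part p
    //   each vertex is placed:     sum_p x[v][p] == 1   (relaxed to >= 1 once replication is allowed)
    //   pins mark their parts:     x[v][p] <= y[e][p]   for every pin v of net e
    //   work balance, per part:    sum_v work(v) * x[v][p] <= getMaxWorkWeightPerPartition()
    //   memory balance, per part:  sum_v mem(v)  * x[v][p] <= getMaxMemoryWeightPerPartition()
    //   objective:                 minimize sum_e weight(e) * (sum_p y[e][p] - 1)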
diff --git a/include/osp/partitioning/partitioners/partitioning_ILP_replication.hpp b/include/osp/partitioning/partitioners/partitioning_ILP_replication.hpp
index 9e4754bb..cda1d0ec 100644
--- a/include/osp/partitioning/partitioners/partitioning_ILP_replication.hpp
+++ b/include/osp/partitioning/partitioners/partitioning_ILP_replication.hpp
@@ -26,16 +26,16 @@ limitations under the License.
 
 namespace osp{
 
-template <typename index_type>
-class HypergraphPartitioningILPWithReplication : public HypergraphPartitioningILPBase<index_type> {
+template <typename hypergraph_t>
+class HypergraphPartitioningILPWithReplication : public HypergraphPartitioningILPBase<hypergraph_t> {
 
   public:
     enum class REPLICATION_MODEL_IN_ILP { ONLY_TWICE, GENERAL };
 
   protected:
 
-    void setupExtraVariablesConstraints(const PartitioningProblem<index_type> &instance, Model& model);
+    void setupExtraVariablesConstraints(const PartitioningProblem<hypergraph_t> &instance, Model& model);
 
-    void setInitialSolution(const PartitioningWithReplication<index_type> &partition, Model& model);
+    void setInitialSolution(const PartitioningWithReplication<hypergraph_t> &partition, Model& model);
 
     REPLICATION_MODEL_IN_ILP replication_model = REPLICATION_MODEL_IN_ILP::ONLY_TWICE;
@@ -43,15 +43,15 @@ class HypergraphPartitioningILPWithReplication : public HypergraphPartitioningIL
 
     virtual ~HypergraphPartitioningILPWithReplication() = default;
 
-    RETURN_STATUS computePartitioning(PartitioningWithReplication<index_type>& result);
+    RETURN_STATUS computePartitioning(PartitioningWithReplication<hypergraph_t>& result);
 
     virtual std::string getAlgorithmName() const override { return "HypergraphPartitioningILPWithReplication"; }
 
     void setReplicationModel(REPLICATION_MODEL_IN_ILP replication_model_) { replication_model = replication_model_; }
 };
 
-template <typename index_type>
-RETURN_STATUS HypergraphPartitioningILPWithReplication<index_type>::computePartitioning(PartitioningWithReplication<index_type>& result)
+template <typename hypergraph_t>
+RETURN_STATUS HypergraphPartitioningILPWithReplication<hypergraph_t>::computePartitioning(PartitioningWithReplication<hypergraph_t>& result)
 {
     Envr env;
     Model model = env.CreateModel("HypergraphPartRepl");
@@ -86,8 +86,11 @@ RETURN_STATUS HypergraphPartitioningILPWithReplication<index_type>::computePart
-template <typename index_type>
-void HypergraphPartitioningILPWithReplication<index_type>::setupExtraVariablesConstraints(const PartitioningProblem<index_type> &instance, Model& model) {
+template <typename hypergraph_t>
+void HypergraphPartitioningILPWithReplication<hypergraph_t>::setupExtraVariablesConstraints(const PartitioningProblem<hypergraph_t> &instance, Model& model) {
+
+    using index_type = typename hypergraph_t::vertex_idx;
+
     const index_type numberOfParts = instance.getNumberOfPartitions();
     const index_type numberOfVertices = instance.getHypergraph().num_vertices();
@@ -160,9 +163,11 @@ void HypergraphPartitioningILPWithReplication<index_type>::setupExtraVariablesC
-template <typename index_type>
-void HypergraphPartitioningILPWithReplication<index_type>::setInitialSolution(const PartitioningWithReplication<index_type> &partition, Model& model)
+template <typename hypergraph_t>
+void HypergraphPartitioningILPWithReplication<hypergraph_t>::setInitialSolution(const PartitioningWithReplication<hypergraph_t> &partition, Model& model)
 {
+    using index_type = typename hypergraph_t::vertex_idx;
+
     const std::vector<std::vector<index_type> >& assignments = partition.assignedPartitions();
     const unsigned& numPartitions = partition.getInstance().getNumberOfPartitions();
     if(assignments.size() != partition.getInstance().getHypergraph().num_vertices())
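With replication, an assignment maps each vertex to a set of partitions rather than a single one; readAllCoptAssignments already returns this one-to-many form. Assuming ONLY_TWICE means each vertex may be placed in at most two parts (the enum's name suggests this, but the source does not spell it out), a small shape check for such assignments could look as follows; valid_only_twice is a hypothetical helper, not part of the repository.

    #include <cassert>
    #include <cstdint>
    #include <vector>

    using index_type = std::uint32_t;

    // Checks a one-to-many assignment: every vertex is placed in at least one
    // part, at most two parts (assumed ONLY_TWICE model), and only valid parts.
    bool valid_only_twice(const std::vector<std::vector<index_type>> &assignments,
                          unsigned num_parts) {
        for (const std::vector<index_type> &parts : assignments) {
            if (parts.empty() || parts.size() > 2) return false;
            for (index_type p : parts)
                if (p >= num_parts) return false;
        }
        return true;
    }

    int main() {
        // vertex 0 replicated into parts 0 and 2; vertices 1 and 2 not replicated
        std::vector<std::vector<index_type>> assignments = {{0, 2}, {1}, {2}};
        assert(valid_only_twice(assignments, 3));
        assignments[1] = {0, 1, 2}; // three copies: violates the assumed model
        assert(!valid_only_twice(assignments, 3));
    }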
#include "osp/auxiliary/io/mtx_hypergraph_file_reader.hpp" #include "osp/auxiliary/io/partitioning_file_writer.hpp" - using namespace osp; BOOST_AUTO_TEST_CASE(Hypergraph_and_Partition_test) { using graph = computational_dag_vector_impl_def_int_t; + using hypergraph = Hypergraph_def_t; // Getting root git directory std::filesystem::path cwd = std::filesystem::current_path(); @@ -54,7 +54,7 @@ BOOST_AUTO_TEST_CASE(Hypergraph_and_Partition_test) { BOOST_CHECK(status); - Hypergraph Hgraph; + hypergraph Hgraph; // Matrix format, one hyperedge for each row/column status = file_reader::readHypergraphMartixMarketFormat((cwd / "data/mtx_tests/ErdosRenyi_8_19_A.mtx").string(), Hgraph); @@ -63,7 +63,7 @@ BOOST_AUTO_TEST_CASE(Hypergraph_and_Partition_test) { BOOST_CHECK_EQUAL(Hgraph.num_hyperedges(), 16); // DAG format, all hyperedges have size 2 - Hgraph = convert_from_cdag_as_dag(DAG); + Hgraph = convert_from_cdag_as_dag(DAG); BOOST_CHECK_EQUAL(DAG.num_vertices(), Hgraph.num_vertices()); BOOST_CHECK_EQUAL(DAG.num_edges(), Hgraph.num_hyperedges()); BOOST_CHECK_EQUAL(DAG.num_edges()*2, Hgraph.num_pins()); @@ -74,7 +74,7 @@ BOOST_AUTO_TEST_CASE(Hypergraph_and_Partition_test) { if(DAG.out_degree(node) > 0) ++ nr_of_non_sinks; - Hgraph = convert_from_cdag_as_hyperdag(DAG); + Hgraph = convert_from_cdag_as_hyperdag(DAG); BOOST_CHECK_EQUAL(DAG.num_vertices(), Hgraph.num_vertices()); BOOST_CHECK_EQUAL(nr_of_non_sinks, Hgraph.num_hyperedges()); BOOST_CHECK_EQUAL(DAG.num_edges() + nr_of_non_sinks, Hgraph.num_pins()); @@ -120,7 +120,7 @@ BOOST_AUTO_TEST_CASE(Hypergraph_and_Partition_test) { // Dummy partitioning with replication - instance.setHypergraph(convert_from_cdag_as_hyperdag(DAG)); + instance.setHypergraph(convert_from_cdag_as_hyperdag(DAG)); instance.setNumberOfPartitions(3); instance.setMaxWorkWeightExplicitly(30); PartitioningWithReplication partition_with_rep(instance); @@ -162,7 +162,7 @@ BOOST_AUTO_TEST_CASE(Hypergraph_and_Partition_test) { int original_cost = partition_to_improve.computeConnectivityCost(); - GenericFM fm; + GenericFM fm; fm.ImprovePartitioning(partition_to_improve); int new_cost = partition_to_improve.computeConnectivityCost(); @@ -173,7 +173,7 @@ BOOST_AUTO_TEST_CASE(Hypergraph_and_Partition_test) { graph larger_DAG; file_reader::readComputationalDagHyperdagFormatDB( (cwd / "data/spaa/large/instance_CG_N24_K22_nzP0d2.hdag").string(), larger_DAG); - instance.setHypergraph(convert_from_cdag_as_hyperdag(larger_DAG)); + instance.setHypergraph(convert_from_cdag_as_hyperdag(larger_DAG)); instance.setMaxWorkWeightExplicitly(4000); for(unsigned node = 0; node < instance.getHypergraph().num_vertices(); ++node)