diff --git a/ZippedView.h b/ZippedView.h new file mode 100644 index 0000000..8b48dec --- /dev/null +++ b/ZippedView.h @@ -0,0 +1,98 @@ +#pragma once +#include + +template auto tuple_map(Fn&& fn, const std::tuple& t) + -> decltype(std::make_tuple(fn(std::get<0>(t)))) + { return std::make_tuple(fn(std::get<0>(t))); } +template auto tuple_map(Fn&& fn, const std::tuple& t) + -> decltype(std::make_tuple(fn(std::get<0>(t)),fn(std::get<1>(t)))) + { return std::make_tuple(fn(std::get<0>(t)),fn(std::get<1>(t))); } +template auto tuple_map(Fn&& fn, const std::tuple& t) + -> decltype(std::make_tuple(fn(std::get<0>(t)),fn(std::get<1>(t)),fn(std::get<2>(t)))) + { return std::make_tuple(fn(std::get<0>(t)),fn(std::get<1>(t)),fn(std::get<2>(t))); } + + +template struct ZippedView { + std::tuple&...> arrays; + + using IndexT = decltype(std::get<0>(arrays).size()); + using IndexDiffT = decltype(declval() - declval()); + + private: + struct GetRefHelper { + IndexT i; + template std::reference_wrapper operator()(const Array& array) { return array[i]; } + }; + public: + using Val = std::tuple; + + struct Ref { + ZippedView* view; + IndexT i; + template auto get() -> decltype(std::get(view->arrays)[i]) { return std::get(view->arrays)[i]; } + template auto get() const -> decltype(std::get(view->arrays)[i]) { return std::get(view->arrays)[i]; } + + std::tuple as_tuple() const { + return tuple_map(GetRefHelper{i}, view->arrays); + } + + Ref& operator=(const Ref& rhs) { + as_tuple() = rhs.as_tuple(); + return *this; + } + + Ref& operator=(const std::tuple& rhs) { + as_tuple() = rhs; + return *this; + } + + // Implicit conversion to tuple + operator Val() const + { return as_tuple(); } + + // Overload that accepts non-lvalue arguments since we are swapping the underlying memory + inline friend void swap(Ref lhs, Ref rhs) { + auto lt = lhs.as_tuple(); + auto rt = rhs.as_tuple(); + swap(lt, rt); + } + }; + + struct Ptr { + ZippedView* view; + IndexT i; + template auto get() -> 
decltype(&std::get(view->arrays)[i]) { return &std::get(view->arrays)[i]; } + template auto get() const -> decltype(&std::get(view->arrays)[i]) { return &std::get(view->arrays)[i]; } + }; + + struct Iterator : public std::iterator { + ZippedView* view; + IndexT i; + Iterator(ZippedView& _view, IndexT _i) + : view(&_view) + , i(_i) + { } + IndexDiffT operator-(const Iterator& rhs) const { assert(view == rhs.view); return i - rhs.i; } + Iterator& operator--() { --i; return *this; } + Iterator& operator++() { ++i; return *this; } + Iterator operator+(const IndexDiffT rhs) const { return Iterator{*view, i+rhs}; } + Iterator& operator+=(const IndexDiffT rhs) { i += rhs; return *this; } + Ref operator*() const { return Ref{view, i}; } + + bool operator==(const Iterator& rhs) const { assert(view == rhs.view); return i == rhs.i; } + bool operator!=(const Iterator& rhs) const { assert(view == rhs.view); return i != rhs.i; } + bool operator<(const Iterator& rhs) const { assert(view == rhs.view); return i < rhs.i; } + bool operator<=(const Iterator& rhs) const { assert(view == rhs.view); return i <= rhs.i; } + bool operator>(const Iterator& rhs) const { assert(view == rhs.view); return i > rhs.i; } + bool operator>=(const Iterator& rhs) const { assert(view == rhs.view); return i >= rhs.i; } + }; + + IndexT size() const { return std::get<0>(arrays).size(); } + Iterator begin() { return Iterator{*this, 0}; } + Iterator end() { return Iterator{*this, size()}; } + + Ref operator[](const IndexT i) { return Ref{this, i}; } +}; + +template auto zipped_view(Array&...
arrays) -> ZippedView +{ return ZippedView{std::tuple&...>{arrays...}}; } diff --git a/cad.cpp b/cad.cpp index 5c12fba..4f988bb 100644 --- a/cad.cpp +++ b/cad.cpp @@ -1,8 +1,25 @@ #include "cad.h" +#include "ZippedView.h" +#include #include // #include #include +bool has_dups(const RawArray raw_elements) { + Array elements = raw_elements.copy(); + + std::sort(elements.begin(), elements.end(), [](const Vec3i lhs, const Vec3i rhs) { + return lex_less(lhs.sorted(), rhs.sorted()); + }); + + for(const int i : range(elements.size()-1)) { + if(elements[i].sorted() == elements[i+1].sorted()) { + return true; + } + } + return false; +} + double rndd () { return (double)((double)rand() / (double)RAND_MAX); @@ -642,8 +659,76 @@ Mesh maybe_cleanup_mesh (Mesh mesh, bool is_cleanup) { else return mesh; } + +static void simplify_duplicate_faces(Array& elements, Array& depth_weights) { + + // Sort all elements so that duplicate faces will be adjacent + auto zipped = zipped_view(elements, depth_weights); + using Zipped = decltype(zipped); + std::sort(zipped.begin(), zipped.end(), [](const Zipped::Val& lhs, const Zipped::Val& rhs) { + return lex_less(std::get<0>(lhs).sorted(), std::get<0>(rhs).sorted()); + }); + + // Scan over elements only keeping the first in groups of duplicates and updating depth_weights accordingly + // Erase any elements where net depth_weights is 0 + if(!elements.empty()) { + // Check if sorting f flips normal of triangle + const auto permutation_parity = [](const Vec3i f) { + const int f0 = f.argmin(); + return (f[(f0+1) % 3] < f[(f0+2) % 3]); + }; + + int prev = 0; // Used as output iterator + + for(const int next : range(1,zipped.size())) { + // Is this a duplicate of the previously kept triangle? + if(elements[prev].sorted() == elements[next].sorted()) { + // Duplicate triangle. Need to check if normal is same or opposite of prev and update weight accordingly + depth_weights[prev] += depth_weights[next] * ((permutation_parity(elements[prev]) == permutation_parity(elements[next])) ?
1 : -1); + } + else { + // Found a new triangle + // Unless previous triangle had a nonzero weight, we overwrite it + if(depth_weights[prev] != 0) { + prev += 1; + } + elements[prev] = elements[next]; + depth_weights[prev] = depth_weights[next]; + } + } + + // If final unique triangle had zero net weight, erase it + if(depth_weights[prev] == 0) { + prev -= 1; + } + + // Resize to match compacted output + elements.resize(prev + 1); + depth_weights.resize(prev + 1); + } +} + +// TODO: This function should be merged into default behavior for geode::split_soup +Tuple, Array> safe_split_soup(const TriangleSoup& faces, Array X, const int depth) { + // Perturbation allows us to ignore most degeneracies, but duplicates of the same face are still an issue + // If faces are duplicated, crossing needs to account for total depth of all duplicates + // If we aren't filtering by depth we leave duplicated faces. This ensures we don't cull duplicates and is safe since depths aren't computed + if(depth == all_depths) { + return split_soup(faces, X, depth); + } + + // Copy elements so we can find duplicates + Array elements = faces.elements.copy(); + auto depth_weights = Array{elements.size()}; + depth_weights.fill(1); + simplify_duplicate_faces(elements, depth_weights); + + assert(!has_dups(elements)); + return split_soup(new_(elements), X, depth_weights, depth); +} + Mesh split_mesh (Mesh mesh, int depth, bool is_cleanup) { - auto split = split_soup(mesh.soup, mesh.points, depth); + auto split = safe_split_soup(mesh.soup, mesh.points, depth); return maybe_cleanup_mesh(Mesh(split.x, split.y), is_cleanup); } diff --git a/cad.h b/cad.h index b46657b..3ece96f 100644 --- a/cad.h +++ b/cad.h @@ -128,6 +128,7 @@ struct Mesh { Ref soup; Array points; Mesh(Ref soup, Array points) : soup(soup), points(points) { } + explicit Mesh(const Tuple,Array> soup_and_points) : soup(soup_and_points.x), points(soup_and_points.y) { } }; extern Mesh fab_mesh (Array faces, Array points);