From 6f11d76823210538d5c4d50794d6d76506e50949 Mon Sep 17 00:00:00 2001 From: Yuri Goldfeld Date: Thu, 20 Nov 2025 17:31:53 -0800 Subject: [PATCH 01/37] Consolidated Flow-ipc ipc-143 (ticket) work in Flow as triggered (not in *all* cases relevant to) by that Flow-IPC perf deep-dive. Detailed notes forthcoming in PR. --- src/CMakeLists.txt | 3 + src/flow/async/async_fwd.hpp | 2 +- src/flow/async/concurrent_task_loop.cpp | 8 +- src/flow/async/concurrent_task_loop.hpp | 10 +- src/flow/async/detail/task_qing_thread.cpp | 8 +- src/flow/async/detail/task_qing_thread.hpp | 4 +- src/flow/async/op.hpp | 2 +- .../async/segregated_thread_task_loop.cpp | 14 +- .../async/segregated_thread_task_loop.hpp | 4 +- src/flow/async/single_thread_task_loop.hpp | 2 +- src/flow/async/timed_concurrent_task_loop.hpp | 12 +- src/flow/async/util.hpp | 2 +- src/flow/async/x_thread_task_loop.cpp | 12 +- src/flow/async/x_thread_task_loop.hpp | 4 +- src/flow/cfg/cfg_manager.hpp | 42 +- src/flow/cfg/dynamic_cfg_context.hpp | 6 +- src/flow/cfg/option_set.cpp | 8 +- src/flow/cfg/option_set.hpp | 73 +- src/flow/cfg/static_cfg_manager.hpp | 2 +- src/flow/common.hpp | 13 +- src/flow/detail/doc/doc-coding_style.cpp | 56 +- src/flow/error/error.hpp | 20 +- src/flow/log/async_file_logger.cpp | 12 +- src/flow/log/async_file_logger.hpp | 2 +- src/flow/log/config.hpp | 10 +- .../log/detail/test/component_cfg_test.cpp | 8 +- src/flow/log/log.cpp | 134 +- src/flow/log/log.hpp | 194 ++- src/flow/log/log_fwd.hpp | 10 + src/flow/log/ostream_log_msg_writer.cpp | 2 +- src/flow/log/simple_ostream_logger.cpp | 4 +- src/flow/log/test/log_test.cpp | 132 ++ src/flow/log/verbosity_config.cpp | 6 +- src/flow/net_flow/asio/node.hpp | 16 +- src/flow/net_flow/asio/peer_socket.cpp | 8 +- src/flow/net_flow/asio/peer_socket.hpp | 22 +- src/flow/net_flow/asio/server_socket.cpp | 4 +- src/flow/net_flow/asio/server_socket.hpp | 8 +- src/flow/net_flow/detail/cong_ctl.cpp | 6 +- .../detail/cong_ctl/cong_ctl_classic_bw.cpp | 4 +- 
src/flow/net_flow/detail/drop_timer.cpp | 6 +- src/flow/net_flow/detail/drop_timer.hpp | 6 +- src/flow/net_flow/detail/low_lvl_io.cpp | 14 +- src/flow/net_flow/detail/low_lvl_packet.cpp | 14 +- src/flow/net_flow/detail/low_lvl_packet.hpp | 12 +- src/flow/net_flow/detail/port_space.cpp | 2 +- src/flow/net_flow/detail/seq_num.cpp | 8 +- src/flow/net_flow/detail/seq_num.hpp | 4 +- src/flow/net_flow/detail/socket_buffer.cpp | 6 +- src/flow/net_flow/detail/socket_buffer.hpp | 16 +- src/flow/net_flow/detail/stats/bandwidth.cpp | 6 +- src/flow/net_flow/detail/stats/bandwidth.hpp | 8 +- src/flow/net_flow/error/error.cpp | 2 +- src/flow/net_flow/error/error.hpp | 2 +- src/flow/net_flow/event_set.cpp | 46 +- src/flow/net_flow/event_set.hpp | 2 +- src/flow/net_flow/info.cpp | 2 +- src/flow/net_flow/net_env_simulator.cpp | 2 +- src/flow/net_flow/net_env_simulator.hpp | 8 +- src/flow/net_flow/node.cpp | 14 +- src/flow/net_flow/node.hpp | 66 +- src/flow/net_flow/options.cpp | 18 +- src/flow/net_flow/peer_socket.cpp | 132 +- src/flow/net_flow/peer_socket.hpp | 54 +- src/flow/net_flow/server_socket.cpp | 67 +- src/flow/net_flow/server_socket.hpp | 14 +- src/flow/perf/checkpt_timer.cpp | 20 +- src/flow/perf/checkpt_timer.hpp | 8 +- src/flow/perf/perf_fwd.hpp | 14 +- src/flow/util/basic_blob.hpp | 829 +++++++++--- src/flow/util/blob.cpp | 29 + src/flow/util/blob.hpp | 94 +- src/flow/util/blob_fwd.hpp | 11 +- src/flow/util/detail/linked_hash.hpp | 334 +++++ .../util/detail/sched_task_handle_state.cpp | 3 +- src/flow/util/detail/util.hpp | 4 +- src/flow/util/detail/util_fwd.hpp | 30 +- src/flow/util/linked_hash_map.hpp | 981 +++++++------- src/flow/util/linked_hash_set.hpp | 686 +++++----- src/flow/util/sched_task.cpp | 10 +- src/flow/util/sched_task.hpp | 4 +- src/flow/util/sched_task_fwd.hpp | 4 +- src/flow/util/shared_ptr_alias_holder.hpp | 12 +- src/flow/util/string_ostream.hpp | 2 +- src/flow/util/string_view.hpp | 4 +- src/flow/util/test/blob_test.cpp | 1093 +++++++++++++++ 
src/flow/util/test/linked_hash_test.cpp | 497 +++++++ src/flow/util/test/thread_lcl_test.cpp | 247 ++++ src/flow/util/test/util_test.cpp | 104 ++ src/flow/util/thread_lcl.hpp | 1190 +++++++++++++++++ src/flow/util/util.cpp | 6 +- src/flow/util/util.hpp | 85 +- src/flow/util/util_fwd.hpp | 132 +- test/basic/net_flow/echo/cli/echo_client.cpp | 16 +- test/basic/net_flow/echo/srv/echo_server.cpp | 10 +- test/suite/unit_test/CMakeLists.txt | 10 + test/suite/unit_test/test_main.cpp | 2 +- tools/cmake/FlowLikeCodeGenerate.cmake | 7 +- 98 files changed, 6215 insertions(+), 1683 deletions(-) create mode 100644 src/flow/log/test/log_test.cpp create mode 100644 src/flow/util/blob.cpp create mode 100644 src/flow/util/detail/linked_hash.hpp create mode 100644 src/flow/util/test/blob_test.cpp create mode 100644 src/flow/util/test/linked_hash_test.cpp create mode 100644 src/flow/util/test/thread_lcl_test.cpp create mode 100644 src/flow/util/test/util_test.cpp create mode 100644 src/flow/util/thread_lcl.hpp diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 73d97692b..38bee4d43 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -79,6 +79,7 @@ set(SRCS flow/net_flow/server_socket.cpp flow/perf/checkpt_timer.cpp flow/perf/clock_type.cpp + flow/util/blob.cpp flow/util/detail/sched_task_handle_state.cpp flow/util/detail/util.cpp flow/util/sched_task.cpp @@ -159,6 +160,7 @@ set(HDRS flow/util/basic_blob.hpp flow/util/blob.hpp flow/util/blob_fwd.hpp + flow/util/detail/linked_hash.hpp flow/util/detail/sched_task_handle_state.hpp flow/util/detail/util.hpp flow/util/detail/util_fwd.hpp @@ -171,6 +173,7 @@ set(HDRS flow/util/shared_ptr_alias_holder.hpp flow/util/string_ostream.hpp flow/util/string_view.hpp + flow/util/thread_lcl.hpp flow/util/traits.hpp flow/util/uniq_id_holder.hpp flow/util/util.hpp diff --git a/src/flow/async/async_fwd.hpp b/src/flow/async/async_fwd.hpp index 3976b8037..164b818ef 100644 --- a/src/flow/async/async_fwd.hpp +++ b/src/flow/async/async_fwd.hpp 
@@ -104,7 +104,7 @@ using Task = Function; * In addition, it is guaranteed that copying (via constructor or assignment) of async::Op is * has performance characteristics no worse than those of `shared_ptr`. I.e., it is to be thought of as light-weight. * - * The value `Op()` is designated as a null/sentinel value and must not be passed to Concurrent_task_loop::post() + * The value `Op{}` is designated as a null/sentinel value and must not be passed to Concurrent_task_loop::post() * or anything built on it. * * That's the formal definition. We reiterate that copying these is cheap; and moreover two `Op`s such that diff --git a/src/flow/async/concurrent_task_loop.cpp b/src/flow/async/concurrent_task_loop.cpp index 24cae6e70..a67c3ee0b 100644 --- a/src/flow/async/concurrent_task_loop.cpp +++ b/src/flow/async/concurrent_task_loop.cpp @@ -210,9 +210,9 @@ void optimize_pinning_in_thread_pool(log::Logger* logger_ptr, const auto native_mach_thread_id = pthread_mach_thread_np(native_pthread_thread_id); if (native_pthread_thread_id == 0) { - const Error_code sys_err_code(errno, system_category()); // As above.... + const Error_code sys_err_code{errno, system_category()}; // As above.... FLOW_ERROR_SYS_ERROR_LOG_WARNING(); - throw error::Runtime_error(sys_err_code, "pthread_mach_thread_np() call in optimize_pinning_in_thread_pool()"); + throw error::Runtime_error{sys_err_code, "pthread_mach_thread_np() call in optimize_pinning_in_thread_pool()"}; } // else FLOW_LOG_TRACE("pthread ID [" << native_pthread_thread_id << "] " @@ -249,8 +249,8 @@ void optimize_pinning_in_thread_pool(log::Logger* logger_ptr, * @todo For sure though should use error::Runtime_error here, the ctor that takes no Error_code. * That ctor did not exist when the present code was written; as of this writing Flow is Linux-only. * Would do it right now but lack the time to verify any changes for Mac at the moment. 
*/ - throw runtime_error(ostream_op_string("[MACH_KERN_RETURN_T:", code, - "] [thread_policy_set(THREAD_AFFINITY_POLICY) failed]")); + throw runtime_error{ostream_op_string("[MACH_KERN_RETURN_T:", code, + "] [thread_policy_set(THREAD_AFFINITY_POLICY) failed]")}; } // else OK! # endif // if 0 diff --git a/src/flow/async/concurrent_task_loop.hpp b/src/flow/async/concurrent_task_loop.hpp index bf6f16f7f..8d30a0b7a 100644 --- a/src/flow/async/concurrent_task_loop.hpp +++ b/src/flow/async/concurrent_task_loop.hpp @@ -208,11 +208,11 @@ namespace flow::async * flow::async::Concurrent_task_loop L; * auto op J = L.create_op(); // ATTN! The syntax is different from Strands but the idea is identical. * ... - * X_type X(L.task_engine()); + * X_type X{L.task_engine()}; * // ATTN! The syntax is again somewhat different from bind_executor(S, F), but the idea is equivalent. * X.async_A(&A_target, A_settings, flow::async::asio_handler_via_op(&L, J, F)); * ... - * Y_type Y(L.task_engine()); + * Y_type Y{L.task_engine()}; * Y.async_B(&B_target, B_settings, flow::async::asio_handler_via_op(&L, J, G)); * // X.sync_A() and Y.sync_B() are executing in background; F and G will run on respective completion; * // but F() and G() shall run non-concurrently by virtue of being wrapped by the same Op: J. @@ -408,8 +408,8 @@ class Concurrent_task_loop : * in each thread, for all `thread_idx` in [0, n_threads()). start() will return no sooner than * when each such callback has finished. 
*/ - virtual void start(Task&& init_task_or_empty = Task(), - const Thread_init_func& thread_init_func_or_empty = Thread_init_func()) = 0; + virtual void start(Task&& init_task_or_empty = Task{}, + const Thread_init_func& thread_init_func_or_empty = Thread_init_func{}) = 0; /** * Waits for any ongoing task(s)/completion handler(s) to return; then prevents any further-queued such tasks @@ -535,7 +535,7 @@ class Concurrent_task_loop : * * @param op * The (presumably) multi-async-step operation to which `task` belongs, such that no `Task`s associated with - * `op` may execute concurrently with `task`. If `op.empty()` (a/k/a `op == Op()`, recalling that `Op()` + * `op` may execute concurrently with `task`. If `op.empty()` (a/k/a `op == Op{}`, recalling that `Op{}` * is null/sentinel), then `assert()` trips. * @param task * See other post(). diff --git a/src/flow/async/detail/task_qing_thread.cpp b/src/flow/async/detail/task_qing_thread.cpp index ef9162991..8ed33dae1 100644 --- a/src/flow/async/detail/task_qing_thread.cpp +++ b/src/flow/async/detail/task_qing_thread.cpp @@ -58,7 +58,7 @@ Task_qing_thread::Task_qing_thread(flow::log::Logger* logger_ptr, util::String_v using Log_config = log::Config; assert(m_task_engine); - string nickname(nickname_view); // We need an std::string below anyway, so copy this now. + string nickname{nickname_view}; // We need an std::string below anyway, so copy this now. // Some programs start tons of threads. Let's be stingy with INFO messages. @@ -93,7 +93,7 @@ Task_qing_thread::Task_qing_thread(flow::log::Logger* logger_ptr, util::String_v * `sev_override == Sev::S_END_SENTINEL`; we need not even track it as a special case.) */ const auto sev_override = *(Log_config::this_thread_verbosity_override()); - m_worker_thread.reset(new Thread([this, // Valid throughout thread { body }. + m_worker_thread.reset(new Thread{[this, // Valid throughout thread { body }. 
sev_override, nickname = std::move(nickname), // Valid throughout thread { body }. init_func_or_empty = std::move(init_func_or_empty), @@ -148,7 +148,7 @@ Task_qing_thread::Task_qing_thread(flow::log::Logger* logger_ptr, util::String_v } // const auto sev_override_auto = // Restore logging to normal (how it normally is at thread start). // Avoid loop, thread exiting when no pending tasks remain. - Task_engine_work avoid_task_engine_stop(make_work_guard(*m_task_engine)); + Task_engine_work avoid_task_engine_stop{make_work_guard(*m_task_engine)}; // Block -- wait for tasks to be posted on this thread's (possibly shared with other threads) Task_engine. m_task_engine->run(); @@ -230,7 +230,7 @@ Task_qing_thread::Task_qing_thread(flow::log::Logger* logger_ptr, util::String_v * trace to the logs as well.) The answer is yes, though it's not on us to do it. One should do such work either * in std::terminate() (by using std::set_terminate()) or, arguably even better, in a global SIGABRT handler. * I am only mentioning it here as opportunistic advice -- again, it's not in our purview, as shown above. */ - })); // Thread body. + }}); // Thread body. // `nickname`, `init_task_or_empty` may now be hosed. 
if (done_promise_else_block) diff --git a/src/flow/async/detail/task_qing_thread.hpp b/src/flow/async/detail/task_qing_thread.hpp index b68520cce..817f8fef9 100644 --- a/src/flow/async/detail/task_qing_thread.hpp +++ b/src/flow/async/detail/task_qing_thread.hpp @@ -176,8 +176,8 @@ class Task_qing_thread : */ explicit Task_qing_thread(flow::log::Logger* logger_ptr, util::String_view nickname, const Task_engine_ptr& task_engine, bool own_task_engine, - boost::promise* done_promise_else_block = 0, - Task&& init_func_or_empty = Task()); + boost::promise* done_promise_else_block = nullptr, + Task&& init_func_or_empty = Task{}); /** * stop(), followed by forgetting the `Task_engine` returned by task_engine(); the latter action may diff --git a/src/flow/async/op.hpp b/src/flow/async/op.hpp index ef2b16d04..3cd9d567c 100644 --- a/src/flow/async/op.hpp +++ b/src/flow/async/op.hpp @@ -114,7 +114,7 @@ class Op_list : * @return See above. Note the address stored in the returned *reference* is valid until destructor runs; * hence it's not necessary (though cheap) to copy the `Op`. */ - const Op& random_op(size_t* chosen_idx = 0) const; + const Op& random_op(size_t* chosen_idx = nullptr) const; /** * Returns a randomly selected index from range [O, size()). diff --git a/src/flow/async/segregated_thread_task_loop.cpp b/src/flow/async/segregated_thread_task_loop.cpp index 176708a48..4e3c269ae 100644 --- a/src/flow/async/segregated_thread_task_loop.cpp +++ b/src/flow/async/segregated_thread_task_loop.cpp @@ -58,7 +58,7 @@ Segregated_thread_task_loop::Segregated_thread_task_loop(log::Logger* logger_ptr for (Task_engine_ptr& task_engine_ptr_in_container : m_task_engines) { // Attn: The concurrency-hint=1 may avoid or all most locking in boost.asio. Exactly 1 thread in the Task_engine. - task_engine_ptr_in_container.reset(new Task_engine(1)); + task_engine_ptr_in_container.reset(new Task_engine{1}); /* Task_engine starts in !stopped() mode ready to run(). 
start() pre-condition is stopped() so for simplicity * start in the same state that our stop() would put the Task_engine into: */ @@ -68,9 +68,9 @@ Segregated_thread_task_loop::Segregated_thread_task_loop(log::Logger* logger_ptr // Initialize our Ops_list of pre-created Ops which in our case simply store all `n` `Task_engine_ptr`s. const size_t n = n_threads(); - m_per_thread_ops.reset(new Op_list(get_logger(), n, + m_per_thread_ops.reset(new Op_list{get_logger(), n, [this](size_t idx) -> Op - { return Op(static_cast(m_task_engines[idx])); })); + { return Op{static_cast(m_task_engines[idx])}; }}); /* (The static_cast<> is probably unnecessary but makes the compiler check our type logic for us. That's quite * helpful in this rare situation where we're essentially using a dynamically typed variable in C++ [boost::any]. * There is 0 perf cost to it by the way.) */ @@ -124,7 +124,7 @@ void Segregated_thread_task_loop::start(Task&& init_task_or_empty, * though, so let's keep to the letter of our contract. Also, this way we can do it in parallel instead of * serially. */ - vector> thread_init_done_promises(n); + vector> thread_init_done_promises{n}; for (size_t idx = 0; idx != n; ++idx) { Task task_qing_thread_init_func; @@ -147,11 +147,11 @@ void Segregated_thread_task_loop::start(Task&& init_task_or_empty, // Now its Task_qing_thread can do ->run() as most of its thread body (and it won't just return). // Create/start the thread. - m_qing_threads[idx].reset(new Task_qing_thread(get_logger(), + m_qing_threads[idx].reset(new Task_qing_thread{get_logger(), (n == 1) ? m_nickname : util::ostream_op_string(m_nickname, idx), task_engine, true, // Its *own* 1-1 Task_engine. &(thread_init_done_promises[idx]), - std::move(task_qing_thread_init_func))); + std::move(task_qing_thread_init_func)}); } // for (idx in [0, n)) FLOW_LOG_INFO("All threads are asynchronously starting. 
Awaiting their readiness barrier-style, in sequence."); for (size_t idx = 0; idx != n; ++idx) @@ -167,7 +167,7 @@ void Segregated_thread_task_loop::start(Task&& init_task_or_empty, { FLOW_LOG_INFO("Thread count was auto-determined. Further attempting thread-to-core scheduling optimization."); - vector worker_threads(n); // Initialized to nulls. Now set them to the raw `Thread*`s. + vector worker_threads{n}; // Initialized to nulls. Now set them to the raw `Thread*`s. transform(m_qing_threads.begin(), m_qing_threads.end(), worker_threads.begin(), [](const Task_qing_thread_ptr& qing_thread_ptr) -> Thread* { return qing_thread_ptr->raw_worker_thread(); }); diff --git a/src/flow/async/segregated_thread_task_loop.hpp b/src/flow/async/segregated_thread_task_loop.hpp index e8c205245..59abbc038 100644 --- a/src/flow/async/segregated_thread_task_loop.hpp +++ b/src/flow/async/segregated_thread_task_loop.hpp @@ -114,8 +114,8 @@ class Segregated_thread_task_loop : * @param thread_init_func_or_empty * See superclass API. */ - void start(Task&& init_task_or_empty = Task(), - const Thread_init_func& thread_init_func_or_empty = Thread_init_func()) override; + void start(Task&& init_task_or_empty = Task{}, + const Thread_init_func& thread_init_func_or_empty = Thread_init_func{}) override; /** * Implements superclass API. In this implementation this essentially boils down to N `Task_engine::stop()`s, diff --git a/src/flow/async/single_thread_task_loop.hpp b/src/flow/async/single_thread_task_loop.hpp index 48440c4ea..1dc58b5b8 100644 --- a/src/flow/async/single_thread_task_loop.hpp +++ b/src/flow/async/single_thread_task_loop.hpp @@ -138,7 +138,7 @@ class Single_thread_task_loop : * thread started by this method, delaying the method's return to the caller until `init_task_or_empty()` * returns in said spawned thread. 
*/ - void start(Task&& init_task_or_empty = Task()); + void start(Task&& init_task_or_empty = Task{}); /** * Waits for the ongoing task/completion handler -- if one is running -- to return; then prevents any further-queued diff --git a/src/flow/async/timed_concurrent_task_loop.hpp b/src/flow/async/timed_concurrent_task_loop.hpp index 2ee545868..ee539a8dc 100644 --- a/src/flow/async/timed_concurrent_task_loop.hpp +++ b/src/flow/async/timed_concurrent_task_loop.hpp @@ -53,7 +53,7 @@ class Timed_concurrent_task_loop_impl : public Concurrent_task_loop public: // Types. - /// Short-hand for the exhanger function taken by the ctor. + /// Short-hand for the exchanger function taken by the ctor. using Exchanger_func = Function; // Constructors/destructor. @@ -87,8 +87,8 @@ class Timed_concurrent_task_loop_impl : public Concurrent_task_loop * @param thread_init_func_or_empty * See superclass API. */ - void start(Task&& init_task_or_empty = Task(), - const Thread_init_func& thread_init_func_or_empty = Thread_init_func()) override; + void start(Task&& init_task_or_empty = Task{}, + const Thread_init_func& thread_init_func_or_empty = Thread_init_func{}) override; /// Implements superclass API. void stop() override; @@ -230,7 +230,7 @@ class Timed_concurrent_task_loop_impl : public Concurrent_task_loop Concurrent_task_loop* const m_loop; /// See constructor. - const Exchanger_func m_exhanger_func; + const Exchanger_func m_exchanger_func; /// Accumulates time ticks, of clock type #m_clock_type, spent in tasks posted onto #m_loop. Time_accumulator m_time_accumulator; @@ -291,7 +291,7 @@ Timed_concurrent_task_loop_impl::Timed_concurrent_task_loop_im (Concurrent_task_loop* loop, perf::Clock_type clock_type, Exchanger_func&& exchanger_func_moved) : m_clock_type(clock_type), m_loop(loop), - m_exhanger_func(std::move(exchanger_func_moved)), + m_exchanger_func(std::move(exchanger_func_moved)), m_time_accumulator(0) { // That's it. 
@@ -384,7 +384,7 @@ Task_engine_ptr Timed_concurrent_task_loop_impl::task_engine() template perf::Duration Timed_concurrent_task_loop_impl::accumulated_time() { - return perf::Duration(m_exhanger_func(&m_time_accumulator)); + return perf::Duration{m_exchanger_func(&m_time_accumulator)}; } template diff --git a/src/flow/async/util.hpp b/src/flow/async/util.hpp index 450b57ec6..505daab02 100644 --- a/src/flow/async/util.hpp +++ b/src/flow/async/util.hpp @@ -95,7 +95,7 @@ void asio_exec_ctx_post(log::Logger* logger_ptr, Execution_context* exec_ctx, Sy "will we ensure concurrent initiation before continuing? = " "[" << (synchronicity == Synchronicity::S_ASYNC_AND_AWAIT_CONCURRENT_START) << "]."); - Task actual_task(std::move(task)); + Task actual_task{std::move(task)}; /* If the current log level suggests we want TRACE logging then book-end their task with some log statements. * For perf: entirely avoid this wrapping if almost certainly no logging would occur anyway. */ diff --git a/src/flow/async/x_thread_task_loop.cpp b/src/flow/async/x_thread_task_loop.cpp index 9aa65c23a..76bf9628d 100644 --- a/src/flow/async/x_thread_task_loop.cpp +++ b/src/flow/async/x_thread_task_loop.cpp @@ -48,11 +48,11 @@ Cross_thread_task_loop::Cross_thread_task_loop(log::Logger* logger_ptr, util::St : m_n_threads_or_zero), /* n_threads() is now accurate. Create the shared Task_engine capable of smartly scheduling across N threads. * Attn: Give concurrency hint; 1 in particular may help avoid or eliminate locking inside boost.asio. */ - m_shared_task_engine(new util::Task_engine(n_threads())), + m_shared_task_engine(new util::Task_engine{static_cast(n_threads())}), // Forever initialize our Ops_list of pre-created Ops which in our case simply store long-lived Strands. 
m_per_thread_strands(logger_ptr, n_threads(), [this](size_t) -> Op - { return Op(Strand_ptr(new util::Strand(*m_shared_task_engine))); }) + { return Op{Strand_ptr{new util::Strand{*m_shared_task_engine}}}; }) { /* Task_engine starts in !stopped() mode ready to run(). start() pre-condition is stopped() so for simplicity * start in the same state that our stop() would put the Task_engine into: */ @@ -116,7 +116,7 @@ void Cross_thread_task_loop::start(Task&& init_task_or_empty, * So that's why we use the Task_qing_thread ctor mode wherein we pass in our own `promise`s and then wait afterwards * for them all to be satisfied, barrier-style. */ - vector> thread_init_done_promises(n); + vector> thread_init_done_promises{n}; for (size_t idx = 0; idx != n; ++idx) { Task task_qing_thread_init_func; @@ -132,11 +132,11 @@ void Cross_thread_task_loop::start(Task&& init_task_or_empty, assert(task_qing_thread_init_func.empty()); // Just leave it. } - m_qing_threads[idx].reset(new Task_qing_thread(get_logger(), + m_qing_threads[idx].reset(new Task_qing_thread{get_logger(), (n == 1) ? m_nickname : util::ostream_op_string(m_nickname, idx), m_shared_task_engine, false, // A *shared* Task_engine. &(thread_init_done_promises[idx]), - std::move(task_qing_thread_init_func))); + std::move(task_qing_thread_init_func)}); } // for (idx in [0, n)) // By barrier-style I mean that they all the waits must be done, before the loop exits. @@ -266,7 +266,7 @@ size_t Cross_thread_task_loop::n_threads() const // Virtual. Op Cross_thread_task_loop::create_op() // Virtual. { - return Op(Strand_ptr(new util::Strand(*m_shared_task_engine))); + return Op{Strand_ptr{new util::Strand{*m_shared_task_engine}}}; } const Op_list& Cross_thread_task_loop::per_thread_ops() // Virtual. 
diff --git a/src/flow/async/x_thread_task_loop.hpp b/src/flow/async/x_thread_task_loop.hpp index 29c5af968..c895c4f18 100644 --- a/src/flow/async/x_thread_task_loop.hpp +++ b/src/flow/async/x_thread_task_loop.hpp @@ -152,8 +152,8 @@ class Cross_thread_task_loop : * @param thread_init_func_or_empty * See superclass API. */ - void start(Task&& init_task_or_empty = Task(), - const Thread_init_func& thread_init_func_or_empty = Thread_init_func()) override; + void start(Task&& init_task_or_empty = Task{}, + const Thread_init_func& thread_init_func_or_empty = Thread_init_func{}) override; /** * Implements superclass API. In this implementation this essentially boils down to a single `Task_engine::stop()`, diff --git a/src/flow/cfg/cfg_manager.hpp b/src/flow/cfg/cfg_manager.hpp index d7f69c62a..7f93e7967 100644 --- a/src/flow/cfg/cfg_manager.hpp +++ b/src/flow/cfg/cfg_manager.hpp @@ -460,7 +460,7 @@ class Config_manager : * * Tip: On failure you may want to exit program with error; or you can continue knowing that * static_values() will return a reference to default values (and all_static_values() will emit pointers to - * `Value_set`s with default values) according to `Value_set()` no-arg ctor (for each `Value_set`). + * `Value_set`s with default values) according to `Value_set{}` no-arg ctor (for each `Value_set`). * WARNING(s) logged given failure. * * apply_static() will *not* be tolerant of unknown option names appearing in the config source. The reasoning @@ -946,7 +946,7 @@ class Config_manager : * @param handle * The handle which was returned by register_dynamic_change_listener() when the callback was registered. 
*/ - void unregister_dynamic_change_listener (const On_dynamic_change_func_handle& handle); + void unregister_dynamic_change_listener(const On_dynamic_change_func_handle& handle); /** * Prints a human-targeted long-form summary of our contents, doubling as a usage message and a dump of current @@ -1337,7 +1337,7 @@ class Config_manager : * * This stores each dynamic slot's baseline `Value_set` state to load as just described. * - * After construction this payload is just the default-cted `Value_set()`. If the user chooses to execute + * After construction this payload is just the default-cted `Value_set{}`. If the user chooses to execute * a one-time apply_static_and_dynamic(), then that payload is replaced by the state after having parsed that * baseline state. Note, for context, that apply_static_and_dynamic() would be presumably loading *not* * from file F (which can change repeatedly, as dynamic updates come in) but some other file B, typically storing @@ -1412,7 +1412,7 @@ class Config_manager : * case only. * * When this is not-NONE, an `apply_*()` impl shall skip a couple of steps it would otherwise perform: - * - Individually-validating the default `Value_set()` values: Skip, as the first `apply_*()` in the sequence + * - Individually-validating the default `Value_set{}` values: Skip, as the first `apply_*()` in the sequence * would have already done it. So it's a waste of compute/entropy. * - `apply_dynamic()` applying #m_d_baseline_value_sets onto to #m_s_d_opt_sets: Skip, as the first `apply_*()` in * the sequence would have already done it. So doing it again would be not only redundant but also destructive, @@ -1503,11 +1503,11 @@ Config_manager::Config_manager // I (ygoldfel) tried make_shared() here, but it was too much for that gcc. The perf impact is negligible anyway. m_s_d_opt_sets[value_set_idx] = Void_ptr - (new Option_set // <-- Parameter pack expansion kernel here. - (get_logger(), + {new Option_set // <-- Parameter pack expansion kernel here. 
+ {get_logger(), ostream_op_string(m_nickname, dyn_else_st ? "/dynamic" : "/static", value_set_idx / 2), // 0, 0, 1, 1, .... - std::move(declare_opts_func_moved))), // (And here.) + std::move(declare_opts_func_moved)}}, // (And here.) /* As noted above, fill a m_d_baseline_value_sets[] but for dynamic (non-null()) sets only. * Same deal for initializing m_d_value_sets. @@ -1516,10 +1516,10 @@ Config_manager::Config_manager // * (This is `if (!(...).null()) { ... }` in expression form.) && (opt_set(value_set_idx)->null() // (And here.) || (m_d_baseline_value_sets[d_value_set_idx] - = Void_ptr(new S_d_value_set), // (And here.) + = Void_ptr{new S_d_value_set}, // (And here.) true), m_d_value_sets[d_value_set_idx] - = Void_ptr(new typename S_d_value_set::Const_ptr), // (And here.) + = Void_ptr{new typename S_d_value_set::Const_ptr}, // (And here.) ++d_value_set_idx, true), @@ -1748,7 +1748,7 @@ bool Config_manager::apply_static_impl ..., ( option_set_canonicalize_or_reject(opt_set(value_set_idx), // Param pack expansion kernel here. - success, 0), + success, nullptr), value_set_idx += 2 // Skip dynamic ones. ) ); @@ -1804,7 +1804,7 @@ bool Config_manager::apply_static_or_dynamic_impl && dyn_else_st && (!opt_set(value_set_idx)->null())) ? d_baseline_value_set // (And here.) (s_d_value_set_idx) - : static_cast(0), // (And here.) + : static_cast(nullptr), // (And here.) cfg_path, all_opt_names_or_empty, final_validator_func, // (And here.) @@ -2036,7 +2036,7 @@ bool Config_manager::apply_static_and_dynamic_impl ( success = apply_impl(opt_set(value_set_idx), // Param pack expansion kernel here. // Initial (baseline) parse: no need to apply baseline state. - static_cast(0), // (And here.) + static_cast(nullptr), // (And here.) cfg_path, all_opt_names, final_validator_func, // (And here.) &skip_parsing), @@ -2067,7 +2067,7 @@ bool Config_manager::apply_static_and_dynamic_impl * harmless no-op by Option_set docs. 
*/ option_set_canonicalize_or_reject (opt_set(value_set_idx), // Param pack expansion kernel here. - success, 0), + success, nullptr), ++value_set_idx ) ); @@ -2119,7 +2119,7 @@ bool Config_manager::apply_static_and_dynamic_impl // Next: Eureka! Set the dynamic-values pointers for the first time. { - Lock_guard lock(m_d_value_sets_mutex); + Lock_guard lock{m_d_value_sets_mutex}; value_set_idx = 0; dyn_else_st = false; ( @@ -2134,7 +2134,7 @@ bool Config_manager::apply_static_and_dynamic_impl dyn_else_st = !dyn_else_st ) ); - } // Lock_guard lock(m_d_value_sets_mutex); + } // Lock_guard lock{m_d_value_sets_mutex}; // They shouldn't do apply_static_and_dynamic() except as the first apply*_dynamic() thing. assert((!m_dynamic_values_set) @@ -2279,7 +2279,7 @@ bool Config_manager::apply_dynamic_impl option_set_canonicalize_or_reject(opt_set(value_set_idx), // Param pack expansion kernel here. success, // If initial dynamic parse, then `changed[] = true` by definition. - m_dynamic_values_set ? &changed[d_value_set_idx] : 0), + m_dynamic_values_set ? &changed[d_value_set_idx] : nullptr), // If initial dynamic parse, then `changed[] = true` by definition. changed[d_value_set_idx] = (!m_dynamic_values_set) || changed[d_value_set_idx], // some_changed = true iff: changed[x] == true for at least one x. @@ -2329,7 +2329,7 @@ bool Config_manager::apply_dynamic_impl * in a helper function -- though less concise (possible @todo). BTW generic lambdas could help there, but it would * require C++20 (I haven't looked into it thoroughly though). */ { - Lock_guard lock(m_d_value_sets_mutex); + Lock_guard lock{m_d_value_sets_mutex}; d_value_set_idx = 0; ( ..., @@ -2344,7 +2344,7 @@ bool Config_manager::apply_dynamic_impl ++d_value_set_idx ) ); - } // Lock_guard lock(m_d_value_sets_mutex); + } // Lock_guard lock{m_d_value_sets_mutex}; const bool was_dynamic_values_set = m_dynamic_values_set; m_dynamic_values_set = true; // Might already be true (if not initial parse). 
@@ -2384,7 +2384,7 @@ void Config_manager::reject_candidates() * it pertains to a static Value_set after an apply_dynamic() or dynamic after an apply_static(). */ option_set_canonicalize_or_reject (opt_set(value_set_idx), // Param pack expansion kernel here. - false, 0), // false <= reject. + false, nullptr), // false <= reject. ++value_set_idx ) ); @@ -2546,7 +2546,7 @@ void Config_manager::all_dynamic_values "Use nullptr for any Null_value_set and/or Value_set of no interest."); // As promised just copy out the values atomically and return them. Next time it might return different ones. - Lock_guard lock(m_d_value_sets_mutex); + Lock_guard lock{m_d_value_sets_mutex}; size_t d_value_set_idx = 0; size_t value_set_idx; @@ -2576,7 +2576,7 @@ typename Value_set::Const_ptr assert((d_value_set_idx < S_N_D_VALUE_SETS) && "Invalid dynamic option set index."); // As promised just copy out the value atomically and return it. Next time it might return different pointer. - Lock_guard lock(m_d_value_sets_mutex); + Lock_guard lock{m_d_value_sets_mutex}; return *(static_cast(m_d_value_sets[d_value_set_idx].get())); // Attn: --^ void* -> T* cast; no RTTI to check our type safety. 
} diff --git a/src/flow/cfg/dynamic_cfg_context.hpp b/src/flow/cfg/dynamic_cfg_context.hpp index 4d2ed9240..899c69ff6 100644 --- a/src/flow/cfg/dynamic_cfg_context.hpp +++ b/src/flow/cfg/dynamic_cfg_context.hpp @@ -145,9 +145,7 @@ Dynamic_cfg_context::Dynamic_cfg_context (const Config_manager& config_manager, Root_to_target_func&& root_to_target_func_moved, size_t d_value_set_idx) : m_get_root_func([&config_manager, d_value_set_idx]() - { - return config_manager.template dynamic_values(d_value_set_idx); - }), + { return config_manager.template dynamic_values(d_value_set_idx); }), m_root_to_target_func(std::move(root_to_target_func_moved)) { } @@ -164,7 +162,7 @@ Target_ptr Dynamic_cfg_context::dynamic_cfg() const auto root = root_dynamic_cfg(); /* This aliasing constructor provides the key mechanism: the constructed pointer object *shares ownership* of `root`, * but *stores* (and operates on) a pointer to the target object (contained by and translated from `root`). */ - return Target_ptr(root, &(m_root_to_target_func(*root))); + return Target_ptr{root, &(m_root_to_target_func(*root))}; } } // namespace flow::cfg diff --git a/src/flow/cfg/option_set.cpp b/src/flow/cfg/option_set.cpp index d675b453a..5f0f84fac 100644 --- a/src/flow/cfg/option_set.cpp +++ b/src/flow/cfg/option_set.cpp @@ -71,7 +71,7 @@ std::string value_set_member_id_to_opt_name(util::String_view member_id) constexpr char SEP_REPLACED = '_'; // From this... constexpr char SEP_REPLACEMENT = '-'; // ...to this. - string opt_name(member_id); + string opt_name{member_id}; /* Nuke space-y chars. * Am paranoid about locales; and also didn't feel like using std::isspace(..., std::locale("C")). */ @@ -89,7 +89,7 @@ std::string value_set_member_id_to_opt_name(util::String_view member_id) replace_all(opt_name, CONCAT_REPLACED, CONCAT_OK); // Now any leading M_PFX gone; and object separators normalized to CONCAT_OK. 
Can eliminate remaining M_PFX: - replace_all(opt_name, string(CONCAT_OK) + string(M_PFX), CONCAT_OK); + replace_all(opt_name, string{CONCAT_OK} + string{M_PFX}, CONCAT_OK); // Lastly transform the word-separators. replace(opt_name.begin(), opt_name.end(), SEP_REPLACED, SEP_REPLACEMENT); @@ -132,9 +132,9 @@ void validate(boost::any& target, const std::vector& user_strings, } catch (const bad_lexical_cast& exc) { - throw Runtime_error(ostream_op_string + throw Runtime_error{ostream_op_string ("Error converting [", user_string, - "] to boost::filesystem path. Is there something strange in that string?")); + "] to boost::filesystem path. Is there something strange in that string?")}; } } // else { Leave result_path at empty. lexical_cast("") would have yielded bad_lexical_cast, but we allow it. } diff --git a/src/flow/cfg/option_set.hpp b/src/flow/cfg/option_set.hpp index afb8f6698..e27a09585 100644 --- a/src/flow/cfg/option_set.hpp +++ b/src/flow/cfg/option_set.hpp @@ -249,7 +249,7 @@ class Option_set_base * - First, create your `Value_set` (named however you want, of course) `struct`. The values stored therein must * be reasonably deep-copyable; must have standard-stream `<<` and `>>` operators; and must reasonably implement * `==` comparison. `Value_set` itself must be copy-constructible and copy-assignable in a reasonable way. - * Lastly, and very importantly, the no-arg ctor `Value_set()` must initialize all configured + * Lastly, and very importantly, the no-arg ctor `Value_set{}` must initialize all configured * members to reasonable defaults: it is not possible to declare options as "required." * - If you use the optional features mutable_values_copy(), #Mutable_values_ptr, and/or #Values_ptr, then, also, * `Value_set` shall derive from util::Shared_ptr_alias_holder. (Don't worry: it's easy.) 
@@ -265,7 +265,7 @@ class Option_set_base * - Finally, create an instance of `Option_set`, and use parse_config_file() (or any other `parse_*()` * methods that might exist) to parse things at will. * - "At rest," Option_set is in CANONICAL state. values_candidate() returns null, and values() is the *canonical* - * (official, current) set of parsed config; originally it equals `Value_set()`. + * (official, current) set of parsed config; originally it equals `Value_set{}`. * It returns a reference to immutable internally stored `Value_set`. * - Invoking parse_config_file() (or any other `parse_*()`) either enters or continues PARSING state. * In this state `*values_candidate()` starts equal to values(); and is then potentially modified by each @@ -308,7 +308,7 @@ class Option_set_base * the following. * - Note that parse_config_file() (and any similar `parse_*()` that scans+parses strings) will *only* validate * (via individual-option-validator checks) values actually present in the config source. - * Defaults (from `Value_set()`) or baseline values (from parse_direct_values()) are not *mandatorily* checked. + * Defaults (from `Value_set{}`) or baseline values (from parse_direct_values()) are not *mandatorily* checked. * If one performs no additional validation calls, it will not be possible to know of bad defaults or baseline * values, and one can canonicalize_candidate() invalid values. This is allowed, for flexibility, but in most cases * one will want to reject_candidate() instead. The following abilities are provided to resolve this. @@ -578,7 +578,7 @@ class Option_set : * Constructs an option set in CANONICAL state with a default-valued values() payload and options declared * by synchronously invoking the callback `declare_opts_func()`. See below for details on the latter. 
* - * Post-condition: `values()` is equal to `Values()`; values_candidate() is null (so the state is initially + * Post-condition: `values()` is equal to `Values{}`; values_candidate() is null (so the state is initially * CANONICAL). (Therefore `Value_set` no-args ctor must by definition be written so as to initialize all its * relevant members to their defaults. Recall `Value_set` is a template parameter type with certain requirements.) * @@ -665,7 +665,7 @@ class Option_set : * @param success_or_null * If null exceptions mark failure; otherwise the pointed-to value shall indicate success or failure. */ - void validate_values(bool* success_or_null = 0) const; + void validate_values(bool* success_or_null = nullptr) const; /** * Validates an arbitrary `Value_set`, as parseable by *an* `Option_set`, according to the @@ -696,7 +696,7 @@ class Option_set : */ static void validate_values(log::Logger* logger_ptr, const Values& values_to_validate, const Declare_options_func& declare_opts_func, - bool* success_or_null = 0); + bool* success_or_null = nullptr); /** * Validates an arbitrary `Value_set`, using the same validators `*this` `Option_set` is configured to use @@ -713,7 +713,7 @@ class Option_set : * @param success_or_null * If null exceptions mark failure; otherwise the pointed-to value shall indicate success or failure. */ - void validate_values(const Values& values_to_validate, bool* success_or_null = 0) const; + void validate_values(const Values& values_to_validate, bool* success_or_null = nullptr) const; /** * Equivalent to `validate_values(success_or_null)` but validates `*values_candidate()` instead of values(). @@ -727,7 +727,7 @@ class Option_set : * @param success_or_null * If null exceptions mark failure; otherwise the pointed-to value shall indicate success or failure. 
*/ - void validate_values_candidate(bool* success_or_null = 0) const; + void validate_values_candidate(bool* success_or_null = nullptr) const; /** * Writes a multi-line user-suitable representation of the current values in a #Values object, at some point @@ -739,7 +739,7 @@ class Option_set : * @param values_or_null * Values to serialize; if null then we act as-if it's `&(values())`. */ - void values_to_ostream(std::ostream& os, const Values* values_or_null = 0) const; + void values_to_ostream(std::ostream& os, const Values* values_or_null = nullptr) const; /** * Logs the given values payload using values_to_ostream(). @@ -751,7 +751,8 @@ class Option_set : * @param sev * Severity to use for the log message. */ - void log_values(util::String_view summary, const Values* values_or_null = 0, log::Sev sev = log::Sev::S_INFO) const; + void log_values(util::String_view summary, + const Values* values_or_null = nullptr, log::Sev sev = log::Sev::S_INFO) const; /** * Prints a multi-line help message about the set of options that `*this` can parse. This should typically @@ -806,7 +807,7 @@ class Option_set : * If null exceptions mark failure; otherwise the pointed-to value shall indicate success or failure. */ void parse_config_file(const fs::path& cfg_path, bool allow_unregistered, - bool* success_or_null = 0, + bool* success_or_null = nullptr, const boost::unordered_set& allowed_unregistered_opts_or_empty = {}); /** @@ -825,7 +826,7 @@ class Option_set : * state is desired before each update. Consider the example of an Option_set that stores dynamically changeable * values. Suppose each update consists only of a single `parse_config_file(F)` call, where `F` is some file that * might get changed at various times to deliver dynamic updates. Then consider this series: - * -# Initial Option_set construction. End state: `values() == Values()` (default). + * -# Initial Option_set construction. End state: `values() == Values{}` (default). 
* -# First update occurs: `parse_config_file(F)`, followed by canonicalize_candidate(). * End state: `values()` == defaults + changes in file `F` at time 1. * -# Second update occurs: `parse_config_file(F)`, followed by canonicalize_candidate(). @@ -842,7 +843,7 @@ class Option_set : * * To resolve this, one can save a *baseline* state of values() by copy; and then apply it via this * parse_direct_values() call before parsing the file in each dynamic update. The baseline state could just be - * defaults (`Values()`), or it could come from some special "baseline" config file that is not `F` which one knows + * defaults (`Values{}`), or it could come from some special "baseline" config file that is not `F` which one knows * to never change. (Such a file could also typically store static config managed by a separate Option_set.) * * So then the sequence might become not parse_config_file(), canonicalize_candidate(), parse_config_file(), @@ -880,7 +881,7 @@ class Option_set : * @param change_detected * If null, ignored; otherwise `*change_detected` is set to `true` if a setting changed; else `false`. */ - void canonicalize_candidate(bool* change_detected = 0); + void canonicalize_candidate(bool* change_detected = nullptr); /** * In PARSING state, returns to CANONICAL state, as if no parse attempts have occurred. In CANONICAL state, a no-op. @@ -908,7 +909,7 @@ class Option_set : * This must point inside `m_values_candidate`. * @param value_default_if_no_acc * Usually -- with regular (accumulating) options -- null; otherwise pointer to the default value for - * `*target_value` (as from `Values()`), inside `m_values_default`. In the latter case (non-null) this + * `*target_value` (as from `Values{}`), inside `m_values_default`. 
In the latter case (non-null) this * indicates this is an option marked by the user as non-accumulating * (see FLOW_CFG_OPTION_SET_DECLARE_OPTION_NO_ACC() and similar), meaning each time a config source * (e.g., a file) is parsed `*target_value` is first reset to this default; then overwritten with the value in @@ -1373,7 +1374,7 @@ class Option_set : ( \ char const * const FLOW_CFG_SET_DECL_OPT_MANUAL_name_c_str = ARG_opt_name_c_str; \ /* Subtlety: This is only safe to use here synchronously. */ \ - const ::flow::util::String_view FLOW_CFG_SET_DECL_OPT_MANUAL_name_view(FLOW_CFG_SET_DECL_OPT_MANUAL_name_c_str); \ + const ::flow::util::String_view FLOW_CFG_SET_DECL_OPT_MANUAL_name_view{FLOW_CFG_SET_DECL_OPT_MANUAL_name_c_str}; \ const bool FLOW_CFG_SET_DECL_OPT_MANUAL_no_acc = ARG_no_accumulation; \ switch (args.m_call_type) \ { \ @@ -1390,7 +1391,7 @@ class Option_set : (FLOW_CFG_SET_DECL_OPT_MANUAL_name_view, \ &args.m_args.m_fill_parsing_role_opt_table_args.m_values_candidate->ARG_m_value, \ /* Default is irrelevant if option accumulates from parse to parse. Pass null. */ \ - /* Default from Value_set() shall be in effect at construction time, but that's it. */ \ + /* Default from Value_set{} shall be in effect at construction time, but that's it. */ \ /* However if it's set as a non-accumulating option via knob, then pass-through the default: */ \ /* each parse via boost.program_options shall first reset option to that default; then if present */ \ /* overwrite that default. Hence the value from any preceding parse is always forgotten. */ \ @@ -1398,7 +1399,7 @@ class Option_set : ? 
&args.m_args.m_fill_parsing_role_opt_table_args.m_values_default_no_acc->ARG_m_value \ : nullptr, \ ::std::move(FLOW_CFG_SET_DECL_OPT_MANUAL_validator_func), \ - ::flow::util::String_view(#ARG_bool_validate_expr)); \ + ::flow::util::String_view{#ARG_bool_validate_expr}); \ break; \ } \ case ::flow::cfg::Option_set_base::Declare_options_func_call_type::S_FILL_OUTPUT_HELP_ROLE_OPT_TABLE: \ @@ -1441,7 +1442,7 @@ class Option_set : (FLOW_CFG_SET_DECL_OPT_MANUAL_name_view, \ args.m_args.m_validate_stored_vals_args.m_values_to_validate->ARG_m_value, \ ::std::move(FLOW_CFG_SET_DECL_OPT_MANUAL_validator_func), \ - ::flow::util::String_view(#ARG_bool_validate_expr)); \ + ::flow::util::String_view{#ARG_bool_validate_expr}); \ break; \ } \ /* No `default:` intentionally: most compilers should catch a missing enum value and warn. */ \ @@ -1495,13 +1496,13 @@ const Value_set& Option_set::values() const template typename Option_set::Mutable_values_ptr Option_set::mutable_values_copy() const { - return Mutable_values_ptr(new Values(values())); + return Mutable_values_ptr{new Values{values()}}; } template const Value_set* Option_set::values_candidate() const { - return m_parsing ? &m_values_candidate : 0; + return m_parsing ? &m_values_candidate : nullptr; } template @@ -1587,7 +1588,7 @@ void Option_set::declare_option_for_parsing(util::String_view name_vi const auto val_spec = value(target_value) ->notifier(throw_on_invalid_func(name_view, std::move(validator_func_moved), validator_cond_str_view)); - /* However: if non-accumulating mode is enabled then, in fact, set the value from Value_set() + /* However: if non-accumulating mode is enabled then, in fact, set the value from Value_set{} * as default_value(); so that starting to parse a config source (e.g., config file) shall always reset to default * first instead of accumulating from a previous parse (if any). This matters for a given parse only if this * option is not specified in that config source. 
*/ @@ -1630,7 +1631,7 @@ Function Option_set_base::throw_on_invalid_func "Option value `val` = ["; value_to_ostream(msg_os.os(), val); msg_os.os() << "]." << flush; - throw Runtime_error(msg_os.str()); + throw Runtime_error{msg_os.str()}; } }; } // Option_set_base::throw_on_invalid_func() @@ -1716,7 +1717,7 @@ void Option_set::scan_parsed_option(util::String_view name_view, cons { using boost::any_cast; using std::string; - string name(name_view); + string name{name_view}; /* In this mode we are basically to check whether the value just parsed for `name`, which is in * m_iterable_values_candidate[name], is actually *different* from the current canonical value, which is @@ -1748,7 +1749,7 @@ void Option_set::load_option_value_as_if_parsed(util::String_view nam const Value& source_value) { using std::string; - string name(name_view); + string name{name_view}; assert(target_value); assert(m_parsing); @@ -1800,7 +1801,7 @@ void Option_set::parse_config_file { try { - parse_config_file(cfg_path, allow_unregistered, 0, allowed_unregistered_opts_or_empty); + parse_config_file(cfg_path, allow_unregistered, nullptr, allowed_unregistered_opts_or_empty); } catch (const exception& exc) { @@ -1832,13 +1833,13 @@ void Option_set::parse_config_file log_values("pre-parsing candidate config", &m_values_candidate, log::Sev::S_TRACE); - ifstream ifs(cfg_path); + ifstream ifs{cfg_path}; if (!ifs) { - const Error_code sys_err_code(errno, system_category()); + const Error_code sys_err_code{errno, system_category()}; FLOW_ERROR_SYS_ERROR_LOG_WARNING(); - throw Runtime_error(sys_err_code, ostream_op_string("Could not open file [", cfg_path, "].")); + throw Runtime_error{sys_err_code, ostream_op_string("Could not open file [", cfg_path, "].")}; } // else @@ -1849,7 +1850,7 @@ void Option_set::parse_config_file * stay maintainable. Still consider revisiting this and not catching the exception and not worrying about * backing this up -- if that's really how it works. 
*/ const auto values_candidate_backup = m_values_candidate; - opts::parsed_options parsed_options(&m_opts_for_parsing); + opts::parsed_options parsed_options{&m_opts_for_parsing}; try { parsed_options = opts::parse_config_file(ifs, m_opts_for_parsing, allow_unregistered); @@ -1894,8 +1895,8 @@ void Option_set::parse_config_file FLOW_LOG_WARNING("Option_set[" << *this << "]: State PARSING: Unregistered option named " "[" << opt_name << "] is not approved; parse failed. Values payload untouched."); - throw Runtime_error(ostream_op_string("Unregistered option named [", opt_name, - "] is not approved; parse failed.")); + throw Runtime_error{ostream_op_string("Unregistered option named [", opt_name, + "] is not approved; parse failed.")}; } // else @@ -2179,7 +2180,7 @@ void value_to_ostream(std::ostream& os, const boost::chrono::duration bool + (bool) -> bool { /* *Main point*: The lambda's { body } *always* starts on the same column as the current *statement*. * This is regardless of there the lambda itself -- the [captures] -- started. @@ -866,16 +867,17 @@ const boost::unordered_map // @todo It's a bit ambiguous what the indentation "anchor" column should be here.... Event_set::S_EV_TYPE_TO_IS_ACTIVE_NODE_MTD // Use judgment to either have {} innards indented on separate line(s) or in-line. Examples of both: - ({ + {{ { Event_set::Event_type::S_PEER_SOCKET_READABLE, &Node::sock_is_readable }, - // ^-- Spaces around `contents` in `{ contents }`, when it's all on one line. --v { Event_set::Event_type::S_PEER_SOCKET_WRITABLE, &Node::sock_is_writable }, { Event_set::Event_type::S_SERVER_SOCKET_ACCEPTABLE, &Node::serv_is_acceptable } - }); + }}; +// See also "Constructor and initializer invocation" under BEST PRACTICES. // Misc. convention: -T* x = nullptr; // This is OK, but Flow was written before nullptr was available; we just use the following: -T* x = 0; // This is the Flow convention. Note, no `NULL`! NULL is C stuff, no need. 
+T* x = nullptr; // This is the modern Flow convention. +T* x = 0; // OK and may be seen due to Flow originating before nullptr existed. Ideally change to nullptr on sight. +// Note, no `NULL`! NULL is C stuff; no need. // -- STYLE GUIDE: Header files and forwarding; _fwd.hpp pattern -- @@ -1250,6 +1252,48 @@ if (mutex.locked()) // Mutex being locked here means we are in trouble. * to inline (`gcc -O2` + explicit inlining by dev) exceed the losses from LTO not being in effect yet. Update: * with widespread LTO support in all modern gcc and clang compilers this complaint is mitigated/eliminated.] */ +// -- BEST PRACTICES: Constructor and initializer invocation -- + +/* Before C++11, the language had an annoying ambiguity, inherited to this day; but if you follow the recommended + * conventions, you will bypass the ambiguity. So what's this ambiguity? It is, basically, the following. + * Note we're not trying to be complete and ultra-correct here; just giving a sense, for background: */ +T a(); // Is this constructing an object named `a` of type T, via its default constructor T::T()? +T a(); // Or maybe it is a function named `a` that returns a `T`? + +/* So the modern Flow style is to entirely avoid the `T()` or `T a()` style of invoking a constructor. + * Use the brace{} form instead. + * + * Some old-style ctor-invoking code may be encountered, as Flow originated before C++11. Ideally fix it on sight. + * + * There are more subtleties, benefits, effects beyond removing ambiguity and avoiding strange compiler errors, but + * we aren't trying to copy/paste Effective C++ here. Please just trust us and do it. */ +T a; // No args, no problem. Could also write `T a{}`; why bother though? +f(T{}); // No args and no name. Use braces to explicitly convey it's a ctor invocation; not: `f(T())`. +T a{1}; // Takes an arg... similarly use braces. +auto x = string{"abc"}; // Not: auto x = string("abc"). +f(string{"abc"}); // Not: f(string("abc")). 
+ +// If you're using an initializer-list and/or direct-init, please indicate this by placing spaces after { and before }. +struct Complex { float m_real; float m_imag; }; +Complex num{ -2, 2.3 }; // Initializing via direct-init, not via a constructor: indicate with spaces. +f(Complex{ -2, 2.3 }); + +/* Do be careful of one thing: a container with an initializer_list ctor will invoke it over the would-be constructor + * with a matching signature (if any). Unfortunately one must revert to the parentheses-using-constructor-call style. + * (Needless to say, the compiler doesn't care if you put spaces in there; that is for humans only.) */ +vector<int> v{ 10, 1 }; // 2 elements: 10, 1. initializer_list ctor is invoked, even though the following ctor matches. +vector<int> v(10, 1); // 10 elements, each one equal to `1`. Force compiler to ignore the initializer_list ctor. +// Nevertheless, prefer {} for construction whenever possible which is most of the time by far. + +// When the type is built-in like the integers and real numbers, construction via (), {}, and static_cast<> are all OK. +f(size_t{-1}); // Might not compile, because size_t is unsigned, and this conversion is from signed -1. +f(size_t(-1)); // OK: defeats the compile error/warning; stylistically no big deal, as size_t is basically built-in. +f(static_cast<size_t>(-1)); // OK: explicit cast. It's a bit wordy though, so the preceding could be preferred. +// Corollary w/r/t multi-word type names: +f(unsigned int{-1}); // SYNTAX ERROR. This is due to the space in the type name. +f((unsigned int)-1); // BAD. By convention we do not use draconian C-style casts, even though it works. +f(static_cast<unsigned int>(-1)); // Defeat the syntax error. Could also `using uint = unsigned int` or something. + // -- BEST PRACTICES: Doxygen doc header deep-dive -- /* In "BEST PRACTICES: Comments" above we mentioned doc headers. Let's get into the topic more. 
diff --git a/src/flow/error/error.hpp b/src/flow/error/error.hpp index 958524f43..76157c3c9 100644 --- a/src/flow/error/error.hpp +++ b/src/flow/error/error.hpp @@ -54,7 +54,7 @@ class Runtime_error : * Constructs Runtime_error. * * @param err_code_or_success - * The #Error_code describing the error if available; or the success value (`Error_code()`) + * The #Error_code describing the error if available; or the success value (`Error_code{}`) * if an error code is unavailable or inapplicable to this error. * In the latter case what() will omit anything to do with error codes and feature only `context`. * @param context @@ -67,7 +67,7 @@ class Runtime_error : /** * Constructs Runtime_error, when one only has a context string and no applicable/known error code. - * Formally it's equivalent to `Runtime_error(Error_code(), context)`: it is syntactic sugar only. + * Formally it's equivalent to `Runtime_error(Error_code{}, context)`: it is syntactic sugar only. * * @param context * See the other ctor. @@ -100,8 +100,8 @@ class Runtime_error : * * The latter occurs in our superclass `what()` already, if we pass up `context` to the super-ctor. * Now suppose `!err_code_or_success`. No matter which super-ctor we use, it will memorize an `Error_code` -- - * if we pass `Error_code()` it'll remember that; if use a ctor that does not take an `Error_code()` it will - * memorize its own `Error_code()`. Therefore we must override `what()` behavior in our own what() in that case. + * if we pass `Error_code{}` it'll remember that; if use a ctor that does not take an `Error_code{}` it will + * memorize its own `Error_code{}`. Therefore we must override `what()` behavior in our own what() in that case. * * Therefore this algorithm works: * - `!err_code_or_success`: Memorize `context` in #m_context_if_no_code. what() just returns the latter. @@ -158,7 +158,7 @@ bool exec_and_throw_on_error(const Func& func, Ret* ret, /* Error was detected: do our duty. 
Pass through the context info from caller. * Note that passing in, say, FLOW_UTIL_WHERE_AM_I() would be less useful, since the present location * is not helpful to the log reader in determining where the actual error first occurred. */ - throw Runtime_error(our_err_code, context); + throw Runtime_error{our_err_code, context}; } return true; @@ -179,7 +179,7 @@ bool exec_void_and_throw_on_error(const Func& func, Error_code* err_code, util:: if (our_err_code) { - throw Runtime_error(our_err_code, context); + throw Runtime_error{our_err_code, context}; } return true; @@ -202,7 +202,7 @@ bool exec_void_and_throw_on_error(const Func& func, Error_code* err_code, util:: #define FLOW_ERROR_EMIT_ERROR(ARG_val) \ FLOW_UTIL_SEMICOLON_SAFE \ ( \ - ::flow::Error_code FLOW_ERROR_EMIT_ERR_val(ARG_val); \ + ::flow::Error_code FLOW_ERROR_EMIT_ERR_val{ARG_val}; \ FLOW_LOG_WARNING("Error code emitted: [" << FLOW_ERROR_EMIT_ERR_val << "] " \ "[" << FLOW_ERROR_EMIT_ERR_val.message() << "]."); \ *err_code = FLOW_ERROR_EMIT_ERR_val; \ @@ -218,7 +218,7 @@ bool exec_void_and_throw_on_error(const Func& func, Error_code* err_code, util:: #define FLOW_ERROR_EMIT_ERROR_LOG_INFO(ARG_val) \ FLOW_UTIL_SEMICOLON_SAFE \ ( \ - ::flow::Error_code FLOW_ERROR_EMIT_ERR_LOG_val(ARG_val); \ + ::flow::Error_code FLOW_ERROR_EMIT_ERR_LOG_val{ARG_val}; \ FLOW_LOG_INFO("Error code emitted: [" << FLOW_ERROR_EMIT_ERR_LOG_val << "] " \ "[" << FLOW_ERROR_EMIT_ERR_LOG_val.message() << "]."); \ *err_code = FLOW_ERROR_EMIT_ERR_LOG_val; \ @@ -233,7 +233,7 @@ bool exec_void_and_throw_on_error(const Func& func, Error_code* err_code, util:: #define FLOW_ERROR_LOG_ERROR(ARG_val) \ FLOW_UTIL_SEMICOLON_SAFE \ ( \ - ::flow::Error_code FLOW_ERROR_LOG_ERR_val(ARG_val); \ + ::flow::Error_code FLOW_ERROR_LOG_ERR_val{ARG_val}; \ FLOW_LOG_WARNING("Error occurred: [" << FLOW_ERROR_LOG_ERR_val << "] " \ "[" << FLOW_ERROR_LOG_ERR_val.message() << "]."); \ ) @@ -319,7 +319,7 @@ bool exec_void_and_throw_on_error(const Func& func, 
Error_code* err_code, util:: * // Example API. In this one, there's a 4th argument that happens to follow the standard Error_code* one. * // arg2 is a (const) reference, as opposed to a pointer or scalar, and is most efficiently handled by adding * // cref() to avoid copying it. - * T f(AT1 arg1, const AT2& arg2, Error_code* err_code = 0, AT3 arg3 = 0) + * T f(AT1 arg1, const AT2& arg2, Error_code* err_code = nullptr, AT3 arg3 = 0) * { * FLOW_ERROR_EXEC_AND_THROW_ON_ERROR(T, // Provide the return type of the API. * // Forward all the args into the macro, but replace `err_code` => `_1`. diff --git a/src/flow/log/async_file_logger.cpp b/src/flow/log/async_file_logger.cpp index f2d486387..697814498 100644 --- a/src/flow/log/async_file_logger.cpp +++ b/src/flow/log/async_file_logger.cpp @@ -114,7 +114,7 @@ Async_file_logger::Async_file_logger(Logger* backup_logger_ptr, Async_file_logger::Throttling_cfg Async_file_logger::throttling_cfg() const { - Lock_guard lock(m_throttling_mutex); + Lock_guard lock{m_throttling_mutex}; return m_throttling_cfg; } @@ -151,7 +151,7 @@ void Async_file_logger::throttling_cfg(bool active, const Throttling_cfg& cfg) // Deal with `cfg`. { // All this->m_ touched in { here } can concurrently change, unless we lock. - Lock_guard lock(m_throttling_mutex); + Lock_guard lock{m_throttling_mutex}; if (m_throttling_cfg.m_hi_limit != cfg.m_hi_limit) { @@ -171,7 +171,7 @@ void Async_file_logger::throttling_cfg(bool active, const Throttling_cfg& cfg) } /* else: As discussed in class doc header: no-op, unless they actually changed something; no state reset. * E.g., perhaps they changed `active` while passing-in `cfg = throttling_cfg()` unchanged. */ - } // Lock_guard lock(m_throttling_mutex); + } // Lock_guard lock{m_throttling_mutex}; } // Async_file_logger::throttling_cfg() Async_file_logger::~Async_file_logger() // Virtual. @@ -179,7 +179,7 @@ Async_file_logger::~Async_file_logger() // Virtual. 
using async::Synchronicity; { - Lock_guard lock(m_throttling_mutex); // Careful: really_log()s may well be happening right now via m_async_worker. + Lock_guard lock{m_throttling_mutex}; // Careful: really_log()s may well be happening right now via m_async_worker. FLOW_LOG_INFO("Async_file_logger [" << this << "]: Deleting. Worker thread will flush " "output if possible; then we will proceed to shut down. Current mem-use of queued " @@ -281,7 +281,7 @@ void Async_file_logger::do_log(Msg_metadata* metadata, util::String_view msg) // logs_sz_t pending_logs_sz; // For logging. logs_sz_t prev_pending_logs_sz; { - Lock_guard lock(m_throttling_mutex); + Lock_guard lock{m_throttling_mutex}; limit = static_cast(m_throttling_cfg.m_hi_limit); prev_pending_logs_sz = m_pending_logs_sz; pending_logs_sz = (m_pending_logs_sz += logs_sz); @@ -348,7 +348,7 @@ void Async_file_logger::do_log(Msg_metadata* metadata, util::String_view msg) // logs_sz_t pending_logs_sz; // For logging. logs_sz_t prev_pending_logs_sz; { - Lock_guard lock(m_throttling_mutex); + Lock_guard lock{m_throttling_mutex}; limit = m_throttling_cfg.m_hi_limit; // Just for logging in this case. prev_pending_logs_sz = m_pending_logs_sz; assert((prev_pending_logs_sz >= logs_sz) && "Bug? really_log() has no matching do_log()?"); diff --git a/src/flow/log/async_file_logger.hpp b/src/flow/log/async_file_logger.hpp index cc60a1b5f..d42ca732c 100644 --- a/src/flow/log/async_file_logger.hpp +++ b/src/flow/log/async_file_logger.hpp @@ -385,7 +385,7 @@ class Async_file_logger : uint64_t m_hi_limit; /** - * Value of `Async_file_logger(...).throttling_cfg().m_hi_limit`: default/initial value of #m_hi_limit. + * Value of `Async_file_logger{...}.throttling_cfg().m_hi_limit`: default/initial value of #m_hi_limit. * * Note that this value is not meant to be some kind of universally correct choice for #m_hi_limit. * Users can and should change `m_hi_limit`. 
diff --git a/src/flow/log/config.hpp b/src/flow/log/config.hpp index 14c25e1ad..19074ab6d 100644 --- a/src/flow/log/config.hpp +++ b/src/flow/log/config.hpp @@ -635,7 +635,7 @@ class Config template void init_component_names(const boost::unordered_multimap& component_names, bool output_components_numerically = false, - util::String_view payload_type_prefix_or_empty = util::String_view()); + util::String_view payload_type_prefix_or_empty = {}); /** * Sets the default verbosity to the given value, to be used by subsequent output_whether_should_log() calls whenever @@ -1108,8 +1108,8 @@ void Config::init_component_to_union_idx_mapping(component_union_idx_t enum_to_n assert(component_union_idx_max >= enum_to_num_offset); assert(component_union_idx_max >= enum_sparse_length); - /* typeid(Component_payload) (arg 1) would equal Component(C).payload_type(), where C is a value of type - * Component_payload. That is how this mapping would be used subsequently after this call. Component(C) is + /* typeid(Component_payload) (arg 1) would equal Component{C}.payload_type(), where C is a value of type + * Component_payload. That is how this mapping would be used subsequently after this call. Component{C} is * routinely provided at log call sites. */ m_component_cfgs_by_payload_type.insert(typeid(Component_payload), { enum_to_num_offset }, @@ -1128,8 +1128,8 @@ void Config::init_component_to_union_idx_mapping(component_union_idx_t enum_to_n * atomic<>::load()s from there. * (Note: OK, technically there's another way m_verbosities_by_component is set, in the copy ctor. See copy ctor's * doc header.) - * (Note: This requires copying of a Atomic_raw_sev(raw_sev_t(-1)) N times, where N is the number of elements, - * if any, added to m_verbosities_by_component. That's why Atomic_raw_sev() has a copy ctor unlike the + * (Note: This requires copying of a Atomic_raw_sev{raw_sev_t(-1)} N times, where N is the number of elements, + * if any, added to m_verbosities_by_component. 
That's why Atomic_raw_sev{} has a copy ctor unlike the * underlying atomic. Its use can be unsafe, in terms of propagation across threads, as said in the * copy ctor's doc header; but since we do the source construction and N copies in the same thread, per that doc * header we are safe.) */ diff --git a/src/flow/log/detail/test/component_cfg_test.cpp b/src/flow/log/detail/test/component_cfg_test.cpp index 7d4440da9..feef6ad56 100644 --- a/src/flow/log/detail/test/component_cfg_test.cpp +++ b/src/flow/log/detail/test/component_cfg_test.cpp @@ -91,7 +91,7 @@ String_view dict_type_printable(const std::type_index& type, bool brief = false) auto ret = impl(); if (brief) { - ret.remove_prefix(String_view("idx=...|impl=").size()); + ret.remove_prefix(String_view{"idx=...|impl="}.size()); } return ret; } @@ -373,7 +373,7 @@ void dict_benchmark(size_t n_cfgs) } // if (!by_ptr_else_val) } // for (rec : timing_vec) - vector rel_time_str_vec(timing_vec.size()); + vector rel_time_str_vec{timing_vec.size()}; std::transform(timing_vec.begin(), timing_vec.end(), rel_time_str_vec.begin(), [](const auto& rec) -> auto { return util::ostream_op_string('[', dict_type_printable(rec.m_type, true), @@ -382,7 +382,7 @@ void dict_benchmark(size_t n_cfgs) std::setprecision((rec.m_time_multiple == 1.f) ? 1 : 2), rec.m_time_multiple, ']'); }); - vector time_str_vec(timing_vec.size()); + vector time_str_vec{timing_vec.size()}; std::transform(timing_vec.begin(), timing_vec.end(), time_str_vec.begin(), [](const auto& rec) -> auto { @@ -498,7 +498,7 @@ TEST(Component_cfg_test, Dict_internals_benchmark) * perf matters has much subtlety to it -- this tests it end-to-end through the publicly available log::Config API. 
*/ TEST(Component_cfg_test, Interface) { - Config cfg(Sev::S_INFO); + Config cfg{Sev::S_INFO}; Component comp0a{n0::n0::Cmps::S_COMP_A}; Component comp0b{n0::n0::Cmps::S_COMP_B}; diff --git a/src/flow/log/log.cpp b/src/flow/log/log.cpp index 72f48ba2a..a77388c65 100644 --- a/src/flow/log/log.cpp +++ b/src/flow/log/log.cpp @@ -46,8 +46,8 @@ void Logger::this_thread_set_logged_nickname(util::String_view thread_nickname, * value of s_this_thread_nickname.get() or dereference thereof in any thread except * the one in which we currently execute. */ s_this_thread_nickname_ptr.reset(thread_nickname.empty() - ? 0 - : new string(thread_nickname)); + ? nullptr + : new string{thread_nickname}); // Log about it if given an object capable of logging about itself. if (logger_ptr) @@ -86,7 +86,7 @@ void Logger::this_thread_set_logged_nickname(util::String_view thread_nickname, FLOW_LOG_SET_CONTEXT(logger_ptr, Flow_log_component::S_LOG); if (result_code == -1) { - const Error_code sys_err_code(errno, system_category()); + const Error_code sys_err_code{errno, system_category()}; FLOW_LOG_WARNING("Unable to set OS thread name to [" << os_name << "], possibly truncated " "to [" << MAX_PTHREAD_NAME_SZ << "] characters, via pthread_setname_np(). " "This should only occur due to an overlong name, which we guard against, so this is " @@ -156,9 +156,10 @@ std::ostream* Logger::this_thread_ostream() const // Component implementations. Component::Component() : - m_payload_type_or_null(0) // <=> empty() == true. + m_payload_type_or_null(nullptr), // <=> empty() == true. + m_payload_enum_raw_value() // Should not be necessary (uninit=OK), but in some contexts at least gcc-9 warns. { - // That's it. m_payload_enum_raw_value is uninitialized. + // That's it. 
} Component::Component(const Component& src) = default; @@ -202,14 +203,11 @@ Log_context& Log_context::operator=(const Log_context& src) = default; Log_context& Log_context::operator=(Log_context&& src) { - using std::swap; - if (&src != this) { - m_logger = 0; - m_component = Component(); - - swap(*this, src); + operator=(static_cast(src)); + src.m_logger = nullptr; + src.m_component = {}; } return *this; } @@ -219,6 +217,12 @@ Logger* Log_context::get_logger() const return m_logger; } +Logger* Log_context::set_logger(Logger* logger) +{ + std::swap(logger, m_logger); + return logger; +} + const Component& Log_context::get_log_component() const { return m_component; @@ -237,6 +241,114 @@ void swap(Log_context& val1, Log_context& val2) val1.swap(val2); } +// Log_context_mt implementations. + +Log_context_mt::Log_context_mt(Logger* logger) : + Log_context(logger) +{ + // Nothing. +} + +Log_context_mt::Log_context_mt(const Log_context_mt& src) : + Log_context(src) +{ + // Leave m_mutex alone. +} + +Log_context_mt::Log_context_mt(Log_context_mt&& src) : + Log_context(static_cast(src)) // See below. 
+{ + using Lock = util::Lock_guard; + + /* We could just do `operator=(std::move(src))`; but to avoid unnecessary locking of this->m_mutex do it manually; + * - lock-free copying from Log_context src onto Log_context *this = already done; + * - so it remains to lock src and nullify it: */ + Lock lock{src.m_mutex}; + static_cast(src).operator=(Log_context{}); +} + +Log_context_mt& Log_context_mt::operator=(const Log_context_mt& src) +{ + if (&src != this) + { + util::Lock_guard lock{m_mutex}; + Log_context::operator=(src); + } + return *this; +} + +Log_context_mt& Log_context_mt::operator=(Log_context_mt&& src) +{ + using Lock = util::Lock_guard; + + if (&src != this) + { + /* Naively we'd do something close to: + * Lock lock1{m_mutex}; + * Lock lock2{src.m_mutex}; + * Log_context::operator=(std::move(src)); + * However conceivably this could cause an obscure deadlock for reasons similar to those cited in swap(). As there: + * Seems there's no choice but to lock things piecewise and execute the move-assignment manually as its 2 + * component ops (copy-assign; then clear `src`), as we do so. */ + + { + Lock lock{m_mutex}; + Log_context::operator=(static_cast(src)); + } + { + Lock lock{src.m_mutex}; + static_cast(src).operator=(Log_context{}); + } + } + + return *this; +} // Log_context_mt::operator=(&&) + +Logger* Log_context_mt::get_logger() const +{ + util::Lock_guard lock{m_mutex}; + return Log_context::get_logger(); +} + +Logger* Log_context_mt::set_logger(Logger* logger) +{ + util::Lock_guard lock{m_mutex}; + return Log_context::set_logger(logger); +} + +const Component& Log_context_mt::get_log_component() const +{ + util::Lock_guard lock{m_mutex}; + return Log_context::get_log_component(); +} + +void Log_context_mt::swap(Log_context_mt& other) +{ + /* Naively we'd do something close to: + * Lock lock1{m_mutex}; + * Lock lock2{other.m_mutex}; + * Log_context::swap(other); + * However conceivably this could cause an obscure deadlock; e.g. 
at least if one concurrently tries + * lc_mt1.swap(lc_mt2); + * and + * lc_mt2.swap(lc_mt1); + * Strange thing to do, but it is legal, and a classic AB-BA deadlock results. + * Seems there's no choice but to lock things piecewise and execute the swap manually as the 3 classic ops, as + * we do so. */ + + Log_context_mt& obj1 = *this; + Log_context_mt& obj2 = other; + + Log_context_mt obj_tmp{static_cast(obj1)}; + obj1 = static_cast(obj2); // Will lock/unlock obj1.m_mutex. + obj2 = static_cast(obj_tmp); // Will lock/unlock obj2.m_mutex. +} // Log_context_mt::swap() + +void swap(Log_context_mt& val1, Log_context_mt& val2) +{ + val1.swap(val2); +} + // Sev implementations. std::ostream& operator<<(std::ostream& os, Sev val) diff --git a/src/flow/log/log.hpp b/src/flow/log/log.hpp index ac52d90e1..bc0e42a17 100644 --- a/src/flow/log/log.hpp +++ b/src/flow/log/log.hpp @@ -122,7 +122,7 @@ * Then one could even (when desired) write such things as * * ~~~ - * Logger some_logger(...); // Some Logger that is not available through get_logger() as would be more typical. + * Logger some_logger{...}; // Some Logger that is not available through get_logger() as would be more typical. * some_logger.warning("Error detected: [", err_num, "]."); * ~~~ * @@ -139,7 +139,7 @@ * might change this overall API) just to remove a few characters from each log call. The above call would become: * * ~~~ - * log_warning("Error detected: [", err_num, "]."); // Invoke superclass Log_context's Logger::warning() method. + * log_warning("Error detected: [", err_num, "]."); // Invoke super-class Log_context's Logger::warning() method. * ~~~ * * which is a little more compact. 
That can also be accomplished by having flow::log::Log_context implement @@ -374,7 +374,7 @@ /** * For the rest of the block within which this macro is instantiated, causes all `FLOW_LOG_...()` - * invocations to log to `ARG_logger_ptr` with component `flow::log::Component(ARG_component_payload)`, instead of the + * invocations to log to `ARG_logger_ptr` with component `flow::log::Component{ARG_component_payload}`, instead of the * normal `get_logger()` and `get_log_component()`, if there even such things are available in the block. This is * useful, for example, in `static` methods, where there is no `get_logger()` or `get_log_component()` function defined, * but a flow::log::Logger and component payload are available (for example) via parameters. It's also useful if one @@ -399,7 +399,7 @@ * `ARG_logger_ptr` will be used as the `Logger*` in subsequent `FLOW_LOG_...()` * invocations in this block. * @param ARG_component_payload - * `Component(ARG_component_payload)`, a light-weight holder of a copy of `ARG_component_payload`, will be used + * `Component{ARG_component_payload}`, a light-weight holder of a copy of `ARG_component_payload`, will be used * as the `const Component&` in subsequent `FLOW_LOG_...()` invocations in this block. 
*/ #define FLOW_LOG_SET_CONTEXT(ARG_logger_ptr, ARG_component_payload) \ @@ -450,7 +450,7 @@ */ #define FLOW_LOG_SET_COMPONENT(ARG_component_payload) \ [[maybe_unused]] \ - const auto get_log_component = [component = ::flow::log::Component(ARG_component_payload)] \ + const auto get_log_component = [component = ::flow::log::Component{ARG_component_payload}] \ () -> const ::flow::log::Component & \ { \ return component; \ @@ -559,9 +559,11 @@ constexpr size_t FLOW_LOG_WO_CHK_file_sz = sizeof(__FILE__) - 1; \ constexpr char const * FLOW_LOG_WO_CHK_func_ptr = __FUNCTION__; \ constexpr size_t FLOW_LOG_WO_CHK_func_sz = sizeof(__FUNCTION__) - 1; \ + /* Minor: Using {} instead of () here leads to some macro trouble; not worth the pain to fix it. */ \ constexpr String_view FLOW_LOG_WO_CHK_full_file_str(FLOW_LOG_WO_CHK_file_ptr, FLOW_LOG_WO_CHK_file_sz); \ /* Yes -- get_last_path_segment() is constexpr and will thus "execute" at compile time! */ \ constexpr String_view FLOW_LOG_WO_CHK_file_str = get_last_path_segment(FLOW_LOG_WO_CHK_full_file_str); \ + /* Minor: Using {} instead of () here leads to some macro trouble; not worth the pain to fix it. */ \ constexpr String_view FLOW_LOG_WO_CHK_func_str(FLOW_LOG_WO_CHK_func_ptr, FLOW_LOG_WO_CHK_func_sz); \ const Component& FLOW_LOG_WO_CHK_component = get_log_component(); \ string FLOW_LOG_WO_CHK_call_thread_nickname; \ @@ -742,7 +744,7 @@ /* However, for an alleged perf bump (@todo verify!) we use a */ \ /* thread-local Msg_metadata to avoid making this thing on the stack and then destroying almost immediately. 
*/ \ FLOW_LOG_DO_LOG_logger->do_log(FLOW_LOG_DO_LOG_msg_metadata_ptr, \ - String_view(FLOW_LOG_DO_LOG_appender.target_contents())); \ + String_view{FLOW_LOG_DO_LOG_appender.target_contents()}); \ ) /* FLOW_UTIL_SEMICOLON_SAFE() */ namespace flow::log @@ -1204,14 +1206,14 @@ struct Msg_metadata * (say) Simple_ostream_logger, Buffer_logger, and Async_file_logger, do this: * - Take a Config pointer at constructor and save it (do not copy the Config). (There are thread safety * implications.) - * - Internally use some kind of `ostream`-subclass member to a target device, if at all possible. + * - Internally use some kind of `ostream`-sub-class member to a target device, if at all possible. * (boost.asio will let you even write to network this way; but at least console output, file output, and * memory string output are 100% practical via `ostream`s. Existing `Logger`s provide examples.) * - Internally use an Ostream_log_msg_writer to write to said `ostream`, the formatting thereof being configurable * in a uniform way via the saved Config. * - Forward should_log() logic to the saved Config (Config::output_whether_should_log()), so that verbosity is * flexibly but uniformly set via Config. - * - It is up to the user, now, to set up the Config appropriately when passing it to your `Logger` subclass + * - It is up to the user, now, to set up the Config appropriately when passing it to your `Logger` sub-class * constructor. The user would simply follow the documentation for Config, and you need neither re-implement * nor re-document configurability of your Logger. * @@ -1226,7 +1228,7 @@ struct Msg_metadata * synchronously or asynchronously. To wit, the thread safety discussion: * * ### Thread safety ### - * The degree of thread safety for either of the 2 main operations is completely up to the subclass implementer. + * The degree of thread safety for either of the 2 main operations is completely up to the sub-class implementer. 
* Informally, we suggest here that you think about this topic carefully. In particular, without locking, * do_log() may run concurrently with itself from multiple threads; depending on the medium to which * it is writing, this may result in corruption or ugly output or turn out fine, depending on how you define @@ -1235,7 +1237,7 @@ struct Msg_metadata * concurrently with themselves from multiple threads on the same Logger. In general that should be expected in all but * the simplest single-threaded apps. * - * Implementation suggestions for Logger subclasses with respect to thread safety: There are 2 likeliest patterns one + * Implementation suggestions for Logger sub-classes with respect to thread safety: There are 2 likeliest patterns one * can use. * -# One can use a mutex lock around actual writing to the target device. There's nothing inherently un-performant * about this, in an of itself, and the implementation is incredibly simple. For example see @@ -1450,8 +1452,8 @@ class Logger : * assignment, not necessarily the most critical of information). Certainly this zero-to-one-`Logger` version must * continue to be available for syntactic-sugary convenience, even if the to-do is performed. */ - static void this_thread_set_logged_nickname(util::String_view thread_nickname = util::String_view(), - Logger* logger_ptr = 0, + static void this_thread_set_logged_nickname(util::String_view thread_nickname = {}, + Logger* logger_ptr = nullptr, bool also_set_os_name = true); /** @@ -1504,7 +1506,7 @@ class Logger : * Non-null pointer to value to modify. See above. */ static void set_thread_info(std::string* call_thread_nickname, - flow::util::Thread_id* call_thread_id); + util::Thread_id* call_thread_id); /** * Returns the stream dedicated to the executing thread and `this` Logger, so that the caller can apply @@ -1557,7 +1559,7 @@ class Logger : * Logger and Component via get_logger() and get_log_component() public accessors. 
It's extremely * useful (almost mandatory in conventional practice) for classes that want to log, as they can simply * derive from it (passing in the desired `Logger*` and Component payload (an `enum` - * value) into the Log_context superclass constructor), + * value) into the Log_context super-class constructor), * at which point the get_logger() and get_log_component() functions the `FLOW_LOG_...()` macros expect automatically * become available without any additional code having to be written in the logging class. Here is how: * @@ -1571,7 +1573,7 @@ class Logger : * Log_context(&m_logger, My_cool_components::S_FUN_HAVER), * // Initialize stdout logger that logs INFO-or-higher-severity messages. * m_logger(true, std::cout, std::cout, flow::log::Sev::S_INFO), - * // ... other initializers and superclass constructors, if any ... + * // ... other initializers and super-class constructors, if any ... * { * FLOW_LOG_INFO("I can log right from the constructor and throughout *this lifetime!"); * // ... other code ... @@ -1580,7 +1582,7 @@ class Logger : * private: * void do_fun_stuff() * { - * // This macro works, because Log_context superclass defines get_logger() which returns m_logger, + * // This macro works, because Log_context super-class defines get_logger() which returns m_logger, * // and component() returns My_cool_components::S_FUN_HAVER. * // But we need not ever worry about such details. * FLOW_LOG_INFO("I am about to do something cool and fun: " << 42 << "!"); @@ -1593,7 +1595,33 @@ class Logger : * ~~~ * * Note that the `operator=()` allows one to change the underlying Logger anytime after - * construction (e.g., `existing_log_context = Log_context(&some_logger, Some_enum::S_SOME_COMPONENT);`). + * construction (e.g., `existing_log_context = Log_context{&some_logger, Some_enum::S_SOME_COMPONENT};`). + * That said it is more convenient to use set_logger(); but see the next section, as this may involve more + * subtleties than one might think. 
+ * + * ### Setting the logger / thread safety ### + * set_logger() allows one, including an external user, to change the Logger. However beware two points w/r/t + * thread safety. + * -# It is not safe to use set_logger() or `*this = Log_context{...}` concurrently with any call + * that would log via get_logger(): so anything that, e.g., does `FLOW_LOG_...()`. It only replaces + * a pointer in memory, but there is no mutex or atomic protection; so it is not safe. + * -# Assuming one avoids any issues with the get_logger() pointer safety, please be sure that the Logger + * itself is actually valid/alive. + * + * Informally: Generally it is best to avoid changing the active Logger, after an object is constructed. + * 99% of code in practice does not do so; it is usually far better to affect the Logger via its Config which + * takes massive pains to be both thread-safe and performant at that. + * + * However in practice there is at least one exception to this: when the sub-class's instance is `static` or + * even global and/or a singleton. Then it might operate before and even after `main()`, and even during `main()` there + * may not be a good `Logger` to use yet. One might then make use of set_logger(), e.g., early in `main()` to + * change it from null to a `Logger` and then back late in `main()` (or if not null then a default + * Simple_ostream_logger to `cout` + `cerr`... you get the idea). + * + * That however does not protect against thread-safety problems (point 1 above) necessarily. It depends when one + * does it. If it is necessary to be changing get_logger() return-value while get_logger() is potentially used + * by another thread, then consider using Log_context_mt. There is a bit of a perf trade-off there (see its doc + * header). 
* * ### Implementation notes ### * The code could be shorter by getting rid of non-copy constuctor in favor of direct member initialization by user; @@ -1615,10 +1643,10 @@ class Log_context * Constructs Log_context by storing the given pointer to a Logger and a null Component. * * @param logger - * Pointer to store. Rationale for providing the null default: To facilitate subclass `= default` no-arg + * Pointer to store. Rationale for providing the null default: To facilitate sub-class `= default` no-arg * ctors. */ - explicit Log_context(Logger* logger = 0); + explicit Log_context(Logger* logger = nullptr); /** * Constructs Log_context by storing the given pointer to a Logger and a new Component storing the @@ -1694,6 +1722,19 @@ class Log_context */ Logger* get_logger() const; + /** + * Sets the value to be returned by the next get_logger() call; returns get_logger() from before the change. + * + * Behavior is undefined if invoked concurrently with itself or get_logger() on the same `*this`. + * If that is unacceptable: see our class doc header for brief discussion / suggestion on alternative to + * Log_context. + * + * @param logger + * As in ctor. + * @return get_logger() pre-change. + */ + Logger* set_logger(Logger* logger); + /** * Returns reference to the stored Component object, particularly as many `FLOW_LOG_*()` macros expect. * * @return See above. @@ -1712,6 +1753,114 @@ class Log_context Component m_component; }; // class Log_context +/** + * Identical to Log_context but is safe w/r/t set_logger(), assignment, and `swap()` done concurrently to + * ops (especially get_logger()) on the same `*this`. + * + * @see Log_context doc header section "Setting the logger / thread safety." + * + * There is a perf trade-off: essentially all operations will lock an internal mutex, proceed, then unlock. 
+ * This will have a small cost when there is no lock contention (no simultaneous logging -- get_logger() calls); + * and a larger cost when there is (when indeed there is simultaneous logging -- therefore get_logger() calls). + * Informally: in our experience, as noted in the aforementioned doc header discussion, set_logger() is mainly + * used when the sub-class is instantiated `static`ally or globally; and usually it is possible to simply avoid + * logging along fast-paths of such classes. + * + * Still: it requires care. If you need to provide/use set_logger(), be mindful of this potential source of + * slow-down. + */ +class Log_context_mt : + private Log_context +{ +public: + // Constructors/destructor. + + /** + * Identical to Log_context API. + * @param logger + * See above. + */ + explicit Log_context_mt(Logger* logger = nullptr); + + /** + * Identical to Log_context API. + * @tparam Component_payload + * See above. + * @param logger + * See above. + * @param component_payload + * See above. + */ + template + explicit Log_context_mt(Logger* logger, Component_payload component_payload); + + /** + * Identical to Log_context API. + * @param src + * See above. + */ + explicit Log_context_mt(const Log_context_mt& src); + + /** + * Identical to Log_context API. + * @param src + * See above. + */ + Log_context_mt(Log_context_mt&& src); + + // Methods. + + /** + * Identical to Log_context API; but safe against concurrent operations on a `*this`. + * + * @param src + * See above. + * @return See above. + */ + Log_context_mt& operator=(const Log_context_mt& src); + + /** + * Identical to Log_context API; but safe against concurrent operations on a `*this`. + * @param src + * See above. + * @return See above. + */ + Log_context_mt& operator=(Log_context_mt&& src); + + /** + * Identical to Log_context API; but safe against concurrent operations on a `*this`. + * @param other + * See above. 
+ */ + void swap(Log_context_mt& other); + + /** + * Identical to Log_context API; but safe against concurrent operations on a `*this`. + * @return See above. + */ + Logger* get_logger() const; + + /** + * Identical to Log_context API; but safe against concurrent operations on a `*this`. + * @param logger + * See above. + * @return See above. + */ + Logger* set_logger(Logger* logger); + + /** + * Identical to Log_context API. + * @return See above. + */ + const Component& get_log_component() const; + +private: + // Data. + + /// Protects access to data in `static_cast(*this)` (especially Log_context::m_logger). + mutable util::Mutex_non_recursive m_mutex; +}; // class Log_context_mt + // Free functions: in *_fwd.hpp. // Template implementations. @@ -1753,4 +1902,11 @@ Log_context::Log_context(Logger* logger, Component_payload component_payload) : // Nothing. } +template +Log_context_mt::Log_context_mt(Logger* logger, Component_payload component_payload) : + Log_context(logger, component_payload) +{ + // Nothing. +} + } // namespace flow::log diff --git a/src/flow/log/log_fwd.hpp b/src/flow/log/log_fwd.hpp index 532439557..8787e681d 100644 --- a/src/flow/log/log_fwd.hpp +++ b/src/flow/log/log_fwd.hpp @@ -143,6 +143,7 @@ class Component; class Config; class Logger; class Log_context; +class Log_context_mt; struct Msg_metadata; class Ostream_log_msg_writer; class Simple_ostream_logger; @@ -364,6 +365,15 @@ std::ostream& operator<<(std::ostream& os, Sev val); */ void swap(Log_context& val1, Log_context& val2); +/** + * Log_context_mt ADL-friendly swap: Equivalent to `val1.swap(val2)`. + * @param val1 + * Object. + * @param val2 + * Object. + */ +void swap(Log_context_mt& val1, Log_context_mt& val2); + /** * Sets certain `chrono`-related formatting on the given Logger in the current thread that results in a consistent, * desirable output of `duration`s and certain types of `time_point`s. 
The effect is that of diff --git a/src/flow/log/ostream_log_msg_writer.cpp b/src/flow/log/ostream_log_msg_writer.cpp index 7d7c84ef9..1a3a13a51 100644 --- a/src/flow/log/ostream_log_msg_writer.cpp +++ b/src/flow/log/ostream_log_msg_writer.cpp @@ -157,7 +157,7 @@ void Ostream_log_msg_writer::do_log_with_human_friendly_time_stamp(const Msg_met "{:%S}", metadata.m_called_when); } - m_os << String_view(m_last_human_friendly_time_stamp_str.data(), m_last_human_friendly_time_stamp_str_sz); + m_os << String_view{m_last_human_friendly_time_stamp_str.data(), m_last_human_friendly_time_stamp_str_sz}; log_past_time_stamp(metadata, msg); } // Ostream_log_msg_writer::do_log_with_human_friendly_time_stamp() diff --git a/src/flow/log/simple_ostream_logger.cpp b/src/flow/log/simple_ostream_logger.cpp index 452e7e8d3..a58d47249 100644 --- a/src/flow/log/simple_ostream_logger.cpp +++ b/src/flow/log/simple_ostream_logger.cpp @@ -32,11 +32,11 @@ Simple_ostream_logger::Simple_ostream_logger(Config* config, * It's just prettier (e.g., stream state saved/restores 1x instead of 2x) and a little better for performance. * However there could be 2+ Simple_ostream_loggers sharing the same ostream; nothing we can (reasonably) do to * detect that -- nor NEED we worry about it really; just a nicety. */ - m_os_writers[0] = Ostream_log_msg_writer_ptr(new Ostream_log_msg_writer(*m_config, os)); + m_os_writers[0] = Ostream_log_msg_writer_ptr{new Ostream_log_msg_writer{*m_config, os}}; m_os_writers[1] = (&os == &os_for_err) ? m_os_writers[0] - : Ostream_log_msg_writer_ptr(new Ostream_log_msg_writer(*m_config, os_for_err)); + : Ostream_log_msg_writer_ptr{new Ostream_log_msg_writer{*m_config, os_for_err}}; // See m_os_writers doc header for details about how they are freed at `*this` destruction. 
} diff --git a/src/flow/log/test/log_test.cpp b/src/flow/log/test/log_test.cpp new file mode 100644 index 000000000..b6660d00c --- /dev/null +++ b/src/flow/log/test/log_test.cpp @@ -0,0 +1,132 @@ +/* Flow + * Copyright 2023 Akamai Technologies, Inc. + * + * Licensed under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in + * compliance with the License. You may obtain a copy + * of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in + * writing, software distributed under the License is + * distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing + * permissions and limitations under the License. */ + +#include "flow/log/log.hpp" +#include "flow/log/simple_ostream_logger.hpp" +#include "flow/util/util.hpp" +#include +#include + +namespace flow::log::test +{ + +namespace +{ +using std::string; +} // Anonymous namespace + +// Yes... this is very cheesy... but this is a test, so I don't really care. +#define CTX util::ostream_op_string("Caller context [", FLOW_UTIL_WHERE_AM_I_STR(), "].") + +// @todo Many more things to test in log.?pp surely. Gotta start somewhere though! + +TEST(Log_context, Interface) +{ + using std::swap; // ADL-swap. + + /* Log_context[_mt] is simple, and its essential aspects will be tested here -- aside from the thread-safety + * aspects of Log_context_mt (@todo). One area that is somewhat finicky is Log_context_mt's copy, move, swap + * facilities; due to mutex details they're not as straightforward as one normally sees; so we check those; + * it would've been easy to make a mistake that would not be obviously visible. */ + + const auto test_type = [](auto type_specimen) + { + using Log_context_t = decltype(type_specimen); // Log_context_t is either Log_context or ..._mt; identical APIs. 
+ Config cfg; + Simple_ostream_logger logger1{&cfg}; + Simple_ostream_logger logger2{&cfg}; + const auto comp1 = Flow_log_component::S_UTIL; + const auto comp2 = Flow_log_component::S_LOG; + const auto comp0 = Component{}; + EXPECT_TRUE(comp0.empty()); + + // @todo Maybe should implement operator==(Component, Component)? Then use it/test it here? + + /* This isn't a test of Component functionality; it assumes that works; it just checks whether they're + * equal and ensures this matches what our test is expecting that for those particular c1 and c2. */ + const auto comps_equal = [](Component c1, Component c2, const string& ctx) + { + if (c1.empty() && c2.empty()) + { + return; // So equal then. + } + // Better both be not-empty. + EXPECT_EQ(c1.empty(), c2.empty()) << ctx; + + EXPECT_EQ(c1.payload_type(), c2.payload_type()) << ctx; + EXPECT_EQ(int(c1.payload_enum_raw_value()), int(c2.payload_enum_raw_value())) << ctx; + }; + + Log_context_t ctx1; + EXPECT_TRUE(ctx1.get_log_component().empty()); + comps_equal(ctx1.get_log_component(), comp0, CTX); + EXPECT_EQ(ctx1.get_logger(), nullptr); + ctx1.set_logger(&logger2); + EXPECT_EQ(ctx1.get_logger(), &logger2); + ctx1 = Log_context_t{&logger1, comp1}; + EXPECT_FALSE(ctx1.get_log_component().empty()); + comps_equal(ctx1.get_log_component(), comp1, CTX); + EXPECT_EQ(ctx1.get_logger(), &logger1); + + Log_context_t ctx2{&logger2}; + EXPECT_TRUE(ctx2.get_log_component().empty()); + comps_equal(ctx2.get_log_component(), comp0, CTX); + EXPECT_EQ(ctx2.get_logger(), &logger2); + ctx2.set_logger(&logger1); + EXPECT_EQ(ctx2.get_logger(), &logger1); + ctx2 = Log_context_t{&logger2, comp2}; + EXPECT_FALSE(ctx2.get_log_component().empty()); + comps_equal(ctx2.get_log_component(), comp2, CTX); + EXPECT_EQ(ctx2.get_logger(), &logger2); + + swap(ctx1, ctx2); + comps_equal(ctx1.get_log_component(), comp2, CTX); + EXPECT_EQ(ctx1.get_logger(), &logger2); + comps_equal(ctx2.get_log_component(), comp1, CTX); + EXPECT_EQ(ctx2.get_logger(), 
&logger1); + + ctx1 = ctx2; // Copy-assign. + comps_equal(ctx1.get_log_component(), comp1, CTX); + EXPECT_EQ(ctx1.get_logger(), &logger1); + comps_equal(ctx2.get_log_component(), comp1, CTX); + EXPECT_EQ(ctx2.get_logger(), &logger1); + ctx1 = Log_context_t{&logger2, comp2}; + + ctx2 = std::move(ctx1); // Move-assign. + EXPECT_TRUE(ctx1.get_log_component().empty()); + EXPECT_EQ(ctx1.get_logger(), nullptr); + comps_equal(ctx2.get_log_component(), comp2, CTX); + EXPECT_EQ(ctx2.get_logger(), &logger2); + + Log_context_t ctx3{ctx2}; // Copy-ct. + comps_equal(ctx3.get_log_component(), comp2, CTX); + EXPECT_EQ(ctx3.get_logger(), &logger2); + comps_equal(ctx2.get_log_component(), comp2, CTX); + EXPECT_EQ(ctx2.get_logger(), &logger2); + + Log_context_t ctx4{std::move(ctx3)}; // Move-ct. + EXPECT_TRUE(ctx3.get_log_component().empty()); + EXPECT_EQ(ctx3.get_logger(), nullptr); + comps_equal(ctx4.get_log_component(), comp2, CTX); + EXPECT_EQ(ctx4.get_logger(), &logger2); + }; // const auto test_type = + + test_type(Log_context{}); + test_type(Log_context_mt{}); +} // TEST(Log_context, Interface) + +} // namespace flow::log::test diff --git a/src/flow/log/verbosity_config.cpp b/src/flow/log/verbosity_config.cpp index e23871ed8..21f0770e4 100644 --- a/src/flow/log/verbosity_config.cpp +++ b/src/flow/log/verbosity_config.cpp @@ -37,7 +37,7 @@ Verbosity_config::Verbosity_config() using std::make_pair; // As promised: - m_component_sev_pairs.push_back(make_pair(string(), Sev(Config::S_MOST_VERBOSE_SEV_DEFAULT))); + m_component_sev_pairs.push_back(make_pair(string{}, Sev{Config::S_MOST_VERBOSE_SEV_DEFAULT})); assert(m_component_sev_pairs.size() == 1); } @@ -132,7 +132,7 @@ bool Verbosity_config::parse(std::istream& is) } result_pairs.push_back (make_pair - (string(to_upper_copy(leaf_tokens[0], locale::classic())), + (string{to_upper_copy(leaf_tokens[0], locale::classic())}, std::move(sev))); } // for (token : tokens) } // if (!tokens_str.empty()) @@ -143,7 +143,7 @@ bool 
Verbosity_config::parse(std::istream& is) { result_pairs.insert (result_pairs.begin(), - make_pair(string(), Sev(Config::S_MOST_VERBOSE_SEV_DEFAULT))); + make_pair(string(), Sev{Config::S_MOST_VERBOSE_SEV_DEFAULT})); } // Finalize only if all succeeded only (as promised). diff --git a/src/flow/net_flow/asio/node.hpp b/src/flow/net_flow/asio/node.hpp index d1e6ba005..29e039249 100644 --- a/src/flow/net_flow/asio/node.hpp +++ b/src/flow/net_flow/asio/node.hpp @@ -258,8 +258,8 @@ class Node : */ explicit Node(log::Logger* logger, util::Task_engine* target_async_task_engine, const util::Udp_endpoint& low_lvl_endpoint, - Net_env_simulator* net_env_sim = 0, Error_code* err_code = 0, - const Node_options& opts = Node_options()); + Net_env_simulator* net_env_sim = nullptr, Error_code* err_code = nullptr, + const Node_options& opts = Node_options{}); // Methods. @@ -379,7 +379,7 @@ class Node : void async_connect(const Remote_endpoint& to, const Handler& on_result, const boost::chrono::duration& max_wait, - const Peer_socket_options* opts = 0); + const Peer_socket_options* opts = nullptr); /** * A combination of async_connect() and connect_with_metadata() (asynchronously blocking connect, with supplied @@ -407,7 +407,7 @@ class Node : const Handler& on_result, const boost::chrono::duration& max_wait, const boost::asio::const_buffer& serialized_metadata, - const Peer_socket_options* opts = 0); + const Peer_socket_options* opts = nullptr); /** * Equivalent to `async_connect(to, on_result, duration::max(), opts)`; i.e., async_connect() @@ -425,7 +425,7 @@ class Node : template void async_connect(const Remote_endpoint& to, const Handler& on_result, - const Peer_socket_options* opts = 0); + const Peer_socket_options* opts = nullptr); /** * Equivalent to `async_connect_with_metadata(to, on_result, duration::max(), @@ -446,7 +446,7 @@ class Node : void async_connect_with_metadata(const Remote_endpoint& to, const Handler& on_result, const boost::asio::const_buffer& 
serialized_metadata, - const Peer_socket_options* opts = 0); + const Peer_socket_options* opts = nullptr); private: // Friends. @@ -647,7 +647,7 @@ void Node::async_op(typename Socket::Ptr sock, /* Timeout might be finite or infinite (non-existent). Latter case is much simpler, but for brevity we mix the code * paths of the two cases. */ - const bool timeout_given = wait_until != Fine_time_pt(); + const bool timeout_given = wait_until != Fine_time_pt{}; /* Explanation of why Strand is used / locking discussion: * @@ -701,7 +701,7 @@ void Node::async_op(typename Socket::Ptr sock, // Start one of the racers: the timer that'll fire once timeout is finished. // All the stuff needed by timeout can now be created (if !timeout_given, we save resources by not doing this). - timeout_state.reset(new Timeout_state(task_engine)); + timeout_state.reset(new Timeout_state{task_engine}); // Performance note: cannot move(on_result) here, as we still need on_result for 2nd closure made below. Copy. timeout_state->sched_task diff --git a/src/flow/net_flow/asio/peer_socket.cpp b/src/flow/net_flow/asio/peer_socket.cpp index cca2cb6b2..6f8ea5847 100644 --- a/src/flow/net_flow/asio/peer_socket.cpp +++ b/src/flow/net_flow/asio/peer_socket.cpp @@ -188,7 +188,7 @@ void Node::async_connect_impl(const Remote_endpoint& to, const Fine_duration& ma if (!sock) { // It's probably some user error like an invalid destination. - on_result(conn_err_code, Peer_socket::Ptr()); // It post()s user's originally-passed-in handler. + on_result(conn_err_code, Peer_socket::Ptr{}); // It post()s user's originally-passed-in handler. return; } // else we have a socket that has started connecting. @@ -213,7 +213,7 @@ void Node::async_connect_impl(const Remote_endpoint& to, const Fine_duration& ma Error_code dummy_prevents_throw; sock->close_abruptly(&dummy_prevents_throw); - on_result(wait_err_code, Peer_socket::Ptr()); // It post()s user's originally-passed-in handler. 
+ on_result(wait_err_code, Peer_socket::Ptr{}); // It post()s user's originally-passed-in handler. // *sock should lose all references and destruct shortly, as we didn't pass it to on_result(). } else if (wait_err_code) @@ -223,13 +223,13 @@ void Node::async_connect_impl(const Remote_endpoint& to, const Fine_duration& ma /* So we won't even pass the short-lived socket to callback, indicating failure via null pointer. * See comment in sync_connect_impl() about how we avoid passing an error socket for user to discover. */ - on_result(wait_err_code, Peer_socket::Ptr()); // It post()s user's originally-passed-in handler. + on_result(wait_err_code, Peer_socket::Ptr{}); // It post()s user's originally-passed-in handler. // As above, *sock should destruct soon. } else { assert(!wait_err_code); - on_result(wait_err_code, Peer_socket::Ptr()); // It post()s user's originally-passed-in handler. + on_result(wait_err_code, Peer_socket::Ptr{}); // It post()s user's originally-passed-in handler. // *sock lives on by being passed to them and probably saved by them! } diff --git a/src/flow/net_flow/asio/peer_socket.hpp b/src/flow/net_flow/asio/peer_socket.hpp index d6d71c256..a9e2f4b9c 100644 --- a/src/flow/net_flow/asio/peer_socket.hpp +++ b/src/flow/net_flow/asio/peer_socket.hpp @@ -373,7 +373,7 @@ class Peer_socket : * for a boost.asio-compatible async-op. * * ### Rationale ### - * Why not simply create use `Handler_func(on_result)`? After all they have the same signature, so if the + * Why not simply create use `Handler_func{on_result}`? After all they have the same signature, so if the * idea is to de-template our internal implementation of the various `async_*()` APIs, then that would be sufficient. 
* Answer: * @@ -413,7 +413,7 @@ void Peer_socket::async_receive(const Mutable_buffer_sequence& target, Handler&& on_result) { assert(target.begin() != target.end()); - async_receive_impl(Target_bufs_ptr(new Target_bufs(target.begin(), target.end())), + async_receive_impl(Target_bufs_ptr{new Target_bufs{target.begin(), target.end()}}, handler_func(on_result), util::chrono_duration_from_now_to_fine_time_pt(max_wait)); } @@ -423,7 +423,7 @@ void Peer_socket::async_receive(std::nullptr_t, const boost::chrono::duration& max_wait, Handler&& on_result) { - async_receive_impl(Target_bufs_ptr(), handler_func(on_result), + async_receive_impl(Target_bufs_ptr{}, handler_func(on_result), util::chrono_duration_from_now_to_fine_time_pt(max_wait)); } @@ -432,14 +432,14 @@ void Peer_socket::async_receive(const Mutable_buffer_sequence& target, Handler&& on_result) { assert(target.begin() != target.end()); - async_receive_impl(Target_bufs_ptr(new Target_bufs(target.begin(), target.end())), - handler_func(on_result), Fine_time_pt()); + async_receive_impl(Target_bufs_ptr{new Target_bufs{target.begin(), target.end()}}, + handler_func(on_result), Fine_time_pt{}); } template void Peer_socket::async_receive(std::nullptr_t, Handler&& on_result) { - async_receive_impl(Target_bufs_ptr(), handler_func(on_result), Fine_time_pt()); + async_receive_impl(Target_bufs_ptr{}, handler_func(on_result), Fine_time_pt{}); } template @@ -448,7 +448,7 @@ void Peer_socket::async_send(const Const_buffer_sequence& source, Handler&& on_result) { assert(source.begin() != source.end()); - async_send_impl(Source_bufs_ptr(new Source_bufs(source.begin(), source.end())), + async_send_impl(Source_bufs_ptr{new Source_bufs{source.begin(), source.end()}}, handler_func(on_result), util::chrono_duration_from_now_to_fine_time_pt(max_wait)); } @@ -458,7 +458,7 @@ void Peer_socket::async_send(std::nullptr_t, const boost::chrono::duration& max_wait, Handler&& on_result) { - async_send_impl(Source_bufs_ptr(), 
handler_func(on_result), + async_send_impl(Source_bufs_ptr{}, handler_func(on_result), util::chrono_duration_from_now_to_fine_time_pt(max_wait)); } @@ -467,15 +467,15 @@ void Peer_socket::async_send(const Const_buffer_sequence& source, Handler&& on_result) { assert(source.begin() != source.end()); - async_send_impl(Source_bufs_ptr(new Source_bufs(source.begin(), source.end())), - handler_func(on_result), Fine_time_pt()); + async_send_impl(Source_bufs_ptr{new Source_bufs{source.begin(), source.end()}}, + handler_func(on_result), Fine_time_pt{}); } template void Peer_socket::async_send (std::nullptr_t, Handler&& on_result) { - async_send_impl(Source_bufs_ptr(), handler_func(on_result), Fine_time_pt()); + async_send_impl(Source_bufs_ptr{}, handler_func(on_result), Fine_time_pt{}); } template diff --git a/src/flow/net_flow/asio/server_socket.cpp b/src/flow/net_flow/asio/server_socket.cpp index 89b8ae565..ee486f6c3 100644 --- a/src/flow/net_flow/asio/server_socket.cpp +++ b/src/flow/net_flow/asio/server_socket.cpp @@ -27,7 +27,7 @@ namespace flow::net_flow::asio Server_socket::Server_socket(log::Logger* logger_ptr, const Peer_socket_options* child_sock_opts) : net_flow::Server_socket(logger_ptr, child_sock_opts), - m_target_task_engine(0) + m_target_task_engine(nullptr) { // Only print pointer value, because most members are garbage at this point. FLOW_LOG_TRACE("boost.asio-integrated Server_socket [" << static_cast(this) << "] created; no Task_engine."); @@ -73,7 +73,7 @@ void Server_socket::async_accept_impl(Handler_func&& on_result, FLOW_LOG_WARNING("Cannot perform async op on object [" << this << "]: it is already closed for " "reason [" << err_code << '/' << err_code.message() << "]."); - on_result(err_code, Peer_socket::Ptr()); // It post()s user's originally-passed-in handler. + on_result(err_code, Peer_socket::Ptr{}); // It post()s user's originally-passed-in handler. 
return; } // else diff --git a/src/flow/net_flow/asio/server_socket.hpp b/src/flow/net_flow/asio/server_socket.hpp index 3f72467ca..668da4eec 100644 --- a/src/flow/net_flow/asio/server_socket.hpp +++ b/src/flow/net_flow/asio/server_socket.hpp @@ -261,7 +261,7 @@ void Server_socket::async_accept(const boost::chrono::duration& max bool reactor_pattern, const Handler& on_result) { - async_accept_impl(Handler_func(on_result), + async_accept_impl(Handler_func{on_result}, util::chrono_duration_from_now_to_fine_time_pt(max_wait), reactor_pattern); } @@ -269,20 +269,20 @@ void Server_socket::async_accept(const boost::chrono::duration& max template void Server_socket::async_accept(const Handler& on_result) { - async_accept_impl(Handler_func(on_result), Fine_time_pt(), false); + async_accept_impl(Handler_func{on_result}, Fine_time_pt{}, false); } template void Server_socket::async_accept(bool reactor_pattern, const Handler& on_result) { - async_accept_impl(Handler_func(on_result), Fine_time_pt(), reactor_pattern); + async_accept_impl(Handler_func(on_result), Fine_time_pt{}, reactor_pattern); } template void Server_socket::async_accept(const boost::chrono::duration& max_wait, const Handler& on_result) { - async_accept_impl(Handler_func(on_result), + async_accept_impl(Handler_func{on_result}, util::chrono_duration_from_now_to_fine_time_pt(max_wait), false); } diff --git a/src/flow/net_flow/detail/cong_ctl.cpp b/src/flow/net_flow/detail/cong_ctl.cpp index 2bdc9c51c..52dd2267f 100644 --- a/src/flow/net_flow/detail/cong_ctl.cpp +++ b/src/flow/net_flow/detail/cong_ctl.cpp @@ -104,12 +104,12 @@ Congestion_control_strategy* Congestion_control_selector::create_strategy switch (strategy_choice) { case Strategy_choice::S_CLASSIC: - return new Congestion_control_classic(logger_ptr, sock); + return new Congestion_control_classic{logger_ptr, sock}; case Strategy_choice::S_CLASSIC_BANDWIDTH_ESTIMATED: - return new Congestion_control_classic_with_bandwidth_est(logger_ptr, sock); + return 
new Congestion_control_classic_with_bandwidth_est{logger_ptr, sock}; } assert(false); - return 0; + return nullptr; } void Congestion_control_selector::get_ids(std::vector* ids) // Static. diff --git a/src/flow/net_flow/detail/cong_ctl/cong_ctl_classic_bw.cpp b/src/flow/net_flow/detail/cong_ctl/cong_ctl_classic_bw.cpp index 3b79254ee..2b38c6165 100644 --- a/src/flow/net_flow/detail/cong_ctl/cong_ctl_classic_bw.cpp +++ b/src/flow/net_flow/detail/cong_ctl/cong_ctl_classic_bw.cpp @@ -180,7 +180,7 @@ size_t Congestion_control_classic_with_bandwidth_est::congestion_window_adjusted const Peer_socket::Const_ptr sock = socket(); /* The basic formula is CWND = B * RTTmin. Units and arithmetic are as follows. B is in units of - * bytes per Time_unit(1). (The selection of Time_unit(1) is discussed in detail in that + * bytes per Time_unit{1}. (The selection of Time_unit{1} is discussed in detail in that * alias's doc header.) RTTmin, accordingly, is in units of Time_unit. Therefore we can simply * multiply the two values. * @@ -199,7 +199,7 @@ size_t Congestion_control_classic_with_bandwidth_est::congestion_window_adjusted FLOW_LOG_TRACE("cong_ctl|bw_est [" << sock << "] info: window calculation: wnd " "= bw x rtt_min = [" << sock->bytes_blocks_str(size_t(bytes_per_time)) << "] bytes " - "per [" << Time_unit(1) << "] x [" << m_rtt_min << "] " + "per [" << Time_unit{1} << "] x [" << m_rtt_min << "] " "(subject to floor [" << sock->bytes_blocks_str(floor_wnd_bytes) << "]) " "= [" << sock->bytes_blocks_str(new_wnd_bytes) << "]."); diff --git a/src/flow/net_flow/detail/drop_timer.cpp b/src/flow/net_flow/detail/drop_timer.cpp index aba6326e2..81a7f0299 100644 --- a/src/flow/net_flow/detail/drop_timer.cpp +++ b/src/flow/net_flow/detail/drop_timer.cpp @@ -33,9 +33,9 @@ Drop_timer::Ptr Drop_timer::create_drop_timer const Function& timer_fired) // Static. { // See doc comment for rationale. 
- return Ptr(new Drop_timer(logger_ptr, + return Ptr{new Drop_timer{logger_ptr, node_task_engine, sock_drop_timeout, std::move(sock), - timer_failure, timer_fired)); + timer_failure, timer_fired}}; } void Drop_timer::start_contemporaneous_events() @@ -385,7 +385,7 @@ void Drop_timer::start_timer() // Get time when earliest packet sent. assert(!m_sock->m_snd_flying_pkts_by_sent_when.empty()); // Timer must not be started when no packets In-flight. const Fine_time_pt& first_packet_sent_when - = m_sock->m_snd_flying_pkts_by_sent_when.const_back().second->m_sent_when.back().m_sent_time; + = (--m_sock->m_snd_flying_pkts_by_sent_when.cend())->second->m_sent_when.back().m_sent_time; fire_time_pt = first_packet_sent_when + m_sock_drop_timeout; use_time_pt_over_duration = true; diff --git a/src/flow/net_flow/detail/drop_timer.hpp b/src/flow/net_flow/detail/drop_timer.hpp index 9c57b55cf..4b588a0f9 100644 --- a/src/flow/net_flow/detail/drop_timer.hpp +++ b/src/flow/net_flow/detail/drop_timer.hpp @@ -145,7 +145,7 @@ namespace flow::net_flow class Drop_timer : // Endow us with shared_ptr<>s ::Ptr and ::Const_ptr (syntactic sugar). public util::Shared_ptr_alias_holder>, - // Allow access to Ptr(this) from inside Drop_timer methods. Just call shared_from_this(). + // Allow access to Ptr{this} from inside Drop_timer methods. Just call shared_from_this(). public boost::enable_shared_from_this, public log::Log_context, private boost::noncopyable @@ -301,7 +301,7 @@ class Drop_timer : /** * Constructs Drop_timer as described in the factory constructor create_drop_timer(). * Why have the factory method? We guarantee that this won't get deleted before the timer - * callback executes (causing a crash therein), by passing a `Ptr(this)` to + * callback executes (causing a crash therein), by passing a `Ptr{this}` to * `basic_waitable_timer::async_wait()`. However that can only work if all users of the object also * access it by a sharing `Ptr`. 
Thus we only provide access to the outside via a `Ptr` (the * factory). @@ -462,7 +462,7 @@ class Drop_timer : * During the time period starting with the last start_contemporaneous_events() call and ending with the subsequent * end_contemporaneous_events() call, if any -- in other words, during the last * contemporary events group, finished or otherwise -- this is the ID of the most-recently-sent packet (highest - * packet ID) such that that packet was acknowledged during that time period. 0 if none were acknowledged. + * packet ID) such that that packet was acknowledged during that time period. 0 if none were acknowledged. */ packet_id_t m_during_events_newest_acked_packet; diff --git a/src/flow/net_flow/detail/low_lvl_io.cpp b/src/flow/net_flow/detail/low_lvl_io.cpp index b39734b61..cf57d94ee 100644 --- a/src/flow/net_flow/detail/low_lvl_io.cpp +++ b/src/flow/net_flow/detail/low_lvl_io.cpp @@ -198,7 +198,7 @@ unsigned int Node::handle_incoming_with_simulation(util::Blob* packet_data, const bool must_dupe = (!is_sim_duplicate_packet) && m_net_env_sim && m_net_env_sim->should_duplicate_received_packet(); - Blob packet_data_copy(get_logger()); + Blob packet_data_copy{get_logger()}; if (must_dupe) { /* We will simulate duplication of the packet below. Since packet handling can be @@ -207,7 +207,7 @@ unsigned int Node::handle_incoming_with_simulation(util::Blob* packet_data, packet_data_copy = *(static_cast(packet_data)); // Add const to express we require a copy, not move. } - Fine_duration latency(m_net_env_sim ? m_net_env_sim->received_packet_latency() : Fine_duration::zero()); + Fine_duration latency{m_net_env_sim ? m_net_env_sim->received_packet_latency() : Fine_duration::zero()}; if (latency == Fine_duration::zero()) { // No simulated latency; just handle the packet now (mainstream case). 
@@ -263,7 +263,7 @@ void Node::async_wait_latency_then_handle_incoming(const Fine_duration& latency, /* As advertised, *packet_data loses its buffer into this new container, so that caller can immediately * use it for whatever they want. Meanwhile, we asynchronously own the actual data in it now. * Make a smart pointer to ensure it lives long enough for handler to execute... but likely no longer than that. */ - shared_ptr packet_data_moved_ptr(new Blob(std::move(*packet_data))); + shared_ptr packet_data_moved_ptr{new Blob{std::move(*packet_data)}}; // Unused if it doesn't get logged, which is a slight perf hit, but anyway this sim feature is a debug/test thing. const Fine_time_pt started_at = Fine_clock::now(); @@ -309,7 +309,7 @@ void Node::async_no_sock_low_lvl_packet_send(const util::Udp_endpoint& low_lvl_r { /* As of this writing we don't pace things, when no Peer_socket is involved (e.g., some RSTs) => always `false`: -| * v-------------------------------------------------| */ - async_low_lvl_packet_send_impl(low_lvl_remote_endpoint, packet, false, Peer_socket::Ptr()); + async_low_lvl_packet_send_impl(low_lvl_remote_endpoint, packet, false, Peer_socket::Ptr{}); } void Node::async_low_lvl_packet_send_impl(const util::Udp_endpoint& low_lvl_remote_endpoint, @@ -723,7 +723,7 @@ void Node::sock_pacing_new_packet_ready(Peer_socket::Ptr sock, Low_lvl_packet::P // else packet is DATA packet. const Fine_time_pt now = Fine_clock::now(); - if ((pacing.m_slice_start == Fine_time_pt()) || (now >= (pacing.m_slice_start + pacing.m_slice_period))) + if ((pacing.m_slice_start == Fine_time_pt{}) || (now >= (pacing.m_slice_start + pacing.m_slice_period))) { /* We are past the current time slice (if there is such a thing) and have a packet to send. 
By * the algorithm in struct Send_pacing_data doc header, this means we create a new time slice with @@ -781,7 +781,7 @@ void Node::sock_pacing_new_time_slice(Peer_socket::Ptr sock, const Fine_time_pt& if (slice_ideal_period == Fine_duration::zero()) { // Avoid division by zero and any other tomfoolery below.... - slice_ideal_period = Fine_duration(1); + slice_ideal_period = Fine_duration{1}; } Fine_duration timer_min_period = opt(m_opts.m_st_timer_min_period); @@ -793,7 +793,7 @@ void Node::sock_pacing_new_time_slice(Peer_socket::Ptr sock, const Fine_time_pt& * about 15 msec. @todo Perhaps choose here based on platform. It can get hairy, as there is * wide variation, so it would require much experimentation; but might be worth it for * performance. */ - const Fine_duration TIMER_MIN_PERIOD_DEFAULT = milliseconds(15); + const Fine_duration TIMER_MIN_PERIOD_DEFAULT = milliseconds{15}; timer_min_period = TIMER_MIN_PERIOD_DEFAULT; } diff --git a/src/flow/net_flow/detail/low_lvl_packet.cpp b/src/flow/net_flow/detail/low_lvl_packet.cpp index 1c760299e..ee85a22e7 100644 --- a/src/flow/net_flow/detail/low_lvl_packet.cpp +++ b/src/flow/net_flow/detail/low_lvl_packet.cpp @@ -386,7 +386,7 @@ Low_lvl_packet::Ptr Low_lvl_packet::create_from_raw_data_packet(log::Logger* log using boost::endian::little_to_native; using std::ostream; - Const_buffer raw_buf(raw_packet->const_data(), raw_packet->size()); + Const_buffer raw_buf{raw_packet->const_data(), raw_packet->size()}; // Make FLOW_LOG_...() calls below use these (we are static). FLOW_LOG_SET_CONTEXT(logger_ptr, Flow_log_component::S_NET_FLOW); @@ -417,7 +417,7 @@ Low_lvl_packet::Ptr Low_lvl_packet::create_from_raw_data_packet(log::Logger* log { FLOW_LOG_WARNING("Unable to deserialize low-level packet: The packet is too small: " "[" << raw_buf_size << "] bytes."); - return Ptr(); + return Ptr{}; } /* We'll advance this as we keep reading off values from raw buffer. 
@@ -447,7 +447,7 @@ Low_lvl_packet::Ptr Low_lvl_packet::create_from_raw_data_packet(log::Logger* log ? create_uninit_packet_base(logger_ptr) : ((raw_type_id == type_id_native_to_raw(typeid(Rst_packet))) ? create_uninit_packet_base(logger_ptr) - : Ptr()))))); + : Ptr{}))))); if (!packet) { FLOW_LOG_WARNING("Unable to deserialize low-level packet: The packet type is invalid: " @@ -467,7 +467,7 @@ Low_lvl_packet::Ptr Low_lvl_packet::create_from_raw_data_packet(log::Logger* log if (reserved2 != 0) { FLOW_LOG_WARNING("Unable to deserialize low-level packet: The packet format is unknown."); - return Ptr(); + return Ptr{}; } // else @@ -490,7 +490,7 @@ Low_lvl_packet::Ptr Low_lvl_packet::create_from_raw_data_packet(log::Logger* log // Low_lvl_packet part is filled out. The sub-type part has junk. This will fill that part out. if (!packet->deserialize_type_specific_data_from_raw_data_packet(&raw_buf, prefer_no_move, raw_packet)) { - return Ptr(); // Error. It logged. + return Ptr{}; // Error. It logged. 
} // else @@ -851,7 +851,7 @@ bool Ack_packet::deserialize_type_specific_data_from_raw_data_packet(Const_buffe const auto& ack_delay_raw = *reinterpret_cast(data); data += sizeof ack_delay_raw; const Fine_duration ack_delay - = Ack_delay_time_unit(little_to_native(ack_delay_raw)); + = Ack_delay_time_unit{little_to_native(ack_delay_raw)}; unsigned int rexmit_id; if (m_opt_rexmit_on) @@ -865,7 +865,7 @@ bool Ack_packet::deserialize_type_specific_data_from_raw_data_packet(Const_buffe rexmit_id = 0; } - m_rcv_acked_packets.push_back(Individual_ack::Ptr(new Individual_ack{ seq_num, ack_delay, rexmit_id })); + m_rcv_acked_packets.push_back(Individual_ack::Ptr{new Individual_ack{ seq_num, ack_delay, rexmit_id }}); } // for (all acks) assert(data == static_cast(raw_buf->data())); diff --git a/src/flow/net_flow/detail/low_lvl_packet.hpp b/src/flow/net_flow/detail/low_lvl_packet.hpp index 8f5c51e93..a4446f01d 100644 --- a/src/flow/net_flow/detail/low_lvl_packet.hpp +++ b/src/flow/net_flow/detail/low_lvl_packet.hpp @@ -527,7 +527,7 @@ struct Low_lvl_packet : * @param raw_packet * See create_from_raw_data_packet(). `raw_buf` must start somewhere within it and be * sized to go exactly to its end. - * @return `false` if create_from_raw_data_packet() should return `Ptr()` (error); `true` if + * @return `false` if create_from_raw_data_packet() should return `Ptr{}` (error); `true` if * deserialization successful. */ virtual bool deserialize_type_specific_data_from_raw_data_packet(Const_buffer* raw_buf, @@ -1023,7 +1023,7 @@ struct Ack_packet : public Low_lvl_packet /** * Type used to store the ACK delay for a given individual acknowledged packet. The value - * specifies the number of multiples of `Ack_delay_time_unit(1)` comprising a packet's ACK delay. + * specifies the number of multiples of `Ack_delay_time_unit{1}` comprised by a packet's ACK delay. * * An earlier version of `net_flow` used the unit milliseconds and the encoding type uint16_t. 
The * reasoning was that this allowed a maximum ACK delay of ~65 sec which should be plenty; and that @@ -1033,13 +1033,13 @@ struct Ack_packet : public Low_lvl_packet * choose to use the same units as #Fine_duration, which is how we compute all time periods. As * for the the encoding width, we use 64 bits just in case. * - * @todo Reconsider the encoding width. If `Ack_delay_time_unit(1)` is a nanosecond, then 32 bits + * @todo Reconsider the encoding width. If `Ack_delay_time_unit{1}` is a nanosecond, then 32 bits * would support a maximum delay of ~4.1 seconds which is likely fine for most real-world * scenarios. This would reduce the size of ACK packets quite a bit. */ using ack_delay_t = uint64_t; - /// `Ack_delay_time_unit(1)` is the duration corresponding to the #ack_delay_t value 1; and proportionally further. + /// `Ack_delay_time_unit{1}` is the duration corresponding to the #ack_delay_t value 1; and proportionally further. using Ack_delay_time_unit = Fine_duration; struct Individual_ack; @@ -1254,7 +1254,7 @@ struct Ack_packet::Individual_ack_rexmit_off /// See Individual_ack::m_seq_num and Sequence_number::raw_num_ref(). const Sequence_number::seq_num_t m_seq_num_raw; - /// See Individual_ack::m_delay; this is in `Ack_delay_time_unit(1)` multiples. + /// See Individual_ack::m_delay; this is in `Ack_delay_time_unit{1}` multiples. const ack_delay_t m_delay; // Type checks. @@ -1389,7 +1389,7 @@ boost::shared_ptr Low_lvl_packet::create_uninit_packet(log:: // Note: Low_lvl_packet_sub is not Low_lvl_packet. It is a sub-type: Syn_packet, Ack_packet, etc. We're a template. // `friend` relation required to be able to call this private constructor. 
- return shared_ptr(new Low_lvl_packet_sub(logger)); + return shared_ptr(new Low_lvl_packet_sub{logger}); } template diff --git a/src/flow/net_flow/detail/port_space.cpp b/src/flow/net_flow/detail/port_space.cpp index 7ca2f466f..70864023f 100644 --- a/src/flow/net_flow/detail/port_space.cpp +++ b/src/flow/net_flow/detail/port_space.cpp @@ -263,7 +263,7 @@ size_t Port_space::find_available_port_bit_idx(const Bit_set& ports) using boost::random::uniform_int_distribution; // Pick a random bit in bit field. - uniform_int_distribution range(0, ports.size() - 1); + uniform_int_distribution range{0, ports.size() - 1}; size_t port_bit_idx = range(m_rnd_generator); // If that bit is 0, go right until you find a 1. diff --git a/src/flow/net_flow/detail/seq_num.cpp b/src/flow/net_flow/detail/seq_num.cpp index 297cde1ef..a5cbc2671 100644 --- a/src/flow/net_flow/detail/seq_num.cpp +++ b/src/flow/net_flow/detail/seq_num.cpp @@ -33,9 +33,9 @@ namespace flow::net_flow * byte it would take many centuries for the sequence numbers to overflow seq_num_t. */ const Sequence_number::seq_num_t Sequence_number::Generator::S_MAX_INIT_SEQ_NUM = std::numeric_limits::max() / 2; -const Fine_duration Sequence_number::Generator::S_TIME_PER_SEQ_NUM = boost::chrono::microseconds(4); // From RFC 793. +const Fine_duration Sequence_number::Generator::S_TIME_PER_SEQ_NUM = boost::chrono::microseconds{4}; // From RFC 793. const Fine_duration Sequence_number::Generator::S_MIN_DELAY_BETWEEN_ISN - = boost::chrono::milliseconds(500); // From TCP/IP Illustrated Vol. 2: The Implementation (BSD Net/3). + = boost::chrono::milliseconds{500}; // From TCP/IP Illustrated Vol. 2: The Implementation (BSD Net/3). // Implementations. @@ -60,7 +60,7 @@ Sequence_number Sequence_number::Generator::generate_init_seq_num() * various considerations this would introduce -- multi-threadedness, for instance -- might be too much to worry * about given our modest, non-cryptographic needs here. 
*/ - Rnd_gen_uniform_range rnd_single_use(1, S_MAX_INIT_SEQ_NUM); // 0 is a reserved number; do not use. + Rnd_gen_uniform_range rnd_single_use{1, S_MAX_INIT_SEQ_NUM}; // 0 is a reserved number; do not use. m_last_init_seq_num.m_num = rnd_single_use(); } else @@ -128,7 +128,7 @@ Sequence_number& Sequence_number::operator-=(seq_num_delta_t delta) Sequence_number Sequence_number::operator+(seq_num_delta_t delta) const { - return Sequence_number(*this) += delta; + return Sequence_number{*this} += delta; } Sequence_number Sequence_number::operator-(seq_num_delta_t delta) const diff --git a/src/flow/net_flow/detail/seq_num.hpp b/src/flow/net_flow/detail/seq_num.hpp index 8aaf74178..8b6f15573 100644 --- a/src/flow/net_flow/detail/seq_num.hpp +++ b/src/flow/net_flow/detail/seq_num.hpp @@ -178,7 +178,7 @@ class Sequence_number Sequence_number& operator=(const Sequence_number& source); /** - * Returns true if and only if `*this != Sequence_number()` (i.e., is non-zero). + * Returns true if and only if `*this != Sequence_number{}` (i.e., is non-zero). * * @return Ditto. */ @@ -332,7 +332,7 @@ class Sequence_number * be such multiples" or "unknown"; this is the default at construction. */ void set_metadata(char num_line_id = 0, - const Sequence_number& zero_point = Sequence_number(), + const Sequence_number& zero_point = Sequence_number{}, seq_num_delta_t multiple_size = 0); private: diff --git a/src/flow/net_flow/detail/socket_buffer.cpp b/src/flow/net_flow/detail/socket_buffer.cpp index 5657062c3..350d287b1 100644 --- a/src/flow/net_flow/detail/socket_buffer.cpp +++ b/src/flow/net_flow/detail/socket_buffer.cpp @@ -63,18 +63,18 @@ size_t Socket_buffer::feed_buf_move(util::Blob* data, size_t max_data_size) if (target_space_left < src_data_size) { // Unfortunately we'll have to move only part of *data; so we'll have to do a linear-time thing. 
- Blob* bytes = new Blob(get_logger()); + Blob* bytes = new Blob{get_logger()}; // (All operations are max-performance:) Allocate N bytes; copy N bytes: (*data)[0, 1, ...]. bytes->assign_copy(const_buffer(data->const_data(), target_space_left)); data->erase(data->begin(), data->begin() + target_space_left); - m_q.push_back(Blob_ptr(bytes)); + m_q.push_back(Blob_ptr{bytes}); m_data_size = max_data_size; } else { // Enough space for all of *data -- so just use a constant-time swap. - Blob_ptr bytes_ptr(new Blob(std::move(*data))); // Move inner representation of *data into *bytes_ptr. + Blob_ptr bytes_ptr{new Blob{std::move(*data)}}; // Move inner representation of *data into *bytes_ptr. // *data empty now. m_q.push_back(bytes_ptr); diff --git a/src/flow/net_flow/detail/socket_buffer.hpp b/src/flow/net_flow/detail/socket_buffer.hpp index 1e6a02cf0..718690d97 100644 --- a/src/flow/net_flow/detail/socket_buffer.hpp +++ b/src/flow/net_flow/detail/socket_buffer.hpp @@ -433,9 +433,9 @@ size_t Socket_buffer::feed_bufs_copy(const Const_buffer_sequence& data, size_t m // Get the raw data pointer. const auto buf_start = static_cast(buf_data.data()); - const Blob_ptr buf_copy(new Blob(get_logger())); + const Blob_ptr buf_copy{new Blob{get_logger()}}; // Make a byte blob copy from that raw memory. Performance is highest possible (allocate, copy). - buf_copy->assign_copy(const_buffer(buf_start, to_copy)); + buf_copy->assign_copy(const_buffer{buf_start, to_copy}); m_q.push_back(buf_copy); // Accounting. @@ -501,7 +501,7 @@ size_t Socket_buffer::feed_bufs_copy(const Const_buffer_sequence& data, size_t m if (m_q.empty() || (m_q.back()->size() == m_block_size_hint)) { // Either the trailing buffer in queue is filled to capacity; or no trailing buffer exists. Make an all-new one. - m_q.push_back(Blob_ptr(new Blob(get_logger()))); + m_q.push_back(Blob_ptr{new Blob{get_logger()}}); // Reserve exactly N bytes of capacity (should be the only allocation for this member). 
m_q.back()->reserve(m_block_size_hint); @@ -596,7 +596,7 @@ size_t Socket_buffer::consume_bufs_copy(const Mutable_buffer_sequence& target_bu "slow-consumed buffer of size/total [" << to_copy << '/' << src_bytes.size() << "]."); // Very verbose and CPU-intensive! FLOW_LOG_DATA("Buffer data " - "[" << util::buffers_dump_string(const_buffer(src_bytes.const_begin(), to_copy), + "[" << util::buffers_dump_string(const_buffer{src_bytes.const_begin(), to_copy}, "", size_t(-1)) << "]."); @@ -660,8 +660,8 @@ void Socket_buffer::copy_bytes_from_buf_seq(Const_it* cur_buf_it, /* This is the reason for using this function instead of buffer_iterator (which would've been much easier -- but * this is way faster and probably uses memcpy() or similar). */ dest = dest_buf->emplace_copy(dest, - const_buffer(static_cast((*cur_buf_it)->data()) + *pos_in_buf, - to_copy_in_buf)); + const_buffer{static_cast((*cur_buf_it)->data()) + *pos_in_buf, + to_copy_in_buf}); to_copy -= to_copy_in_buf; *pos_in_buf += to_copy_in_buf; @@ -696,8 +696,8 @@ void Socket_buffer::copy_bytes_to_buf_seq(Const_it* cur_buf_it, const size_t to_copy_in_buf = min(to_copy, cur_buf_size - *pos_in_buf); src = src_buf.sub_copy(src, - mutable_buffer(static_cast((*cur_buf_it)->data()) + *pos_in_buf, - to_copy_in_buf)); + mutable_buffer{static_cast((*cur_buf_it)->data()) + *pos_in_buf, + to_copy_in_buf}); to_copy -= to_copy_in_buf; *pos_in_buf += to_copy_in_buf; diff --git a/src/flow/net_flow/detail/stats/bandwidth.cpp b/src/flow/net_flow/detail/stats/bandwidth.cpp index 19209c774..90a923bde 100644 --- a/src/flow/net_flow/detail/stats/bandwidth.cpp +++ b/src/flow/net_flow/detail/stats/bandwidth.cpp @@ -155,7 +155,7 @@ void Send_bandwidth_estimator::on_acks(size_t bytes) if (since_sample_start > min_sample_period) { /* Cool, enough time has passed since this sample was started; take a bandwidth sample over - * that time period (just bytes/time). 
As promised, our units are bytes per Time_unit(1), + * that time period (just bytes/time). As promised, our units are bytes per Time_unit{1}, * so convert from Fine_duration to (the likely less fine) Time_unit before dividing. We * use ceil() instead of truncation or round() to avoid rounding down to zero and * resulting in division by zero. (Shouldn't really happen due to sample_period_floor, @@ -173,7 +173,7 @@ void Send_bandwidth_estimator::on_acks(size_t bytes) m_no_samples_yet = false; FLOW_LOG_TRACE("bw_est [" << sock << "] update: first complete sample; bw_est = bw_est_less_smoothed " - "= [" << bytes_per_time_this_sample << "] bytes per [" << Time_unit(1) << "]" + "= [" << bytes_per_time_this_sample << "] bytes per [" << Time_unit{1} << "]" "= [" << util::to_mbit_per_sec(bytes_per_time_this_sample) << " Mbit/s]; " "start new sample at 0 bytes."); @@ -208,7 +208,7 @@ void Send_bandwidth_estimator::on_acks(size_t bytes) "= filter[" << prev_bytes_per_time_smoothed << ", " << m_bytes_per_time_less_smoothed << "] " "= [" << m_bytes_per_time_smoothed << "] units " "= [" << util::to_mbit_per_sec(m_bytes_per_time_smoothed) << " Mbit/s]; " - "units = bytes per [" << Time_unit(1) << "]."); + "units = bytes per [" << Time_unit{1} << "]."); } // if (since_sample_start > min_sample_period) /* Start new sample. Note that m_bytes_this_sample is about to get immediately incremented, as explained diff --git a/src/flow/net_flow/detail/stats/bandwidth.hpp b/src/flow/net_flow/detail/stats/bandwidth.hpp index 2fbd205af..1d381ecc6 100644 --- a/src/flow/net_flow/detail/stats/bandwidth.hpp +++ b/src/flow/net_flow/detail/stats/bandwidth.hpp @@ -81,7 +81,7 @@ namespace flow::net_flow * * ### Units ### * bandwidth_bytes_per_time() returns the current bandwidth estimate per unit time U, as an - * *integer* number of bytes (rounded down). What is U? U is given as `Time_unit(1)`, where + * *integer* number of bytes (rounded down). What is U? 
U is given as `Time_unit{1}`, where * #Time_unit is a public alias of a boost.chrono duration type. Be careful in any arithmetic done * with the value returned; both overflow and underflow can occur, if one does not take care to * sanity-check the arithmetic. The justification for the value of the #Time_unit alias is given @@ -136,7 +136,7 @@ class Send_bandwidth_estimator : /** * The primary time unit over which this class reports bandwidth. So when * bandwidth_bytes_per_time() return the number N, that means its bandwidth estimate is N bytes - * per `Time_unit(1)` time. + * per `Time_unit{1}` time. * * ### Implementation notes ### * Why choose milliseconds? There are two conflicting constraints on @@ -189,7 +189,7 @@ class Send_bandwidth_estimator : /** * Returns the current estimate of the available outgoing bandwidth per unit time for the - * containing socket's connection, in units of bytes per `Time_unit(1)`. This value may be zero if + * containing socket's connection, in units of bytes per `Time_unit{1}`. This value may be zero if * either there is not enough information to make a reasonable estimate, or if the estimated * bandwidth is less than a certain low threshold. * @@ -240,7 +240,7 @@ class Send_bandwidth_estimator : /** * Applies the low-pass filter that takes the given previous result of the filter and blends in * the given new sample. The values should be in the same units, which are presumably bytes per - * `Time_unit(1)`. + * `Time_unit{1}`. * * @param prev_val_per_time * Previous result of this filter. diff --git a/src/flow/net_flow/error/error.cpp b/src/flow/net_flow/error/error.cpp index 8419fbda6..5215a6ecf 100644 --- a/src/flow/net_flow/error/error.cpp +++ b/src/flow/net_flow/error/error.cpp @@ -94,7 +94,7 @@ Error_code make_error_code(Code err_code) { /* Assign Category as the category for net_flow::error::Code-cast error_codes; * this basically glues together Category::name()/message() with the Code enum. 
*/ - return Error_code(static_cast(err_code), Category::S_CATEGORY); + return Error_code{static_cast(err_code), Category::S_CATEGORY}; } Category::Category() = default; diff --git a/src/flow/net_flow/error/error.hpp b/src/flow/net_flow/error/error.hpp index 0fa066918..81df5476e 100644 --- a/src/flow/net_flow/error/error.hpp +++ b/src/flow/net_flow/error/error.hpp @@ -36,7 +36,7 @@ * std::cout << "General error value = [" << code.value() << "]; msg = [" << code.message() << "].\n"; * // And we can throw an std::exception (`catch{}` can log the exception's .what() which will contain * // the numeric value, the human message, and the optional context string). - * throw flow::error::Runtime_error(code, "Additional context info here!"); + * throw flow::error::Runtime_error{code, "Additional context info here!"}; * ~~~ * * ### Discussion ### diff --git a/src/flow/net_flow/event_set.cpp b/src/flow/net_flow/event_set.cpp index 6aa0e7ac1..0cf871f1f 100644 --- a/src/flow/net_flow/event_set.cpp +++ b/src/flow/net_flow/event_set.cpp @@ -42,7 +42,7 @@ const boost::unordered_map empty socket set. m_can(empty_ev_type_to_socks_map()), // Ditto. m_baseline_check_pending(false) @@ -59,13 +59,13 @@ Event_set::~Event_set() Event_set::State Event_set::state() const { - Lock_guard lock(m_mutex); + Lock_guard lock{m_mutex}; return m_state; } Node* Event_set::node() const { - Lock_guard lock(m_mutex); + Lock_guard lock{m_mutex}; return m_node; } @@ -177,7 +177,7 @@ bool Event_set::async_wait(const Event_handler& on_event, Error_code* err_code) * wait" functionality in isolation (something like a select() with a 0 timeout). */ // Lock everything. We're going to be writing to things other user threads and W will be accessing/writing. - Lock_guard lock(m_mutex); + Lock_guard lock{m_mutex}; // Check for invalid arguments, basically. 
if (all_of(m_want, ev_type_to_socks_map_entry_is_empty)) @@ -232,7 +232,7 @@ bool Event_set::async_wait_finish(Error_code* err_code) * when/if it gets to it, as user is no longer interested in *this's events (which is correct). */ // Lock everything, as we'll be reading/changing m_state at least. - Lock_guard lock(m_mutex); + Lock_guard lock{m_mutex}; if (m_state == State::S_CLOSED) { @@ -289,7 +289,7 @@ bool Event_set::poll(Error_code* err_code) * Of course, the result is faster and simpler code as well (compared to post()ing it). */ // Lock everything, as we'll be reading/changing much state. - Lock_guard lock(m_mutex); + Lock_guard lock{m_mutex}; if (m_state == State::S_CLOSED) { @@ -355,7 +355,7 @@ bool Event_set::sync_wait_impl(const Fine_duration& max_wait, Error_code* err_co { // Lock so that we can safely check result of the poll() without another thread messing with it. - Lock_guard lock(m_mutex); // OK because m_mutex is recursive (poll() will also momentarily lock). + Lock_guard lock{m_mutex}; // OK because m_mutex is recursive (poll() will also momentarily lock). if (!poll(err_code)) // May throw. { @@ -471,7 +471,7 @@ void Event_set::close(Error_code* err_code) // We are in user thread U != W. - Lock_guard lock(m_mutex); // Lock m_node/m_state; also it's a pre-condition for Node::event_set_close(). + Lock_guard lock{m_mutex}; // Lock m_node/m_state; also it's a pre-condition for Node::event_set_close(). if (m_state == State::S_CLOSED) { @@ -502,7 +502,7 @@ bool Event_set::swap_wanted_sockets(Sockets* target_set, Event_type ev_type, Err assert(target_set); // Accessing m_state, socket sets, etc. which may be written by other threads at any time. Must lock. - Lock_guard lock(m_mutex); + Lock_guard lock{m_mutex}; Sockets& want_set = m_want[ev_type]; @@ -530,7 +530,7 @@ bool Event_set::clear_wanted_sockets(Event_type ev_type, Error_code* err_code) // We are in thread U != W. // Accessing m_state, the sets, etc. 
which may be written by other threads at any time. Must lock. - Lock_guard lock(m_mutex); + Lock_guard lock{m_mutex}; Sockets& want_set = m_want[ev_type]; @@ -558,7 +558,7 @@ bool Event_set::events_wanted(Error_code* err_code) const // We are in thread U != W. // Lock everything, as we'll be reading state and other things. - Lock_guard lock(m_mutex); + Lock_guard lock{m_mutex}; if (m_state == State::S_CLOSED) { @@ -581,7 +581,7 @@ bool Event_set::events_detected(Error_code* err_code) const // We are in thread U != qW. // Lock everything, as we'll be reading state. - Lock_guard lock(m_mutex); + Lock_guard lock{m_mutex}; if (m_state == State::S_CLOSED) { @@ -618,7 +618,7 @@ bool Event_set::emit_result_sockets(Sockets* target_set, Event_type ev_type, Err assert(target_set); // Accessing m_state, the sets, etc. which may be written by other threads at any time. Must lock. - Lock_guard lock(m_mutex); + Lock_guard lock{m_mutex}; Sockets& can_set = m_can[ev_type]; @@ -652,7 +652,7 @@ bool Event_set::clear_result_sockets(Event_type ev_type, Error_code* err_code) // We are in thread U != W. // Accessing m_state, the sets, etc. which may be written by other threads at any time. Must lock. - Lock_guard lock(m_mutex); + Lock_guard lock{m_mutex}; Sockets& can_set = m_can[ev_type]; @@ -712,7 +712,7 @@ bool Event_set::clear(Error_code* err_code) // We are in thread U != W. // Accessing m_state, the sets, etc. which may be written by other threads at any time. Must lock. - Lock_guard lock(m_mutex); + Lock_guard lock{m_mutex}; FLOW_LOG_TRACE("Clearing sets in Event_set [" << this << "]; pre-clear set sizes: " "wanted [" << ev_type_to_socks_map_sizes_to_str(m_want) << "], " @@ -734,12 +734,12 @@ bool Event_set::clear(Error_code* err_code) Event_set::Ev_type_to_socks_map Event_set::empty_ev_type_to_socks_map() // Static. { return Ev_type_to_socks_map - ({ + {{ // Linked_hash_map order is significant. Iteration will occur in this canonical order in logs, etc. 
{ Event_type::S_PEER_SOCKET_READABLE, Sockets() }, { Event_type::S_PEER_SOCKET_WRITABLE, Sockets() }, { Event_type::S_SERVER_SOCKET_ACCEPTABLE, Sockets() } - }); + }}; } void Event_set::clear_ev_type_to_socks_map(Ev_type_to_socks_map* ev_type_to_socks_map) // Static. @@ -889,7 +889,7 @@ Event_set::Ptr Node::event_set_create(Error_code* err_code) if (!running()) { FLOW_ERROR_EMIT_ERROR(error::Code::S_NODE_NOT_RUNNING); - return Event_set::Ptr(); + return Event_set::Ptr{}; } // else @@ -897,7 +897,7 @@ Event_set::Ptr Node::event_set_create(Error_code* err_code) * Addendum regarding performance: event_set_create() should be pretty rare. */ // Load this body onto thread W boost.asio work queue. event_set_promise captured by reference, as we will wait. - Event_set::Ptr event_set(new Event_set(get_logger())); + Event_set::Ptr event_set{new Event_set{get_logger()}}; event_set->m_state = Event_set::State::S_INACTIVE; event_set->m_node = this; event_set->m_baseline_check_pending = false; @@ -989,7 +989,7 @@ void Node::event_set_check_baseline_assuming_state(Event_set::Ptr event_set) // We are in thread W. // Imperative to lock all of event_set. Much access possible from user threads. - Event_set::Lock_guard lock(event_set->m_mutex); + Event_set::Lock_guard lock{event_set->m_mutex}; /* event_set_async_wait() placed us onto thread W. When it did so, event_set->m_state == * S_WAITING (waiting for 1+ events to hold, so we can inform user). However that may have @@ -1189,7 +1189,7 @@ void Node::event_set_all_check_delta(bool defer_delta_check) for (Event_set::Ptr event_set : m_event_sets) { // As explained above, work on one Event_set at a time. Lock it. - Event_set::Lock_guard lock(event_set->m_mutex); + Event_set::Lock_guard lock{event_set->m_mutex}; if (event_set->m_state != Event_set::State::S_WAITING) { @@ -1306,7 +1306,7 @@ void Node::event_set_close(Event_set::Ptr event_set, Error_code* err_code) // We are in thread W. 
event_set_close() is waiting for us to set close_promise in thread U. // Something like async_wait_finish() may be setting event_set->m_state or other things... must lock. - Event_set::Lock_guard lock(event_set->m_mutex); + Event_set::Lock_guard lock{event_set->m_mutex}; /* Since we were placed onto thread W, another handler may have been executed before boost.asio * got to us. Therefore we may already be S_CLOSED. Detect this. */ @@ -1365,7 +1365,7 @@ void Node::event_set_close_worker(Event_set::Ptr event_set) assert(event_set->m_state != Event_set::State::S_CLOSED); event_set->m_state = Event_set::State::S_CLOSED; - event_set->m_node = 0; // Maintain invariant. + event_set->m_node = nullptr; // Maintain invariant. // Free resources. In particular, after Event_set close, user won't be able to see last set of fired events. Event_set::clear_ev_type_to_socks_map(&event_set->m_want); diff --git a/src/flow/net_flow/event_set.hpp b/src/flow/net_flow/event_set.hpp index 3c9bd7274..8413d68e0 100644 --- a/src/flow/net_flow/event_set.hpp +++ b/src/flow/net_flow/event_set.hpp @@ -247,7 +247,7 @@ namespace flow::net_flow class Event_set : // Endow us with shared_ptr<>s ::Ptr and ::Const_ptr (syntactic sugar). public util::Shared_ptr_alias_holder>, - // Allow access to Ptr(this) from inside Event_set methods. Just call shared_from_this(). + // Allow access to Ptr{this} from inside Event_set methods. Just call shared_from_this(). 
public boost::enable_shared_from_this, public log::Log_context, private boost::noncopyable diff --git a/src/flow/net_flow/info.cpp b/src/flow/net_flow/info.cpp index c116f331f..01a7fa2d1 100644 --- a/src/flow/net_flow/info.cpp +++ b/src/flow/net_flow/info.cpp @@ -267,7 +267,7 @@ void Peer_socket_send_stats::output(std::ostream* os) const "[snd] low_lvl_packet_send_requested "; Peer_socket_info::output_map_of_pkt_counts(os, m_low_lvl_packet_xfer_requested_count_by_type, - static_cast*>(0)); + static_cast*>(nullptr)); *os << "]\n" << diff --git a/src/flow/net_flow/net_env_simulator.cpp b/src/flow/net_flow/net_env_simulator.cpp index 53126d6e2..0284ecaa4 100644 --- a/src/flow/net_flow/net_env_simulator.cpp +++ b/src/flow/net_flow/net_env_simulator.cpp @@ -85,7 +85,7 @@ Fine_duration Net_env_simulator::received_packet_latency() if (m_recv_latency_seq.empty()) { // Ran out of prescribed outcomes; use randomness. - return Fine_duration(m_recv_latency_distribution_msec(m_rnd_generator)); + return Fine_duration{m_recv_latency_distribution_msec(m_rnd_generator)}; } // else const Fine_duration latency = m_recv_latency_seq.front(); diff --git a/src/flow/net_flow/net_env_simulator.hpp b/src/flow/net_flow/net_env_simulator.hpp index f4d6c5733..a9d0b972e 100644 --- a/src/flow/net_flow/net_env_simulator.hpp +++ b/src/flow/net_flow/net_env_simulator.hpp @@ -132,11 +132,11 @@ class Net_env_simulator : explicit Net_env_simulator(log::Logger* logger_ptr, seed_type_t random_seed = 0, prob_type_t recv_packet_loss_prob = 0, - const Packet_loss_seq& recv_packet_loss_seq = Packet_loss_seq(), - const Latency_range& recv_latency_range = Latency_range(), - const Latency_seq& recv_latency_seq = Latency_seq(), + const Packet_loss_seq& recv_packet_loss_seq = Packet_loss_seq{}, + const Latency_range& recv_latency_range = Latency_range{}, + const Latency_seq& recv_latency_seq = Latency_seq{}, prob_type_t recv_packet_dup_prob = 0, - const Packet_dup_seq& recv_packet_dup_seq = Packet_dup_seq()); + 
const Packet_dup_seq& recv_packet_dup_seq = Packet_dup_seq{}); // Methods. diff --git a/src/flow/net_flow/node.cpp b/src/flow/net_flow/node.cpp index 0a4c7db86..4bfcba5b2 100644 --- a/src/flow/net_flow/node.cpp +++ b/src/flow/net_flow/node.cpp @@ -26,7 +26,7 @@ namespace flow::net_flow { // Static initializations. -const Fine_duration Node::S_REGULAR_INFREQUENT_TASKS_PERIOD = boost::chrono::seconds(1); // Infrequent enough CPU-wise. +const Fine_duration Node::S_REGULAR_INFREQUENT_TASKS_PERIOD = boost::chrono::seconds{1}; // Infrequent enough CPU-wise. // Note that they're references, not copies. Otherwise non-deterministic static initialization order would screw us. const size_t& Node::S_NUM_PORTS = Port_space::S_NUM_PORTS; @@ -106,7 +106,7 @@ Node::Node(log::Logger* logger_ptr, const util::Udp_endpoint& low_lvl_endpoint, if (our_err_code) // Throw exception if there is an error, and they passed in no Error_code. { - throw Runtime_error(our_err_code, FLOW_UTIL_WHERE_AM_I_STR()); + throw Runtime_error{our_err_code, FLOW_UTIL_WHERE_AM_I_STR()}; } } // Node::Node() @@ -214,7 +214,7 @@ void Node::worker_run(const util::Udp_endpoint low_lvl_endpoint) post(m_task_engine, [this]() { // We are in thread W. - m_event_loop_ready.set_value(Error_code()); + m_event_loop_ready.set_value(Error_code{}); }); // When a packet is available for reading (or error), call this->low_lvl_recv_and_handle(). @@ -986,7 +986,7 @@ const Node_options& Node::validate_options(const Node_options& opts, bool init, const Node_options& result = validate_options(opts, init, &our_err_code); if (our_err_code) { - throw flow::error::Runtime_error(our_err_code, FLOW_UTIL_WHERE_AM_I_STR()); + throw flow::error::Runtime_error{our_err_code, FLOW_UTIL_WHERE_AM_I_STR()}; } return result; } @@ -1042,7 +1042,7 @@ const Node_options& Node::validate_options(const Node_options& opts, bool init, /* The above validated only global options. 
Now verify that the per-socket template options (that * will be used to generate child Peer_sockets) are also valid. */ - sock_validate_options(opts.m_dyn_sock_opts, 0, err_code); // Will not throw. Will set *err_code if needed. + sock_validate_options(opts.m_dyn_sock_opts, nullptr, err_code); // Will not throw. Will set *err_code if needed. // On error, that set *err_code. return opts; @@ -1061,7 +1061,7 @@ bool Node::set_options(const Node_options& opts, Error_code* err_code) if (!running()) { FLOW_ERROR_EMIT_ERROR(error::Code::S_NODE_NOT_RUNNING); - return Peer_socket::Ptr().get(); + return Peer_socket::Ptr{}.get(); } // else @@ -1072,7 +1072,7 @@ bool Node::set_options(const Node_options& opts, Error_code* err_code) FLOW_LOG_TRACE("\n\n" << opts); // Will be writing if all goes well, so must acquire exclusive ownership of m_opts. - Options_lock lock(m_opts_mutex); + Options_lock lock{m_opts_mutex}; /* Validate the new option set (including ensuring they're not changing static options' values). * Note that an explicit pre-condition of this method is that m_opts_mutex is locked if needed, diff --git a/src/flow/net_flow/node.hpp b/src/flow/net_flow/node.hpp index 201d7d2d5..58774bf4e 100644 --- a/src/flow/net_flow/node.hpp +++ b/src/flow/net_flow/node.hpp @@ -986,7 +986,7 @@ class Node : * @param low_lvl_endpoint * The UDP endpoint (IP address and UDP port) which will be used for receiving incoming and * sending outgoing Flow traffic in this Node. - * E.g.: `Udp_endpoint(Ip_address_v4::any(), 1234)` // UDP port 1234 on all IPv4 interfaces. + * E.g.: `Udp_endpoint{Ip_address_v4::any(), 1234}` // UDP port 1234 on all IPv4 interfaces. * @param logger * The Logger implementation through which all logging from this Node will run. * See notes on logger ownership above. @@ -1007,8 +1007,8 @@ class Node : * Peer_socket::set_options(), Peer_socket::options(). 
*/ explicit Node(log::Logger* logger, const util::Udp_endpoint& low_lvl_endpoint, - Net_env_simulator* net_env_sim = 0, Error_code* err_code = 0, - const Node_options& opts = Node_options()); + Net_env_simulator* net_env_sim = nullptr, Error_code* err_code = nullptr, + const Node_options& opts = Node_options{}); /** * Destroys Node. Closes all Peer_socket objects as if by `sock->close_abruptly()`. Then closes all @@ -1060,7 +1060,7 @@ class Node : * chose 0 as the port, the value returned here will contain the actual emphemeral port randomly chosen by * the OS). * - * If `!running()`, this equals Udp_endpoint(). The logical value of the returned util::Udp_endpoint + * If `!running()`, this equals `Udp_endpoint{}`. The logical value of the returned util::Udp_endpoint * never changes over the lifetime of the Node. * * @return See above. Note that it is a reference. @@ -1096,8 +1096,8 @@ class Node : * @return Shared pointer to Peer_socket, which is in the `S_OPEN` main state; or null pointer, * indicating an error. */ - Peer_socket::Ptr connect(const Remote_endpoint& to, Error_code* err_code = 0, - const Peer_socket_options* opts = 0); + Peer_socket::Ptr connect(const Remote_endpoint& to, Error_code* err_code = nullptr, + const Peer_socket_options* opts = nullptr); /** * Same as connect() but sends, as part of the connection handshake, the user-supplied metadata, @@ -1132,8 +1132,8 @@ class Node : */ Peer_socket::Ptr connect_with_metadata(const Remote_endpoint& to, const boost::asio::const_buffer& serialized_metadata, - Error_code* err_code = 0, - const Peer_socket_options* opts = 0); + Error_code* err_code = nullptr, + const Peer_socket_options* opts = nullptr); /** * The blocking (synchronous) version of connect(). 
Acts just like connect() but instead of @@ -1189,8 +1189,8 @@ class Node : */ template Peer_socket::Ptr sync_connect(const Remote_endpoint& to, const boost::chrono::duration& max_wait, - Error_code* err_code = 0, - const Peer_socket_options* opts = 0); + Error_code* err_code = nullptr, + const Peer_socket_options* opts = nullptr); /** * A combination of sync_connect() and connect_with_metadata() (blocking connect, with supplied @@ -1212,8 +1212,8 @@ class Node : Peer_socket::Ptr sync_connect_with_metadata(const Remote_endpoint& to, const boost::chrono::duration& max_wait, const boost::asio::const_buffer& serialized_metadata, - Error_code* err_code = 0, - const Peer_socket_options* opts = 0); + Error_code* err_code = nullptr, + const Peer_socket_options* opts = nullptr); /** * Equivalent to `sync_connect(to, duration::max(), err_code, opt)s`; i.e., sync_connect() with no user @@ -1227,8 +1227,8 @@ class Node : * See sync_connect(). * @return See other sync_connect(). */ - Peer_socket::Ptr sync_connect(const Remote_endpoint& to, Error_code* err_code = 0, - const Peer_socket_options* opts = 0); + Peer_socket::Ptr sync_connect(const Remote_endpoint& to, Error_code* err_code = nullptr, + const Peer_socket_options* opts = nullptr); /** * Equivalent to `sync_connect_with_metadata(to, duration::max(), serialized_metadata, err_code, opts)`; i.e., @@ -1246,8 +1246,8 @@ class Node : */ Peer_socket::Ptr sync_connect_with_metadata(const Remote_endpoint& to, const boost::asio::const_buffer& serialized_metadata, - Error_code* err_code = 0, - const Peer_socket_options* opts = 0); + Error_code* err_code = nullptr, + const Peer_socket_options* opts = nullptr); /** * Sets up a server on the given local Flow port and returns Server_socket which can be used to @@ -1282,8 +1282,8 @@ class Node : * @return Shared pointer to Server_socket, which is in the Server_socket::State::S_LISTENING state at least * initially; or null pointer, indicating an error. 
*/ - Server_socket::Ptr listen(flow_port_t local_port, Error_code* err_code = 0, - const Peer_socket_options* child_sock_opts = 0); + Server_socket::Ptr listen(flow_port_t local_port, Error_code* err_code = nullptr, + const Peer_socket_options* child_sock_opts = nullptr); /** * Creates a new Event_set in Event_set::State::S_INACTIVE state with no sockets/events stored; returns this @@ -1294,7 +1294,7 @@ class Node : * error::Code::S_NODE_NOT_RUNNING. * @return Shared pointer to Event_set; or null pointer, indicating an error. */ - Event_set::Ptr event_set_create(Error_code* err_code = 0); + Event_set::Ptr event_set_create(Error_code* err_code = nullptr); /** * Interrupts any blocking operation, a/k/a wait, and informs the invoker of that operation that the @@ -1319,7 +1319,7 @@ class Node : * See flow::Error_code docs for error reporting semantics. error::Code generated: * error::Code::S_NODE_NOT_RUNNING. */ - void interrupt_all_waits(Error_code* err_code = 0); + void interrupt_all_waits(Error_code* err_code = nullptr); /** * Dynamically replaces the current options set (options()) with the given options set. @@ -1339,7 +1339,7 @@ class Node : * error::Code::S_OPTION_CHECK_FAILED, error::Code::S_NODE_NOT_RUNNING. * @return `true` on success, `false` on error. */ - bool set_options(const Node_options& opts, Error_code* err_code = 0); + bool set_options(const Node_options& opts, Error_code* err_code = nullptr); /** * Copies this Node's option set and returns that copy. If you intend to use set_options() to @@ -1482,7 +1482,7 @@ class Node : * not yet be available, say, during object construction). * @return Address of the Logger that was configured (either `logger` or `this->get_logger()`). 
*/ - log::Logger* this_thread_init_logger_setup(const std::string& thread_type, log::Logger* logger = 0); + log::Logger* this_thread_init_logger_setup(const std::string& thread_type, log::Logger* logger = nullptr); /** * Given a new set of Node_options intended to replace (or initialize) a Node's #m_opts, ensures @@ -2826,7 +2826,7 @@ class Node : * ESTABLISHED state: `operation_aborted` => NOOP; success or any other error => attempt to * send ACK(s). */ - void async_low_lvl_ack_send(Peer_socket::Ptr sock, const Error_code& sys_err_code = Error_code()); + void async_low_lvl_ack_send(Peer_socket::Ptr sock, const Error_code& sys_err_code = Error_code{}); /** * Return `true` if and only if there are enough data either in Peer_socket::m_snd_rexmit_q of `sock` (if @@ -2972,7 +2972,7 @@ class Node : * @param err_code * After return, `*err_code` is success or: error::Code::S_OPTION_CHECK_FAILED, * error::Code::S_STATIC_OPTION_CHANGED. - * If `!err_code`, error::Runtime_error() with that #Error_code is thrown instead. + * If `!err_code`, error::Runtime_error with that #Error_code is thrown instead. * @return `true` on success, `false` on validation error. */ bool sock_validate_options(const Peer_socket_options& opts, const Peer_socket_options* prev_opts, @@ -3261,7 +3261,7 @@ class Node : * Deserialized immutable SYN. * @param low_lvl_remote_endpoint * The remote Node address. - * @return New socket placed into Node socket table; or `Ptr()` on error, wherein no socket was saved. + * @return New socket placed into Node socket table; or `Ptr{}` on error, wherein no socket was saved. */ Peer_socket::Ptr handle_syn_to_listening_server(Server_socket::Ptr serv, boost::shared_ptr syn, @@ -3448,7 +3448,7 @@ class Node : * See this argument on the originating `sync_*()` method. * However, unlike that calling method's user-facing API, the present sync_op() method * does NOT allow null `err_code` (behavior undefined if `err_code` is null). 
- * Corollary: we will NOT throw Runtime_error(). + * Corollary: we will NOT throw `Runtime_error`. * @return The value that the calling `sync_*()` method should return to its caller. * Corner/special case: If `non_blocking_func.empty()` (a/k/a "reactor pattern" mode), then * this will always return `would_block_ret_val`; the caller shall interpret @@ -3715,9 +3715,9 @@ class Node : * should contain the actual local address and port (even if user specified 0 for the latter, * say). * - * This is equal to `Udp_endpoint()` until the constructor exits. After the constructor exits, its + * This is equal to `Udp_endpoint{}` until the constructor exits. After the constructor exits, its * value never changes, therefore all threads can access it without mutex. If the constructor - * fails to bind, this remains equal to `Udp_endpoint()` forever. + * fails to bind, this remains equal to `Udp_endpoint{}` forever. */ util::Udp_endpoint m_low_lvl_endpoint; @@ -3863,7 +3863,7 @@ struct Node::Socket_id // Data. /// The other side of the connection. - const Remote_endpoint m_remote_endpoint = Remote_endpoint(); + const Remote_endpoint m_remote_endpoint; /// This side of the connection (within this Node). const flow_port_t m_local_port = S_PORT_ANY; @@ -3994,7 +3994,7 @@ Non_blocking_func_ret_type Node::sync_op(typename Socket::Ptr sock, // else go ahead and wait. Non_blocking_func_ret_type op_result; - const bool timeout_given = wait_until != Fine_time_pt(); + const bool timeout_given = wait_until != Fine_time_pt{}; do { // We may have to call sync_wait() repeatedly; if timeout is given we must give less and less time each time. @@ -4143,20 +4143,20 @@ Opt_type Node::opt(const Opt_type& opt_val_ref) const * or boost.chrono time values which are internally also usually just integers), so the copy * should not be a big deal. 
*/ - Options_lock lock(m_opts_mutex); + Options_lock lock{m_opts_mutex}; return opt_val_ref; } template Peer_socket* Node::sock_create_forward_plus_ctor_args(const Peer_socket_options& opts) { - return new Peer_socket_impl_type(get_logger(), &m_task_engine, opts); + return new Peer_socket_impl_type{get_logger(), &m_task_engine, opts}; } template Server_socket* Node::serv_create_forward_plus_ctor_args(const Peer_socket_options* child_sock_opts) { - return new Server_socket_impl_type(get_logger(), child_sock_opts); + return new Server_socket_impl_type{get_logger(), child_sock_opts}; } } // namespace flow::net_flow diff --git a/src/flow/net_flow/options.cpp b/src/flow/net_flow/options.cpp index 06139a320..dc6c8b8fd 100644 --- a/src/flow/net_flow/options.cpp +++ b/src/flow/net_flow/options.cpp @@ -152,7 +152,7 @@ void Node_options::setup_config_parsing(Options_description* opts_desc) std::ostream& operator<<(std::ostream& os, const Node_options& opts) { Node_options sink; - Node_options::Options_description opts_desc("Per-net_flow::Node option values"); + Node_options::Options_description opts_desc{"Per-net_flow::Node option values"}; Node_options::setup_config_parsing_helper(&opts_desc, &sink, opts, true); return os << opts_desc; } @@ -187,9 +187,9 @@ Peer_socket_options::Peer_socket_options() : * in the code. */ m_st_max_block_size(1024), // Initial value recommended by RFC 6298 is 1 sec but seems too conservative. @todo Decide. - m_st_connect_retransmit_period(boost::chrono::milliseconds(125)), + m_st_connect_retransmit_period(boost::chrono::milliseconds{125}), // @todo. - m_st_connect_retransmit_timeout(boost::chrono::seconds(3)), + m_st_connect_retransmit_timeout(boost::chrono::seconds{3}), /* @todo Reconsider. See also to-do in class Node doc header. * WARNING! If you change this, ensure s_st_max_cong_wnd_blocks is still sufficiently small. 
*/ m_st_snd_buf_max_size(6 * 1024 * 1024), @@ -214,14 +214,14 @@ Peer_socket_options::Peer_socket_options() : * packets in SYN_RCVD state. */ m_st_rcv_max_packets_after_unrecvd_packet_ratio_percent(220), // Satisfies RFC 5681 (500 ms max); taken from BSD implementation (Stevens/Wright, TCP/IP Illustrated Vol. 2, 1995). - m_st_delayed_ack_timer_period(boost::chrono::milliseconds(200)), + m_st_delayed_ack_timer_period(boost::chrono::milliseconds{200}), // Per RFC 5681. m_st_max_full_blocks_before_ack_send(2), m_st_rexmit_on(true), // @todo Experiment and look at RFCs. m_st_max_rexmissions_per_packet(15), // @todo Experiment. RFC 6298 recommends this value. - m_st_init_drop_timeout(boost::chrono::seconds(1)), + m_st_init_drop_timeout(boost::chrono::seconds{1}), // @todo Experiment. Choosing less aggressive values for now, except for m_st_drop_all_on_drop_timeout. m_st_drop_packet_exactly_after_drop_timeout(false), // Consistent with RFC 4341, but see discussion where it's used. @@ -233,7 +233,7 @@ Peer_socket_options::Peer_socket_options() : /* Value taken from Linux's westwood.c which was written by the creators of Westwood+ bandwidth * estimation algorithm themselves. 50 msec seems like a reasonable line in the sand between * "small RTT" and "medium RTT." */ - m_st_snd_bandwidth_est_sample_period_floor(boost::chrono::milliseconds(50)), + m_st_snd_bandwidth_est_sample_period_floor(boost::chrono::milliseconds{50}), // Pass in a non-existent strategy ID, which will cause our operator<<() to choose what it considers the default. m_st_cong_ctl_strategy(boost::lexical_cast("none")), // Let code choose initial CWND using RFC 5681 method. @@ -257,13 +257,13 @@ Peer_socket_options::Peer_socket_options() : // Use RFC 5681 default (0 is special value). m_st_cong_ctl_classic_wnd_decay_percent(0), // The minimal allowed ceiling by RFC 6298. 
- m_dyn_drop_timeout_ceiling(boost::chrono::seconds(60)), + m_dyn_drop_timeout_ceiling(boost::chrono::seconds{60}), // RFC 6298 recommends this value. m_dyn_drop_timeout_backoff_factor(2), // This shouldn't be too bad. @todo Though it should probably be based off DTO or something.... - m_dyn_rcv_wnd_recovery_timer_period(boost::chrono::seconds(1)), + m_dyn_rcv_wnd_recovery_timer_period(boost::chrono::seconds{1}), // Seems OK. After a minute it's probably a lost cause. - m_dyn_rcv_wnd_recovery_max_period(boost::chrono::minutes(1)) + m_dyn_rcv_wnd_recovery_max_period(boost::chrono::minutes{1}) { // Nothing. } diff --git a/src/flow/net_flow/peer_socket.cpp b/src/flow/net_flow/peer_socket.cpp index 4ebb00e50..04f8ca1bb 100644 --- a/src/flow/net_flow/peer_socket.cpp +++ b/src/flow/net_flow/peer_socket.cpp @@ -42,7 +42,7 @@ Peer_socket::Peer_socket(log::Logger* logger_ptr, m_active_connect(false), // Meaningless; set explicitly. m_state(State::S_CLOSED), // Incorrect; set explicitly. m_open_sub_state(Open_sub_state::S_DISCONNECTING), // Incorrect; set explicitly. - m_node(0), // Incorrect; set explicitly. + m_node(nullptr), // Incorrect; set explicitly. m_rcv_buf(logger_ptr, 0), // Receive buffer mode: block size irrelevant (see Socket_buffer doc header). // Send buffer mode: pack data into block-sized chunks for dequeueing speed. See Socket_buffer doc header. m_snd_buf(logger_ptr, max_block_size()), @@ -76,7 +76,7 @@ Peer_socket::Peer_socket(log::Logger* logger_ptr, Peer_socket::~Peer_socket() // Virtual. { - /* Note that m_snd_cong_ctl, m_snd_bandwidth_estimator (etc.) and others store no Ptr(this), + /* Note that m_snd_cong_ctl, m_snd_bandwidth_estimator (etc.) and others store no Ptr{this}, * so this dtor will indeed execute (no circular shared_ptr problem). */ FLOW_LOG_TRACE("Peer_socket [" << this << "] destroyed."); @@ -84,7 +84,7 @@ Peer_socket::~Peer_socket() // Virtual. 
Peer_socket::State Peer_socket::state(Open_sub_state* open_sub_state) const { - Lock_guard lock(m_mutex); // State is liable to change at any time. + Lock_guard lock{m_mutex}; // State is liable to change at any time. if (open_sub_state && (m_state == State::S_OPEN)) { *open_sub_state = m_open_sub_state; @@ -94,13 +94,13 @@ Peer_socket::State Peer_socket::state(Open_sub_state* open_sub_state) const Node* Peer_socket::node() const { - Lock_guard lock(m_mutex); // m_node can simultaneously change to 0 if state changes to S_CLOSED. + Lock_guard lock{m_mutex}; // m_node can simultaneously change to 0 if state changes to S_CLOSED. return m_node; } Error_code Peer_socket::disconnect_cause() const { - Lock_guard lock(m_mutex); + Lock_guard lock{m_mutex}; return m_disconnect_cause; } @@ -115,7 +115,7 @@ bool Peer_socket::sync_send_reactor_pattern_impl(const Fine_time_pt& wait_until, FLOW_ERROR_EXEC_AND_THROW_ON_ERROR(size_t, sync_send_reactor_pattern_impl, wait_until, _1); - Lock_guard lock(m_mutex); + Lock_guard lock{m_mutex}; const Function empty_snd_buf_feed_func; assert(empty_snd_buf_feed_func.empty()); @@ -196,7 +196,7 @@ bool Peer_socket::sync_receive_reactor_pattern_impl(const Fine_time_pt& wait_unt FLOW_ERROR_EXEC_AND_THROW_ON_ERROR(size_t, sync_receive_reactor_pattern_impl, wait_until, _1); - Lock_guard lock(m_mutex); + Lock_guard lock{m_mutex}; const Function empty_rcv_buf_consume_func; assert(empty_rcv_buf_consume_func.empty()); @@ -270,7 +270,7 @@ void Peer_socket::close_abruptly(Error_code* err_code) // We are in user thread U != W. - Lock_guard lock(m_mutex); // Lock m_node/m_state; also it's a pre-condition for Node::close_abruptly(). + Lock_guard lock{m_mutex}; // Lock m_node/m_state; also it's a pre-condition for Node::close_abruptly(). const Ptr sock = shared_from_this(); if (!Node::ensure_sock_open(sock, err_code)) // Ensure it's open, so that we can access m_node. 
@@ -293,7 +293,7 @@ bool Peer_socket::set_options(const Peer_socket_options& opts, Error_code* err_c // We are in thread U != W. - Lock_guard lock(m_mutex); // Lock m_node at least. + Lock_guard lock{m_mutex}; // Lock m_node at least. const Ptr sock = shared_from_this(); if (!Node::ensure_sock_open(sock, err_code)) // Ensure it's open, so that we can access m_node. @@ -321,7 +321,7 @@ Peer_socket_info Peer_socket::info() const * Node). In the socket is closed (S_CLOSED), then no m_node owns it, so there is no thread W * applicable to this socket anymore, and we can just copy the data in thread U != W. */ - Lock_guard lock(m_mutex); // Lock m_node; also it's a pre-condition for Node::sock_info(). + Lock_guard lock{m_mutex}; // Lock m_node; also it's a pre-condition for Node::sock_info(). const Const_ptr sock = shared_from_this(); @@ -349,7 +349,7 @@ size_t Peer_socket::max_block_size_multiple(const size_t& opt_val_ref, const unsigned int* inflate_pct_val_ptr) const { // Similar to opt() but specialized for this purpose. Lock once to get both values. - Options_lock lock(m_opts_mutex); + Options_lock lock{m_opts_mutex}; const size_t& max_block_size = m_opts.m_st_max_block_size; const unsigned int inflate_pct = inflate_pct_val_ptr ? (*inflate_pct_val_ptr) : 0; @@ -388,7 +388,7 @@ size_t Peer_socket::get_connect_metadata(const boost::asio::mutable_buffer& buff // We are in user thread U != W. - Lock_guard lock(m_mutex); // Lock m_serialized_metadata (it can be changed in sock_free_memory()). + Lock_guard lock{m_mutex}; // Lock m_serialized_metadata (it can be changed in sock_free_memory()). if (!ensure_open(err_code)) // Ensure it's open; other m_serialized_metadata has been cleared. 
{ @@ -883,7 +883,7 @@ Error_code Node::sock_categorize_data_to_established(Peer_socket::Ptr sock, *dupe = true; *slide = false; - return Error_code(); + return Error_code{}; } // if (seq_num < rcv_next_seq_num) // else if (seq_num >= rcv_next_seq_num) @@ -919,7 +919,7 @@ Error_code Node::sock_categorize_data_to_established(Peer_socket::Ptr sock, *slide = true; *slide_size = size_t(seq_num_end - seq_num); assert(*slide_size == data.size()); - return Error_code(); + return Error_code{}; } // else if: @@ -1010,7 +1010,7 @@ Error_code Node::sock_categorize_data_to_established(Peer_socket::Ptr sock, } *dupe = false; - return Error_code(); + return Error_code{}; } // if (next_packet does not exist) // else if (next_packet exists at the same or later sequence number as seq_num) @@ -1047,7 +1047,7 @@ Error_code Node::sock_categorize_data_to_established(Peer_socket::Ptr sock, "sequence numbers [" << seq_num << ", " << seq_num_end << ")."); *dupe = true; - return Error_code(); + return Error_code{}; } // if (seq_num_next_start == seq_num) // else if: assert(seq_num_next_start > seq_num); // lower_bound() is not horrifically broken. @@ -1084,7 +1084,7 @@ Error_code Node::sock_categorize_data_to_established(Peer_socket::Ptr sock, FLOW_LOG_TRACE("New packet partially fills first gap without sliding window; " "sequence numbers [" << seq_num << ", " << seq_num_end << "); " "first unreceived packet [" << rcv_next_seq_num << "]."); - return Error_code(); // There are none. We're good. + return Error_code{}; // There are none. We're good. 
} const Peer_socket::Recvd_pkt_const_iter prev_packet = prior(next_packet); @@ -1112,7 +1112,7 @@ Error_code Node::sock_categorize_data_to_established(Peer_socket::Ptr sock, "sequence numbers [" << seq_num << ", " << seq_num_end << "); " "first unreceived packet [" << rcv_next_seq_num << "]."); - return Error_code(); + return Error_code{}; } // Node::sock_categorize_data_to_established() bool Node::sock_data_to_rcv_buf_unless_overflow(Peer_socket::Ptr sock, @@ -1132,7 +1132,7 @@ bool Node::sock_data_to_rcv_buf_unless_overflow(Peer_socket::Ptr sock, size_t buf_size; { // Receive Buffer can be consumed by user threads (not W) at the same time. Must lock. - Peer_socket::Lock_guard lock(sock->m_mutex); + Peer_socket::Lock_guard lock{sock->m_mutex}; /* First we must check if block will fit into sock->m_rcv_buf. Why not just use feed_buf_move()'s * max_data_size argument? Because that would allow to partially enqueue the block, if there's @@ -1177,7 +1177,7 @@ bool Node::sock_data_to_rcv_buf_unless_overflow(Peer_socket::Ptr sock, assert(written == data_size); buf_size = sock->m_rcv_buf.data_size(); - } // lock(sock->m_mutex) + } // lock{sock->m_mutex} // Register one packet of N bytes of acceptable data that we accepted -- did not drop. rcv_stats.good_data_accepted_packet(data_size); @@ -1258,7 +1258,7 @@ void Node::sock_track_new_data_after_gap_rexmit_off(Peer_socket::Ptr sock, #endif rcv_packets_with_gaps.insert (make_pair(seq_num, - Peer_socket::Received_packet::Ptr(new Peer_socket::Received_packet(get_logger(), data_size, 0)))); + Peer_socket::Received_packet::Ptr{new Peer_socket::Received_packet{get_logger(), data_size, 0}})); // m_rcv_reassembly_q_data_size untouched because !rexmit_on. assert(!sock->rexmit_on()); assert(insert_result.second); // If was already there, there's some serious bug in above logic. 
@@ -1435,7 +1435,7 @@ bool Node::sock_data_to_reassembly_q_unless_overflow(Peer_socket::Ptr sock, #endif rcv_packets_with_gaps.insert (make_pair(seq_num, // Decimation occurs in here: ------------------v, hence the `&`: -------------v. - Peer_socket::Received_packet::Ptr(new Peer_socket::Received_packet(get_logger(), data_size, &data)))); + Peer_socket::Received_packet::Ptr{new Peer_socket::Received_packet{get_logger(), data_size, &data}})); sock->m_rcv_reassembly_q_data_size += data_size; assert(insert_result.second); // If was already there, there's some serious bug in above logic. // No other part of the invariant is violated, so that's it. @@ -1509,7 +1509,7 @@ void Node::sock_slide_rcv_next_seq_num(Peer_socket::Ptr sock, size_t slide_size, size_t written; size_t buf_size; { - Peer_socket::Lock_guard lock(sock->m_mutex); + Peer_socket::Lock_guard lock{sock->m_mutex}; /* Reassemble! This is constant-time. Note we don't check for overflow here, but that's because we * checked for it cleverly in first enqueueing this in rcv_packets_with_gaps @@ -1588,7 +1588,7 @@ void Node::async_acknowledge_packet(Peer_socket::Ptr sock, const Sequence_number * substracting the ACK delay from its RTT measurement. */ sock->m_rcv_pending_acks.push_back (Peer_socket::Individual_ack::Ptr - (new Peer_socket::Individual_ack{ seq_num, rexmit_id, Fine_clock::now(), data_size })); + {new Peer_socket::Individual_ack{ seq_num, rexmit_id, Fine_clock::now(), data_size }}); /* m_rcv_pending_acks now stores at least one packet to acknowledge. We can acknowledge it * immediately (modulo UDP layer availability of course). However, suppose there is a fast stream @@ -2980,7 +2980,7 @@ Peer_socket::Sent_pkt_ordered_by_when_iter * Then, invariant: high_ack_count_q contains the acks for all send attempts P where * P.m_sent_when < cur_sent_pkt.m_sent_when. In particular, P.m_sent_when.top < cur_sent_pkt.m_sent_when. 
*/ priority_queue - high_ack_count_q(flying_now_acked_pkts.begin(), flying_now_acked_pkts.end()); + high_ack_count_q{flying_now_acked_pkts.begin(), flying_now_acked_pkts.end()}; // Invariant: this will be the m_acks_after_me increment applied to the just-considered packet in snd_flying_pkts*. using ack_count_t = Peer_socket::Sent_packet::ack_count_t; @@ -3153,7 +3153,7 @@ bool Node::drop_pkts_on_acks(Peer_socket::Ptr sock, if (!loss_event_finished) { if (// This is part of a new loss event if: There has been no loss event before this... - (sock->m_snd_last_loss_event_when != Fine_time_pt()) + (sock->m_snd_last_loss_event_when != Fine_time_pt{}) // ...OR there has, but this packet was sent after that event was detected. && (sent_when.m_sent_time < sock->m_snd_last_loss_event_when)) { @@ -3566,8 +3566,8 @@ void Node::new_round_trip_time_sample(Peer_socket::Ptr sock, Fine_duration round * high_resolution_timer exceed 5 microseconds. Therefore, let us pick the exceedingly * conservative G = 500 microseconds = 1/2 milliseconds. */ - const Fine_duration clock_resolution_at_least = microseconds(500); - const Fine_duration floor = seconds(1); + const Fine_duration clock_resolution_at_least = microseconds{500}; + const Fine_duration floor = seconds{1}; const Fine_duration ceiling = sock->opt(sock->m_opts.m_dyn_drop_timeout_ceiling); const unsigned int k = 4; @@ -3720,7 +3720,7 @@ Sequence_number Node::snd_past_last_flying_datum_seq_num(Peer_socket::Const_ptr const Peer_socket::Sent_pkt_by_seq_num_map& flying_packets = sock->m_snd_flying_pkts_by_seq_num; if (flying_packets.empty()) { - return Sequence_number(); // Default value. Less than all others. + return Sequence_number{}; // Default value. Less than all others. 
} // else @@ -3822,7 +3822,7 @@ void Node::snd_flying_pkts_push_one(Peer_socket::Ptr sock, // else Sequence_number seq_num_end; - get_seq_num_range(pkt_it, 0, &seq_num_end); + get_seq_num_range(pkt_it, nullptr, &seq_num_end); if (sock->rexmit_on()) { FLOW_LOG_TRACE_WITHOUT_CHECKING @@ -3920,7 +3920,7 @@ Peer_socket::Ptr Node::connect_with_metadata(const Remote_endpoint& to, if (!running()) { FLOW_ERROR_EMIT_ERROR(error::Code::S_NODE_NOT_RUNNING); - return Peer_socket::Ptr(); + return Peer_socket::Ptr{}; } // else @@ -3928,7 +3928,7 @@ Peer_socket::Ptr Node::connect_with_metadata(const Remote_endpoint& to, if (serialized_metadata.size() > max_block_size()) { FLOW_ERROR_EMIT_ERROR(error::Code::S_CONN_METADATA_TOO_LARGE); - return Peer_socket::Ptr(); + return Peer_socket::Ptr{}; } /* Put the rest of the work into thread W. For justification, see big comment in listen(). @@ -3947,7 +3947,7 @@ Peer_socket::Ptr Node::connect_with_metadata(const Remote_endpoint& to, if (sock->m_disconnect_cause) { *err_code = sock->m_disconnect_cause; - return Peer_socket::Ptr(); // sock will go out of scope and thus will be destroyed. + return Peer_socket::Ptr{}; // sock will go out of scope and thus will be destroyed. } // else err_code->clear(); @@ -3974,7 +3974,7 @@ void Node::connect_worker(const Remote_endpoint& to, const boost::asio::const_bu * validate them (for proper values and internal consistency, etc.). */ Error_code err_code; - const bool opts_ok = sock_validate_options(*sock_opts, 0, &err_code); + const bool opts_ok = sock_validate_options(*sock_opts, nullptr, &err_code); // Due to the advertised interface of the current method, we must create a socket even on error. sock.reset(sock_create(*sock_opts)); @@ -3998,7 +3998,7 @@ void Node::connect_worker(const Remote_endpoint& to, const boost::asio::const_bu * elsewhere when set. 
*/ Peer_socket* sock_non_ptr; { - Options_lock lock(m_opts_mutex); + Options_lock lock{m_opts_mutex}; sock_non_ptr = sock_create(m_opts.m_dyn_sock_opts); } sock.reset(sock_non_ptr); @@ -4017,7 +4017,7 @@ void Node::connect_worker(const Remote_endpoint& to, const boost::asio::const_bu * outgoing bandwidth based on incoming acknowledgments). It may be used by m_snd_cong_ctl, * depending on the strategy chosen, but may be useful in its own right. Hence it's a separate * object, not inside *m_snd_cong_ctl. */ - sock->m_snd_bandwidth_estimator.reset(new Send_bandwidth_estimator(get_logger(), sock)); + sock->m_snd_bandwidth_estimator.reset(new Send_bandwidth_estimator{get_logger(), sock}); // Initialize the connection's congestion control strategy based on the configured strategy. sock->m_snd_cong_ctl.reset @@ -4165,7 +4165,7 @@ Peer_socket::Ptr Node::sync_connect_impl(const Remote_endpoint& to, const Fine_d if (!event_set) { assert(*err_code == error::Code::S_NODE_NOT_RUNNING); - return Peer_socket::Ptr(); // *err_code is set. + return Peer_socket::Ptr{}; // *err_code is set. } // Now we know Node is running(); and we have event_set. @@ -4211,7 +4211,7 @@ Peer_socket::Ptr Node::sync_connect_impl(const Remote_endpoint& to, const Fine_d // Clean up (as discussed above). sock->close_abruptly(&dummy_prevents_throw); // Eat any error; user doesn't care. - return Peer_socket::Ptr(); // *err_code is set. + return Peer_socket::Ptr{}; // *err_code is set. } // if (sync_wait() failed) // else we know event_set is still open, and sync_wait() succeeded. @@ -4240,7 +4240,7 @@ Peer_socket::Ptr Node::sync_connect_impl(const Remote_endpoint& to, const Fine_d // Return error as above. *err_code = sock->m_disconnect_cause; // No need to lock; m_disconnect_cause set and can't change later. - return Peer_socket::Ptr(); + return Peer_socket::Ptr{}; } // else it's probably really ready for action. 
@@ -4251,7 +4251,7 @@ Peer_socket::Ptr Node::sync_connect_impl(const Remote_endpoint& to, const Fine_d // Timed out! Clean up socket, as above, and return null with a specific error (as advertised). sock->close_abruptly(&dummy_prevents_throw); *err_code = error::Code::S_WAIT_USER_TIMEOUT; - return Peer_socket::Ptr(); + return Peer_socket::Ptr{}; } // Node::sync_connect_impl() void Node::setup_connection_timers(const Socket_id& socket_id, Peer_socket::Ptr sock, bool initial) @@ -4400,12 +4400,12 @@ void Node::cancel_timers(Peer_socket::Ptr sock) if (sock->m_init_rexmit_scheduled_task) { scheduled_task_cancel(get_logger(), sock->m_init_rexmit_scheduled_task); - sock->m_init_rexmit_scheduled_task = Scheduled_task_handle(); + sock->m_init_rexmit_scheduled_task = Scheduled_task_handle{}; } if (sock->m_connection_timeout_scheduled_task) { scheduled_task_cancel(get_logger(), sock->m_connection_timeout_scheduled_task); - sock->m_connection_timeout_scheduled_task = Scheduled_task_handle(); + sock->m_connection_timeout_scheduled_task = Scheduled_task_handle{}; } if (sock->m_rcv_in_rcv_wnd_recovery) { @@ -4443,7 +4443,7 @@ void Node::setup_drop_timer(const Socket_id& socket_id, Peer_socket::Ptr sock) * Additionally, when events m_snd_drop_timer wants to know about happen, we will call * m_snd_drop_timer->on_...(). */ sock->m_snd_drop_timer = Drop_timer::create_drop_timer(get_logger(), &m_task_engine, &sock->m_snd_drop_timeout, - Peer_socket::Ptr(sock), on_fail, on_timer); + Peer_socket::Ptr{sock}, on_fail, on_timer); } size_t Node::send(Peer_socket::Ptr sock, @@ -4645,7 +4645,7 @@ bool Node::sock_is_writable(const boost::any& sock_as_any) const const Peer_socket::Const_ptr sock = any_cast(sock_as_any); - Peer_socket::Lock_guard lock(sock->m_mutex); // Many threads can access/write below state. + Peer_socket::Lock_guard lock{sock->m_mutex}; // Many threads can access/write below state. 
/* Our task here is to return true if and only if at this very moment calling sock->send() would * yield either a return value of > 0 OR a non-success *err_code. In other words, send() would @@ -4773,7 +4773,7 @@ void Node::send_worker(Peer_socket::Ptr sock, bool defer_delta_check) = sock->m_snd_drop_timeout * Idle_timeout_dto_factor::num / Idle_timeout_dto_factor::den; const Fine_duration since_last_send = Fine_clock::now() - sock->m_snd_last_data_sent_when; - if ((sock->m_snd_last_data_sent_when != Fine_time_pt()) && (since_last_send > idle_timeout)) + if ((sock->m_snd_last_data_sent_when != Fine_time_pt{}) && (since_last_send > idle_timeout)) { // Arguable if this should be INFO or TRACE. We'll see. FLOW_LOG_INFO("Idle timeout triggered for [" << sock << "]; " @@ -4805,7 +4805,7 @@ void Node::send_worker(Peer_socket::Ptr sock, bool defer_delta_check) const bool rexmit_on = sock->rexmit_on(); bool writable; // See below. { - Peer_socket::Lock_guard lock(sock->m_mutex); + Peer_socket::Lock_guard lock{sock->m_mutex}; // Check whether enough data in retransmission queue or snd_buf to send a packet. if (!snd_deqable(sock)) @@ -4882,7 +4882,7 @@ void Node::send_worker(Peer_socket::Ptr sock, bool defer_delta_check) * with m_snd_flying_pkts_by_sent_when. */ // New packet: create new metadata object. Record send time. (The latter will be rewritten later.) - sent_pkt = Peer_socket::Sent_packet::Ptr(new Peer_socket::Sent_packet(rexmit_on, data, sent_when)); + sent_pkt = Peer_socket::Sent_packet::Ptr{new Peer_socket::Sent_packet{rexmit_on, data, sent_when}}; } else // if (!rexmit_q.empty()) { @@ -5160,7 +5160,7 @@ bool Node::sock_is_readable(const boost::any& sock_as_any) const const Peer_socket::Const_ptr sock = any_cast(sock_as_any); - Peer_socket::Lock_guard lock(sock->m_mutex); // Many threads can access/write below state. + Peer_socket::Lock_guard lock{sock->m_mutex}; // Many threads can access/write below state. 
/* Our task here is to return true if and only if at this very moment calling sock->receive(), * assuming sufficient user buffer space, would yield either a return value of > 0 OR a @@ -5475,7 +5475,7 @@ void Node::receive_emptied_rcv_buf_while_disconnecting(Peer_socket::Ptr sock) * * Could think about locking later in this function, but this is called so rarely I'd rather not have to * worry about whether it's OK to do that and just not. */ - Peer_socket::Lock_guard lock(sock->m_mutex); + Peer_socket::Lock_guard lock{sock->m_mutex}; if (sock->m_state == Peer_socket::State::S_CLOSED) { @@ -5523,7 +5523,7 @@ void Node::receive_emptied_rcv_buf_while_disconnecting(Peer_socket::Ptr sock) FLOW_LOG_TRACE('[' << sock << "] " "is gracefully closing, and Receive buffer is now empty. Ready to permanently close."); close_connection_immediately(socket_id, sock, - Error_code(), /* err_code == success indicates clean close here. */ + Error_code{}, /* err_code == success indicates clean close here. */ false); /* ^-- defer_delta_check == false: for similar reason as when calling send_worker() from * send_worker_check_state(). */ @@ -5544,7 +5544,7 @@ void Node::close_abruptly(Peer_socket::Ptr sock, Error_code* err_code) * to the caller, because we must unlock at a specific point below, right before post()ing * close_abruptly_worker() onto thread W. Use a Lock_guard that adopts an * already-locked mutex. */ - Peer_socket::Lock_guard lock(sock->m_mutex, adopt_lock); + Peer_socket::Lock_guard lock{sock->m_mutex, adopt_lock}; if (!running()) { @@ -5626,7 +5626,7 @@ void Node::close_connection_immediately(const Socket_id& socket_id, Peer_socket: * sock and Server_socket serv that may have originated it (if it was a passive open). I will * comment on the locking situation for those data members as they come up in the code. */ - // First, set various state in *sock (including emptying Send and Receive buffers and setting m_node = 0). 
+ // First, set various state in *sock (including emptying Send and Receive buffers and setting m_node = nullptr). /* Save the final set of stats for Peer_socket::info(), as the source data will probably get * purged just below in sock_disconnect_*(). */ @@ -5929,22 +5929,22 @@ void Node::async_low_lvl_ack_send(Peer_socket::Ptr sock, const Error_code& sys_e "delay for packet [" << seq_num << ", ...) is [" << pkt_delay << "]; overflow; " "using max value [" << MAX_DELAY_VALUE << "] units."); // @todo Maybe there's a more sane ceiling value than the absolute maximum? - pkt_delay = Ack_packet::Ack_delay_time_unit(MAX_DELAY_VALUE); + pkt_delay = Ack_packet::Ack_delay_time_unit{MAX_DELAY_VALUE}; } // Finally write the individual acknowledgment. if (sock->rexmit_on()) { ack->m_rcv_acked_packets_rexmit_on_out.push_back - (Ack_packet::Individual_ack_rexmit_on(seq_num, + (Ack_packet::Individual_ack_rexmit_on{seq_num, ind_ack->m_rexmit_id, - Ack_packet::ack_delay_t(pkt_delay.count()))); + Ack_packet::ack_delay_t(pkt_delay.count())}); } else { ack->m_rcv_acked_packets_rexmit_off_out.push_back - (Ack_packet::Individual_ack_rexmit_off(seq_num, - Ack_packet::ack_delay_t(pkt_delay.count()))); + (Ack_packet::Individual_ack_rexmit_off{seq_num, + Ack_packet::ack_delay_t(pkt_delay.count())}); } size_est_so_far += size_est_inc; @@ -6016,7 +6016,7 @@ void Node::sock_set_int_state(Peer_socket::Ptr sock, Peer_socket::Int_state new_ void Node::sock_set_state(Peer_socket::Ptr sock, Peer_socket::State state, Peer_socket::Open_sub_state open_sub_state) { - Peer_socket::Lock_guard lock(sock->m_mutex); + Peer_socket::Lock_guard lock{sock->m_mutex}; // @todo Add TRACE logging. @@ -6031,14 +6031,14 @@ void Node::sock_set_state(Peer_socket::Ptr sock, Peer_socket::State state, Peer_ * receiving more data. At this point the originating Node removes the socket from its internal * structures. Therefore, the Node itself may even go away -- while this Peer_socket still * exists. 
Since we use shared_ptr when giving our socket objects, that's fine -- but we want to - * avoid returning an invalid Node* in node(). So, when S_CLOSED, sock->m_node = 0. */ - sock->m_node = 0; + * avoid returning an invalid Node* in node(). So, when S_CLOSED, sock->m_node = nullptr. */ + sock->m_node = nullptr; } } void Node::sock_disconnect_detected(Peer_socket::Ptr sock, const Error_code& disconnect_cause, bool close) { - Peer_socket::Lock_guard lock(sock->m_mutex); + Peer_socket::Lock_guard lock{sock->m_mutex}; sock->m_disconnect_cause = disconnect_cause; @@ -6059,7 +6059,7 @@ void Node::sock_disconnect_detected(Peer_socket::Ptr sock, const Error_code& dis void Node::sock_disconnect_completed(Peer_socket::Ptr sock) { - Peer_socket::Lock_guard lock(sock->m_mutex); + Peer_socket::Lock_guard lock{sock->m_mutex}; // Sanity-check pre-conditions. (Basically ensure disconnect_detected(err_code, false) was previously called.) assert(sock->m_disconnect_cause); @@ -6111,7 +6111,7 @@ bool Node::sock_set_options(Peer_socket::Ptr sock, const Peer_socket_options& op FLOW_LOG_TRACE("For [" << sock << "]:\n\n" << opts); // Will be writing sock->m_opts if all goes well, so must acquire exclusive ownership of m_opts. - Peer_socket::Options_lock lock(sock->m_opts_mutex); + Peer_socket::Options_lock lock{sock->m_opts_mutex}; /* Validate the new option set (including ensuring they're not changing static options' values). 
* Note that an explicit pre-condition of this method is that m_opts_mutex is locked if needed, @@ -6213,10 +6213,10 @@ bool Node::sock_validate_options(const Peer_socket_options& opts, VALIDATE_CHECK(opts.m_st_rcv_buf_max_size >= 4 * opts.m_st_max_block_size) && VALIDATE_CHECK(util::in_open_closed_range(0u, opts.m_st_rcv_buf_max_size_to_advertise_percent, 100u)) && VALIDATE_CHECK(opts.m_st_rcv_max_packets_after_unrecvd_packet_ratio_percent >= 100) && - VALIDATE_CHECK(opts.m_st_delayed_ack_timer_period <= seconds(1)) && + VALIDATE_CHECK(opts.m_st_delayed_ack_timer_period <= seconds{1}) && VALIDATE_CHECK(util::in_closed_range(Fine_duration::zero(), opts.m_st_delayed_ack_timer_period, - Fine_duration(seconds(1)))) && + Fine_duration{seconds{1}})) && VALIDATE_CHECK(opts.m_st_max_full_blocks_before_ack_send >= 1) && VALIDATE_CHECK(opts.m_st_max_rexmissions_per_packet >= 1) && VALIDATE_CHECK(opts.m_st_max_rexmissions_per_packet <= numeric_limits::max()); @@ -6304,7 +6304,7 @@ void Node::sock_load_info_struct(Peer_socket::Const_ptr sock, Peer_socket_info* { // Gotta lock, as Receive and Send buffers can be modified at any time by thread U at least. - Peer_socket::Lock_guard lock(sock->m_mutex); + Peer_socket::Lock_guard lock{sock->m_mutex}; stats->m_rcv_buf_size = sock->m_rcv_buf.data_size(); stats->m_snd_buf_size = sock->m_snd_buf.data_size(); } diff --git a/src/flow/net_flow/peer_socket.hpp b/src/flow/net_flow/peer_socket.hpp index 755c7a3f0..7d069ca3b 100644 --- a/src/flow/net_flow/peer_socket.hpp +++ b/src/flow/net_flow/peer_socket.hpp @@ -217,7 +217,7 @@ class Peer_socket : public util::Null_interface, // Endow us with shared_ptr<>s ::Ptr and ::Const_ptr (syntactic sugar). public util::Shared_ptr_alias_holder>, - // Allow access to Ptr(this) from inside Peer_socket methods. Just call shared_from_this(). + // Allow access to Ptr{this} from inside Peer_socket methods. Just call shared_from_this(). 
public boost::enable_shared_from_this, public log::Log_context, private boost::noncopyable @@ -286,7 +286,7 @@ class Peer_socket : * the current sub-state of `S_OPEN`. * @return Current main state of the socket. */ - State state(Open_sub_state* open_sub_state = 0) const; + State state(Open_sub_state* open_sub_state = nullptr) const; /** * Node that produced this Peer_socket. @@ -335,7 +335,7 @@ class Peer_socket : * @return The size of the copied metadata. */ size_t get_connect_metadata(const boost::asio::mutable_buffer& buffer, - Error_code* err_code = 0) const; + Error_code* err_code = nullptr) const; /** * Sends (adds to the Send buffer) the given bytes of data up to a maximum internal buffer size; @@ -405,7 +405,7 @@ class Peer_socket : * send() returns. */ template - size_t send(const Const_buffer_sequence& data, Error_code* err_code = 0); + size_t send(const Const_buffer_sequence& data, Error_code* err_code = nullptr); /** * Blocking (synchronous) version of send(). Acts just like send(), except that if Socket is not @@ -456,7 +456,7 @@ class Peer_socket : */ template size_t sync_send(const Const_buffer_sequence& data, - const boost::chrono::duration& max_wait, Error_code* err_code = 0); + const boost::chrono::duration& max_wait, Error_code* err_code = nullptr); /** * `sync_send()` operating in `nullptr_t` mode, wherein -- if Writable state is reached -- the actual data @@ -495,7 +495,7 @@ class Peer_socket : */ template bool sync_send(std::nullptr_t, - const boost::chrono::duration& max_wait, Error_code* err_code = 0); + const boost::chrono::duration& max_wait, Error_code* err_code = nullptr); /** * Equivalent to `sync_send(data, duration::max(), err_code)`; i.e., sync_send() with no timeout. @@ -509,7 +509,7 @@ class Peer_socket : * @return See other sync_send(). 
*/ template - size_t sync_send(const Const_buffer_sequence& data, Error_code* err_code = 0); + size_t sync_send(const Const_buffer_sequence& data, Error_code* err_code = nullptr); /** * Equivalent to `sync_send(nullptr, duration::max(), err_code)`; i.e., `sync_send(nullptr_t)` @@ -519,7 +519,7 @@ class Peer_socket : * See other sync_receive(). * @return See other sync_receive(). */ - bool sync_send(std::nullptr_t, Error_code* err_code = 0); + bool sync_send(std::nullptr_t, Error_code* err_code = nullptr); /** * Receives (consumes from the Receive buffer) bytes of data, up to a given maximum @@ -573,7 +573,7 @@ class Peer_socket : * when receive() returns. */ template - size_t receive(const Mutable_buffer_sequence& target, Error_code* err_code = 0); + size_t receive(const Mutable_buffer_sequence& target, Error_code* err_code = nullptr); /** * Blocking (synchronous) version of receive(). Acts just like receive(), except that if socket @@ -623,7 +623,7 @@ class Peer_socket : */ template size_t sync_receive(const Mutable_buffer_sequence& target, - const boost::chrono::duration& max_wait, Error_code* err_code = 0); + const boost::chrono::duration& max_wait, Error_code* err_code = nullptr); /** * `sync_receive()` operating in `nullptr_t` mode, wherein -- if Readable state is reached -- the actual data @@ -662,7 +662,7 @@ class Peer_socket : */ template bool sync_receive(std::nullptr_t, - const boost::chrono::duration& max_wait, Error_code* err_code = 0); + const boost::chrono::duration& max_wait, Error_code* err_code = nullptr); /** * Equivalent to `sync_receive(target, duration::max(), err_code)`; i.e., sync_receive() @@ -677,7 +677,7 @@ class Peer_socket : * @return See other sync_receive(). 
*/ template - size_t sync_receive(const Mutable_buffer_sequence& target, Error_code* err_code = 0); + size_t sync_receive(const Mutable_buffer_sequence& target, Error_code* err_code = nullptr); /** * Equivalent to `sync_receive(nullptr, duration::max(), err_code)`; i.e., `sync_receive(nullptr_t)` @@ -687,7 +687,7 @@ class Peer_socket : * See other sync_receive(). * @return See other sync_receive(). */ - bool sync_receive(std::nullptr_t, Error_code* err_code = 0); + bool sync_receive(std::nullptr_t, Error_code* err_code = nullptr); /** * Acts as if fatal error error::Code::S_USER_CLOSED_ABRUPTLY has been discovered on the @@ -723,7 +723,7 @@ class Peer_socket : * error::Code::S_NODE_NOT_RUNNING, or -- if socket already closed (`state() == State::S_CLOSED`) -- * then the error that caused the closure. */ - void close_abruptly(Error_code* err_code = 0); + void close_abruptly(Error_code* err_code = nullptr); /** * Dynamically replaces the current options set (options()) with the given options set. @@ -742,7 +742,7 @@ class Peer_socket : * error::Code::S_NODE_NOT_RUNNING. * @return `true` on success, `false` on error. */ - bool set_options(const Peer_socket_options& opts, Error_code* err_code = 0); + bool set_options(const Peer_socket_options& opts, Error_code* err_code = nullptr); /** * Copies this socket's option set and returns that copy. If you intend to use set_options() to @@ -1005,7 +1005,7 @@ class Peer_socket : * See sync_send(). * @param wait_until * See `sync_send(timeout)`. This is the absolute time point corresponding to that. - * `"duration::max()"` maps to the value `Fine_time_pt()` (Epoch) for this argument. + * `"duration::max()"` maps to the value `Fine_time_pt{}` (Epoch) for this argument. * @param err_code * See sync_send(). * @return See sync_send(). @@ -1065,7 +1065,7 @@ class Peer_socket : * See sync_receive(). * @param wait_until * See `sync_receive(timeout)`. This is the absolute time point corresponding to that. 
- * `"duration::max()"` maps to the value `Fine_time_pt()` (Epoch) for this argument. + * `"duration::max()"` maps to the value `Fine_time_pt{}` (Epoch) for this argument. * @param err_code * See sync_receive(). * @return See sync_receive(). @@ -1133,7 +1133,7 @@ class Peer_socket : * @return See above. */ size_t max_block_size_multiple(const size_t& opt_val_ref, - const unsigned int* inflate_pct_val_ptr = 0) const; + const unsigned int* inflate_pct_val_ptr = nullptr) const; /** * Whether retransmission is enabled on this connection. Short-hand for appropriate opt() call. @@ -1547,7 +1547,7 @@ class Peer_socket : * data structure because we may not send each desired acknowledgment right away, combining * several together, thus reducing overhead at the cost of short delays (or even nearly * non-existent delays, as in the case of several DATA packets handled in one - * NodeLLlow_lvl_recv_and_handle() invocation, i.e., having arrived at nearly at the same time). + * Node::low_lvl_recv_and_handle() invocation, i.e., having arrived at nearly at the same time). * * Any two packets represented by these Individual_ack objects may be duplicates of each other (same * Sequence_number, possibly different Individual_ack::m_received_when values). It's up to the sender (receiver @@ -2156,7 +2156,7 @@ class Peer_socket : /** * This is the final set of stats collected at the time the socket was moved to S_CLOSED #m_state. - * If it has not yet moved to that state, this is not applicable (but equals Peer_socket_info()). + * If it has not yet moved to that state, this is not applicable (but equals `Peer_socket_info{}`). * It's used by info() to get at the final set of stats, before the source info is purged by the * resource cleanup in sock_free_memory(). */ @@ -2443,7 +2443,7 @@ size_t Peer_socket::send(const Const_buffer_sequence& data, Error_code* err_code // We are in user thread U != W. - Lock_guard lock(m_mutex); // Lock m_node; also it's a pre-condition for Node::send(). 
+ Lock_guard lock{m_mutex}; // Lock m_node; also it's a pre-condition for Node::send(). /* Forward the rest of the logic to Node. * Now, what I really want to do here is simply: @@ -2478,7 +2478,7 @@ size_t Peer_socket::send(const Const_buffer_sequence& data, Error_code* err_code template size_t Peer_socket::sync_send(const Const_buffer_sequence& data, Error_code* err_code) { - return sync_send_impl(data, Fine_time_pt(), err_code); // sync_send() with infinite timeout. + return sync_send_impl(data, Fine_time_pt{}, err_code); // sync_send() with infinite timeout. } template @@ -2511,7 +2511,7 @@ size_t Peer_socket::sync_send_impl(const Const_buffer_sequence& data, const Fine // We are in user thread U != W. - Lock_guard lock(m_mutex); // Lock m_node; also it's a pre-condition for Node::send(). + Lock_guard lock{m_mutex}; // Lock m_node; also it's a pre-condition for Node::send(). /* Forward the rest of the logic to Node. * Now, what I really want to do here is simply: @@ -2540,7 +2540,7 @@ size_t Peer_socket::receive(const Mutable_buffer_sequence& target, Error_code* e // We are in user thread U != W. - Lock_guard lock(m_mutex); // Lock m_node/m_state; also it's a pre-condition for Node::receive(). + Lock_guard lock{m_mutex}; // Lock m_node/m_state; also it's a pre-condition for Node::receive(). /* Forward the rest of the logic to Node. * Now, what I really want to do here is simply: @@ -2565,7 +2565,7 @@ size_t Peer_socket::receive(const Mutable_buffer_sequence& target, Error_code* e template size_t Peer_socket::sync_receive(const Mutable_buffer_sequence& target, Error_code* err_code) { - return sync_receive_impl(target, Fine_time_pt(), err_code); // sync_receive() with infinite timeout. + return sync_receive_impl(target, Fine_time_pt{}, err_code); // sync_receive() with infinite timeout. } template @@ -2596,7 +2596,7 @@ size_t Peer_socket::sync_receive_impl(const Mutable_buffer_sequence& target, // We are in user thread U != W. 
- Lock_guard lock(m_mutex); // Lock m_node; also it's a pre-condition for Node::send(). + Lock_guard lock{m_mutex}; // Lock m_node; also it's a pre-condition for Node::send(). /* Forward the rest of the logic to Node. * Now, what I really want to do here is simply: @@ -2621,7 +2621,7 @@ template Opt_type Peer_socket::opt(const Opt_type& opt_val_ref) const { // Similar to Node::opt(). - Options_lock lock(m_opts_mutex); + Options_lock lock{m_opts_mutex}; return opt_val_ref; } diff --git a/src/flow/net_flow/server_socket.cpp b/src/flow/net_flow/server_socket.cpp index 918b12498..688162e61 100644 --- a/src/flow/net_flow/server_socket.cpp +++ b/src/flow/net_flow/server_socket.cpp @@ -33,9 +33,9 @@ Server_socket::Server_socket(log::Logger* logger_ptr, const Peer_socket_options* * (when people connect to us), each peer socket's per-socket options will be copies of this. If * they did not supply a Peer_socket_options, the Node's global Peer_socket_options will be used * for each subsequent Peer_socket. */ - m_child_sock_opts(child_sock_opts ? new Peer_socket_options(*child_sock_opts) : 0), + m_child_sock_opts(child_sock_opts ? new Peer_socket_options{*child_sock_opts} : nullptr), m_state(State::S_CLOSED), // Incorrect; set explicitly. - m_node(0), // Incorrect; set explicitly. + m_node(nullptr), // Incorrect; set explicitly. m_local_port(S_PORT_ANY) // Incorrect; set explicitly. { // Only print pointer value, because most members are garbage at this point. @@ -44,26 +44,26 @@ Server_socket::Server_socket(log::Logger* logger_ptr, const Peer_socket_options* Server_socket::~Server_socket() { - delete m_child_sock_opts; // May be 0 (that's okay). + delete m_child_sock_opts; // May be null (that's okay). FLOW_LOG_TRACE("Server_socket [" << this << "] destroyed."); } Server_socket::State Server_socket::state() const { - Lock_guard lock(m_mutex); // State is liable to change at any time. + Lock_guard lock{m_mutex}; // State is liable to change at any time. 
return m_state; } Node* Server_socket::node() const { - Lock_guard lock(m_mutex); // m_node can simultaneously change to 0 if state changes to S_CLOSED. + Lock_guard lock{m_mutex}; // m_node can simultaneously change to 0 if state changes to S_CLOSED. return m_node; } Error_code Server_socket::disconnect_cause() const { - Lock_guard lock(m_mutex); + Lock_guard lock{m_mutex}; return m_disconnect_cause; } @@ -79,12 +79,12 @@ Peer_socket::Ptr Server_socket::accept(Error_code* err_code) // We are in user thread U != W. - Lock_guard lock(m_mutex); + Lock_guard lock{m_mutex}; const Ptr serv = shared_from_this(); if (!Node::ensure_sock_open(serv, err_code)) // Ensure it's open, so that we can access m_node. { - return Peer_socket::Ptr(); + return Peer_socket::Ptr{}; } // else m_node is valid. @@ -94,7 +94,7 @@ Peer_socket::Ptr Server_socket::accept(Error_code* err_code) Peer_socket::Ptr Server_socket::sync_accept(bool reactor_pattern, Error_code* err_code) { - return sync_accept_impl(Fine_time_pt(), reactor_pattern, err_code); + return sync_accept_impl(Fine_time_pt{}, reactor_pattern, err_code); } Peer_socket::Ptr Server_socket::sync_accept_impl(const Fine_time_pt& wait_until, bool reactor_pattern, @@ -107,12 +107,12 @@ Peer_socket::Ptr Server_socket::sync_accept_impl(const Fine_time_pt& wait_until, // We are in user thread U != W. - Lock_guard lock(m_mutex); + Lock_guard lock{m_mutex}; const Ptr serv = shared_from_this(); if (!Node::ensure_sock_open(serv, err_code)) // Ensure it's open, so that we can access m_node. { - return Peer_socket::Ptr(); + return Peer_socket::Ptr{}; } // else m_node is valid. @@ -123,7 +123,7 @@ Peer_socket::Ptr Server_socket::sync_accept_impl(const Fine_time_pt& wait_until, /* Operating on Server_sockets, returning Peer_socket::Ptr; Event_set socket set type is * Server_sockets. * Object is serv; non-blocking operation is m_node->accept(...) -- or N/A in "reactor pattern" mode.. 
- * Peer_socket::Ptr() is the "would-block" return value for this operation. + * Peer_socket::Ptr{} is the "would-block" return value for this operation. * S_SERVER_SOCKET_ACCEPTABLE is the type of event to watch for here. */ return m_node ->sync_op @@ -132,7 +132,7 @@ Peer_socket::Ptr Server_socket::sync_accept_impl(const Fine_time_pt& wait_until, ? Function() : Function([this, serv, err_code]() -> Peer_socket::Ptr { return m_node->accept(serv, err_code); }), - Peer_socket::Ptr(), Event_set::Event_type::S_SERVER_SOCKET_ACCEPTABLE, + Peer_socket::Ptr{}, Event_set::Event_type::S_SERVER_SOCKET_ACCEPTABLE, wait_until, err_code); } // Server_socket::sync_accept_impl() @@ -154,7 +154,7 @@ Server_socket::Ptr Node::listen(flow_port_t local_port, Error_code* err_code, if (!running()) { FLOW_ERROR_EMIT_ERROR(error::Code::S_NODE_NOT_RUNNING); - return Server_socket::Ptr(); + return Server_socket::Ptr{}; } // else @@ -205,7 +205,7 @@ Server_socket::Ptr Node::listen(flow_port_t local_port, Error_code* err_code, if (serv->m_disconnect_cause) { *err_code = serv->m_disconnect_cause; - return Server_socket::Ptr(); // serv will go out of scope and thus will be destroyed. + return Server_socket::Ptr{}; // serv will go out of scope and thus will be destroyed. } // else err_code->clear(); @@ -229,7 +229,7 @@ void Node::listen_worker(flow_port_t local_port, const Peer_socket_options* chil * (for proper values and internal consistency, etc.). */ Error_code err_code; - const bool opts_ok = sock_validate_options(*child_sock_opts, 0, &err_code); + const bool opts_ok = sock_validate_options(*child_sock_opts, nullptr, &err_code); // Due to the advertised interface of the current method, we must create a socket even on error. 
serv.reset(serv_create(child_sock_opts)); @@ -248,7 +248,7 @@ void Node::listen_worker(flow_port_t local_port, const Peer_socket_options* chil * Peer_socket constructor; this will mean that when a Peer_socket is generated on connection, * the code is to provide a copy of the global template for the per-socket options. That will * happen later; we just pass in null. */ - serv.reset(serv_create(0)); + serv.reset(serv_create(nullptr)); } // Server socket created; set members. @@ -309,7 +309,7 @@ Peer_socket::Ptr Node::accept(Server_socket::Ptr serv, Error_code* err_code) FLOW_ERROR_EMIT_ERROR_LOG_INFO(serv->m_disconnect_cause); // Not listening anymore; pretend nothing on queue. - return Peer_socket::Ptr(); + return Peer_socket::Ptr{}; } // else assert(serv->m_state == Server_socket::State::S_LISTENING); @@ -318,13 +318,14 @@ Peer_socket::Ptr Node::accept(Server_socket::Ptr serv, Error_code* err_code) { // Nothing on the queue. As advertised, this is not an error in LISTENING state. err_code->clear(); - return Peer_socket::Ptr(); + return Peer_socket::Ptr{}; } // else // Pop from queue. Linked_hash_set queues things up at the front (via insert()), so pop from the back. - Peer_socket::Ptr sock = serv->m_unaccepted_socks.const_back(); - serv->m_unaccepted_socks.pop_back(); + const auto it = --serv->m_unaccepted_socks.cend(); + Peer_socket::Ptr sock = *it; + serv->m_unaccepted_socks.erase(it); /* Now that it's accepted, remove reference to the server socket, so that when the server socket * is closed, sock is not closed (since it's a fully functioning independent socket now). */ @@ -342,7 +343,7 @@ bool Node::serv_is_acceptable(const boost::any& serv_as_any) const const Server_socket::Const_ptr serv = any_cast(serv_as_any); - Peer_socket::Lock_guard lock(serv->m_mutex); // Many threads can access/write below state. + Peer_socket::Lock_guard lock{serv->m_mutex}; // Many threads can access/write below state. 
/* Our task here is to return true if and only if at this very moment calling serv->accept()would * yield either a non-null return value OR a non-success *err_code. In other words, @@ -369,7 +370,7 @@ void Node::close_empty_server_immediately(const flow_port_t local_port, Server_s // Caller should have closed all the associated sockets already. assert(serv->m_connecting_socks.empty()); { - Server_socket::Lock_guard lock(serv->m_mutex); // At least m_unaccepted_socks can be accessed by user. + Server_socket::Lock_guard lock{serv->m_mutex}; // At least m_unaccepted_socks can be accessed by user. assert(serv->m_unaccepted_socks.empty()); } @@ -410,7 +411,7 @@ void Node::close_empty_server_immediately(const flow_port_t local_port, Server_s void Node::serv_set_state(Server_socket::Ptr serv, Server_socket::State state) { - Server_socket::Lock_guard lock(serv->m_mutex); + Server_socket::Lock_guard lock{serv->m_mutex}; // @todo Add TRACE logging. @@ -422,8 +423,8 @@ void Node::serv_set_state(Server_socket::Ptr serv, Server_socket::State state) * socket from its internal structures. Therefore, the Node itself may even go away -- while * this Server_socket still exists. Since we use shared_ptr when giving our socket objects, * that's fine -- but we want to avoid returning an invalid Node* in node(). So, when - * S_CLOSED, serv->m_node = 0. */ - serv->m_node = 0; + * S_CLOSED, serv->m_node = nullptr. */ + serv->m_node = nullptr; } } @@ -457,7 +458,7 @@ Peer_socket::Ptr Node::handle_syn_to_listening_server(Server_socket::Ptr serv, * * Note: no need to validate; global options (including per-socket ones) are validated * elsewhere when set. */ - Options_lock lock(m_opts_mutex); + Options_lock lock{m_opts_mutex}; sock.reset(sock_create(m_opts.m_dyn_sock_opts)); } @@ -479,7 +480,7 @@ Peer_socket::Ptr Node::handle_syn_to_listening_server(Server_socket::Ptr serv, * outgoing bandwidth based on incoming acknowledgments). 
It may be used by m_snd_cong_ctl, * depending on the strategy chosen, but may be useful in its own right. Hence it's a separate * object, not inside *m_snd_cong_ctl. */ - sock->m_snd_bandwidth_estimator.reset(new Send_bandwidth_estimator(get_logger(), sock)); + sock->m_snd_bandwidth_estimator.reset(new Send_bandwidth_estimator{get_logger(), sock}); // Initialize the connection's congestion control strategy based on the configured strategy. sock->m_snd_cong_ctl.reset @@ -503,7 +504,7 @@ Peer_socket::Ptr Node::handle_syn_to_listening_server(Server_socket::Ptr serv, * connection isn't going to happen. We didn't place sock into m_socks, so just let it * disappear via shared_ptr<> magic. */ async_no_sock_low_lvl_rst_send(Low_lvl_packet::const_ptr_cast(syn), low_lvl_remote_endpoint); - return Peer_socket::Ptr(); + return Peer_socket::Ptr{}; } // else @@ -522,7 +523,7 @@ Peer_socket::Ptr Node::handle_syn_to_listening_server(Server_socket::Ptr serv, // Same reasoning as above: send RST, and let sock disappear. async_no_sock_low_lvl_rst_send(syn, low_lvl_remote_endpoint); - return Peer_socket::Ptr(); + return Peer_socket::Ptr{}; } // if (that socket pair already exists) // else @@ -754,7 +755,7 @@ void Node::serv_close_detected(Server_socket::Ptr serv, /* @todo Nothing calls this yet, as we don't support any way to close a Server_socket yet. * Probably will reconsider this method when we do. */ - Server_socket::Lock_guard lock(serv->m_mutex); + Server_socket::Lock_guard lock{serv->m_mutex}; serv->m_disconnect_cause = disconnect_cause; if (close) { @@ -796,7 +797,7 @@ void Node::serv_peer_socket_closed(Server_socket::Ptr serv, Peer_socket::Ptr soc /* Remove from serv->m_unaccepted_socks. At this point accept() can access serv->m_unaccepted_socks and * m_originating_serv, so we must lock. */ - Server_socket::Lock_guard lock(serv->m_mutex); + Server_socket::Lock_guard lock{serv->m_mutex}; sock->m_originating_serv.reset(); // Maintain invariant. 
@@ -825,7 +826,7 @@ void Node::serv_peer_socket_acceptable(Server_socket::Ptr serv, Peer_socket::Ptr // We are in thread W. { - Server_socket::Lock_guard lock(serv->m_mutex); + Server_socket::Lock_guard lock{serv->m_mutex}; serv->m_unaccepted_socks.insert(sock); // Remember that Linked_hash_set<> insert()s at the *front*. } // This guy is only to be accessed from thread W (which we're in), so no lock needed. diff --git a/src/flow/net_flow/server_socket.hpp b/src/flow/net_flow/server_socket.hpp index aa1576305..0c0c0095b 100644 --- a/src/flow/net_flow/server_socket.hpp +++ b/src/flow/net_flow/server_socket.hpp @@ -119,7 +119,7 @@ class Server_socket : public util::Null_interface, // Endow us with shared_ptr<>s ::Ptr and ::Const_ptr (syntactic sugar). public util::Shared_ptr_alias_holder>, - // Allow access to Ptr(this) from inside Server_socket methods. Just call shared_from_this(). + // Allow access to Ptr{this} from inside Server_socket methods. Just call shared_from_this(). public boost::enable_shared_from_this, public log::Log_context, private boost::noncopyable @@ -156,7 +156,7 @@ class Server_socket : /** * Node that produced this Server_socket. - * @return Pointer to (guaranteed valid) Node; 0 if state is S_CLOSED. + * @return Pointer to (guaranteed valid) Node; null if state is S_CLOSED. */ Node* node() const; @@ -191,7 +191,7 @@ class Server_socket : * @return A Peer_socket `sock` with `sock->state() == Peer_socket::State::S_OPEN`. If no * connections are available (including if `bool(*err_code) == true`), returns null pointer. */ - Peer_socket_ptr accept(Error_code* err_code = 0); + Peer_socket_ptr accept(Error_code* err_code = nullptr); /** * Blocking (synchronous) version of accept(). 
Acts just like accept(), except that if `*this` is @@ -246,7 +246,7 @@ class Server_socket : template Peer_socket_ptr sync_accept(const boost::chrono::duration& max_wait, bool reactor_pattern = false, - Error_code* err_code = 0); + Error_code* err_code = nullptr); /** * Equivalent to `sync_accept(duration::max(), reactor_pattern, err_code)`; i.e., sync_accept() with no user @@ -258,7 +258,7 @@ class Server_socket : * See other sync_accept(). * @return See other sync_accept(). */ - Peer_socket_ptr sync_accept(bool reactor_pattern = false, Error_code* err_code = 0); + Peer_socket_ptr sync_accept(bool reactor_pattern = false, Error_code* err_code = nullptr); /** * The error code that perviously caused state() to become State::S_CLOSED, or success code if state @@ -317,7 +317,7 @@ class Server_socket : * * @param wait_until * See `sync_accept(timeout)`. This is the absolute time point corresponding to that. - * `"duration::max()"` maps to the value `Fine_time_pt()` (Epoch) for this argument. + * `"duration::max()"` maps to the value `Fine_time_pt{}` (Epoch) for this argument. * @param reactor_pattern * See sync_accept(). * @param err_code @@ -340,7 +340,7 @@ class Server_socket : State m_state; /** - * See node(). Should be set before user gets access to `*this` and not changed, except to 0 when + * See node(). Should be set before user gets access to `*this` and not changed, except to null when * state is S_CLOSED. Must not be modified by non-W threads. 
*/ Node* m_node; diff --git a/src/flow/perf/checkpt_timer.cpp b/src/flow/perf/checkpt_timer.cpp index 1d04778df..eef2787ed 100644 --- a/src/flow/perf/checkpt_timer.cpp +++ b/src/flow/perf/checkpt_timer.cpp @@ -160,8 +160,8 @@ Time_pt Checkpointing_timer::now(Clock_type clock_type) #ifndef NDEBUG assert(ok); #endif - return Time_pt(nanoseconds(duration_rep_t(time_spec.tv_sec) * duration_rep_t(1000 * 1000 * 1000) - + duration_rep_t(time_spec.tv_nsec))); + return Time_pt{nanoseconds{duration_rep_t(time_spec.tv_sec) * duration_rep_t(1000 * 1000 * 1000) + + duration_rep_t(time_spec.tv_nsec)}}; } case Clock_type::S_CPU_THREAD_TOTAL_HI_RES: { @@ -174,8 +174,8 @@ Time_pt Checkpointing_timer::now(Clock_type clock_type) #ifndef NDEBUG assert(ok); #endif - return Time_pt(nanoseconds(duration_rep_t(time_spec.tv_sec) * duration_rep_t(1000 * 1000 * 1000) - + duration_rep_t(time_spec.tv_nsec))); + return Time_pt{nanoseconds{duration_rep_t(time_spec.tv_sec) * duration_rep_t(1000 * 1000 * 1000) + + duration_rep_t(time_spec.tv_nsec)}}; } case Clock_type::S_END_SENTINEL: assert(false && "END_SENTINEL passed to now() -- must specify actual Clock_type."); @@ -183,7 +183,7 @@ Time_pt Checkpointing_timer::now(Clock_type clock_type) } assert(false && "Bug? now() forgot to handle a Clock_type, yet compiler did not warn in switch()?"); - return Time_pt(); + return Time_pt{}; } // Checkpointing_timer::now(Clock_type) Time_pt_set Checkpointing_timer::now(const Clock_types_subset& which_clocks) // Static. @@ -196,7 +196,7 @@ Time_pt_set Checkpointing_timer::now(const Clock_types_subset& which_clocks) // * So just leave unused clock types' values at 0, and then aggregation will just add up a bunch of zeroes, * when performance no longer matters. This fill() should be quite cheap. * Also, string/stream output will list 0 on unused clocks instead of printing potential garbage. 
*/ - time_pt_set.m_values.fill(Time_pt()); + time_pt_set.m_values.fill(Time_pt{}); const bool do_cpu_user_lo = which_clocks[size_t(Clock_type::S_CPU_USER_LO_RES)]; const bool do_cpu_sys_lo = which_clocks[size_t(Clock_type::S_CPU_SYS_LO_RES)]; @@ -265,8 +265,8 @@ Time_pt Checkpointing_timer::now_cpu_lo_res(const Cpu_split_clock_durs_since_epo * hence we can construct Cpu_split_component_duration from each. Probably type of each of the following is * nanoseconds; but anything with that resolution or worse will convert OK. Otherwise it wouldn't compile below * when converting to Duration in the construction of Time_pt. */ - return Time_pt(Duration(Cpu_split_component_duration(user_else_sys ? cpu_combo_now_raw.user - : cpu_combo_now_raw.system))); + return Time_pt{Duration{Cpu_split_component_duration(user_else_sys ? cpu_combo_now_raw.user + : cpu_combo_now_raw.system)}}; } // Checkpointing_timer::now_cpu_lo_res() const Checkpointing_timer::Checkpoint& Checkpointing_timer::checkpoint(std::string&& name_moved) @@ -379,7 +379,7 @@ Checkpointing_timer_ptr Checkpointing_timer::Aggregator::create_aggregated_resul auto model_timer = m_timers.front(); // Construct the thing minimally and fill out the rest of the fields as the public ctor would, with special values. - Checkpointing_timer_ptr agg_timer(new Checkpointing_timer(get_logger(), string(m_name), model_timer->m_which_clocks)); + Checkpointing_timer_ptr agg_timer{new Checkpointing_timer{get_logger(), string(m_name), model_timer->m_which_clocks}}; agg_timer->m_checkpoints.reserve(model_timer->m_checkpoints.size()); // m_start_when, m_last_checkpoint_when, and m_checkpoints itself are empty; we set them below. @@ -433,7 +433,7 @@ Checkpointing_timer_ptr Checkpointing_timer::Aggregator::create_aggregated_resul /* Note it's currently an uninitialized array; set it to 0s. Then the below calls will yield appropriate * agg_timer->since_start() result. 
This is a bit of a hack, perhaps, but since_start() is not an lvalue, and this * does work given that fact. */ - agg_timer->m_start_when.m_values.fill(Time_pt()); + agg_timer->m_start_when.m_values.fill(Time_pt{}); agg_timer->m_last_checkpoint_when = agg_timer->m_start_when; agg_timer->m_last_checkpoint_when += total_dur; // This is what we really wanted, but since_start() isn't an lvalue hence the above hackery. diff --git a/src/flow/perf/checkpt_timer.hpp b/src/flow/perf/checkpt_timer.hpp index 98538cc25..cafb77d3d 100644 --- a/src/flow/perf/checkpt_timer.hpp +++ b/src/flow/perf/checkpt_timer.hpp @@ -73,7 +73,7 @@ namespace flow::perf * lowest possible overhead: * * ~~~ - * flow::perf::Checkpointing_timer sum_timer(logger, "op name", which_clocks, 1); // 1 checkpoint only. + * flow::perf::Checkpointing_timer sum_timer{logger, "op name", which_clocks, 1}; // 1 checkpoint only. * const unsigned int n_samples = 1000000; * for (unsigned int sample_idx = 0; sample_idx != n_samples; ++sample_idx) * { @@ -85,7 +85,7 @@ namespace flow::perf * // And/or: Log mean duration(s), times N_SAMPLES_SCALE_CONVENIENCE, one per clock type! * { * // Just make a copy of the raw sum, then scale it x N_SAMPLES_SCALE_CONVENIENCE / n_samples. - * auto mean_scaled_timer(sum_timer); + * auto mean_scaled_timer = sum_timer; * mean_scaled_timer.scale(N_SAMPLES_SCALE_CONVENIENCE, n_samples); * flow::perf::Checkpointing_timer::Aggregator::log_aggregated_result_in_timer(mean_scaled_timer, n_samples, * true, N_SAMPLES_SCALE_CONVENIENCE); @@ -389,7 +389,7 @@ class Checkpointing_timer : * Sample all currently enabled `Clock_type`s' clocks and return those values, each of which is a time stamp * relative to some Epoch value. (The Epoch value differs by Clock_type. Durations can be generated by subtracting * one time point from another which mathematically makes Epoch's meaning irrelevant.) - * The value `Time_pt()` (internally: 0 a/k/a Epoch) is set for the disabled clocks. 
In particular checkpoint() will + * The value `Time_pt{}` (internally: 0 a/k/a Epoch) is set for the disabled clocks. In particular checkpoint() will * internally call this. * * ### Rationale ### @@ -546,7 +546,7 @@ class Checkpointing_timer : * construct it on the heap and provide a Checkpointing_timer_ptr accordingly: * * ~~~ - * Checkpointing_timer_ptr X(new Checkpointing_timer(...)); + * Checkpointing_timer_ptr X(new Checkpointing_timer{...}); * ~~~ * * ### Thread safety ### diff --git a/src/flow/perf/perf_fwd.hpp b/src/flow/perf/perf_fwd.hpp index 9e392302f..f5e260a5d 100644 --- a/src/flow/perf/perf_fwd.hpp +++ b/src/flow/perf/perf_fwd.hpp @@ -66,7 +66,7 @@ using Checkpointing_timer_ptr = boost::shared_ptr; * not using `atomic`. * * ~~~ - * flow::perf::duration_rep_t accumulated_ticks(0); + * flow::perf::duration_rep_t accumulated_ticks{0}; * const auto timed_func * = flow::perf::timed_function * (flow::perf::Clock_type::S_CPU_THREAD_TOTAL_HI_RES, &accumulated_ticks, @@ -76,7 +76,7 @@ using Checkpointing_timer_ptr = boost::shared_ptr; * timed_func(7, 7); // Note it can only be called void-style. * // ... * // Later, here's the result. Note the construction from type-unsafe ticks to type-safe Duration. - * const flow::perf::Duration total_dur(accumulated_ticks); + * const flow::perf::Duration total_dur{accumulated_ticks}; * // Can convert to whatever units type-safely now (duration_cast<> in this case allows for precision loss). 
* const auto total_dur_us = chrono::duration_cast(total_dur); * ~~~ @@ -84,7 +84,7 @@ using Checkpointing_timer_ptr = boost::shared_ptr; * Same thing but with an `atomic` to support timing/execution occuring concurrently: * * ~~~ - * std::atomic accumulated_ticks(0); + * std::atomic accumulated_ticks{0}; * const auto timed_func * = flow::perf::timed_function * (flow::perf::Clock_type::S_CPU_THREAD_TOTAL_HI_RES, &accumulated_ticks, @@ -94,7 +94,7 @@ using Checkpointing_timer_ptr = boost::shared_ptr; * timed_func(7, 7); // Note it can only be called void-style. * // ... * // Later, here's the result. Note the construction from type-unsafe ticks to type-safe Duration. - * const flow::perf::Duration total_dur(accumulated_ticks); + * const flow::perf::Duration total_dur{accumulated_ticks}; * // Can convert to whatever units type-safely now (duration_cast<> in this case allows for precision loss). * const auto total_dur_us = chrono::duration_cast(total_dur); * ~~~ @@ -108,7 +108,7 @@ using Checkpointing_timer_ptr = boost::shared_ptr; * `Accumulator` is understood to store raw ticks of #Duration -- not actual #Duration -- for performance reasons * (to wit: so that `atomic` plus-equals can be made use of, if it exists). If you need a #Duration * ultimately -- and for type safety you really *should* -- it is up to you to construct a #Duration from the - * accumulated `duration_rep_t`. This is trivially done via the `Duration(duration_rep_t)` constructor. + * accumulated `duration_rep_t`. This is trivially done via the `Duration{duration_rep_t}` constructor. * * @todo timed_function(), when operating on an `atomic`, uses `+=` for accumulation which may be * lock-free but uses strict ordering; a version that uses `fetch_add()` with relaxed ordering may be desirable @@ -154,7 +154,7 @@ auto timed_function(Clock_type clock_type, Accumulator* accumulator, Func&& func * allowing for concurrency by using an `atomic`. The difference: `timed_func()` returns a value. 
* * ~~~ - * std::atomic accumulated_ticks(0); + * std::atomic accumulated_ticks{0}; * const auto timed_func * = flow::perf::timed_function_nvr * (flow::perf::Clock_type::S_CPU_THREAD_TOTAL_HI_RES, &accumulated_ticks, @@ -217,7 +217,7 @@ auto timed_function_nvr(Clock_type clock_type, Accumulator* accumulator, Func&& * // ... * // Strand guaranteeing non-concurrency for any handler functions bound to it, perhaps pertaining to HTTP request R. * flow::util::Strand this_request_strand(multi_threaded_engine); - * std::atomic accumulated_ticks(0); + * std::atomic accumulated_ticks{0}; * auto timed_hnd * = flow::perf::timed_handler * (flow::perf::Clock_type::S_CPU_THREAD_TOTAL_HI_RES, &accumulated_ticks, diff --git a/src/flow/util/basic_blob.hpp b/src/flow/util/basic_blob.hpp index a3df36a7e..b2e124722 100644 --- a/src/flow/util/basic_blob.hpp +++ b/src/flow/util/basic_blob.hpp @@ -22,12 +22,23 @@ #include "flow/log/log.hpp" #include #include +#include #include #include +#include namespace flow::util { +// Types. + +/** + * Tag type used at least in Basic_blob and Blob_with_log_context to specify that an allocated buffer be zeroed. + * + * @see util::CLEAR_ON_ALLOC, the value to pass-in to relevant APIs such as Basic_blob::resize(). + */ +struct Clear_on_alloc {}; + /** * A hand-optimized and API-tweaked replacement for `vector`, i.e., buffer of bytes inside an allocated area * of equal or larger size; also optionally supports limited garbage-collected memory pool functionality and @@ -65,10 +76,14 @@ namespace flow::util * hoping that using a higher-level abstraction will ultimately do the same. * - In particular, the iterator types exposed by the API *are* pointers instead of introducing any performance * uncertainty by possibly using wrapper/proxy iterator class. 
- * - In particular, no element or memory area is *ever* initialized to zero(es) or any other particular filler + * - In particular (unless explicitly requested via optional Clear_on_alloc tag) + * no element or memory area is *ever* initialized to zero(es) or any other particular filler * value(s). (This is surprisingly difficult to avoid with STL containers! Google it. Though, e.g., * boost.container does provide a `default_init_t` extension to various APIs like `.resize()`.) If an allocation * does occur, the area is left as-is unless user specifies a source memory area from which to copy data. + * - However, if you *do* desire the zeroing of memory immediately upon allocation, you may request it + * via Clear_on_alloc tag arg to size-taking ctor, resize(), or reserve(). This is in many cases faster + * than an explicit `memset()` or `fill_n()` of your own; so do you use it; it is not mere syntactic sugar. * - Note that I am making no assertion about `vector` being slow; the idea is to guarantee *we* aren't by removing * any *question* about it; it's entirely possible a given `vector` is equally fast, but it cannot be guaranteed by * standard except in terms of complexity guarantees (which is usually pretty good but not everything). @@ -161,7 +176,7 @@ namespace flow::util * for overlapping such sharing `Basic_blob`s. * * Note that deallocation occurs regardless of which areas of that pool the relevant `Basic_blob`s represent, - * and whether they overlap or not (and, for that matter, whether they even together comprise the entire pool or + * and whether they overlap or not (and, for that matter, whether they even together make up the entire pool or * leave "gaps" in-between). The whole pool is deallocated the moment the last of the co-owning `Basic_blob`s * performs either make_zero() or ~Basic_blob() -- the values of start() and size() at the time are not relevant. 
* @@ -179,6 +194,20 @@ namespace flow::util * and often one would like a stateless -- zero-size -- allocator. Plus there are other limitations to * boost.interprocess SHM support, robust though it is.) * + * @note In the somewhat-exotic case wherein #Allocator_raw is stateful (therefore not `std::allocator` default), + * such that it is possible for two objects of that type to value-compare as not-equal, the following rules + * apply. Propagation of allocators via move-ct, copy-ct, move-assign, copy-assign, or swap follows standard + * rules (see cppreference.com or the like for those). This is normal. However the following is different + * from at least some standard containers and derivatives (e.g., boost.container ones), at least potentially: + * Even if an aforementioned op *did* propagate the allocator object from the source `Basic_blob`, + * any existing buffer (meaning `!zero()`) shall be deallocated using the same allocator object that allocated + * it originally (hence in this scenario there are now 2 allocator objects stored in `*this`). A reallocation + * with the new allocator object will *not* be forced. (Among other considerations this means that original + * allocator's resources -- the source pool or whatever it is -- must stay alive until the deallocation does + * occur according to the simple above-documented rules of when that must happen.) The rationale is that + * Basic_blob is biased toward simple, predictable behavior w/r/t deallocs and allocs occurring, even in the + * fact of exotic get_allocator() changes. + * * ### Logging ### * When and if `*this` logs, it is with log::Sev::S_TRACE severity or more verbose. 
* @@ -310,7 +339,7 @@ class Basic_blob * * @internal * - (If #S_SHARING) - * Accordingly the ref-counted buffer pointer #m_buf_ptr shall be a `boost::interprocess::shared_ptr` + * Accordingly the ref-counted buffer pointer buf_ptr() shall be a `boost::interprocess::shared_ptr` * instead of a vanilla `shared_ptr`; the latter may be faster and more full-featured, but it is likely * to internally store a raw `T*`; we need one that stores an `Allocator_raw::pointer` instead; * e.g., a fancy-pointer type (like `boost::interprocess::offset_ptr`) when dealing with @@ -337,14 +366,17 @@ class Basic_blob * @param alloc_raw * Allocator to copy and store in `*this` for all buffer allocations/deallocations. * If #Allocator_raw is stateless, then this has size zero, so nothing is copied at runtime, - * and by definition it is to equal `Allocator_raw()`. + * and by definition it is to equal `Allocator_raw{}`. */ - Basic_blob(const Allocator_raw& alloc_raw = Allocator_raw{}); + Basic_blob(const Allocator_raw& alloc_raw = {}); /** * Constructs blob with size() and capacity() equal to the given `size`, and `start() == 0`. Performance note: * elements are not initialized to zero or any other value. A new over-arching buffer (pool) is therefore allocated. * + * @see a similar ctor that takes Clear_on_alloc tag arg, if you *do* want the elements to be zero-initialized. + * Doing so is often faster than your own explicit `memset(X.data(), 0, X.size())` (or similar). + * * Corner case note: a post-condition is `zero() == (size() == 0)`. Note, also, that the latter is *not* a universal * invariant (see zero() doc header). * @@ -358,22 +390,47 @@ class Basic_blob * @param alloc_raw * Allocator to copy and store in `*this` for all buffer allocations/deallocations. * If #Allocator_raw is stateless, then this has size zero, so nothing is copied at runtime, - * and by definition it is to equal `Allocator_raw()`. + * and by definition it is to equal `Allocator_raw{}`. 
*/ explicit Basic_blob(size_type size, log::Logger* logger_ptr = nullptr, - const Allocator_raw& alloc_raw = Allocator_raw{}); + const Allocator_raw& alloc_raw = {}); + + /** + * Identical to similar-sig ctor except, if `size > 0`, all `size` elements are performantly initialized to zero. + * + * Using this ctor form, instead of using the non-init one followed by your own explicit + * `memset(X.data(), 0, X.size())` (or similar), is likely to be significantly faster in at least some cases. + * It is *not* mere syntactic sugar. + * + * @see resize() and reserve() also have Clear_on_alloc forms. + * + * @param coa_tag + * API-choosing tag util::CLEAR_ON_ALLOC. + * @param size + * See similar ctor. + * @param logger_ptr + * See similar ctor. + * @param alloc_raw + * See similar ctor. + */ + explicit Basic_blob(size_type size, Clear_on_alloc coa_tag, log::Logger* logger_ptr = nullptr, + const Allocator_raw& alloc_raw = {}); /** * Move constructor, constructing a blob exactly internally equal to pre-call `moved_src`, while the latter is - * made to be exactly as if it were just constructed as `Basic_blob(nullptr)` (allocator subtleties aside). + * made to be exactly as if it were just constructed as `Basic_blob{nullptr}` (allocator subtleties aside). * Performance: constant-time, at most copying a few scalars. * + * @note It is important this be `noexcept`, if a copying counterpart to us exists in this class; otherwise + * (e.g.) `vector` will, on realloc, default to copying `*this`es around instead of moving: + * a terrible (in its stealthiness) perf loss. + * * @param moved_src * The object whose internals to move to `*this` and replace with a blank-constructed object's internals. * @param logger_ptr * The Logger implementation to use in *this* routine (synchronously) only. Null allowed. 
*/ - Basic_blob(Basic_blob&& moved_src, log::Logger* logger_ptr = nullptr); + Basic_blob(Basic_blob&& moved_src, log::Logger* logger_ptr = nullptr) noexcept; /** * Copy constructor, constructing a blob logically equal to `src`. More formally, guarantees post-condition wherein @@ -430,16 +487,20 @@ class Basic_blob * The Logger implementation to use in *this* routine (synchronously) only. Null allowed. * @return `*this`. */ - Basic_blob& assign(Basic_blob&& moved_src, log::Logger* logger_ptr = nullptr); + Basic_blob& assign(Basic_blob&& moved_src, log::Logger* logger_ptr = nullptr) noexcept; /** * Move assignment operator (no logging): equivalent to `assign(std::move(moved_src), nullptr)`. * + * @note It is important this be `noexcept`, if a copying counterpart to us exists in this class; otherwise + * (e.g.) `vector` will, on realloc, default to copying `*this`es around instead of moving: + * a terrible (in its stealthiness) perf loss. + * * @param moved_src * See assign() (move overload). * @return `*this`. */ - Basic_blob& operator=(Basic_blob&& moved_src); + Basic_blob& operator=(Basic_blob&& moved_src) noexcept; /** * Copy assignment: assuming `(this != &src) && (!blobs_sharing(*this, src))`, @@ -531,7 +592,7 @@ class Basic_blob * @param logger_ptr * The Logger implementation to use in *this* routine (synchronously) only. Null allowed. */ - void swap(Basic_blob& other, log::Logger* logger_ptr = nullptr); + void swap(Basic_blob& other, log::Logger* logger_ptr = nullptr) noexcept; /** * Applicable to `!zero()` blobs, this returns an identical Basic_blob that shares (co-owns) `*this` allocated buffer @@ -669,7 +730,7 @@ class Basic_blob * * @tparam Blob_ptr_container * Something with method compatible with `push_back(Ptr&& blob_ptr_moved)`, - * where `Ptr` is `Blob_ptr_container::value_type`, and `Ptr(new Basic_blob)` can be created. + * where `Ptr` is `Blob_ptr_container::value_type`, and `Ptr{new Basic_blob}` can be created. 
* `Ptr` is to be a smart pointer type such as `unique_ptr` or `shared_ptr`. * @param size * See share_after_split_equally(). @@ -794,6 +855,9 @@ class Basic_blob * Ensures that an internal buffer of at least `capacity` elements is allocated and owned; disallows growing an * existing buffer; never shrinks an existing buffer; if a buffer is allocated, it is no larger than `capacity`. * + * @see a similar overload that takes Clear_on_alloc tag arg, if you *do* want the elements to be zero-initialized. + * Doing so is often faster than your own explicit `memset(X.data(), 0, X.size())` (or similar). + * * reserve() may be called directly but should be formally understood to be called by resize(), assign_copy(), * copy assignment operator, copy constructor. In all cases, the value passed to reserve() is exactly the size * needed to perform the particular task -- no more (and no less). As such, reserve() policy is key to knowing @@ -818,6 +882,25 @@ class Basic_blob */ void reserve(size_type capacity, log::Logger* logger_ptr = nullptr); + /** + * Identical to similar-sig overload except, if a `capacity`-sized buffer is allocated, then all `size` elements are + * performantly initialized to zero. + * + * Using this overload, instead of using the non-init one followed by your own explicit + * `memset(X.data(), 0, X.size())` (or similar), is likely to be significantly faster in at least some cases. + * It is *not* mere syntactic sugar. + * + * @see resize() and `size`-taking ctor also have Clear_on_alloc forms. + * + * @param coa_tag + * API-choosing tag util::CLEAR_ON_ALLOC. + * @param capacity + * See similar overload. + * @param logger_ptr + * See similar overload. 
+ */ + void reserve(size_type capacity, Clear_on_alloc coa_tag, log::Logger* logger_ptr = nullptr); + /** * Guarantees post-condition `zero() == true` by dropping `*this` ownership of the allocated internal buffer if any; * if no other Basic_blob holds ownership of that buffer, then that buffer is deallocated also. Recall that @@ -855,6 +938,9 @@ class Basic_blob * Formally: If `size >= 1`, and `zero() == true`, then a buffer is allocated; and the internal ownership * ref-count is set to 1. * + * @see a similar overload that takes Clear_on_alloc tag arg, if you *do* want the elements to be zero-initialized. + * Doing so is often faster than your own explicit `memset(X.data(), 0, X.size())` (or similar). + * * ### Leaving start() unmodified ### * `start` is taken to be the value of arg `start_or_unchanged`; unless the latter is set to special value * #S_UNCHANGED; in which case `start` is taken to equal start(). Since the default is indeed #S_UNCHANGED, @@ -871,6 +957,28 @@ class Basic_blob */ void resize(size_type size, size_type start_or_unchanged = S_UNCHANGED, log::Logger* logger_ptr = nullptr); + /** + * Identical to similar-sig overload except, if a `capacity`-sized buffer is allocated, then all `size` elements are + * performantly initialized to zero. + * + * Using this overload, instead of using the non-init one followed by your own explicit + * `memset(X.data(), 0, X.size())` (or similar), is likely to be significantly faster in at least some cases. + * It is *not* mere syntactic sugar. + * + * @see reserve() and `size`-taking ctor also have Clear_on_alloc forms. + * + * @param coa_tag + * API-choosing tag util::CLEAR_ON_ALLOC. + * @param size + * See similar overload. + * @param start_or_unchanged + * See similar overload. + * @param logger_ptr + * See similar overload. 
+ */ + void resize(size_type size, Clear_on_alloc coa_tag, + size_type start_or_unchanged = S_UNCHANGED, log::Logger* logger_ptr = nullptr); + /** * Restructures blob to consist of an internally allocated buffer and a `[begin(), end)` range starting at * offset `prefix_size` within that buffer. More formally, it is a simple resize() wrapper that ensures @@ -938,7 +1046,7 @@ class Basic_blob * * @param first * Pointer to first element to erase. It must be dereferenceable, or behavior is undefined (assertion may - * trip). + * trip). Corollary: invoking `erase()` when `empty() == true` is undefined behavior. * @param past_last * Pointer to one past the last element to erase. If `past_last <= first`, call is a no-op. * @return Iterator equal to `first`. (This matches standard expectation for container `erase()` return value: @@ -1148,8 +1256,8 @@ class Basic_blob * Internal deleter functor used if and only if #S_IS_VANILLA_ALLOC is `false` and therefore only with * #Buf_ptr being `boost::interprocess::shared_ptr` or * deleter-parameterized `unique_ptr`. Basically its task is to undo the - * `m_alloc_raw.allocate()` call made when allocating a buffer in reserve(); the result of that call is - * passed-to `shared_ptr::reset()` or `unique_ptr::reset()`; as is #m_alloc_raw (for any aux allocation needed, + * `alloc_raw().allocate()` call made when allocating a buffer in reserve(); the result of that call is + * passed-to `shared_ptr::reset()` or `unique_ptr::reset()`; as is alloc_raw() (for any aux allocation needed, * but only for `shared_ptr` -- `unique_ptr` needs no aux data); as is * an instance of this Deleter_raw (to specifically dealloc the buffer when the ref-count reaches 0). * (In the `unique_ptr` case there is no ref-count per se; or one can think of it as a ref-count that equals 1.) @@ -1161,6 +1269,24 @@ class Basic_blob * * Note: this is not used except with custom #Allocator_raw. With `std::allocator` the usual default `delete[]` * behavior is fine. 
+ * + * ### How to delete using it ### + * `operator()()` gets invoked by smart-pointer machinery; the pointer to delete is passed to it as an arg. + * So we need not memorize it ourselves. + * + * ### How to initialize ### + * Before `operator()()` will work, it has to be loaded with the buffer size and allocator (both items needed + * by that operator in addition to the pointer itself). There are 3 ways to do this: + * - Construct via 2-arg ctor that takes those values. As usual the allocator object is to be copied (if it's + * even stateful; else that's a no-op). + * - Move-construct from already-initialized Deleter_raw. + * - First, default-construct. Then, move-assign from an existing already-initialized other Deleter_raw. + * + * For ~brevity we won't fully enumerate who uses these and when; but mainly wanted to + * place you attention on that last possibility. Basic_blob::reserve_impl(), in the non-sharing, non-vanilla-alloc + * case, when it does need to allocate `buf_ptr()`, will exercise that use-case. E.g., Basic_blob + * default-ctor would default-ct a Deleter_raw; then the next `reserve_impl()` + * would 2-arg-ct Deleter_raw and then move-assign it onto the default-cted Deleter_raw. */ class Deleter_raw { @@ -1181,8 +1307,8 @@ class Basic_blob // Constructors/destructor. /** - * Default ctor: Must never be invoked; suitable only for a null smart-pointer. - * Without this a `unique_ptr<..., Deleter_Raw>` cannot be default-cted. + * Default ctor: Deleter must never be invoked to delete anything in this state; suitable only for a null + * smart-pointer. Without this a `unique_ptr<..., Deleter_Raw>` cannot be default-cted. */ Deleter_raw(); @@ -1201,8 +1327,51 @@ class Basic_blob */ explicit Deleter_raw(const Allocator_raw& alloc_raw, size_type buf_sz); + /** + * Move-construction which may be required when we are used in `unique_ptr`. This is equivalent to + * default-construction followed by move-assignment. 
See move-assignment operator doc header regarding + * why we are defining both of these. + * + * @param moved_src + * Moved guy. For cleanliness it becomes as-if default-cted. + */ + Deleter_raw(Deleter_raw&& moved_src); + + /** + * Copy-construction which may be required when we are used in `boost::interprocess::shared_ptr` which + * as of this writing requires copyable deleter in its `.reset()` and other places. + * + * @param src + * Copied guy. + */ + Deleter_raw(const Deleter_raw& src); + // Methods. + /** + * Move-assignment which is required when we are used in `unique_ptr`. User might invoke move-construction + * or move-assignment of the Basic_blob; this reduces to Basic_blob::assign() (move overload); which will + * do a swap -- that ultimately will move the stored Deleter_raw up to a few times. + * + * As of this writing we also manually overwrite `.get_deleter()` it in one case in Basic_blob::reserve_impl(); + * so this is useful for that also. + * + * @param moved_src + * Moved guy. For cleanliness it becomes as-if default-cted (unless it is the same object as `*this`). + * @return `*this`. + */ + Deleter_raw& operator=(Deleter_raw&& moved_src); + + /** + * Copy-assignment which is required when we are used in `boost::interprocess::shared_ptr` which + * as of this writing requires copyable deleter in its `.reset()` and other places. + * + * @param src + * Copied guy. + * @return `*this`. + */ + Deleter_raw& operator=(const Deleter_raw& src); + /** * Deallocates using `Allocator_raw::deallocate()`, passing-in the supplied pointer (to `value_type`) `to_delete` * and the number of `value_type`s to delete (from ctor). @@ -1221,29 +1390,36 @@ class Basic_blob * * ### What's with `optional<>`? ### * ...Okay, so actually this has size (whatever `optional` adds, probably a `bool`) + `sizeof(Allocator_raw)`, - * the latter being indeed zero for stateless allocators. Why use `optional<>` though? 
Well, we only do - * to support stateful allocators which cannot be default-cted; and our own default ctor requires that - * #m_alloc_raw is initialized to *something*... even though it (by default ctor contract) will never be accessed. + * the latter being indeed zero for stateless allocators. Why use `optional<>` though? Two reasons at least: + * - Stateful allocators often cannot be default-cted; and our own default ctor requires that + * #m_alloc_raw is initialized to *something*... even though it (by default ctor contract) will never be + * accessed via `operator()()` in that form. Bottom line is a null smart-pointer needs a default-cted `*this` + * for at least some smart-pointer types (namely `unique_ptr` at least; probably not `shared_ptr`). + * - Yay, `optional` can be uninitialized. + * - Some allocators (such as `boost::interprocess::allocator`) are not assignable, only + * copy-constructible, while we need to be move-assignable (see doc header for move-assignment; spoiler alert: + * Basic_blob::reserve_impl() may need that). + * - Yay, `optional` has `.emplace()` which will construct (including copy-construct) a `T`. * * It is slightly annoying that we waste the extra space for `optional` internals even when `Allocator_raw` * is stateless (and it is often stateless!). Plus, when #Buf_ptr is `shared_ptr` instead of `unique_ptr` - * our default ctor is not even needed. Probably some meta-programming thing could be done to avoid even this + * these bullet points probably do not apply. Probably some meta-programming thing could be done to avoid even this * overhead, but in my (ygoldfel) opinion the overhead is so minor, it does not even rise to the level of a to-do. */ std::optional m_alloc_raw; - /// See ctor and operator()(): the size of the buffer to deallocate. + /// See ctor and `operator()()`: the size of the buffer to deallocate. 
size_type m_buf_sz; }; // class Deleter_raw /** - * The smart-pointer type used for #m_buf_ptr; a custom-allocator-and-SHM-friendly impl and parameterization is + * The smart-pointer type used for buf_ptr(); a custom-allocator-and-SHM-friendly impl and parameterization is * used if necessary; otherwise a more typical concrete type is used. * * The following discussion assumes the more complex case wherein #S_SHARING is `true`. We discuss the simpler * converse case below that. * - * Two things affect how #m_buf_ptr shall behave: + * Two things affect how buf_ptr() shall behave: * - Which type this resolves-to depending on #S_IS_VANILLA_ALLOC (ultimately #Allocator_raw). This affects * many key things but most relevantly how it is dereferenced. Namely: * - Typical `shared_ptr` (used with vanilla allocator) will internally store simply a raw `value_type*` @@ -1277,7 +1453,7 @@ class Basic_blob * (and passed to `.reset()`); there is no `make_shared()` equivalent (which also means somewhat lower * perf, as aux data and user buffer are separately allocated and stored). Accordingly deletion is left * to the user, as there is no default deleter; one must be supplied. Thus: - * - See reserve(); it calls `.reset()` as explained here, including using #m_alloc_raw to pre-allocate. + * - See reserve(); it calls `.reset()` as explained here, including using alloc_raw() to pre-allocate. * - See Deleter_raw, the deleter functor type an instance of which is saved by the `shared_ptr` to * invoke when ref-count reaches 0. * @@ -1313,7 +1489,7 @@ class Basic_blob * we use a special array-friendly `make_unique()` variant. * - Otherwise: As with `boost::interprocess::shared_ptr` we cannot `make_*()` -- though AFAIK without * any perf penalty (there is no aux data) -- but reserve() must be quite careful to also - * replace `m_buf_ptr`'s deleter (which `.reset()` does not do... while `boost::interprocess::shared_ptr` + * replace `buf_ptr()`'s deleter (which `.reset()` does not do... 
while `boost::interprocess::shared_ptr` * does). */ using Buf_ptr = std::conditional_t::propagate_on_container_move_assignment::value == true` (else untouched). - * - If `*this` is copy-cted: member set to - * `std::allocator_traits::select_on_container_copy_construction()` (pass-in source member + * - If `*this` is copy-cted: datum set to + * `std::allocator_traits::select_on_container_copy_construction()` (pass-in source datum * counterpart). - * - If `*this` is copy-assigned: member copy-assigned if + * - If `*this` is copy-assigned: datum copy-assigned if * `std::allocator_traits::propagate_on_container_copy_assignment::value == true` (else untouched). - * - If `*this` is `swap()`ed: member ADL-`swap()`ed with source member counterpart if + * - If `*this` is `swap()`ed: datum ADL-`swap()`ed with source datum counterpart if * `std::allocator_traits::propagate_on_container_swap::value == true` (else untouched). * - Otherwise this is supplied via a non-copy/move ctor arg by user. * * ### Specially treated value ### * If #Allocator_raw is `std::allocator` (as supposed to `something_else`), then - * #m_alloc_raw (while guaranteed set to the zero-sized copy of `std::allocator()`) is never + * this datum (while guaranteed set to the zero-sized copy of `std::allocator()`) is never * in practice touched (outside of the above-mentioned moves/copies/swaps, though they also do nothing in reality * for this stateless allocator). This value by definition means we are to allocate on the regular heap; * and as of this writing for perf/other reasons we choose to use a vanilla * `*_ptr` with its default alloc-dealloc APIs (which perform `new[]`-`delete[]` respectively); we do not pass-in - * #m_alloc_raw anywhere. See #Buf_ptr doc header for more. If we did pass it in to + * alloc_raw() anywhere. See #Buf_ptr doc header for more. 
If we did pass it in to * `allocate_shared*()` or `boost::interprocess::shared_ptr::reset` the end result would be functionally * the same (`std::allocator::[de]allocate()` would get called; these call `new[]`/`delete[]`). * - * ### Relationship between #m_alloc_raw and the allocator/deleter in #m_buf_ptr ### + * ### Relationship between this datum and the allocator/deleter in `buf_ptr()` ### * (This is only applicable if #S_IS_VANILLA_ALLOC is `false`.) - * #m_buf_ptr caches #m_alloc_raw internally in its centrally linked data. Ordinarily, then, they compare as equal. + * buf_ptr() caches this datum internally in its centrally linked data. Ordinarily, then, they compare as equal. * In the corner case where (1) move-assign or copy-assign or swap() was used on `*this`, *and* * (2) #Allocator_raw is stateful and *can* compare unequal (e.g., `boost::interprocess::allocator`): * they may come to compare as unequal. It is, however, not (in our case) particularly important: - * #m_alloc_raw affects the *next* reserve() (potentially); the thing stored in #m_buf_ptr affects the logic when + * this datum affects the *next* reserve() (potentially); the thing stored in buf_ptr() affects the logic when * the underlying buffer is next deallocated. The two don't depend on each other. + * + * @return See above. */ - Allocator_raw m_alloc_raw; + Allocator_raw& alloc_raw(); /** - * Pointer to currently allocated buffer of size #m_capacity; null if and only if `zero() == true`. - * Buffer is auto-freed at destruction; or in make_zero(); but only if by that point any share()-generated - * other `Basic_blob`s have done the same. Otherwise the ref-count is merely decremented. - * In the case of #S_SHARING being `false`, one can think of this ref-count as being always at most 1; - * since share() is not compiled, and as a corollary a `unique_ptr` is used to avoid perf costs. - * Thus make_zero() and dtor always dealloc in that case. 
+ * Ref-to-immutable counterpart to the other overload. + * @return See above. + */ + const Allocator_raw& alloc_raw() const; + + /** + * Implements reserve() overloads. * - * For performance, we never initialize the values in the array to zeroes or otherwise. - * This contrasts with `vector` and most other standard or Boost containers which use an `allocator` to - * allocate any internal buffer, and most allocators default-construct (which means assign 0 in case of `uint8_t`) - * any elements within allocated buffers, immediately upon the actual allocation on heap. As noted in doc header, - * this behavior is surprisingly difficult to avoid (involving custom allocators and such). + * @param clear_on_alloc + * Whether the Clear_on_alloc overload or the other one was called. + * @param capacity + * See reserve(). + * @param logger_ptr + * See reserve(). + */ + void reserve_impl(size_type capacity, bool clear_on_alloc, log::Logger* logger_ptr); + + /** + * Implements resize() overloads. + * + * @param clear_on_alloc + * Whether the Clear_on_alloc overload or the other one was called. + * @param size + * See resize(). + * @param start_or_unchanged + * See resize(). + * @param logger_ptr + * See resize(). */ - Buf_ptr m_buf_ptr; + void resize_impl(size_type size, bool clear_on_alloc, size_type start_or_unchanged, log::Logger* logger_ptr); - /// See capacity(); but #m_capacity is meaningless (and containing unknown value) if `!m_buf_ptr` (i.e., zero()). + /** + * The body of swap(), except for the part that swaps (or decides not to swap) alloc_raw(). As of this writing + * used by swap() and assign() (move overload) which perform mutually different steps w/r/t alloc_raw(). + * + * @param other + * See swap(). + * @param logger_ptr + * See swap(). + */ + void swap_impl(Basic_blob& other, log::Logger* logger_ptr) noexcept; + + /** + * Returns iterator-to-mutable equivalent to given iterator-to-immutable. + * + * @param it + * Self-explanatory. 
No assumptions are made about valid_iterator() or derefable_iterator() status. + * @return Iterator to same location as `it`. + */ + Iterator iterator_sans_const(Const_iterator it); + + // Data. + + /** + * Combined -- to enable empty base-class optimization (EBO) -- storage for the two data items, refs to which are + * returned by alloc_raw() and buf_ptr() respectively. + * + * @see alloc_raw() and buf_ptr() doc headers for actual documentation for these two important items (especially + * buf_ptr()). + * + * ### Rationale ### + * Please look into EBO to grok this. That aside -- just think of `alloc_raw()` as essentially an + * `m_alloc_raw` data member, `buf_ptr()` as an `m_buf_ptr` data member. They are only stored in this pair thingie + * due to an obscure, but perf-affecting, C++ technicality. The aforementioned ref-returning accessors avoid having + * to write `m_alloc_and_buf_ptr.second` and `.first` all over the place. + */ + boost::compressed_pair m_alloc_and_buf_ptr; + + /// See capacity(); but #m_capacity is meaningless (and containing unknown value) if `!buf_ptr()` (i.e., zero()). size_type m_capacity; - /// See start(); but #m_start is meaningless (and containing unknown value) if `!m_buf_ptr` (i.e., zero()). + /// See start(); but #m_start is meaningless (and containing unknown value) if `!buf_ptr()` (i.e., zero()). size_type m_start; - /// See size(); but #m_size is meaningless (and containing unknown value) if `!m_buf_ptr` (i.e., zero()). + /// See size(); but #m_size is meaningless (and containing unknown value) if `!buf_ptr()` (i.e., zero()). size_type m_size; }; // class Basic_blob @@ -1420,10 +1665,13 @@ class Basic_blob // Template implementations. -// m_buf_ptr initialized to null pointer. n_capacity and m_size remain uninit (meaningless until m_buf_ptr changes). +// buf_ptr() initialized to null pointer. n_capacity and m_size remain uninit (meaningless until buf_ptr() changes). 
template Basic_blob::Basic_blob(const Allocator_raw& alloc_raw) : - m_alloc_raw(alloc_raw) // Copy allocator; stateless allocator should have size 0 (no-op for the processor). + m_alloc_and_buf_ptr(alloc_raw), // Copy allocator; stateless allocator should have size 0 (no-op for the processor). + m_capacity(0), // Not necessary, but some compilers will warn in some situations. Fine; it's cheap enough. + m_start(0), // Ditto. + m_size(0) // Ditto. { // OK. } @@ -1437,23 +1685,39 @@ Basic_blob::Basic_blob resize(size, 0, logger_ptr); } +template +Basic_blob::Basic_blob + (size_type size, Clear_on_alloc coa_tag, log::Logger* logger_ptr, const Allocator_raw& alloc_raw) : + + Basic_blob(alloc_raw) // Delegate. +{ + resize(size, coa_tag, 0, logger_ptr); +} + template Basic_blob::Basic_blob(const Basic_blob& src, log::Logger* logger_ptr) : - // Follow rules established in m_alloc_raw doc header: - m_alloc_raw(std::allocator_traits::select_on_container_copy_construction(src.m_alloc_raw)) + // Follow rules established in alloc_raw() doc header: + m_alloc_and_buf_ptr(std::allocator_traits::select_on_container_copy_construction(src.alloc_raw())), + m_capacity(0), // See comment in first delegated ctor above. + m_start(0), // Ditto. + m_size(0) // Ditto + { /* What we want to do here, ignoring allocators, is (for concision): `assign(src, logger_ptr);` - * However copy-assignment also must do something different w/r/t m_alloc_raw than what we had to do above - * (again see m_alloc_raw doc header); so just copy/paste the rest of what operator=(copy) would do. + * However copy-assignment also must do something different w/r/t alloc_raw() than what we had to do above + * (again see alloc_raw() doc header); so just copy/paste the rest of what operator=(copy) would do. * Skipping most comments therein, as they don't much apply in our case. Code reuse level is all-right; * and we can skip the `if` from assign(). 
*/ assign_copy(src.const_buffer(), logger_ptr); } template -Basic_blob::Basic_blob(Basic_blob&& moved_src, log::Logger* logger_ptr) : - // Follow rules established in m_alloc_raw doc header: - m_alloc_raw(std::move(moved_src.m_alloc_raw)) +Basic_blob::Basic_blob(Basic_blob&& moved_src, log::Logger* logger_ptr) noexcept : + // Follow rules established in alloc_raw() doc header: + m_alloc_and_buf_ptr(std::move(moved_src.alloc_raw())), + m_capacity(0), // See comment in first delegated ctor above. + m_start(0), // Ditto. + m_size(0) // Ditto { /* Similar to copy ctor above, do the equivalent of assign(move(moved_src), logger_ptr) minus the allocator work. * That reduces to simply: */ @@ -1475,33 +1739,16 @@ Basic_blob& assert(!blobs_sharing(*this, src)); } - // For m_alloc_raw: Follow rules established in m_alloc_raw doc header. + // For alloc_raw(): Follow rules established in alloc_raw() doc header. if constexpr(std::allocator_traits::propagate_on_container_copy_assignment::value) { - m_alloc_raw = src.m_alloc_raw; - /* Let's consider what just happened. Allocator_raw's policy is to, yes, copy m_alloc_raw from - * src to *this; so we did. Now suppose !zero() and !src.zero(); and that old m_alloc_raw != src.m_alloc_raw. - * (E.g., boost::interprocess::allocator<>s with same type but set to different SHM segments S1 and S2 would - * compare unequal.) What needs to happen is *m_buf_ptr buffer must be freed (more accurately, its - * shared_ptr ref_count decremented and thus buffer possibly freed if not share()d); then allocated; then - * contents linear-copied from *src.m_buf_ptr buffer to *m_buf_ptr buffer. assign_copy() below naively - * does all that; but will it work since we've thrown away the old m_alloc_raw? Let's go through it: - * -# Basically m_buf_ptr.reset() is kinda like m_buf_ptr.reset() followed by - * m_buf_ptr.reset(); the former part is the possible-dealloc. So will it work? 
- * Yes: shared_ptr<> stores the buffer and aux data (ref-count, allocator, deleter) in one central - * place shared with other shared_ptr<>s in its group. The .reset() dec-refs the ref-count and dissociates - * m_buf_ptr from the central place; if the ref-count is 0, then it also deallocs the buffer and the - * aux data and eliminates the central place... using the allocator/deleter cached in that central - * place itself. Hence the old m_alloc_raw's copy will go in effect when the nullifying .reset() part - * happens. - * -# So then m_buf_ptr is .reset() to the newly allocated buffer which will be allocated by us explicitly - * using m_alloc_raw (which we've replaced just now). - * -# Then the linear copy in assign_copy() is uncontroversial; everything is allocated before this starts. */ + alloc_raw() = src.alloc_raw(); // No copy-assignment for some allocators, but then p_o_c_c_a would be false. } - /* else: Leave m_alloc_raw alone. Everything should be fine once we assign_copy() below: existing m_buf_ptr - * (if not null) will dealloc without any allocator-related disruption/change; then it'll be reset to a new buffer - * with contents linear-copied over. The unchanged m_alloc_raw will be used for the *next* allocating reserve() - * if any. */ + // else: Leave alloc_raw() alone. + + /* Either way: for stateful (not-always-equal) allocators: the allocator used to dealloc buf_ptr() (if + * buf_ptr() not null) is cached inside buf_ptr(). New alloc_raw(), even if it was changed, is relevant only to + * the future allocation(s) if any. */ // Now to the relatively uncontroversial stuff. To copy the rest we'll just do: @@ -1525,7 +1772,7 @@ Basic_blob& * That doesn't seem useful and would make things more difficult obviously. Now: * * Either src.zero(), or not; but regardless src.size() == 0. Our options are essentially these: - * make_zero(); or resize(0, 0). (We could also perhaps copy src.m_buf_ptr[] and then adjust m_size = 0, but + * make_zero(); or resize(0, 0). 
(We could also perhaps copy src.buf_ptr()[] and then adjust m_size = 0, but * this is clearly slower and only gains the thing we specifically pointed out is not a virtue above.) * * Let's break down those 2 courses of action, by situation, then: @@ -1551,61 +1798,50 @@ Basic_blob& Basic_blob Basic_blob& - Basic_blob::assign(Basic_blob&& moved_src, log::Logger* logger_ptr) + Basic_blob::assign(Basic_blob&& moved_src, log::Logger* logger_ptr) noexcept { if (this != &moved_src) { - // For m_alloc_raw: Follow rules established in m_alloc_raw doc header. + // For alloc_raw(): Follow rules established in alloc_raw() doc header. if constexpr(std::allocator_traits::propagate_on_container_move_assignment::value) { - m_alloc_raw = std::move(moved_src.m_alloc_raw); - /* Let's consider what just happened. Allocator_raw's policy is to, yes, move m_alloc_raw from - * src to *this; so we did -- I guess src.m_alloc_raw is some null-ish empty-ish thing now. - * Now suppose !zero() and !moved_src.zero(); and that old m_alloc_raw != new src.m_alloc_raw. - * (That is fairly exotic; at least Allocator_raw is stateful to begin with. - * E.g., boost::interprocess::allocator<>s with same type but set to different SHM pools S1 and S2 would - * compare unequal.) What needs to happen is m_buf_ptr buffer must be freed (more accurately, its - * shared_ptr ref_count decremented and thus buffer possibly freed if not share()d) + ptr nullified); then ideally - * simply swap m_buf_ptr (which will get moved_src.m_buf_ptr old value) and moved_src.m_buf_ptr (which will - * become null). That's what we do below. So will it work? - * -# The m_buf_ptr.reset() below will work fine for the same reason the long comment in assign(copy) - * states that nullifying m_buf_ptr, even with m_alloc_raw already replaced, will still use old m_alloc_raw: - * for it is stored inside the central area linked-to in the m_buf_ptr being nullified. - * -# The swap is absolutely smooth and fine. 
And indeed by making that swap we'll've ensured this->m_alloc_raw - * and the allocator stored inside m_buf_ptr are equal. */ + alloc_raw() = std::move(moved_src.alloc_raw()); // Similar comment here as for assign() copy overload. } - /* else: Leave m_alloc_raw alone. What does it mean though? Let's consider it. Suppose !zero() and - * !moved_src.zero(), and the two `m_alloc_raw`s do not compare equal (e.g., boost::interprocess::allocator<>s - * with mutually differing SHM-pools). m_buf_ptr.reset() below will work fine: m_alloc_raw is unchanged so no - * controversy. However once m_buf_ptr is moved from moved_src.m_buf_ptr, it will (same reason as above -- - * it is cached) keep using old m_alloc_raw; meaning if/when it is .reset() or destroyed the old allocator - * will deallocate. That is in fact what we want. It might seem odd that m_alloc_raw won't match what's - * used for this->m_buf_ptr, but it is fine: m_alloc_raw affects the *next* allocating reserve(). - * (And, as usual, if Allocator_raw is stateless, then none of this matters.) */ + // else: Leave alloc_raw() alone. + + /* Either way: for stateful (not-always-equal) allocators: the allocator used to dealloc buf_ptr() (if + * moved_src.buf_ptr() not null) is cached inside moved_src.buf_ptr() and will be swap_impl()ed into + * our buf_ptr() as part of the deleter. New alloc_raw(), even if it was changed, is relevant only to + * the future allocation(s) if any. */ // Now to the relatively uncontroversial stuff. - make_zero(logger_ptr); // Spoiler alert: it's: if (!zero()) { m_buf_ptr.reset(); } - // So now m_buf_ptr is null; hence the other m_* (other than m_alloc_raw) are meaningless. + make_zero(logger_ptr); // Spoiler alert: it's: if (!zero()) { buf_ptr().reset(); } + // So now buf_ptr() is null; hence the other m_* (other than alloc_raw()) are meaningless. 
swap_impl(moved_src, logger_ptr); - // Now *this is equal to old moved_src; new moved_src is valid and zero(); and nothing was copied -- as advertised. + /* Now *this is equal to old moved_src; new moved_src is valid and zero(); and nothing was copied -- as advertised. + * swap_impl() does not touch alloc_raw() or moved_src.alloc_raw(), and we handled that already. */ } // if (this != &moved_src) return *this; } // Basic_blob::assign(move) template -Basic_blob& Basic_blob::operator=(Basic_blob&& moved_src) +Basic_blob& + Basic_blob::operator=(Basic_blob&& moved_src) noexcept { return assign(std::move(moved_src)); } template -void Basic_blob::swap_impl(Basic_blob& other, log::Logger* logger_ptr) +void Basic_blob::swap_impl(Basic_blob& other, log::Logger* logger_ptr) noexcept { using std::swap; + /* As of this writing move-ct and move-assign both use us as the core of what needs to happen; so the below code + * has a particularly high responsibility of correctness and performance. */ + if (this != &other) { if (logger_ptr && logger_ptr->should_log(log::Sev::S_TRACE, S_LOG_COMPONENT)) @@ -1616,7 +1852,19 @@ void Basic_blob::swap_impl(Basic_blob& other, log: "[" << other.capacity() << "])."); } - swap(m_buf_ptr, other.m_buf_ptr); + /* The following looks simple, and that's great. Just realize that buf_ptr() refers to a smart-pointer of one + * of several types, and (see reserve_impl()) a custom deleter of type Deleter_raw may be stored therein. When + * the smart-pointer is of a shared_ptr<> variety, that doesn't complicate anything, probably, as the deleter is + * in the control block, so a swap just swaps a pair of ctl-block pointers, and that's that. However when it is + * a unique_ptr, then Deleter_raw has to be move-assignable and probably + * move-ctible; the swap (*) will then do a move-ct of a Deleter_raw followed by 2 move-assigns of them (probably). 
+ * So, as of this writing, we specifically made Deleter_raw move-ctible and move-assignable carefully; and + * disabled any copy-ct or copy-assign for cleanliness and determinism. + * + * (*) We could make a swap(Deleter_raw&, Deleter_raw&) as well, which would be directly invoked via ADL-lookup + * in the following statement; but let's leave well-enough alone and leave it as std::swap() and move-ct/assigns. + * As of this writing reserve_impl() uses Deleter_raw move-assignment also, anyway. */ + swap(buf_ptr(), other.buf_ptr()); /* Some compilers in some build configs issue maybe-uninitialized warning here, when `other` is as-if * default-cted (hence the following three are intentionally uninitialized), particularly with heavy @@ -1633,27 +1881,27 @@ void Basic_blob::swap_impl(Basic_blob& other, log: #pragma GCC diagnostic pop - /* Skip m_alloc_raw: swap() has to do it by itself; we are called from it + move-assign/ctor which require - * mutually different treatment for m_alloc_raw. */ + /* Skip alloc_raw(): swap() has to do it by itself; we are called from it + move-assign/ctor which require + * mutually different treatment for alloc_raw(). */ } } // Basic_blob::swap_impl() template -void Basic_blob::swap(Basic_blob& other, log::Logger* logger_ptr) +void Basic_blob::swap(Basic_blob& other, log::Logger* logger_ptr) noexcept { using std::swap; - // For m_alloc_raw: Follow rules established in m_alloc_raw doc header. + // For alloc_raw(): Follow rules established in m_alloc_and_buf_ptr doc header. if constexpr(std::allocator_traits::propagate_on_container_swap::value) { - if (&m_alloc_raw != &other.m_alloc_raw) // @todo Is this redundant? Or otherwise unnecessary? + if (&alloc_raw() != &other.alloc_raw()) // @todo Is this redundant? Or otherwise unnecessary? { - swap(m_alloc_raw, other.m_alloc_raw); + swap(alloc_raw(), other.alloc_raw()); } } - /* else: Leave both `m_alloc_raw`s alone. What does it mean though? 
Well, see either assign(); the same - * theme applies here: Each m_buf_ptr's cached allocator/deleter will potentially not equal its respective - * m_alloc_raw anymore; but the latter affects only the *next* allocating reserve(); so it is fine. + /* else: Leave both `alloc_raw()`s alone. What does it mean though? Well, see either assign(); the same + * theme applies here: Each buf_ptr()'s cached allocator/deleter will potentially not equal its respective + * alloc_raw() anymore; but the latter affects only the *next* allocating reserve(); so it is fine. * That said, to quote cppreference.com: "Note: swapping two containers with unequal allocators if * propagate_on_container_swap is false is undefined behavior." So, while it will work for us, trying such * a swap() would be illegal user behavior in any case. */ @@ -1664,7 +1912,7 @@ void Basic_blob::swap(Basic_blob& other, log::Logg template void swap(Basic_blob& blob1, - Basic_blob& blob2, log::Logger* logger_ptr) + Basic_blob& blob2, log::Logger* logger_ptr) noexcept { return blob1.swap(blob2, logger_ptr); } @@ -1679,9 +1927,9 @@ Basic_blob Basic_blob::share_after_split_equally_emit_pt share_after_split_equally(size, headless_pool, [&](Basic_blob&& blob_moved) { - out_blobs_ptr->push_back(Ptr(new Basic_blob{std::move(blob_moved)})); + out_blobs_ptr->push_back(Ptr{new Basic_blob{std::move(blob_moved)}}); }, logger_ptr); } @@ -1865,19 +2113,36 @@ typename Basic_blob::size_type Basic_blob bool Basic_blob::zero() const { - return !m_buf_ptr; + return !buf_ptr(); } template void Basic_blob::reserve(size_type new_capacity, log::Logger* logger_ptr) +{ + reserve_impl(new_capacity, false, logger_ptr); +} + +template +void Basic_blob::reserve(size_type new_capacity, Clear_on_alloc, + log::Logger* logger_ptr) +{ + reserve_impl(new_capacity, true, logger_ptr); +} + +template +void Basic_blob::reserve_impl(size_type new_capacity, bool clear_on_alloc, + log::Logger* logger_ptr) { using boost::make_shared_noinit; + using 
boost::make_shared; using boost::shared_ptr; using std::numeric_limits; + using std::memset; /* As advertised do not allow enlarging existing buffer. They can call make_zero() though (but must do so consciously * hence considering the performance impact). */ - assert(zero() || ((new_capacity <= m_capacity) && (m_capacity > 0))); + assert((zero() || ((new_capacity <= m_capacity) && (m_capacity > 0))) + && "Basic_blob intentionally disallows reserving N>M>0, where M is current capacity. make_zero() first."); /* OK, but what if new_capacity < m_capacity? Then post-condition (see below) is already satisfied, and it's fastest * to do nothing. If user believes lower memory use is higher-priority, they can explicitly call make_zero() first @@ -1889,10 +2154,11 @@ void Basic_blob::reserve(size_type new_capacity, l { FLOW_LOG_SET_CONTEXT(logger_ptr, S_LOG_COMPONENT); FLOW_LOG_TRACE_WITHOUT_CHECKING("Blob [" << this << "] " - "allocating internal buffer sized [" << new_capacity << "]."); + "allocating internal buffer sized [" << new_capacity << "]; " + "will zero-fill? = [" << clear_on_alloc << "]."); } - if (new_capacity <= size_type(numeric_limits::max())) // (See explanation near bottom of method.) + if (new_capacity <= size_type(numeric_limits::max())) // (See explanation near bottom of method.) { /* Time to (1) allocate the buffer; (2) save the pointer; (3) ensure it is deallocated at the right time * and with the right steps. Due to Allocator_raw support this is a bit more complex than usual. Please @@ -1916,9 +2182,9 @@ void Basic_blob::reserve(size_type new_capacity, l { if (logger_ptr && logger_ptr->should_log(log::Sev::S_TRACE, S_LOG_COMPONENT)) { - /* This ensures delete[] call when m_buf_ptr ref-count reaches 0. + /* This ensures delete[] call when buf_ptr() ref-count reaches 0. * As advertised, for performance, the memory is NOT initialized. */ - m_buf_ptr.reset(new value_type[new_capacity], + buf_ptr().reset(clear_on_alloc ? 
(new value_type[new_capacity]()) : (new value_type[new_capacity]), // Careful! *this might be gone if some other share()ing obj is the one that 0s ref-count. [logger_ptr, original_blob = this, new_capacity] (value_type* buf_ptr) @@ -1934,14 +2200,16 @@ void Basic_blob::reserve(size_type new_capacity, l } else // if (!should_log()): No logging deleter; just delete[] it. { - /* This executes `new value_type[new_capacity]` and ensures delete[] when m_buf_ptr ref-count reaches 0. + /* This executes `new value_type[new_capacity]` and ensures delete[] when buf_ptr() ref-count reaches 0. * As advertised, for performance, the memory is NOT initialized. */ - m_buf_ptr = make_shared_noinit(new_capacity); + buf_ptr() = clear_on_alloc ? make_shared(new_capacity) + : make_shared_noinit(new_capacity); } } // if constexpr(S_SHARING) else // if constexpr(!S_SHARING) { - m_buf_ptr = boost::movelib::make_unique_definit(new_capacity); + buf_ptr() = clear_on_alloc ? boost::movelib::make_unique(new_capacity) + : boost::movelib::make_unique_definit(new_capacity); // Again -- the logging in make_zero() (and Blob_with_log_context dtor) is sufficient. } } // if constexpr(S_IS_VANILLA_ALLOC) @@ -1950,22 +2218,25 @@ void Basic_blob::reserve(size_type new_capacity, l /* Fancy (well, potentially) allocator time. Again, if you've read the Buf_ptr and Deleter_raw doc headers, * you'll know what's going on. */ + // Raw-allocate via Allocator_raw! No value-init occurs... but see below. + const auto ptr = alloc_raw().allocate(new_capacity); + if constexpr(S_SHARING) { - m_buf_ptr.reset(m_alloc_raw.allocate(new_capacity), // Raw-allocate via Allocator_raw! No value-init occurs. + buf_ptr().reset(ptr, /* Let them allocate aux data (ref count block) via Allocator_raw::allocate() * (and dealloc it -- ref count block -- via Allocator_raw::deallocate())! * Have them store internal ptr bits as `Allocator_raw::pointer`s, not * necessarily raw `value_type*`s! 
*/ - m_alloc_raw, + alloc_raw(), /* When the time comes to dealloc, invoke this guy like: D()! It'll - * perform m_alloc_raw.deallocate(, n). + * perform alloc_raw().deallocate(, n). * Since only we happen to know the size of how much we actually allocated, we pass that info * into the Deleter_raw as well, as it needs to know the `n` to pass to - * m_alloc_raw.deallocate(p, n). */ - Deleter_raw{m_alloc_raw, new_capacity}); + * alloc_raw().deallocate(p, n). */ + Deleter_raw{alloc_raw(), new_capacity}); /* Note: Unlike the S_IS_VANILLA_ALLOC=true case above, here we omit any attempt to log at the time * of dealloc, even if the verbosity is currently set high enough. It is not practical to achieve: * Recall that the assumptions we take for granted when dealing with std::allocator/regular heap @@ -1976,7 +2247,7 @@ void Basic_blob::reserve(size_type new_capacity, l * in some custom-allocator situations, particularly when operating in SHM-heap. That is why * we take an optional Logger* as an arg to every possibly-logging API (we can't guarantee, if * S_IS_VANILLA_ALLOC=false, that a Logger* can meaningfully be stored in likely-Allocator-stored *this). - * For that same reason we cannot pass it to the Deleter_raw functor; m_buf_ptr (whose bits are in + * For that same reason we cannot pass it to the Deleter_raw functor; buf_ptr() (whose bits are in * *this) will save a copy of that Deleter_raw and hence *this will end up storing the Logger* which * (as noted) may be nonsensical. (With S_IS_VANILLA_ALLOC=true, though, it's safe to store it; and * since deleter would only fire at dealloc time, it doesn't present a new perf problem -- since TRACE @@ -1991,20 +2262,65 @@ void Basic_blob::reserve(size_type new_capacity, l { /* Conceptually it's quite similar to the S_SHARING case where we do shared_ptr::reset() above. 
* However there is an API difference that is subtle yet real (albeit only for stateful Allocator_raw): - * Current m_alloc_raw was used to allocate *m_buf_ptr, so it must be used also to dealloc it. + * Current alloc_raw() was used to allocate *buf_ptr(), so it must be used also to dealloc it. * unique_ptr::reset() does *not* take a new Deleter_raw; hence if we used it (alone) here it would retain - * the m_alloc from ction time -- and if that does not equal current m_alloc => trouble in make_zero() - * or dtor. + * the alloc_raw() from ction (or possibly last assignment) time -- and if that does not equal current + * m_alloc => trouble in make_zero() or dtor. * - * Anyway, to beat that, we can either manually overwrite get_deleter() (<-- non-const ref); - * or we can assign via unique_ptr move-ct. The latter is certainly pithier and prettier, - * but the former might be a bit faster. (Caution! Recall m_buf_ptr is null currently. If it were not + * Anyway, to beat that, we can manually overwrite get_deleter() (<-- non-const ref). + * This will require a Deleter_raw move-assignment to exist (and it does, carefully and explicitly, as + * of this writing; at least for it to be used here). + * (Also: Caution! Recall buf_ptr() is null currently. If it were not * we would need to explicitly nullify it before the get_deleter() assignment.) */ - m_buf_ptr.get_deleter() = Deleter_raw{m_alloc_raw, new_capacity}; - m_buf_ptr.reset(m_alloc_raw.allocate(new_capacity)); + buf_ptr().get_deleter() = Deleter_raw{alloc_raw(), new_capacity}; + buf_ptr().reset(ptr); } // else if constexpr(!S_SHARING) + + if (clear_on_alloc) + { + memset(ptr.get(), 0, new_capacity); + /* Perf discussion: That is obviously correct functionally; but can it be done faster? Why do we ask? + * Answer: See the S_IS_VANILLA_ALLOC case. Notice we use operations, when clear_on_alloc, that + * will allocate-and-clear "at the same time." 
(More specifically, really, the make_shared()s and + * the make_unique()s (as opposed to make_shared_noinit() or make_unique_definit()) promise to perform + * `new T[]()` (as opposed to `new T[]`) which does in fact clear-while-allocating. Is that faster though? + * Actually yes; empirically we've seen it be ~20% faster for a 64Ki buffer when comparing + * `new T[N]()` versus `p = new T[N]; memset(p, 0, N)`; and theoretically perhaps `new T[]()` ends up + * as `calloc()`, which in glibc might be clever -- making use of mmap()-ed areas being pre-zeroed, + * knowing when a page is being dirtied by the calloc(), and so forth.) Here, though, we are not doing + * any such thing; we simply A.allocate(N) via allocator (not std::allocator; possibly SHM-aware) -- + * then zero it. The user could even do it themselves, in this case making clear_on_alloc syntactic + * sugar at best. So could we do better, like we did in the S_IS_VANILLA_ALLOC=true case above? + * Answer: Well... I (ygoldfel) think... no, not per se. Not here at least. We do have use the + * Allocator_raw, and nothing in the C++1x or C++17 Allocator concept docs suggests it is possible to + * ask it to allocate-and-clear. We only did so for S_IS_VANILLA_ALLOC=true, because we know + * std::allocator by definition does heap new/delete; so we can call such things ourselves and not actually + * mention the Allocator_raw; it is used more as a binary determinant of S_IS_VANILLA_ALLOC=true. + * So by contract, since there's no way to alloc-and-zero at the same time, if we are told to + * clear_on_alloc, then we have to memset(); no choice. 
+ * + * @todo However the code (as of this writing at least in Flow-IPC's SHM-related code including + * ipc::transport::struc::shm::Capnp_message_builder) that uses Basic_blob and *can* possibly guarantee + * that Allocator_raw::allocate(N) will pre-zero the N bytes as-needed -- such code could + * (1) specify clear_on_alloc=false and (2) explicitly guarantee .allocate() will alloc-and-zero. + * As of this writing that is in Flow-IPC (not Flow), a sister/dependent component that shares Flow's DNA; + * and in particular in that case we've got: + * - SHM-classic: Ultimately it leverages boost::interprocess::managed_shared_memory; look into it + * whether there's some kind of sped-up alloc-and-zero hook available. + * - SHM-jemalloc: That one has a home-grown jemalloc extension; it might be possible to use some kind of + * knob(s) to ensure an alloc-and-zero is performed. MALLOCX_ZERO flag to mallocx() will do it, and + * docs suggest care is taken to do this performantly; perhaps this mode can be set/unset through some + * kind of thread-local config system -- as long as it is not slow -- not sure. As of this writing + * there's already a mandatory activator-context object, so maybe it could be merely extended with + * this knob. Not sure... it's doable though. + * - Either way: The user that does must be careful; zeroing in *all* allocs would probably be bad, as + * many situations do not require it; it should only be done when actually desired. + * To be clear: all that is out of Basic_blob's purview; so really this to-do should be elsewhere arguably; + * but it's a closely related project, so here is better than nowhere. Plus it provides some non-obvious + * context. */ + } // if (clear_on_alloc) } // else if constexpr(!S_IS_VANILLA_ALLOC) - } // if (new_capacity <= numeric_limits::max()) // (See explanation just below.) + } // if (new_capacity <= numeric_limits::max()) // (See explanation just below.) 
else { assert(false && "Enormous or corrupt new_capacity?!"); @@ -2050,6 +2366,20 @@ void Basic_blob::reserve(size_type new_capacity, l template void Basic_blob::resize(size_type new_size, size_type new_start_or_unchanged, log::Logger* logger_ptr) +{ + resize_impl(new_size, false, new_start_or_unchanged, logger_ptr); +} + +template +void Basic_blob::resize(size_type new_size, Clear_on_alloc, + size_type new_start_or_unchanged, log::Logger* logger_ptr) +{ + resize_impl(new_size, true, new_start_or_unchanged, logger_ptr); +} + +template +void Basic_blob::resize_impl(size_type new_size, bool clear_on_alloc, + size_type new_start_or_unchanged, log::Logger* logger_ptr) { auto& new_start = new_start_or_unchanged; if (new_start == S_UNCHANGED) @@ -2059,16 +2389,12 @@ void Basic_blob::resize(size_type new_size, size_t const size_type min_capacity = new_start + new_size; - // Sanity checks/input checks (depending on how you look at it). - assert(min_capacity >= new_size); - assert(min_capacity >= new_start); - /* Ensure there is enough space for new_size starting at new_start. Note, in particular, this disallows * enlarging non-zero() buffer. * (If they want, they can explicitly call make_zero() first. But they must do so consciously, so that they're * forced to consider the performance impact of such an action.) Also note that zero() continues to be true * if was true. */ - reserve(min_capacity, logger_ptr); + reserve_impl(min_capacity, clear_on_alloc, logger_ptr); assert(capacity() >= min_capacity); if (!zero()) @@ -2076,7 +2402,7 @@ void Basic_blob::resize(size_type new_size, size_t m_size = new_size; m_start = new_start; } - // else { zero(): m_size is meaningless; size() == 0, as desired. } + // else { zero(): m_size, m_start are meaningless; size() and start() == 0, as desired. 
} assert(size() == new_size); assert(start() == new_start); @@ -2110,7 +2436,7 @@ void Basic_blob::clear() template void Basic_blob::make_zero(log::Logger* logger_ptr) { - /* Could also write more elegantly: `swap(Basic_blob());`, but following is a bit optimized (while equivalent); + /* Could also write more elegantly: `swap(Basic_blob{});`, but following is a bit optimized (while equivalent); * logs better. */ if (!zero()) { @@ -2130,7 +2456,7 @@ void Basic_blob::make_zero(log::Logger* logger_ptr } } - m_buf_ptr.reset(); + buf_ptr().reset(); } // if (!zero()) } // Basic_blob::make_zero() @@ -2144,9 +2470,9 @@ typename Basic_blob::size_type /* Either just set m_start = 0 and decrease/keep-constant (m_start + m_size) = n; or allocate exactly n-sized buffer * and set m_start = 0, m_size = n. * As elsewhere, the latter case requires that zero() be true currently (but they can force that with make_zero()). */ - resize(n, 0); // It won't log, as it cannot allocate, so no need to pass-through a Logger*. + resize(n, 0, logger_ptr); - // Performance: Basically equals: memcpy(m_buf_ptr, src.start, src.size). + // Performance: Basically equals: memcpy(buf_ptr(), src.start, src.size). emplace_copy(const_begin(), src, logger_ptr); // Corner case: n == 0. Above is equivalent to: if (!zero()) { m_size = m_start = 0; }. That behavior is advertised. @@ -2269,7 +2595,7 @@ typename Basic_blob::Iterator const Iterator dest = iterator_sans_const(first); - if (past_last > first) // (Note: `past_last < first` allowed, not illegal.) + if (past_last > first) // (Note: `past_last <= first` allowed, not illegal.) 
{ const auto n_moved = size_type(const_end() - past_last); @@ -2353,11 +2679,11 @@ typename Basic_blob::Iterator { if (zero()) { - return 0; + return nullptr; } // else - /* m_buf_ptr.get() is value_type* when Buf_ptr = regular shared_ptr; but possibly Some_fancy_ptr + /* buf_ptr().get() is value_type* when Buf_ptr = regular shared_ptr; but possibly Some_fancy_ptr * when Buf_ptr = boost::interprocess::shared_ptr, namely when * Allocator_raw::pointer = Some_fancy_ptr and not simply value_type* again. We need value_type*. * Fancy-pointer is not really an officially-defined concept (offset_ptr<> is an example of one). @@ -2367,7 +2693,7 @@ typename Basic_blob::Iterator * * @todo In C++20 can replace this with std::to_address(). Or can implement our own (copy cppreference.com impl). */ - const auto raw_or_fancy_buf_ptr = m_buf_ptr.get(); + const auto raw_or_fancy_buf_ptr = buf_ptr().get(); return &(*raw_or_fancy_buf_ptr) + m_start; } @@ -2464,7 +2790,42 @@ template typename Basic_blob::Allocator_raw Basic_blob::get_allocator() const { - return m_alloc_raw; + return alloc_raw(); +} + +template +typename Basic_blob::Buf_ptr& Basic_blob::buf_ptr() +{ + return m_alloc_and_buf_ptr.second(); +} + +template +const typename Basic_blob::Buf_ptr& + Basic_blob::buf_ptr() const +{ + return const_cast(this)->buf_ptr(); +} + +template +typename Basic_blob::Allocator_raw& Basic_blob::alloc_raw() +{ + return m_alloc_and_buf_ptr.first(); +} + +template +const typename Basic_blob::Allocator_raw& + Basic_blob::alloc_raw() const +{ + return const_cast(this)->alloc_raw(); +} + + +template +Basic_blob::Deleter_raw::Deleter_raw() : + m_buf_sz(0) +{ + /* It can be left `= default;`, but some gcc versions then complain m_buf_sz may be used uninitialized (not true but + * such is life). */ } template @@ -2472,19 +2833,97 @@ Basic_blob::Deleter_raw::Deleter_raw(const Allocat /* Copy allocator; a stateless allocator should have size 0 (no-op for the processor in that case... 
except * the optional<> registering it has-a-value). */ m_alloc_raw(std::in_place, alloc_raw), - m_buf_sz(buf_sz) // We store a T*, where T is a trivial-deleter PoD, but we delete an array of Ts: this many. + m_buf_sz(buf_sz) // Smart-ptr stores a T*, where T is a trivial-deleter PoD, but we delete an array of Ts: this many. { // OK. } template -Basic_blob::Deleter_raw::Deleter_raw() : - m_buf_sz(0) +Basic_blob::Deleter_raw::Deleter_raw(Deleter_raw&& moved_src) { - /* This ctor is never invoked (see this ctor's doc header). It can be left `= default;`, but some gcc versions - * then complain m_buf_sz may be used uninitialized (not true but such is life). */ + /* We advertised our action is as-if we default-ct, then move-assign. While we skipped delegating to default-ctor, + * the only difference is that would've initialized m_buf_sz; but the following will just overwrite it anyway. So + * we can in fact move-assign now, and that's it. */ + operator=(std::move(moved_src)); } +/* Auto-generated copy-ct should be fine; the only conceivable source of trouble might be Allocator_raw copy-ction, + * but that must exist for all allocators. */ +template +Basic_blob::Deleter_raw::Deleter_raw(const Deleter_raw&) = default; + +template +typename Basic_blob::Deleter_raw& + Basic_blob::Deleter_raw::operator=(Deleter_raw&& moved_src) +{ + using std::swap; + + if (this != &moved_src) // @todo Maybe assert() on this, since our uses are so locked-down? + { + m_buf_sz = 0; + swap(m_buf_sz, moved_src.m_buf_sz); + + /* That's that for m_buf_sz; that leaves m_alloc_raw. That is trickier than one might think; a swap + * or explicitly copy- or move-assigning it will work with many allocators, but some are not assignable at all + * (for example boost::interprocess::allocator which is stateful). (There are good reasons for that having to + * do with propagate_on_container_*_assignment, but never mind; our task here is simpler than those worries.) 
+ * Bottom line is, every allocator is copy-constructible, and we store m_alloc_raw as an optional<>, so + * we can simulate an assignment via destroy (if needed) + copy-construction, namely using optional::emplace(). + * + * Plus, arguably an optimization: it is very common they're the same allocator by-value (e.g., stateless + * allocators of the same class are always mutually equal, period); in which case can no-op. */ + if (!moved_src.m_alloc_raw) + { + // Another corner case. @todo Maybe assert() on this not being the case, since our uses are so locked-down? + m_alloc_raw.reset(); + // m_alloc_raw has been as-if copied over; and moved_src's guy is already as-if default-cted, as promised. + } + else + { + const auto& src_alloc_raw = *moved_src.m_alloc_raw; + if ((!m_alloc_raw) || (*m_alloc_raw != src_alloc_raw)) + { + m_alloc_raw.emplace(src_alloc_raw); // Destroy if needed; then copy-construct. + } + // else { m_alloc_raw is already as-if copied from moved_src.m_alloc_raw: the aforementioned optimization. } + + // m_alloc_raw has been copied over; as promised reset moved_src's guy to as-if-default-cted. + moved_src.m_alloc_raw.reset(); + } + } // if (this != &moved_src) + + return *this; +} // Basic_blob::Deleter_raw::operator=(&&) + +template +typename Basic_blob::Deleter_raw& + Basic_blob::Deleter_raw::operator=(const Deleter_raw& src) +{ + /* Ideally we'd just use `= default;`, but that might not compile, when Allocator_raw has no copy-assignment + * (as noted elsewhere this is entirely possible). So basically perform a simpler version of the move-assignment + * impl. Keeping comments light; please see move-assignment impl. 
*/ + + if (this != &src) + { + m_buf_sz = src.m_buf_sz; + + if (!src.m_alloc_raw) + { + m_alloc_raw.reset(); + } + else + { + const auto& src_alloc_raw = *src.m_alloc_raw; + if ((!m_alloc_raw) || (*m_alloc_raw != src_alloc_raw)) + { + m_alloc_raw.emplace(src_alloc_raw); // Having to do this for some `Allocator_raw`s is why we can't `= default;`. + } + } + } // if (this != &src) + + return *this; +} // Basic_blob::Deleter_raw::operator=(const&) + template void Basic_blob::Deleter_raw::operator()(Pointer_raw to_delete) { diff --git a/src/flow/util/blob.cpp b/src/flow/util/blob.cpp new file mode 100644 index 000000000..605bd5594 --- /dev/null +++ b/src/flow/util/blob.cpp @@ -0,0 +1,29 @@ +/* Flow + * Copyright 2023 Akamai Technologies, Inc. + * + * Licensed under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in + * compliance with the License. You may obtain a copy + * of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in + * writing, software distributed under the License is + * distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing + * permissions and limitations under the License. */ + +/// @file + +#include "flow/util/basic_blob.hpp" + +namespace flow::util +{ + +// Initializations. + +const Clear_on_alloc CLEAR_ON_ALLOC; + +} // namespace flow::util diff --git a/src/flow/util/blob.hpp b/src/flow/util/blob.hpp index 5e35955d0..97454c686 100644 --- a/src/flow/util/blob.hpp +++ b/src/flow/util/blob.hpp @@ -23,6 +23,8 @@ namespace flow::util { +// Types. + /** * Basic_blob that works in regular heap (and is itself placed in heap or stack) and memorizes a log::Logger, * enabling easier logging albeit with a small perf trade-off. 
Most users will use the concrete types @@ -128,7 +130,7 @@ class Blob_with_log_context : * @param logger_ptr * The Logger implementation to use subsequently. */ - Blob_with_log_context(log::Logger* logger_ptr = 0); + Blob_with_log_context(log::Logger* logger_ptr = nullptr); /** * On top of the similar 3-arg Basic_blob ctor, memorizes the given log::Logger for all future logging @@ -142,15 +144,33 @@ class Blob_with_log_context : */ explicit Blob_with_log_context(log::Logger* logger_ptr, size_type size); + /** + * On top of the similar 4-arg Basic_blob ctor, memorizes the given log::Logger for all future logging + * in `*this`. (Except, technically, one can subsequently override this by using super-class APIs which take + * `Logger*`.) + * + * @param logger_ptr + * The Logger implementation to use subsequently. + * @param size + * See super-class API. + * @param coa_tag + * See super-class API. + */ + explicit Blob_with_log_context(log::Logger* logger_ptr, size_type size, Clear_on_alloc coa_tag); + /** * On top of the similar Basic_blob move ctor, moves the source object's log::Logger for all future logging * in `*this`. (Except, technically, one can subsequently override this by using super-class APIs which take * `Logger*`.) * + * @note It is important this be `noexcept`, if a copying counterpart to us exists in this class; otherwise + * (e.g.) `vector` will, on realloc, default to copying `*this`es around instead of + * moving: a terrible (in its stealthiness) perf loss. + * * @param moved_src * See super-class API. */ - Blob_with_log_context(Blob_with_log_context&& moved_src); + Blob_with_log_context(Blob_with_log_context&& moved_src) noexcept; /** * On top of the similar Basic_blob copy ctor, copies the source object's log::Logger for all future logging @@ -174,11 +194,15 @@ class Blob_with_log_context : * operator. In Blob_with_log_context, unlike Basic_blob, there is no need for an extra `logger_ptr` optional * arg. 
* + * @note It is important this be `noexcept`, if a copying counterpart to us exists in this class; otherwise + * (e.g.) `vector` will, on realloc, default to copying `*this`es around instead of + * moving: a terrible (in its stealthiness) perf loss. + * * @param moved_src * See super-class API. * @return See super-class API. */ - Blob_with_log_context& operator=(Blob_with_log_context&& moved_src); + Blob_with_log_context& operator=(Blob_with_log_context&& moved_src) noexcept; /** * On top of the similar Basic_blob method, logs using the stored log context. @@ -199,7 +223,7 @@ class Blob_with_log_context : * @param other * See super-class API. */ - void swap(Blob_with_log_context& other); + void swap(Blob_with_log_context& other) noexcept; /** * On top of the similar Basic_blob method, logs using the stored log context and copies it to the returned @@ -319,6 +343,16 @@ class Blob_with_log_context : */ void reserve(size_type capacity); + /** + * On top of the similar Basic_blob method, logs using the stored log context. + * + * @param capacity + * See super-class API. + * @param coa_tag + * See super-class API. + */ + void reserve(size_type capacity, Clear_on_alloc coa_tag); + /// On top of the similar Basic_blob method, logs using the stored log context. void make_zero(); @@ -332,6 +366,18 @@ class Blob_with_log_context : */ void resize(size_type size, size_type start_or_unchanged = S_UNCHANGED); + /** + * On top of the similar Basic_blob method, logs using the stored log context. + * + * @param size + * See super-class API. + * @param start_or_unchanged + * See super-class API. + * @param coa_tag + * See super-class API. + */ + void resize(size_type size, Clear_on_alloc coa_tag, size_type start_or_unchanged = S_UNCHANGED); + // private: There are no added data per se, but there is the added base, log::Log_context, which stores some stuff. 
}; // class Blob_with_log_context @@ -342,7 +388,7 @@ class Blob_with_log_context : template Blob_with_log_context::Blob_with_log_context(log::Logger* logger_ptr) : log::Log_context(logger_ptr, Base::S_LOG_COMPONENT) - // And default-ct Base(). + // And default-ct Base{}. { // Nothing else. } @@ -356,7 +402,16 @@ Blob_with_log_context::Blob_with_log_context(log::Logger* log } template -Blob_with_log_context::Blob_with_log_context(Blob_with_log_context&& moved_src) : +Blob_with_log_context::Blob_with_log_context(log::Logger* logger_ptr, size_type size, + Clear_on_alloc coa_tag) : + log::Log_context(logger_ptr, Base::S_LOG_COMPONENT), + Base(size, coa_tag, get_logger()) +{ + // Nothing else. +} + +template +Blob_with_log_context::Blob_with_log_context(Blob_with_log_context&& moved_src) noexcept : log::Log_context(static_cast(std::move(moved_src))), Base(std::move(moved_src), get_logger()) { @@ -393,7 +448,7 @@ Blob_with_log_context& template Blob_with_log_context& - Blob_with_log_context::operator=(Blob_with_log_context&& moved_src) + Blob_with_log_context::operator=(Blob_with_log_context&& moved_src) noexcept { using log::Log_context; @@ -403,7 +458,7 @@ Blob_with_log_context& } template -void Blob_with_log_context::swap(Blob_with_log_context& other) +void Blob_with_log_context::swap(Blob_with_log_context& other) noexcept { using log::Log_context; using std::swap; @@ -416,7 +471,7 @@ void Blob_with_log_context::swap(Blob_with_log_context& other } template -void swap(Blob_with_log_context& blob1, Blob_with_log_context& blob2) +void swap(Blob_with_log_context& blob1, Blob_with_log_context& blob2) noexcept { return blob1.swap(blob2); } @@ -424,7 +479,7 @@ void swap(Blob_with_log_context& blob1, Blob_with_log_context template Blob_with_log_context Blob_with_log_context::share() const { - Blob_with_log_context blob(get_logger()); + Blob_with_log_context blob{get_logger()}; static_cast(blob) = Base::share(get_logger()); return blob; } @@ -433,7 +488,7 @@ template 
Blob_with_log_context Blob_with_log_context::share_after_split_left(size_type lt_size) { - Blob_with_log_context blob(get_logger()); + Blob_with_log_context blob{get_logger()}; static_cast(blob) = Base::share_after_split_left(lt_size, get_logger()); return blob; } @@ -442,7 +497,7 @@ template Blob_with_log_context Blob_with_log_context::share_after_split_right(size_type rt_size) { - Blob_with_log_context blob(get_logger()); + Blob_with_log_context blob{get_logger()}; static_cast(blob) = Base::share_after_split_right(rt_size, get_logger()); return blob; } @@ -490,7 +545,7 @@ void Blob_with_log_context::share_after_split_equally_emit_pt share_after_split_equally(size, headless_pool, [&](Blob_with_log_context&& blob_moved) { - out_blobs_ptr->push_back(Ptr(new Blob_with_log_context(std::move(blob_moved)))); + out_blobs_ptr->push_back(Ptr{new Blob_with_log_context{std::move(blob_moved)}}); }); } @@ -500,12 +555,25 @@ void Blob_with_log_context::reserve(size_type new_capacity) Base::reserve(new_capacity, get_logger()); } +template +void Blob_with_log_context::reserve(size_type new_capacity, Clear_on_alloc coa_tag) +{ + Base::reserve(new_capacity, coa_tag, get_logger()); +} + template void Blob_with_log_context::resize(size_type new_size, size_type new_start_or_unchanged) { Base::resize(new_size, new_start_or_unchanged, get_logger()); } +template +void Blob_with_log_context::resize(size_type new_size, Clear_on_alloc coa_tag, + size_type new_start_or_unchanged) +{ + Base::resize(new_size, coa_tag, new_start_or_unchanged, get_logger()); +} + template void Blob_with_log_context::make_zero() { diff --git a/src/flow/util/blob_fwd.hpp b/src/flow/util/blob_fwd.hpp index 707bb3f30..e1f79ad3c 100644 --- a/src/flow/util/blob_fwd.hpp +++ b/src/flow/util/blob_fwd.hpp @@ -32,6 +32,8 @@ class Basic_blob; template class Blob_with_log_context; +struct Clear_on_alloc; + /** * Short-hand for a Basic_blob that allocates/deallocates in regular heap and is itself assumed to be stored * in 
heap or on stack; sharing feature compile-time-disabled (with perf boost as a result). @@ -67,6 +69,11 @@ using Blob = Blob_with_log_context<>; */ using Sharing_blob = Blob_with_log_context; +// Constants. + +/// Tag value indicating init-with-zeroes-on-alloc policy. +extern const Clear_on_alloc CLEAR_ON_ALLOC; + // Free functions. /** @@ -103,7 +110,7 @@ bool blobs_sharing(const Basic_blob& blob1, */ template void swap(Basic_blob& blob1, - Basic_blob& blob2, log::Logger* logger_ptr = 0); + Basic_blob& blob2, log::Logger* logger_ptr = nullptr) noexcept; /** * On top of the similar Basic_blob related function, logs using the stored log context of `blob1`. @@ -115,6 +122,6 @@ void swap(Basic_blob& blob1, * See super-class related API. */ template -void swap(Blob_with_log_context& blob1, Blob_with_log_context& blob2); +void swap(Blob_with_log_context& blob1, Blob_with_log_context& blob2) noexcept; } // namespace flow::util diff --git a/src/flow/util/detail/linked_hash.hpp b/src/flow/util/detail/linked_hash.hpp new file mode 100644 index 000000000..c68df8570 --- /dev/null +++ b/src/flow/util/detail/linked_hash.hpp @@ -0,0 +1,334 @@ +/* Flow + * Copyright 2023 Akamai Technologies, Inc. + * + * Licensed under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in + * compliance with the License. You may obtain a copy + * of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in + * writing, software distributed under the License is + * distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing + * permissions and limitations under the License. */ + +/// @file +#pragma once + +#include "flow/util/detail/util_fwd.hpp" +#include + +namespace flow::util +{ + +// Types. 
+ +/** + * The internal-use key/iterator-wrapper, used as the key type in internal-use set-type #Linked_hash_key_set. + * + * For background please see Linked_hash_map doc header Impl section, and where that points. Note that + * the same information applies to Linked_hash_set as well. (`IS_ITER_TO_PAIR` indicates which guy this is helping + * implement.) + * + * That said, we very specifically just need to: + * + * - store either a #Key pointer (when a `*this` is used as the arg to `Linked_hash_key_set::find()` or an + * #Iterator copy (ditto but to `Linked_hash_key_set::insert()`); + * - be implicit-constructible from either (so the `Key` or `Iterator` can be passed seamlessly into + * `.find()` and `.insert()` respectively); + * - in the iterator-storing case provide access to that iterator via iter() accessor. + * + * @tparam Key_t + * See Linked_hash_map, Linked_hash_set. + * @tparam Iterator_t + * Linked_hash_map::Iterator or Linked_hash_set::Iterator. + * @tparam IS_ITER_TO_PAIR + * `true` for Linked_hash_map, `false` for Linked_hash_set. + */ +template +class Linked_hash_key +{ +public: + // Types. + + /// Convenience alias for template arg. + using Key = Key_t; + + /// Convenience alias for template arg. + using Iterator = Iterator_t; + + // Constants. + + /// Convenience alias for template arg. + static constexpr bool S_IS_ITER_TO_PAIR = IS_ITER_TO_PAIR; + + // Constructors/destructor. + + /** + * Constructs `*this` to contain a pointer to a #Key living outside the `*this`-using data structure + * #Linked_hash_key_set (presumably as arg to its `.find()`). + * + * @param key + * The key to which to point. The referred object must continue to be valid, until either + * `*this` is destroyed, or via assignment `*this` changes value. + */ + Linked_hash_key(const Key& key); + + /** + * Constructs `*this` to contain an #Iterator into the `*this`-using data structure + * #Linked_hash_key_set (presumably as arg to its `.insert()`). 
+ * + * @param it + * The iterator a copy of which to store. The pointee of the iterator must not be erased or moved in + * an iterator-invalidating fashion, until either + * `*this` is destroyed, or via assignment `*this` changes value. + */ + Linked_hash_key(const Iterator& it); + + // Methods. + + /** + * Returns reference to immutable key to which we saved a pointer via one of our constructor forms. + * Specifically, then, that's either simply the ctor-saved pointer (in reference form); or the address of the key + * stored as stored within the `*this`-using data structure #Linked_hash_key_set. + * + * @return See above. + */ + const Key& key() const; + + /** + * Assuming we were as-if constructed via the ctor that takes an #Iterator (as opposed to a #Key reference), + * returns copy of the stored iterator. Behavior undefined if the assumption does not hold (as of this writing + * exception is thrown). + * + * Informally: + * if `this->key()` *was* found in the #Linked_hash_key_set, then this accessor may be used; else it may not. + * That `*this`-using data structure -- a hash-set -- shall use Linked_hash_key_hash and Linked_hash_key_pred for its + * hasher and equality functors respectively. + * + * @return See above. + */ + Iterator iter() const; + +private: + // Types. + + /// Short-hand for raw pointer to an immutable #Key living outside the `*this`-using data structure. + using Key_ptr = Key const *; + + // Data. + + /** + * Stores the key, without copying its actual value, as either a pointer to an immutable key, or as an iterator + * into a structure such that it contains the key. In the latter case: + * + * - #S_IS_ITER_TO_PAIR determines how to obtain the key from the iterator pointee; + * - the iterator is itself of value to the `Linked_hash_key_*` user (Linked_hash_set, Linked_hash_map). 
+ */ + std::variant m_key_hndl; +}; // class Linked_hash_key + +/** + * The internal-use `Hash` functor wrapper, used as the hasher type in internal-use set-type #Linked_hash_key_set. + * + * Impl note: We store the `Hash` via private inheritance, making use of Empty Base-class Optimization (EBO) if + * possible; namely when `Hash` is an empty type (which is typical); in which case this shall waste no space. + * + * @tparam Hash + * See Linked_hash_map, Linked_hash_set. + */ +template +class Linked_hash_key_hash : + private Hash +{ +public: + // Constructors/destructor. + + /** + * Saves a (typically data-free) copy of a given Linked_hash_map or Linked_hash_set hasher object, + * as passed to that type's ctor. + * + * @param hasher + * See above. + */ + Linked_hash_key_hash(const Hash& hasher = Hash{}); + + // Methods. + + /** + * Returns hash of `val.key()`, where `val` is a Linked_hash_key instance, using the saved hasher object. + * + * @tparam Linked_hash_key_t + * A concrete Linked_hash_key type: the key-type of #Linked_hash_key_set being used. + * @param val + * Value to hash. + * @return See above. + */ + template + size_t operator()(const Linked_hash_key_t& val) const; +}; // class Linked_hash_key_hash + +/** + * The internal-use `Pred` functor wrapper, used as the key-equality-determiner type in internal-use set-type + * #Linked_hash_key_set. + * + * Impl note: We store the `Pred` via private inheritance, making use of Empty Base-class Optimization (EBO) if + * possible; namely when `Pred` is an empty type (which is typical); in which case this shall waste no space. + * + * @tparam Pred + * See Linked_hash_map, Linked_hash_set. + */ +template +class Linked_hash_key_pred : + private Pred +{ +public: + // Constructors/destructor. + + /** + * Saves a (typically data-free) copy of a given Linked_hash_map or Linked_hash_set equality-determiner object, + * as passed to that type's ctor. + * + * @param pred + * See above. 
+ */ + Linked_hash_key_pred(const Pred& pred = Pred{}); + + // Methods + + /** + * Returns `true` if and only if `lhs.key()` and `rhs.key()` (where `lhs` and `rhs` are Linked_hash_key instances) + * are equal, using the saved equality-determiner object. + * + * @tparam Linked_hash_key_t + * A concrete Linked_hash_key type. + * @param lhs + * Value to compare. + * @param rhs + * Value to compare. + * @return See above. + */ + template + bool operator()(const Linked_hash_key_t& lhs, const Linked_hash_key_t& rhs) const; +}; // class Linked_hash_key_pred + +// Template implementations. + +// Linked_hash_key implementations. + +template +Linked_hash_key::Linked_hash_key(const Key& key) : + m_key_hndl(&key) // Store a Key* in the union. +{ + // Yep. +} + +template +Linked_hash_key::Linked_hash_key(const Iterator& it) : + m_key_hndl(it) // Store an Iterator in the union. +{ + // Yep. +} + +template +const Key_t& + Linked_hash_key::key() const +{ + using std::holds_alternative; + using std::get; + + if (holds_alternative(m_key_hndl)) + { + return *(get(m_key_hndl)); + } + // else + + if constexpr(S_IS_ITER_TO_PAIR) + { + return get(m_key_hndl)->first; // Iterator into list of `const Key`s. + } + else + { + return *(get(m_key_hndl)); // Iterator into list of pairs. + } +} + +template +Iterator_t + Linked_hash_key::iter() const +{ + return std::get(m_key_hndl); + // ^-- Throws if !holds_alternative(m_key_hndl); we advertised undefined behavior; so that's fine. +} + +// Linked_hash_key_hash implementations. + +template +Linked_hash_key_hash::Linked_hash_key_hash(const Hash& hasher) : + Hash(hasher) // Store `hasher` copy in our super-class, making use of Empty Base-class Optimization (EBO) if possible. +{ + /* For context: A regular unordered_set would store the `Hash hasher` copy inside itself. + * In our case it is unordered_set, ...> instead, so a *this is + * instead stored; and we store the original `Hash hasher` inside us (and nothing else). 
So it's the exact same + * thing in terms of what actually ends up in memory and likely in terms of processor cycles spent. + * + * Why do we even exist? Answer: Just for the extra little code in operator()(). */ +} + +template +template +size_t Linked_hash_key_hash::operator()(const Linked_hash_key_t& val) const +{ + return this->Hash::operator()(val.key()); // The piddly .key() call is the reason this class exists. +} + +// Linked_hash_key_pred implementations. + +template +Linked_hash_key_pred::Linked_hash_key_pred(const Pred& pred) : + Pred(pred) // Store `pred` copy in our super-class, making use of Empty Base-class Optimization (EBO) if possible. +{ + /* For context: A regular unordered_set would store the `Pred pred` copy inside itself. + * In our case it is unordered_set> instead, so a *this is + * instead stored; and we store the original `Pred pred` inside us (and nothing else). So it's the exact same + * thing in terms of what actually ends up in memory and likely in terms of processor cycles spent. + * + * Why do we even exist? Answer: Just for the extra little code in operator()(). */ +} + +template +template +bool Linked_hash_key_pred::operator()(const Linked_hash_key_t& lhs, const Linked_hash_key_t& rhs) const +{ + return this->Pred::operator()(lhs.key(), rhs.key()); // The piddly .key() calls are the reason this class exists. + + /* @todo Arguably this could be sped-up in the case where lhs and rhs both hold `Iterator`s + * (holds_alternative([lr]hs.m_key_hndl)); in which case one would return `lhs.iter() == rhs.iter()`. + * This would require formally that lhs and rhs are guaranteed to point into the same Linked_hash_{set|map}, and + * the formal doc for this operator()() would specify that it's not merely comparison of .key() values by Pred. + * + * "Arguably" above refers to how that might be too much trouble "just" to avoid a by-value key comparison in + * some cases (some cases as of this writing = Linked_hash_*::erase(). 
Then again that does + * make sense, if one breaks down the actual use cases in Linked_hash_set/map: + * - lhs and rhs are both iterator-storing <=> those "some cases," namely where lookup is by user-provided iterator. + * - lhs is iterator-storing; rhs is key-ptr-storing <=> the other cases, namely where lookup is by user-provided + * key. + * - The reverse <=> Shouldn't happen; the "haystack" Linked_hash_key_set is on the left. + * - lhs and rhs are both key-ptr-storing <=> Shouldn't happen; lookup is only needed when there's a haystack + * involved. + * + * So we could write it that way -- perhaps assert(false)ing on the "shouldn't happen" cases -- and arguably it + * would be (1) conceptually tighter (serving the exact known purpose) and (2) probably faster (avoids key comparison + * at least sometimes). On the other hand internal documentation would be more complex, and the code here would + * be longer/more complex (probably requiring among other things either `friend`ship or additional accessor(s) on + * Linked_hash_key. + * + * Oh, and also, I (ygoldfel) am not 100% certain about the following, but another difficulty might arise when + * Iterator and Const_iterator are not the same type.... Bottom line, probably best not jump into this, unless + * the perf gain is really determined to be worthwhile for someone in practice. */ +} + +} // namespace flow::util diff --git a/src/flow/util/detail/sched_task_handle_state.cpp b/src/flow/util/detail/sched_task_handle_state.cpp index b9f95728d..8b7e50180 100644 --- a/src/flow/util/detail/sched_task_handle_state.cpp +++ b/src/flow/util/detail/sched_task_handle_state.cpp @@ -30,7 +30,8 @@ Scheduled_task_handle_state::Scheduled_task_handle_state(Unique_id_holder::id_t m_id(id), m_body(std::move(body_moved)), m_task_engine(task_engine), - m_mutex_unless_single_threaded(single_threaded ? static_cast(0) : (new Mutex_non_recursive)), + m_mutex_unless_single_threaded(single_threaded ? 
static_cast(nullptr) + : (new Mutex_non_recursive)), m_timer(*m_task_engine), m_fired(false), m_canceled(false) diff --git a/src/flow/util/detail/util.hpp b/src/flow/util/detail/util.hpp index ffb9b0a1d..2b5f81443 100644 --- a/src/flow/util/detail/util.hpp +++ b/src/flow/util/detail/util.hpp @@ -47,13 +47,13 @@ Fine_time_pt chrono_duration_from_now_to_fine_time_pt(const boost::chrono::durat assert(dur.count() >= 0); return (dur == User_duration::max()) - ? Fine_time_pt() + ? Fine_time_pt{} : (Fine_clock::now() + ceil(dur)); } constexpr String_view get_last_path_segment(String_view full_path) { - String_view path(full_path); // This only copies the pointer and length (not the string). + String_view path{full_path}; // This only copies the pointer and length (not the string). // @todo Get it from boost::filesystem or something: # ifdef FLOW_OS_WIN constexpr char SEP = '\\'; diff --git a/src/flow/util/detail/util_fwd.hpp b/src/flow/util/detail/util_fwd.hpp index f09c140a0..0d8ae0607 100644 --- a/src/flow/util/detail/util_fwd.hpp +++ b/src/flow/util/detail/util_fwd.hpp @@ -21,6 +21,7 @@ #include "flow/util/string_view.hpp" #include "flow/common.hpp" #include +#include /// @cond // -^- Doxygen, please ignore the following. @@ -93,6 +94,33 @@ namespace flow::util */ namespace bind_ns = boost; +// Find doc headers near the bodies of these compound types. + +template +class Linked_hash_key; +template +class Linked_hash_key_hash; +template +class Linked_hash_key_pred; + +/** + * The lookup structure used inside Linked_hash_map and Linked_hash_set. See the former's doc header(s). + * + * @tparam Key + * See Linked_hash_map, Linked_hash_set. + * @tparam Iterator + * Linked_hash_map::Iterator or Linked_hash_set::Iterator. + * @tparam Hash + * See Linked_hash_map, Linked_hash_set. + * @tparam Pred + * See Linked_hash_map, Linked_hash_set. + * @tparam IS_ITER_TO_PAIR + * `true` for Linked_hash_map, `false` for Linked_hash_set. 
+ */ +template +using Linked_hash_key_set = boost::unordered_set, + Linked_hash_key_hash, + Linked_hash_key_pred>; // Free functions. /** @@ -113,7 +141,7 @@ Fine_duration chrono_duration_to_fine_duration(const boost::chrono::duration -#include -#include namespace flow::util { /** - * An object of this class is a map that combines the lookup speed of a `boost::unordered_map<>` and ordering and - * iterator stability capabilities of an `std::list<>`. + * An object of this class is a map that combines the lookup speed of an `unordered_map<>` and ordering and + * iterator stability capabilities of a `list<>`. * * The API is generally that of an `unordered_map<>`. The differences essentially all have to do with iterators. * This map introduces a concept of "newness," which determines the iteration order. Moreover, *every* iterator remains @@ -38,59 +37,117 @@ namespace flow::util * thus formed orders elements from newest to oldest (hence newest() is begin(), past_oldest() is end()). * * Performance expectations: The best way to determine a method's time needs is to - * imagine what it must do. If it must perform a lookup by key, that is an `unordered_map<>` lookup resulting in an + * imagine what it must do. If it must perform a lookup by key, that is an `unordered_set<>` lookup resulting in an * (at least internal) iterator. If it must insert an element, it is always inserted at the start of a `list`; and - * also into an `unordered_map<>`. If it must erase an element based on an iterator, that element is erased from a list - * based on that iterator; and also by key from an `unordered_map<>`. Iteration itself is iteration along a `list`. + * also into an `unordered_set<>`. If it must erase an element based on an iterator, that element is erased from a list + * based on that iterator; and also by key from said `unordered_set<>`. Iteration itself is iteration along a `list`. * But essentially, every operation is either near constant time or constant time. 
- * In terms of space needs, this essentially stores the values themselves in a `list`; and also a copy of each key - * in an `unordered_map<>`, which also stores a pointer or list iterator per element. + * In terms of space needs, this essentially stores the values themselves in a `list`; and also a pointer to each + * list-held key/element in an `unordered_set<>`, which also stores a pointer or list iterator per element. + * + * Move semantics for both keys and mapped-values are supported (let `T` be a concrete type for a `*this` and `x` + * a `*this`): + * - `x.insert(std::make_pair(..., ...))`; + * - or `x.insert(T::Value_movable{..., ...})`; + * - `x[std::move(...)] = std::move(...)`. + * + * There is the standard complement of container-wide move operations: move-construction, move-assignment, and + * `swap()` (all constant-time, excluding any implied `this->clear()` in the move-assignment). + * + * The iterators are, really, `list>` iterators; and as such are not invalidated except + * due to direct erasure of a given pointee. + * + * @todo Linked_hash_map and Linked_hash_set have a reasonable complement of C++1x-ish APIs including move-semantics; + * but the API does not quite mirror the full complement of what is in existence for `unordered_*` counterparts in + * C++17 STL/Boost -- it would be nice to add these. This includes such things as `.emplace()` and `.try_emplace()` + * but more fundamentally would probably involve trolling `std::unordered_*` and copying its ~full API (and likely + * some of a decent impl too). That said what's available already acquits itself reasonably well. (Historically + * this was first written before C++11 and hasn't been given the full-on C++1x overhaul but instead merely the + * essentials thereof.) * * ### Thread safety ### * Same as for `unordered_map<>`. * - * @tparam Key - * Key type. Same requirements and behavior as `unordered_map<>` counterpart. 
Also (is it "in particular"?): - * `Key` must be Assignable, which is STL-speak for: If `Key x, y` are objects of this type, - * then `x = y;` is valid and has all the usual semantics. (There are other requirements, but - * that's the "controversial" one of interest.) In particular, `Key` cannot be of the form `T const` -- - * more commonly written as `const T` (but recall that, say, `const char*` = `char const *` really; - * which is therefore fine here). - * @tparam Mapped - * The 2nd (satellite) part of the `Value` pair type. Same requirements and behavior as `unordered_map<>` - * counterpart. Colloquially, in a K->V map, this is V, while formally the values stored are (K, V) - * pairs. - * @tparam Hash - * Hasher type. Same requirements and behavior as `unordered_map<>` counterpart. To get a hasher - * object, one must be able to call: `Hash h = Hash()`. To then hash a `Key`, one must be able to - * call `h(key)`. Typically one will simply define a `size_t hash_value(Key)` function, which will be - * activated via the default value for this template parameter. Defaults to `boost::hash`. - * @tparam Pred - * Equality functor type. Same requirements and behavior as `unordered_map<>` counterpart. - * Once a functor object `Pred e = Pred()` is obtained, `bool eq = e(a, b)` must return whether `a` equals - * `b`, where `a` and `b` are keys. Typically `operator==()` will be used via the default template parameter. - * Defaults to `std::equal_to`. + * @internal + * ### Impl notes ### + * You should get much of what you need to grok this just by reading the above and possibly looking at the Data + * section doc-headers under `private:`. Essentially, to repeat/recap: there's the `list>` + * to store the actual values, in order (#m_value_list); #Iterator and #Const_iterator come directly from there. + * + * When lookup by #Key is needed, the `unordered_set` #m_value_iter_set comes into play. This is arguably the + * only real mechanical trickiness. 
It actually stores `Iterator`s into #m_value_list but in such a way as to allow + * seamless lookup from a mere `const Key&`; so `m_value_iter_set.find(key)` "magically" either finds `.end()` -- + * then the key is not in `*this` -- or the iterator into the actual key/mapped-value store in #m_value_list. + * Using #m_value_iter_set is easy; but a bit of internal infrastructure is necessary to have it work. Namely + * we have support class template `Linked_hash_key`, and the `unordered_set m_value_iter_set` + * actually stores those guys, not raw #Key copies; so it is a wrapper around an `Iterator` *or* a #Key + * (union-style). #m_value_iter_set stores #Iterator wrappers, while lookup attempts use the #Key wrapper form + * to pass into `m_value_iter_set.find()`. + * + * An earlier version of Linked_hash_map instead used a simple `unordered_map` instead of the + * `unordered_set>`; so a lookup by key was just that. + * Eventually we replaced it with the more complex solution simply to avoid storing 2 copies of + * each #Key (one in the list, one in the map); as the `Iterator` itself + * includes a pointer to the thing containing the corresponding #Key in the first place. So this saves memory + * as well as various #Key copying. + * + * @endinternal + * + * @tparam Key_t + * Key type. We omit formal requirements, as it is tedious and full of corner cases depending on what + * you plan to invoke (e.g., whether you use move-semantics for keys). Please use common sense knowing + * the basic data structures involved as explained above. That said: if #Key is of non-trivial size, + * it is good to have it have performant move-constructibility and move-assignability and then make use + * of it via move-aware APIs as suggested in the doc header above. + * @tparam Mapped_t + * The 2nd (satellite) part of the #Value pair type. Same commentary as for #Key applies here. + * @tparam Hash_t + * Hasher type. Same requirements and behavior as `boost::unordered_set<>` counterpart. 
If using + * the default value for #Hash (`boost::hash`), and the default object is passed to ctor (`Hash{}`) (this + * is typical), but there is no hash-function already defined for #Key, then the easiest way to define + * it is: make a `size_t hash_value(Key)` free function in the same namespace as #Key. + * @tparam Pred_t + * Equality-determiner type. Same requirements and behavior as `boost::unordered_set<>` counterpart. If using + * the default value for #Pred (`std::equal_to`), and the default object is passed to ctor (`Pred{}`) + * (this is typical), but there is no equality op defined for #Key, then the easiest way to define + * it is: make an operator-method or free function such that `k1 == k2` (where `k1` and `k2` are `Key`s) + * determines equality or lack thereof. */ -template +template class Linked_hash_map { public: // Types. + /// Convenience alias for template arg. + using Key = Key_t; + + /// Convenience alias for template arg. + using Mapped = Mapped_t; + + /// Convenience alias for template arg. + using Hash = Hash_t; + + /// Convenience alias for template arg. + using Pred = Pred_t; + /// Short-hand for key/mapped-value pairs stored in the structure. using Value = std::pair; + /// Short-hand for key/mapped-value pair best-suited (perf-wise) as arg type for the moving `insert()` overload. + using Value_movable = std::pair; + private: // Types. These are here in the middle of public block due to inability to forward-declare aliases. - /// Short-hand for doubly linked list of (`Key`, `Mapped`) pairs. + /// Short-hand for doubly linked list of (#Key, #Mapped) pairs. using Value_list = std::list; public: // Types (continued). - /// Type for index into array of items, where items are all applicable objects including `Value`s and `Key`s. + /// Expresses sizes/lengths of relevant things. using size_type = std::size_t; /// Type for difference of `size_type`s. 
using difference_type = std::ptrdiff_t; @@ -107,24 +164,24 @@ class Linked_hash_map /// Type for reverse iterator pointing into an immutable structure of this type. using Const_reverse_iterator = typename Value_list::const_reverse_iterator; - /// For container compliance (hence the irregular capitalization): `Key` type. + /// For container compliance (hence the irregular capitalization): #Key type. using key_type = Key; - /// For container compliance (hence the irregular capitalization): `Mapped` type. + /// For container compliance (hence the irregular capitalization): #Mapped type. using mapped_type = Mapped; - /// For container compliance (hence the irregular capitalization): `Key`/`Mapped` pair type. + /// For container compliance (hence the irregular capitalization): #Key/#Mapped pair type. using value_type = Value; - /// For container compliance (hence the irregular capitalization): `Hash` type. + /// For container compliance (hence the irregular capitalization): #Hash type. using hasher = Hash; - /// For container compliance (hence the irregular capitalization): `Pred` type. + /// For container compliance (hence the irregular capitalization): #Pred type. using key_equal = Pred; - /// For container compliance (hence the irregular capitalization): pointer to `Key`/`Mapped` pair type. + /// For container compliance (hence the irregular capitalization): pointer to #Key/#Mapped pair type. using pointer = Value*; - /// For container compliance (hence the irregular capitalization): pointer to `const Key`/`Mapped` pair type. - using const_pointer = Value const *; - /// For container compliance (hence the irregular capitalization): reference to `Key`/`Mapped` pair type. + /// For container compliance (hence the irregular capitalization): pointer to `const Key`/#Mapped pair type. + using const_pointer = const Value*; + /// For container compliance (hence the irregular capitalization): reference to #Key/#Mapped pair type. 
using reference = Value&; - /// For container compliance (hence the irregular capitalization): reference to `const Key`/`Mapped` pair type. - using const_reference = Value const &; + /// For container compliance (hence the irregular capitalization): reference to `const Key`/#Mapped pair type. + using const_reference = const Value&; /// For container compliance (hence the irregular capitalization): #Iterator type. using iterator = Iterator; /// For container compliance (hence the irregular capitalization): #Const_iterator type. @@ -137,51 +194,54 @@ class Linked_hash_map * * @param n_buckets * Number of buckets for the unordered (hash) table. Special value -1 (default) will cause us to use - * whatever `unordered_map<>` would use by default. - * @param hasher_instance - * Instance of the hash function type (`hasher_instance(Key k)` should be `size_type`d hash of key `k`). - * @param key_equal_instance - * Instance of the equality function type (`key_equal_instance(Key k1, Key k2)` should return `true` if and - * only if `k1` equals `k2`). + * whatever `unordered_set<>` would use by default. + * @param hasher_obj + * Instance of the hash function type (`hasher_obj(k) -> size_t` should be hash of `Key k`). + * @param pred + * Instance of the equality function type (`pred(k1, k2)` should return `true` if and + * only if the `Key`s are equal by value). */ - explicit Linked_hash_map(size_type n_buckets = size_type(-1), - Hash const & hasher_instance = Hash(), - Pred const & key_equal_instance = Pred()); + Linked_hash_map(size_type n_buckets = size_type(-1), + const Hash& hasher_obj = Hash{}, + const Pred& pred = Pred{}); /** * Constructs structure with some basic parameters, and values initialized from initializer list. * The values are inserted as if `insert(v)` was called for each pair `v` in `values` - * in reverse order. Since the canonical ordering places the *newest* (last inserted/touch()ed) + * **in reverse order**. 
Since the canonical ordering places the *newest* (last inserted/`touch()`ed) * element at the *front* of the ordering, that means that forward iteration through the set (right after this * constructor runs) will yield values in the *same* order as in initializer list `values`. * * @param values * Values with which to fill the structure after initializing it. * Typically you'd provide a series of key/value pairs like this: - * `{{ key1, value1 }, { key2, value2 }, ...}`. They will appear in iterated sequence in the same order as + * `{ { key1, value1 }, { key2, value2 }, ... }`. They will appear in iterated sequence in the same order as * they appear in this list. * @param n_buckets * See other constructor. - * @param hasher_instance + * @param hasher_obj * See other constructor. - * @param key_equal_instance + * @param pred * See other constructor. */ explicit Linked_hash_map(std::initializer_list values, size_type n_buckets = size_type(-1), - Hash const & hasher_instance = Hash(), - Pred const & key_equal_instance = Pred()); + const Hash& hasher_obj = Hash{}, + const Pred& pred = Pred{}); /** - * Constructs object that is a copy of the given source. Equivalent to `operator=(src)`. + * Constructs object that is a copy of the given source. Equivalent to default-ction followed by `operator=(src)`. * * @param src * Source object. */ - Linked_hash_map(Linked_hash_map const & src); + Linked_hash_map(const Linked_hash_map& src); /** * Constructs object by making it equal to the given source, while the given source becomes as-if default-cted. + * Equivalent to default-ction followed by `operator=(std::move(src))`. + * + * This is a constant-time operation. * * @param src * Source object which is emptied. @@ -191,164 +251,149 @@ class Linked_hash_map // Methods. /** - * Overwrites this object with a copy of the given source. 
We become equal to `src` but independent of it to the max - * extent possible (if you've got pointers stored in there, for example, the pointers are copied, not the values - * at those pointers). In addition, the hasher instance and equality predicate are copied from `src`. Finally, a + * Overwrites this object with a copy of the given source. We become equal to `src` but independent of it to a + * common-sense extent. In addition, the hasher instance and equality predicate are copied from `src`. Finally, a * reasonable attempt is made to also make the internal structure of the hash map to be similar to that of `src. * * @param src - * Source object. + * Source object. No-op if `this == &src`. * @return `*this`. */ - Linked_hash_map& operator=(Linked_hash_map const & src); + Linked_hash_map& operator=(const Linked_hash_map& src); /** - * Overwrites this object making it equal to the given source, while the given source becomes as-if default-cted. + * Overwrites this object making it identical to the given source, while the given source becomes as-if default-cted. + * + * This is a constant-time operation, plus whatever is the cost of `this->clear()` (linear in pre-op `.size()`). * * @param src - * Source object which is emptied (unless it *is* `*this`; then no-op). + * Source object which is emptied; except no-op if `this == &src`. * @return `*this`. */ Linked_hash_map& operator=(Linked_hash_map&& src); /** - * Attempts to insert the given key/mapped-value pair into the map. If the key is already present in the map, - * does nothing. Return value indicates various info of interest about what occurred or did not occur. - * Key presence is determined according to the `Pred` template parameter which determines equality of 2 given keys; - * and via the `Hash` template parameter that enables efficient hash-based lookup. - * If inserted, the new element is considered "newest," as if by touch(). If not inserted, the existing element - * location is not affected. 
+ * Swaps the contents of this structure and `other`. This is a constant-time operation, as internal + * representations are swapped instead of any copy-assignment. * - * @param key_and_mapped - * The key/mapped-value pair to attempt to insert. This value is copied, and the copy is inserted. - * @return A pair whose second element is true if and only if the insertion occurred; and whose first element - * is an iterator pointing to either the newly inserted element or already present one with a key equal to - * `key_and_mapped.first`. - */ - std::pair insert(Value const & key_and_mapped); - - /** - * Attempts to find value at the given key in the map. Key presence is determined according to the `Pred` template - * parameter which determines equality of 2 given keys; and via the `Hash` template parameter that enables efficient - * hash-based lookup. The returned iterator (if valid) can be used to mutate the elements inside the map. - * - * As long as the value is not removed from the map, the reference will continue to be valid. - * - * Any subsequent writes to the referred to (by returned iterator) area of memory will NOT have the effect of touch(). - * If you need it, call touch() yourself. + * @see The `swap()` free function. + * It is generally best (equivalent but covers more generic cases) to use the ADL-enabled `swap(a, b)` + * pattern instead of this member function. That is: `using std::swap; ...; swap(a, b);`. + * (Details are outside our scope here; but in short ADL will cause the right thing to happen.) * - * @param key - * Key whose equal to find. - * @return If found, iterator to the key/mapped-value pair with the equivalent key; else `this->end()`. + * @param other + * The other structure. */ - Iterator find(Key const & key); + void swap(Linked_hash_map& other); /** - * Attempts to find value at the given key in the map. 
Key presence is determined according to the `Pred` template - * parameter which determines equality of 2 given keys; and via the `Hash` template parameter that enables efficient - * hash-based lookup. The returned iterator (if valid) cannot be used to mutate the elements inside the map. + * Attempts to insert (copying both key and mapped-value) the given key/mapped-value pair into the map; if key + * already in `*this` makes no change. See also the overload which can avoid a copy and destructively move + * the key and mapped-value instead. * - * As long as the value is not removed from the map, the iterator will continue to be valid. + * Return value indicates various info of interest about what occurred or did not occur. + * If inserted, the new element is considered "newest," as if by touch(). If not inserted, the existing element + * location is not affected (use touch() upon consulting the return value, if this is desirable). * - * @param key - * Key whose equal to find. - * @return If found, iterator to the key/mapped-value pair with the equivalent key; else `this->const_past_oldest()`. + * @param key_and_mapped + * The key/mapped-value pair to attempt to insert. A copy of this value is placed in `*this`. + * @return A pair whose second element is true if and only if the insertion occurred; and whose first element + * is an iterator pointing to either the newly inserted element or already present one with a key equal to + * `key_and_mapped.first`. */ - Const_iterator find(Key const & key) const; + std::pair insert(const Value& key_and_mapped); /** - * Returns the number of times a key is equivalent to the given one is present in the map: either 1 or 0. + * Identical to the other overload, except that (if key not already present in `*this`) the key and mapped-value + * are moved, not copied, into `*this`. * - * @param key - * Key whose equal to find. - * @return 0 or 1. 
+ * @note `key_and_mapped` pointee must be of type #Value_movable, a/k/a `pair` -- not + * #Value, a/k/a `pair` -- otherwise the other insert() overload may get invoked, + * and copying may occur contrary to your intention. E.g., use `std::make_pair()` or + * `"decltype(*this)::Value_movable{}"`. + * (For a move to occur, the source-object can't be `const`; so that's why.) + * @note You can often also use `x[std::move(key)] = std::move(value)`, particularly if you know `key` isn't in + * there, or you are OK with replacing the value if it is. In those cases it's probably more convenient, + * no pairs or `Value_movable`s to worry oneself. + * + * @param key_and_mapped + * The key/mapped-value pair to attempt to insert (both key and mapped-value are moved-from, if insertion + * occurs). + * @return See other overload. */ - size_type count(Key const & key) const; + std::pair insert(Value_movable&& key_and_mapped); /** - * Equivalent to `insert(Value(key, Mapped())).first->second` (but avoids unnecessarily invoking `Mapped()`/generally - * strives for better performance). Less formally, it either finds the value at the given key, or if not found - * inserts one with a default-constructed value; then returns reference to the in-structure stored `Mapped` value - * which can be used to to read and/or modify that value directly. - * - * Note that if `Mapped& x` is returned, then although `x` is mutable, in actuality `x.first` is `const`; so only - * `x.second` is truly mutable. You must not write to the key (such as via a `const_cast<>`); doing so will result - * in undefined behavior. + * Either finds the #Mapped value at the given key, or if not found inserts one with a default-constructed + * `Mapped{}`; then returns reference to the #Mapped. That ref can be used to read and/or modify that value + * directly. See also the overload which can avoid a copy and destructively move the key instead. * * If inserted, the new element is considered "newest," as if by touch(). 
If not inserted, the existing element * location is not affected. * + * So it is ~equivalent to + * + * - (`key` is in map) `return this->find(key)->second`; or + * - (otherwise) `return this->insert(key, Mapped{}).first->second`. + * * As long as the value is not removed from the map, the reference will continue to be valid. * - * Any subsequent writes to the referred to area of memory will NOT have the effect of touch(). If you need it, + * Any subsequent writes to the referred-to area of memory will NOT have the effect of touch(). If you need it * call touch() yourself. * * @param key - * Key whose equal to find or insert if not found. - * @return Reference to mutable Mapped value directly inside the data structure. + * Key whose equal to find or insert if not found. A copy of this value is placed in `*this`. + * @return Reference to mutable #Mapped value directly inside the data structure. */ - Mapped& operator[](Key const & key); + Mapped& operator[](const Key& key); /** - * Returns reference to mutable front ("newest") element in the structure; formally equivalent to - * `*(this->newest())`. - * - * OK to call when empty(); but behavior undefined if you attempt to access the result in any way if either empty() - * when this was called; or if `!empty()` at that time, but the underlying element is erased at time of access. - * If not empty() when this was called, then resulting reference continues to be valid as long as the underlying - * element is not erased; however, in the future the reference (while referring to the same element) might not refer - * to front ("newest") element any longer. (Informally, most uses would only call front() when `!empty()`, and would - * access it immediately and but once. However, I'm listing the corner cases above.) - * - * Note that if `Mapped& x` is returned, then although `x` is mutable, in actuality `x.first` is const; so only - * `x.second` is truly mutable. 
You must not write to the key (such as via a `const_cast<>`); doing so will result - * in undefined behavior. + * Identical to the other overload, except that (if key not already present in `*this`) the key + * is moved, not copied, into `*this`. * - * @return Reference to mutable value directly inside the data structure; or to undefined location if - * currently empty(). Note that only the `Mapped` part of `Value` is mutable. + * @param key + * The key to attempt to insert (key is moved-from, if insertion occurs). + * @return See other overload. */ - Value& front(); + Mapped& operator[](Key&& key); /** - * Returns reference to mutable back ("oldest") element in the structure; formally equivalent to - * `*(--this->past_oldest())`. + * Attempts to find value at the given key in the map. Key presence is determined identically to how it would be + * done in an `unordered_set`, with the particular #Hash and #Pred instances given to ctor + * (typically their default-cted instances, typically occupying no memory). + * + * The returned iterator (if valid) can be used to mutate the element inside the map; though only the #Mapped + * is mutable; the `const Key` is immutable. * - * All other comments for front() apply analogously. + * Any subsequent writes to the referred-to (by returned iterator) area of memory will NOT have the effect of touch(). + * If you need it call touch() yourself. * - * @return Reference to mutable `Mapped` value directly inside the data structure; or to undefined location if - * currently empty(). + * @param key + * Key whose equal to find. + * @return If found, iterator to the key/mapped-value pair with the equivalent key; else `this->end()`. */ - Value& back(); + Iterator find(const Key& key); /** - * Returns reference to immutable front ("newest") element in the structure; formally equivalent to - * `*(this->const_newest())`. 
- * - * OK to call when empty(); but behavior undefined if you attempt to access the result in any way if either empty() - * when this was called; or if `!empty()` at that time, but the underlying element is erased at time of access. - * If not empty() when this was called, then resulting reference continues to be valid as long as the underlying - * element is not erased; however, in the future the reference (while referring to the same element) may not refer - * to front ("newest") element any longer. (Informally, most uses would only call front() when `!empty()`, and would - * access it immediately and but once. However, I'm listing the corner cases above.) + * Identical to the other overload but in a `const` context: the returned iterator is to immutable memory. * - * @return Reference to immutable `Mapped` value directly inside the data structure; or to undefined location if - * currently empty(). + * @param key + * Key whose equal to find. + * @return If found, iterator to the key/mapped-value pair with the equivalent key; else `this->cend()`. */ - Value const & const_front() const; + Const_iterator find(const Key& key) const; /** - * Returns reference to immutable back ("oldest") element in the structure; formally equivalent - * to `*(--this->const_past_oldest())`. + * Returns the number of times a key equal to the given one is present (as-if via find()) in the map: either 1 or 0. * - * All other comments for const_front() apply analogously. - * - * @return Reference to immutable `Mapped` value directly inside the data structure; or to undefined location if - * currently empty(). + * @param key + * Key whose equal to find. + * @return 0 or 1. 
*/ - Value const & const_back() const; + size_type count(const Key& key) const; /** - * Given a valid iterator into the structure, makes the pointed to element "newest" by moving it from wherever it + * Given a valid iterator into the structure, makes the pointed-to element "newest" by moving it from wherever it * is to be first in the iteration order. Behavior undefined if iterator invalid. * * The iterator continues to be valid. @@ -356,18 +401,18 @@ class Linked_hash_map * @param it * Iterator to an element of the structure. */ - void touch(Const_iterator const & it); + void touch(const Const_iterator& it); /** * Given a key into the structure, makes the corresponding element "newest" by moving it from wherever it - * is to be first in the iteration order; or does nothing if no such key. Return value indicates various info of - * interest about what occurred or did not occur. + * is to be first in the iteration order; or does nothing if no such key. `find(key)` equivalent is performed + * first. Return value indicates whether it was found. * * @param key * Key whose equal to find. * @return `true` if the key was found (even if it was already "newest"); `false` if not found. */ - bool touch(Key const & key); + bool touch(const Key& key); /** * Erases the element pointed to by the given valid iterator. Behavior undefined if it is not valid. `it` becomes @@ -377,11 +422,12 @@ class Linked_hash_map * Iterator of element to erase. * @return Iterator one position past (i.e., "older") than `it`, before `*it` was removed. */ - Iterator erase(Const_iterator const & it); + Iterator erase(const Const_iterator& it); /** * Erases all elements in the range [`it_newest`, `it_past_oldest`). Behavior undefined if a given iterator is - * invalid. `it_newest` becomes invalid. + * invalid, or if the range is invalid. Corner case: an empty range is allowed; then this no-ops. Unless no-op, + * `it_newest` becomes invalid. 
* * @param it_newest * Iterator of first ("newest") element to erase. @@ -389,29 +435,21 @@ class Linked_hash_map * Iterator of one past last ("oldest") element to erase. * @return `it_past_oldest` copy. */ - Iterator erase(Const_iterator const & it_newest, Const_iterator const & it_past_oldest); + Iterator erase(const Const_iterator& it_newest, const Const_iterator& it_past_oldest); /** - * Erases the element with the given key, if it exists. Return value indicates various info of interest about what - * occurred or did not occur. + * Erases the element with the given key, if it exists. `find(key)` equivalent is performed + * first. Return value indicates whether it existed. * * @param key * Key such that its equal's (if found) element will be erased. * @return Number of elements erased (0 or 1). */ - size_type erase(Key const & key); + size_type erase(const Key& key); /// Makes it so that `size() == 0`. void clear(); - /** - * Swaps the contents of this structure and `other`. This is a constant-time operation. - * - * @param other - * The other structure. - */ - void swap(Linked_hash_map& other); - /** * Synonym of newest(). * @return See newest(). @@ -419,7 +457,7 @@ class Linked_hash_map Iterator begin(); /** - * Returns first, a/k/a "newest," element's iterator. + * Returns first, a/k/a "newest," element's iterator; or past_oldest() if empty(). * @return Ditto. */ Iterator newest(); @@ -431,7 +469,9 @@ class Linked_hash_map Iterator end(); /** - * Returns one past last, a/k/a "oldest," element's iterator. + * Returns special iterator indicating the position just past the iteration order; if not empty() this is + * one past last, a/k/a "oldest," element in the iteration order. + * * @return Ditto. */ Iterator past_oldest(); @@ -449,7 +489,7 @@ class Linked_hash_map Const_iterator begin() const; /** - * Returns first, a/k/a "newest," element's iterator (to immutable element). + * Same as newest() but operating on immutable `*this`. * @return Ditto. 
*/ Const_iterator const_newest() const; @@ -467,7 +507,7 @@ class Linked_hash_map Const_iterator end() const; /** - * Returns one past last, a/k/a "oldest," element's iterator (to immutable element). + * Same as past_oldest() but operating on immutable `*this`. * @return Ditto. */ Const_iterator const_past_oldest() const; @@ -515,25 +555,26 @@ class Linked_hash_map Const_reverse_iterator crend() const; /** - * Returns one past last, a/k/a "newest," element's reverse iterator (to immutable element). + * Returns special reverse iterator indicating the position just past the reverse-iteration order; if not empty() + * this is one past last, a/k/a "newest," element in the reverse-iteration order. * @return Ditto. */ Const_reverse_iterator const_past_newest() const; /** - * Returns true if and only if container is empty. Same performance as of `unordered_map<>`. + * Returns true if and only if container is empty. Same performance as of `unordered_set<>`. * @return Ditto. */ bool empty() const; /** - * Returns number of elements stored. Same performance as of `unordered_map<>.` + * Returns number of elements stored. Same performance as of `unordered_set<>.` * @return Ditto. */ size_type size() const; /** - * Returns max number of elements that can be stored. Same performance as of `unordered_map<>` + `list<>`. + * Returns max number of elements that can be stored. Same performance as of `unordered_set<>` + `list<>`. * @return Ditto. */ size_type max_size() const; @@ -543,138 +584,139 @@ class Linked_hash_map // Methods. /** - * Helper that modifies #m_value_list and #m_keys_into_list_map so that `key_and_mapped`'s copy is inserted into + * Helper that modifies #m_value_list and #m_value_iter_set so that `key_and_mapped`'s copy is inserted into * the structure. Pre-condition is that `key_and_mapped.first` is not in the structure (else behavior undefined). * * @param key_and_mapped * Same as in insert(). * @return Same as in `insert().first`. 
 */
-  Iterator insert_impl(Value const & key_and_mapped);
-
-  // Types.
-
-  /// Short-hand for iterator into doubly linked list of (`Key`, `Mapped`) pairs.
-  using Value_list_iter = Iterator;
+  Iterator insert_impl(const Value& key_and_mapped);
 
-  /// Short-hand for `const` iterator into doubly linked list of (`Key`, `Mapped`) pairs.
-  using Value_list_const_iter = Const_iterator;
-
-  /// Short-hand for a hash map that maps `Key` to iterator into doubly linked list of (`Key`, `Mapped`) pairs.
-  using Key_to_value_iter_map = boost::unordered_map;
+  /**
+   * Similar to insert_impl(), except `key_and_mapped` components are `move()`d into `*this` instead of being copied.
+   *
+   * @param key_and_mapped
+   *        Same as in insert().
+   * @return Same as in `insert().first`.
+   */
+  Iterator insert_impl_mv(Value_movable&& key_and_mapped);
 
   // Data.
 
   /**
    * The actual values -- which, as in `unordered_map`, are instances of #Value = `pair` --
    * are stored in here, in the order in which user would iterate over them.  If `Value v` is in this list, then no
    * `Value v1 == v` can be elsewhere in the list.  The order is semantically defined to be from "newest" to "oldest."
-   * Therefore, any newly inserted value goes at the START of the list.  Similarly, any "touched" value is moved to
-   * the START of the list (see touch() and other methods that are documented as "touching" the referenced key).
-   * This ordering is what a normal `unordered_map` would not supply (it's in the name!) but that we advertise.
+   * Therefore, any newly inserted value goes at the *start* of the list.  Similarly, any "touched" value is moved to
+   * the *start* of the list (see touch()).
    *
-   * Since #m_keys_into_list_map stores keys, why store the keys here duplicately?
Answer: that way we can expose - * iterators into #m_value_list directly to the user; so that they can take an iterator `I` and directly access - * the key and mapped value via `I->first` and `I->second`, respectively -- as is expected of any map container. - * This does, however, come at some memory cost. + * This ordering is what a normal `unordered_map` would not supply (it's in the name!) but that we advertise. * - * @todo It is probably possible to cut down on the memory cost of storing, for each element, a copy of the `Key` - * in #m_value_list (in addition to the mandatory one in the lookup table #m_keys_into_list_map). Perhaps the key - * copy would be replaced by an iterator back into #m_value_list. A custom iterator class would be necessary - * to properly dereference this (this is non-trivial given that `operator*()` would have to return a reference - * to a pair which is no longer stored anywhere in this hypothetical design). Moreover, iterators exposed to the - * user would become invalid the same way an `unordered_map<>` iterator does due to seemingly unrelated changes. - * Finally, the memory savings would not even exist for `Key` types roughly the size of a pointer. All in all, - * not a slam-dunk.... + * ### Design ### + * This is very much the central structure in a `*this`; its iterator type *is* our exposed #Iterator. + * Straight-up, #m_value_list supplies every single required operation (or at least ones on top of which any + * required ops could be implemented). There is exactly one exception to this: `find(const Key&)`. It too + * could be implemented with #m_value_list alone, but a linear search (linear-time worst- and average-case) + * would be necessary (unacceptable). Because of that we have #m_value_iter_set. See that guy's doc header. * * ### Performance ### * Moving a value from anywhere to either end of the list is a constant-time operation * (assuming the source location's iterator is known). Hence touch() is constant-time. 
Moreover, touch() - * does NOT involve a copy of a `Value` (it only involves assigning, internally, a few linked list pointers). - * Also note that insertion is similarly constant-time (but does, necessarily, require a `Value` copy as for - * any container). Finally, erasure is also constant-time. These are the only operations needed. + * does *not* involve a copy of a #Value (it only involves assigning, internally, a few linked list pointers). + * Also note that insertion is similarly constant-time. Finally, erasure is also constant-time. These are the + * basic operations needed. */ Value_list m_value_list; /** - * Maps each `Key K` that is in #m_value_list to an iterator into #m_value_list (note the iterator points to - * a `Value` instance, which itself contains a copy of `K` but also the `Mapped` value, in which the user likely - * has keen interest). This supplies the one capability #m_value_list alone cannot: near-constant-time lookup - * of a `Value` or a `Mapped` by `Key` (a linear search would be necessary). + * Data structure that allows the amortized-constant-time (as in `unordered_set`) implementation of + * `this->find(key)`, where `key` is `const Key&`. Namely, then, given a #Key, it gets us an #Iterator + * into #m_value_list -- the central data store -- or a null iterator if not-found. + * + * ### Design ### + * (There is quick intro in Impl section of the class doc header.) Ignoring various technicalities and C++isms, + * ultimately it stores `Iterator`s while supporting the **find** operation that + * + * - takes a `const Key& key`; and + * - yields the #Iterator `it` stored therein (if any) such that + * - it->second *equals by value* (via #Hash and #Pred) the `key`. + * + * This find-op must #Hash the `key`; and then perform a (series of) #Pred comparisons between + * `key` and the `Key`s stored at `Iterator`s within that hash-bucket. * - * The `unique_ptr<>` wrapper remains constant after setting it to non-null. Why have it at all? 
Because in at least - * one constructor we are unable to determine all the constructor arguments by the time the constructor body - * executes, and we don't want to construct the map until then. + * *How* this is accomplished is encapsulated inside #Linked_hash_key_set, Linked_hash_key, Linked_hash_key_hash, + * and Linked_hash_key_pred helper (internally-used only) types. This is abstracted away; the bottom line is + * `m_value_iter_set.find(key)->iter()` yields the proper #Iterator (assuming `.find()` didn't yield `.end()`). + * + * Similarly `m_value_iter_set.insert(iter)` -- where `iter` is an #Iterator into #m_value_list -- just works. * * ### Performance ### - * Anything they'll need to do to this map carries the same performance cost as if they used a - * straight `unordered_map<>`, so by definition it is acceptable. The only operation this does not provide is - * iteration and insertion in the proper order, and that's done through #m_value_list instead. + * Anything they'll need to do to this set (namely `.find()` and `.insert()`) carries the same performance cost as + * if they used a straight `unordered_map<>`, so by definition it is acceptable. */ - boost::movelib::unique_ptr m_keys_into_list_map; + Linked_hash_key_set m_value_iter_set; }; // class Linked_hash_map // Free functions: in *_fwd.hpp. // Template implementations. -template -Linked_hash_map::Linked_hash_map(size_type n_buckets, - hasher const & hasher_instance, - key_equal const & key_equal_instance) : - Linked_hash_map({}, n_buckets, hasher_instance, key_equal_instance) +template +Linked_hash_map::Linked_hash_map(size_type n_buckets, + const Hash& hasher_obj, + const Pred& pred) : + /* @todo Using detail:: like this is technically uncool, but so far all alternatives look worse. + * We blame the somewhat annoying ctor API for unordered_*. */ + m_value_iter_set((n_buckets == size_type(-1)) + ? boost::unordered::detail::default_bucket_count + : n_buckets, + hasher_obj, pred) { - // Nothing. 
+ // That's all. } -template -Linked_hash_map::Linked_hash_map(std::initializer_list values, - size_type n_buckets, - hasher const & hasher_instance, - key_equal const & key_equal_instance) : +template +Linked_hash_map::Linked_hash_map(std::initializer_list values, + size_type n_buckets, + const Hash& hasher_obj, + const Pred& pred) : // Their initializer_list is meant for a dictionary, but it is perfect for our list of pairs! - m_value_list(values) + m_value_list(values), + m_value_iter_set((n_buckets == size_type(-1)) + ? boost::unordered::detail::default_bucket_count // See @todo above. + : n_buckets, + hasher_obj, pred) { - using boost::unordered_map; - - /* Guess the default size, if they specified the default, from a dummy unrelated-type map. Probably - * that'll be correct. Even use our template argument values, just in case that matters. */ - if (n_buckets == size_type(-1)) - { - unordered_map dummy; - n_buckets = dummy.bucket_count(); - } - - // We use a unique_ptr<> because of the above: we couldn't immediately initialize this map. - m_keys_into_list_map.reset(new Key_to_value_iter_map(n_buckets, hasher_instance, key_equal_instance)); - // Now link each key in the quick-lookup table to its stored location in the ordering. - for (Value_list_iter value_list_it = m_value_list.begin(); value_list_it != m_value_list.end(); - ++value_list_it) + const auto value_list_end_it = m_value_list.end(); + for (auto value_list_it = m_value_list.begin(); value_list_it != value_list_end_it; ++value_list_it) { - // Note this sets (at key K) the value: iterator to pair. - (*m_keys_into_list_map)[value_list_it->first] = value_list_it; + // Note that value_list_it contains both the iterator (lookup result) and the lookup key (iterator pointee). + m_value_iter_set.insert(value_list_it); } } -template -Linked_hash_map::Linked_hash_map(Linked_hash_map const & src) : - m_keys_into_list_map(new Key_to_value_iter_map()) // Dummy: all this is quickly replaced. 
+template +Linked_hash_map::Linked_hash_map(const Linked_hash_map& src) + // An empty m_value_iter_set is constructed here but immediately replaced within the {body}. { operator=(src); } -template -Linked_hash_map::Linked_hash_map(Linked_hash_map&& src) : - m_keys_into_list_map(new Key_to_value_iter_map()) // Dummy: all this is quickly replaced. +template +Linked_hash_map::Linked_hash_map(Linked_hash_map&& src) + // An empty m_value_iter_set is constructed here but immediately replaced within the {body}. { operator=(std::move(src)); } -template -Linked_hash_map& - Linked_hash_map::operator=(Linked_hash_map const & src) +template +Linked_hash_map& + Linked_hash_map::operator=(const Linked_hash_map& src) { + using Value_iter_set = decltype(m_value_iter_set); + if (&src == this) { return *this; @@ -683,7 +725,7 @@ Linked_hash_map& /* Values are values -- copy them over. Recall these are (const Key, Mapped) pairs. * - * Why not just: `m_value_list = src.m_value_list;`? Answer: It fails to build, at least with this Darwin-supplied + * Why not just: `m_value_list = src.m_value_list;`? Answer: It fails to build, at least with a * clang++, due to an interesting subtlety. Recall that the list stores (const Key, Mapped) pairs: note * the `const`. Say you have iterator `it` into m_value_list; then `*it = <...>;` will not compile, as * *it is partially const and cannot be assigned to (it is not "Assignable," in STL-speak). Yet the STL @@ -705,175 +747,168 @@ Linked_hash_map& * * That's fine. However, it does actually bypass a nice performance trick! We would rather not make that * concession. Therefore, let's temporarily pretend m_value_list and src.m_value_list store non-const Keys. - * Then we can assing. Note that, even though it's N lines of code, reinterpret_cast<> generates no machine + * Then we can assign. 
Note that, even though it's N lines of code, reinterpret_cast<> generates no machine * code: it just makes the code-that-would-have-been-generated-anyway look at the memory in a different way * (in this case, as storing Keys that can be overwritten instead of read-only ones). Finally, note that * we specifically require that the template parameter `typename Key` be Assignable; that is the piece of the * puzzle that GUARANTEES this reinterpret_cast<> (in general not a safe operation) is indeed safe/correct. */ { - using std::pair; - using std::list; + using Mutable_key_value_list = std::list; - using Mutable_key_value = pair; - using Mutable_key_value_list = list; - - Mutable_key_value_list* const dst_list_ptr - = reinterpret_cast(&m_value_list); - const Mutable_key_value_list* const src_list_ptr - = reinterpret_cast(&src.m_value_list); - - *dst_list_ptr = *src_list_ptr; + *(reinterpret_cast(&m_value_list)) + = *(reinterpret_cast(&src.m_value_list)); } - /* However, the iterators in any hash map would point to the wrong list! Build up that map from scratch. - * - * ...Actually, not quite. To attempt to keep the same structure as the source map, first copy it; then - * overwrite the values. Not sure how perfectly it works but seems worth a shot, as a regular unordered_map<> - * promises to copy over things like the load factor of the source object, not to mention the hasher and - * equality predicate, and we advertise the same. */ - *m_keys_into_list_map = *src.m_keys_into_list_map; - - // So now replace the keys in the ready map. - for (Value_list_iter value_list_it = m_value_list.begin(); value_list_it != m_value_list.end(); - ++value_list_it) + /* However, the iterators in any hash set would point into the wrong list! Build up that set from scratch. + * Do try to keep the same structure, as we advertise. 
*/ + const auto& src_value_iter_set = src.m_value_iter_set; + m_value_iter_set = Value_iter_set{src_value_iter_set.bucket_count(), + src_value_iter_set.hash_function(), + src_value_iter_set.key_eq()}; + + const auto value_list_end_it = m_value_list.end(); + for (auto value_list_it = m_value_list.begin(); value_list_it != value_list_end_it; ++value_list_it) { - // Note this sets (at key K) the value: iterator to pair. - (*m_keys_into_list_map)[value_list_it->first] = value_list_it; + // Note that value_list_it contains both the iterator (lookup result) and the lookup key (iterator pointee). + m_value_iter_set.insert(value_list_it); } return *this; } // Linked_hash_map::operator=() -template -Linked_hash_map& - Linked_hash_map::operator=(Linked_hash_map&& src) +template +Linked_hash_map& + Linked_hash_map::operator=(Linked_hash_map&& src) { if (&src != this) { clear(); - swap(std::move(src)); + swap(src); } return *this; } -template -std::pair::Iterator, bool> - Linked_hash_map::insert(Value const & key_and_mapped) +template +void Linked_hash_map::swap(Linked_hash_map& other) { - using std::pair; - - Key const & key = key_and_mapped.first; - - // Check if this key is already in the map of iterators and therefore overall map. - typename Key_to_value_iter_map::iterator const map_it = m_keys_into_list_map->find(key); - if (map_it != m_keys_into_list_map->end()) - { - // Yes. *map_it is pair. So return 2nd half of that as the iterator to already existing element! - return pair(map_it->second, false); - } - // else Nope. + using std::swap; - return pair(insert_impl(key_and_mapped), true); + swap(m_value_iter_set, other.m_value_iter_set); // unordered_set<> exchange; constant-time for sure at least. + swap(m_value_list, other.m_value_list); // list<> exchange (probably ~= head+tail pointer pairs exchanged). + // Per cppreference.com `list<>::iterator`s (inside the `_maps`s) remain valid after list<>s swapped. 
} -template -typename Linked_hash_map::Iterator - Linked_hash_map::insert_impl(Value const & key_and_mapped) +template +std::pair::Iterator, bool> + Linked_hash_map::insert(const Value& key_and_mapped) { - /* Insert it at the front: as advertised, new element is "touched," meaning it is made "newest," so goes at start. - * Note that "it" = a copy of key_and_mapped. */ - m_value_list.push_front(key_and_mapped); - - // Iterator to the new element is therefore iterator to start of list of pairs. - Iterator const new_elem_it = m_value_list.begin(); - // And make sure we can look it up in the future quickly (such as what is done above). - (*m_keys_into_list_map)[key_and_mapped.first] = new_elem_it; + using std::pair; - return new_elem_it; + const auto set_it = m_value_iter_set.find(key_and_mapped.first); + return (set_it == m_value_iter_set.end()) + ? pair{insert_impl(key_and_mapped), // Key and Mapped copy occurs here. + true} + : pair{set_it->iter(), false}; // *set_it is Linked_hash_key. } -template -typename Linked_hash_map::Iterator - Linked_hash_map::find(Key const & key) +template +std::pair::Iterator, bool> + Linked_hash_map::insert(Value_movable&& key_and_mapped) { - typename Key_to_value_iter_map::iterator const map_it = m_keys_into_list_map->find(key); - return (map_it == m_keys_into_list_map->end()) ? m_value_list.end() : map_it->second; + using std::pair; + + const auto set_it = m_value_iter_set.find(key_and_mapped.first); + return (set_it == m_value_iter_set.end()) + ? pair{insert_impl_mv(std::move(key_and_mapped)), // <-- The difference from other overload. + true} + : pair{set_it->iter(), false}; // *set_it is Linked_hash_key. } -template -typename Linked_hash_map::Const_iterator - Linked_hash_map::find(Key const & key) const +template +Mapped_t& Linked_hash_map::operator[](const Key& key) { - typename Key_to_value_iter_map::const_iterator const map_it = m_keys_into_list_map->find(key); - return (map_it == m_keys_into_list_map->cend()) ? 
m_value_list.cend() : map_it->second; + const auto set_it = m_value_iter_set.find(key); + return ((set_it == m_value_iter_set.end()) + ? insert_impl_mv // Returns Iterator. *(Iterator) is pair. + (Value_movable{Key{key}, Mapped{}}) // Have to copy `key`, but empty temporary Mapped is moved. + : set_it->iter()) // *set_it is Linked_hash_key. *(that.iter()) is pair. + ->second; } -template -typename Linked_hash_map::size_type - Linked_hash_map::count(Key const & key) const +template +Mapped_t& Linked_hash_map::operator[](Key&& key) { - return m_keys_into_list_map->count(key); + const auto set_it = m_value_iter_set.find(key); + return ((set_it == m_value_iter_set.end()) + // v-- The difference from other overload. + ? insert_impl_mv // Returns Iterator. *(Iterator) is pair. + (Value_movable{std::move(key), Mapped{}}) + : set_it->iter()) // *set_it is Linked_hash_key. *(that.iter()) is pair. + ->second; } -template -Mapped& Linked_hash_map::operator[](Key const & key) +template +typename Linked_hash_map::Iterator + Linked_hash_map::insert_impl(const Value& key_and_mapped) { - using std::pair; - - // Check if this key is already in the map of iterators and therefore overall map. - typename Key_to_value_iter_map::iterator const map_it = m_keys_into_list_map->find(key); - if (map_it != m_keys_into_list_map->end()) - { - // Yes. *map_it is pair. *(that Iterator) is pair. Return 2nd half of latter. - return map_it->second->second; - } - // else Nope. - - return insert_impl(Value(key, Mapped()))->second; + /* Insert it at the front: as advertised, new element is "touched," meaning it is made "newest," so goes at start. + * Note that "it" = a copy of key_and_mapped; this invokes pair<> copy ctor, as emplace_front() forwards to it. */ + m_value_list.emplace_front(key_and_mapped); + + /* Iterator to the new element is therefore iterator to start of list of pairs. + * And make sure we can look it up in the future quickly (such as what is done first in insert()). 
+ * Linked_hash_key_set m_value_iter_set achieves these aims black-boxily. */ + const auto list_it = m_value_list.begin(); + m_value_iter_set.insert(list_it); + return list_it; } -template -typename Linked_hash_map::Value& - Linked_hash_map::front() +template +typename Linked_hash_map::Iterator + Linked_hash_map::insert_impl_mv(Value_movable&& key_and_mapped) { - // No assert(): we promised not to crash even if empty(). They just can't access it subsequently if so. - return *(newest()); + /* Same as insert_impl() but construct value in-place inside the list<> as-if: + * pair p{move(k_a_m.first), move(k_a_m.second)}. */ + m_value_list.emplace_front(std::move(key_and_mapped)); + + const auto list_it = m_value_list.begin(); + m_value_iter_set.insert(list_it); + return list_it; } -template -typename Linked_hash_map::Value& - Linked_hash_map::back() +template +typename Linked_hash_map::Iterator + Linked_hash_map::find(const Key& key) { - // No assert(): we promised not to crash even if empty(). They just can't access it subsequently if so. - return *(--past_oldest()); + const auto set_it = m_value_iter_set.find(key); + return (set_it == m_value_iter_set.end()) ? m_value_list.end() : set_it->iter(); } -template -typename Linked_hash_map::Value const & - Linked_hash_map::const_front() const +template +typename Linked_hash_map::Const_iterator + Linked_hash_map::find(const Key& key) const { - // No assert(): we promised not to crash even if empty(). They just can't access it subsequently if so. - return *(const_newest()); + const auto set_it = m_value_iter_set.find(key); + return (set_it == m_value_iter_set.cend()) ? m_value_list.cend() : set_it->iter(); } -template -typename Linked_hash_map::Value const & - Linked_hash_map::const_back() const +template +typename Linked_hash_map::size_type + Linked_hash_map::count(const Key& key) const { - // No assert(): we promised not to crash even if empty(). They just can't access it subsequently if so. 
- return *(--const_past_oldest()); + return m_value_iter_set.count(key); } -template -void Linked_hash_map::touch(Const_iterator const & it) +template +void Linked_hash_map::touch(const Const_iterator& it) { m_value_list.splice(m_value_list.begin(), m_value_list, it); } -template -bool Linked_hash_map::touch(Key const & key) +template +bool Linked_hash_map::touch(const Key& key) { - const Const_iterator it = find(key); + const auto it = find(key); if (it == end()) { return false; @@ -884,209 +919,203 @@ bool Linked_hash_map::touch(Key const & key) return true; } -template -typename Linked_hash_map::Iterator - Linked_hash_map::erase(Const_iterator const & it) +template +typename Linked_hash_map::Iterator + Linked_hash_map::erase(const Const_iterator& it) { - m_keys_into_list_map->erase(m_keys_into_list_map->find(it->first)); + m_value_iter_set.erase(it->first); + // (^-- Subtlety: .erase(it) won't build due to Const_ness of `it`.) + return m_value_list.erase(it); } -template -typename Linked_hash_map::Iterator - Linked_hash_map::erase(Const_iterator const & it_newest, - Const_iterator const & it_past_oldest) +template +typename Linked_hash_map::Iterator + Linked_hash_map::erase(const Const_iterator& it_newest, + const Const_iterator& it_past_oldest) { - for (Value_list_const_iter it = it_newest; it != it_past_oldest; ++it) + for (auto it = it_newest; it != it_past_oldest; ++it) { - m_keys_into_list_map->erase(it->first); + m_value_iter_set.erase(it->first); } return m_value_list.erase(it_newest, it_past_oldest); } -template -typename Linked_hash_map::size_type - Linked_hash_map::erase(Key const & key) +template +typename Linked_hash_map::size_type + Linked_hash_map::erase(const Key& key) { - typename Key_to_value_iter_map::iterator const map_it = m_keys_into_list_map->find(key); - if (map_it == m_keys_into_list_map->end()) + const auto set_it = m_value_iter_set.find(key); + if (set_it == m_value_iter_set.end()) { return 0; } // else - 
m_value_list.erase(map_it->second); - m_keys_into_list_map->erase(map_it); + const auto list_it = set_it->iter(); + m_value_iter_set.erase(set_it); + m_value_list.erase(list_it); return 1; } -template -void Linked_hash_map::clear() +template +void Linked_hash_map::clear() { - m_keys_into_list_map->clear(); + m_value_iter_set.clear(); m_value_list.clear(); } -template -void Linked_hash_map::swap(Linked_hash_map& other) -{ - using std::swap; - - swap(m_keys_into_list_map, other.m_keys_into_list_map); // unique_ptr<>s exchanged (= raw pointers exchanged). - swap(m_value_list, other.m_value_list); // list<> exchange (probably = head+tail pointer pairs exchanged). - // Per cppreference.com `list<>::iterator`s (inside the `_maps`s) remain valid after list<>s swapped. -} - -template -typename Linked_hash_map::Iterator - Linked_hash_map::newest() +template +typename Linked_hash_map::Iterator + Linked_hash_map::newest() { return m_value_list.begin(); } -template -typename Linked_hash_map::Iterator - Linked_hash_map::begin() +template +typename Linked_hash_map::Iterator + Linked_hash_map::begin() { return newest(); } -template -typename Linked_hash_map::Iterator - Linked_hash_map::past_oldest() +template +typename Linked_hash_map::Iterator + Linked_hash_map::past_oldest() { return m_value_list.end(); } -template -typename Linked_hash_map::Iterator - Linked_hash_map::end() +template +typename Linked_hash_map::Iterator + Linked_hash_map::end() { return past_oldest(); } -template -typename Linked_hash_map::Const_iterator - Linked_hash_map::const_newest() const +template +typename Linked_hash_map::Const_iterator + Linked_hash_map::const_newest() const { return m_value_list.cbegin(); } -template -typename Linked_hash_map::Const_iterator - Linked_hash_map::cbegin() const +template +typename Linked_hash_map::Const_iterator + Linked_hash_map::cbegin() const { return const_newest(); } -template -typename Linked_hash_map::Const_iterator - Linked_hash_map::begin() const +template 
+typename Linked_hash_map::Const_iterator + Linked_hash_map::begin() const { return const_newest(); } -template -typename Linked_hash_map::Const_iterator - Linked_hash_map::const_past_oldest() const +template +typename Linked_hash_map::Const_iterator + Linked_hash_map::const_past_oldest() const { return m_value_list.cend(); } -template -typename Linked_hash_map::Const_iterator - Linked_hash_map::cend() const +template +typename Linked_hash_map::Const_iterator + Linked_hash_map::cend() const { return const_past_oldest(); } -template -typename Linked_hash_map::Const_iterator - Linked_hash_map::end() const +template +typename Linked_hash_map::Const_iterator + Linked_hash_map::end() const { return const_past_oldest(); } -template -typename Linked_hash_map::Reverse_iterator - Linked_hash_map::oldest() +template +typename Linked_hash_map::Reverse_iterator + Linked_hash_map::oldest() { return m_value_list.rbegin(); } -template -typename Linked_hash_map::Reverse_iterator - Linked_hash_map::rbegin() +template +typename Linked_hash_map::Reverse_iterator + Linked_hash_map::rbegin() { return oldest(); } -template -typename Linked_hash_map::Reverse_iterator - Linked_hash_map::past_newest() +template +typename Linked_hash_map::Reverse_iterator + Linked_hash_map::past_newest() { return m_value_list.rend(); } -template -typename Linked_hash_map::Reverse_iterator - Linked_hash_map::rend() +template +typename Linked_hash_map::Reverse_iterator + Linked_hash_map::rend() { return past_newest(); } -template -typename Linked_hash_map::Const_reverse_iterator - Linked_hash_map::const_oldest() const +template +typename Linked_hash_map::Const_reverse_iterator + Linked_hash_map::const_oldest() const { return m_value_list.crbegin(); } -template -typename Linked_hash_map::Const_reverse_iterator - Linked_hash_map::crbegin() const +template +typename Linked_hash_map::Const_reverse_iterator + Linked_hash_map::crbegin() const { return const_oldest(); } -template -typename 
Linked_hash_map::Const_reverse_iterator - Linked_hash_map::const_past_newest() const +template +typename Linked_hash_map::Const_reverse_iterator + Linked_hash_map::const_past_newest() const { return m_value_list.crend(); } -template -typename Linked_hash_map::Const_reverse_iterator - Linked_hash_map::crend() const +template +typename Linked_hash_map::Const_reverse_iterator + Linked_hash_map::crend() const { return const_past_newest(); } -template -typename Linked_hash_map::size_type - Linked_hash_map::size() const +template +typename Linked_hash_map::size_type + Linked_hash_map::size() const { - return m_keys_into_list_map->size(); // I'm skeptical/terrified of list::size()'s time complexity. + return m_value_iter_set.size(); // I'm skeptical/terrified of list::size()'s time complexity. } -template -bool Linked_hash_map::empty() const +template +bool Linked_hash_map::empty() const { - return m_keys_into_list_map->empty(); + return m_value_list.empty(); } -template -typename Linked_hash_map::size_type - Linked_hash_map::max_size() const +template +typename Linked_hash_map::size_type + Linked_hash_map::max_size() const { - return std::min(m_keys_into_list_map->max_size(), m_value_list.max_size()); + return std::min(m_value_iter_set.max_size(), m_value_list.max_size()); } -template -void swap(Linked_hash_map& val1, Linked_hash_map& val2) +template +void swap(Linked_hash_map& val1, + Linked_hash_map& val2) { val1.swap(val2); } diff --git a/src/flow/util/linked_hash_set.hpp b/src/flow/util/linked_hash_set.hpp index 48d584a1f..22d082b1f 100644 --- a/src/flow/util/linked_hash_set.hpp +++ b/src/flow/util/linked_hash_set.hpp @@ -18,37 +18,58 @@ /// @file #pragma once +#include "flow/util/detail/linked_hash.hpp" #include "flow/util/util_fwd.hpp" #include -#include -#include namespace flow::util { /** - * An object of this class is a set that combines the lookup speed of an `unordered_set<>` and ordering and iterator - * stability capabilities of an `std::list<>`. 
+ * An object of this class is a set that combines the lookup speed of an `unordered_set<>` and ordering and + * iterator stability capabilities of a `list<>`. * * This is just like Linked_hash_map, except it only stores keys -- no mapped values. All comments, except for * self-explanatory differences, from Linked_hash_map apply here. Thus I will only speak of differences below to - * avoid duplication of this header. + * avoid duplication of this header. Incidentally the most visible API difference (aside from having no `Mapped`s + * to speak of, only `Key`s) is that Linked_hash_set lacks `(*this)[]` operator; so one always uses insert() to + * insert. * - * @see class Linked_hash_map. + * Move semantics for keys are supported (let `x` be a `*this`): + * - `x.insert(std::move(a_key))`; + * - `x.insert(Key{...})`. * - * @tparam Key + * The iterators are, really, `list` const-iterators; and as such are not invalidated except + * due to direct erasure of a given pointee. + * + * @internal + * ### Impl notes ### + * It's very much like Linked_hash_map; just the `list` #m_value_list stores only `Key`s as opposed to + * `pair`s. See Linked_hash_map. + * @endinternal + * + * @tparam Key_t * Key type. Same as for Linked_hash_map. - * @tparam Hash + * @tparam Hash_t * Hasher type. Same as for Linked_hash_map. - * @tparam Pred + * @tparam Pred_t * Equality functor type. Same as for Linked_hash_map. */ -template +template class Linked_hash_set { public: // Types. + /// Convenience alias for template arg. + using Key = Key_t; + + /// Convenience alias for template arg. + using Hash = Hash_t; + + /// Convenience alias for template arg. + using Pred = Pred_t; + /// Short-hand for values, which in this case are simply the keys. using Value = Key; @@ -62,7 +83,7 @@ class Linked_hash_set // Types (continued). - /// Type for index into array of items, where items are all applicable objects including `Value`s and `Key`s. + /// Expresses sizes/lengths of relevant things. 
using size_type = std::size_t; /// Type for difference of `size_type`s. using difference_type = std::ptrdiff_t; @@ -85,22 +106,22 @@ class Linked_hash_set */ using Reverse_iterator = Const_reverse_iterator; - /// For container compliance (hence the irregular capitalization): `Key` type. + /// For container compliance (hence the irregular capitalization): #Key type. using key_type = Key; - /// For container compliance (hence the irregular capitalization): `Value` type. + /// For container compliance (hence the irregular capitalization): #Value type. using value_type = Value; - /// For container compliance (hence the irregular capitalization): `Hash` type. + /// For container compliance (hence the irregular capitalization): #Hash type. using hasher = Hash; - /// For container compliance (hence the irregular capitalization): `Pred` type. + /// For container compliance (hence the irregular capitalization): #Pred type. using key_equal = Pred; - /// For container compliance (hence the irregular capitalization): pointer to `Key` type. + /// For container compliance (hence the irregular capitalization): pointer to #Key type. using pointer = Value*; /// For container compliance (hence the irregular capitalization): pointer to `const Key` type. - using const_pointer = Value const *; - /// For container compliance (hence the irregular capitalization): reference to `Key` type. + using const_pointer = const Value*; + /// For container compliance (hence the irregular capitalization): reference to #Key type. using reference = Value&; /// For container compliance (hence the irregular capitalization): reference to `const Key` type. - using const_reference = Value const &; + using const_reference = const Value&; /// For container compliance (hence the irregular capitalization): `Iterator` type. using iterator = Iterator; /// For container compliance (hence the irregular capitalization): `Const_iterator` type. 
@@ -112,52 +133,55 @@ class Linked_hash_set * Constructs empty structure with some basic parameters. * * @param n_buckets - * Number of buckets for the unordered (hash) table. Special value -1 (default) will cause us to use + * Number of buckets for the unordered (hash) table. Special value -1 (default) will cause us to use * whatever `unordered_set<>` would use by default. - * @param hasher_instance - * Instance of the hash function type (`hasher_instance(Key k)` should be `size_type`d hash of key `k`). - * @param key_equal_instance - * Instance of the equality function type (`key_equal_instance(Key k1, Key k2)` should return `true` if and - * only if `k1` equals `k2`). + * @param hasher_obj + * Instance of the hash function type (`hasher_obj(k) -> size_t` should be hash of `Key k`). + * @param pred + * Instance of the equality function type (`pred(k1, k2)` should return `true` if and + * only if the `Key`s are equal by value). */ - explicit Linked_hash_set(size_type n_buckets = size_type(-1), - Hash const & hasher_instance = Hash(), - Pred const & key_equal_instance = Pred()); + Linked_hash_set(size_type n_buckets = size_type(-1), + const Hash& hasher_obj = Hash{}, + const Pred& pred = Pred{}); /** * Constructs structure with some basic parameters, and values initialized from initializer list. - * The values are inserted as if `insert(v)` was called for each pair `v` in `values` - * in reverse order. Since the canonical ordering places the *newest* (last inserted/touch()ed) + * The values are inserted as if `insert(v)` was called for each element `v` in `values` + * **in reverse order**. Since the canonical ordering places the *newest* (last inserted/`touch()`ed) * element at the *front* of the ordering, that means that forward iteration through the set (right after this * constructor runs) will yield values in the *same* order as in initializer list `values`. * * @param values * Values with which to fill the structure after initializing it. 
* Typically you'd provide a series of keys like this: - * `{ key1, key2, ... }`. They will appear in iterated sequence in the same order as they appear - * in this list. + * `{ key1, key2, ... }`. They will appear in iterated sequence in the same order as + * they appear in this list. * @param n_buckets * See other constructor. - * @param hasher_instance + * @param hasher_obj * See other constructor. - * @param key_equal_instance + * @param pred * See other constructor. */ explicit Linked_hash_set(std::initializer_list values, size_type n_buckets = size_type(-1), - Hash const & hasher_instance = Hash(), - Pred const & key_equal_instance = Pred()); + const Hash& hasher_obj = Hash{}, + const Pred& pred = Pred{}); /** - * Constructs object that is a copy of the given source. Equivalent to `operator=(src)`. + * Constructs object that is a copy of the given source. Equivalent to default-ction followed by `operator=(src)`. * * @param src * Source object. */ - Linked_hash_set(Linked_hash_set const & src); + Linked_hash_set(const Linked_hash_set& src); /** * Constructs object by making it equal to the given source, while the given source becomes as-if default-cted. + * Equivalent to default-ction followed by `operator=(std::move(src))`. + * + * This is a constant-time operation. * * @param src * Source object which is emptied. @@ -167,99 +191,92 @@ class Linked_hash_set // Methods. /** - * Overwrites this object with a copy of the given source. We become equal to `src` but independent of it to the max - * extent possible (if you've got pointers stored in there, for example, the pointers are copied, not the values - * at those pointers). In addition, the hasher instance and equality predicate are copied from `src`. Finally, a - * reasonable attempt is made to also make the internal structure of the hash set to be similar to that of `src`. + * Overwrites this object with a copy of the given source. We become equal to `src` but independent of it to a + * common-sense extent. 
In addition, the hasher instance and equality predicate are copied from `src`. Finally, a + * reasonable attempt is made to also make the internal structure of the hash set to be similar to that of `src`. * * @param src - * Source object. + * Source object. No-op if `this == &src`. * @return `*this`. */ - Linked_hash_set& operator=(Linked_hash_set const & src); + Linked_hash_set& operator=(const Linked_hash_set& src); /** - * Overwrites this object making it equal to the given source, while the given source becomes as-if default-cted. + * Overwrites this object making it identical to the given source, while the given source becomes as-if default-cted. + * + * This is a constant-time operation, plus whatever is the cost of `this->clear()` (linear in pre-op `.size()`). * * @param src - * Source object which is emptied (unless it *is* `*this`; then no-op). + * Source object which is emptied; except no-op if `this == &src`. * @return `*this`. */ Linked_hash_set& operator=(Linked_hash_set&& src); /** - * Attempts to insert the given key into the set. If the key is already present in the set, - * does nothing. Return value indicates various info of interest about what occurred or did not occur. - * Key presence is determined according to the `Pred` template parameter which determines equality of 2 given keys; - * and via the `Hash` template parameter that enables efficient hash-based lookup. - * If inserted, the new element is considered "newest," as if by touch(). If not inserted, the existing element - * location is not affected. + * Swaps the contents of this structure and `other`. This is a constant-time operation, as internal + * representations are swapped instead of any copy-assignment. * - * @param key - * The key to attempt to insert. This value is copied, and the copy is inserted.
- * @return A pair whose second element is `true` if and only if the insertion occurred; and whose first element - * is an iterator pointing to either the newly inserted element or already present one equal to - * `key`. + * @see The `swap()` free function. + * It is generally best (equivalent but covers more generic cases) to use the ADL-enabled `swap(a, b)` + * pattern instead of this member function. That is: `using std::swap; ...; swap(a, b);`. + * (Details are outside our scope here; but in short ADL will cause the right thing to happen.) + * + * @param other + * The other structure. */ - std::pair insert(Value const & key); + void swap(Linked_hash_set& other); /** - * Attempts to find the given key in the set. Key presence is determined according to the `Pred` template - * parameter which determines equality of 2 given keys; and via the `Hash` template parameter that enables efficient - * hash-based lookup. The returned iterator (if valid) cannot be used to mutate the elements stored in the map. + * Attempts to insert (copying it) the given key into the set; if key + * already in `*this` makes no change. See also the overload which can avoid a copy and destructively move + * the key instead. * - * As long as the key is not removed from the map, the iterator will continue to be valid. + * Return value indicates various info of interest about what occurred or did not occur. + * If inserted, the new element is considered "newest," as if by touch(). If not inserted, the existing element + * location is not affected (use touch() upon consulting the return value, if this is desirable). * - * @note Let `r` be the returned value. Since no `key`-associated value beyond `key` itself is stored in the - * structure, the fact that `*r == key` is not valuable: you already had `key` after all!
It is only useful - in pin-pointing the relative location in the chronological ordering; in being used as an argument to - various erasing methods; and in checking for presence of the key in the set. For the latter, I recommend - the following utility: - @see util::key_exists(), which uses this method to more concisely check for the presence of a key. * @param key - * Key whose equal to find. - * @return If found, iterator to the equivalent key; else `this->const_past_oldest()`. + * The key to attempt to insert. A copy of this value is placed in `*this`. + * @return A pair whose second element is true if and only if the insertion occurred; and whose first element + * is an iterator pointing to either the newly inserted element or already present one with a key equal to + * `key`. */ - Const_iterator find(Key const & key) const; + std::pair insert(const Key& key); /** - * Returns the number of times a key is equivalent to the given one is present in the hash: either 1 or 0. + * Identical to the other overload, except that (if key not already present in `*this`) the key + * is moved, not copied, into `*this`. * * @param key - * Key whose equal to find. - * @return 0 or 1. + * The key to attempt to insert (it is moved-from, if insertion occurs). + * @return See other overload. */ - size_type count(Key const & key) const; + std::pair insert(Key&& key); /** - * Returns reference to immutable front ("newest") element in the structure; formally equivalent to - * `*(this->const_newest())`. + * Attempts to find the given key in the set. Key presence is determined identically to how it would be + * done in an `unordered_set`, with the particular #Hash and #Pred instances given to ctor + * (typically their default-cted instances, typically occupying no memory).
 * - * OK to call when empty(); but behavior undefined if you attempt to access the result in any way if either empty() - when this was called; or if `!empty()` at that time, but the underlying element is erased at time of access. - If not `empty()` when this was called, then resulting reference continues to be valid as long as the underlying - element is not erased; however, in the future the reference (while referring to the same element) might not refer - to front ("newest") element any longer. (Informally, most uses would only call const_front() when `!empty()`, and - would access it immediately and but once. However, I'm listing the corner cases above.) + * The returned iterator (if valid) *cannot* be used to mutate the key inside the set. * - * @return Reference to immutable `Key` (a/k/a `Value`) directly inside data structure; or to undefined location if - currently empty(). + * @param key + * Key whose equal to find. + * @return If found, iterator to the equivalent key; else `this->end()`. */ - Value const & const_front() const; + Const_iterator find(const Key& key) const; /** - * Returns reference to immutable back ("oldest") element in the structure; formally equivalent to - * `*(--this->const_past_oldest())`. - - * All other comments for const_front() apply analogously. + * Returns the number of times a key equal to the given one is present (as-if via find()) in the set: either 1 or 0. * - * @param key + * Key whose equal to find. + * @return 0 or 1.
*/ - Value const & const_back() const; + size_type count(const Key& key) const; /** - * Given a valid iterator into the structure, makes the pointed to element "newest" by moving it from wherever it + * Given a valid iterator into the structure, makes the pointed-to element "newest" by moving it from wherever it * is to be first in the iteration order. Behavior undefined if iterator invalid. * * The iterator continues to be valid. @@ -267,18 +284,18 @@ class Linked_hash_set * @param it * Iterator to an element of the structure. */ - void touch(Const_iterator const & it); + void touch(const Const_iterator& it); /** * Given a key into the structure, makes the corresponding element "newest" by moving it from wherever it - * is to be first in the iteration order; or does nothing if no such key. Return value indicates various info of - * interest about what occurred or did not occur. + * is to be first in the iteration order; or does nothing if no such key. `find(key)` equivalent is performed + * first. Return value indicates whether it was found. * * @param key * Key whose equal to find. - * @return `true` if the key was found (even if it was already "newest"); false if not found. + * @return `true` if the key was found (even if it was already "newest"); `false` if not found. */ - bool touch(Key const & key); + bool touch(const Key& key); /** * Erases the element pointed to by the given valid iterator. Behavior undefined if it is not valid. `it` becomes @@ -288,10 +305,11 @@ class Linked_hash_set * Iterator of element to erase. * @return Iterator one position past (i.e., "older") than `it`, before `*it` was removed. */ - Iterator erase(Const_iterator const & it); + Const_iterator erase(const Const_iterator& it); /** - * Erases all elements in the range [`it_newest`, `it_past_oldest`). Behavior undefined if given iterator is invalid. + * Erases all elements in the range [`it_newest`, `it_past_oldest`). 
Behavior undefined if a given iterator is + * invalid, or if the range is invalid. Corner case: an empty range is allowed; then this no-ops. Unless no-op, * `it_newest` becomes invalid. * * @param it_newest @@ -300,35 +318,21 @@ class Linked_hash_set * Iterator of one past last ("oldest") element to erase. * @return `it_past_oldest` copy. */ - Iterator erase(Const_iterator const & it_newest, Const_iterator const & it_past_oldest); + Const_iterator erase(const Const_iterator& it_newest, const Const_iterator& it_past_oldest); /** - * Erases the element with the given key, if it exists. Return value indicates various info of interest about what - * occurred or did not occur. + * Erases the element with the given key, if it exists. `find(key)` equivalent is performed + * first. Return value indicates whether it existed. * * @param key * Key such that its equal's (if found) element will be erased. * @return Number of elements erased (0 or 1). */ - size_type erase(Key const & key); - - /// Queue-style pop (erase) of the front -- a/k/a newest -- element. Behavior undefined if empty(). - void pop_front(); - - /// Queue-style pop (erase) of the back -- a/k/a oldest -- element. Behavior undefined if empty(). - void pop_back(); + size_type erase(const Key& key); /// Makes it so that `size() == 0`. void clear(); - /** - * Swaps the contents of this structure and `other`. This is a constant-time operation. - * - * @param other - * The other structure. - */ - void swap(Linked_hash_set& other); - /** * Synonym of newest(). * @return See newest(). @@ -427,119 +431,99 @@ class Linked_hash_set Const_reverse_iterator const_past_newest() const; /** - * Returns `true` if and only if container is empty. Same performance as of `unordered_map<>`. + * Returns true if and only if container is empty. Same performance as of `unordered_set<>`. * @return Ditto. */ bool empty() const; /** - * Returns number of elements stored. Same performance as of `unordered_map<>`. 
+ * Returns number of elements stored. Same performance as of `unordered_set<>.` * @return Ditto. */ size_type size() const; /** - * Returns max number of elements that can be stored. Same performance as of `unordered_map<>` + `list<>`. + * Returns max number of elements that can be stored. Same performance as of `unordered_set<>` + `list<>`. * @return Ditto. */ size_type max_size() const; private: - // Methods. - - /** - * Helper that modifies #m_value_list and #m_keys_into_list_map so that `key`'s copy is inserted into - * the structure. Pre-condition is that `key` is not in the structure (else behavior undefined). - * - * @param key - * Same as in insert(). - * @return Same as in `insert().first`. - */ - Iterator insert_impl(Value const & key); - - // Types. - - /// Short-hand for iterator into doubly linked list of `Key` elements. - using Value_list_iter = Iterator; - - /// Short-hand for const iterator into doubly linked list of `Key` elements. - using Value_list_const_iter = Const_iterator; - - /// Short-hand for a hash map that maps `Key` to iterator into doubly linked list of `Key` elements. - using Key_to_value_iter_map = boost::unordered_map; - // Data. - /// See Linked_hash_map::m_value_list. Essentially all of that applies here. + /// Analogous to Linked_hash_map::m_value_list; but simpler in that it just stores `Key`s, not pairs of (stuff). Value_list m_value_list; - /// See Linked_hash_map::m_keys_into_list_map. Essentially all of that applies here. - boost::movelib::unique_ptr m_keys_into_list_map; + /** + * Analogous to Linked_hash_map::m_value_iter_set; just configured to generate a simpler `.iter()` off each element + * by supplying `false` instead of `true` for the last template arg. + */ + Linked_hash_key_set m_value_iter_set; }; // class Linked_hash_set // Free functions: in *_fwd.hpp. // Template implementations. 
-template -Linked_hash_set::Linked_hash_set(size_type n_buckets, - hasher const & hasher_instance, - key_equal const & key_equal_instance) : - Linked_hash_set({}, n_buckets, hasher_instance, key_equal_instance) -{ - // Nothing. -} - -template -Linked_hash_set::Linked_hash_set(std::initializer_list values, - size_type n_buckets, - hasher const & hasher_instance, - key_equal const & key_equal_instance) : - // Their initializer_list is meant for a set, but it is perfect for our list of keys. - m_value_list(values) +template +Linked_hash_set::Linked_hash_set(size_type n_buckets, + const Hash& hasher_obj, + const Pred& pred) : + /* @todo Using detail:: like this is technically uncool, but so far all alternatives look worse. + * We blame the somewhat annoying ctor API for unordered_*. */ + m_value_iter_set((n_buckets == size_type(-1)) + ? boost::unordered::detail::default_bucket_count + : n_buckets, + hasher_obj, pred) +{ + // That's all. +} + +template +Linked_hash_set::Linked_hash_set(std::initializer_list values, + size_type n_buckets, + const Hash& hasher_obj, + const Pred& pred) : + // Their initializer_list is meant for a set of keys, but it is perfect for our list of keys. + m_value_list(values), + m_value_iter_set((n_buckets == size_type(-1)) + ? boost::unordered::detail::default_bucket_count // See @todo above. + : n_buckets, + hasher_obj, pred) { - using boost::unordered_set; - - /* Guess the default size, if they specified the default, from a dummy unrelated-type set. Probably - * that'll be correct. Even use our template argument values, just in case that matters. */ - if (n_buckets == size_type(-1)) - { - unordered_set dummy; - n_buckets = dummy.bucket_count(); - } - - // We use a unique_ptr<> because of the above: we couldn't immediately initialize this map. - m_keys_into_list_map.reset(new Key_to_value_iter_map(n_buckets, hasher_instance, key_equal_instance)); - // Now link each key in the quick-lookup table to its stored location in the ordering. 
- for (Value_list_iter value_list_it = m_value_list.begin(); value_list_it != m_value_list.end(); - ++value_list_it) + const auto value_list_end_it = m_value_list.cend(); + for (auto value_list_it = m_value_list.cbegin(); value_list_it != value_list_end_it; ++value_list_it) { - // Note this sets (at key K) the value: iterator to K. - (*m_keys_into_list_map)[*value_list_it] = value_list_it; + // Note that value_list_it contains both the iterator (lookup result) and the lookup key (iterator pointee). + m_value_iter_set.insert(value_list_it); } } -template -Linked_hash_set::Linked_hash_set(Linked_hash_set const & src) : - m_keys_into_list_map(new Key_to_value_iter_map()) // Dummy: all this is quickly replaced. +template +Linked_hash_set::Linked_hash_set(const Linked_hash_set& src) + // An empty m_value_iter_set is constructed here but immediately replaced within the {body}. { operator=(src); } -template -Linked_hash_set::Linked_hash_set(Linked_hash_set&& src) : - m_keys_into_list_map(new Key_to_value_iter_map()) // Dummy: all this is quickly replaced. +template +Linked_hash_set::Linked_hash_set(Linked_hash_set&& src) + // An empty m_value_iter_set is constructed here but immediately replaced within the {body}. { operator=(std::move(src)); } -template -Linked_hash_set& - Linked_hash_set::operator=(Linked_hash_set const & src) +template +Linked_hash_set& + Linked_hash_set::operator=(const Linked_hash_set& src) { - // See Linked_hash_map equivalent method, to which this is analogous. Keeping comments here light. + /* See Linked_hash_map equivalent method, to which this is analogous. Keeping comments here light. + * Though we don't have to do the reinterpret_cast<> thing; can just assign the list to src's counterpart; + * in our case Value is just Key -- no const-ness involved. 
*/ + + using Value_iter_set = decltype(m_value_iter_set); if (&src == this) { @@ -547,36 +531,25 @@ Linked_hash_set& } // else - { - using std::pair; - using std::list; - - using Mutable_key_list = list; - - Mutable_key_list* const dst_list_ptr - = reinterpret_cast(&m_value_list); - const Mutable_key_list* const src_list_ptr - = reinterpret_cast(&src.m_value_list); + m_value_list = src.m_value_list; - *dst_list_ptr = *src_list_ptr; - } - - *m_keys_into_list_map = *src.m_keys_into_list_map; + const auto& src_value_iter_set = src.m_value_iter_set; + m_value_iter_set = Value_iter_set{src_value_iter_set.bucket_count(), + src_value_iter_set.hash_function(), + src_value_iter_set.key_eq()}; - // So now replace the keys in the ready map. - for (Value_list_iter value_list_it = m_value_list.begin(); value_list_it != m_value_list.end(); - ++value_list_it) + const auto value_list_end_it = m_value_list.cend(); + for (auto value_list_it = m_value_list.cbegin(); value_list_it != value_list_end_it; ++value_list_it) { - // Note this sets (at key K) the value: iterator to K. - (*m_keys_into_list_map)[*value_list_it] = value_list_it; + m_value_iter_set.insert(value_list_it); } return *this; } // Linked_hash_set::operator=() -template -Linked_hash_set& - Linked_hash_set::operator=(Linked_hash_set&& src) +template +Linked_hash_set& + Linked_hash_set::operator=(Linked_hash_set&& src) { if (&src != this) { @@ -586,76 +559,88 @@ Linked_hash_set& return *this; } -template -std::pair::Iterator, bool> - Linked_hash_set::insert(Value const & key) +template +void Linked_hash_set::swap(Linked_hash_set& other) { - // See Linked_hash_map equivalent method, to which this is analogous. Keeping comments here light. + using std::swap; + swap(m_value_iter_set, other.m_value_iter_set); // unordered_set<> exchange; constant-time for sure at least. + swap(m_value_list, other.m_value_list); // list<> exchange (probably ~= head+tail pointer pairs exchanged). 
+ // Per cppreference.com `list<>::iterator`s (inside the `_maps`s) remain valid after list<>s swapped. +} + +template +std::pair::Iterator, bool> + Linked_hash_set::insert(const Key& key) +{ using std::pair; - typename Key_to_value_iter_map::iterator const map_it = m_keys_into_list_map->find(key); - if (map_it != m_keys_into_list_map->end()) + const auto set_it = m_value_iter_set.find(key); + if (set_it != m_value_iter_set.end()) { - return pair(map_it->second, false); + return pair{set_it->iter(), false}; // *set_it is Linked_hash_key. } - return pair(insert_impl(key), true); + // else + + /* Insert it at the front: as advertised, new element is "touched," meaning it is made "newest," so goes at start. + * Note that "it" = a copy of key; this invokes Key copy ctor, as emplace_front() forwards to it. */ + m_value_list.emplace_front(key); + + /* Iterator to the new element is therefore iterator to start of list of `Key`s. + * And make sure we can look it up in the future quickly (such as what is done above). + * Linked_hash_key_set m_value_iter_set achieves these aims black-boxily. */ + const auto list_it = m_value_list.cbegin(); + m_value_iter_set.insert(list_it); + return pair{list_it, true}; } -template -typename Linked_hash_set::Iterator - Linked_hash_set::insert_impl(Value const & key) +template +std::pair::Iterator, bool> + Linked_hash_set::insert(Key&& key) { - // See Linked_hash_map equivalent method, to which this is analogous. Keeping comments here light. + using std::pair; - m_value_list.push_front(key); - Iterator const new_elem_it = m_value_list.begin(); - (*m_keys_into_list_map)[key] = new_elem_it; + // Same as other insert() but construct value in-place inside the list<> as-if: Key k2{move(k)}. - return new_elem_it; -} + const auto set_it = m_value_iter_set.find(key); + if (set_it != m_value_iter_set.end()) + { + return pair{set_it->iter(), false}; // *set_it is Linked_hash_key. 
+ } + // else -template -typename Linked_hash_set::Const_iterator - Linked_hash_set::find(Key const & key) const -{ - typename Key_to_value_iter_map::const_iterator const map_it = m_keys_into_list_map->find(key); - return (map_it == m_keys_into_list_map->cend()) ? m_value_list.cend() : map_it->second; -} + m_value_list.emplace_front(std::move(key)); // <-- The difference. -template -typename Linked_hash_set::size_type - Linked_hash_set::count(Key const & key) const -{ - return m_keys_into_list_map->count(key); + const auto list_it = m_value_list.cbegin(); + m_value_iter_set.insert(list_it); + return pair{list_it, true}; } -template -typename Linked_hash_set::Value const & - Linked_hash_set::const_front() const +template +typename Linked_hash_set::Const_iterator + Linked_hash_set::find(const Key& key) const { - // No assert(): we promised not to crash even if empty(). They just can't access it subsequently if so. - return *(const_newest()); + const auto set_it = m_value_iter_set.find(key); + return (set_it == m_value_iter_set.cend()) ? m_value_list.cend() : set_it->iter(); } -template -typename Linked_hash_set::Value const & - Linked_hash_set::const_back() const +template +typename Linked_hash_set::size_type + Linked_hash_set::count(const Key& key) const { - // No assert(): we promised not to crash even if empty(). They just can't access it subsequently if so. 
- return *(--const_past_oldest()); + return m_value_iter_set.count(key); } -template -void Linked_hash_set::touch(Const_iterator const & it) +template +void Linked_hash_set::touch(const Const_iterator& it) { m_value_list.splice(m_value_list.begin(), m_value_list, it); } -template -bool Linked_hash_set::touch(Key const & key) +template +bool Linked_hash_set::touch(const Key& key) { - const Iterator it = find(key); + const auto it = find(key); if (it == end()) { return false; @@ -666,212 +651,187 @@ bool Linked_hash_set::touch(Key const & key) return true; } -template -typename Linked_hash_set::Iterator - Linked_hash_set::erase(Const_iterator const & it) +template +typename Linked_hash_set::Iterator + Linked_hash_set::erase(const Const_iterator& it) { - m_keys_into_list_map->erase(m_keys_into_list_map->find(*it)); + m_value_iter_set.erase(*it); return m_value_list.erase(it); } -template -typename Linked_hash_set::Iterator - Linked_hash_set::erase(Const_iterator const & it_newest, - Const_iterator const & it_past_oldest) +template +typename Linked_hash_set::Iterator + Linked_hash_set::erase(const Const_iterator& it_newest, const Const_iterator& it_past_oldest) { - for (Value_list_const_iter it = it_newest; it != it_past_oldest; ++it) + for (auto it = it_newest; it != it_past_oldest; ++it) { - m_keys_into_list_map->erase(it->first); + m_value_iter_set.erase(*it); } return m_value_list.erase(it_newest, it_past_oldest); } -template -typename Linked_hash_set::size_type - Linked_hash_set::erase(Key const & key) +template +typename Linked_hash_set::size_type + Linked_hash_set::erase(const Key& key) { - typename Key_to_value_iter_map::iterator const map_it = m_keys_into_list_map->find(key); - if (map_it == m_keys_into_list_map->end()) + const auto set_it = m_value_iter_set.find(key); + if (set_it == m_value_iter_set.end()) { return 0; } // else - m_value_list.erase(map_it->second); - m_keys_into_list_map->erase(map_it); + const auto list_it = set_it->iter(); + 
m_value_iter_set.erase(set_it); + m_value_list.erase(list_it); return 1; } -template -void Linked_hash_set::pop_front() +template +void Linked_hash_set::clear() { - assert(!empty()); - erase(const_newest()); -} - -template -void Linked_hash_set::pop_back() -{ - assert(!empty()); - erase(--const_past_oldest()); -} - -template -void Linked_hash_set::clear() -{ - m_keys_into_list_map->clear(); + m_value_iter_set.clear(); m_value_list.clear(); } -template -void Linked_hash_set::swap(Linked_hash_set& other) -{ - using std::swap; - - swap(m_keys_into_list_map, other.m_keys_into_list_map); // unique_ptr<>s exchanged (= raw pointers exchanged). - swap(m_value_list, other.m_value_list); // list<> exchange (probably = head+tail pointer pairs exchanged). - // Per cppreference.com `list<>::iterator`s (inside the `_maps`s) remain valid after list<>s swapped. -} - -template -typename Linked_hash_set::Iterator - Linked_hash_set::newest() const +template +typename Linked_hash_set::Iterator + Linked_hash_set::newest() const { return m_value_list.cbegin(); } -template -typename Linked_hash_set::Const_iterator - Linked_hash_set::const_newest() const +template +typename Linked_hash_set::Const_iterator + Linked_hash_set::const_newest() const { return newest(); // For us Iterator = Const_iterator. } -template -typename Linked_hash_set::Iterator - Linked_hash_set::begin() const +template +typename Linked_hash_set::Iterator + Linked_hash_set::begin() const { return newest(); } -template -typename Linked_hash_set::Const_iterator - Linked_hash_set::cbegin() const +template +typename Linked_hash_set::Const_iterator + Linked_hash_set::cbegin() const { return begin(); // For us Iterator = Const_iterator. 
} -template -typename Linked_hash_set::Iterator - Linked_hash_set::past_oldest() const +template +typename Linked_hash_set::Iterator + Linked_hash_set::past_oldest() const { return m_value_list.cend(); } -template -typename Linked_hash_set::Const_iterator - Linked_hash_set::const_past_oldest() const +template +typename Linked_hash_set::Const_iterator + Linked_hash_set::const_past_oldest() const { return past_oldest(); // For us Iterator = Const_iterator. } -template -typename Linked_hash_set::Iterator - Linked_hash_set::end() const +template +typename Linked_hash_set::Iterator + Linked_hash_set::end() const { return past_oldest(); } -template -typename Linked_hash_set::Const_iterator - Linked_hash_set::cend() const +template +typename Linked_hash_set::Const_iterator + Linked_hash_set::cend() const { return end(); // For us Iterator = Const_iterator. } -template -typename Linked_hash_set::Reverse_iterator - Linked_hash_set::oldest() const +template +typename Linked_hash_set::Reverse_iterator + Linked_hash_set::oldest() const { return m_value_list.crbegin(); } -template -typename Linked_hash_set::Const_reverse_iterator - Linked_hash_set::const_oldest() const +template +typename Linked_hash_set::Const_reverse_iterator + Linked_hash_set::const_oldest() const { return oldest(); // For us Iterator = Const_iterator. } -template -typename Linked_hash_set::Reverse_iterator - Linked_hash_set::rbegin() const +template +typename Linked_hash_set::Reverse_iterator + Linked_hash_set::rbegin() const { return oldest(); } -template -typename Linked_hash_set::Const_reverse_iterator - Linked_hash_set::crbegin() const +template +typename Linked_hash_set::Const_reverse_iterator + Linked_hash_set::crbegin() const { return rbegin(); // For us Reverse_iterator = Const_reverse_iterator. 
} -template -typename Linked_hash_set::Reverse_iterator - Linked_hash_set::past_newest() const +template +typename Linked_hash_set::Reverse_iterator + Linked_hash_set::past_newest() const { return m_value_list.crend(); // For us Reverse_iterator = Const_reverse_iterator. } -template -typename Linked_hash_set::Const_reverse_iterator - Linked_hash_set::const_past_newest() const +template +typename Linked_hash_set::Const_reverse_iterator + Linked_hash_set::const_past_newest() const { return past_newest(); // For us Reverse_iterator = Const_reverse_iterator. } -template -typename Linked_hash_set::Reverse_iterator - Linked_hash_set::rend() const +template +typename Linked_hash_set::Reverse_iterator + Linked_hash_set::rend() const { return past_newest(); } -template -typename Linked_hash_set::Const_reverse_iterator - Linked_hash_set::crend() const +template +typename Linked_hash_set::Const_reverse_iterator + Linked_hash_set::crend() const { return m_value_list.rend(); // For us Reverse_iterator = Const_reverse_iterator. } -template -typename Linked_hash_set::size_type - Linked_hash_set::size() const +template +typename Linked_hash_set::size_type + Linked_hash_set::size() const { - return m_keys_into_list_map->size(); // I'm skeptical/terrified of list::size()'s time complexity. + return m_value_iter_set.size(); // I'm skeptical/terrified of list::size()'s time complexity. 
} -template -bool Linked_hash_set::empty() const +template +bool Linked_hash_set::empty() const { - return m_keys_into_list_map->empty(); + return m_value_list.empty(); } -template -typename Linked_hash_set::size_type - Linked_hash_set::max_size() const +template +typename Linked_hash_set::size_type + Linked_hash_set::max_size() const { - return std::min(m_keys_into_list_map->max_size(), m_value_list.max_size()); + return std::min(m_value_iter_set.max_size(), m_value_list.max_size()); } -template -void swap(Linked_hash_set& val1, Linked_hash_set& val2) +template +void swap(Linked_hash_set& val1, Linked_hash_set& val2) { val1.swap(val2); } } // namespace flow::util - diff --git a/src/flow/util/sched_task.cpp b/src/flow/util/sched_task.cpp index 4f958ad47..63c3b2902 100644 --- a/src/flow/util/sched_task.cpp +++ b/src/flow/util/sched_task.cpp @@ -158,7 +158,7 @@ bool scheduled_task_short_fire(log::Logger* logger_ptr, Scheduled_task_handle ta } else { - Lock_guard lock(*task->m_mutex_unless_single_threaded); + Lock_guard lock{*task->m_mutex_unless_single_threaded}; noop = !fire_if_should(); } @@ -185,7 +185,7 @@ bool scheduled_task_short_fire(log::Logger* logger_ptr, Scheduled_task_handle ta } else { - Lock_guard lock(*task->m_mutex_unless_single_threaded); + Lock_guard lock{*task->m_mutex_unless_single_threaded}; assert(task->m_fired && (!task->m_canceled)); } #endif @@ -223,7 +223,7 @@ Fine_duration scheduled_task_fires_from_now_or_canceled(log::Logger* logger_ptr, const auto& mtx = task->m_mutex_unless_single_threaded; if (mtx) { - Lock_guard lock(*mtx); + Lock_guard lock{*mtx}; return do_it(); } // else @@ -249,7 +249,7 @@ bool scheduled_task_fired(log::Logger* logger_ptr, Scheduled_task_const_handle t const auto& mtx = task->m_mutex_unless_single_threaded; if (mtx) { - Lock_guard lock(*mtx); + Lock_guard lock{*mtx}; return do_it(); } // else @@ -270,7 +270,7 @@ bool scheduled_task_canceled(log::Logger* logger_ptr, Scheduled_task_const_handl const auto& mtx = 
task->m_mutex_unless_single_threaded; if (mtx) { - Lock_guard lock(*mtx); + Lock_guard lock{*mtx}; return do_it(); } // else diff --git a/src/flow/util/sched_task.hpp b/src/flow/util/sched_task.hpp index ee2e8493a..b2bf799ff 100644 --- a/src/flow/util/sched_task.hpp +++ b/src/flow/util/sched_task.hpp @@ -176,7 +176,7 @@ Scheduled_task_handle schedule_task_from_now(log::Logger* logger_ptr, } else { - Lock_guard lock(*task->m_mutex_unless_single_threaded); + Lock_guard lock{*task->m_mutex_unless_single_threaded}; assert(task->m_fired != task->m_canceled); } } @@ -224,7 +224,7 @@ Scheduled_task_handle schedule_task_from_now(log::Logger* logger_ptr, } else { - Lock_guard lock(*task->m_mutex_unless_single_threaded); + Lock_guard lock{*task->m_mutex_unless_single_threaded}; noop = !should_fire(); } diff --git a/src/flow/util/sched_task_fwd.hpp b/src/flow/util/sched_task_fwd.hpp index c1ea5fbb3..6e8d95c0f 100644 --- a/src/flow/util/sched_task_fwd.hpp +++ b/src/flow/util/sched_task_fwd.hpp @@ -34,7 +34,7 @@ struct Scheduled_task_handle_state; /** * Black-box type that represents a handle to a scheduled task as scheduled by * schedule_task_at() or schedule_task_from_now() or similar, which can be (optionally) used to control the - * scheduled task after it has been thus scheduled. Special value `Scheduled_task_handle()` represents an invalid task + * scheduled task after it has been thus scheduled. Special value `Scheduled_task_handle{}` represents an invalid task * and can be used as a sentinel, as with a null pointer. * * Values of this type are to be passed around by value, not reference. They are light-weight. @@ -128,8 +128,6 @@ using Scheduled_task = Function; * ASAP, a/k/a short-fired, via scheduled_task_short_fire(). * - One can (incrementally) schedule 2+ tasks to fire at the scheduled time on one #Timer; this facility only takes * exactly 1 task, up-front. (We could provide such an API, but again this feels like it defeats the point.) 
- * - #Timer has certain informational accessors (like one that returns the scheduled firing time) that we lack. - * (Again, we could provide this also -- but why?) * * @todo We could eliminate schedule_task_from_now() potential limitation versus #Timer wherein each call constructs * (internally) a new #Timer. A pool of `Timer`s can be internally maintained to implement this. This may or may not diff --git a/src/flow/util/shared_ptr_alias_holder.hpp b/src/flow/util/shared_ptr_alias_holder.hpp index 20c21a982..7f2f1c5da 100644 --- a/src/flow/util/shared_ptr_alias_holder.hpp +++ b/src/flow/util/shared_ptr_alias_holder.hpp @@ -188,7 +188,7 @@ typename Shared_ptr_alias_holder::Ptr { // This was taken, conceptually, from the `{static|dynamic|...}_pointer_cast` page of cppreference.com. auto const raw_ptr_post_cast = static_cast(ptr_to_cast.get()); - return Target_ptr(ptr_to_cast, raw_ptr_post_cast); + return Target_ptr{ptr_to_cast, raw_ptr_post_cast}; } template @@ -198,7 +198,7 @@ typename Shared_ptr_alias_holder::Const_ptr { // This was taken, conceptually, from the `{static|dynamic|...}_pointer_cast` page of cppreference.com. auto const raw_ptr_post_cast = static_cast(ptr_to_cast.get()); - return Const_target_ptr(ptr_to_cast, raw_ptr_post_cast); + return Const_target_ptr{ptr_to_cast, raw_ptr_post_cast}; } template @@ -208,8 +208,8 @@ typename Shared_ptr_alias_holder::Ptr { // This was taken, conceptually, from the `{static|dynamic|...}_pointer_cast` page of cppreference.com. auto const raw_ptr_post_cast = dynamic_cast(ptr_to_cast.get()); - return raw_ptr_post_cast ? Target_ptr(ptr_to_cast, raw_ptr_post_cast) - : Target_ptr(); + return raw_ptr_post_cast ? Target_ptr{ptr_to_cast, raw_ptr_post_cast} + : Target_ptr{}; } template @@ -219,8 +219,8 @@ typename Shared_ptr_alias_holder::Const_ptr { // This was taken, conceptually, from the `{static|dynamic|...}_pointer_cast` page of cppreference.com. 
auto const raw_ptr_post_cast = dynamic_cast(ptr_to_cast.get()); - return raw_ptr_post_cast ? Const_target_ptr(ptr_to_cast, raw_ptr_post_cast) - : Const_target_ptr(); + return raw_ptr_post_cast ? Const_target_ptr{ptr_to_cast, raw_ptr_post_cast} + : Const_target_ptr{}; } } // namespace flow::util diff --git a/src/flow/util/string_ostream.hpp b/src/flow/util/string_ostream.hpp index f28c12571..8c880482d 100644 --- a/src/flow/util/string_ostream.hpp +++ b/src/flow/util/string_ostream.hpp @@ -57,7 +57,7 @@ class String_ostream : * undefined behavior. (It should go without saying, but using `const_cast` or equivalents counts as being * outside the bounds of this class's API.) */ - explicit String_ostream(std::string* target_str = 0); + explicit String_ostream(std::string* target_str = nullptr); // Methods. diff --git a/src/flow/util/string_view.hpp b/src/flow/util/string_view.hpp index 75524cfbe..5d3a9a260 100644 --- a/src/flow/util/string_view.hpp +++ b/src/flow/util/string_view.hpp @@ -254,7 +254,7 @@ bool Basic_string_view::starts_with(Ch const * needle) const * arg to starts_with() below) could improve perf; but on the other * hand it could even slow it down in the haystack-too-small case; plus length() is likely to be * assembly-optimized nicely. Let's just not worry too much about it. */ - return starts_with(Basic_string_view(needle)); + return starts_with(Basic_string_view{needle}); } template @@ -288,7 +288,7 @@ template bool Basic_string_view::ends_with(Ch const * needle) const { // Same comment as in starts_with(). - return ends_with(Basic_string_view(needle)); + return ends_with(Basic_string_view{needle}); } template diff --git a/src/flow/util/test/blob_test.cpp b/src/flow/util/test/blob_test.cpp new file mode 100644 index 000000000..bd68cabf9 --- /dev/null +++ b/src/flow/util/test/blob_test.cpp @@ -0,0 +1,1093 @@ +/* Flow + * Copyright 2023 Akamai Technologies, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in + * compliance with the License. You may obtain a copy + * of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in + * writing, software distributed under the License is + * distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing + * permissions and limitations under the License. */ + +#include "flow/util/blob.hpp" +#include "flow/log/buffer_logger.hpp" +#include +#include +#include +#include +#include +#include +#include + +namespace flow::util::test +{ + +namespace +{ + +using boost::asio::const_buffer; +using boost::asio::mutable_buffer; +using std::make_shared; +using std::shared_ptr; +using std::string; +using std::optional; +using std::all_of; +using std::cout; +using std::flush; +using std::vector; +using std::allocator_traits; +namespace bipc = boost::interprocess; + +/* Basic_blob supports SHM-friendly allocators, and we test this to some extent. The testing just + * tests the ability to use those allocators, by using boost.interprocess's `allocator` in some of the sub-runs; + * as well as some specific stateful-allocator-based checks when a given blob type supports it (see `using Blob_t = `). + * + * We *could* actually fork a process and try sharing the memory realistically, but this isn't a SHM test per se; + * rather it's about (1) SHM-*friendly* allocators being properly handled (mostlly not using raw pointers but + * Allocator::pointer, inside Basic_blob that is); and (2) specifically *stateful* allocators (e.g., + * std::allocator and even Flow-IPC's Stateless_allocator are stateless -- we the former too, naturally). + * + * So we do use some SHM-pool-based bipc `allocator`s, but we don't go so far as to communicate between 2+ processes. 
*/ +using Shm_pool = bipc::managed_shared_memory; +using Shm_allocator = bipc::allocator; + +using Vanilla_allocator = std::allocator; + +template +Blob_t make_blob([[maybe_unused]] const Allocator_t* alloc_if_applicable, + log::Logger* logger, Ctor_args&&... ctor_args) +{ + constexpr bool HAS_LOG_CTX = std::is_same_v>; + + if constexpr(HAS_LOG_CTX) // Also means does *not* take an Allocator arg (forces std::allocator). + { + return Blob_t{logger, std::forward(ctor_args)...}; + } + else + { + assert(alloc_if_applicable); + return Blob_t{std::forward(ctor_args)..., logger, *alloc_if_applicable}; + } +} + +} // Anonymous namespace + +// Yes... this is very cheesy... but this is a test, so I don't really care. +#define CTX ostream_op_string("Caller context [", FLOW_UTIL_WHERE_AM_I_STR(), "].") + +TEST(Blob, Interface) // Note that other test-cases specifically test SHARING=true and fancy-allocator support. +{ + using std::swap; // This enables proper ADL. + + /* In this test-case we don't actually share memory between processes, but in some cases we do allocate + * from a SHM-pool, so set one up (for this process only), ensuring it is cleaned-up at the start and the end. */ + struct Shm_remove + { + Shm_remove() { bipc::shared_memory_object::remove("FlowUnitTestBlobInterface"); + bipc::shared_memory_object::remove("FlowUnitTestBlobInterface2"); } + ~Shm_remove() { bipc::shared_memory_object::remove("FlowUnitTestBlobInterface"); + bipc::shared_memory_object::remove("FlowUnitTestBlobInterface2"); } + } remover; + Shm_pool shm_pool{bipc::create_only, "FlowUnitTestBlobInterface", 64 * 1024 * 1024}; + Shm_pool shm_pool2{bipc::create_only, "FlowUnitTestBlobInterface2", 64 * 1024 * 1024}; + + const auto test_type = [&](auto type_specimen) + { + /* Is either Basic_blob<...> or Blob_with_log_context<...>; and further variation is possible due to <...> + * Much of the API is identical among them but not all of it. 
+ * + * In most (not all) of this lambda we don't specifically test APIs enabled by SHARING=true, but the common API's + * impl still somewhat differs internally depending on SHARING; so it makes sense to invoke us for both + * situations, even if sharing itself is not being tested. + * + * Regarding logging: We don't intend to check the actual log output, if any, but we do want to ensure + * that enabling logging (non-null Logger) doesn't cause stability problems for example. There's further + * differentiation logging-wise; if !HAS_LOG_CTX then some APIs (not just ctor) take Logger* which has effect + * only ~within that method; else ctor takes Logger*, and it is memorized in that object. We choose to test + * both but "cheat" by knowing that HAS_LOG_CTX-case internally is implemented in terms of the !HAS_LOG_CTX + * case (Blob_with_log_context derives from Basic_blob and mainly uses the aforementioned Logger*-taking APIs + * from that point on, wrapping them). Since we know that: + * - When constructing a Blob_t, we'll use the proper API and pass it a Logger. + * - If HAS_LOG_CTX, then indirectly the !HAS_LOG_CTX case is tested too, fairly fully -- not just + * the logging by the ctor. + * - When calling other APIs of Blob_t, we do *not* pass a Logger*, even if !HAS_LOG_CTX. While formally + * this omits testing of that case, it still gets decent coverage in reality (as of this writing anyway) + * due to the preceding bullet. Meanwhile the test-code is less tedious. @todo Maybe reconsider. 
*/ + using Blob_t = decltype(type_specimen); + constexpr bool SHARING = Blob_t::S_SHARING; + [[maybe_unused]] constexpr bool HAS_LOG_CTX = std::is_same_v>; + constexpr bool SHM_ALLOC = !Blob_t::S_IS_VANILLA_ALLOC; + using Allocator = typename Blob_t::Allocator_raw; + + cout << "Testing type [" << typeid(Blob_t).name() << "].\n" << flush; + + log::Config log_config; + log::Buffer_logger logger{&log_config}; + + optional alloc_v; + if constexpr(SHM_ALLOC) { alloc_v.emplace(shm_pool.get_segment_manager()); } else { alloc_v.emplace(); } + // For tests of 2 `Blob_t`s, each allocating with actually-different allocator (if SHM_ALLOC). + optional alloc2_v; + if constexpr(SHM_ALLOC) { alloc2_v.emplace(shm_pool2.get_segment_manager()); } else { alloc2_v.emplace(); } + const Allocator* const alloc = &(*alloc_v); + const Allocator* const alloc2 = &(*alloc2_v); + + constexpr size_t N_SM = 1024; + ASSERT_EQ(N_SM % 2, 0) << "We shall be dividing it by 2; should probably be even."; + constexpr size_t ZERO = 0; + + const uint8_t ONE = 1; + + constexpr auto CH = char(ONE); + const string STRING(37, CH); + const const_buffer STR_BUF{STRING.data(), STRING.size()}; + const string STRING_SM(14, '\0'); + const const_buffer STR_SM_BUF{STRING_SM.data(), STRING_SM.size()}; + + const auto RNG_ZERO_FN = [](const auto& it1, const auto& it2) -> auto + { + return all_of(it1, it2, [](uint8_t x) -> auto { return x == uint8_t(0); }); + }; + const auto ALL_ZERO_FN = [&](const auto& blob) -> auto + { + return RNG_ZERO_FN(blob.begin(), blob.end()); + }; + const auto RNG_ONES_FN = [](const auto& it1, const auto& it2) -> auto + { + return all_of(it1, it2, [](uint8_t x) -> auto { return x == ONE; }); + }; + + cout << " General tests....\n" << flush; + + { // Null blobs. 
+ auto b1 = make_blob(alloc, &logger, ZERO); + EXPECT_TRUE(b1.zero()); EXPECT_TRUE(b1.empty()); + EXPECT_EQ(b1.size(), 0); EXPECT_EQ(b1.start(), 0); EXPECT_EQ(b1.capacity(), 0); + b1.resize(0); + EXPECT_TRUE(b1.zero()); EXPECT_TRUE(b1.empty()); + EXPECT_EQ(b1.size(), 0); EXPECT_EQ(b1.start(), 0); EXPECT_EQ(b1.capacity(), 0); + b1.resize(0, Blob_t::S_UNCHANGED); + EXPECT_TRUE(b1.zero()); EXPECT_TRUE(b1.empty()); + EXPECT_EQ(b1.size(), 0); EXPECT_EQ(b1.start(), 0); EXPECT_EQ(b1.capacity(), 0); + b1.resize(0, 0); + EXPECT_TRUE(b1.zero()); EXPECT_TRUE(b1.empty()); + EXPECT_EQ(b1.size(), 0); EXPECT_EQ(b1.start(), 0); EXPECT_EQ(b1.capacity(), 0); + b1.resize(0, CLEAR_ON_ALLOC); + EXPECT_TRUE(b1.zero()); EXPECT_TRUE(b1.empty()); + EXPECT_EQ(b1.size(), 0); EXPECT_EQ(b1.start(), 0); EXPECT_EQ(b1.capacity(), 0); + b1.resize(0, CLEAR_ON_ALLOC, Blob_t::S_UNCHANGED); + EXPECT_TRUE(b1.zero()); EXPECT_TRUE(b1.empty()); + EXPECT_EQ(b1.size(), 0); EXPECT_EQ(b1.start(), 0); EXPECT_EQ(b1.capacity(), 0); + b1.resize(0, CLEAR_ON_ALLOC, 0); + EXPECT_TRUE(b1.zero()); EXPECT_TRUE(b1.empty()); + EXPECT_EQ(b1.size(), 0); EXPECT_EQ(b1.start(), 0); EXPECT_EQ(b1.capacity(), 0); + b1.make_zero(); + EXPECT_TRUE(b1.zero()); EXPECT_TRUE(b1.empty()); + EXPECT_EQ(b1.size(), 0); EXPECT_EQ(b1.start(), 0); EXPECT_EQ(b1.capacity(), 0); + b1.reserve(0); + EXPECT_TRUE(b1.zero()); EXPECT_TRUE(b1.empty()); + EXPECT_EQ(b1.size(), 0); EXPECT_EQ(b1.start(), 0); EXPECT_EQ(b1.capacity(), 0); + b1.reserve(0, CLEAR_ON_ALLOC); + EXPECT_TRUE(b1.zero()); EXPECT_TRUE(b1.empty()); + EXPECT_EQ(b1.size(), 0); EXPECT_EQ(b1.start(), 0); EXPECT_EQ(b1.capacity(), 0); + b1.start_past_prefix(0); + EXPECT_TRUE(b1.zero()); EXPECT_TRUE(b1.empty()); + EXPECT_EQ(b1.size(), 0); EXPECT_EQ(b1.start(), 0); EXPECT_EQ(b1.capacity(), 0); + b1.start_past_prefix_inc(0); + EXPECT_TRUE(b1.zero()); EXPECT_TRUE(b1.empty()); + EXPECT_EQ(b1.size(), 0); EXPECT_EQ(b1.start(), 0); EXPECT_EQ(b1.capacity(), 0); + + Blob_t b2{b1}; // Copy-ct. 
+ EXPECT_TRUE(b1.zero()); EXPECT_TRUE(b1.empty()); + EXPECT_EQ(b1.size(), 0); EXPECT_EQ(b1.start(), 0); EXPECT_EQ(b1.capacity(), 0); + EXPECT_TRUE(b2.zero()); EXPECT_TRUE(b2.empty()); + EXPECT_EQ(b2.size(), 0); EXPECT_EQ(b2.start(), 0); EXPECT_EQ(b2.capacity(), 0); + b1 = b2; // Copy-assign. + EXPECT_TRUE(b1.zero()); EXPECT_TRUE(b1.empty()); + EXPECT_EQ(b1.size(), 0); EXPECT_EQ(b1.start(), 0); EXPECT_EQ(b1.capacity(), 0); + EXPECT_TRUE(b2.zero()); EXPECT_TRUE(b2.empty()); + EXPECT_EQ(b2.size(), 0); EXPECT_EQ(b2.start(), 0); EXPECT_EQ(b2.capacity(), 0); + Blob_t b3{std::move(b2)}; // Move-ct. + EXPECT_TRUE(b3.zero()); EXPECT_TRUE(b3.empty()); + EXPECT_EQ(b3.size(), 0); EXPECT_EQ(b3.start(), 0); EXPECT_EQ(b3.capacity(), 0); + EXPECT_TRUE(b2.zero()); EXPECT_TRUE(b2.empty()); + EXPECT_EQ(b2.size(), 0); EXPECT_EQ(b2.start(), 0); EXPECT_EQ(b2.capacity(), 0); + b2 = std::move(b3); // Move-assign. + EXPECT_TRUE(b3.zero()); EXPECT_TRUE(b3.empty()); + EXPECT_EQ(b3.size(), 0); EXPECT_EQ(b3.start(), 0); EXPECT_EQ(b3.capacity(), 0); + EXPECT_TRUE(b2.zero()); EXPECT_TRUE(b2.empty()); + EXPECT_EQ(b2.size(), 0); EXPECT_EQ(b2.start(), 0); EXPECT_EQ(b2.capacity(), 0); + swap(b2, b3); + EXPECT_TRUE(b3.zero()); EXPECT_TRUE(b3.empty()); + EXPECT_EQ(b3.size(), 0); EXPECT_EQ(b3.start(), 0); EXPECT_EQ(b3.capacity(), 0); + EXPECT_TRUE(b2.zero()); EXPECT_TRUE(b2.empty()); + EXPECT_EQ(b2.size(), 0); EXPECT_EQ(b2.start(), 0); EXPECT_EQ(b2.capacity(), 0); + } // Null blobs. + + // We can now generally perhaps ignore null blobs when testing construct/assign ops. + + { // Copy-ct. 
+ auto b1 = make_blob(alloc, &logger, N_SM, CLEAR_ON_ALLOC); + EXPECT_FALSE(b1.zero()); EXPECT_FALSE(b1.empty()); + EXPECT_EQ(b1.size(), N_SM); EXPECT_EQ(b1.start(), 0); EXPECT_EQ(b1.capacity(), N_SM); + EXPECT_TRUE(ALL_ZERO_FN(b1)); + Blob_t b2{b1}; + EXPECT_FALSE(b1.zero()); EXPECT_FALSE(b1.empty()); + EXPECT_EQ(b1.size(), N_SM); EXPECT_EQ(b1.start(), 0); EXPECT_EQ(b1.capacity(), N_SM); + EXPECT_TRUE(ALL_ZERO_FN(b1)); + EXPECT_FALSE(b2.zero()); EXPECT_FALSE(b2.empty()); + EXPECT_EQ(b2.size(), N_SM); EXPECT_EQ(b2.start(), 0); EXPECT_EQ(b2.capacity(), N_SM); + EXPECT_TRUE(ALL_ZERO_FN(b2)); + EXPECT_NE(b1.data(), b2.data()); + + b2.resize(b2.size() / 2, b2.size() / 2); + Blob_t b3{b2}; + EXPECT_FALSE(b3.zero()); EXPECT_FALSE(b3.empty()); + EXPECT_EQ(b3.size(), N_SM / 2); + // Attn: Only [.b(), .e()) range copied; start() shall be zero, and capacity() big-enough for size() only. + EXPECT_EQ(b3.start(), 0); EXPECT_EQ(b3.capacity(), N_SM / 2); + EXPECT_FALSE(b2.zero()); EXPECT_FALSE(b2.empty()); + EXPECT_EQ(b2.size(), N_SM / 2); EXPECT_EQ(b2.start(), N_SM / 2); EXPECT_EQ(b2.capacity(), N_SM); + EXPECT_NE(b3.data(), b2.data()); + EXPECT_TRUE(ALL_ZERO_FN(b3)); + EXPECT_TRUE(ALL_ZERO_FN(b2)); + } // Copy-ct. + + { // Copy-assign et al. + auto b1 = make_blob(alloc, &logger, N_SM, CLEAR_ON_ALLOC); + b1.resize(b1.size() / 2, b1.size() / 2); + auto b2 = make_blob(alloc, &logger, ZERO); + b2 = b1; // Overwrite null blob. + EXPECT_FALSE(b2.zero()); EXPECT_FALSE(b2.empty()); + EXPECT_EQ(b2.size(), N_SM / 2); + EXPECT_EQ(b2.start(), 0); EXPECT_EQ(b2.capacity(), N_SM / 2); // Attn: same deal as with copy-ct. 
+ EXPECT_TRUE(ALL_ZERO_FN(b2)); + EXPECT_NE(b1.data(), b2.data()); + EXPECT_FALSE(b1.zero()); EXPECT_FALSE(b1.empty()); + EXPECT_EQ(b1.size(), N_SM / 2); EXPECT_EQ(b1.start(), N_SM / 2); EXPECT_EQ(b1.capacity(), N_SM); + EXPECT_TRUE(ALL_ZERO_FN(b1)); + + auto b3 = make_blob(alloc, &logger, N_SM); + std::memset(b3.begin(), ONE, b3.size()); + const size_t N_TN = 5; + b3.resize(N_SM - N_TN, N_TN); // Structure: [N_TN][N_SM - N_TN][], all ONEs. Terms: [prefix][body][postfix]. + EXPECT_TRUE(RNG_ONES_FN(b3.begin() - N_TN, b3.end())); // Ensure they're all ONEs in fact. + EXPECT_EQ(b3.capacity(), N_SM); ASSERT_EQ(b2.capacity(), N_SM / 2) << "Sanity-check."; + /* Overwrite non-null, larger-capacity blob, starting at buffer-start (not just past the leading [N_TN] area, + * meaning not at .begin()). + * I.e., it makes use of as much buffer as it can, as early in it as possible, meaning at its start. .begin() + * is adjusted accordingly. */ + b3 = b2; // So now it's [][N_SM / 2][N_SM - N_SN/2]. + EXPECT_FALSE(b3.zero()); EXPECT_FALSE(b3.empty()); + EXPECT_EQ(b3.start(), 0); EXPECT_EQ(b3.size(), N_SM / 2); EXPECT_EQ(b3.capacity(), N_SM); + EXPECT_TRUE(ALL_ZERO_FN(b3)); // Copied-stuff a/k/a body should be as in b2. + EXPECT_TRUE(RNG_ONES_FN(b3.end(), b3.begin() - b3.start() + b3.capacity())); // Postfix should be untouched. + + { // Copy blob over itself (no-op). + const auto saved_dt = b3.data(); + const auto saved_start = b3.start(); const auto saved_size = b3.size(); const auto saved_cap = b3.capacity(); + b3 = b3; + EXPECT_EQ(b3.data(), saved_dt); + EXPECT_EQ(b3.start(), saved_start); EXPECT_EQ(b3.size(), saved_size); EXPECT_EQ(b3.capacity(), saved_cap); + } + auto b4 = make_blob(alloc, &logger, ZERO); + { // Copy null blob over itself (no-op). 
+ const auto saved_dt = b4.data(); + const auto saved_start = b4.start(); const auto saved_size = b4.size(); const auto saved_cap = b4.capacity(); + b4 = b4; + EXPECT_EQ(b4.data(), saved_dt); + EXPECT_EQ(b4.start(), saved_start); EXPECT_EQ(b4.size(), saved_size); EXPECT_EQ(b4.capacity(), saved_cap); + } + + /* "Cheating" white-boxily in this reasoning: really the above assignments build on various directly-accessible + * APIs including assign(), assign_copy(), emplace_copy(), resize(), and reserve(). Of these the "core" ones + * are emplace_copy() (copy bytes from anywhere into a blob) and reserve() (allocate if needed). So we've + * tested stuff already. Still throw in a few direct calls just in case (e.g., impl could change). We can + * really skip assign() though, as it is the same as copy-assignment. */ + // assign_copy(). + auto b5 = make_blob(alloc, &logger, ZERO); + EXPECT_EQ(b5.assign_copy(STR_BUF), STR_BUF.size()); // Made of ONEs. + EXPECT_EQ(b5.start(), 0); EXPECT_EQ(b5.size(), STRING.size()); EXPECT_EQ(b5.capacity(), STRING.size()); + EXPECT_TRUE(RNG_ONES_FN(b5.begin(), b5.end())); + EXPECT_EQ(b5.assign_copy(STR_SM_BUF), STR_SM_BUF.size()); // Made of zeroes. + EXPECT_EQ(b5.start(), 0); EXPECT_EQ(b5.size(), STRING_SM.size()); EXPECT_EQ(b5.capacity(), STRING.size()); + EXPECT_TRUE(ALL_ZERO_FN(b5)); + EXPECT_TRUE(RNG_ONES_FN(b5.end(), b5.begin() + STRING.size())); + // emplace_copy(). + b5.resize(b5.capacity()); + EXPECT_EQ(b5.start(), 0); EXPECT_EQ(b5.size(), STRING.size()); EXPECT_EQ(b5.capacity(), STRING.size()); + EXPECT_TRUE(RNG_ONES_FN(b5.begin() + STRING_SM.size(), b5.begin() + STRING.size())); + EXPECT_EQ(b5.emplace_copy(b5.begin() + (STRING_SM.size() / 2), STR_SM_BUF), + b5.begin() + (STRING_SM.size() / 2) + STRING_SM.size()); + // All these must remain unchanged; just bytes were copied into a sub-range of [.b(), .e()). 
+ EXPECT_EQ(b5.start(), 0); EXPECT_EQ(b5.size(), STRING.size()); EXPECT_EQ(b5.capacity(), STRING.size()); + EXPECT_TRUE(RNG_ZERO_FN(b5.begin(), + b5.begin() + (STRING_SM.size() / 2))); + EXPECT_TRUE(RNG_ZERO_FN(b5.begin() + (STRING_SM.size() / 2), + b5.begin() + (STRING_SM.size() / 2) + STRING_SM.size())); + EXPECT_TRUE(RNG_ONES_FN(b5.begin() + (STRING_SM.size() / 2) + STRING_SM.size(), + b5.end())); + // Copy (non-overlappingly only) inside the blob. + EXPECT_EQ(b5.emplace_copy(b5.begin() + 1, + const_buffer{b5.end() - 5, 4}), + b5.begin() + 5); + EXPECT_EQ(b5.start(), 0); EXPECT_EQ(b5.size(), STRING.size()); EXPECT_EQ(b5.capacity(), STRING.size()); + EXPECT_EQ(b5.front(), 0); + EXPECT_TRUE(RNG_ONES_FN(b5.begin() + 1, + b5.begin() + 1 + 4)); + EXPECT_TRUE(RNG_ZERO_FN(b5.begin() + 1 + 4, + b5.begin() + (STRING_SM.size() / 2))); + EXPECT_TRUE(RNG_ZERO_FN(b5.begin() + (STRING_SM.size() / 2), + b5.begin() + (STRING_SM.size() / 2) + STRING_SM.size())); + EXPECT_TRUE(RNG_ONES_FN(b5.begin() + (STRING_SM.size() / 2) + STRING_SM.size(), + b5.end())); + } // Copy-assign et al. + + { // Move-ct, move-assign. 
+ const size_t N_TN = 5; + + auto b1 = make_blob(alloc, &logger, N_SM, CLEAR_ON_ALLOC); + EXPECT_FALSE(b1.zero()); EXPECT_FALSE(b1.empty()); + EXPECT_EQ(b1.size(), N_SM); EXPECT_EQ(b1.start(), 0); EXPECT_EQ(b1.capacity(), N_SM); + EXPECT_TRUE(ALL_ZERO_FN(b1)); + auto saved_dt = b1.const_data(); + Blob_t b2{std::move(b1)}; + EXPECT_TRUE(b1.zero()); EXPECT_TRUE(b1.empty()); + EXPECT_EQ(b1.size(), 0); EXPECT_EQ(b1.start(), 0); EXPECT_EQ(b1.capacity(), 0); + EXPECT_FALSE(b2.zero()); EXPECT_FALSE(b2.empty()); + EXPECT_EQ(b2.size(), N_SM); EXPECT_EQ(b2.start(), 0); EXPECT_EQ(b2.capacity(), N_SM); + EXPECT_TRUE(ALL_ZERO_FN(b2)); + EXPECT_EQ(b1.data(), nullptr); + EXPECT_EQ(b2.data(), saved_dt); + + b2.resize(b2.size() / 2, b2.size() / 2); + saved_dt = b2.const_data(); + Blob_t b3{std::move(b2)}; + EXPECT_TRUE(b2.zero()); EXPECT_TRUE(b2.empty()); + EXPECT_EQ(b2.size(), 0); EXPECT_EQ(b2.start(), 0); EXPECT_EQ(b2.capacity(), 0); + EXPECT_FALSE(b3.zero()); EXPECT_FALSE(b3.empty()); + EXPECT_EQ(b3.size(), N_SM / 2); EXPECT_EQ(b3.start(), N_SM / 2); EXPECT_EQ(b3.capacity(), N_SM); + EXPECT_TRUE(ALL_ZERO_FN(b3)); + EXPECT_EQ(b2.data(), nullptr); + EXPECT_EQ(b3.data(), saved_dt); + + auto b4 = make_blob(alloc, &logger, N_TN, CLEAR_ON_ALLOC); + EXPECT_FALSE(b4.zero()); EXPECT_FALSE(b4.empty()); + EXPECT_EQ(b4.size(), N_TN); EXPECT_EQ(b4.start(), 0); EXPECT_EQ(b4.capacity(), N_TN); + b4 = std::move(b3); // Move-assign. + EXPECT_TRUE(b3.zero()); EXPECT_TRUE(b3.empty()); + EXPECT_EQ(b3.size(), 0); EXPECT_EQ(b3.start(), 0); EXPECT_EQ(b3.capacity(), 0); + EXPECT_FALSE(b4.zero()); EXPECT_FALSE(b4.empty()); + EXPECT_EQ(b4.size(), N_SM / 2); EXPECT_EQ(b4.start(), N_SM / 2); EXPECT_EQ(b4.capacity(), N_SM); + EXPECT_TRUE(ALL_ZERO_FN(b4)); + EXPECT_EQ(b3.data(), nullptr); + EXPECT_EQ(b4.data(), saved_dt); + } // Move-ct, move-assign. + + { // reserve(), make_zero(). 
+ constexpr size_t N1 = 20; + constexpr size_t N2 = 10; + constexpr size_t N_BIG = 1024 * 1024; // Biggish so as to lower chance all-zeroes being already there by accident. + auto b1 = make_blob(alloc, &logger, ZERO); + EXPECT_TRUE(b1.zero()); EXPECT_EQ(b1.capacity(), 0); EXPECT_EQ(b1.size(), 0); EXPECT_EQ(b1.start(), 0); + EXPECT_EQ(b1.begin(), nullptr); EXPECT_EQ(b1.begin(), b1.end()); EXPECT_EQ(b1.data(), nullptr); + b1.make_zero(); // No-op. + EXPECT_TRUE(b1.zero()); EXPECT_EQ(b1.capacity(), 0); EXPECT_EQ(b1.size(), 0); EXPECT_EQ(b1.start(), 0); + EXPECT_EQ(b1.begin(), nullptr); EXPECT_EQ(b1.begin(), b1.end()); EXPECT_EQ(b1.data(), nullptr); + b1.reserve(N1); + EXPECT_FALSE(b1.zero()); EXPECT_EQ(b1.capacity(), N1); EXPECT_EQ(b1.size(), 0); EXPECT_EQ(b1.start(), 0); + EXPECT_NE(b1.begin(), nullptr); EXPECT_EQ(b1.begin(), b1.end()); EXPECT_NE(b1.data(), nullptr); + b1.reserve(N2); // Smaller => no-op. + EXPECT_FALSE(b1.zero()); EXPECT_EQ(b1.capacity(), N1); EXPECT_EQ(b1.size(), 0); EXPECT_EQ(b1.start(), 0); + EXPECT_NE(b1.begin(), nullptr); EXPECT_EQ(b1.begin(), b1.end()); EXPECT_NE(b1.data(), nullptr); + b1.make_zero(); // Dealloc here (ahead of destructor). + EXPECT_TRUE(b1.zero()); EXPECT_EQ(b1.capacity(), 0); EXPECT_EQ(b1.size(), 0); EXPECT_EQ(b1.start(), 0); + EXPECT_EQ(b1.begin(), nullptr); EXPECT_EQ(b1.begin(), b1.end()); EXPECT_EQ(b1.data(), nullptr); + b1.reserve(N_BIG, CLEAR_ON_ALLOC); + EXPECT_FALSE(b1.zero()); EXPECT_EQ(b1.capacity(), N_BIG); EXPECT_EQ(b1.size(), 0); EXPECT_EQ(b1.start(), 0); + EXPECT_NE(b1.begin(), nullptr); EXPECT_EQ(b1.begin(), b1.end()); EXPECT_NE(b1.data(), nullptr); + RNG_ZERO_FN(b1.begin(), b1.begin() + b1.capacity()); + b1.reserve(N1); // Smaller => no-op. 
+ EXPECT_FALSE(b1.zero()); EXPECT_EQ(b1.capacity(), N_BIG); EXPECT_EQ(b1.size(), 0); EXPECT_EQ(b1.start(), 0); + EXPECT_NE(b1.begin(), nullptr); EXPECT_EQ(b1.begin(), b1.end()); EXPECT_NE(b1.data(), nullptr); + RNG_ZERO_FN(b1.begin(), b1.begin() + b1.capacity()); + // Destructor deallocs. + } // reserve(), make_zero(). + + { // begin(), end(), front(), back(), et al. + /* We've been using begin() and end() plenty. We test some corner cases like when zero(); but beyond that + * mainly we try to expose copy/paste errors like const_end() accidentally equalling begin() and such. + * We implicitly assume that more or less if begin() and end() work on one vanilla example, they work for all + * similar ones as opposed to being ultra-paranoid about secret bug-prone logic inside. */ + + constexpr size_t INC = 5; + + // Sanity-check begin() and end() cores when zero() and not zero() but empty() (degenerate cases). + auto b1 = make_blob(alloc, &logger, ZERO); + EXPECT_EQ(b1.begin(), nullptr); EXPECT_EQ(b1.end(), b1.begin()); + b1.reserve(N_SM, CLEAR_ON_ALLOC); ASSERT_EQ(b1.size(), 0); + EXPECT_NE(b1.begin(), nullptr); EXPECT_EQ(b1.end(), b1.begin()); + + // Now for the mainstream situation (!empty(); also have non-empty prefix (start()) and postfix). + b1.resize(b1.capacity() - INC - INC, INC); + uint8_t* const b = b1.begin(); + uint8_t* const e = b1.end(); + const uint8_t* const c_b = static_cast(b1).begin(); + const uint8_t* const c_e = static_cast(b1).end(); + const uint8_t* const c_cb = b1.cbegin(); + const uint8_t* const c_ce = b1.cend(); + const uint8_t* const c_ccb = b1.const_begin(); + const uint8_t* const c_cce = b1.const_end(); + uint8_t* const d = b1.data(); + const uint8_t* const c_d = b1.const_data(); + + EXPECT_EQ(e - b, b1.size()); + EXPECT_TRUE(RNG_ZERO_FN(b - INC, e + INC)); // Should be all derefable (and zeroed) as opposed to possible crash. 
+ EXPECT_EQ(b, c_b); EXPECT_EQ(b, c_cb); EXPECT_EQ(b, c_ccb); EXPECT_EQ(b, d); EXPECT_EQ(b, c_d); + EXPECT_EQ(e, c_e); EXPECT_EQ(e, c_ce); EXPECT_EQ(e, c_cce); + + uint8_t& fr = b1.front(); + uint8_t& bk = b1.back(); + const uint8_t& c_fr = static_cast(b1).front(); + const uint8_t& c_bk = static_cast(b1).back(); + const uint8_t& c_cfr = b1.const_front(); + const uint8_t& c_cbk = b1.const_back(); + + EXPECT_EQ(b, &fr); EXPECT_EQ(e - 1, &bk); + EXPECT_EQ(&fr, &c_fr); EXPECT_EQ(&fr, &c_cfr); + EXPECT_EQ(&bk, &c_bk); EXPECT_EQ(&bk, &c_cbk); + } // begin(), end(), front(), back(), et al. + + { // start_past_prefix[_inc](), clear(). + /* resize() is a big one, but we've been using it a lot already, so let's not be tedious. + * start_past_prefix() is built on it, and start_past_prefix_inc() is built on the latter. */ + + constexpr size_t N1 = 20; + constexpr size_t N2 = 10; + constexpr size_t INC = 5; + + auto b1 = make_blob(alloc, &logger, N1); + EXPECT_EQ(b1.start(), 0); EXPECT_EQ(b1.size(), N1); + b1.start_past_prefix(N2); + EXPECT_EQ(b1.start(), N2); EXPECT_EQ(b1.size(), N1 - N2); + b1.make_zero(); // Dealloc. + b1.resize(N2, INC); // Alloc. + EXPECT_EQ(b1.capacity(), N2 + INC); + b1.make_zero(); // Dealloc. + b1.reserve(N_SM); // Alloc: Much bigger play area. + b1.resize(N2, INC); + ASSERT_GT(b1.capacity(), N2 + INC) << "Sanity-check our own logic real quick."; + EXPECT_EQ(b1.start(), INC); EXPECT_EQ(b1.size(), N2); + ASSERT_GT(N1, N2 + INC) << "Sanity-check our own logic real quick."; + b1.start_past_prefix(N1); // Requested start() > current start() + size() => size() becomes 0. 
+ EXPECT_EQ(b1.start(), N1); EXPECT_EQ(b1.size(), 0); + b1.start_past_prefix_inc(-1); + EXPECT_EQ(b1.start(), N1 - 1); EXPECT_EQ(b1.size(), 1); + b1.start_past_prefix_inc(-5); + EXPECT_EQ(b1.start(), N1 - 1 - 5); EXPECT_EQ(b1.size(), 1 + 5); + b1.start_past_prefix_inc(+2); + EXPECT_EQ(b1.start(), N1 - 1 - 5 + 2); EXPECT_EQ(b1.size(), 1 + 5 - 2); + b1.start_past_prefix_inc(+5); // Push past original start(). size() itself is floored at 0. + EXPECT_EQ(b1.start(), N1 + 1); EXPECT_EQ(b1.size(), 0); + b1.start_past_prefix_inc(-(N1 + 1)); + EXPECT_EQ(b1.start(), 0); EXPECT_EQ(b1.size(), N1 + 1); + // (Recall that clear() never actually deallocs. It really just sets size() to 0; that's it.) + b1.resize(N2, INC); + EXPECT_EQ(b1.start(), INC); EXPECT_EQ(b1.size(), N2); + b1.clear(); // Attn. + EXPECT_FALSE(b1.zero()); EXPECT_TRUE(b1.empty()); // Empty but buffer is actually allocated still. + EXPECT_EQ(b1.start(), INC); EXPECT_EQ(b1.size(), 0); // Empty (size is 0) but start() unchanged as advertised. + b1.make_zero(); + EXPECT_TRUE(b1.zero()); EXPECT_TRUE(b1.empty()); EXPECT_EQ(b1.start(), 0); EXPECT_EQ(b1.size(), 0); + b1.clear(); // No-op. + EXPECT_TRUE(b1.zero()); EXPECT_TRUE(b1.empty()); EXPECT_EQ(b1.start(), 0); EXPECT_EQ(b1.size(), 0); + } // start_past_prefix[_inc](), clear(). + + { // erase(). + /* Historical note: there was a time when that method was horribly buggy! So it's well worth testing. + * Though the current impl is so simple that it's probably not gonna break if the basics work. We digress.... */ + + constexpr size_t N1 = 20; + constexpr size_t INC = 5; + + auto b1 = make_blob(alloc, &logger, N1, CLEAR_ON_ALLOC); + + b1.resize(b1.capacity() - INC, INC); // [INC][N1 - INC][], all 0. + EXPECT_TRUE(RNG_ZERO_FN(b1.begin() - b1.start(), b1.begin() - b1.start() + b1.capacity())); + memset(b1.begin() + INC, ONE, INC); // [INC x 0][INC x 0, INC x 1, rest x 0][]. 
+ ASSERT_TRUE(RNG_ZERO_FN(b1.begin() - b1.start(), + b1.begin() - b1.start() + INC + INC)) << "Sanity-check selves."; + ASSERT_TRUE(RNG_ONES_FN(b1.begin() - b1.start() + INC + INC, + b1.begin() - b1.start() + INC + INC + INC)) << "Sanity-check selves."; + ASSERT_TRUE(RNG_ZERO_FN(b1.begin() - b1.start() + INC + INC + INC, + b1.begin() + b1.size())) << "Sanity-check selves."; + ASSERT_EQ(b1.start(), INC) << "Sanity-check selves."; + EXPECT_EQ(b1.size(), N1 - INC) << "Sanity-check selves."; + EXPECT_EQ(b1.erase(b1.begin() - b1.start() + INC + INC, // No-op. + b1.begin() - b1.start() + INC + INC), + b1.begin() - b1.start() + INC + INC); + EXPECT_EQ(b1.start(), INC); EXPECT_EQ(b1.size(), N1 - INC); + EXPECT_EQ(b1.erase(b1.begin() - b1.start() + INC + INC, // No-op. + b1.begin() - b1.start() + INC + INC - 1), + b1.begin() - b1.start() + INC + INC); + EXPECT_EQ(b1.start(), INC); EXPECT_EQ(b1.size(), N1 - INC); + EXPECT_TRUE(RNG_ZERO_FN(b1.begin() - b1.start(), + b1.begin() - b1.start() + INC + INC)); + EXPECT_TRUE(RNG_ONES_FN(b1.begin() - b1.start() + INC + INC, + b1.begin() - b1.start() + INC + INC + INC)); + EXPECT_TRUE(RNG_ZERO_FN(b1.begin() - b1.start() + INC + INC + INC, + b1.begin() + b1.size())); + + EXPECT_EQ(b1.erase(b1.begin() - b1.start() + INC + INC, // Erase the INCx1 area. Only 0s remain all-over. + b1.begin() - b1.start() + INC + INC + INC), + b1.begin() - b1.start() + INC + INC); + EXPECT_EQ(b1.start(), INC); EXPECT_EQ(b1.size(), N1 - INC - INC); + EXPECT_TRUE(RNG_ZERO_FN(b1.begin() - b1.start(), + b1.begin() + b1.size())); + } // erase(). + + { // sub_copy(). + constexpr size_t N1 = 20; + constexpr size_t INC = 10; + vector DIST_VEC(N1); // Don't use {} here due to stupid C++ ambiguity with initializer-list. 
+ ASSERT_TRUE(RNG_ZERO_FN(DIST_VEC.begin(), DIST_VEC.end())); + + auto b1 = make_blob(alloc, &logger, N_SM, CLEAR_ON_ALLOC); + b1.resize(b1.capacity() - INC, INC); + memset(b1.begin() + INC, ONE, INC); + EXPECT_EQ(b1.sub_copy(b1.begin() + INC, mutable_buffer{&(DIST_VEC.front()), 0}), // Degenerate case (no-op). + b1.begin() + INC); + EXPECT_TRUE(RNG_ZERO_FN(DIST_VEC.begin(), DIST_VEC.end())); + EXPECT_TRUE(RNG_ZERO_FN(b1.begin() - b1.start(), + b1.begin() + INC)); + EXPECT_TRUE(RNG_ONES_FN(b1.begin() + INC, + b1.begin() + INC + INC)); + EXPECT_TRUE(RNG_ZERO_FN(b1.begin() + INC + INC, + b1.begin() - b1.start() + b1.capacity())); + EXPECT_EQ(b1.sub_copy(b1.begin() + INC, mutable_buffer{&(DIST_VEC.front()), INC}), + b1.begin() + INC + INC); + EXPECT_TRUE(RNG_ONES_FN(DIST_VEC.begin(), + DIST_VEC.begin() + INC)); + EXPECT_TRUE(RNG_ZERO_FN(DIST_VEC.begin() + INC, + DIST_VEC.end())); + EXPECT_TRUE(RNG_ZERO_FN(b1.begin() - b1.start(), // b1 shall be unchanged always from sub_copy()->external. + b1.begin() + INC)); + EXPECT_TRUE(RNG_ONES_FN(b1.begin() + INC, + b1.begin() + INC + INC)); + EXPECT_TRUE(RNG_ZERO_FN(b1.begin() + INC + INC, + b1.begin() - b1.start() + b1.capacity())); + // Copy inside non-overlappingly. + EXPECT_EQ(b1.sub_copy(b1.begin() + INC, mutable_buffer{b1.end() - INC - INC, INC}), + b1.begin() + INC + INC); + EXPECT_TRUE(RNG_ZERO_FN(b1.begin() - b1.start(), + b1.begin() + INC)); + EXPECT_TRUE(RNG_ONES_FN(b1.begin() + INC, + b1.begin() + INC + INC)); + EXPECT_TRUE(RNG_ZERO_FN(b1.begin() + INC + INC, + b1.end() - INC - INC)); + EXPECT_TRUE(RNG_ONES_FN(b1.end() - INC - INC, + b1.end() - INC)); + EXPECT_TRUE(RNG_ZERO_FN(b1.end() - INC, + b1.begin() - b1.start() + b1.capacity())); + // This stuff should all be unchanged throughout. + EXPECT_EQ(b1.start(), INC); EXPECT_EQ(b1.size(), N_SM - INC); EXPECT_EQ(b1.capacity(), N_SM); + } // sub_copy(). + + { // Allocators, multiple allocators. + // (Please see comment near `using Shm_pool =` for some background.) 
+ + /* We've heavily exercised general/basic allocator-involving code and APIs already. Even when + * !SHM_ALLOC (Allocator is std::allocator), Basic_blob et al takes an allocator instance to + * to ctor, and we've been doing that; then naturally allocating/deallocating things with it. So what we + * test here, allocator-wise, is two-fold: + * - First, some boring checks involving get_allocator(): that this returns an object equal-by-value to + * what we gave to ctor. So like with std::allocator it'll always be true, since it is stateless and + * equality is always true as a result; but if SHM_ALLOC then there can be actually-unequal allocator + * objects, so we can sanity-check that. + * - Second, though, there are more subtle tests of what happens w/r/t allocators when `Blob_t`s are + * copy/move-constructed; copy/move-assigned; and swapped. There're the get_allocator() equality checks + * again -- but more saliently what if get_allocator() is replaced by some assignment, but there's already + * a buffer allocated within the receiving object? That stuff. P.S. Just getting it compile is itself + * a victory. P.P.S. Again, when !SHM_ALLOC there's only one stateless allocator really, so + * those same tests just vacuously pass; but it's harmless to still do it. */ + + /* Actually one basic check is to ensure the allocator is... used. There could easily be an error causing + * std::allocator to be used regardless of anything. (As of this writing internally there are ~3 possible + * smart-pointer types used inside; we could have chosen the wrong one, misused allocator APIs, etc.) */ + { + const auto check_alloc = [&](const Blob_t& b, const string& ctx) + { + const auto p = b.const_data(); + EXPECT_TRUE(p) << ctx; + if constexpr(SHM_ALLOC) + { + ASSERT_EQ(b.get_allocator(), *alloc) << "Sanity-check selves. " << ctx; + ASSERT_EQ(alloc->get_segment_manager(), shm_pool.get_segment_manager()) << "Sanity-check selves. 
" << ctx; + + const auto pool_base = static_cast(shm_pool.get_address()); + EXPECT_TRUE((p >= pool_base) && (p < (pool_base + shm_pool.get_size()))) << ctx; + } + // else { Not much we can check when it's just std::allocator. } + }; + + auto b1 = make_blob(alloc, &logger, N_SM); check_alloc(b1, CTX); b1.make_zero(); + b1.reserve(N_SM); check_alloc(b1, CTX); b1.make_zero(); + b1.resize(N_SM); check_alloc(b1, CTX); b1.make_zero(); + + auto b2 = make_blob(alloc, &logger, N_SM, CLEAR_ON_ALLOC); check_alloc(b2, CTX); b2.make_zero(); + b2.reserve(N_SM, CLEAR_ON_ALLOC); check_alloc(b2, CTX); b2.make_zero(); + b2.resize(N_SM, CLEAR_ON_ALLOC); check_alloc(b2, CTX); b2.make_zero(); + + b1.resize(N_SM); + Blob_t b3{std::move(b1)}; check_alloc(b3, CTX); + Blob_t b4{b3}; check_alloc(b4, CTX); + } + + if constexpr(SHM_ALLOC) + { + ASSERT_NE(*alloc, *alloc2) << "Sanity-check."; + } + else + { + ASSERT_EQ(*alloc, *alloc2) << "Sanity-check."; + } + + // get_allocator() value checks. + + auto b1 = make_blob(alloc, &logger, ZERO); + auto b2 = make_blob(alloc2, &logger, ZERO); + EXPECT_EQ(b1.get_allocator(), *alloc); EXPECT_EQ(b2.get_allocator(), *alloc2); + b1.resize(N_SM); b2.resize(N_SM); + EXPECT_EQ(b1.get_allocator(), *alloc); EXPECT_EQ(b2.get_allocator(), *alloc2); + + b1 = make_blob(alloc, &logger, N_SM); + b2 = make_blob(alloc2, &logger, N_SM); + EXPECT_EQ(b1.get_allocator(), *alloc); EXPECT_EQ(b2.get_allocator(), *alloc2); + + { // Copy-ct, copy-assign. No buffer. 
+ auto b3 = make_blob(alloc, &logger, ZERO); + Blob_t b4{b3}; + EXPECT_EQ(b4.get_allocator(), *alloc); EXPECT_EQ(b3.get_allocator(), *alloc); + auto b5 = make_blob(alloc2, &logger, ZERO); + EXPECT_EQ(b5.get_allocator(), *alloc2); EXPECT_EQ(b3.get_allocator(), *alloc); + b3 = b5; + EXPECT_EQ(b5.get_allocator(), *alloc2); + static_assert(!allocator_traits::propagate_on_container_copy_assignment::value, + "Our test allocators, including the stateful ones, configured themselves to " + "*not* propagate on copy-assignment. If this static-assert trips, something major changed " + "in boost.interprocess maybe?!"); + EXPECT_EQ(b3.get_allocator(), *alloc); // Copy-construction propagates... but not copy-assignment. + } + { // Copy-ct, copy-assign. Yes buffer. + auto b3 = make_blob(alloc, &logger, N_SM); + Blob_t b4{b3}; + EXPECT_EQ(b4.get_allocator(), *alloc); EXPECT_EQ(b3.get_allocator(), *alloc); + EXPECT_EQ(b4.capacity(), N_SM); EXPECT_EQ(b3.capacity(), N_SM); + auto b5 = make_blob(alloc2, &logger, N_SM); + EXPECT_EQ(b5.get_allocator(), *alloc2); EXPECT_EQ(b3.get_allocator(), *alloc); + EXPECT_EQ(b5.capacity(), N_SM); EXPECT_EQ(b3.capacity(), N_SM); + b3 = b5; + EXPECT_EQ(b5.get_allocator(), *alloc2); + EXPECT_EQ(b3.get_allocator(), *alloc); // Copy-construction propagates... but not copy-assignment. + EXPECT_EQ(b5.capacity(), N_SM); EXPECT_EQ(b3.capacity(), N_SM); + } + + { // Move-ct, move-assign, swap. No buffer. + auto b3 = make_blob(alloc, &logger, ZERO); + Blob_t b4{std::move(b3)}; + EXPECT_EQ(b4.get_allocator(), *alloc); EXPECT_EQ(b3.get_allocator(), *alloc); + auto b5 = make_blob(alloc2, &logger, ZERO); + EXPECT_EQ(b5.get_allocator(), *alloc2); EXPECT_EQ(b3.get_allocator(), *alloc); + b3 = std::move(b5); + EXPECT_EQ(b5.get_allocator(), *alloc2); + static_assert(!(SHM_ALLOC && allocator_traits::propagate_on_container_move_assignment::value), + "Our stateful test allocators configured themselves to " + "*not* propagate on move-assignment. 
If this static-assert trips, something major changed " + "in boost.interprocess maybe?!"); + EXPECT_EQ(b3.get_allocator(), *alloc); // Move-construction propagates... but not move-assignment. + swap(b3, b5); + EXPECT_EQ(b5.get_allocator(), *alloc2); EXPECT_EQ(b3.get_allocator(), *alloc); + } + { // Move-ct, move-assign, swap. Yes buffer. + auto b3 = make_blob(alloc, &logger, N_SM); + Blob_t b4{std::move(b3)}; + /* (The b3's allocator is supposed to be moved-from, but in practice for allocators -- at the very least + * the ones we have here, but also generally from what we know of allocators -- it is the same as being + * copied-from. Like, the moved-from allocator shouldn't get nullified or something; generally that is not + * really a thing; that I (ygoldfel) know of.) So we'll just still check the moved-from get_allocator(). */ + EXPECT_EQ(b4.get_allocator(), *alloc); EXPECT_EQ(b3.get_allocator(), *alloc); + EXPECT_EQ(b4.capacity(), N_SM); EXPECT_EQ(b3.capacity(), 0); + auto b5 = make_blob(alloc2, &logger, N_SM); + EXPECT_EQ(b5.get_allocator(), *alloc2); EXPECT_EQ(b4.get_allocator(), *alloc); + EXPECT_EQ(b5.capacity(), N_SM); EXPECT_EQ(b4.capacity(), N_SM); + b4 = std::move(b5); + EXPECT_EQ(b5.get_allocator(), *alloc2); + EXPECT_EQ(b4.get_allocator(), *alloc); // Move-construction propagates... but not move-assignment. + EXPECT_EQ(b5.capacity(), 0); EXPECT_EQ(b4.capacity(), N_SM); + + static_assert(!(SHM_ALLOC && allocator_traits::propagate_on_container_swap::value), + "Our stateful test allocators configured themselves to " + "*not* propagate on swap. If this static-assert trips, something major changed " + "in boost.interprocess maybe?!"); + swap(b4, b5); + EXPECT_EQ(b5.capacity(), N_SM); EXPECT_EQ(b4.capacity(), 0); // Buf pointers apparently swapped... + EXPECT_EQ(b5.get_allocator(), *alloc2); // ...but not allocators. + EXPECT_EQ(b4.get_allocator(), *alloc); + b5.make_zero(); // Force deallocation now -- at least it shouldn't crash. 
+ + /* Lastly let's do a full swap (of 2 buffers, 2 allocators), checking the buf ptr values before/after. + * We consider this decently sufficient without doing the same with move-assignment alone. */ + constexpr size_t N_LG = N_SM * 2; + auto b6 = make_blob(alloc, &logger, N_SM); + auto b7 = make_blob(alloc2, &logger, N_LG); + auto p6 = b6.begin(); auto p7 = b7.begin(); + ASSERT_EQ(b6.get_allocator(), *alloc); ASSERT_EQ(b7.get_allocator(), *alloc2); + swap(b6, b7); + EXPECT_EQ(b7.begin(), p6); EXPECT_EQ(b6.begin(), p7); + EXPECT_EQ(b7.size(), N_SM); EXPECT_EQ(b6.size(), N_LG); + EXPECT_EQ(b6.get_allocator(), *alloc); EXPECT_EQ(b7.get_allocator(), *alloc2); + b6.make_zero(); b7.make_zero(); // No crash here hopefully. + b6.resize(N_LG); b7.resize(N_SM); p6 = b7.begin(); p7 = b6.begin(); + // Invoke non-ADL-swap too (over-abundance of caution). + b6.swap(b7); + EXPECT_EQ(b6.begin(), p6); EXPECT_EQ(b7.begin(), p7); + EXPECT_EQ(b6.size(), N_SM); EXPECT_EQ(b7.size(), N_LG); + EXPECT_EQ(b6.get_allocator(), *alloc); EXPECT_EQ(b7.get_allocator(), *alloc2); + b6.make_zero(); b7.make_zero(); // No crash here hopefully. + } // Move-ct, move-assign, swap. Yes buffer. + } // Allocators, multiple allocators. + + { // Dealloc; sharing. + /* We combine a couple things here; we've been deallocing left and right but never really checked whether + * in fact dealloc occurred. It's not that easy to check it, but with SHM_ALLOC we can do it by using + * bipc's stats. So that's thing 1. Thing 2, relevant only if SHARING, is to test that share()-based + * feature, where buf is deallocated only once all co-sharing blobs have been destroyed or make_zero()ed. + * + * We use a shared_ptr handle in heap, so that the allocator is used only for the actual buffer. */ + + auto b1 = make_shared(make_blob(alloc, &logger, ZERO)); + size_t N = 1024 * 1024; // Pretty big, so that alloc-slack is minor compared to it.
+ + [[maybe_unused]] size_t n_base = 0; + if constexpr(SHM_ALLOC) + { + n_base = shm_pool.get_free_memory(); + } + + const auto check_alloc_sz = [&]([[maybe_unused]] size_t n_or_0, [[maybe_unused]] const string& ctx) + { + if constexpr(SHM_ALLOC) + { + if (n_or_0 == 0) + { + EXPECT_EQ(shm_pool.get_free_memory(), n_base) << ctx; + } + else + { + ASSERT_GE(n_base, shm_pool.get_free_memory()) << ctx; + const auto n_diff = static_cast(n_base - shm_pool.get_free_memory()); + // Should be buf-size x N, plus possibly some alloc slack. + EXPECT_GE(n_diff, n_or_0 * N) << ctx; + EXPECT_LT(n_diff, (n_or_0 + 1) * N) << ctx; + } + } + // else { We cannot check anything about this. Oh well. } + }; + + check_alloc_sz(0, CTX); // Sanity-check. + b1->reserve(N); check_alloc_sz(1, CTX); + b1->make_zero(); check_alloc_sz(0, CTX); + b1->resize(N); check_alloc_sz(1, CTX); + + if constexpr(SHARING) + { + constexpr size_t INC = 15; + + const auto p = b1->const_data(); + + auto b2 = make_shared(b1->share()); check_alloc_sz(1, CTX); // No extra alloc.
+ EXPECT_EQ(b1->data() - b1->start(), p); EXPECT_EQ(b1->capacity(), N); + EXPECT_EQ(b1->size(), N); EXPECT_EQ(b1->start(), 0); + EXPECT_EQ(b2->data() - b2->start(), p); EXPECT_EQ(b2->capacity(), N); + EXPECT_EQ(b2->size(), N); EXPECT_EQ(b2->start(), 0); + + auto b3 = make_shared(b2->share_after_split_left(INC)); check_alloc_sz(1, CTX); + EXPECT_EQ(b1->data() - b1->start(), p); EXPECT_EQ(b1->capacity(), N); + EXPECT_EQ(b1->size(), N); EXPECT_EQ(b1->start(), 0); + EXPECT_EQ(b2->data() - b2->start(), p); EXPECT_EQ(b2->capacity(), N); + EXPECT_EQ(b2->size(), N - INC); EXPECT_EQ(b2->start(), INC); + EXPECT_EQ(b3->data() - b3->start(), p); EXPECT_EQ(b3->capacity(), N); + EXPECT_EQ(b3->size(), INC); EXPECT_EQ(b3->start(), 0); + + auto b4 = make_shared(b2->share_after_split_right(INC)); check_alloc_sz(1, CTX); + EXPECT_EQ(b1->data() - b1->start(), p); EXPECT_EQ(b1->capacity(), N); + EXPECT_EQ(b1->size(), N); EXPECT_EQ(b1->start(), 0); + EXPECT_EQ(b2->data() - b2->start(), p); EXPECT_EQ(b2->capacity(), N); + EXPECT_EQ(b2->size(), N - INC - INC); EXPECT_EQ(b2->start(), INC); + EXPECT_EQ(b3->data() - b3->start(), p); EXPECT_EQ(b3->capacity(), N); + EXPECT_EQ(b3->size(), INC); EXPECT_EQ(b3->start(), 0); + EXPECT_EQ(b4->data() - b4->start(), p); EXPECT_EQ(b4->capacity(), N); + EXPECT_EQ(b4->size(), INC); EXPECT_EQ(b4->start(), N - INC); + + b2->make_zero(); check_alloc_sz(1, CTX); // Still alive. + b3.reset(); check_alloc_sz(1, CTX); // Ditto. + b1.reset(); check_alloc_sz(1, CTX); // Ditto! Even after killing the original. + b4->clear(); ASSERT_EQ(b4->capacity(), N); check_alloc_sz(1, CTX); // Doesn't even dec the ref-count. + *b4 = make_blob(alloc, &logger, ZERO); check_alloc_sz(0, CTX); // Now the ref-count got to zero. + + /* Avoid tedium slightly; use one form of share_after_split_equally*(). We use another elsewhere in this + * test. @todo Technically should go through them all (~3), but they're all built on one core (cheating + * white-boxily when we say this). 
*/ + vector headless_v{false, true}; + for (auto headless : headless_v) + { + cout << " Splitting sub-test with headless = [" << headless << "].\n" << flush; + constexpr size_t COUNT = 16; constexpr size_t N_PER_CT = 65536; constexpr size_t REM = 123; + constexpr size_t CAP = COUNT * N_PER_CT + REM; + vector> blob_vec; + + N = CAP; // Soooo hacky... @todo Come on... this is sad. + + check_alloc_sz(0, CTX); + auto b5 = make_shared(make_blob(alloc, &logger, CAP, CLEAR_ON_ALLOC)); + check_alloc_sz(1, CTX); + + const auto p = b5->const_data(); + b5->share_after_split_equally_emit_ptr_seq(N_PER_CT, headless, &blob_vec); + + EXPECT_EQ(blob_vec.size(), COUNT + 1); + check_alloc_sz(1, CTX); + + auto rem_blob = std::move(blob_vec.back()); blob_vec.pop_back(); + size_t start = 0; + for (const auto& b : blob_vec) + { + EXPECT_EQ(b->data() - b->start(), p); EXPECT_EQ(b->capacity(), CAP); + EXPECT_EQ(b->size(), N_PER_CT); EXPECT_EQ(b->start(), start); + start += N_PER_CT; + } + + EXPECT_EQ(rem_blob->data() - rem_blob->start(), p); EXPECT_EQ(rem_blob->capacity(), CAP); + EXPECT_EQ(rem_blob->size(), REM); EXPECT_EQ(rem_blob->start(), start); + + check_alloc_sz(1, CTX); + blob_vec.clear(); + rem_blob.reset(); + + EXPECT_TRUE(b5->empty()); + if (!headless) + { + check_alloc_sz(1, CTX); + b5->make_zero(); + } + EXPECT_TRUE(b5->zero()); + check_alloc_sz(0, CTX); + } // for (auto headless : headless_v) + } // if constexpr(SHARING) + } // Dealloc; sharing. + + /* Now to test the many assert-trip undefined-behavior triggers we've avoided above. This is arguably more + * important here than usual, in the sense that a common user error (since other containers allow it, but we + * *intentionally* do not) would be to perform an op that would require an allocation of a larger buffer, + * when one is already allocated (!zero()). So at least we need to test the s*** out of that. (Ultimately, + * internally, it comes down to reserve() which is called by 100% of code paths requiring allocation.) 
*/ +#ifndef NDEBUG + { // Death tests. + cout << " Assertions enabled -- performing death-tests.\n" << flush; + + { // General input checks. + constexpr String_view RSRV_ERR_STR{"intentionally disallows reserving N>M>0.*make_zero.. first"}; + constexpr auto RSRV_ERR = RSRV_ERR_STR.data(); + + // Various attempts to ultimately reserve(N), when `N > .capacity() > 0`. + + // reserve(). + constexpr size_t INC = 1; + constexpr size_t N_LG = N_SM + INC; + auto b1 = make_blob(alloc, &logger, N_SM); + b1.resize(0); + ASSERT_EQ(b1.capacity(), N_SM); ASSERT_EQ(b1.size(), 0); + b1.reserve(N_SM); + EXPECT_DEATH(b1.reserve(N_LG), RSRV_ERR); + b1.make_zero(); + // resize(). + b1.reserve(N_LG); + EXPECT_DEATH(b1.resize(N_LG - INC, INC + INC), RSRV_ERR); + EXPECT_DEATH(b1.resize(N_LG, INC), RSRV_ERR); + EXPECT_DEATH(b1.resize(0, N_LG + INC), RSRV_ERR); + EXPECT_DEATH(b1.resize(N_LG - INC, CLEAR_ON_ALLOC, INC + INC), RSRV_ERR); + EXPECT_DEATH(b1.resize(N_LG, CLEAR_ON_ALLOC, INC), RSRV_ERR); + EXPECT_DEATH(b1.resize(0, CLEAR_ON_ALLOC, N_LG + INC), RSRV_ERR); + b1.resize(N_LG, 0); + b1.resize(0, N_LG); + b1.resize(N_LG - INC, 0); + b1.resize(N_LG - INC, INC); + b1.resize(N_LG, CLEAR_ON_ALLOC, 0); + b1.resize(0, CLEAR_ON_ALLOC, N_LG); + b1.resize(N_LG - INC, CLEAR_ON_ALLOC, 0); + b1.resize(N_LG - INC, CLEAR_ON_ALLOC, INC); + // Copy-assign (also assign_copy(), emplace_copy() really, as of this writing white-boxily). + auto b2 = make_blob(alloc, &logger, N_SM); + b1.resize(N_LG, 0); // A bit too big. + EXPECT_DEATH(b2 = b1, RSRV_ERR); + b2.clear(); + EXPECT_DEATH(b2 = b1, RSRV_ERR); + b2.make_zero(); + b2 = b1; + + // Other input errors. + + // start_past_prefix_inc(). + b2.resize(10, 5); + b2.start_past_prefix_inc(-1); ASSERT_EQ(b2.start(), 4); + b2.start_past_prefix_inc(-4); ASSERT_EQ(b2.start(), 0); + b2.start_past_prefix_inc(+4); ASSERT_EQ(b2.start(), 4); + EXPECT_DEATH(b2.start_past_prefix_inc(-5), "start.. >= size_type.-prefix_size_inc"); + + { // emplace/sub_copy(). 
+ string string_sm(14, '\0'); + const const_buffer STR_SM_BUF{string_sm.data(), string_sm.size()}; + const mutable_buffer STR_SM_BUF_MUT{string_sm.data(), string_sm.size()}; + + // emplace_copy(). + b2.make_zero(); + b2.resize(N_SM); + EXPECT_DEATH(b2.emplace_copy(b2.begin() - 1, const_buffer{string_sm.data(), 0}), "valid_iterator"); + EXPECT_DEATH(b2.emplace_copy(b2.end() + 1, const_buffer{string_sm.data(), 0}), "valid_iterator"); + EXPECT_DEATH(b2.emplace_copy(b2.begin() - 1, STR_SM_BUF), "valid_iterator"); + EXPECT_DEATH(b2.emplace_copy(b2.end() + 1, STR_SM_BUF), "valid_iterator"); + EXPECT_DEATH(b2.emplace_copy(b2.end(), STR_SM_BUF), "derefable_iterator"); + EXPECT_DEATH(b2.emplace_copy(b2.end() - 1, const_buffer{string_sm.data(), 2}), + "n. <= .const_end.. - dest"); + EXPECT_DEATH(b2.emplace_copy(b2.end() - 2, const_buffer{string_sm.data(), 3}), + "n. <= .const_end.. - dest"); + b2.emplace_copy(b2.end() - 1, const_buffer{string_sm.data(), 1}); + b2.emplace_copy(b2.end() - 2, const_buffer{string_sm.data(), 2}); + + EXPECT_DEATH(b2.emplace_copy(b2.begin(), const_buffer{b2.begin() + 2, 3}), + ".dest_it \\+ n. <= src_data"); + b2.emplace_copy(b2.begin(), const_buffer{b2.begin() + 2, 2}); + EXPECT_DEATH(b2.emplace_copy(b2.begin() + 2, const_buffer{b2.begin(), 3}), + ".src_data \\+ n. <= dest_it"); + b2.emplace_copy(b2.begin() + 2, const_buffer{b2.begin(), 2}); + + // sub_copy(). + EXPECT_DEATH(b2.sub_copy(b2.begin() - 1, mutable_buffer{string_sm.data(), 0}), "valid_iterator"); + EXPECT_DEATH(b2.sub_copy(b2.end() + 1, mutable_buffer{string_sm.data(), 0}), "valid_iterator"); + EXPECT_DEATH(b2.sub_copy(b2.begin() - 1, STR_SM_BUF_MUT), "valid_iterator"); + EXPECT_DEATH(b2.sub_copy(b2.end() + 1, STR_SM_BUF_MUT), "valid_iterator"); + EXPECT_DEATH(b2.sub_copy(b2.end(), STR_SM_BUF_MUT), "derefable_iterator"); + EXPECT_DEATH(b2.sub_copy(b2.end() - 1, mutable_buffer{string_sm.data(), 2}), + "n. <= .const_end.. 
- src"); + EXPECT_DEATH(b2.sub_copy(b2.end() - 2, mutable_buffer{string_sm.data(), 3}), + "n. <= .const_end.. - src"); + b2.sub_copy(b2.end() - 1, mutable_buffer{string_sm.data(), 1}); + b2.sub_copy(b2.end() - 2, mutable_buffer{string_sm.data(), 2}); + + EXPECT_DEATH(b2.sub_copy(b2.begin(), mutable_buffer{b2.begin() + 2, 3}), + ".src \\+ n. <= dest_data"); + b2.sub_copy(b2.begin(), mutable_buffer{b2.begin() + 2, 2}); + EXPECT_DEATH(b2.sub_copy(b2.begin() + 2, mutable_buffer{b2.begin(), 3}), + ".dest_data \\+ n. <= src"); + b2.sub_copy(b2.begin() + 2, mutable_buffer{b2.begin(), 2}); + } // emplace/sub_copy(). + + b1.make_zero(); + b1.resize(N_SM, 0); + { // .erase() + EXPECT_DEATH(b1.erase(b1.begin() - 1, b1.end()), "derefable_iterator.first"); + EXPECT_DEATH(b1.erase(b1.end(), b1.end()), "derefable_iterator.first"); + EXPECT_DEATH(b1.erase(b1.end() + 1, b1.end()), "derefable_iterator.first"); + EXPECT_DEATH(b1.erase(b1.begin(), b1.end() + 1), "valid_iterator.past_last"); + EXPECT_DEATH(b1.erase(b1.begin(), b1.begin() - 1), "valid_iterator.past_last"); + b1.erase(b1.begin(), b1.end()); ASSERT_TRUE(b1.empty()); + EXPECT_DEATH(b1.erase(b1.begin(), b1.end()), "derefable_iterator.first"); // Because empty. + } + + // front(), back(), et al. + ASSERT_TRUE(b1.empty()); + EXPECT_DEATH(b1.front(), "!empty"); + EXPECT_DEATH(b1.back(), "!empty"); + // @todo ^-- This'll skip checking the const overloads. + EXPECT_DEATH(b1.const_front(), "!empty"); + EXPECT_DEATH(b1.const_back(), "!empty"); + b1.resize(1); + EXPECT_EQ(b1.front(), b1.back()); + EXPECT_EQ(b1.const_front(), b1.const_back()); + { + uint8_t& fr = b1.front(); + uint8_t& bk = b1.back(); + const uint8_t& c_fr = static_cast(b1).front(); + const uint8_t& c_bk = static_cast(b1).back(); + const uint8_t& c_cfr = b1.const_front(); + const uint8_t& c_cbk = b1.const_back(); + ASSERT_EQ(&fr, &bk); + ASSERT_EQ(&fr, &c_fr); ASSERT_EQ(&fr, &c_cfr); + ASSERT_EQ(&bk, &c_bk); ASSERT_EQ(&bk, &c_cbk); + } + } // General input checks. 
+ + if constexpr(SHARING) + { + cout << " A few sharing-specific death-tests for this type....\n" << flush; + auto b3 = make_blob(alloc, &logger, N_SM); + auto b4 = b3.share_after_split_right(5); + ASSERT_TRUE(b4.size() == 5); ASSERT_TRUE(b3.size() == N_SM - 5); + ASSERT_TRUE(b3.size() > b4.size()); + // It's small enough... but copy-assign/etc. between shared blobs = not OK. + EXPECT_DEATH(b4 = b3, "!blobs_sharing"); + EXPECT_DEATH(b3 = b4, "!blobs_sharing"); + EXPECT_DEATH(b4.assign(b3), "!blobs_sharing"); + EXPECT_DEATH(b3.assign(b4), "!blobs_sharing"); + b4.clear(); + EXPECT_DEATH(b4 = b3, "!blobs_sharing"); + EXPECT_DEATH(b3 = b4, "!blobs_sharing"); + EXPECT_DEATH(b4.assign(b3), "!blobs_sharing"); + EXPECT_DEATH(b3.assign(b4), "!blobs_sharing"); + b4.make_zero(); + b4 = b3; + + b4.make_zero(); + EXPECT_DEATH(b4.share(), "!zero"); + EXPECT_DEATH(b4.share_after_split_right(5), "!zero"); + EXPECT_DEATH(b4.share_after_split_left(5), "!zero"); + + b4.reserve(N_SM); ASSERT_TRUE(b4.empty()); + vector blobs; + EXPECT_DEATH(b4.share_after_split_equally_emit_seq(0, true, &blobs), "size != 0"); + EXPECT_DEATH(b4.share_after_split_equally_emit_seq(0, false, &blobs), "size != 0"); + EXPECT_DEATH(b4.share_after_split_equally_emit_seq(1, true, &blobs), "!empty"); + EXPECT_DEATH(b4.share_after_split_equally_emit_seq(1, false, &blobs), "!empty"); + b4.resize(b4.capacity()); + EXPECT_DEATH(b4.share_after_split_equally_emit_seq(0, true, &blobs), "size != 0"); + EXPECT_DEATH(b4.share_after_split_equally_emit_seq(0, false, &blobs), "size != 0"); + b4.share_after_split_equally_emit_seq(1, true, &blobs); ASSERT_TRUE(blobs.size() == N_SM); + } // if constexpr(SHARING) + } // Death tests. 
+#endif // NDEBUG + + [[maybe_unused]] auto b1 = make_blob(alloc, &logger, size_t(3), CLEAR_ON_ALLOC); + if constexpr(SHM_ALLOC) + { + [[maybe_unused]] auto b2 = make_blob(alloc2, &logger, size_t(6)); + } + }; // const auto test_type = + + /* As noted earlier makes sense to test all permutations, even if only a "core" API were tested here. + * Again though we do various type-specific APIs via `if constexpr` too. */ + Shm_allocator dummy_shm_alloc{shm_pool.get_segment_manager()}; + test_type(Basic_blob()); + test_type(Basic_blob()); + test_type(Basic_blob(dummy_shm_alloc)); + test_type(Basic_blob(dummy_shm_alloc)); + test_type(Blob_with_log_context()); // Recall that for these it'll always use Vanilla_allocator. + test_type(Blob_with_log_context()); +} // TEST(Blob, Interface) + +} // namespace flow::util::test diff --git a/src/flow/util/test/linked_hash_test.cpp b/src/flow/util/test/linked_hash_test.cpp new file mode 100644 index 000000000..baa2f3aa6 --- /dev/null +++ b/src/flow/util/test/linked_hash_test.cpp @@ -0,0 +1,497 @@ +/* Flow + * Copyright 2023 Akamai Technologies, Inc. + * + * Licensed under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in + * compliance with the License. You may obtain a copy + * of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in + * writing, software distributed under the License is + * distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing + * permissions and limitations under the License. 
*/ + +#include "flow/util/linked_hash_map.hpp" +#include "flow/util/linked_hash_set.hpp" +#include "flow/util/util.hpp" +#include + +namespace flow::util::test +{ + +namespace +{ +using std::string; +using std::vector; + +using uint = unsigned int; +static uint s_n_copies = 0; + +struct Obj +{ + string m_str; + Obj() = default; + Obj(const char* str) : m_str(str) {} + Obj(const Obj& src) : m_str(src.m_str) { ++s_n_copies; } + Obj(Obj&&) = default; + Obj& operator=(const Obj& src) { if (this != &src) { m_str = src.m_str; ++s_n_copies; } + return *this; } + Obj& operator=(Obj&&) = default; + bool operator==(const Obj& rhs) const { return m_str == rhs.m_str; } +}; + +size_t hash_value(const Obj& obj) { return boost::hash_value(obj.m_str); }; + +} // Anonymous namespace + +// Yes... this is very cheesy... but this is a test, so I don't really care. +#define CTX ostream_op_string("Caller context [", FLOW_UTIL_WHERE_AM_I_STR(), "].") + +TEST(Linked_hash, Interface) +{ + /* @todo I am sure there's more stuff to torture in Linked_hash_* for complete coverage; like custom + * equality and hash predicates for example. Also n_buckets. + * As it stands, this at least tests a bunch of things which is by far better than nothing. */ + + using std::swap; // This enables proper ADL. 
+ + const auto n_copies_check = [&](uint n, const string& ctx) + { + EXPECT_EQ(s_n_copies, n) << ctx; + s_n_copies = 0; + }; + + const auto keys_check_set = [](const auto& vals, const vector& exp, const string& ctx) + { + ASSERT_EQ(vals.size(), exp.size()) << ctx; + ASSERT_EQ(vals.size() == 0, vals.empty()) << ctx; + size_t idx = 0; + for (const auto& val : vals) + { + EXPECT_EQ(val.m_str, exp[idx]) << ctx; + ++idx; + } + for (auto rit = vals.crbegin(); rit != vals.crend(); ++rit) + { + --idx; + EXPECT_EQ(rit->m_str, exp[idx]) << ctx; + } + }; + + const auto keys_check_map = [](const auto& vals, const vector& exp, const string& ctx) + { + ASSERT_EQ(vals.size(), exp.size()) << ctx; + ASSERT_EQ(vals.size() == 0, vals.empty()) << ctx; + size_t idx = 0; + for (const auto& val : vals) + { + EXPECT_EQ(val.first, exp[idx]) << ctx; + ++idx; + } + for (auto rit = vals.crbegin(); rit != vals.crend(); ++rit) + { + --idx; + EXPECT_EQ(rit->first, exp[idx]) << ctx; + } + }; + + const auto vals_check_map = [](const auto& vals, const vector& exp, const string& ctx) + { + ASSERT_EQ(vals.size(), exp.size()) << ctx; + size_t idx = 0; + for (const auto& val : vals) + { + EXPECT_EQ(val.second.m_str, exp[idx]) << ctx; + ++idx; + } + for (auto rit = vals.crbegin(); rit != vals.crend(); ++rit) + { + --idx; + EXPECT_EQ(rit->second.m_str, exp[idx]) << ctx; + } + }; + + using Map = Linked_hash_map; // Obj can count copies thereof which we can check via n_copies_check(). + using Set = Linked_hash_set; // Might as well test a custom-ish key type. + + { // Map test block. + Map map1; + + s_n_copies = 0; + map1.insert(Map::Value_movable{"b", "X"}); + n_copies_check(0, CTX); // Move-cting .insert() should be chosen => no Obj{"X"} copy. + + map1["a"] = "A"; // Becomes newest (first) because inserted. + /* Inserting operator[] should internally avoid copying even the default-cted Obj{}. 
+ * Then the assignment to the Obj& should also be move-assignment of Obj{"A"} temporary (sanity-check of Obj code; + * not really checking anything inside Linked_hash_map). + * Anyway no copies => counter should be 0. */ + n_copies_check(0, CTX); + + map1["b"] = "B"; // Does not become newest (first) because already present (but mapped-value is replaced, X->B). + n_copies_check(0, CTX); + + Map map2{{ { "a", "A" }, { "b", "B" } }}; + + keys_check_map(map1, { "a", "b" }, CTX); + vals_check_map(map1, { "A", "B" }, CTX); + keys_check_map(map2, { "a", "b" }, CTX); + vals_check_map(map2, { "A", "B" }, CTX); + + Map::Value_movable val_pair1{"c", "C"}; + Map::Value val_pair2{"d", "D"}; + s_n_copies = 0; + map2.insert(std::move(val_pair1)); + n_copies_check(0, CTX); // "C" should not be copied b/c move() -- .insert(&&) should be chosen. + EXPECT_EQ(val_pair1.first, ""); // String key got destroyed too via move. + EXPECT_EQ(val_pair1.second.m_str, ""); + map2.insert(val_pair2); + n_copies_check(1, CTX); // "D" should be copied b/c no move() -- .insert(const&) should be chosen. + keys_check_map(map2, { "d", "c", "a", "b" }, CTX); + vals_check_map(map2, { "D", "C", "A", "B" }, CTX); + EXPECT_EQ(val_pair2.first, "d"); // String key got destroyed too via move. + EXPECT_EQ(val_pair2.second.m_str, "D"); + + // We've tested operator[] OK already, higher up above, but haven't checked the move/copy of key aspect of it. + string val1{"e"}; + string val2{"f"}; + string val3{"b"}; + auto& ref = map2[val3] = "X"; + EXPECT_EQ(val3, "b"); // Already present, so definitely key untouched. + EXPECT_EQ(ref, "X"); + ref = "B"; // Just change it back to "steady state." + map2[std::move(val1)] = "E"; // Not present and we used move() -- operator[&&] should be chosen. + EXPECT_EQ(val1, ""); + map2[val2] = "F"; // Not present and we did not use move() -- operator[const&] should be chosen. + EXPECT_EQ(val2, "f"); + // Get rid of the 2 new keys to get back to "steady state." 
(We test .erase() varieties more fully lower-down.) + map2.erase("e"); + map2.erase("f"); + + s_n_copies = 0; + swap(map1, map2); + n_copies_check(0, CTX); // Not a single copy of `Obj`s. @todo Somehow ensure other stuff (`string`s?) not copied? + keys_check_map(map1, { "d", "c", "a", "b" }, CTX); + vals_check_map(map1, { "D", "C", "A", "B" }, CTX); + keys_check_map(map2, { "a", "b" }, CTX); + vals_check_map(map2, { "A", "B" }, CTX); + + s_n_copies = 0; + map2 = map1; + n_copies_check(4, CTX); // All `Obj`s did get copied. @todo Could also check post-copy independence of map1 vs map2. + keys_check_map(map1, { "d", "c", "a", "b" }, CTX); + vals_check_map(map1, { "D", "C", "A", "B" }, CTX); + keys_check_map(map2, { "d", "c", "a", "b" }, CTX); + vals_check_map(map2, { "D", "C", "A", "B" }, CTX); + + s_n_copies = 0; + map1 = std::move(map2); + n_copies_check(0, CTX); // Nothing should be copied. + keys_check_map(map1, { "d", "c", "a", "b" }, CTX); + vals_check_map(map1, { "D", "C", "A", "B" }, CTX); + keys_check_map(map2, { }, CTX); + vals_check_map(map2, { }, CTX); + + // Same deal but construct instead of assigning. + + s_n_copies = 0; + const auto map4 = map1; + n_copies_check(4, CTX); + keys_check_map(map1, { "d", "c", "a", "b" }, CTX); + vals_check_map(map1, { "D", "C", "A", "B" }, CTX); + keys_check_map(map4, { "d", "c", "a", "b" }, CTX); + vals_check_map(map4, { "D", "C", "A", "B" }, CTX); + + s_n_copies = 0; + const auto map5 = std::move(map1); + n_copies_check(0, CTX); + keys_check_map(map1, { }, CTX); + vals_check_map(map1, { }, CTX); + keys_check_map(map5, { "d", "c", "a", "b" }, CTX); + vals_check_map(map5, { "D", "C", "A", "B" }, CTX); + + auto map6 = std::move(map5); + auto ret = map6.insert(Map::Value{"e", "E"}); + EXPECT_TRUE(ret.second); + keys_check_map(map6, { "e", "d", "c", "a", "b" }, CTX); + vals_check_map(map6, { "E", "D", "C", "A", "B" }, CTX); + ret.first->second = "X"; // Note: ret.first is iterator; ret.first->first is key (which is const). 
+ keys_check_map(map6, { "e", "d", "c", "a", "b" }, CTX); + vals_check_map(map6, { "X", "D", "C", "A", "B" }, CTX); + ret = map6.insert(Map::Value{"e", "E"}); + EXPECT_FALSE(ret.second); + keys_check_map(map6, { "e", "d", "c", "a", "b" }, CTX); + vals_check_map(map6, { "X", "D", "C", "A", "B" }, CTX); + ret.first->second = "E"; + keys_check_map(map6, { "e", "d", "c", "a", "b" }, CTX); + vals_check_map(map6, { "E", "D", "C", "A", "B" }, CTX); + + /* @todo Repeat, tediously, the insert() tests as on map6 above, but with the copying-insert instead of moving-insert. + * We did already test copying-insert, and the fact that it in facts inserts and moves, but we haven't tested its + * return value pair, nor that it no-ops if key already in map. */ + + const auto map7 = map6; + EXPECT_EQ(map7.find("b"), --map7.end()); + EXPECT_NE(map7.find("a"), map7.end()); + EXPECT_EQ(map7.find("x"), map7.end()); + EXPECT_EQ(map7.find("b"), --map7.cend()); + EXPECT_NE(map7.find("a"), map7.cend()); + EXPECT_EQ(map7.find("x"), map7.cend()); + const Map map8; + EXPECT_EQ(map8.find("a"), map8.end()); + Map map9; + EXPECT_EQ(map9.find("a"), map9.end()); + + auto map10 = map7; + const auto it1 = map10.find("c"); + EXPECT_EQ(it1->second, "C"); + it1->second = "X"; + const auto it2 = map10.find("c"); + EXPECT_EQ(it1->second, "X"); + EXPECT_EQ(it2->second, "X"); + EXPECT_EQ(it1, it2); + auto it3 = map10.cbegin(); + EXPECT_EQ(it3->first, "e"); + EXPECT_EQ(it3->second, "E"); + EXPECT_EQ(it3, map10.const_newest()); + it3 = --map10.cend(); + EXPECT_EQ(it3->first, "b"); + EXPECT_EQ(it3->second, "B"); + EXPECT_EQ(it3, --map10.const_past_oldest()); + auto it4 = map10.begin(); + EXPECT_EQ(it4->first, "e"); + EXPECT_EQ(it4->second, "E"); + EXPECT_EQ(it4, map10.const_newest()); + it4->second = "Z"; + keys_check_map(map10, { "e", "d", "c", "a", "b" }, CTX); + vals_check_map(map10, { "Z", "D", "X", "A", "B" }, CTX); + it4 = --map10.end(); + EXPECT_EQ(it4->first, "b"); + EXPECT_EQ(it4->second, "B"); + 
EXPECT_EQ(it4, --map10.past_oldest()); + EXPECT_EQ(it4, --map10.const_past_oldest()); + it4->second = "Y"; + keys_check_map(map10, { "e", "d", "c", "a", "b" }, CTX); + vals_check_map(map10, { "Z", "D", "X", "A", "Y" }, CTX); + + // Note: reverse-iterator accessors are tested OK in *_check_map(). @todo Test the non-const varieties also though. + + EXPECT_FALSE(map10.touch("x")); + EXPECT_TRUE(map10.touch("a")); + keys_check_map(map10, { "a", "e", "d", "c", "b" }, CTX); + vals_check_map(map10, { "A", "Z", "D", "X", "Y" }, CTX); + map10.touch(map10.find("b")); + keys_check_map(map10, { "b", "a", "e", "d", "c" }, CTX); + vals_check_map(map10, { "Y", "A", "Z", "D", "X" }, CTX); + map10.touch(map10.find("b")); + keys_check_map(map10, { "b", "a", "e", "d", "c" }, CTX); + vals_check_map(map10, { "Y", "A", "Z", "D", "X" }, CTX); + + EXPECT_EQ(map10.erase("x"), 0); + EXPECT_EQ(map10.erase("c"), 1); + EXPECT_EQ(map10.erase("c"), 0); + EXPECT_EQ(map10.erase("a"), 1); + EXPECT_EQ(map10.erase("a"), 0); + EXPECT_EQ(map10.erase("b"), 1); + EXPECT_EQ(map10.erase("b"), 0); + keys_check_map(map10, { "e", "d" }, CTX); + vals_check_map(map10, { "Z", "D" }, CTX); + map10.clear(); + keys_check_map(map10, { }, CTX); + vals_check_map(map10, { }, CTX); + map10 = map7; + keys_check_map(map10, { "e", "d", "c", "a", "b" }, CTX); + vals_check_map(map10, { "E", "D", "C", "A", "B" }, CTX); + auto it5 = map10.erase(map10.begin(), map10.find("c")); + keys_check_map(map10, { "c", "a", "b" }, CTX); + vals_check_map(map10, { "C", "A", "B" }, CTX); + EXPECT_EQ(it5, map10.begin()); + map10 = map7; + keys_check_map(map10, { "e", "d", "c", "a", "b" }, CTX); + vals_check_map(map10, { "E", "D", "C", "A", "B" }, CTX); + it5 = map10.erase(map10.find("c"), map10.end()); + keys_check_map(map10, { "e", "d" }, CTX); + vals_check_map(map10, { "E", "D" }, CTX); + EXPECT_EQ(it5, map10.end()); + map10 = map7; + keys_check_map(map10, { "e", "d", "c", "a", "b" }, CTX); + vals_check_map(map10, { "E", "D", "C", "A", "B" 
}, CTX); + it5 = map10.erase(map10.find("d"), map10.find("b")); + keys_check_map(map10, { "e", "b" }, CTX); + vals_check_map(map10, { "E", "B" }, CTX); + EXPECT_EQ(it5, --map10.end()); + EXPECT_EQ(it5->first, "b"); + EXPECT_EQ(it5->second, "B"); + + EXPECT_EQ(map10.count("e"), 1); + EXPECT_EQ(map10.count("b"), 1); + EXPECT_EQ(map10.count("x"), 0); + EXPECT_EQ(map10.count(""), 0); + map10[""] = "Q"; + EXPECT_EQ(map10.count(""), 1); + EXPECT_EQ(map10[""], "Q"); + EXPECT_EQ(map10["e"], "E"); + EXPECT_EQ(map10["b"], "B"); + + // @todo Test .max_size(), I suppose. + } // Map test block. + + // Test analogous stuff for Set, in the same order, omitting inapplicable things. Comments kept (even) lighter. + { + Set set1; + + s_n_copies = 0; + set1.insert("b"); + n_copies_check(0, CTX); // Move-cting .insert() should be chosen => no Obj{"b"} copy. + + set1.insert(Obj{"a"}); // Becomes newest (first) because inserted. + n_copies_check(0, CTX); + + Set set2{{ "a", "b" }}; + + keys_check_set(set1, { "a", "b" }, CTX); + keys_check_set(set2, { "a", "b" }, CTX); + + Obj val1{"c"}; + Obj val2{"d"}; + s_n_copies = 0; + set2.insert(std::move(val1)); + n_copies_check(0, CTX); // "c" should not be copied b/c move() -- .insert(&&) should be chosen. + EXPECT_EQ(val1.m_str, ""); // String key got destroyed too via move. + set2.insert(val2); + n_copies_check(1, CTX); // "d" should be copied b/c no move() -- .insert(const&) should be chosen. + keys_check_set(set2, { "d", "c", "a", "b" }, CTX); + EXPECT_EQ(val2.m_str, "d"); // String key got destroyed too via move. + + s_n_copies = 0; + swap(set1, set2); + n_copies_check(0, CTX); // Not a single copy of `Obj`s. @todo Somehow ensure other stuff (`string`s?) not copied? + keys_check_set(set1, { "d", "c", "a", "b" }, CTX); + keys_check_set(set2, { "a", "b" }, CTX); + + s_n_copies = 0; + set2 = set1; + n_copies_check(4, CTX); // All `Obj`s did get copied. @todo Could also check post-copy independence of set1 vs set2. 
+ keys_check_set(set1, { "d", "c", "a", "b" }, CTX); + keys_check_set(set2, { "d", "c", "a", "b" }, CTX); + + s_n_copies = 0; + set1 = std::move(set2); + n_copies_check(0, CTX); // Nothing should be copied. + keys_check_set(set1, { "d", "c", "a", "b" }, CTX); + keys_check_set(set2, { }, CTX); + + // Same deal but construct instead of assigning. + + s_n_copies = 0; + const auto set4 = set1; + n_copies_check(4, CTX); + keys_check_set(set1, { "d", "c", "a", "b" }, CTX); + keys_check_set(set4, { "d", "c", "a", "b" }, CTX); + + s_n_copies = 0; + const auto set5 = std::move(set1); + n_copies_check(0, CTX); + keys_check_set(set1, { }, CTX); + keys_check_set(set5, { "d", "c", "a", "b" }, CTX); + + auto set6 = std::move(set5); + auto ret = set6.insert("e"); + EXPECT_TRUE(ret.second); + EXPECT_EQ(ret.first->m_str, "e"); + keys_check_set(set6, { "e", "d", "c", "a", "b" }, CTX); + ret = set6.insert("e"); + EXPECT_FALSE(ret.second); + EXPECT_EQ(ret.first->m_str, "e"); + keys_check_set(set6, { "e", "d", "c", "a", "b" }, CTX); + + /* @todo Repeat, tediously, the insert() tests as on set6 above, but with the copying-insert instead of + * moving-insert. + * We did already test copying-insert, and the fact that it in facts inserts and moves, but we haven't tested its + * return value pair, nor that it no-ops if key already in set. 
*/ + + const auto set7 = set6; + EXPECT_EQ(set7.find("b"), --set7.end()); + EXPECT_NE(set7.find("a"), set7.end()); + EXPECT_EQ(set7.find("x"), set7.end()); + EXPECT_EQ(set7.find("b"), --set7.cend()); + EXPECT_NE(set7.find("a"), set7.cend()); + EXPECT_EQ(set7.find("x"), set7.cend()); + const Set set8; + EXPECT_EQ(set8.find("a"), set8.end()); + Set set9; + EXPECT_EQ(set9.find("a"), set9.end()); + + auto set10 = set7; + const auto it1 = set10.find("c"); + EXPECT_EQ(it1->m_str, "c"); + const auto it2 = set10.find("c"); + EXPECT_EQ(it2->m_str, "c"); + EXPECT_EQ(it1, it2); + auto it3 = set10.cbegin(); + EXPECT_EQ(it3->m_str, "e"); + EXPECT_EQ(it3, set10.const_newest()); + it3 = --set10.cend(); + EXPECT_EQ(it3->m_str, "b"); + EXPECT_EQ(it3, --set10.const_past_oldest()); + auto it4 = set10.begin(); + EXPECT_EQ(it4->m_str, "e"); + EXPECT_EQ(it4, set10.const_newest()); + keys_check_set(set10, { "e", "d", "c", "a", "b" }, CTX); + it4 = --set10.end(); + EXPECT_EQ(it4->m_str, "b"); + EXPECT_EQ(it4, --set10.past_oldest()); + EXPECT_EQ(it4, --set10.const_past_oldest()); + keys_check_set(set10, { "e", "d", "c", "a", "b" }, CTX); + + // Note: reverse-iterator accessors are tested OK in *_check_set(). @todo Test the non-const varieties also though. 
+ + EXPECT_FALSE(set10.touch("x")); + EXPECT_TRUE(set10.touch("a")); + keys_check_set(set10, { "a", "e", "d", "c", "b" }, CTX); + set10.touch(set10.find("b")); + keys_check_set(set10, { "b", "a", "e", "d", "c" }, CTX); + set10.touch(set10.find("b")); + keys_check_set(set10, { "b", "a", "e", "d", "c" }, CTX); + + EXPECT_EQ(set10.erase("x"), 0); + EXPECT_EQ(set10.erase("c"), 1); + EXPECT_EQ(set10.erase("c"), 0); + EXPECT_EQ(set10.erase("a"), 1); + EXPECT_EQ(set10.erase("a"), 0); + EXPECT_EQ(set10.erase("b"), 1); + EXPECT_EQ(set10.erase("b"), 0); + keys_check_set(set10, { "e", "d" }, CTX); + set10.clear(); + keys_check_set(set10, { }, CTX); + set10 = set7; + keys_check_set(set10, { "e", "d", "c", "a", "b" }, CTX); + auto it5 = set10.erase(set10.begin(), set10.find("c")); + keys_check_set(set10, { "c", "a", "b" }, CTX); + EXPECT_EQ(it5, set10.begin()); + set10 = set7; + keys_check_set(set10, { "e", "d", "c", "a", "b" }, CTX); + it5 = set10.erase(set10.find("c"), set10.end()); + keys_check_set(set10, { "e", "d" }, CTX); + EXPECT_EQ(it5, set10.end()); + set10 = set7; + keys_check_set(set10, { "e", "d", "c", "a", "b" }, CTX); + it5 = set10.erase(set10.find("d"), set10.find("b")); + keys_check_set(set10, { "e", "b" }, CTX); + EXPECT_EQ(it5, --set10.end()); + EXPECT_EQ(it5->m_str, "b"); + + EXPECT_EQ(set10.count("e"), 1); + EXPECT_EQ(set10.count("b"), 1); + EXPECT_EQ(set10.count("x"), 0); + EXPECT_EQ(set10.count(""), 0); + set10.insert(""); + EXPECT_EQ(set10.count(""), 1); + } // Set test block. +} // TEST(Linked_hash, Interface) + +} // namespace flow::util::test diff --git a/src/flow/util/test/thread_lcl_test.cpp b/src/flow/util/test/thread_lcl_test.cpp new file mode 100644 index 000000000..555d6f6b5 --- /dev/null +++ b/src/flow/util/test/thread_lcl_test.cpp @@ -0,0 +1,247 @@ +/* Flow + * Copyright 2023 Akamai Technologies, Inc. + * + * Licensed under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in + * compliance with the License. 
You may obtain a copy + * of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in + * writing, software distributed under the License is + * distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing + * permissions and limitations under the License. */ + +#include "flow/log/log.hpp" +#include "flow/util/thread_lcl.hpp" +#include "flow/util/util.hpp" +#include "flow/async/single_thread_task_loop.hpp" +#include "flow/test/test_logger.hpp" +#include +#include +#include + +namespace flow::util::test +{ + +namespace +{ +using std::optional; +using std::string; +using flow::test::Test_logger; +using Thread_loop = async::Single_thread_task_loop; +template +using Tl_reg = Thread_local_state_registry; + +static string s_events; +static std::atomic s_id{1}; + +struct State +{ + string m_stuff{"stuff"}; + + std::atomic m_do_action{false}; + + ~State() + { + s_events += ostream_op_string("~State/", s_id++, '\n'); + } +}; +struct State2 +{ + const int m_x; + + State2(int x): m_x(x) {} + + ~State2() + { + s_events += ostream_op_string("~State/", s_id++, '\n'); + } +}; + +static optional> s_reg2{std::in_place, nullptr, "testReg2"}; + +} // Anonymous namespace + +TEST(Thread_local_state_registry, Interface) +{ + Test_logger logger; + FLOW_LOG_SET_CONTEXT(&logger, Flow_log_component::S_UNCAT); + + optional> reg1; + reg1.emplace(&logger, "testReg1"); + + /* We don't test that this works (maybe we should? @todo), but one can see it work in the console output, + * and at least this won't crash; that's something. Plus we declared it `static` to test pre-main() init + * (.set_logger() exists essentially for that use-case). 
+ * + * @todo Should also test set_logger() propagation: + * - If T in Thread_local_state_registry has Log_context_mt, set_logger() shall propagate to each thread's + * extant T and future such `T`s. + * - */ + s_reg2->set_logger(&logger); + + EXPECT_TRUE(s_events.empty()); + + auto s1 = reg1->this_thread_state(); + EXPECT_EQ(s1->m_stuff, "stuff"); + EXPECT_EQ(s1, reg1->this_thread_state()); + + auto s2 = s_reg2->this_thread_state(); + s2->m_stuff = "other"; + EXPECT_EQ(s2->m_stuff, "other"); + EXPECT_NE(s1, s2); + EXPECT_EQ(s2, s_reg2->this_thread_state()); + + s_reg2.reset(); + reg1.reset(); + + EXPECT_EQ(s_events, "~State/1\n~State/2\n"); + s_events.clear(); + + { + optional> reg3; + reg3.emplace(&logger, "testReg3"); + { + Thread_loop t1{&logger, "thread1"}; + t1.start([&]() + { + auto s1 = reg3->this_thread_state(); + EXPECT_EQ(s1->m_stuff, "stuff"); + EXPECT_EQ(s1, reg3->this_thread_state()); + }); + Thread_loop t2{&logger, "thread2"}; + t2.start([&]() + { + auto s1 = reg3->this_thread_state(); + EXPECT_EQ(s1->m_stuff, "stuff"); + EXPECT_EQ(s1, reg3->this_thread_state()); + }); + auto s2 = reg3->this_thread_state(); + EXPECT_EQ(s2, reg3->this_thread_state()); + + EXPECT_TRUE(s_events.empty()); + t1.stop(); + EXPECT_EQ(s_events, "~State/3\n"); + t2.stop(); + EXPECT_EQ(s_events, "~State/3\n~State/4\n"); + } + EXPECT_EQ(s_events, "~State/3\n~State/4\n"); + } + EXPECT_EQ(s_events, "~State/3\n~State/4\n~State/5\n"); + s_events.clear(); + + { + // Create a couple of `Tl_reg`s of the same type; and of a different type (some internal `static`s exercised). 
+ optional> reg3; + reg3.emplace(&logger, "testReg3"); + optional> reg4; + reg4.emplace(&logger, "testReg4"); + optional> reg3b; + reg3b.emplace(&logger, "test3b", []() -> auto { return new State2{3}; }); + optional> reg4b; + reg4b.emplace(&logger, "test4b"); + reg4b->m_create_state_func = []() -> auto { return new State2{4}; }; + + Thread_loop t1{&logger, "thread1"}; + Thread_loop t2{&logger, "thread2"}; + t1.start([&]() { reg3->this_thread_state(); reg3b->this_thread_state(); + reg4->this_thread_state(); reg4b->this_thread_state(); + EXPECT_EQ(reg3b->this_thread_state()->m_x, 3); + EXPECT_EQ(reg4b->this_thread_state()->m_x, 4); }); + t2.start([&]() { reg3->this_thread_state(); reg4b->this_thread_state(); }); + EXPECT_TRUE(s_events.empty()); + t1.stop(); + EXPECT_EQ(s_events, "~State/6\n~State/7\n~State/8\n~State/9\n"); + reg4b.reset(); + EXPECT_EQ(s_events, "~State/6\n~State/7\n~State/8\n~State/9\n~State/10\n"); + reg3.reset(); + EXPECT_EQ(s_events, "~State/6\n~State/7\n~State/8\n~State/9\n~State/10\n~State/11\n"); + t2.stop(); + EXPECT_EQ(s_events, "~State/6\n~State/7\n~State/8\n~State/9\n~State/10\n~State/11\n"); + s_events.clear(); + } + EXPECT_TRUE(s_events.empty()); + + { + using Task = async::Scheduled_task; + + optional> reg3; + reg3.emplace(&logger, "testLock"); + + Thread_loop t1{&logger, "threadLoop1"}; + Task func1 = [&](bool) + { + bool exp{true}; + if (reg3->this_thread_state()->m_do_action.compare_exchange_strong(exp, false, std::memory_order_relaxed)) + { + s_events += "didAction\n"; + } + t1.schedule_from_now(boost::chrono::milliseconds(500), Task{func1}); + }; + Thread_loop t2{&logger, "threadLoop2"}; + Task func2 = [&](bool) + { + bool exp{true}; + if (reg3->this_thread_state()->m_do_action.compare_exchange_strong(exp, false, std::memory_order_relaxed)) + { + s_events += "didAction\n"; + } + t2.schedule_from_now(boost::chrono::milliseconds(500), Task{func2}); + }; + + t1.start([&]() { func1(false); }); + t2.start([&]() { func2(false); }); + + 
EXPECT_TRUE(s_events.empty()); + reg3->while_locked([&](const auto& lock) + { + const auto& states = reg3->state_per_thread(lock); + for (const auto& state_and_mdt : states) + { + FLOW_LOG_INFO("Affecting TLS state for thread [" << state_and_mdt.second.m_thread_nickname << "]."); + state_and_mdt.first->m_do_action.store(true, std::memory_order_relaxed); + } + }); + this_thread::sleep_for(boost::chrono::seconds(2)); + EXPECT_EQ(s_events, "didAction\ndidAction\n"); + this_thread::sleep_for(boost::chrono::seconds(2)); + EXPECT_EQ(s_events, "didAction\ndidAction\n"); + } + EXPECT_EQ(s_events, "didAction\ndidAction\n~State/12\n~State/13\n"); +} // TEST(Thread_local_state_registry, Interface) + +TEST(Thread_local_state_registry, DISABLED_Advanced) +{ + /* @todo I (ygoldfel) am unconvinced (my own) existing testing of T_l_s_registry above checks all code paths + * of this tricky facility. I wrote it under major time pressure, alongside developing the facility under + * major time pressure; so it does check the essential stuff most relevant at the time. I did not, however, + * scan all the code paths and subtleties and test them all for sure. + * + * This test thus "fails" but is DISABLED to perhaps draw some attention to this to-do. + * + * Recommended approach: + * - Go over `Interface` test carefully and (1) comment what is being tested and (2) therefore understand + * what is being tested. + * - Go over the Thread_local_state_registry code in detail and add tests in here for the parts missed + * in preceding bullet point. */ + EXPECT_TRUE(false); +} + +TEST(Thread_local_state_registry, DISABLED_Polled_shared_state) +{ + /* Flow-IPC (a Flow-using project) does use Polled_shared_state extensively, and those P_s_s-using facilities + * are tested more black-boxily, so there is some P_s_s unit/functional-testing indirectly, vaguely speaking + * in Flow-land (as of this writing Flow-IPC and Flow are generally tested together, at least in open-source + * official project). 
However: + * + * @todo Add unit test here for Polled_shared_state itself. + * + * This test thus "fails" but is DISABLED to perhaps draw some attention to this to-do. */ + EXPECT_TRUE(false); +} + +} // namespace flow::util::test diff --git a/src/flow/util/test/util_test.cpp b/src/flow/util/test/util_test.cpp new file mode 100644 index 000000000..733168977 --- /dev/null +++ b/src/flow/util/test/util_test.cpp @@ -0,0 +1,104 @@ +/* Flow + * Copyright 2023 Akamai Technologies, Inc. + * + * Licensed under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in + * compliance with the License. You may obtain a copy + * of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in + * writing, software distributed under the License is + * distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing + * permissions and limitations under the License. */ + +#include "flow/util/util.hpp" +#include +#include +#include + +namespace flow::util::test +{ + +namespace +{ +using std::string; +} // Anonymous namespace + +// Yes... this is very cheesy... but this is a test, so I don't really care. +#define CTX FLOW_UTIL_WHERE_AM_I_STR() + +TEST(Util_scoped_setter, Interface) +{ + string str; + int num = 0; + EXPECT_TRUE(str.empty()); + EXPECT_EQ(num, 0); + { + Scoped_setter set{&str, "abc"}; + Scoped_setter set2{&num, 10}; + EXPECT_EQ(str, "abc"); + EXPECT_EQ(num, 10); + { + // Test out Scoped_setter move ctor... in 1-2 ways (return, direct). 
+ auto set2 + = ([&]() -> auto { return std::make_unique>(&num, 5); }) + (); + auto set + = ([&]() -> auto { return std::make_unique>(&str, "def"); }) + (); + Scoped_setter set2_a{std::move(*set2)}; + Scoped_setter set_a{std::move(*set)}; + EXPECT_EQ(str, "def"); + EXPECT_EQ(num, 5); + set2.reset(); // Destroy the moved-from guys -- this should have no effect. + set.reset(); + EXPECT_EQ(str, "def"); + EXPECT_EQ(num, 5); + } + EXPECT_EQ(str, "abc"); + EXPECT_EQ(num, 10); + } + EXPECT_TRUE(str.empty()); + EXPECT_EQ(num, 0); +} // TEST(Util_scoped_setter, Interface) + +TEST(Util_misc, Interface) +{ + // ostream_op_string() is built on top of vaious things, so this alone is a pretty decent check. @todo More. + EXPECT_EQ(ostream_op_string("abc[", 2, "] flag[", true, "]:", std::hex, 12), + "abc[2] flag[1]:c"); + + EXPECT_EQ(ceil_div(0, 1024), 0); + EXPECT_EQ(ceil_div(1, 1024u), 1); + EXPECT_EQ(ceil_div(2, 1024l), 1); + EXPECT_EQ(ceil_div(1023, 1024ul), 1); + EXPECT_EQ(ceil_div(1024, 1024ll), 1); + EXPECT_EQ(ceil_div(1025, 1024ull), 2); + EXPECT_EQ(ceil_div(1026, 1024ll), 2); + EXPECT_EQ(ceil_div(2047, 1024ul), 2); + EXPECT_EQ(ceil_div(2048, 1024l), 2); + EXPECT_EQ(ceil_div(2049, 1024u), 3); + EXPECT_EQ(ceil_div(2050, 1024), 3); + EXPECT_EQ(ceil_div(8192, 1024u), 8); + EXPECT_EQ(ceil_div(8193, 1024l), 9); + + EXPECT_EQ(round_to_multiple(0, 1024), 0 * 1024); + EXPECT_EQ(round_to_multiple(1, 1024u), 1 * 1024); + EXPECT_EQ(round_to_multiple(2, 1024l), 1 * 1024); + EXPECT_EQ(round_to_multiple(1023, 1024ul), 1 * 1024); + EXPECT_EQ(round_to_multiple(1024, 1024ll), 1 * 1024); + EXPECT_EQ(round_to_multiple(1025, 1024ull), 2 * 1024); + EXPECT_EQ(round_to_multiple(1026, 1024ll), 2 * 1024); + EXPECT_EQ(round_to_multiple(2047, 1024ul), 2 * 1024); + EXPECT_EQ(round_to_multiple(2048, 1024l), 2 * 1024); + EXPECT_EQ(round_to_multiple(2049, 1024u), 3 * 1024); + EXPECT_EQ(round_to_multiple(2050, 1024), 3 * 1024); + EXPECT_EQ(round_to_multiple(8192, 1024u), 8 * 1024); + 
EXPECT_EQ(round_to_multiple(8193, 1024l), 9 * 1024); +} // TEST(Util_misc, Interface) + +} // namespace flow::util::test diff --git a/src/flow/util/thread_lcl.hpp b/src/flow/util/thread_lcl.hpp new file mode 100644 index 000000000..7fdd0bb1a --- /dev/null +++ b/src/flow/util/thread_lcl.hpp @@ -0,0 +1,1190 @@ +/* Flow + * Copyright 2023 Akamai Technologies, Inc. + * + * Licensed under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in + * compliance with the License. You may obtain a copy + * of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in + * writing, software distributed under the License is + * distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing + * permissions and limitations under the License. */ + +/// @file +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace flow::util +{ +// Types. + +/** + * Similar to `boost::thread_specific_ptr` but with built-in lazy-init semantics; and more importantly on + * destruction deletes any outstanding `T`s belonging to threads that are still up; plus allows iteration + * through all per-thread data. + * + * An object of this type manages thread-local data as encapsulated in the user-supplied-as-template-arg type, + * each of which is instantiated (constructed) as needed for each given thread (on first this_thread_state() accessor + * call in that thread); and cleaned (via destructor) at each thread's exit or `*this` destruction -- whichever + * occurs earlier. + * + * ### Overview/rationale ### + * Fundamentally `Thread_local_state_registry` is quite similar to `boost::thread_specific_ptr`. 
+ * There are some minor differences (it is more rigid, always using `new` and `delete` instead of leaving it + * to the user; mandating a lazy-initialization semantic instead of leaving it to the user; + * disallowing any reset to null and back from null), but these are just happenstance/preference-based. + * Likely we'd have just used the Boost guy, if that's all we wanted. + * + * The main reason `Thread_local_state_registry` exists is the following feature: + * - If `~Thread_local_state_registry` (a `*this` dtor) executes before a given thread X, that has earlier + * caused the creation of a thread-local `T` (by calling `this->this_thread_state()` from X), then: + * - That dtor, from whichever thread invoked it, deletes that thread-local `T` (for all `T`). + * - Corollary: A related feature is the ability to look at all per-thread data accumulated so far (from any + * thread). See state_per_thread() accessor (+ while_locked()). + * + * So `*this` dtor does the equivalent of standard per-thread cleanup of per-thread data, if it is invoked + * before such standard per-thread cleanup has run (because the relevant threads have simply not yet exited). + * + * `thread_specific_ptr` does not do that: you must either `.reset()` from each relevant thread, before + * the `thread_specific_ptr` is itself deleted; or any such thread must exit before (causing an implicit `.reset()`). + * Nor can one iterate through other threads' data. + * + * For this reason most people declare `thread_specific_ptr` either `static` or + * as a global, as then the `thread_specific_ptr` always outlives the relevant threads, and everything is fine and easy. + * What if you really must clean resources earlier, when they are no longer necessary, but relevant threads may + * stay around? Then try a `*this`. 
+ * + * As a secondary reason (ignoring the above) `Thread_local_state_registry` has a more straightforward/rigid API + * that enforces certain assumptions/conventions (some of this is mentioned above). These might be appealing + * depending on one's taste/reasoning. + * + * ### How to use ### + * Ensure the template-arg #Thread_local_state has a proper destructor; and either ensure it has default (no-arg) ctor + * (in which case it'll be created via `new Thread_local_state{}`), or assign #m_create_state_func (to choose + * a method of creation yourself). + * + * Addendum: the default is `new Thread_local_state{this->get_logger()}` if and only if `Thread_local_state*` is + * convertible to `Log_context_mt*` (i.e., the latter is a `public` super-class of the former). + * See also Logging section below. + * + * From any thread where you need a #Thread_local_state, call `this->this_thread_state()`. The first time + * in a given thread, this shall perform and save `new Thread_local_state{}`; subsequent times it shall return + * the same pointer. (You can also save the pointer and reuse it; just be careful.) + * + * A given thread's #Thread_local_state object shall be deleted via `delete Thread_local_state` when one of + * the following occurs, whichever happens first: + * - The thread exits. (Deletion occurs shortly before.) + * - `*this` is destroyed (in some -- any -- thread, possibly a totally different one; or one of the ones + * for which this_thread_state() was called). + * + * That's it. It is simple. However we emphasize that using this properly may take some care: particularly + * as concerns the contents of the #Thread_local_state dtor (whether auto-generated or explicit or some mix), which + * might run from the relevant thread X in which the underlying object was created and used; but it might + * also instead run from whichever thread invokes the Thread_local_state_registry dtor first. 
+ * + * See doc header for ~Thread_local_state_registry() (dtor) for more on that particular subject. + * + * ### How to iterate over/possibly modify other threads' data (safely) ### + * In short please see state_per_thread() doc header. + * + * Also, particularly if you are employing some variation on the "thread-cached access to central state" pattern, + * it is potentially critical to read the relevant notes in the this_thread_state() doc header. + * + * ### Logging ### + * Logging is somewhat more subtle than is typically the case, because a Thread_local_state_registry is often + * declared `static` or even global, which means a log::Logger might not be available at times such as before `main()`, + * after `main()`, or inside `main()` but outside when some main `Logger` is available. Therefore it is + * often useful to, e.g., start with `Logger* = nullptr`, change it to something else, then change it back. + * + * Please use super-class log::Log_context_mt::set_logger() accordingly. + * + * However to avoid any trouble if this_thread_state() is called during a `Logger` change: + * - Note that it is `Log_context_mt`, not `Log_context`, so this is thread-safe. + * - Internally, perf-wise, we take steps to avoid this having any appreciable effect on fast-path performance. + * - If and only if `Thread_local_state*` is convertible to `Log_context_mt*` (i.e., the latter is a `public` + * super-class of the former), set_logger() shall invoke `state->set_logger()` to each `state` in + * state_per_thread() (i.e., all `Thread_local_state`s currently extant). + * - Note that you can override `Log_context_mt::set_logger()` in your `Thread_local_state` so as to, e.g., + * further propagate the new logger to other parts of `Thread_local_state` internals. + * + * @tparam Thread_local_state_t + * Managed object type -- see above. 
We repeat: must have no-arg (default) ctor, or be compatible with your + * custom #m_create_state_func; dtor must perform + * appropriate cleanup which in particular shall be run from exactly one of the following 2 contexts: + * (1) from the thread in which it was created via this_thread_state(), just before the thread exits; + * (2) from within the `*this` dtor from whichever thread that was invoked (which may be the creation-thread; + * one of the other creation-threads; or some other thread entirely). See dtor doc header. + */ +template<typename Thread_local_state_t> +class Thread_local_state_registry : + public log::Log_context_mt, + private boost::noncopyable +{ +public: + // Types. + + /// Short-hand for template parameter type. See our class doc header for requirements. + using Thread_local_state = Thread_local_state_t; + + /// General info (as of this writing for logging only) about a given entry (thread/object) in state_per_thread(). + struct Metadata + { + // Data. + + /// Thread nickname as per log::Logger::set_thread_info(). (Reminder: Might equal `m_thread_id`.) + std::string m_thread_nickname; + /// Thread ID. + Thread_id m_thread_id; + }; + + /// Return type of state_per_thread(). + using State_per_thread_map = boost::unordered_map<Thread_local_state*, Metadata>; + + /// Short-hand for mutex lock; made public for use in while_locked() and state_per_thread(). + using Lock = Lock_guard; + + // Constants. + + /** + * `true` if and only if #Thread_local_state is a public sub-class of log::Log_context_mt which has + * implications on set_logger() and default #m_create_state_func behavior. + */ + static constexpr bool S_TL_STATE_HAS_MT_LOG_CONTEXT = std::is_convertible_v<Thread_local_state*, log::Log_context_mt*>; + + static_assert(!std::is_convertible_v<Thread_local_state*, log::Log_context*>, + "Thread_local_state_t template param type should not derive from log::Log_context, as " + "then set_logger() is not thread-safe; please use Log_context_mt (but be mindful of " + "locking-while-logging perf effects in fast-paths)."); + + // Data.
+ + /** + * this_thread_state(), when needing to create a thread's local new #Thread_local_state to return, makes + * a stack copy of this member, calls that copy with no args, and uses the `Thread_local_state*` result + * as the return value for that thread. + * + * If, when needed, this value is null (`m_create_state_func.empty() == true`), then: + * - If #Thread_local_state is default-ctible and #S_TL_STATE_HAS_MT_LOG_CONTEXT is `false`: + * uses `new Thread_local_state`. + * - If #Thread_local_state is ctible in form `Thread_local_state{lgr}` (where `lgr` is a `log::Logger*`), + * and #S_TL_STATE_HAS_MT_LOG_CONTEXT is `true`: + * uses `new Thread_local_state{get_logger()}`. + * - Otherwise: Behavior is undefined (assertion may trip at this time). + * + * `m_create_state_func` must return a pointer that can be `delete`d in standard fashion. + * + * ### Thread safety ### + * It is not safe to assign this while a thread-first this_thread_state() is invoked. + */ + Function<Thread_local_state* ()> m_create_state_func; + + // Constructors/destructor. + + /** + * Create empty registry. Subsequently you may call this_thread_state() from any thread where you want to use + * (when called first time, create) thread-local state (a #Thread_local_state). + * + * @param logger_ptr + * Logger to use for logging subsequently. + * @param nickname_str + * See nickname(). + * @param create_state_func + * Initial value for #m_create_state_func. Default is an `.empty()` one (see member doc header for info). + */ + explicit Thread_local_state_registry(log::Logger* logger_ptr, String_view nickname_str, + decltype(m_create_state_func)&& create_state_func = {}); + + /** + * Deletes each #Thread_local_state to have been created so far by calls to this_thread_state() from various + * threads (possibly but not necessarily including this thread). + * + * ### Careful! ### + * No thread (not the calling or any other thread) must access a #Thread_local_state returned from `*this`, once + * this dtor begins executing.
This is usually pretty natural to guarantee by having + * your Thread_local_state_registry properly placed among various private data members and APIs accessing them. + * + * The dtor in the type #Thread_local_state itself must correctly run from *any* thread. + * - For many things that's no problem... just normal C++ data and `unique_ptr`s and such. + * - For some resources it might be a problem, namely for resources that are thread-local in nature that must + * be explicitly freed via API calls. + * Example: flushing a memory manager's thread-cache X created for/in thread T might be only possible + * in thread T; while also being a quite-natural thing to do in that thread, during thread T cleanup. + * From any other thread it might lead to undefined behavior. + * - In this case recall this *fact*: + * `~Thread_local_state()` shall run *either* from its relevant thread; *or* from + * the daddy `~Thread_local_state_registry()`. + * - Usually in the latter case, everything is going down anyway -- hence typically it is not necessary to + * specifically clean such per-thread resources as thread-caches. + * - So it is simple to: + * - Save a data member containing, e.g., `boost::this_thread::get_id()` in #Thread_local_state. + * - In its dtor check whether the thread-ID at *that* time equals the saved one. If so -- great, + * clean the thing. If not -- just don't (it is probably moot as shown above). + * - If it is not moot, you'll have to come up with something clever. Unlikely though. + */ + ~Thread_local_state_registry(); + + // Methods. + + /** + * Returns pointer to this thread's thread-local object, first constructing it via #m_create_state_func if + * it is the first `this->this_thread_state()` call in this thread. In a given thread this shall always return + * the same pointer. + * + * The pointee shall be destroyed from `*this` dtor or just before this thread's exit, from this thread, whichever + * occurs first. 
You may not call this, or use the returned pointer, after either routine begins executing. + * + * ### Thread-caching of central canonical state: Interaction with while_locked() and state_per_thread() ### + * The following is irrelevant in the fast-path, wherein this is *not* the first call to this method in the + * current thread. It is relevant only in the slow-path, wherein this *is* the first call to this method in the + * current thread. In that case we make the following formal guarantee: + * + * - A ctor of #Thread_local_state is invoked (as you know). + * - It is invoked while_locked(). + * + * The most immediate consequence of the latter is simply: Do not call while_locked() inside #Thread_local_state + * ctor; it will deadlock. That aside though: + * + * What's the rationale for this guarantee? Answer: In many cases it does not matter, and other than the last bullet + * one would not need to worry about it. It *can* however matter in more complex setups, namely the + * pattern "thread-caching of central canonical state." In this pattern: + * + * - Some kind of *central state* (e.g., *canonical* info being distributed into thread-local caches) must be + * - seeded (copied, as a *pull*) into any new #Thread_local_state; and + * - updated (copied, as a *push*) into any existing #Thread_local_state, if the canonical state itself is + * modified (usually assumed to be rare). + * - Suppose one invokes while_locked() whenever modifying the central (canonical) state (perhaps infrequently). + * - And we also guarantee it is already in effect inside #Thread_local_state ctor. + * - Hence, as is natural, we do the seeding/pulling of the central state inside that ctor.
+ * - In that case while_locked() being active in the call stack (<=> its implied mutex being locked) guarantees + * the synchronization of the following state: + * - which `Thread_local_state`s exist (in the sense that they might be returned via + * this_thread_state() in the future) in the process; + * - the cached copies of the canonical state in all existent (as defined in previous bullet) + * `Thread_local_state`s; + * - the canonical state (equal to every cached copy!). + * + * This simplifies thinking about the situation immensely, as to the extent that the central state is distributed + * to threads via thread-local `Thread_local_state` objects, the whole thing is monolithic: the state is synchronized + * to all relevant threads at all times. That said the following is an important corollary, at least for this + * use-case: + * + * - Assumption: To be useful, the central-state-copy must be accessed by users in relevant threads, probably + * via an accessor; so something like: `const auto cool_state_copy = X.this_thread_state()->cool_state()`. + * - There are of course variations on this; it could be a method of `Thread_local_state` that uses + * the value of the `private` central-state-copy for some computation. We'll use the accessor setup for + * exposition purposes. + * - Fact: The central-state-copy inside `*X.this_thread_state()` for the current thread can change at any time. + * - Therefore: cool_state() accessor, internally, *must* lock/unlock *some* mutex in order to guarantee + * synchronization. + * - It would be safe for cool_state() to "simply" use while_locked(). However, in any perf-sensitive scenario + * (which is essentially guaranteed to be the case: otherwise why set up thread-cached access to the cool-state + * in the first place?) this is utterly unacceptable. Now any threads using `->cool_state()` on their + * thread-local `Thread_local_state`s will contend for the same central mutex; it defeats the whole purpose.
+ * - Hence the corollary: + * - Probably you want to introduce your own additional mutex as a data member of #Thread_local_state. Call + * it `m_cool_state_mutex`, say. + * - In `->cool_state()` impl, lock it, get the copy of the central-state-copy cool-state, unlock it, return + * the copy. + * - In the *push* code invoked when the *canonical* central-state is updated -- as noted, this occurs + * while_locked() already -- similarly, when pushing to per-thread `Thread_local_state* x`, lock + * `x->m_cool_state_mutex`, update the central-state-copy of `*x`, unlock. + * - Since there are up to 2 mutexes involved (while_locked() central mutex, `x->m_cool_state_mutex` "little" + * mutex), there is some danger of deadlock; but if you are careful then it will be fine: + * - Fast-path is in `x->cool_state()`: Only lock `x->m_cool_state_mutex`. + * - Slow-path 1 is in the central-state-updating (push) code: `while_locked(F)`; inside `F()` lock + * `x->m_cool_state_mutex` for each `x` in state_per_thread(). + * - Slow-path 2 is in the `*x` ctor: central-state-init (pull) code: `while_locked()` is automatically + * in effect inside this_thread_state(); no need to lock `x->m_cool_state_mutex`, since no-place has + * access to `*x` until its ctor finishes and hence this_thread_state() returns. + * - The bad news: Your impl is no longer lock-free even in the fast-path: `X.this_thread_state()->cool_state()` + * does lock and unlock a mutex. + * - The good news: This mutex is ~never contended: At most 2 threads can even theoretically vie for it + * at a time; and except when *canonical* state must be updated (typically rare), it is only 1 thread. + * A regular mutex being locked/unlocked, sans contention, is quite cheap. This should more than defeat + * the preceding "bad news" bullet. + * + * @return See above. Never null. 
+ */ + Thread_local_state* this_thread_state(); + + /** + * Returns pointer to this thread's thread-local object, if it has been created via an earlier this_thread_state() + * call; or null if that has not yet occurred. + * + * @return See above. + */ + Thread_local_state* this_thread_state_or_null(); + + /** + * Returns reference to immutable container holding info for each thread in which this_thread_state() has been + * called: the keys are resulting `Thread_local_state*` pointers; the values are potentially interesting thread + * info such as thread ID. + * + * ### What you may do ### + * You may access the returned data structure, including the #Thread_local_state pointees, in read-only mode. + * + * You may write to each individual #Thread_local_state pointee. Moreover you are guaranteed (see + * "Thread safety" below) that no while_locked() user is doing the same simultaneously (by the while_locked() + * contract). + * + * If you *do* write to a particular pointee, remember these points: + * - Probably (unless you intentionally avoid it) you're writing to it *not* from the thread to which it + * belongs (in the sense that this_thread_state() would be called to obtain the same pointer). + * - Therefore you must synchronize any such concurrent read/write accesses from this thread and the owner + * thread (your own code therein presumably). You can use a mutex, or the datum could be `atomic<>`; etc. + * - Generally speaking, one uses thread-local stuff to avoid locking, so think hard before you do this. + * That said, locking is only expensive assuming lock contention; and if state_per_thread() work + * from a not-owner thread is rare, this might not matter perf-wise. It *does* matter complexity-wise + * though (typically), so informally we'd recommend avoiding it. + * - Things like `atomic` flags are pretty decent in these situations.
E.g., one can put into + * #Thread_local_state an `atomic<bool> m_do_flush{false}`; set it to `true` (with most-relaxed atomic mode) + * via while_locked() + state_per_thread() block when wanting a thread to perform an (e.g.) "flush" action; + * and in the owner-thread do checks like: + * `if (this_thread_state()->m_do_flush.compare_exchange_strong(true, false, relaxed)) { flush_stuff(); }`. + * It is speedy and easy. + * - You could also surround any access, from the proper owner thread, to that `Thread_local_state` pointee + * with while_locked(). Again, usually one uses thread-local stuff to avoid such central-locking actions; + * but it is conceivable to use it judiciously. + * + * ### Thread safety ### + * Behavior is undefined, if this is called *not* from within while_locked(). + * Rationale: It might seem like it would have been safe to "just" make a copy of this container (while locking + * its contents briefly) and return that. In and of itself that's true, and as long as one never dereferences + * any `Thread_local_state` pointees, it is safe. (E.g., one could look at the thread IDs/nicknames in the + * thus-stored Metadata objects and log them. Not bad.) However dereferencing such a #Thread_local_state pointee + * is not safe outside while_locked(): at any moment its rightful-owning thread might exit and therefore + * `delete` it. + * + * @param safety_lock + * Please pass the argument to `task()` given to while_locked(). + * @return See above. + */ + const State_per_thread_map& state_per_thread(const Lock& safety_lock) const; + + /** + * Locks the non-recursive registry mutex, such that no access or modification of the (deep or shallow) contents + * of state_per_thread() shall concurrently occur from within `*this` + * or other `this->while_locked()` call(s); executes given task; and unlocks said mutex. + * + * It is informally expected, though not required, that `task()` shall use state_per_thread(). + * Please see state_per_thread() doc header.
+ * + * Behavior is undefined (actually: deadlock) if task() calls `this->while_locked()` (the mutex is non-recursive). + * + * ### Interaction with #Thread_local_state ctor ### + * See this_thread_state() doc header. To briefly restate, though: #Thread_local_state ctor, when invoked by + * this_thread_state() on first call in a given thread, is invoked inside a while_locked(). Therefore do not + * call while_locked() from such a ctor, as it will deadlock. From a more positive perspective, informally speaking: + * you may rely on while_locked() being active at all points inside a #Thread_local_state ctor. + * + * @tparam Task + * Function object matching signature `void F(const Lock&)`. + * @param task + * This will be invoked as follows: `task(lock)`. + */ + template + void while_locked(const Task& task); + + /** + * Returns nickname, a brief string suitable for logging. This is included in the output by the `ostream<<` + * operator as well. This always returns the same value. + * + * @return See above. + */ + const std::string& nickname() const; + + /** + * Performs `Log_context_mt::set_logger(logger_ptr)`; and -- if #S_TL_STATE_HAS_MT_LOG_CONTEXT is `true` -- + * propagates it to each extant #Thread_local_state via `state->set_logger(logger_ptr)`. + * + * @see also #m_create_state_func doc header w/r/t the effect of #S_TL_STATE_HAS_MT_LOG_CONTEXT on that by + * default. + * + * ### Thread safety ### + * It is safe to call this concurrently with (any thread-first invocation of) this_thread_state() on `*this`. + * + * @param logger_ptr + * Logger to use for logging subsequently. Reminder: can be null. + */ + void set_logger(log::Logger* logger_ptr); + +private: + // Types. + + /// Short-hand for mutex type. + using Mutex = Lock::mutex_type; + + /** + * The entirety of the cross-thread registry state, in a `struct` so as to be able to wrap it in a `shared_ptr`. + * See doc header for Registry_ctl::m_state_per_thread for key info. 
+ */ + struct Registry_ctl + { + // Data. + + /// Protects the Registry_ctl (or `m_state_per_thread`; same difference). + mutable Mutex m_mutex; + + /** + * Registry containing each #Thread_local_state, one per distinct thread to have created one via + * this_thread_state() and not yet exited (rather, not yet executed the on-thread-exit cleanup + * of its #Thread_local_state). In addition the mapped values are informational metadata Metadata. + * + * ### Creation and cleanup of each `Thread_local_state` (using this member) ### + * So, in a given thread T: + * - The first (user-invoked) this_thread_state() call shall: lock #m_mutex, insert into #m_state_per_thread, + * unlock. + * - If `*this` is around when T is exiting, the on-thread-exit cleanup function shall: + * obtain `shared_ptr` (via `weak_ptr` observer); then lock, delete from #m_state_per_thread, + * `delete` the #Thread_local_state itself, unlock. + * - If `*this` is not around when T is exiting: + * - The cleanup function will not run at all, as the `thread_specific_ptr` controlling that is gone. + * - To free resources in timely fashion, the dtor shall (similarly to cleanup function): + * lock, delete from #m_state_per_thread (`.clear()` them all), `delete` the #Thread_local_state itself, + * unlock. + * - If `*this` is around when T is exiting, but `*this` is being destroyed, and `shared_ptr` + * has been destroyed already (as seen via `weak_ptr` observer); then the `*this` dtor has run already, + * so cleanup function will do (almost) nothing and be right to do so. + */ + State_per_thread_map m_state_per_thread; + }; // struct Registry_ctl + + /** + * The actual user #Thread_local_state stored per thread as lazily-created in this_thread_state(); plus + * a small bit of internal book-keeping. What book-keeping, you ask? Why not just a #Thread_local_state, you ask? 
+ * Answer: + * + * ### Rationale w/r/t the `weak_ptr` ### + * The essential problem is that in cleanup() (which is called by thread X that earlier issued + * `Thread_local_state* x` via this_thread_state() if and only if at X exit `*this` still exists, and therefore + * so does #m_this_thread_state_or_null) we cannot be sure that `x` isn't being concurrently `delete`d and + * removed from #m_ctl by the (unlikely, but possibly) concurrently executing `*this` dtor. To do that + * we must first lock `m_ctl->m_mutex`. However, `*m_ctl` might concurrently disappear! This is perfect + * for `weak_ptr`: we can "just" capture a `weak_ptr` of `shared_ptr` #m_ctl and either grab a co-shared-pointer + * of `m_ctl` via `weak_ptr::lock()`; or fail to do so which simply means the dtor will do the cleanup anyway. + * + * Perfect! Only one small problem: `thread_specific_ptr` does take a cleanup function... but not a cleanup + * *function object*. It takes a straight-up func-pointer. Therefore we cannot "just capture" anything. This + * might seem like some bizarre anachronism, where boost.thread guys made an old-school API and never updated it. + * This is not the case though. A function pointer is a pointer to code -- which will always exist. A functor + * stores captures. So now they have to decide where/how to store that. To store it as regular non-thread-local + * data would mean needing a mutex, and in any case it breaks their guiding principle of holding only thread-local + * data -- either natively via pthreads/whatever of via `thread_local`. Storing it as thread-local means it's + * just more thread-local state that either itself has to be cleaned up -- which means user could just place it + * inside the stored type in the first place -- or something that will exist/leak beyond the `thread_specific_ptr` + * itself assigned to that thread. 
+ * + * ### Rationale w/r/t the `weak_ptr` being in the `thread_specific_ptr` itself ### + * To summarize then: The member #m_ctl_observer is, simply, the (per-thread) `weak_ptr` to the registry's #m_ctl, + * so that `cleanup(X)` can obtain Registry_ctl::m_state_per_thread and delete the `Thread_local_state* X` from + * that map (see Registry_ctl::m_state_per_thread doc header). Simple, right? Well.... + * + * If cleanup() runs and finishes before dtor starts, then things are simple enough! Grab `m_ctl` from + * `m_ctl_observer`. Delete the Tl_context from Registry_ctl::m_state_per_thread. Delete the `Thread_local_state` + * and the Tl_context (passed to cleanup() by `thread_specific_ptr`). + * + * If dtor runs before a given thread exits, then again: simple enough. Dtor can just do (for each thread's stuff) + * what cleanup() would have done; hence for the thread in question it would delete the `Thread_local_state` and + * `Tl_context` and delete the entry from Registry_ctl::m_state_per_thread. cleanup() will just not run. + * + * The problems begin in the unlikely but eminently possible, and annoying, scenario wherein they both run at + * about the same time, but the dtor gets to the `m_mutex` first and deletes all the `Tl_context`s as well as + * clearing the map. cleanup() is already running though... and it needs the `weak_ptr m_ctl_observer` so it + * can even try to cooperate with the dtor, via `m_ctl_observer.lock()` to begin-with... except the `Tl_context` + * was just deleted: crash/etc. + * + * It's a chicken/egg problem: *the* chicken/egg problem. The `weak_ptr` cannot itself be part of the watched/deleted + * state, as it is used to synchronize access to it between dtor and cleanup(), if they run concurrently. + * So what do we do? Well... won't lie to you... we leak the `weak_ptr` and `Tl_context` that stores it + * (roughly 24-32ish bytes in x86-64), in the case where dtor runs first, and cleanup() doesn't (meaning, a thread + * outlives the `*this`).
(If cleanup() runs, meaning the `*this` outlives a thread, such as if `*this` is + * being stored `static`ally or globally, then no leak.) It is a tiny leak, per thread (that outlives + * a `Thread_local_state_registry` object), per `Thread_local_state_registry` object. + * + * Any way to avoid it? Probably. Possibly. One approach (which we tried) is to store a + * `static thread_local unordered_map` and save the observer-ptr + * in that, while Tl_context is not necessary, and #m_this_thread_state_or_null holds a `Thread_local_state*` + * directly. The thread-outlives-`*this` scenario just then means any "leak" is only until the thread + * exits (at which point the whole `unordered_map` goes away by C++ rules of `thread_local`s, including any + * "leaked" entries in that map). That is better. The problem (which we observed -- painfully) is + * it cannot be guaranteed that this new `static thread_local` map de-init occurs *after* every cleanup() runs; + * it might happen before: then it's all over; cleanup() cannot trust the map's contents and might even crash. + * + * Now the problem is the interplay between a `thread_specific_ptr` and built-in `thread_local`; 2 `thread_local`s + * and their relative de-init order is already an obscure enough topic; but the `thread_specific_ptr` behavior in this + * sense is unspecified (and empirically speaking I (ygoldfel) couldn't see anything obvious; in terms of impl + * it might be doing native stuff in Linux as opposed to `thread_local`... but I digress... it is not workable). + * + * It is possibly (probably?) doable to abandon `thread_specific_ptr` and (essentially) reimplement that part + * by using `thread_local` directly. However that thing must be `static`, so now we have to reimplement a + * map from `this`es to `Thread_local_state*`... and so on. Having done that -- difficult/tedious enough -- now + * we have to wrangle `static thread_local` relative de-init order. 
Supposedly the order is guaranteed by the + * standard but... it's not particularly pleasant a prospect to deal with it. Hence I am not making this a formal + * to-do; even though a part of me thinks that's maybe the most solid approach and puts things in our control most + * firmly. + * + * Just, the Tl_context wrapper-with-small-possible-leak-per-thread design is fairly pragmatic without having to + * engage in all kinds of masochism. Still it's a bit yucky in an aesthetic sense. + */ + struct Tl_context + { + /// Observer of (existent or non-existent) daddy's #m_ctl. See Tl_context doc header for explanation. + boost::weak_ptr m_ctl_observer; + /** + * The main user state. Never null; but `*m_state` has been freed (`delete`d) if and only if the pointer + * `m_state` is no longer in `m_ctl_observer.lock()->m_state_per_thread`, or if `m_ctl_observer.lock() == nullptr`. + */ + Thread_local_state* m_state; + }; + + // Methods. + + /** + * Called by `thread_specific_ptr` for a given thread's `m_this_thread_state_or_null.get()`, if `*this` dtor has not + * yet destroyed #m_this_thread_state_or_null. With proper synchronization: + * does `delete ctx->m_state` and `delete ctx` and removes the former from Registry_ctl::m_state_per_thread. + * It is possible that the `*this` dtor runs concurrently (if a relevant thread is exiting right around + * the time the user chooses to invoke dtor) and manages to `delete ctx->m_state` first; however it will *not* + * delete the surrounding `ctx`; so that cleanup() can be sure it can access `*ctx` -- but not necessarily + * `*ctx->m_state`. + * + * @param ctx + * Value stored in #m_this_thread_state_or_null; where `->m_state` was returned by at least one + * this_thread_state() in this thread. Not null. + */ + static void cleanup(Tl_context* ctx); + + // Data. + + /// See nickname(). 
+ const std::string m_nickname; + + /** + * In a given thread T, `m_this_thread_state_or_null.get()` is null if this_thread_state() has not yet been + * called by `*this` user; else (until either `*this` dtor runs, or at-thread-exit cleanup function runs) + * pointer to T's thread-local Tl_context object which consists mainly of a pointer to T's + * thread-local #Thread_local_state object; plus a bit of book-keeping. (See Tl_context for details on the + * latter.) + * + * ### Cleanup: key discussion ### + * People tend to declare `thread_specific_ptr x` either `static` or global, because in that case: + * - Either `delete x.get()` (default) or `cleanup_func(x.get())` (if one defines custom cleanup func) + * runs for each thread... + * - ...and *after* that during static/global deinit `x` own dtor runs. (It does do `x.reset()` in *that* + * thread but only that thread; so at "best" one thread's cleanup occurs during `thread_specific_ptr` dtor.) + * + * We however declare it as a non-`static` data member. That's different. When #m_this_thread_state_or_null + * is destroyed (during `*this` destruction), if a given thread T (that is not the thread in which dtor is + * executing) has called this_thread_state() -- thus has `m_this_thread_state_or_null.get() != nullptr` -- and + * is currently running, then its #Thread_local_state shall leak. Cleanup functions run only while the owner + * `thread_specific_ptr` exists. Boost.thread docs specifically say this. + * + * Therefore, in our case, we can make it `static`: but then any cleanup is deferred until thread exit; + * and while it is maybe not the end of the world, we strive to be better; the whole point of the registry + * is to do timely cleanup. So then instead of that we: + * - Keep a non-thread-local registry Registry_ctl::m_state_per_thread of each thread's thread-local + * #Thread_local_state. + * - In dtor iterate through that registry and delete 'em. 
+ * + * Let `p` stand for `m_this_thread_state_or_null.get()->m_state`: if `p != nullptr`, that alone does not + * guarantee that `*p` is valid. It is valid if and only if #m_ctl is a live `shared_ptr` (as determined + * via `weak_ptr`), and `p` is in Registry_ctl::m_state_per_thread. If #m_ctl is not live + * (`weak_ptr::lock()` is null), then `*this` is destroyed or very soon to be destroyed, and its dtor thus + * has `delete`d `p`. If #m_ctl is live, but `p` is not in `m_ctl->m_state_per_thread`, then + * the same is true: just we happened to have caught the short time period after the dtor deleting all states + * and clearing `m_state_per_thread`, but while the surrounding Registry_ctl still exists. + * + * So is it safe to access `*p`, when we do access it? Answer: We access it in exactly 2 places: + * - When doing `delete p` (in dtor, or in on-thread-exit cleanup function for the relevant thread). + * This is safe, because `p` is live if and only if it is in Registry_ctl::m_state_per_thread + * (all this being mutex-synchronized). + * - By user code, probably following this_thread_state() to obtain `p`. This is safe, because: + * It is illegal for them to access `*this`-owned state after destroying `*this`. + * + * As for the stuff in `m_this_thread_state_or_null.get()` other than `p` -- the Tl_context surrounding it -- + * again: see Tl_context doc header. + */ + boost::thread_specific_ptr<Tl_context> m_this_thread_state_or_null; + + /// The non-thread-local state. See Registry_ctl docs. `shared_ptr` is used only for `weak_ptr`.
+ boost::shared_ptr<Registry_ctl> m_ctl; +}; // class Thread_local_state_registry + +/** + * Optional-use companion to Thread_local_state_registry that enables the `Polled_shared_state` pattern wherein + * from some arbitrary thread the user causes the extant thread-locally-activated threads to opportunistically collaborate + * on/using locked shared state, with the no-op fast-path being gated by a high-performance-low-strictness + * atomic-flag being `false`. + * + * This `Polled_shared_state` pattern (I, ygoldfel, made that up... don't know if it's a known thing) is + * maybe best explained by example. Suppose we're using Thread_local_state_registry with + * `Thread_local_state` type being `T`. Suppose that sometimes some event occurs, in an arbitrary thread (for + * simplicity let's say that is not in any thread activated by the `Thread_local_state_registry`) that + * requires each state to execute `thread_locally_launch_rocket()`. Lastly, suppose that upon launching the + * *last* rocket required, we must report success via `report_success()` from whichever thread did it. + * + * However there are 2-ish problems at least: + * - We're not in any of those threads; we need to inform them somehow they each need to + * `thread_locally_launch_rocket()`. There's no way to signal them to do it immediately necessarily; + * but we can do it opportunistically to any thread that has already called `this_thread_state()` (been activated). + * - Plus there's the non-trivial accounting regarding "last one to launch does a special finishing step" that + * requires keeping track of work-load in some shared state. + * - Not to mention the fact that the "let's launch missiles!" event might occur before the planned launches + * have had a chance to proceed; since then more threads may have become activated and would need to be + * added to the list of "planned launches." + * - Typically we don't need to launch any rockets; and the *fast-path* is that we in fact don't.
+ * It is important that each activated thread can ask "do we need to launch-rocket?" and get the probable + * answer "no" extremely quickly: without locking any mutex, and even more importantly without any contention if + * we do. If the answer is "yes," which is assumed to be rare, *then* even lock-contended-locking is okay. + * + * To handle these challenges the pattern is as follows. + * - The #Shared_state template param here is perhaps `set`: the set of `T`s (each belonging to an + * activated thread that has called `Thread_local_state_registry::this_thread_state()`) that should execute, + * and have not yet executed, `thread_locally_launch_rocket()`. + * - Wherever the `Thread_local_state_registry` is declared/instantiated -- e.g., `static`ally -- + * also declare `Polled_shared_state>`, *immediately before* the registry. + * - In `T` ctor -- which by definition executes only in an activated thread and only once -- prepare + * an opaque atomic-flag-state by executing this_thread_poll_state() and saving the returned `void*` + * into a non-`static` data member of `T` (say, `void* const m_missile_launch_needed_poll_state`). + * - If the "let's launch missiles" event occurs, in its code do: + * + * ~~~ + * registry.while_locked([&](const auto& lock) // Any access across per-thread state is done while_locked(). + * { + * const auto& state_per_thread = registry.state_per_thread(lock); + * if (state_per_thread.empty()) { return; } // No missiles to launch for sure; forget it. + * + * // Load the shared state (while_locked()): + * missiles_to_launch_polled_shared_state.while_locked([&](set* threads_that_shall_launch_missiles) + * { + * // *threads_that_shall_launch_missiles is protected against concurrent change. 
+ * for (const auto& state_and_mdt : state_per_thread)
+ * {
+ * T* const active_per_thread_t = state_and_mdt.first;
+ * threads_that_shall_launch_missiles->insert(active_per_thread_t);
+ * }
+ * });
+ *
+ * // *AFTER!!!* loading the shared state, arm the poll-flag:
+ * for (const auto& state_and_mdt : state_per_thread)
+ * {
+ * T* const active_per_thread_t = state_and_mdt.first;
+ * missiles_to_launch_polled_shared_state.arm_next_poll(active_per_thread_t->m_missile_launch_needed_poll_state);
+ * // (We arm every per-thread T; but it is possible and fine to do it only for some.)
+ * // Also note it might already be armed; this would keep it armed; no problem. Before the for()
+ * // the set<> might already have entries (launches planned, now we're adding possibly more to it).
+ * }
+ * });
+ * ~~~
+ *
+ * So that's the setup/arming; and now to consume it:
+ * - In each relevant thread, such that `this_thread_state()` has been called in it (and therefore a `T` exists),
+ * whenever the opportunity arises, check the poll-flag, and in the rare case where it is armed,
+ * do `thread_locally_launch_rocket()`:
+ *
+ * ~~~
+ * void opportunistically_launch_when_triggered() // Assumes: bool(registry.this_thread_state_or_null()) == true.
+ * {
+ * T* const this_thread_state = registry.this_thread_state();
+ * if (!missiles_to_launch_polled_shared_state.poll_armed(this_thread_state->m_missile_launch_needed_poll_state))
+ * { // Fast-path! Nothing to do re. missile-launching.
+ * return;
+ * }
+ * // else: Slow-path. Examine the shared-state; do what's needed. Note: poll_armed() would now return false.
+ * missiles_to_launch_polled_shared_state.while_locked([&](set* threads_that_shall_launch_missiles)
+ * {
+ * if (threads_that_shall_launch_missiles->erase(this_thread_state) == 0)
+ * {
+ * // Already-launched? Bug? It depends on your algorithm. But the least brittle thing to do is likely:
+ * return; // Nothing to do (for us) after all.
+ * }
+ * // else: Okay: we need to launch, and we will, and we've marked our progress about it.
+ * thread_locally_launch_rocket();
+ *
+ * if (threads_that_shall_launch_missiles->empty())
+ * {
+ * report_success(); // We launched the last required missile... report success.
+ * }
+ * });
+ * }
+ * ~~~
+ *
+ * Hopefully that explains it. It is a little rigid and a little flexible; the nature of #Shared_state is
+ * arbitrary, and the above is probably the simplest form of it (but typically we suspect it will usually involve
+ * some container(s) tracking some subset of extant `T*`s).
+ *
+ * Though, perhaps an even simpler scenario might be #Shared_state being an empty `struct Dummy {};`,
+ * so that the atomic-flags being armed are the only info actually being transmitted.
+ * In the above example that would have been enough -- if not for the requirement to `report_success()`,
+ * when the last missile is launched.
+ *
+ * ### Performance ###
+ * The fast-path reasoning is that (1) the arming event occurs rarely and therefore is not part of any fast-path;
+ * and (2) thread-local logic can detect `poll_armed() == false` first-thing and do nothing further.
+ * Internally we facilitate speed further by poll_armed() using an `atomic` with an optimized memory-ordering
+ * setting that is nevertheless safe (impl details omitted here). Point is, `if (!....poll_armed()) { return }` shall
+ * be a quite speedy check.
+ *
+ * Last but not least: If #Shared_state is empty (formally: `is_empty_v == true`; informally:
+ * use, e.g., `struct Dummy {};`), then while_locked() will not be generated, and trying to write code that
+ * calls it will cause a compile-time `static_assert()` fail. As noted earlier using Polled_shared_state, despite
+ * the name, for not any shared state but just the thread-local distributed flag arming/polling = a perfectly
+ * valid approach.
+ */
+template
+class Polled_shared_state :
+ private boost::noncopyable
+{
+public:
+ // Types.
+ + /// Short-hand for template parameter type. + using Shared_state = Shared_state_t; + + // Constructors/destructor. + + /** + * Forwards to the stored object's #Shared_state ctor. You should also, in thread-local context, + * memorize ptr returned by this_thread_poll_state(). + * + * Next: outside thread-local context use while_locked() to check/modify #Shared_state contents safely; then + * for each relevant per-thread context `this->arm_next_poll(x)`, where `x` is the saved this_thread_poll_state(); + * this shall cause `this->poll_armed()` in that thread-local context to return `true` (once, until you + * again arm_next_poll() it). + * + * @tparam Ctor_args + * See above. + * @param shared_state_ctor_args + * See above. + */ + template + Polled_shared_state(Ctor_args&&... shared_state_ctor_args); + + // Methods. + + /** + * Locks the non-recursive shared-state mutex, such that no access or modification of the contents + * of the #Shared_state shall concurrently occur; executes given task; and unlocks said mutex. + * + * Behavior is undefined (actually: deadlock) if task() calls `this->while_locked()` (the mutex is non-recursive). + * + * @tparam Task + * Function object matching signature `void F(Shared_state*)`. + * @param task + * This will be invoked as follows: `task(shared_state)`. `shared_state` shall point to the object + * stored in `*this` and constructed in our ctor. + */ + template + void while_locked(const Task& task); + + /** + * To be called from a thread-local context in which you'll be checking poll_armed(), returns opaque pointer + * to save in your Thread_local_state_registry::Thread_local_state and pass to poll_armed(). + * + * @return See above. 
+ */ + void* this_thread_poll_state(); + + /** + * To be called from any context (typically not the targeted thread-local context in which you'll be checking + * poll_armed, though that works too), this causes the next poll_armed() called in the thread in which + * `thread_poll_state` was returned to return `true` (once). + * + * Tip: Typically one would use arm_next_poll() inside a Thread_local_state_registry::while_locked() + * statement, perhaps cycling through all of Thread_local_state_registry::state_per_thread() and + * arming the poll-flags of all or some subset of those `Thread_local_state`s. + * + * @param thread_poll_state + * Value from this_thread_poll_state() called from within the thread whose next poll_armed() you are + * targeting. + */ + void arm_next_poll(void* thread_poll_state); + + /** + * If the given thread's poll-flag is not armed, no-ops and returns `false`; otherwise returns `true` and resets + * poll-flag to `false`. Use arm_next_poll(), typically from a different thread, to affect when + * this methods does return `true`. Usually that means there has been some meaningful change to + * the stored #Shared_state, and therefore you should look there (and/or modify it) while_locked() immediately. + * + * @param thread_poll_state + * See arm_next_poll(). + * @return See above. + */ + bool poll_armed(void* thread_poll_state); + +private: + // Data. + + /** + * An atomic "do-something" flag per thread; usually/initially `false`; armed to `true` by arm_next_poll() + * and disarmed by poll_armed(). + */ + Thread_local_state_registry> m_poll_flag_registry; + + /// Protects #m_shared_state. + mutable Mutex_non_recursive m_shared_state_mutex; + + /// The managed #Shared_state. + Shared_state m_shared_state; +}; // class Polled_shared_state + +// Free functions: in *_fwd.hpp. + +// Thread_local_state_registry template implementations. 
+ +template +Thread_local_state_registry::Thread_local_state_registry + (log::Logger* logger_ptr, String_view nickname_str, + decltype(m_create_state_func)&& create_state_func) : + + log::Log_context_mt(logger_ptr, Flow_log_component::S_UTIL), + + m_create_state_func(std::move(create_state_func)), + m_nickname(nickname_str), + m_this_thread_state_or_null(cleanup), + m_ctl(boost::make_shared()) +{ + FLOW_LOG_INFO("Tl_registry[" << *this << "]: " + "Registry created (watched type has ID [" << typeid(Thread_local_state).name() << "])."); +} + +template +typename Thread_local_state_registry::Thread_local_state* + Thread_local_state_registry::this_thread_state_or_null() +{ + const auto ctx = m_this_thread_state_or_null.get(); + return ctx ? ctx->m_state : nullptr; +} + +template +typename Thread_local_state_registry::Thread_local_state* + Thread_local_state_registry::this_thread_state() +{ + using log::Logger; + + auto ctx = m_this_thread_state_or_null.get(); + if (!ctx) + { + // (Slow-path. It is OK to log and do other not-so-fast things.) + + /* We shall be accessing (inserting into) m_state_per_thread which understandably requires while_locked(). + * So bracket the following with while_locked(). Notice, though, that we do this seemingly earlier that needed: + * Inside, we (1) construct the new Thread_local_state; and only then (2) add it into m_state_per_thread. + * The mutex-lock is only necessary for (2). So why lock it now? Answer: We promised to do so. Why did we? + * Answer: See method doc header for rationale. */ + + while_locked([&](auto&&...) // Versus this_thread_state()/cleanup(). + { + // Time to lazy-init. 
As advertised: + decltype(m_create_state_func) create_state_func; + if (m_create_state_func.empty()) + { + if constexpr(S_TL_STATE_HAS_MT_LOG_CONTEXT && std::is_constructible_v) + { + create_state_func = [&]() -> auto { return new Thread_local_state{get_logger()}; }; + } + else if constexpr((!S_TL_STATE_HAS_MT_LOG_CONTEXT) && std::is_default_constructible_v) + { + create_state_func = []() -> auto { return new Thread_local_state; }; + } + else + { + FLOW_LOG_FATAL("Chose not to supply m_create_state_func at time of needing a new Thread_local_state. " + "In this case you must either have or <*not* derived from Log_context_mt but " + "supplied made T_l_s default-ctible>. Breaks contract; aborting."); + assert(false && "Chose not to supply m_create_state_func at time of needing a new Thread_local_state. " + "In this case you must either have or <*not* derived from Log_context_mt but " + "supplied made T_l_s default-ctible>. Breaks contract."); + std::abort(); + } + /* Subtlety: The is_*_constructible_v checks may seem like mere niceties -- why not just let it not-compile + * if they don't provide the expected ctor form given S_TL_STATE_HAS_MT_LOG_CONTEXT being true or false -- + * or even actively bad (why not just let it not-compile, so they know the problem at compile-time; or + * why not static_assert() it?). Not so: Suppose they *did* provide m_create_state_func, always, but + * lack the ctor form needed for the case where they hadn't. Then this code path would still try to get + * compiled -- and fail to compile -- even though there's no intention of it even ever executing. That would + * be annoying and unjust. The only downside of the solution is the null-m_create_state_func path can only + * fail at run-time, not compile-time; on balance that is better than the unjust alternative. */ + } // if (m_create_state_func.empty()) + else // if (!m_create_state_func.empty()) + { + create_state_func = m_create_state_func; // We specifically said we'd copy it. 
+ } + + ctx = new Tl_context; + ctx->m_ctl_observer = m_ctl; + ctx->m_state = create_state_func(); + + m_this_thread_state_or_null.reset(ctx); + + /* Now to set up the later cleanup, either at thread-exit, or from our ~dtor(), whichever happens first; + * and also to provide access to us via enumeration via state_per_thread(). */ + + typename decltype(Registry_ctl::m_state_per_thread)::value_type state_and_mdt{ctx->m_state, Metadata{}}; + auto& mdt = state_and_mdt.second; + /* Save thread info (for logging). (Note: Logger::set_thread_info() semantics are a bit surprising, out of + * the log-writing context. It outputs nickname if available; else thread ID if not.) */ + log::Logger::set_thread_info(&mdt.m_thread_nickname, + &mdt.m_thread_id); + if (mdt.m_thread_id == Thread_id{}) + { + mdt.m_thread_id = this_thread::get_id(); // Hence get it ourselves. + } + // else { nickname is blank. Nothing we can do about that though. } + + FLOW_LOG_INFO("Tl_registry[" << *this << "]: Adding thread-local-state @[" << ctx->m_state << "] " + "for thread ID [" << mdt.m_thread_id << "]/nickname [" << mdt.m_thread_nickname << "]; " + "watched type has ID [" << typeid(Thread_local_state).name() << "])."); +#ifndef NDEBUG + const auto result = +#endif + m_ctl->m_state_per_thread.insert(state_and_mdt); + assert(result.second && "How did `state` ptr value get `new`ed, if another thread has not cleaned up same yet?"); + }); // while_locked() + } // if (!ctx) + // else if (ctx) { Fast path: state already init-ed. Do not log or do anything unnecessary. } + + return ctx->m_state; +} // Thread_local_state_registry::this_thread_state() + +template +Thread_local_state_registry::~Thread_local_state_registry() +{ + using std::vector; + + FLOW_LOG_INFO("Tl_registry[" << *this << "]: " + "Registry shutting down (watched type has ID [" << typeid(Thread_local_state).name() << "]). 
" + "Will now delete thread-local-state for each thread that has not exited before this point."); + vector states_to_delete; + while_locked([&](auto&&...) // Versus cleanup() (possibly even 2+ of them). + { + for (const auto& state_and_mdt : m_ctl->m_state_per_thread) + { + const auto state = state_and_mdt.first; + const auto& mdt = state_and_mdt.second; + FLOW_LOG_INFO("Tl_registry[" << *this << "]: Deleting thread-local-state @[" << state << "] " + "for thread ID [" << mdt.m_thread_id << "]/nickname [" << mdt.m_thread_nickname << "]."); + + // Let's not `delete state` while locked, if only to match cleanup() avoiding it. + states_to_delete.push_back(state); + } + m_ctl->m_state_per_thread.clear(); + }); // while_locked() + + for (const auto state : states_to_delete) + { + delete state; + /* Careful! We delete `state` (the Thread_local_state) but *not* the Tl_context (we didn't even store + * it in the map) that is actually stored in the thread_specific_ptr m_this_thread_state_or_null. + * See Tl_context doc header for explanation. In short by leaving it alive we leave cleanup() able to + * run concurrently with ourselves -- unlikely but possible. */ + } + + /* Subtlety: When m_this_thread_state_or_null is auto-destroyed shortly, it will auto-execute + * m_this_thread_state_or_null.reset() -- in *this* thread only. If in fact this_thread_state() has been + * called in this thread, then it'll try to do cleanup(m_this_thread_state_or_null.get()); nothing good + * can come of that really. We could try to prevent it by doing m_this_thread_state_or_null.reset()... but + * same result. Instead we do the following which simply replaces the stored (now bogus) ptr with null, and + * that's it. We already deleted it, so that's perfect. */ + m_this_thread_state_or_null.release(); + + // After the }, m_ctl is nullified, and lastly m_this_thread_state_or_null is destroyed (a no-op in our context). 
+} // Thread_local_state_registry::~Thread_local_state_registry() + +template +void Thread_local_state_registry::cleanup(Tl_context* ctx) // Static. +{ + /* If the relevant *this has been destroyed, typically we would not be called. + * However it is possible that our thread T is exiting, and just then user in another thread chose to + * invoke *this dtor. Therefore we must carefully use locking and weak_ptr (as you'll see) to contend + * with this possibility; it might be worthwhile to read cleanup() and the dtor in parallel. + * + * By the way: Among other things, the relevant *this's Log_context might be around at one point but not another; + * and by contract same with the underlying Logger. So we cannot use ->get_logger()/log using that necessarily; we + * will just have to be quiet; that's life. */ + + auto& weak_ptr_to_ctl = ctx->m_ctl_observer; + const auto shared_ptr_to_ctl = weak_ptr_to_ctl.lock(); + if (!shared_ptr_to_ctl) + { + /* Relevant Thread_local_state_registry dtor was called late enough to coincide with current thread about to exit + * but not quite late enough for its thread_specific_ptr ->m_this_thread_state_or_null to be destroyed. + * Its shared_ptr m_ctl did already get destroyed though. So -- we need not worry about cleanup after all. + * This is rare and fun, but it is no different from that dtor simply running before this thread exited. + * It will be/is cleaning up our stuff (and everything else) -- except the *ctx wrapper itself. So clean that + * up (not actual ctx->m_state payload!); and GTFO. */ + delete ctx; + return; + } + // else + + /* Either the relevant Thread_local_state_registry dtor has not at all run yet, or perhaps it has started to run -- + * but we were able to grab the m_ctl fast enough. So now either they'll grab m_ctl->m_mutex first, or we will. */ + bool do_delete; + { + Lock lock{shared_ptr_to_ctl->m_mutex}; // Versus this_thread_state()/dtor/other thread's/threads' cleanup()(s). 
+ do_delete = (shared_ptr_to_ctl->m_state_per_thread.erase(ctx->m_state) == 1); + } // shared_ptr_to_ctl->m_mutex unlocks here. + + /* We don't want to `delete ctx->m_state` inside the locked section; it might not be necessarily always criminal -- + * but in some exotic but real situations the Thread_local_state dtor might launch a new, presumably detached, thread + * that would itself call this_thread_state() which would deadlock trying to lock the same mutex, if the + * dtor call doesn't finish fast enough. */ + if (do_delete) + { + delete ctx->m_state; // We got the mutex first. Their ~Thread_local_state() dtor runs here. + } + /* else { Guess the concurrently-running dtor got there first! It `delete`d ctx->m_state and + * m_state_per_thread.clear()ed instead of us. } */ + + delete ctx; // Either way we can free the little Tl_context; dtor never does that (known/justified leak by dtor). +} // Thread_local_state_registry::cleanup() // Static. + +template +template +void Thread_local_state_registry::while_locked(const Task& task) +{ + Lock lock{m_ctl->m_mutex}; + task(lock); +} + +template +const typename Thread_local_state_registry::State_per_thread_map& + Thread_local_state_registry::state_per_thread([[maybe_unused]] const Lock& safety_lock) const +{ + assert(safety_lock.owns_lock() && "Please call with value while_locked() passed to your task()."); + + return m_ctl->m_state_per_thread; +} + +template +const std::string& Thread_local_state_registry::nickname() const +{ + return m_nickname; +} + +template +void Thread_local_state_registry::set_logger(log::Logger* logger_ptr) +{ + using log::Log_context_mt; + + Log_context_mt::set_logger(logger_ptr); + + if constexpr(S_TL_STATE_HAS_MT_LOG_CONTEXT) + { + while_locked([&](auto&&...) 
+ {
+ for (const auto& state_and_mdt : m_ctl->m_state_per_thread)
+ {
+ const auto state = state_and_mdt.first;
+
+ state->set_logger(logger_ptr);
+ }
+ });
+ }
+} // Thread_local_state_registry::set_logger()
+
+template
+std::ostream& operator<<(std::ostream& os, const Thread_local_state_registry& val)
+{
+ return os << '[' << val.nickname() << "]@" << &val;
+}
+
+// Polled_shared_state template implementations.
+
+template
+template
+Polled_shared_state::Polled_shared_state(Ctor_args&&... shared_state_ctor_args) :
+ m_poll_flag_registry(nullptr, "",
+ []() -> auto { using Atomic_flag = typename decltype(m_poll_flag_registry)::Thread_local_state;
+ return new Atomic_flag{false}; }),
+ m_shared_state(std::forward(shared_state_ctor_args)...)
+{
+ // Yep.
+}
+
+template
+template
+void Polled_shared_state::while_locked(const Task& task)
+{
+ static_assert(!(std::is_empty_v),
+ "There is no need to call while_locked(), when your Shared_state type is empty; "
+ "doing the latter is useful when only Polled_shared_state thread-local flag arm/poll feature "
+ "is needed; but then there's no need to lock anything.");
+
+ flow::util::Lock_guard lock{m_shared_state_mutex};
+ task(&m_shared_state);
+}
+
+template
+void* Polled_shared_state::this_thread_poll_state()
+{
+ return static_cast(m_poll_flag_registry.this_thread_state());
+}
+
+template
+void Polled_shared_state::arm_next_poll(void* thread_poll_state)
+{
+ using Atomic_flag = typename decltype(m_poll_flag_registry)::Thread_local_state;
+
+ static_cast(thread_poll_state)->store(true, std::memory_order_release);
+
+ /* Explanation of memory_order_release here + memory_order_acquire when loading it in
+ * poll_armed():
+ *
+ * Our goal is to signal thread_poll_state's thread to -- when it next has a chance (opportunistic piggy-backing) --
+ * do stuff.
 Yet since it is opportunistic piggy-backing, the fast-path (where nothing needs to be done)
+ * needs to be lightning-fast. So we "just" set that bool flag. However
+ * we also need to tell it some more info and/or even have it update it, namely whatever
+ * Shared_state the user should have set-up before calling this arm_next_poll(). So we do it in the order:
+ * -1- user updates Shared_state while_locked()
+ * -2- set flag = true
+ * and in the proper thread later
+ * -A- check flag (poll_armed()), and if that returns true
+ * -B- user reads/updates Shared_state while_locked()
+ * The danger is that, to the other thread, our steps -1-2- will be reordered to -2-1-, and it will
+ * see flag=true with Shared_state not-ready/empty/whatever (disaster). However by using RELEASE for -2- and ACQUIRE
+ * for -A-, together with the presence of mutex-locking around -1- and -B- (while_locked(); plus the if-relationship
+ * between -A-B-), we guarantee the ordering -1-2-A-B- as required.
+ *
+ * Regarding perf: probably even the strict memory_order_seq_cst in both here and poll_armed() would've been
+ * reasonably quick; but we did better than that, approaching the minimally-strict memory_order_relaxed (but
+ * not quite). */
+} // Polled_shared_state::arm_next_poll()
+
+template
+bool Polled_shared_state::poll_armed(void* thread_poll_state)
+{
+ using Atomic_flag = typename decltype(m_poll_flag_registry)::Thread_local_state; // some_namespace::atomic.
+
+ /* Replace true (armed) with false (no longer armed); return true (was armed)...
+ * ...but if was already false (not armed), do nothing ("replace" it with false); return false (was not armed).
+ * memory_order_release: See explanation in arm_next_poll(). NOTE(review): that explanation calls for ACQUIRE
+ * semantics at the -A- (poll) step; a release-only exchange looks swapped/insufficient here -- confirm and
+ * consider std::memory_order_acquire (valid on a read-modify-write op).
*/ + return static_cast(thread_poll_state)->exchange(false, std::memory_order_release); + + /* (I (ygoldfel) initially wrote it as: + * bool exp = true; return ...->compare_exchange_strong(exp, false, ...release); + * because it "felt" somehow more robust to "formally" do-nothing, if it is already `false`... but it is clearly + * slower/weirder to most eyes.) */ +} + +} // namespace flow::util diff --git a/src/flow/util/util.cpp b/src/flow/util/util.cpp index f1132ccb3..2ac6bac81 100644 --- a/src/flow/util/util.cpp +++ b/src/flow/util/util.cpp @@ -100,7 +100,7 @@ class Duration_units_beautified : public boost::chrono::duration_units_default #include @@ -37,7 +38,7 @@ namespace flow::util * destructor, even if it is `= default` or `{}`. Otherwise, trying to delete an object of subclass `C2 : public C` * via a `C*` pointer will fail to call destructor `~C2()` -- which may not be empty, causing leaks and so on. * Declaring `~C()` and its empty implementation is surprisingly verbose. So, instead, don't; and `public`ly derive - * from Null_interface instead. + * from Null_interface. * * It is particularly useful for interface classes. */ @@ -93,10 +94,10 @@ struct Noncopyable * thread_local int s_this_thread_val; * ... * { - * Scoped_setter setter(&s_this_thread_val, 75); // Set it to 75 and memorize (whatever). + * Scoped_setter setter{&s_this_thread_val, 75}; // Set it to 75 and memorize (whatever). * ... * { - * Scoped_setter setter(&s_this_thread_val, 125); // Set it to 125 and memorize 75. + * Scoped_setter setter{&s_this_thread_val, 125}; // Set it to 125 and memorize 75. * ... * } // Restored from 125 to 75. * ... @@ -110,7 +111,7 @@ struct Noncopyable * thread_local Widget s_widget; * auto widget_setter_auto(Widget&& widget_moved) * { - * return flow::util::Scoped_setter(&s_widget, std::move(widget_moved)); + * return flow::util::Scoped_setter{&s_widget, std::move(widget_moved)}; * } * ... * { // Later, some block: Set s_widget. 
Code here doesn't even know/care a Scoped_setter is involved. @@ -231,7 +232,7 @@ Scoped_setter::~Scoped_setter() // else { `*this` must have been moved-from. No-op. } } -// Free function template implementations. +// Free function template (and/or constexpr) implementations. template double to_mbit_per_sec(N_items items_per_time, size_t bits_per_item) @@ -254,19 +255,27 @@ double to_mbit_per_sec(N_items items_per_time, size_t bits_per_item) / (double(Time_unit::period::num) * double(1000 * 1000)); } -template -Integer ceil_div(Integer dividend, Integer divisor) +template +constexpr Integer ceil_div(Integer dividend, Integer2 divisor) { // ceil(A : B) = (A + B - 1) / B, where : is floating point division, while / is integer division. - static_assert(std::is_integral_v, "ceil_div: T must be an integer type."); - assert(dividend >= 0); - assert(divisor > 0); + static_assert(std::is_integral_v, "ceil_div: T must be an integer type."); + static_assert(std::is_integral_v, "ceil_div: T2 must be an integer type."); + + // assert(dividend >= 0); // Cannot do that (or throw in C++17, can in C++20; @todo) in constexpr. + // assert(divisor > 0); // Ditto. - return (dividend + divisor - 1) / divisor; + return (dividend + static_cast(divisor) - static_cast(1)) / static_cast(divisor); /* (Could one do further bitwise trickery? Perhaps but let optimizer do it. Wouldn't optimizer also just * optimize a literal floating-point `ceil(a / b)`? Well, no. Probably not. So we wrote this function.) 
*/ } +template +constexpr Integer round_to_multiple(Integer dividend, Integer2 unit) +{ + return static_cast(unit) * ceil_div(dividend, unit); +} + template bool in_closed_range(T const & min_val, T const & val, T const & max_val) { @@ -316,8 +325,8 @@ Auto_cleanup setup_auto_cleanup(const Cleanup_func& func) * * Subtlety: we need to make a copy (via capture) of func, as there's zero guarantee (and low likelihood in practice) * that func is a valid object at the time cleanup is actually needed (sometime after we return). */ - return Auto_cleanup(static_cast(0), - [func](void*) { func(); }); + return Auto_cleanup{static_cast(nullptr), + [func](void*) { func(); }}; } template @@ -329,7 +338,7 @@ bool subtract_with_floor(Minuend* minuend, const Subtrahend& subtrahend, const M * The one underflow we allow is the subtraction of `floor`: doc header says keep `floor` small. * So it's their problem if it's not. */ - const Minuend converted_subtrahend = Minuend(subtrahend); + const Minuend converted_subtrahend = Minuend{subtrahend}; // min - sub <= floor <===> min - floor <= sub. if ((*minuend - floor) <= converted_subtrahend) @@ -348,23 +357,14 @@ size_t size_unit_convert(From num_froms) return ((num_froms * sizeof(From)) + sizeof(To) - 1) / sizeof(To); } -template -void feed_args_to_ostream(std::ostream* os, T1 const & ostream_arg1, T_rest const &... remaining_ostream_args) +template +void feed_args_to_ostream(std::ostream* os, T&&... ostream_args) { - // Induction step for variadic template. - feed_args_to_ostream(os, ostream_arg1); - feed_args_to_ostream(os, remaining_ostream_args...); + (*os << ... << std::forward(ostream_args)); } -template -void feed_args_to_ostream(std::ostream* os, T const & only_ostream_arg) -{ - // Induction base. - *os << only_ostream_arg; -} - -template -void ostream_op_to_string(std::string* target_str, T const &... ostream_args) +template +void ostream_op_to_string(std::string* target_str, T&&... 
ostream_args) { using std::flush; @@ -372,18 +372,18 @@ void ostream_op_to_string(std::string* target_str, T const &... ostream_args) * by copy via `ostringstream::copy()`. This is for performance and may make a large difference * overall, if this is used in logging for example. However, Thread_local_string_appender accomplishes * better performance still and some other features. */ - String_ostream os(target_str); - feed_args_to_ostream(&(os.os()), ostream_args...); + String_ostream os{target_str}; + feed_args_to_ostream(&(os.os()), std::forward(ostream_args)...); os.os() << flush; } -template -std::string ostream_op_string(T const &... ostream_args) +template +std::string ostream_op_string(T&&... ostream_args) { using std::string; string result; - ostream_op_to_string(&result, ostream_args...); + ostream_op_to_string(&result, std::forward(ostream_args)...); return result; } @@ -443,9 +443,9 @@ std::ostream& buffers_to_ostream(std::ostream& os, } // Ensure format settings return to their previous values subsequently. - ios_flags_saver flags_saver(os); - ios_fill_saver fill_saver(os); - ios_width_saver width_saver(os); + ios_flags_saver flags_saver{os}; + ios_fill_saver fill_saver{os}; + ios_width_saver width_saver{os}; /* Set formatting and output numeric value (hex) of first byte. * @todo Is there a way to write this with manipulators too? */ @@ -453,7 +453,6 @@ std::ostream& buffers_to_ostream(std::ostream& os, os << std::setfill('0'); const Bufs_iter end_byte_it = Bufs_iter::end(data); - for (Bufs_iter cur_byte_it = Bufs_iter::begin(data); cur_byte_it != end_byte_it; /* Advancing of cur_byte_it occurs within body of loop. */) @@ -512,7 +511,7 @@ std::string buffers_dump_string(const Const_buffer_sequence& data, const std::st // See comment in ostream_op_to_string() which applies here too (re. perf). 
string target_str; - String_ostream os(&target_str); + String_ostream os{&target_str}; buffers_to_ostream(os.os(), data, indentation, bytes_per_line); os.os() << flush; @@ -539,7 +538,7 @@ Enum istream_to_enum(std::istream* is_ptr, Enum enum_default, Enum enum_sentinel assert(enum_t(enum_lowest) >= 0); // Otherwise we'd have to allow '-' (minus sign), and we'd... just rather not. auto& is = *is_ptr; - const is_iequal i_equal_func(locale::classic()); + const is_iequal i_equal_func{locale::classic()}; // Read into `token` until (and not including) the first non-alphanumeric/underscore character or stream end. string token; @@ -565,9 +564,9 @@ Enum istream_to_enum(std::istream* is_ptr, Enum enum_default, Enum enum_sentinel { num_enum = enum_t(enum_default); } - val = Enum(num_enum); + val = Enum{num_enum}; } - catch (const bad_lexical_cast& exc) + catch (const bad_lexical_cast&) { assert(val == enum_default); } @@ -578,7 +577,7 @@ Enum istream_to_enum(std::istream* is_ptr, Enum enum_default, Enum enum_sentinel // This assumes a vanilla enum integer value ordering within this [closed range]. for (idx = enum_t(enum_lowest); idx != enum_t(enum_sentinel); ++idx) { - const auto candidate = Enum(idx); + const auto candidate = Enum{idx}; /* Note -- lexical_cast(Enum) == (operator<<(ostringstream&, Enum)).str() -- the symbolic * encoding of Enum (as we promised to accept, case-[in]sensitively), not the numeric encoding. The numeric * encoding is checked-for in the `if (accept_num_encoding...)` branch above using a non-looping technique. */ diff --git a/src/flow/util/util_fwd.hpp b/src/flow/util/util_fwd.hpp index a62b3426e..6867b1024 100644 --- a/src/flow/util/util_fwd.hpp +++ b/src/flow/util/util_fwd.hpp @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -39,6 +40,11 @@ */ namespace flow::util { +// Constants. + +/// Configures a #Span at compile-time to have a variable, rather than constant, length (use it as template arg). 
+constexpr size_t DYNAMIC_EXTENT = boost::dynamic_extent; + // Types. // Find doc headers near the bodies of these compound types. @@ -66,6 +72,12 @@ class Shared_ptr_alias_holder; class String_ostream; +template +class Thread_local_state_registry; + +template +class Polled_shared_state; + class Unique_id_holder; /** @@ -326,6 +338,20 @@ using Lock_guard_noop_shared_non_recursive_sh = boost::shared_lock; +/** + * Short-hand for `std::span`-like type; that is contiguous sequence of objects with the first element of the sequence + * at position zero. + * + * @todo Once we switch to C++20 (from C++17), #Span can be pointed at `std::span` instead of `boost::span`. + * + * @tparam T + * Element type. + * @tparam E + * Compile-time constant length of a span of this type; or `DYNAMIC_EXTENT` for it to be variable. + */ +template +using Span = boost::span; + // Free functions. /** @@ -337,8 +363,9 @@ using Lock_guard_noop_shared_non_recursive_ex = boost::unique_lock -void swap(Linked_hash_map& val1, Linked_hash_map& val2); +template +void swap(Linked_hash_map& val1, + Linked_hash_map& val2); /** * Equivalent to `val1.swap(val2)`. @@ -349,8 +376,9 @@ void swap(Linked_hash_map& val1, Linked_hash_map -void swap(Linked_hash_set& val1, Linked_hash_set& val2); +template +void swap(Linked_hash_set& val1, + Linked_hash_set& val2); /** * Get the current POSIX (Unix) time as a duration from the Epoch time point. @@ -503,7 +531,7 @@ std::string buffers_dump_string(const Const_buffer_sequence& data, const std::st /** * Utility that converts a bandwidth in arbitrary units in both numerator and denominator to the same bandwidth - * in megabits per second. The input bandwidth is given in "items" per `Time_unit(1)`; where `Time_unit` is an + * in megabits per second. The input bandwidth is given in "items" per `Time_unit{1}`; where `Time_unit` is an * arbitrary boost.chrono `duration` type that must be explicitly provided as input; and an "item" is defined * as `bits_per_item` bits. 
Useful at least for logging. It's probably easiest to understand by example; see below; * rather than by parsing that description I just wrote. @@ -544,7 +572,7 @@ std::string buffers_dump_string(const Const_buffer_sequence& data, const std::st * @tparam N_items * Some (not necessarily integral) numeric type. Strictly speaking, any type convertible to `double` works. * @param items_per_time - * The value, in items per `Time_unit(1)` (where there are `bits_per_item` bits in 1 item) to convert to + * The value, in items per `Time_unit{1}` (where there are `bits_per_item` bits in 1 item) to convert to * megabits per second. Note this need not be an integer. * @param bits_per_item * Number of bits in an item, where `items_per_time` is given as a number of items. @@ -557,16 +585,42 @@ double to_mbit_per_sec(N_items items_per_time, size_t bits_per_item = 8); * Returns the result of the given non-negative integer divided by a positive integer, rounded up to the nearest * integer. Internally, it avoids floating-point math for performance. * + * @note `constexpr`: can be used in compile-time expressions. + * @see round_to_multiple() which is essentially `return b * ceil_div(a, b)`. + * * @tparam Integer * A signed or unsigned integral type. + * @tparam Integer2 + * Ditto. * @param dividend - * Dividend; non-negative or assertion trips. + * Dividend; non-negative (else results undefined). * @param divisor - * Divisor; positive or assertion trips. + * Divisor; positive (else results undefined). * @return Ceiling of `(dividend / divisor)`. */ -template -Integer ceil_div(Integer dividend, Integer divisor); +template +constexpr Integer ceil_div(Integer dividend, Integer2 divisor); + +/** + * Returns the smallest integer >= the given integer `dividend` such that it is a multiple + * of the given integer `unit`. Essentially that is: `unit * ceil_div(dividend, unit)`. + * + * For example: 0 in 1024s => 0; 1 in 1024s => 1024; 1024 in 1024s => 1024; 1025 in 1024s => 2048.
+ * + * @note `constexpr`: can be used in compile-time expressions. + * + * @tparam Integer + * A signed or unsigned integral type. + * @tparam Integer2 + * Ditto. + * @param dividend + * Dividend; non-negative (else results undefined). + * @param unit + * Divisor; positive (else results undefined). + * @return See above. + */ +template +constexpr Integer round_to_multiple(Integer dividend, Integer2 unit); /** * Provides a way to execute arbitrary (cleanup) code at the exit of the current block. Simply @@ -697,7 +751,7 @@ bool key_exists(const Container& container, const typename Container::key_type& * @tparam Minuend * Numeric type. * @tparam Subtrahend - * Numeric type, such that given `Subtrahend s`, `Minuend(s)` is something reasonable for all `s` involved. + * Numeric type, such that given `Subtrahend s`, `Minuend{s}` is something reasonable for all `s` involved. * @param minuend * `*minuend` is set to either `(*minuend - subtrahend)` or `floor`, whichever is higher. * @param subtrahend @@ -790,7 +844,7 @@ void sequence_to_inverted_lookup_map * @see log::Thread_local_string_appender for an even more efficient version of this for some applications that can * also enable a continuous stream across multiple stream-writing statements over time. * - * @tparam ...T + * @tparam T * Each type `T` is such that `os << t`, with types `T const & t` and `ostream& os`, builds and writes * `t` to `os`, returning lvalue `os`. * Usually in practice this means the existence of `ostream& operator<<(ostream&, T const &)` or @@ -803,8 +857,8 @@ void sequence_to_inverted_lookup_map * One or more arguments, such that each argument `arg` is suitable for `os << arg`, where * `os` is an `ostream`. */ -template -void ostream_op_to_string(std::string* target_str, T const &... ostream_args); +template +void ostream_op_to_string(std::string* target_str, T&&... 
ostream_args); /** * Equivalent to ostream_op_to_string() but returns a new `string` by value instead of writing to the caller's @@ -814,45 +868,27 @@ void ostream_op_to_string(std::string* target_str, T const &... ostream_args); * With the C++11-y use of move semantics in STL it should be no slower than using * `ostream_op_to_string()` -- meaning, it is no slower, period, as this library now requires C++11. * - * @tparam ...T + * @tparam T * See ostream_op_to_string(). * @param ostream_args * See ostream_op_to_string(). * @return Resulting `std::string`. */ -template -std::string ostream_op_string(T const &... ostream_args); - -/** - * "Induction step" version of variadic function template that simply outputs arguments 2+ via - * `<<` to the given `ostream`, in the order given. - * - * @tparam ...T_rest - * See `...T` in ostream_op_to_string(). - * @param remaining_ostream_args - * See `ostream_args` in ostream_op_to_string(). - * @tparam T1 - * Same as each of `...T_rest`. - * @param ostream_arg1 - * Same as each of `remaining_ostream_args`. - * @param os - * Pointer to stream to which to sequentially send arguments for output. - */ -template -void feed_args_to_ostream(std::ostream* os, T1 const & ostream_arg1, T_rest const &... remaining_ostream_args); +template +std::string ostream_op_string(T&&... ostream_args); /** - * "Induction base" for a variadic function template, this simply outputs given item to given `ostream` via `<<`. + * Function template that simply outputs arguments 2+ via `<<` to the given `ostream`, in the order given. * * @tparam T - * See each of `...T` in ostream_op_to_string(). + * See `T` in ostream_op_to_string(). + * @param ostream_args + * See `ostream_args` in ostream_op_to_string(). * @param os * Pointer to stream to which to sequentially send arguments for output. - * @param only_ostream_arg - * See each of `ostream_args` in ostream_op_to_string(). 
*/ -template -void feed_args_to_ostream(std::ostream* os, T const & only_ostream_arg); +template +void feed_args_to_ostream(std::ostream* os, T&&... ostream_args); /** * Deserializes an `enum class` value from a standard input stream. Reads up to but not including the next @@ -934,6 +970,20 @@ void beautify_chrono_ostream(std::ostream* os); */ size_t deep_size(const std::string& val); +/** + * Serializes a Thread_local_state_registry to a standard output stream. + * + * @relatesalso Thread_local_state_registry + * + * @param os + * Stream to which to serialize. + * @param val + * Value to serialize. + * @return `os`. + */ +template +std::ostream& operator<<(std::ostream& os, const Thread_local_state_registry& val); + // Macros. /** diff --git a/test/basic/net_flow/echo/cli/echo_client.cpp b/test/basic/net_flow/echo/cli/echo_client.cpp index f1c31c1e7..2cee45e53 100644 --- a/test/basic/net_flow/echo/cli/echo_client.cpp +++ b/test/basic/net_flow/echo/cli/echo_client.cpp @@ -66,7 +66,7 @@ int main([[maybe_unused]] int argc, [[maybe_unused]] const char** argv) std_log_config.init_component_to_union_idx_mapping(1000, 999, true); std_log_config.init_component_names(flow::S_FLOW_LOG_COMPONENT_NAME_MAP, false, "cli-"); - Simple_ostream_logger std_logger(&std_log_config); + Simple_ostream_logger std_logger{&std_log_config}; FLOW_LOG_SET_CONTEXT(&std_logger, Flow_log_component::S_UNCAT); // This is separate: the Flow node's logging will go into this file. Just pass log_logger to flow::net_flow::Node. @@ -75,10 +75,10 @@ int main([[maybe_unused]] int argc, [[maybe_unused]] const char** argv) log_config.configure_default_verbosity(Sev::S_DATA, true); /* First arg: could use &std_logger to log-about-logging to console; but it's a bit heavy for such a console-dependent * little program. Just just send it to /dev/null metaphorically speaking. */ - Async_file_logger log_logger(0, &log_config, LOG_FILE, false /* No rotation; we're no serious business. 
*/); + Async_file_logger log_logger{0, &log_config, LOG_FILE, false /* No rotation; we're no serious business. */}; unsigned int n_times; - if (((argc - 1) != 4) || String_view(argv[4]).empty() || + if (((argc - 1) != 4) || String_view{argv[4]}.empty() || (!try_lexical_convert(argv[1], n_times)) || (n_times == 0)) { FLOW_LOG_WARNING("Usage: " << argv[0] << " = 1> "); @@ -86,7 +86,7 @@ int main([[maybe_unused]] int argc, [[maybe_unused]] const char** argv) } // else - const String_view message(argv[4]); + const String_view message{argv[4]}; const size_t msg_size = message.size(); if (msg_size >= MAX_MSG_SIZE) { @@ -94,8 +94,8 @@ int main([[maybe_unused]] int argc, [[maybe_unused]] const char** argv) return BAD_EXIT; } - const String_view host_str(argv[2]); - const String_view port_str(argv[3]); + const String_view host_str{argv[2]}; + const String_view port_str{argv[3]}; // Arguments parsed. Go. @@ -106,7 +106,7 @@ int main([[maybe_unused]] int argc, [[maybe_unused]] const char** argv) Udp_endpoint remote_udp_endpoint; { io_context io; - resolver res(io); + resolver res{io}; Error_code ec; const auto results = res.resolve(host_str, port_str, ec); if (ec) @@ -128,7 +128,7 @@ int main([[maybe_unused]] int argc, [[maybe_unused]] const char** argv) } // Now put our transport endpoint on the IPADDR_ANY address (all interfaces), random ephemeral UDP port. - Node node(&log_logger, Udp_endpoint(Ip_address_v4(), 0)); + Node node{&log_logger, Udp_endpoint(Ip_address_v4(), 0)}; /* Connect to the above-resolved UDP endpoint (host/port); within that, hard-coded Flow port REMOTE_FLOW_PORT. 
* Time out if unsuccessful after a while (throw exception; note that our user timeout may exceed lower-layer diff --git a/test/basic/net_flow/echo/srv/echo_server.cpp b/test/basic/net_flow/echo/srv/echo_server.cpp index 10075f020..1b75069c3 100644 --- a/test/basic/net_flow/echo/srv/echo_server.cpp +++ b/test/basic/net_flow/echo/srv/echo_server.cpp @@ -112,7 +112,7 @@ int Main::main(int argc, const char** argv) log_config.configure_default_verbosity(Sev::S_DATA, true); /* First arg: could use &m_logger to log-about-logging to console; but it's a bit heavy for such a console-dependent * little program. Just just send it to /dev/null metaphorically speaking. */ - Async_file_logger log_logger(0, &log_config, LOG_FILE, true /* Hook up SIGHUP log rotation for fun. */); + Async_file_logger log_logger{0, &log_config, LOG_FILE, true /* Hook up SIGHUP log rotation for fun. */}; if ((argc == 1) || ((argc - 1) > 2) || (((argc - 1) == 2) && (argv[2] != LOCALHOST_TOKEN))) { @@ -121,7 +121,7 @@ int Main::main(int argc, const char** argv) } // else - const String_view port_str(argv[1]); + const String_view port_str{argv[1]}; const bool bind_to_localhost = (argc - 1) == 2; // Argument parsing done. Go! @@ -135,7 +135,7 @@ int Main::main(int argc, const char** argv) const String_view host_str = bind_to_localhost ? "127.0.0.1" : "0.0.0.0"; io_context io; - resolver res(io); + resolver res{io}; Error_code ec; const auto results = res.resolve(host_str, port_str, ec); if (ec) @@ -160,7 +160,7 @@ int Main::main(int argc, const char** argv) Node_options opts; opts.m_st_capture_interrupt_signals_internally = true; // Be reasonably graceful on SIGTERM (throw exception, etc.). - Node node(&log_logger, local_udp_endpoint, 0, 0, opts); + Node node{&log_logger, local_udp_endpoint, 0, 0, opts}; // Within that, listen on hard-coded Flow port. 
Server_socket::Ptr serv = node.listen(LOCAL_FLOW_PORT); @@ -221,7 +221,7 @@ void Main::on_client_connected(flow::net_flow::Peer_socket::Ptr sock) assert(rcvd != 0); /* Copy received buffer into this string. We are going to need a string to log it anyway, so might as well use * the same string as the reply payload buffer. (Or could use buf_rcvd for that; doesn't matter.) */ - const string rcvd_msg(buf_rcvd.data(), rcvd); + const string rcvd_msg{buf_rcvd.data(), rcvd}; FLOW_LOG_INFO("On Flow socket [" << sock << "]: received message [" << rcvd_msg << "]."); sent = sock->sync_send(buffer(rcvd_msg), seconds(5), &err_code); diff --git a/test/suite/unit_test/CMakeLists.txt b/test/suite/unit_test/CMakeLists.txt index 69d65b15d..dc661411b 100644 --- a/test/suite/unit_test/CMakeLists.txt +++ b/test/suite/unit_test/CMakeLists.txt @@ -25,6 +25,11 @@ set(SRCS ${PROJECT_SOURCE_DIR}/src/flow/test/test_common_util.cpp ${PROJECT_SOURCE_DIR}/src/flow/test/test_file_util.cpp ${PROJECT_SOURCE_DIR}/src/flow/log/detail/test/component_cfg_test.cpp + ${PROJECT_SOURCE_DIR}/src/flow/log/test/log_test.cpp + ${PROJECT_SOURCE_DIR}/src/flow/util/test/blob_test.cpp + ${PROJECT_SOURCE_DIR}/src/flow/util/test/linked_hash_test.cpp + ${PROJECT_SOURCE_DIR}/src/flow/util/test/thread_lcl_test.cpp + ${PROJECT_SOURCE_DIR}/src/flow/util/test/util_test.cpp test_main.cpp) add_executable(${name} ${SRCS}) @@ -42,6 +47,11 @@ common_set_target_properties(${name}) # Link good ol' flow; and gtest. target_link_libraries(${name} PRIVATE flow ${GTEST_LIB}) +# At least blob_test.cpp requires boost.interprocess SHM ops which require this lib. (Flow itself doesn't as of +# this writing.) +if(LINUX) + target_link_libraries(${name} PRIVATE rt) +endif() # Might as well export if they `make install` or equivalent. 
install(TARGETS ${name} diff --git a/test/suite/unit_test/test_main.cpp b/test/suite/unit_test/test_main.cpp index 79c480f5a..cac303217 100644 --- a/test/suite/unit_test/test_main.cpp +++ b/test/suite/unit_test/test_main.cpp @@ -58,7 +58,7 @@ int configure_logging(int argc, char* argv[]) Test_config::get_singleton(); // Touching it just below. - po::options_description cmd_line_opts("unit test options"); + po::options_description cmd_line_opts{"unit test options"}; const string HELP_PARAM = "help"; cmd_line_opts.add_options() ((HELP_PARAM + ",h").c_str(), "help") diff --git a/tools/cmake/FlowLikeCodeGenerate.cmake b/tools/cmake/FlowLikeCodeGenerate.cmake index deaf61a5b..f31fbfc7b 100644 --- a/tools/cmake/FlowLikeCodeGenerate.cmake +++ b/tools/cmake/FlowLikeCodeGenerate.cmake @@ -310,11 +310,14 @@ function(common_set_target_properties name) # Show only a few errors per file before bailing out. # TODO: This is convenient when developing locally but can (IME rarely) be limiting in automated builds/CI/etc.; # consider making it configurable via knob. + # Don't skip "required by" lines in template-related error messages (default limit 10 can make stuff shorter, but + # when it hides that 1-2 lines one actually wants... quite frustrating... especially if one doesn't realize that's + # where the key info is). 
set(MAX_ERRORS 3) if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") - target_compile_options(${name} PRIVATE "-fmax-errors=${MAX_ERRORS}") + target_compile_options(${name} PRIVATE "-fmax-errors=${MAX_ERRORS}" "-ftemplate-backtrace-limit=0") elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Clang") - target_compile_options(${name} PRIVATE "-ferror-limit=${MAX_ERRORS}") + target_compile_options(${name} PRIVATE "-ferror-limit=${MAX_ERRORS}" "-ftemplate-backtrace-limit=0") else() message(FATAL_ERROR "Target [${name}]: For this target wanted to limit # of compile errors per file but compiler " "(CMAKE_CXX_COMPILER_ID/VERSION [${CMAKE_CXX_COMPILER_ID}/${CMAKE_CXX_COMPILER_VERSION}])" From 3904d52b5f2ccdb4c2b687b680b709e7d76dda58 Mon Sep 17 00:00:00 2001 From: Yuri Goldfeld Date: Thu, 20 Nov 2025 17:54:27 -0800 Subject: [PATCH 02/37] GitHub CI pipeline: The specified image no longer pre-installs gcc-9, so setting the manual-install flag for that compiler/version. --- .github/workflows/main.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index da4eb03ec..7bf81223d 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -183,6 +183,7 @@ jobs: version: 9 c-path: /usr/bin/gcc-9 cpp-path: /usr/bin/g++-9 + install: True - id: gcc-10 name: gcc version: 10 From fb4cd70772399f6ec143eacea20a4a022e508f41 Mon Sep 17 00:00:00 2001 From: Yuri Goldfeld Date: Thu, 20 Nov 2025 18:05:29 -0800 Subject: [PATCH 03/37] (cont) Eliminate unit-test compile warnings in higher gcc versions: sign-compare. 
--- src/flow/util/test/blob_test.cpp | 157 +++++++++++++----------- src/flow/util/test/linked_hash_test.cpp | 48 ++++---- 2 files changed, 107 insertions(+), 98 deletions(-) diff --git a/src/flow/util/test/blob_test.cpp b/src/flow/util/test/blob_test.cpp index bd68cabf9..4df6fee17 100644 --- a/src/flow/util/test/blob_test.cpp +++ b/src/flow/util/test/blob_test.cpp @@ -139,7 +139,7 @@ TEST(Blob, Interface) // Note that other test-cases specifically test SHARING=tr const Allocator* const alloc2 = &(*alloc2_v); constexpr size_t N_SM = 1024; - ASSERT_EQ(N_SM % 2, 0) << "We shall be dividing it by 2; should probably be even."; + ASSERT_EQ(N_SM % 2, size_t(0)) << "We shall be dividing it by 2; should probably be even."; constexpr size_t ZERO = 0; const uint8_t ONE = 1; @@ -168,66 +168,66 @@ TEST(Blob, Interface) // Note that other test-cases specifically test SHARING=tr { // Null blobs. auto b1 = make_blob(alloc, &logger, ZERO); EXPECT_TRUE(b1.zero()); EXPECT_TRUE(b1.empty()); - EXPECT_EQ(b1.size(), 0); EXPECT_EQ(b1.start(), 0); EXPECT_EQ(b1.capacity(), 0); + EXPECT_EQ(b1.size(), size_t(0)); EXPECT_EQ(b1.start(), size_t(0)); EXPECT_EQ(b1.capacity(), size_t(0)); b1.resize(0); EXPECT_TRUE(b1.zero()); EXPECT_TRUE(b1.empty()); - EXPECT_EQ(b1.size(), 0); EXPECT_EQ(b1.start(), 0); EXPECT_EQ(b1.capacity(), 0); + EXPECT_EQ(b1.size(), size_t(0)); EXPECT_EQ(b1.start(), size_t(0)); EXPECT_EQ(b1.capacity(), size_t(0)); b1.resize(0, Blob_t::S_UNCHANGED); EXPECT_TRUE(b1.zero()); EXPECT_TRUE(b1.empty()); - EXPECT_EQ(b1.size(), 0); EXPECT_EQ(b1.start(), 0); EXPECT_EQ(b1.capacity(), 0); - b1.resize(0, 0); + EXPECT_EQ(b1.size(), size_t(0)); EXPECT_EQ(b1.start(), size_t(0)); EXPECT_EQ(b1.capacity(), size_t(0)); + b1.resize(0, size_t(0)); EXPECT_TRUE(b1.zero()); EXPECT_TRUE(b1.empty()); - EXPECT_EQ(b1.size(), 0); EXPECT_EQ(b1.start(), 0); EXPECT_EQ(b1.capacity(), 0); + EXPECT_EQ(b1.size(), size_t(0)); EXPECT_EQ(b1.start(), size_t(0)); EXPECT_EQ(b1.capacity(), size_t(0)); b1.resize(0, 
CLEAR_ON_ALLOC); EXPECT_TRUE(b1.zero()); EXPECT_TRUE(b1.empty()); - EXPECT_EQ(b1.size(), 0); EXPECT_EQ(b1.start(), 0); EXPECT_EQ(b1.capacity(), 0); + EXPECT_EQ(b1.size(), size_t(0)); EXPECT_EQ(b1.start(), size_t(0)); EXPECT_EQ(b1.capacity(), size_t(0)); b1.resize(0, CLEAR_ON_ALLOC, Blob_t::S_UNCHANGED); EXPECT_TRUE(b1.zero()); EXPECT_TRUE(b1.empty()); - EXPECT_EQ(b1.size(), 0); EXPECT_EQ(b1.start(), 0); EXPECT_EQ(b1.capacity(), 0); - b1.resize(0, CLEAR_ON_ALLOC, 0); + EXPECT_EQ(b1.size(), size_t(0)); EXPECT_EQ(b1.start(), size_t(0)); EXPECT_EQ(b1.capacity(), size_t(0)); + b1.resize(0, CLEAR_ON_ALLOC, size_t(0)); EXPECT_TRUE(b1.zero()); EXPECT_TRUE(b1.empty()); - EXPECT_EQ(b1.size(), 0); EXPECT_EQ(b1.start(), 0); EXPECT_EQ(b1.capacity(), 0); + EXPECT_EQ(b1.size(), size_t(0)); EXPECT_EQ(b1.start(), size_t(0)); EXPECT_EQ(b1.capacity(), size_t(0)); b1.make_zero(); EXPECT_TRUE(b1.zero()); EXPECT_TRUE(b1.empty()); - EXPECT_EQ(b1.size(), 0); EXPECT_EQ(b1.start(), 0); EXPECT_EQ(b1.capacity(), 0); + EXPECT_EQ(b1.size(), size_t(0)); EXPECT_EQ(b1.start(), size_t(0)); EXPECT_EQ(b1.capacity(), size_t(0)); b1.reserve(0); EXPECT_TRUE(b1.zero()); EXPECT_TRUE(b1.empty()); - EXPECT_EQ(b1.size(), 0); EXPECT_EQ(b1.start(), 0); EXPECT_EQ(b1.capacity(), 0); + EXPECT_EQ(b1.size(), size_t(0)); EXPECT_EQ(b1.start(), size_t(0)); EXPECT_EQ(b1.capacity(), size_t(0)); b1.reserve(0, CLEAR_ON_ALLOC); EXPECT_TRUE(b1.zero()); EXPECT_TRUE(b1.empty()); - EXPECT_EQ(b1.size(), 0); EXPECT_EQ(b1.start(), 0); EXPECT_EQ(b1.capacity(), 0); + EXPECT_EQ(b1.size(), size_t(0)); EXPECT_EQ(b1.start(), size_t(0)); EXPECT_EQ(b1.capacity(), size_t(0)); b1.start_past_prefix(0); EXPECT_TRUE(b1.zero()); EXPECT_TRUE(b1.empty()); - EXPECT_EQ(b1.size(), 0); EXPECT_EQ(b1.start(), 0); EXPECT_EQ(b1.capacity(), 0); + EXPECT_EQ(b1.size(), size_t(0)); EXPECT_EQ(b1.start(), size_t(0)); EXPECT_EQ(b1.capacity(), size_t(0)); b1.start_past_prefix_inc(0); EXPECT_TRUE(b1.zero()); EXPECT_TRUE(b1.empty()); - EXPECT_EQ(b1.size(), 0); 
EXPECT_EQ(b1.start(), 0); EXPECT_EQ(b1.capacity(), 0); + EXPECT_EQ(b1.size(), size_t(0)); EXPECT_EQ(b1.start(), size_t(0)); EXPECT_EQ(b1.capacity(), size_t(0)); Blob_t b2{b1}; // Copy-ct. EXPECT_TRUE(b1.zero()); EXPECT_TRUE(b1.empty()); - EXPECT_EQ(b1.size(), 0); EXPECT_EQ(b1.start(), 0); EXPECT_EQ(b1.capacity(), 0); + EXPECT_EQ(b1.size(), size_t(0)); EXPECT_EQ(b1.start(), size_t(0)); EXPECT_EQ(b1.capacity(), size_t(0)); EXPECT_TRUE(b2.zero()); EXPECT_TRUE(b2.empty()); - EXPECT_EQ(b2.size(), 0); EXPECT_EQ(b2.start(), 0); EXPECT_EQ(b2.capacity(), 0); + EXPECT_EQ(b2.size(), size_t(0)); EXPECT_EQ(b2.start(), size_t(0)); EXPECT_EQ(b2.capacity(), size_t(0)); b1 = b2; // Copy-assign. EXPECT_TRUE(b1.zero()); EXPECT_TRUE(b1.empty()); - EXPECT_EQ(b1.size(), 0); EXPECT_EQ(b1.start(), 0); EXPECT_EQ(b1.capacity(), 0); + EXPECT_EQ(b1.size(), size_t(0)); EXPECT_EQ(b1.start(), size_t(0)); EXPECT_EQ(b1.capacity(), size_t(0)); EXPECT_TRUE(b2.zero()); EXPECT_TRUE(b2.empty()); - EXPECT_EQ(b2.size(), 0); EXPECT_EQ(b2.start(), 0); EXPECT_EQ(b2.capacity(), 0); + EXPECT_EQ(b2.size(), size_t(0)); EXPECT_EQ(b2.start(), size_t(0)); EXPECT_EQ(b2.capacity(), size_t(0)); Blob_t b3{std::move(b2)}; // Move-ct. EXPECT_TRUE(b3.zero()); EXPECT_TRUE(b3.empty()); - EXPECT_EQ(b3.size(), 0); EXPECT_EQ(b3.start(), 0); EXPECT_EQ(b3.capacity(), 0); + EXPECT_EQ(b3.size(), size_t(0)); EXPECT_EQ(b3.start(), size_t(0)); EXPECT_EQ(b3.capacity(), size_t(0)); EXPECT_TRUE(b2.zero()); EXPECT_TRUE(b2.empty()); - EXPECT_EQ(b2.size(), 0); EXPECT_EQ(b2.start(), 0); EXPECT_EQ(b2.capacity(), 0); + EXPECT_EQ(b2.size(), size_t(0)); EXPECT_EQ(b2.start(), size_t(0)); EXPECT_EQ(b2.capacity(), size_t(0)); b2 = std::move(b3); // Move-assign. 
EXPECT_TRUE(b3.zero()); EXPECT_TRUE(b3.empty()); - EXPECT_EQ(b3.size(), 0); EXPECT_EQ(b3.start(), 0); EXPECT_EQ(b3.capacity(), 0); + EXPECT_EQ(b3.size(), size_t(0)); EXPECT_EQ(b3.start(), size_t(0)); EXPECT_EQ(b3.capacity(), size_t(0)); EXPECT_TRUE(b2.zero()); EXPECT_TRUE(b2.empty()); - EXPECT_EQ(b2.size(), 0); EXPECT_EQ(b2.start(), 0); EXPECT_EQ(b2.capacity(), 0); + EXPECT_EQ(b2.size(), size_t(0)); EXPECT_EQ(b2.start(), size_t(0)); EXPECT_EQ(b2.capacity(), size_t(0)); swap(b2, b3); EXPECT_TRUE(b3.zero()); EXPECT_TRUE(b3.empty()); - EXPECT_EQ(b3.size(), 0); EXPECT_EQ(b3.start(), 0); EXPECT_EQ(b3.capacity(), 0); + EXPECT_EQ(b3.size(), size_t(0)); EXPECT_EQ(b3.start(), size_t(0)); EXPECT_EQ(b3.capacity(), size_t(0)); EXPECT_TRUE(b2.zero()); EXPECT_TRUE(b2.empty()); - EXPECT_EQ(b2.size(), 0); EXPECT_EQ(b2.start(), 0); EXPECT_EQ(b2.capacity(), 0); + EXPECT_EQ(b2.size(), size_t(0)); EXPECT_EQ(b2.start(), size_t(0)); EXPECT_EQ(b2.capacity(), size_t(0)); } // Null blobs. // We can now generally perhaps ignore null blobs when testing construct/assign ops. @@ -235,14 +235,14 @@ TEST(Blob, Interface) // Note that other test-cases specifically test SHARING=tr { // Copy-ct. 
auto b1 = make_blob(alloc, &logger, N_SM, CLEAR_ON_ALLOC); EXPECT_FALSE(b1.zero()); EXPECT_FALSE(b1.empty()); - EXPECT_EQ(b1.size(), N_SM); EXPECT_EQ(b1.start(), 0); EXPECT_EQ(b1.capacity(), N_SM); + EXPECT_EQ(b1.size(), N_SM); EXPECT_EQ(b1.start(), size_t(0)); EXPECT_EQ(b1.capacity(), N_SM); EXPECT_TRUE(ALL_ZERO_FN(b1)); Blob_t b2{b1}; EXPECT_FALSE(b1.zero()); EXPECT_FALSE(b1.empty()); - EXPECT_EQ(b1.size(), N_SM); EXPECT_EQ(b1.start(), 0); EXPECT_EQ(b1.capacity(), N_SM); + EXPECT_EQ(b1.size(), N_SM); EXPECT_EQ(b1.start(), size_t(0)); EXPECT_EQ(b1.capacity(), N_SM); EXPECT_TRUE(ALL_ZERO_FN(b1)); EXPECT_FALSE(b2.zero()); EXPECT_FALSE(b2.empty()); - EXPECT_EQ(b2.size(), N_SM); EXPECT_EQ(b2.start(), 0); EXPECT_EQ(b2.capacity(), N_SM); + EXPECT_EQ(b2.size(), N_SM); EXPECT_EQ(b2.start(), size_t(0)); EXPECT_EQ(b2.capacity(), N_SM); EXPECT_TRUE(ALL_ZERO_FN(b2)); EXPECT_NE(b1.data(), b2.data()); @@ -251,7 +251,7 @@ TEST(Blob, Interface) // Note that other test-cases specifically test SHARING=tr EXPECT_FALSE(b3.zero()); EXPECT_FALSE(b3.empty()); EXPECT_EQ(b3.size(), N_SM / 2); // Attn: Only [.b(), .e()) range copied; start() shall be zero, and capacity() big-enough for size() only. - EXPECT_EQ(b3.start(), 0); EXPECT_EQ(b3.capacity(), N_SM / 2); + EXPECT_EQ(b3.start(), size_t(0)); EXPECT_EQ(b3.capacity(), N_SM / 2); EXPECT_FALSE(b2.zero()); EXPECT_FALSE(b2.empty()); EXPECT_EQ(b2.size(), N_SM / 2); EXPECT_EQ(b2.start(), N_SM / 2); EXPECT_EQ(b2.capacity(), N_SM); EXPECT_NE(b3.data(), b2.data()); @@ -266,7 +266,7 @@ TEST(Blob, Interface) // Note that other test-cases specifically test SHARING=tr b2 = b1; // Overwrite null blob. EXPECT_FALSE(b2.zero()); EXPECT_FALSE(b2.empty()); EXPECT_EQ(b2.size(), N_SM / 2); - EXPECT_EQ(b2.start(), 0); EXPECT_EQ(b2.capacity(), N_SM / 2); // Attn: same deal as with copy-ct. + EXPECT_EQ(b2.start(), size_t(0)); EXPECT_EQ(b2.capacity(), N_SM / 2); // Attn: same deal as with copy-ct. 
EXPECT_TRUE(ALL_ZERO_FN(b2)); EXPECT_NE(b1.data(), b2.data()); EXPECT_FALSE(b1.zero()); EXPECT_FALSE(b1.empty()); @@ -285,7 +285,7 @@ TEST(Blob, Interface) // Note that other test-cases specifically test SHARING=tr * is adjusted accordingly. */ b3 = b2; // So now it's [][N_SM / 2][N_SM - N_SN/2]. EXPECT_FALSE(b3.zero()); EXPECT_FALSE(b3.empty()); - EXPECT_EQ(b3.start(), 0); EXPECT_EQ(b3.size(), N_SM / 2); EXPECT_EQ(b3.capacity(), N_SM); + EXPECT_EQ(b3.start(), size_t(0)); EXPECT_EQ(b3.size(), N_SM / 2); EXPECT_EQ(b3.capacity(), N_SM); EXPECT_TRUE(ALL_ZERO_FN(b3)); // Copied-stuff a/k/a body should be as in b2. EXPECT_TRUE(RNG_ONES_FN(b3.end(), b3.begin() - b3.start() + b3.capacity())); // Postfix should be untouched. @@ -313,20 +313,20 @@ TEST(Blob, Interface) // Note that other test-cases specifically test SHARING=tr // assign_copy(). auto b5 = make_blob(alloc, &logger, ZERO); EXPECT_EQ(b5.assign_copy(STR_BUF), STR_BUF.size()); // Made of ONEs. - EXPECT_EQ(b5.start(), 0); EXPECT_EQ(b5.size(), STRING.size()); EXPECT_EQ(b5.capacity(), STRING.size()); + EXPECT_EQ(b5.start(), size_t(0)); EXPECT_EQ(b5.size(), STRING.size()); EXPECT_EQ(b5.capacity(), STRING.size()); EXPECT_TRUE(RNG_ONES_FN(b5.begin(), b5.end())); EXPECT_EQ(b5.assign_copy(STR_SM_BUF), STR_SM_BUF.size()); // Made of zeroes. - EXPECT_EQ(b5.start(), 0); EXPECT_EQ(b5.size(), STRING_SM.size()); EXPECT_EQ(b5.capacity(), STRING.size()); + EXPECT_EQ(b5.start(), size_t(0)); EXPECT_EQ(b5.size(), STRING_SM.size()); EXPECT_EQ(b5.capacity(), STRING.size()); EXPECT_TRUE(ALL_ZERO_FN(b5)); EXPECT_TRUE(RNG_ONES_FN(b5.end(), b5.begin() + STRING.size())); // emplace_copy(). 
b5.resize(b5.capacity()); - EXPECT_EQ(b5.start(), 0); EXPECT_EQ(b5.size(), STRING.size()); EXPECT_EQ(b5.capacity(), STRING.size()); + EXPECT_EQ(b5.start(), size_t(0)); EXPECT_EQ(b5.size(), STRING.size()); EXPECT_EQ(b5.capacity(), STRING.size()); EXPECT_TRUE(RNG_ONES_FN(b5.begin() + STRING_SM.size(), b5.begin() + STRING.size())); EXPECT_EQ(b5.emplace_copy(b5.begin() + (STRING_SM.size() / 2), STR_SM_BUF), b5.begin() + (STRING_SM.size() / 2) + STRING_SM.size()); // All these must remain unchanged; just bytes were copied into a sub-range of [.b(), .e()). - EXPECT_EQ(b5.start(), 0); EXPECT_EQ(b5.size(), STRING.size()); EXPECT_EQ(b5.capacity(), STRING.size()); + EXPECT_EQ(b5.start(), size_t(0)); EXPECT_EQ(b5.size(), STRING.size()); EXPECT_EQ(b5.capacity(), STRING.size()); EXPECT_TRUE(RNG_ZERO_FN(b5.begin(), b5.begin() + (STRING_SM.size() / 2))); EXPECT_TRUE(RNG_ZERO_FN(b5.begin() + (STRING_SM.size() / 2), @@ -337,8 +337,8 @@ TEST(Blob, Interface) // Note that other test-cases specifically test SHARING=tr EXPECT_EQ(b5.emplace_copy(b5.begin() + 1, const_buffer{b5.end() - 5, 4}), b5.begin() + 5); - EXPECT_EQ(b5.start(), 0); EXPECT_EQ(b5.size(), STRING.size()); EXPECT_EQ(b5.capacity(), STRING.size()); - EXPECT_EQ(b5.front(), 0); + EXPECT_EQ(b5.start(), size_t(0)); EXPECT_EQ(b5.size(), STRING.size()); EXPECT_EQ(b5.capacity(), STRING.size()); + EXPECT_EQ(b5.front(), uint8_t(0)); EXPECT_TRUE(RNG_ONES_FN(b5.begin() + 1, b5.begin() + 1 + 4)); EXPECT_TRUE(RNG_ZERO_FN(b5.begin() + 1 + 4, @@ -354,14 +354,14 @@ TEST(Blob, Interface) // Note that other test-cases specifically test SHARING=tr auto b1 = make_blob(alloc, &logger, N_SM, CLEAR_ON_ALLOC); EXPECT_FALSE(b1.zero()); EXPECT_FALSE(b1.empty()); - EXPECT_EQ(b1.size(), N_SM); EXPECT_EQ(b1.start(), 0); EXPECT_EQ(b1.capacity(), N_SM); + EXPECT_EQ(b1.size(), N_SM); EXPECT_EQ(b1.start(), size_t(0)); EXPECT_EQ(b1.capacity(), N_SM); EXPECT_TRUE(ALL_ZERO_FN(b1)); auto saved_dt = b1.const_data(); Blob_t b2{std::move(b1)}; 
EXPECT_TRUE(b1.zero()); EXPECT_TRUE(b1.empty()); - EXPECT_EQ(b1.size(), 0); EXPECT_EQ(b1.start(), 0); EXPECT_EQ(b1.capacity(), 0); + EXPECT_EQ(b1.size(), size_t(0)); EXPECT_EQ(b1.start(), size_t(0)); EXPECT_EQ(b1.capacity(), size_t(0)); EXPECT_FALSE(b2.zero()); EXPECT_FALSE(b2.empty()); - EXPECT_EQ(b2.size(), N_SM); EXPECT_EQ(b2.start(), 0); EXPECT_EQ(b2.capacity(), N_SM); + EXPECT_EQ(b2.size(), N_SM); EXPECT_EQ(b2.start(), size_t(0)); EXPECT_EQ(b2.capacity(), N_SM); EXPECT_TRUE(ALL_ZERO_FN(b2)); EXPECT_EQ(b1.data(), nullptr); EXPECT_EQ(b2.data(), saved_dt); @@ -370,7 +370,7 @@ TEST(Blob, Interface) // Note that other test-cases specifically test SHARING=tr saved_dt = b2.const_data(); Blob_t b3{std::move(b2)}; EXPECT_TRUE(b2.zero()); EXPECT_TRUE(b2.empty()); - EXPECT_EQ(b2.size(), 0); EXPECT_EQ(b2.start(), 0); EXPECT_EQ(b2.capacity(), 0); + EXPECT_EQ(b2.size(), size_t(0)); EXPECT_EQ(b2.start(), size_t(0)); EXPECT_EQ(b2.capacity(), size_t(0)); EXPECT_FALSE(b3.zero()); EXPECT_FALSE(b3.empty()); EXPECT_EQ(b3.size(), N_SM / 2); EXPECT_EQ(b3.start(), N_SM / 2); EXPECT_EQ(b3.capacity(), N_SM); EXPECT_TRUE(ALL_ZERO_FN(b3)); @@ -379,10 +379,10 @@ TEST(Blob, Interface) // Note that other test-cases specifically test SHARING=tr auto b4 = make_blob(alloc, &logger, N_TN, CLEAR_ON_ALLOC); EXPECT_FALSE(b4.zero()); EXPECT_FALSE(b4.empty()); - EXPECT_EQ(b4.size(), N_TN); EXPECT_EQ(b4.start(), 0); EXPECT_EQ(b4.capacity(), N_TN); + EXPECT_EQ(b4.size(), N_TN); EXPECT_EQ(b4.start(), size_t(0)); EXPECT_EQ(b4.capacity(), N_TN); b4 = std::move(b3); // Move-assign. 
EXPECT_TRUE(b3.zero()); EXPECT_TRUE(b3.empty()); - EXPECT_EQ(b3.size(), 0); EXPECT_EQ(b3.start(), 0); EXPECT_EQ(b3.capacity(), 0); + EXPECT_EQ(b3.size(), size_t(0)); EXPECT_EQ(b3.start(), size_t(0)); EXPECT_EQ(b3.capacity(), size_t(0)); EXPECT_FALSE(b4.zero()); EXPECT_FALSE(b4.empty()); EXPECT_EQ(b4.size(), N_SM / 2); EXPECT_EQ(b4.start(), N_SM / 2); EXPECT_EQ(b4.capacity(), N_SM); EXPECT_TRUE(ALL_ZERO_FN(b4)); @@ -395,26 +395,33 @@ TEST(Blob, Interface) // Note that other test-cases specifically test SHARING=tr constexpr size_t N2 = 10; constexpr size_t N_BIG = 1024 * 1024; // Biggish so as to lower chance all-zeroes being already there by accident. auto b1 = make_blob(alloc, &logger, ZERO); - EXPECT_TRUE(b1.zero()); EXPECT_EQ(b1.capacity(), 0); EXPECT_EQ(b1.size(), 0); EXPECT_EQ(b1.start(), 0); + EXPECT_TRUE(b1.zero()); EXPECT_EQ(b1.capacity(), size_t(0)); + EXPECT_EQ(b1.size(), size_t(0)); EXPECT_EQ(b1.start(), size_t(0)); EXPECT_EQ(b1.begin(), nullptr); EXPECT_EQ(b1.begin(), b1.end()); EXPECT_EQ(b1.data(), nullptr); b1.make_zero(); // No-op. - EXPECT_TRUE(b1.zero()); EXPECT_EQ(b1.capacity(), 0); EXPECT_EQ(b1.size(), 0); EXPECT_EQ(b1.start(), 0); + EXPECT_TRUE(b1.zero()); EXPECT_EQ(b1.capacity(), size_t(0)); + EXPECT_EQ(b1.size(), size_t(0)); EXPECT_EQ(b1.start(), size_t(0)); EXPECT_EQ(b1.begin(), nullptr); EXPECT_EQ(b1.begin(), b1.end()); EXPECT_EQ(b1.data(), nullptr); b1.reserve(N1); - EXPECT_FALSE(b1.zero()); EXPECT_EQ(b1.capacity(), N1); EXPECT_EQ(b1.size(), 0); EXPECT_EQ(b1.start(), 0); + EXPECT_FALSE(b1.zero()); EXPECT_EQ(b1.capacity(), N1); + EXPECT_EQ(b1.size(), size_t(0)); EXPECT_EQ(b1.start(), size_t(0)); EXPECT_NE(b1.begin(), nullptr); EXPECT_EQ(b1.begin(), b1.end()); EXPECT_NE(b1.data(), nullptr); b1.reserve(N2); // Smaller => no-op. 
- EXPECT_FALSE(b1.zero()); EXPECT_EQ(b1.capacity(), N1); EXPECT_EQ(b1.size(), 0); EXPECT_EQ(b1.start(), 0); + EXPECT_FALSE(b1.zero()); EXPECT_EQ(b1.capacity(), N1); + EXPECT_EQ(b1.size(), size_t(0)); EXPECT_EQ(b1.start(), size_t(0)); EXPECT_NE(b1.begin(), nullptr); EXPECT_EQ(b1.begin(), b1.end()); EXPECT_NE(b1.data(), nullptr); b1.make_zero(); // Dealloc here (ahead of destructor). - EXPECT_TRUE(b1.zero()); EXPECT_EQ(b1.capacity(), 0); EXPECT_EQ(b1.size(), 0); EXPECT_EQ(b1.start(), 0); + EXPECT_TRUE(b1.zero()); EXPECT_EQ(b1.capacity(), size_t(0)); + EXPECT_EQ(b1.size(), size_t(0)); EXPECT_EQ(b1.start(), size_t(0)); EXPECT_EQ(b1.begin(), nullptr); EXPECT_EQ(b1.begin(), b1.end()); EXPECT_EQ(b1.data(), nullptr); b1.reserve(N_BIG, CLEAR_ON_ALLOC); - EXPECT_FALSE(b1.zero()); EXPECT_EQ(b1.capacity(), N_BIG); EXPECT_EQ(b1.size(), 0); EXPECT_EQ(b1.start(), 0); + EXPECT_FALSE(b1.zero()); EXPECT_EQ(b1.capacity(), N_BIG); + EXPECT_EQ(b1.size(), size_t(0)); EXPECT_EQ(b1.start(), size_t(0)); EXPECT_NE(b1.begin(), nullptr); EXPECT_EQ(b1.begin(), b1.end()); EXPECT_NE(b1.data(), nullptr); RNG_ZERO_FN(b1.begin(), b1.begin() + b1.capacity()); b1.reserve(N1); // Smaller => no-op. - EXPECT_FALSE(b1.zero()); EXPECT_EQ(b1.capacity(), N_BIG); EXPECT_EQ(b1.size(), 0); EXPECT_EQ(b1.start(), 0); + EXPECT_FALSE(b1.zero()); EXPECT_EQ(b1.capacity(), N_BIG); + EXPECT_EQ(b1.size(), size_t(0)); EXPECT_EQ(b1.start(), size_t(0)); EXPECT_NE(b1.begin(), nullptr); EXPECT_EQ(b1.begin(), b1.end()); EXPECT_NE(b1.data(), nullptr); RNG_ZERO_FN(b1.begin(), b1.begin() + b1.capacity()); // Destructor deallocs. @@ -431,7 +438,7 @@ TEST(Blob, Interface) // Note that other test-cases specifically test SHARING=tr // Sanity-check begin() and end() cores when zero() and not zero() but empty() (degenerate cases). 
auto b1 = make_blob(alloc, &logger, ZERO); EXPECT_EQ(b1.begin(), nullptr); EXPECT_EQ(b1.end(), b1.begin()); - b1.reserve(N_SM, CLEAR_ON_ALLOC); ASSERT_EQ(b1.size(), 0); + b1.reserve(N_SM, CLEAR_ON_ALLOC); ASSERT_EQ(b1.size(), size_t(0)); EXPECT_NE(b1.begin(), nullptr); EXPECT_EQ(b1.end(), b1.begin()); // Now for the mainstream situation (!empty(); also have non-empty prefix (start()) and postfix). @@ -473,7 +480,7 @@ TEST(Blob, Interface) // Note that other test-cases specifically test SHARING=tr constexpr size_t INC = 5; auto b1 = make_blob(alloc, &logger, N1); - EXPECT_EQ(b1.start(), 0); EXPECT_EQ(b1.size(), N1); + EXPECT_EQ(b1.start(), size_t(0)); EXPECT_EQ(b1.size(), N1); b1.start_past_prefix(N2); EXPECT_EQ(b1.start(), N2); EXPECT_EQ(b1.size(), N1 - N2); b1.make_zero(); // Dealloc. @@ -486,27 +493,29 @@ TEST(Blob, Interface) // Note that other test-cases specifically test SHARING=tr EXPECT_EQ(b1.start(), INC); EXPECT_EQ(b1.size(), N2); ASSERT_GT(N1, N2 + INC) << "Sanity-check our own logic real quick."; b1.start_past_prefix(N1); // Requested start() > current start() + size() => size() becomes 0. - EXPECT_EQ(b1.start(), N1); EXPECT_EQ(b1.size(), 0); + EXPECT_EQ(b1.start(), N1); EXPECT_EQ(b1.size(), size_t(0)); b1.start_past_prefix_inc(-1); - EXPECT_EQ(b1.start(), N1 - 1); EXPECT_EQ(b1.size(), 1); + EXPECT_EQ(b1.start(), N1 - 1); EXPECT_EQ(b1.size(), size_t(1)); b1.start_past_prefix_inc(-5); - EXPECT_EQ(b1.start(), N1 - 1 - 5); EXPECT_EQ(b1.size(), 1 + 5); + EXPECT_EQ(b1.start(), N1 - 1 - 5); EXPECT_EQ(b1.size(), size_t(1) + 5); b1.start_past_prefix_inc(+2); - EXPECT_EQ(b1.start(), N1 - 1 - 5 + 2); EXPECT_EQ(b1.size(), 1 + 5 - 2); + EXPECT_EQ(b1.start(), N1 - 1 - 5 + 2); EXPECT_EQ(b1.size(), size_t(1) + 5 - 2); b1.start_past_prefix_inc(+5); // Push past original start(). size() itself is floored at 0. 
- EXPECT_EQ(b1.start(), N1 + 1); EXPECT_EQ(b1.size(), 0); + EXPECT_EQ(b1.start(), N1 + 1); EXPECT_EQ(b1.size(), size_t(0)); b1.start_past_prefix_inc(-(N1 + 1)); - EXPECT_EQ(b1.start(), 0); EXPECT_EQ(b1.size(), N1 + 1); + EXPECT_EQ(b1.start(), size_t(0)); EXPECT_EQ(b1.size(), N1 + 1); // (Recall that clear() never actually deallocs. It really just sets size() to 0; that's it.) b1.resize(N2, INC); EXPECT_EQ(b1.start(), INC); EXPECT_EQ(b1.size(), N2); b1.clear(); // Attn. EXPECT_FALSE(b1.zero()); EXPECT_TRUE(b1.empty()); // Empty but buffer is actually allocated still. - EXPECT_EQ(b1.start(), INC); EXPECT_EQ(b1.size(), 0); // Empty (size is 0) but start() unchanged as advertised. + EXPECT_EQ(b1.start(), INC); EXPECT_EQ(b1.size(), size_t(0)); // Empty but start() unchanged as advertised. b1.make_zero(); - EXPECT_TRUE(b1.zero()); EXPECT_TRUE(b1.empty()); EXPECT_EQ(b1.start(), 0); EXPECT_EQ(b1.size(), 0); + EXPECT_TRUE(b1.zero()); EXPECT_TRUE(b1.empty()); + EXPECT_EQ(b1.start(), size_t(0)); EXPECT_EQ(b1.size(), size_t(0)); b1.clear(); // No-op. - EXPECT_TRUE(b1.zero()); EXPECT_TRUE(b1.empty()); EXPECT_EQ(b1.start(), 0); EXPECT_EQ(b1.size(), 0); + EXPECT_TRUE(b1.zero()); EXPECT_TRUE(b1.empty()); + EXPECT_EQ(b1.start(), size_t(0)); EXPECT_EQ(b1.size(), size_t(0)); } // start_past_prefix[_inc](), clear(). { // erase(). @@ -722,21 +731,21 @@ TEST(Blob, Interface) // Note that other test-cases specifically test SHARING=tr * copied-from. Like, the moved-from allocator shouldn't get nullified or something; generally that is not * really a thing; that I (ygoldfel) know of.) So we'll just still check the moved-from get_allocator(). 
*/ EXPECT_EQ(b4.get_allocator(), *alloc); EXPECT_EQ(b3.get_allocator(), *alloc); - EXPECT_EQ(b4.capacity(), N_SM); EXPECT_EQ(b3.capacity(), 0); + EXPECT_EQ(b4.capacity(), N_SM); EXPECT_EQ(b3.capacity(), size_t(0)); auto b5 = make_blob(alloc2, &logger, N_SM); EXPECT_EQ(b5.get_allocator(), *alloc2); EXPECT_EQ(b4.get_allocator(), *alloc); EXPECT_EQ(b5.capacity(), N_SM); EXPECT_EQ(b4.capacity(), N_SM); b4 = std::move(b5); EXPECT_EQ(b5.get_allocator(), *alloc2); EXPECT_EQ(b4.get_allocator(), *alloc); // Move-construction propagates... but not move-assignment. - EXPECT_EQ(b5.capacity(), 0); EXPECT_EQ(b4.capacity(), N_SM); + EXPECT_EQ(b5.capacity(), size_t(0)); EXPECT_EQ(b4.capacity(), N_SM); static_assert(!(SHM_ALLOC && allocator_traits::propagate_on_container_swap::value), "Our stateful test allocators configured themselves to " "*not* propagate on swap. If this static-assert trips, something major changed " "in boost.interprocess maybe?!"); swap(b4, b5); - EXPECT_EQ(b5.capacity(), N_SM); EXPECT_EQ(b4.capacity(), 0); // Buf pointers apparently swapped... + EXPECT_EQ(b5.capacity(), N_SM); EXPECT_EQ(b4.capacity(), size_t(0)); // Buf pointers apparently swapped... EXPECT_EQ(b5.get_allocator(), *alloc2); // ...but not allocators. EXPECT_EQ(b4.get_allocator(), *alloc); b5.make_zero(); // Force deallocation now -- at least it shouldn't crash. @@ -813,25 +822,25 @@ TEST(Blob, Interface) // Note that other test-cases specifically test SHARING=tr auto b2 = make_shared(b1->share()); check_alloc_sz(1, CTX); // No extra alloc. 
EXPECT_EQ(b1->data() - b1->start(), p); EXPECT_EQ(b1->capacity(), N); - EXPECT_EQ(b1->size(), N); EXPECT_EQ(b1->start(), 0); + EXPECT_EQ(b1->size(), N); EXPECT_EQ(b1->start(), size_t(0)); EXPECT_EQ(b2->data() - b2->start(), p); EXPECT_EQ(b2->capacity(), N); - EXPECT_EQ(b2->size(), N); EXPECT_EQ(b2->start(), 0); + EXPECT_EQ(b2->size(), N); EXPECT_EQ(b2->start(), size_t(0)); auto b3 = make_shared(b2->share_after_split_left(INC)); check_alloc_sz(1, CTX); EXPECT_EQ(b1->data() - b1->start(), p); EXPECT_EQ(b1->capacity(), N); - EXPECT_EQ(b1->size(), N); EXPECT_EQ(b1->start(), 0); + EXPECT_EQ(b1->size(), N); EXPECT_EQ(b1->start(), size_t(0)); EXPECT_EQ(b2->data() - b2->start(), p); EXPECT_EQ(b2->capacity(), N); EXPECT_EQ(b2->size(), N - INC); EXPECT_EQ(b2->start(), INC); EXPECT_EQ(b3->data() - b3->start(), p); EXPECT_EQ(b3->capacity(), N); - EXPECT_EQ(b3->size(), INC); EXPECT_EQ(b3->start(), 0); + EXPECT_EQ(b3->size(), INC); EXPECT_EQ(b3->start(), size_t(0)); auto b4 = make_shared(b2->share_after_split_right(INC)); check_alloc_sz(1, CTX); EXPECT_EQ(b1->data() - b1->start(), p); EXPECT_EQ(b1->capacity(), N); - EXPECT_EQ(b1->size(), N); EXPECT_EQ(b1->start(), 0); + EXPECT_EQ(b1->size(), N); EXPECT_EQ(b1->start(), size_t(0)); EXPECT_EQ(b2->data() - b2->start(), p); EXPECT_EQ(b2->capacity(), N); EXPECT_EQ(b2->size(), N - INC - INC); EXPECT_EQ(b2->start(), INC); EXPECT_EQ(b3->data() - b3->start(), p); EXPECT_EQ(b3->capacity(), N); - EXPECT_EQ(b3->size(), INC); EXPECT_EQ(b3->start(), 0); + EXPECT_EQ(b3->size(), INC); EXPECT_EQ(b3->start(), size_t(0)); EXPECT_EQ(b4->data() - b4->start(), p); EXPECT_EQ(b4->capacity(), N); EXPECT_EQ(b4->size(), INC); EXPECT_EQ(b4->start(), N - INC); @@ -912,7 +921,7 @@ TEST(Blob, Interface) // Note that other test-cases specifically test SHARING=tr constexpr size_t N_LG = N_SM + INC; auto b1 = make_blob(alloc, &logger, N_SM); b1.resize(0); - ASSERT_EQ(b1.capacity(), N_SM); ASSERT_EQ(b1.size(), 0); + ASSERT_EQ(b1.capacity(), N_SM); 
ASSERT_EQ(b1.size(), size_t(0)); b1.reserve(N_SM); EXPECT_DEATH(b1.reserve(N_LG), RSRV_ERR); b1.make_zero(); @@ -945,9 +954,9 @@ TEST(Blob, Interface) // Note that other test-cases specifically test SHARING=tr // start_past_prefix_inc(). b2.resize(10, 5); - b2.start_past_prefix_inc(-1); ASSERT_EQ(b2.start(), 4); - b2.start_past_prefix_inc(-4); ASSERT_EQ(b2.start(), 0); - b2.start_past_prefix_inc(+4); ASSERT_EQ(b2.start(), 4); + b2.start_past_prefix_inc(-1); ASSERT_EQ(b2.start(), size_t(4)); + b2.start_past_prefix_inc(-4); ASSERT_EQ(b2.start(), size_t(0)); + b2.start_past_prefix_inc(+4); ASSERT_EQ(b2.start(), size_t(4)); EXPECT_DEATH(b2.start_past_prefix_inc(-5), "start.. >= size_type.-prefix_size_inc"); { // emplace/sub_copy(). diff --git a/src/flow/util/test/linked_hash_test.cpp b/src/flow/util/test/linked_hash_test.cpp index baa2f3aa6..de3cd0c03 100644 --- a/src/flow/util/test/linked_hash_test.cpp +++ b/src/flow/util/test/linked_hash_test.cpp @@ -291,13 +291,13 @@ TEST(Linked_hash, Interface) keys_check_map(map10, { "b", "a", "e", "d", "c" }, CTX); vals_check_map(map10, { "Y", "A", "Z", "D", "X" }, CTX); - EXPECT_EQ(map10.erase("x"), 0); - EXPECT_EQ(map10.erase("c"), 1); - EXPECT_EQ(map10.erase("c"), 0); - EXPECT_EQ(map10.erase("a"), 1); - EXPECT_EQ(map10.erase("a"), 0); - EXPECT_EQ(map10.erase("b"), 1); - EXPECT_EQ(map10.erase("b"), 0); + EXPECT_EQ(map10.erase("x"), size_t(0)); + EXPECT_EQ(map10.erase("c"), size_t(1)); + EXPECT_EQ(map10.erase("c"), size_t(0)); + EXPECT_EQ(map10.erase("a"), size_t(1)); + EXPECT_EQ(map10.erase("a"), size_t(0)); + EXPECT_EQ(map10.erase("b"), size_t(1)); + EXPECT_EQ(map10.erase("b"), size_t(0)); keys_check_map(map10, { "e", "d" }, CTX); vals_check_map(map10, { "Z", "D" }, CTX); map10.clear(); @@ -327,12 +327,12 @@ TEST(Linked_hash, Interface) EXPECT_EQ(it5->first, "b"); EXPECT_EQ(it5->second, "B"); - EXPECT_EQ(map10.count("e"), 1); - EXPECT_EQ(map10.count("b"), 1); - EXPECT_EQ(map10.count("x"), 0); - EXPECT_EQ(map10.count(""), 0); + 
EXPECT_EQ(map10.count("e"), size_t(1)); + EXPECT_EQ(map10.count("b"), size_t(1)); + EXPECT_EQ(map10.count("x"), size_t(0)); + EXPECT_EQ(map10.count(""), size_t(0)); map10[""] = "Q"; - EXPECT_EQ(map10.count(""), 1); + EXPECT_EQ(map10.count(""), size_t(1)); EXPECT_EQ(map10[""], "Q"); EXPECT_EQ(map10["e"], "E"); EXPECT_EQ(map10["b"], "B"); @@ -458,13 +458,13 @@ TEST(Linked_hash, Interface) set10.touch(set10.find("b")); keys_check_set(set10, { "b", "a", "e", "d", "c" }, CTX); - EXPECT_EQ(set10.erase("x"), 0); - EXPECT_EQ(set10.erase("c"), 1); - EXPECT_EQ(set10.erase("c"), 0); - EXPECT_EQ(set10.erase("a"), 1); - EXPECT_EQ(set10.erase("a"), 0); - EXPECT_EQ(set10.erase("b"), 1); - EXPECT_EQ(set10.erase("b"), 0); + EXPECT_EQ(set10.erase("x"), size_t(0)); + EXPECT_EQ(set10.erase("c"), size_t(1)); + EXPECT_EQ(set10.erase("c"), size_t(0)); + EXPECT_EQ(set10.erase("a"), size_t(1)); + EXPECT_EQ(set10.erase("a"), size_t(0)); + EXPECT_EQ(set10.erase("b"), size_t(1)); + EXPECT_EQ(set10.erase("b"), size_t(0)); keys_check_set(set10, { "e", "d" }, CTX); set10.clear(); keys_check_set(set10, { }, CTX); @@ -485,12 +485,12 @@ TEST(Linked_hash, Interface) EXPECT_EQ(it5, --set10.end()); EXPECT_EQ(it5->m_str, "b"); - EXPECT_EQ(set10.count("e"), 1); - EXPECT_EQ(set10.count("b"), 1); - EXPECT_EQ(set10.count("x"), 0); - EXPECT_EQ(set10.count(""), 0); + EXPECT_EQ(set10.count("e"), size_t(1)); + EXPECT_EQ(set10.count("b"), size_t(1)); + EXPECT_EQ(set10.count("x"), size_t(0)); + EXPECT_EQ(set10.count(""), size_t(0)); set10.insert(""); - EXPECT_EQ(set10.count(""), 1); + EXPECT_EQ(set10.count(""), size_t(1)); } // Set test block. } // TEST(Linked_hash, Interface) From 7755377a2b6d681e3dd60f654d678953100a8b9d Mon Sep 17 00:00:00 2001 From: Yuri Goldfeld Date: Thu, 20 Nov 2025 18:12:49 -0800 Subject: [PATCH 04/37] (cont) Eliminate unit-test compile warnings in higher gcc versions: sign-compare (part deux). 
--- src/flow/util/test/blob_test.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/flow/util/test/blob_test.cpp b/src/flow/util/test/blob_test.cpp index 4df6fee17..dd3d2d631 100644 --- a/src/flow/util/test/blob_test.cpp +++ b/src/flow/util/test/blob_test.cpp @@ -454,7 +454,7 @@ TEST(Blob, Interface) // Note that other test-cases specifically test SHARING=tr uint8_t* const d = b1.data(); const uint8_t* const c_d = b1.const_data(); - EXPECT_EQ(e - b, b1.size()); + EXPECT_EQ(int(e - b), int(b1.size())); EXPECT_TRUE(RNG_ZERO_FN(b - INC, e + INC)); // Should be all derefable (and zeroed) as opposed to possible crash. EXPECT_EQ(b, c_b); EXPECT_EQ(b, c_cb); EXPECT_EQ(b, c_ccb); EXPECT_EQ(b, d); EXPECT_EQ(b, c_d); EXPECT_EQ(e, c_e); EXPECT_EQ(e, c_ce); EXPECT_EQ(e, c_cce); From b8ba974229989590c2789de851c7b65f6df779a9 Mon Sep 17 00:00:00 2001 From: Yuri Goldfeld Date: Thu, 20 Nov 2025 18:22:51 -0800 Subject: [PATCH 05/37] (cont) Eliminate compile warning in higher gcc versions: narrowing conversion. --- src/flow/cfg/option_set.hpp | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/flow/cfg/option_set.hpp b/src/flow/cfg/option_set.hpp index e27a09585..4cd07f281 100644 --- a/src/flow/cfg/option_set.hpp +++ b/src/flow/cfg/option_set.hpp @@ -2269,10 +2269,12 @@ std::string value_set_member_id_to_opt_name_keyed(util::String_view member_id, c * [...]-by-dot-key substution is at least aesthetically perf-wasteful (those characters get replaced anyway later). * Anyway, all that aside, the following is easily defensible as clearly not having any such issues. */ return ostream_op_string - (value_set_member_id_to_opt_name(String_view{&*matched_groups[1].first, matched_groups[1].length()}), + (value_set_member_id_to_opt_name(String_view{&*matched_groups[1].first, + size_t(matched_groups[1].length())}), INDEX_SEP_BEFORE, key, // ostream<< it. 
- value_set_member_id_to_opt_name(String_view{&*matched_groups[2].first, matched_groups[2].length()})); + value_set_member_id_to_opt_name(String_view{&*matched_groups[2].first, + size_t(matched_groups[2].length())})); } // value_set_member_id_to_opt_name_keyed() } // namespace flow::cfg From cff8783c874771e61c3ca81bb5c1f1bd744dc26b Mon Sep 17 00:00:00 2001 From: Yuri Goldfeld Date: Thu, 20 Nov 2025 19:58:09 -0800 Subject: [PATCH 06/37] (cont) Eliminate compile warning in clang: obscure copy-forbidding-related problem with `optional<>` and internally used `boost::thread_specific_ptr`. / (cont) Eliminate unit-test compile warning in clang: self-assignment of `auto` object. --- src/flow/util/test/blob_test.cpp | 11 ++++++----- src/flow/util/thread_lcl.hpp | 25 +++++++++++++++++++++++-- 2 files changed, 29 insertions(+), 7 deletions(-) diff --git a/src/flow/util/test/blob_test.cpp b/src/flow/util/test/blob_test.cpp index dd3d2d631..c918efba8 100644 --- a/src/flow/util/test/blob_test.cpp +++ b/src/flow/util/test/blob_test.cpp @@ -42,6 +42,7 @@ using std::cout; using std::flush; using std::vector; using std::allocator_traits; +using std::fill_n; namespace bipc = boost::interprocess; /* Basic_blob supports SHM-friendly allocators, and we test this to some extent. The testing just @@ -274,7 +275,7 @@ TEST(Blob, Interface) // Note that other test-cases specifically test SHARING=tr EXPECT_TRUE(ALL_ZERO_FN(b1)); auto b3 = make_blob(alloc, &logger, N_SM); - std::memset(b3.begin(), ONE, b3.size()); + fill_n(b3.begin(), ONE, b3.size()); const size_t N_TN = 5; b3.resize(N_SM - N_TN, N_TN); // Structure: [N_TN][N_SM - N_TN][], all ONEs. Terms: [prefix][body][postfix]. EXPECT_TRUE(RNG_ONES_FN(b3.begin() - N_TN, b3.end())); // Ensure they're all ONEs in fact. @@ -292,7 +293,7 @@ TEST(Blob, Interface) // Note that other test-cases specifically test SHARING=tr { // Copy blob over itself (no-op). 
const auto saved_dt = b3.data(); const auto saved_start = b3.start(); const auto saved_size = b3.size(); const auto saved_cap = b3.capacity(); - b3 = b3; + b3 = static_cast(b3); // Cast to avoid warning in some compilers (auto self-assignment). EXPECT_EQ(b3.data(), saved_dt); EXPECT_EQ(b3.start(), saved_start); EXPECT_EQ(b3.size(), saved_size); EXPECT_EQ(b3.capacity(), saved_cap); } @@ -300,7 +301,7 @@ TEST(Blob, Interface) // Note that other test-cases specifically test SHARING=tr { // Copy null blob over itself (no-op). const auto saved_dt = b4.data(); const auto saved_start = b4.start(); const auto saved_size = b4.size(); const auto saved_cap = b4.capacity(); - b4 = b4; + b4 = static_cast(b4); // Cast to avoid warning in some compilers (auto self-assignment). EXPECT_EQ(b4.data(), saved_dt); EXPECT_EQ(b4.start(), saved_start); EXPECT_EQ(b4.size(), saved_size); EXPECT_EQ(b4.capacity(), saved_cap); } @@ -529,7 +530,7 @@ TEST(Blob, Interface) // Note that other test-cases specifically test SHARING=tr b1.resize(b1.capacity() - INC, INC); // [INC][N1 - INC][], all 0. EXPECT_TRUE(RNG_ZERO_FN(b1.begin() - b1.start(), b1.begin() - b1.start() + b1.capacity())); - memset(b1.begin() + INC, ONE, INC); // [INC x 0][INC x 0, INC x 1, rest x 0][]. + fill_n(b1.begin() + INC, ONE, INC); // [INC x 0][INC x 0, INC x 1, rest x 0][]. ASSERT_TRUE(RNG_ZERO_FN(b1.begin() - b1.start(), b1.begin() - b1.start() + INC + INC)) << "Sanity-check selves."; ASSERT_TRUE(RNG_ONES_FN(b1.begin() - b1.start() + INC + INC, @@ -569,7 +570,7 @@ TEST(Blob, Interface) // Note that other test-cases specifically test SHARING=tr auto b1 = make_blob(alloc, &logger, N_SM, CLEAR_ON_ALLOC); b1.resize(b1.capacity() - INC, INC); - memset(b1.begin() + INC, ONE, INC); + fill_n(b1.begin() + INC, INC, ONE); EXPECT_EQ(b1.sub_copy(b1.begin() + INC, mutable_buffer{&(DIST_VEC.front()), 0}), // Degenerate case (no-op). 
b1.begin() + INC); EXPECT_TRUE(RNG_ZERO_FN(DIST_VEC.begin(), DIST_VEC.end())); diff --git a/src/flow/util/thread_lcl.hpp b/src/flow/util/thread_lcl.hpp index 7fdd0bb1a..21e7f645d 100644 --- a/src/flow/util/thread_lcl.hpp +++ b/src/flow/util/thread_lcl.hpp @@ -133,8 +133,7 @@ namespace flow::util */ template class Thread_local_state_registry : - public log::Log_context_mt, - private boost::noncopyable + public log::Log_context_mt { public: // Types. @@ -211,6 +210,20 @@ class Thread_local_state_registry : explicit Thread_local_state_registry(log::Logger* logger_ptr, String_view nickname_str, decltype(m_create_state_func)&& create_state_func = {}); + /** + * Forbid copying. + * + * @internal + * Normally we'd derive from `boost::noncopyable`, but the combination of the crankiness of clang and + * old-schoolness of a `boost::thread_specific_ptr` copy-forbidding declaration causes a warning in at + * least some clang versions, when one wraps a `*this` in `optional<>`. + * @endinternal + * + * @param src + * See above. + */ + Thread_local_state_registry(const Thread_local_state& src) = delete; + /** * Deletes each #Thread_local_state to have been created so far by calls to this_thread_state() from various * threads (possibly but not necessarily including this thread). @@ -242,6 +255,14 @@ class Thread_local_state_registry : // Methods. + /** + * Forbid copying. + * @param src + * See above. + * @return See above. + */ + Thread_local_state& operator=(const Thread_local_state& src) = delete; + /** * Returns pointer to this thread's thread-local object, first constructing it via #m_create_state_func if * it is the first `this->this_thread_state()` call in this thread. In a given thread this shall always return From 96c116f481b7557f5c6a50ed2e2f84ba00c5f2fd Mon Sep 17 00:00:00 2001 From: Yuri Goldfeld Date: Thu, 20 Nov 2025 20:14:06 -0800 Subject: [PATCH 07/37] (cont) (Bug fix.) 
--- src/flow/util/test/blob_test.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/flow/util/test/blob_test.cpp b/src/flow/util/test/blob_test.cpp index c918efba8..465d57cb0 100644 --- a/src/flow/util/test/blob_test.cpp +++ b/src/flow/util/test/blob_test.cpp @@ -275,7 +275,7 @@ TEST(Blob, Interface) // Note that other test-cases specifically test SHARING=tr EXPECT_TRUE(ALL_ZERO_FN(b1)); auto b3 = make_blob(alloc, &logger, N_SM); - fill_n(b3.begin(), ONE, b3.size()); + fill_n(b3.begin(), b3.size(), ONE); const size_t N_TN = 5; b3.resize(N_SM - N_TN, N_TN); // Structure: [N_TN][N_SM - N_TN][], all ONEs. Terms: [prefix][body][postfix]. EXPECT_TRUE(RNG_ONES_FN(b3.begin() - N_TN, b3.end())); // Ensure they're all ONEs in fact. @@ -530,7 +530,7 @@ TEST(Blob, Interface) // Note that other test-cases specifically test SHARING=tr b1.resize(b1.capacity() - INC, INC); // [INC][N1 - INC][], all 0. EXPECT_TRUE(RNG_ZERO_FN(b1.begin() - b1.start(), b1.begin() - b1.start() + b1.capacity())); - fill_n(b1.begin() + INC, ONE, INC); // [INC x 0][INC x 0, INC x 1, rest x 0][]. + fill_n(b1.begin() + INC, INC, ONE); // [INC x 0][INC x 0, INC x 1, rest x 0][]. 
ASSERT_TRUE(RNG_ZERO_FN(b1.begin() - b1.start(), b1.begin() - b1.start() + INC + INC)) << "Sanity-check selves."; ASSERT_TRUE(RNG_ONES_FN(b1.begin() - b1.start() + INC + INC, From d8156075697fc789a761f0462c14229721a6c14d Mon Sep 17 00:00:00 2001 From: Yuri Goldfeld Date: Thu, 20 Nov 2025 21:09:43 -0800 Subject: [PATCH 08/37] (cont) (ugh) --- src/flow/util/test/blob_test.cpp | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/src/flow/util/test/blob_test.cpp b/src/flow/util/test/blob_test.cpp index 465d57cb0..d5718492f 100644 --- a/src/flow/util/test/blob_test.cpp +++ b/src/flow/util/test/blob_test.cpp @@ -42,7 +42,6 @@ using std::cout; using std::flush; using std::vector; using std::allocator_traits; -using std::fill_n; namespace bipc = boost::interprocess; /* Basic_blob supports SHM-friendly allocators, and we test this to some extent. The testing just @@ -77,6 +76,16 @@ Blob_t make_blob([[maybe_unused]] const Allocator_t* alloc_if_applicable, } } +/* @todo This std::fill_n() "replacement" should be removed, once we figure out a decent way to make some versions of + * gcc in Release-like configs stop giving nonsensical warnings like array-bounds and stringop-overflow, when we + * use std::fill_n() (and some other similar low-level ops in other tests). Ugh.... */ +template +void fill_n(uint8_t* p, size_t n, T x) +{ + const auto end = p + n; + for (; p != end; ++p) { *p = uint8_t(x); } +} + } // Anonymous namespace // Yes... this is very cheesy... but this is a test, so I don't really care. 
From 4f8bf7004af0ab7918ac5fb5e462854060f059d1 Mon Sep 17 00:00:00 2001 From: Yuri Goldfeld Date: Thu, 20 Nov 2025 21:33:04 -0800 Subject: [PATCH 09/37] (cont) (WIP) (ugh) --- src/flow/util/thread_lcl.hpp | 39 ++++++++++-------------------------- 1 file changed, 11 insertions(+), 28 deletions(-) diff --git a/src/flow/util/thread_lcl.hpp b/src/flow/util/thread_lcl.hpp index 21e7f645d..ae16408fc 100644 --- a/src/flow/util/thread_lcl.hpp +++ b/src/flow/util/thread_lcl.hpp @@ -133,7 +133,8 @@ namespace flow::util */ template class Thread_local_state_registry : - public log::Log_context_mt + public log::Log_context_mt, + private boost::noncopyable { public: // Types. @@ -210,20 +211,6 @@ class Thread_local_state_registry : explicit Thread_local_state_registry(log::Logger* logger_ptr, String_view nickname_str, decltype(m_create_state_func)&& create_state_func = {}); - /** - * Forbid copying. - * - * @internal - * Normally we'd derive from `boost::noncopyable`, but the combination of the crankiness of clang and - * old-schoolness of a `boost::thread_specific_ptr` copy-forbidding declaration causes a warning in at - * least some clang versions, when one wraps a `*this` in `optional<>`. - * @endinternal - * - * @param src - * See above. - */ - Thread_local_state_registry(const Thread_local_state& src) = delete; - /** * Deletes each #Thread_local_state to have been created so far by calls to this_thread_state() from various * threads (possibly but not necessarily including this thread). @@ -255,14 +242,6 @@ class Thread_local_state_registry : // Methods. - /** - * Forbid copying. - * @param src - * See above. - * @return See above. - */ - Thread_local_state& operator=(const Thread_local_state& src) = delete; - /** * Returns pointer to this thread's thread-local object, first constructing it via #m_create_state_func if * it is the first `this->this_thread_state()` call in this thread. 
In a given thread this shall always return @@ -642,7 +621,7 @@ class Thread_local_state_registry : * As for the the stuff in `m_this_thread_state_or_null.get()` other than `p` -- the Tl_context surrounding it -- * again: see Tl_context doc header. */ - boost::thread_specific_ptr m_this_thread_state_or_null; + //XXXno boost::thread_specific_ptr m_this_thread_state_or_null; /// The non-thread-local state. See Registry_ctl docs. `shared_ptr` is used only for `weak_ptr`. boost::shared_ptr m_ctl; @@ -879,7 +858,7 @@ Thread_local_state_registry::Thread_local_state_registry m_create_state_func(std::move(create_state_func)), m_nickname(nickname_str), - m_this_thread_state_or_null(cleanup), + //XXXno m_this_thread_state_or_null(cleanup), m_ctl(boost::make_shared()) { FLOW_LOG_INFO("Tl_registry[" << *this << "]: " @@ -890,14 +869,17 @@ template typename Thread_local_state_registry::Thread_local_state* Thread_local_state_registry::this_thread_state_or_null() { - const auto ctx = m_this_thread_state_or_null.get(); - return ctx ? ctx->m_state : nullptr; + return nullptr;/*XXXno const auto ctx = m_this_thread_state_or_null.get(); + return ctx ? ctx->m_state : nullptr;*/ } template typename Thread_local_state_registry::Thread_local_state* Thread_local_state_registry::this_thread_state() { + return nullptr; +//XXXno +#if 0 using log::Logger; auto ctx = m_this_thread_state_or_null.get(); @@ -985,6 +967,7 @@ typename Thread_local_state_registry::Thread_local_state* // else if (ctx) { Fast path: state already init-ed. Do not log or do anything unnecessary. } return ctx->m_state; +#endif } // Thread_local_state_registry::this_thread_state() template @@ -1026,7 +1009,7 @@ Thread_local_state_registry::~Thread_local_state_registry( * can come of that really. We could try to prevent it by doing m_this_thread_state_or_null.reset()... but * same result. Instead we do the following which simply replaces the stored (now bogus) ptr with null, and * that's it. 
We already deleted it, so that's perfect. */ - m_this_thread_state_or_null.release(); + //XXXno m_this_thread_state_or_null.release(); // After the }, m_ctl is nullified, and lastly m_this_thread_state_or_null is destroyed (a no-op in our context). } // Thread_local_state_registry::~Thread_local_state_registry() From 0f30010131b76abbe9f519447954800cf85b0e0f Mon Sep 17 00:00:00 2001 From: Yuri Goldfeld Date: Thu, 20 Nov 2025 22:02:30 -0800 Subject: [PATCH 10/37] (cont) (WIP) (ugh) --- src/flow/util/thread_lcl.hpp | 33 ++++++++++++++++++++++----------- 1 file changed, 22 insertions(+), 11 deletions(-) diff --git a/src/flow/util/thread_lcl.hpp b/src/flow/util/thread_lcl.hpp index ae16408fc..f40821787 100644 --- a/src/flow/util/thread_lcl.hpp +++ b/src/flow/util/thread_lcl.hpp @@ -554,6 +554,21 @@ class Thread_local_state_registry : Thread_local_state* m_state; }; + //XXX + struct Tsp_wrapper + { + boost::thread_specific_ptr m_tsp; + + typename + Tsp_wrapper(Ctor_args&&... ctor_args) + { + // Yeah. + } + + Tsp_wrapper(const Tsp_wrapper& src) = delete; + Tsp_wrapper& operator=(const Tsp_wrapper& src) = delete; + }; + // Methods. /** @@ -621,7 +636,7 @@ class Thread_local_state_registry : * As for the the stuff in `m_this_thread_state_or_null.get()` other than `p` -- the Tl_context surrounding it -- * again: see Tl_context doc header. */ - //XXXno boost::thread_specific_ptr m_this_thread_state_or_null; + Tsp_wrapper m_this_thread_state_or_null;//XXX /// The non-thread-local state. See Registry_ctl docs. `shared_ptr` is used only for `weak_ptr`. 
boost::shared_ptr m_ctl; @@ -858,7 +873,7 @@ Thread_local_state_registry::Thread_local_state_registry m_create_state_func(std::move(create_state_func)), m_nickname(nickname_str), - //XXXno m_this_thread_state_or_null(cleanup), + m_this_thread_state_or_null(cleanup), m_ctl(boost::make_shared()) { FLOW_LOG_INFO("Tl_registry[" << *this << "]: " @@ -869,20 +884,17 @@ template typename Thread_local_state_registry::Thread_local_state* Thread_local_state_registry::this_thread_state_or_null() { - return nullptr;/*XXXno const auto ctx = m_this_thread_state_or_null.get(); - return ctx ? ctx->m_state : nullptr;*/ + const auto ctx = m_this_thread_state_or_null.m_tsp.get(); + return ctx ? ctx->m_state : nullptr; } template typename Thread_local_state_registry::Thread_local_state* Thread_local_state_registry::this_thread_state() { - return nullptr; -//XXXno -#if 0 using log::Logger; - auto ctx = m_this_thread_state_or_null.get(); + auto ctx = m_this_thread_state_or_null.m_tsp.get(); if (!ctx) { // (Slow-path. It is OK to log and do other not-so-fast things.) @@ -937,7 +949,7 @@ typename Thread_local_state_registry::Thread_local_state* ctx->m_ctl_observer = m_ctl; ctx->m_state = create_state_func(); - m_this_thread_state_or_null.reset(ctx); + m_this_thread_state_or_null.m_tsp.reset(ctx); /* Now to set up the later cleanup, either at thread-exit, or from our ~dtor(), whichever happens first; * and also to provide access to us via enumeration via state_per_thread(). */ @@ -967,7 +979,6 @@ typename Thread_local_state_registry::Thread_local_state* // else if (ctx) { Fast path: state already init-ed. Do not log or do anything unnecessary. } return ctx->m_state; -#endif } // Thread_local_state_registry::this_thread_state() template @@ -1009,7 +1020,7 @@ Thread_local_state_registry::~Thread_local_state_registry( * can come of that really. We could try to prevent it by doing m_this_thread_state_or_null.reset()... but * same result. 
Instead we do the following which simply replaces the stored (now bogus) ptr with null, and * that's it. We already deleted it, so that's perfect. */ - //XXXno m_this_thread_state_or_null.release(); + m_this_thread_state_or_null.m_tsp.release(); // After the }, m_ctl is nullified, and lastly m_this_thread_state_or_null is destroyed (a no-op in our context). } // Thread_local_state_registry::~Thread_local_state_registry() From 1ee786aa1a486ada494d27f684927f6a568475d6 Mon Sep 17 00:00:00 2001 From: Yuri Goldfeld Date: Thu, 20 Nov 2025 22:06:34 -0800 Subject: [PATCH 11/37] (cont) (WIP) (ugh) --- src/flow/util/thread_lcl.hpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/flow/util/thread_lcl.hpp b/src/flow/util/thread_lcl.hpp index f40821787..4e17a460b 100644 --- a/src/flow/util/thread_lcl.hpp +++ b/src/flow/util/thread_lcl.hpp @@ -559,8 +559,9 @@ class Thread_local_state_registry : { boost::thread_specific_ptr m_tsp; - typename - Tsp_wrapper(Ctor_args&&... ctor_args) + template + Tsp_wrapper(Ctor_args&&... ctor_args) : + m_tsp(std::forward(ctor_args)...) { // Yeah. } From 8e0227bc55b25e98af0372cc2d6fe2ab74a5d8bc Mon Sep 17 00:00:00 2001 From: Yuri Goldfeld Date: Thu, 20 Nov 2025 22:25:01 -0800 Subject: [PATCH 12/37] (cont) Eliminate unit-test compile warning in higher gcc versions: a nonsensical array-bounds warning. 
--- src/flow/util/test/blob_test.cpp | 26 +++++++++++++++++++++----- 1 file changed, 21 insertions(+), 5 deletions(-) diff --git a/src/flow/util/test/blob_test.cpp b/src/flow/util/test/blob_test.cpp index d5718492f..73e7d1559 100644 --- a/src/flow/util/test/blob_test.cpp +++ b/src/flow/util/test/blob_test.cpp @@ -78,12 +78,28 @@ Blob_t make_blob([[maybe_unused]] const Allocator_t* alloc_if_applicable, /* @todo This std::fill_n() "replacement" should be removed, once we figure out a decent way to make some versions of * gcc in Release-like configs stop giving nonsensical warnings like array-bounds and stringop-overflow, when we - * use std::fill_n() (and some other similar low-level ops in other tests). Ugh.... */ -template -void fill_n(uint8_t* p, size_t n, T x) + * use std::fill_n() (and some other similar low-level ops in other tests). Ugh.... + * @todo Until then, maybe it's best to actually make this a flow::util, so it's reusable by everyone. Same + * with other similar things; search all-over for `-Warray-bounds` and/or `-Wstringop-overflow`. */ +template +void fill_n(C* p, size_t n, T x) { - const auto end = p + n; - for (; p != end; ++p) { *p = uint8_t(x); } + /* Note: std::memset() doesn't defeat the warnings. std::fill_n() doesn't either: internally it reduces to + * std::memset() anyway in this specialization. You know what else? Even doing + * const auto end = p + n; for (; p != end; ++p) { *p = static_cast(x); } + * does not defeat it either: gcc detects what this is doing and replaces it with memset()... which the front-end + * BSingly warns about, making us angry. + * + * No choice but to pragma. + * (A glance at the gcc bug database shows this particular set or warnings is not the most robust thing ever + * and has(d) both reporting bugs and a penchant for paranoia.) */ +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wpragmas" // For older versions, where the following does not exist/cannot be disabled. 
+#pragma GCC diagnostic ignored "-Wunknown-warning-option" // (Similarly for clang.) +#pragma GCC diagnostic ignored "-Warray-bounds" +#pragma GCC diagnostic ignored "-Wstringop-overflow" // This one pops up too, if the preceding is pragma-ed out. + std::fill_n(p, n, static_cast(x)); +#pragma GCC diagnostic pop } } // Anonymous namespace From 6231cab0b1560bf10c91f5150d5a2f0e39e542d7 Mon Sep 17 00:00:00 2001 From: Yuri Goldfeld Date: Thu, 20 Nov 2025 22:38:17 -0800 Subject: [PATCH 13/37] (cont) (WIP) (ugh) --- src/flow/util/test/blob_test.cpp | 8 +++---- src/flow/util/thread_lcl.hpp | 39 ++++++++++++++++++++++++-------- 2 files changed, 33 insertions(+), 14 deletions(-) diff --git a/src/flow/util/test/blob_test.cpp b/src/flow/util/test/blob_test.cpp index 73e7d1559..214d84499 100644 --- a/src/flow/util/test/blob_test.cpp +++ b/src/flow/util/test/blob_test.cpp @@ -82,7 +82,7 @@ Blob_t make_blob([[maybe_unused]] const Allocator_t* alloc_if_applicable, * @todo Until then, maybe it's best to actually make this a flow::util, so it's reusable by everyone. Same * with other similar things; search all-over for `-Warray-bounds` and/or `-Wstringop-overflow`. */ template -void fill_n(C* p, size_t n, T x) +void fill_n_ffs(C* p, size_t n, T x) { /* Note: std::memset() doesn't defeat the warnings. std::fill_n() doesn't either: internally it reduces to * std::memset() anyway in this specialization. You know what else? Even doing @@ -300,7 +300,7 @@ TEST(Blob, Interface) // Note that other test-cases specifically test SHARING=tr EXPECT_TRUE(ALL_ZERO_FN(b1)); auto b3 = make_blob(alloc, &logger, N_SM); - fill_n(b3.begin(), b3.size(), ONE); + fill_n_ffs(b3.begin(), b3.size(), ONE); const size_t N_TN = 5; b3.resize(N_SM - N_TN, N_TN); // Structure: [N_TN][N_SM - N_TN][], all ONEs. Terms: [prefix][body][postfix]. EXPECT_TRUE(RNG_ONES_FN(b3.begin() - N_TN, b3.end())); // Ensure they're all ONEs in fact. 
@@ -555,7 +555,7 @@ TEST(Blob, Interface) // Note that other test-cases specifically test SHARING=tr b1.resize(b1.capacity() - INC, INC); // [INC][N1 - INC][], all 0. EXPECT_TRUE(RNG_ZERO_FN(b1.begin() - b1.start(), b1.begin() - b1.start() + b1.capacity())); - fill_n(b1.begin() + INC, INC, ONE); // [INC x 0][INC x 0, INC x 1, rest x 0][]. + fill_n_ffs(b1.begin() + INC, INC, ONE); // [INC x 0][INC x 0, INC x 1, rest x 0][]. ASSERT_TRUE(RNG_ZERO_FN(b1.begin() - b1.start(), b1.begin() - b1.start() + INC + INC)) << "Sanity-check selves."; ASSERT_TRUE(RNG_ONES_FN(b1.begin() - b1.start() + INC + INC, @@ -595,7 +595,7 @@ TEST(Blob, Interface) // Note that other test-cases specifically test SHARING=tr auto b1 = make_blob(alloc, &logger, N_SM, CLEAR_ON_ALLOC); b1.resize(b1.capacity() - INC, INC); - fill_n(b1.begin() + INC, INC, ONE); + fill_n_ffs(b1.begin() + INC, INC, ONE); EXPECT_EQ(b1.sub_copy(b1.begin() + INC, mutable_buffer{&(DIST_VEC.front()), 0}), // Degenerate case (no-op). b1.begin() + INC); EXPECT_TRUE(RNG_ZERO_FN(DIST_VEC.begin(), DIST_VEC.end())); diff --git a/src/flow/util/thread_lcl.hpp b/src/flow/util/thread_lcl.hpp index 4e17a460b..c15a6a011 100644 --- a/src/flow/util/thread_lcl.hpp +++ b/src/flow/util/thread_lcl.hpp @@ -554,20 +554,31 @@ class Thread_local_state_registry : Thread_local_state* m_state; }; - //XXX - struct Tsp_wrapper + /** + * Simply wraps a `boost::thread_specific_ptr`, adding absolutely no data or algorithms, purely to + * work-around a combination of (some?) clang versions and (some?) GNU STL impls giving a bogus compile error, + * when one tries `optional`. The error is + * "the parameter for this explicitly-defaulted copy constructor is const, but a member or base requires it to be + * non-const" and references impl details of STL's `optional`. 
The solution is to wrap the thing in a thing + * that is itself already noncopyable in the proper way, unlike `thread_specific_ptr` (which, at least as of + * Boost-1.87, still has a copy-forbidding ctor/assigner that takes non-`const` ref). + */ + struct Tsp_wrapper : private boost::noncopyable { + // Data. + + /// What we wrap and forward-to-and-fro. boost::thread_specific_ptr m_tsp; - template - Tsp_wrapper(Ctor_args&&... ctor_args) : - m_tsp(std::forward(ctor_args)...) - { - // Yeah. - } + // Constructors/destructor. - Tsp_wrapper(const Tsp_wrapper& src) = delete; - Tsp_wrapper& operator=(const Tsp_wrapper& src) = delete; + /** + * Constructs payload. + * @param ctor_args + * Args to #m_tsp ctor. + */ + template + Tsp_wrapper(Ctor_args&&... ctor_args); }; // Methods. @@ -1119,6 +1130,14 @@ void Thread_local_state_registry::set_logger(log::Logger* } } // Thread_local_state_registry::set_logger() +template +template +Thread_local_state_registry::Tsp_wrapper::Tsp_wrapper(Ctor_args&&... ctor_args) : + m_tsp(std::forward(ctor_args)...) +{ + // Yeah. +} + template std::ostream& operator<<(std::ostream& os, const Thread_local_state_registry& val) { From 3d95d0bfbfde23ed276fbaf67e21441925035733 Mon Sep 17 00:00:00 2001 From: Yuri Goldfeld Date: Thu, 20 Nov 2025 23:01:31 -0800 Subject: [PATCH 14/37] (cont) (WIP) (ugh) --- src/flow/util/test/blob_test.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/flow/util/test/blob_test.cpp b/src/flow/util/test/blob_test.cpp index 214d84499..b5c168422 100644 --- a/src/flow/util/test/blob_test.cpp +++ b/src/flow/util/test/blob_test.cpp @@ -98,6 +98,7 @@ void fill_n_ffs(C* p, size_t n, T x) #pragma GCC diagnostic ignored "-Wunknown-warning-option" // (Similarly for clang.) #pragma GCC diagnostic ignored "-Warray-bounds" #pragma GCC diagnostic ignored "-Wstringop-overflow" // This one pops up too, if the preceding is pragma-ed out. 
+#pragma GCC diagnostic ignored "-Wrestrict" // Another similar bogus one pops up after pragma-ing away preceding one. std::fill_n(p, n, static_cast(x)); #pragma GCC diagnostic pop } From d8d21ae268ba7598340a84f0b596bc34cb6a6788 Mon Sep 17 00:00:00 2001 From: Yuri Goldfeld Date: Thu, 20 Nov 2025 23:04:03 -0800 Subject: [PATCH 15/37] (cont) (WIP) (ugh) --- src/flow/util/thread_lcl.hpp | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/src/flow/util/thread_lcl.hpp b/src/flow/util/thread_lcl.hpp index c15a6a011..31dc93a15 100644 --- a/src/flow/util/thread_lcl.hpp +++ b/src/flow/util/thread_lcl.hpp @@ -563,7 +563,7 @@ class Thread_local_state_registry : * that is itself already noncopyable in the proper way, unlike `thread_specific_ptr` (which, at least as of * Boost-1.87, still has a copy-forbidding ctor/assigner that takes non-`const` ref). */ - struct Tsp_wrapper : private boost::noncopyable + struct Tsp_wrapper { // Data. @@ -579,7 +579,23 @@ class Thread_local_state_registry : */ template Tsp_wrapper(Ctor_args&&... ctor_args); - }; + + // Methods. + + /** + * Forbid copy. + * @param src + * Yeah. + */ + Tsp_wrapper(const Tsp_wrapper& src) = delete; + /** + * Forbid copy. + * @param src + * Yeah. + * @return Right. + */ + Tsp_wrapper& operator=(const Tsp_wrapper& src) = delete; + }; // struct Tsp_wrapper // Methods. 
From 47ede0713ff1e3b04262520b2067549355fec4d1 Mon Sep 17 00:00:00 2001 From: Yuri Goldfeld Date: Fri, 21 Nov 2025 00:31:12 -0800 Subject: [PATCH 16/37] (cont) (WIP) (ugh) --- src/flow/util/test/blob_test.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/flow/util/test/blob_test.cpp b/src/flow/util/test/blob_test.cpp index b5c168422..fbf26ef3f 100644 --- a/src/flow/util/test/blob_test.cpp +++ b/src/flow/util/test/blob_test.cpp @@ -301,7 +301,7 @@ TEST(Blob, Interface) // Note that other test-cases specifically test SHARING=tr EXPECT_TRUE(ALL_ZERO_FN(b1)); auto b3 = make_blob(alloc, &logger, N_SM); - fill_n_ffs(b3.begin(), b3.size(), ONE); + std::fill_n(b3.begin(), b3.size(), ONE); const size_t N_TN = 5; b3.resize(N_SM - N_TN, N_TN); // Structure: [N_TN][N_SM - N_TN][], all ONEs. Terms: [prefix][body][postfix]. EXPECT_TRUE(RNG_ONES_FN(b3.begin() - N_TN, b3.end())); // Ensure they're all ONEs in fact. @@ -556,7 +556,7 @@ TEST(Blob, Interface) // Note that other test-cases specifically test SHARING=tr b1.resize(b1.capacity() - INC, INC); // [INC][N1 - INC][], all 0. EXPECT_TRUE(RNG_ZERO_FN(b1.begin() - b1.start(), b1.begin() - b1.start() + b1.capacity())); - fill_n_ffs(b1.begin() + INC, INC, ONE); // [INC x 0][INC x 0, INC x 1, rest x 0][]. + std::fill_n(b1.begin() + INC, INC, ONE); // [INC x 0][INC x 0, INC x 1, rest x 0][]. ASSERT_TRUE(RNG_ZERO_FN(b1.begin() - b1.start(), b1.begin() - b1.start() + INC + INC)) << "Sanity-check selves."; ASSERT_TRUE(RNG_ONES_FN(b1.begin() - b1.start() + INC + INC, @@ -596,7 +596,7 @@ TEST(Blob, Interface) // Note that other test-cases specifically test SHARING=tr auto b1 = make_blob(alloc, &logger, N_SM, CLEAR_ON_ALLOC); b1.resize(b1.capacity() - INC, INC); - fill_n_ffs(b1.begin() + INC, INC, ONE); + std::fill_n(b1.begin() + INC, INC, ONE); EXPECT_EQ(b1.sub_copy(b1.begin() + INC, mutable_buffer{&(DIST_VEC.front()), 0}), // Degenerate case (no-op). 
b1.begin() + INC); EXPECT_TRUE(RNG_ZERO_FN(DIST_VEC.begin(), DIST_VEC.end())); From 191bc9bff466050069b19a32a26083f6c784d34f Mon Sep 17 00:00:00 2001 From: Yuri Goldfeld Date: Fri, 21 Nov 2025 00:39:52 -0800 Subject: [PATCH 17/37] (cont) (WIP) (ugh) --- test/suite/unit_test/CMakeLists.txt | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/test/suite/unit_test/CMakeLists.txt b/test/suite/unit_test/CMakeLists.txt index dc661411b..7d61c4bea 100644 --- a/test/suite/unit_test/CMakeLists.txt +++ b/test/suite/unit_test/CMakeLists.txt @@ -45,6 +45,14 @@ target_include_directories(${name} PRIVATE # Do stuff we've resolved to do on all our targets. common_set_target_properties(${name}) +# XXX +if((CMAKE_CXX_COMPILER_ID STREQUAL "GNU") AND + (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 11) AND (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 12)) + set_source_files_properties(${PROJECT_SOURCE_DIR}/src/flow/util/test/blob_test.cpp + PROPERTIES + COMPILE_OPTIONS "-Wno-array-bounds;-Wno-stringop-overflow") +endif() + # Link good ol' flow; and gtest. target_link_libraries(${name} PRIVATE flow ${GTEST_LIB}) # At least blob_test.cpp requires boost.interprocess SHM ops which require this lib. 
(Flow itself doesn't as of From 62fa16c13602c08b8f275fb1f784e6b29a19a760 Mon Sep 17 00:00:00 2001 From: Yuri Goldfeld Date: Fri, 21 Nov 2025 01:37:02 -0800 Subject: [PATCH 18/37] (cont) (WIP) (ugh) --- src/flow/util/test/blob_test.cpp | 2 +- test/suite/unit_test/CMakeLists.txt | 18 +++++++++++++----- 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/src/flow/util/test/blob_test.cpp b/src/flow/util/test/blob_test.cpp index fbf26ef3f..afa4c3fd1 100644 --- a/src/flow/util/test/blob_test.cpp +++ b/src/flow/util/test/blob_test.cpp @@ -76,7 +76,7 @@ Blob_t make_blob([[maybe_unused]] const Allocator_t* alloc_if_applicable, } } -/* @todo This std::fill_n() "replacement" should be removed, once we figure out a decent way to make some versions of +/* XXX@todo This std::fill_n() "replacement" should be removed, once we figure out a decent way to make some versions of * gcc in Release-like configs stop giving nonsensical warnings like array-bounds and stringop-overflow, when we * use std::fill_n() (and some other similar low-level ops in other tests). Ugh.... * @todo Until then, maybe it's best to actually make this a flow::util, so it's reusable by everyone. 
Same diff --git a/test/suite/unit_test/CMakeLists.txt b/test/suite/unit_test/CMakeLists.txt index 7d61c4bea..4056482c7 100644 --- a/test/suite/unit_test/CMakeLists.txt +++ b/test/suite/unit_test/CMakeLists.txt @@ -46,11 +46,19 @@ target_include_directories(${name} PRIVATE common_set_target_properties(${name}) # XXX -if((CMAKE_CXX_COMPILER_ID STREQUAL "GNU") AND - (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 11) AND (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 12)) - set_source_files_properties(${PROJECT_SOURCE_DIR}/src/flow/util/test/blob_test.cpp - PROPERTIES - COMPILE_OPTIONS "-Wno-array-bounds;-Wno-stringop-overflow") +if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") + block() + set(ver ${CMAKE_CXX_COMPILER_VERSION}) + set(evil_file ${PROJECT_SOURCE_DIR}/src/flow/util/test/blob_test.cpp) + if((ver VERSION_GREATER_EQUAL 11) AND (ver VERSION_LESS 12)) + set(evil_opts "-Wno-stringop-overflow") + elseif((ver VERSION_GREATER_EQUAL 13) AND (ver VERSION_LESS 15)) + set(evil_opts "-Wno-array-bounds") + endif() + if(DEFINED evil_opts) + set_source_files_properties(${evil_file} PROPERTIES COMPILE_OPTIONS "${evil_opts}") + endif() + endblock() endif() # Link good ol' flow; and gtest. From 3afef73f0c927f3bc5adbaf16e92a38d8aa164f2 Mon Sep 17 00:00:00 2001 From: Yuri Goldfeld Date: Fri, 21 Nov 2025 13:31:07 -0800 Subject: [PATCH 19/37] (cont) (WIP) (ugh) --- .github/workflows/main.yml | 2 ++ test/suite/unit_test/CMakeLists.txt | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 7bf81223d..8b4c67749 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -572,6 +572,8 @@ jobs: # and will grow. The techniques will still apply. - name: Run test/demo [NetFlow echo] + if: | + !cancelled() run: | # Run test/demo [NetFlow echo]. 
cd ${{ env.install-dir }}/bin diff --git a/test/suite/unit_test/CMakeLists.txt b/test/suite/unit_test/CMakeLists.txt index 4056482c7..817765d2e 100644 --- a/test/suite/unit_test/CMakeLists.txt +++ b/test/suite/unit_test/CMakeLists.txt @@ -53,7 +53,7 @@ if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") if((ver VERSION_GREATER_EQUAL 11) AND (ver VERSION_LESS 12)) set(evil_opts "-Wno-stringop-overflow") elseif((ver VERSION_GREATER_EQUAL 13) AND (ver VERSION_LESS 15)) - set(evil_opts "-Wno-array-bounds") + set(evil_opts "-Wno-array-bounds;-Wno-stringop-overflow") endif() if(DEFINED evil_opts) set_source_files_properties(${evil_file} PROPERTIES COMPILE_OPTIONS "${evil_opts}") From 1da4b949642681eb03be4103b4905cddb664fcff Mon Sep 17 00:00:00 2001 From: Yuri Goldfeld Date: Fri, 21 Nov 2025 14:03:31 -0800 Subject: [PATCH 20/37] (cont) (WIP) --- .github/workflows/main.yml | 4 ++++ src/flow/util/test/blob_test.cpp | 27 ------------------------ src/flow/util/thread_lcl.hpp | 2 +- src/sanitize/asan/suppressions_clang.cfg | 25 ++++++++++++++++++++++ test/suite/unit_test/CMakeLists.txt | 17 +++++++++++++-- 5 files changed, 45 insertions(+), 30 deletions(-) create mode 100644 src/sanitize/asan/suppressions_clang.cfg diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 8b4c67749..82bc94390 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -401,8 +401,12 @@ jobs: san-suppress-cfg-in-file2: sanitize/${{ matrix.build-test-cfg.sanitizer-name }}/suppressions_${{ matrix.compiler.name }}_${{ matrix.compiler.version }}.cfg setup-tests-env: | if [ '${{ matrix.build-test-cfg.sanitizer-name }}' = asan ]; then + export SAN_SUPP=1 + export SAN_SUPP_CFG=${{ github.workspace }}/install/${{ matrix.build-test-cfg.conan-profile-build-type }}/usr/local/bin/san_suppressions.cfg export ASAN_OPTIONS='disable_coredump=0' + export LSAN_OPTIONS='suppressions=$SAN_SUPP_CFG' echo "ASAN_OPTIONS = [$ASAN_OPTIONS]." + echo "LSAN_OPTIONS = [$LSAN_OPTIONS]." 
elif [ '${{ matrix.build-test-cfg.sanitizer-name }}' = ubsan ]; then export SAN_SUPP=1 export SAN_SUPP_CFG=${{ github.workspace }}/install/${{ matrix.build-test-cfg.conan-profile-build-type }}/usr/local/bin/san_suppressions.cfg diff --git a/src/flow/util/test/blob_test.cpp b/src/flow/util/test/blob_test.cpp index afa4c3fd1..1921afb92 100644 --- a/src/flow/util/test/blob_test.cpp +++ b/src/flow/util/test/blob_test.cpp @@ -76,33 +76,6 @@ Blob_t make_blob([[maybe_unused]] const Allocator_t* alloc_if_applicable, } } -/* XXX@todo This std::fill_n() "replacement" should be removed, once we figure out a decent way to make some versions of - * gcc in Release-like configs stop giving nonsensical warnings like array-bounds and stringop-overflow, when we - * use std::fill_n() (and some other similar low-level ops in other tests). Ugh.... - * @todo Until then, maybe it's best to actually make this a flow::util, so it's reusable by everyone. Same - * with other similar things; search all-over for `-Warray-bounds` and/or `-Wstringop-overflow`. */ -template -void fill_n_ffs(C* p, size_t n, T x) -{ - /* Note: std::memset() doesn't defeat the warnings. std::fill_n() doesn't either: internally it reduces to - * std::memset() anyway in this specialization. You know what else? Even doing - * const auto end = p + n; for (; p != end; ++p) { *p = static_cast(x); } - * does not defeat it either: gcc detects what this is doing and replaces it with memset()... which the front-end - * BSingly warns about, making us angry. - * - * No choice but to pragma. - * (A glance at the gcc bug database shows this particular set or warnings is not the most robust thing ever - * and has(d) both reporting bugs and a penchant for paranoia.) */ -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wpragmas" // For older versions, where the following does not exist/cannot be disabled. -#pragma GCC diagnostic ignored "-Wunknown-warning-option" // (Similarly for clang.) 
-#pragma GCC diagnostic ignored "-Warray-bounds" -#pragma GCC diagnostic ignored "-Wstringop-overflow" // This one pops up too, if the preceding is pragma-ed out. -#pragma GCC diagnostic ignored "-Wrestrict" // Another similar bogus one pops up after pragma-ing away preceding one. - std::fill_n(p, n, static_cast(x)); -#pragma GCC diagnostic pop -} - } // Anonymous namespace // Yes... this is very cheesy... but this is a test, so I don't really care. diff --git a/src/flow/util/thread_lcl.hpp b/src/flow/util/thread_lcl.hpp index 31dc93a15..d3c8f3105 100644 --- a/src/flow/util/thread_lcl.hpp +++ b/src/flow/util/thread_lcl.hpp @@ -664,7 +664,7 @@ class Thread_local_state_registry : * As for the the stuff in `m_this_thread_state_or_null.get()` other than `p` -- the Tl_context surrounding it -- * again: see Tl_context doc header. */ - Tsp_wrapper m_this_thread_state_or_null;//XXX + Tsp_wrapper m_this_thread_state_or_null; /// The non-thread-local state. See Registry_ctl docs. `shared_ptr` is used only for `weak_ptr`. boost::shared_ptr m_ctl; diff --git a/src/sanitize/asan/suppressions_clang.cfg b/src/sanitize/asan/suppressions_clang.cfg new file mode 100644 index 000000000..403784d4c --- /dev/null +++ b/src/sanitize/asan/suppressions_clang.cfg @@ -0,0 +1,25 @@ +# Flow +# Copyright 2023 Akamai Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in +# compliance with the License. You may obtain a copy +# of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in +# writing, software distributed under the License is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +# CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing +# permissions and limitations under the License. + +# Current version assumption: clang-15/16/17. 
+ +# XXXHad some issues matching ^ and $ in this one; leaving them out; these are very unlikely to match something +# unintentional. + +# XXX +leak:flow::util::Thread_local_state_registry.*this_thread_state +leak:flow::util::Thread_local_state_registry::.*Thread_local_state_registry diff --git a/test/suite/unit_test/CMakeLists.txt b/test/suite/unit_test/CMakeLists.txt index 817765d2e..b323009c1 100644 --- a/test/suite/unit_test/CMakeLists.txt +++ b/test/suite/unit_test/CMakeLists.txt @@ -45,14 +45,27 @@ target_include_directories(${name} PRIVATE # Do stuff we've resolved to do on all our targets. common_set_target_properties(${name}) -# XXX +# In this particular test, in Release and MinSizeRel configs, some versions of gcc issue buggy front-end-driven +# mem-op-optimization-related warnings. Similar problems were observed in a few other places, including as of +# this writing the production code basic_blob.hpp; but in this case attempts to defeat them (in this case +# the calls to std::fill_n) with pragmas, or direct std::memcpy, or even manual looping all failed. Since it is +# mere test code, it's fine to defeat it via ignoring specific warnings. (Note: This class of gcc bugs appears to +# have been eliminated by gcc-15, based on gcc Bugzilla tickets, but we don't know. As of this writing CI pipeline +# does, gcc-wise, versions 9, 10, 11, 13. For now we are conservative, in the sense that we do this for the +# versions where we know there are these specific issues. They might exist in versions the pipeline does not test +# as of this writing. TODO: Possibly either just say screw it and do it for all gccs, or find the ~exact versions. +# Or let it fail, if pipeline/people's non-pipeline builds expose more versions with the problem, and add here at +# that point.) 
if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") block() set(ver ${CMAKE_CXX_COMPILER_VERSION}) set(evil_file ${PROJECT_SOURCE_DIR}/src/flow/util/test/blob_test.cpp) if((ver VERSION_GREATER_EQUAL 11) AND (ver VERSION_LESS 12)) + # stringop-overflow in form A observed in latest gcc-11, not 13, 9, 10. set(evil_opts "-Wno-stringop-overflow") - elseif((ver VERSION_GREATER_EQUAL 13) AND (ver VERSION_LESS 15)) + elseif((ver VERSION_GREATER_EQUAL 13) AND (ver VERSION_LESS 14)) + # stringop-overflow in form B observed in gcc-13, not 11, 9, 10. + # array-bounds, same. set(evil_opts "-Wno-array-bounds;-Wno-stringop-overflow") endif() if(DEFINED evil_opts) From f4ca3f7d00e0815dc8b525257450d5bb14d6a7eb Mon Sep 17 00:00:00 2001 From: Yuri Goldfeld Date: Fri, 21 Nov 2025 14:08:12 -0800 Subject: [PATCH 21/37] (cont) (WIP) --- .github/workflows/main.yml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 82bc94390..482794943 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -289,6 +289,16 @@ jobs: exclude: # Firstly the exclusions from the C++17 sub-matrix (the other being the C++20 sub-matrix). # Again, these exclusions are explained in Flow-IPC workflow counterpart. 
+ # XXX + - compiler: { id: gcc-9 } + - compiler: { id: gcc-10 } + - compiler: { id: gcc-11 } + - compiler: { id: gcc-13 } + - build-test-cfg: { id: release } + - build-test-cfg: { id: debug } + - build-test-cfg: { id: relwithdebinfo } + - build-test-cfg: { id: relwithdebinfo-ubsan } + - build-test-cfg: { id: minsizerel } - cxx-std: { id: cxx17 } compiler: { id: gcc-9 } build-test-cfg: { id: relwithdebinfo-asan } From 776ccc0bb8c990794fde6fee673d8d4ea8acfa83 Mon Sep 17 00:00:00 2001 From: Yuri Goldfeld Date: Fri, 21 Nov 2025 14:23:19 -0800 Subject: [PATCH 22/37] (cont) (WIP) --- .github/workflows/main.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 482794943..f95aa178d 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -414,7 +414,7 @@ jobs: export SAN_SUPP=1 export SAN_SUPP_CFG=${{ github.workspace }}/install/${{ matrix.build-test-cfg.conan-profile-build-type }}/usr/local/bin/san_suppressions.cfg export ASAN_OPTIONS='disable_coredump=0' - export LSAN_OPTIONS='suppressions=$SAN_SUPP_CFG' + export LSAN_OPTIONS="suppressions=$SAN_SUPP_CFG" echo "ASAN_OPTIONS = [$ASAN_OPTIONS]." echo "LSAN_OPTIONS = [$LSAN_OPTIONS]." elif [ '${{ matrix.build-test-cfg.sanitizer-name }}' = ubsan ]; then @@ -628,6 +628,7 @@ jobs: mkdir -p $OUT_DIR SUPP_DIR_A=${{ github.workspace }}/src # As of this writing there are TSAN suppressions for this test specifically. TODO: Revisit them; and then this. + # Update: Now there are also ASAN (LSAN) suppressions. These are likely permanent as of this writing. 
SUPP_DIR_OWN=${{ github.workspace }}/test/suite/unit_test { cat $SUPP_DIR_A/${{ env.san-suppress-cfg-in-file1 }} $SUPP_DIR_A/${{ env.san-suppress-cfg-in-file2 }} \ $SUPP_DIR_OWN/${{ env.san-suppress-cfg-in-file1 }} $SUPP_DIR_OWN/${{ env.san-suppress-cfg-in-file2 }} \ From e1d4fd065c6e924afdf2a0a05f7821bbb7d4ee61 Mon Sep 17 00:00:00 2001 From: Yuri Goldfeld Date: Fri, 21 Nov 2025 14:47:38 -0800 Subject: [PATCH 23/37] (cont) (WIP) --- src/sanitize/asan/suppressions_clang.cfg | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/src/sanitize/asan/suppressions_clang.cfg b/src/sanitize/asan/suppressions_clang.cfg index 403784d4c..f9884479f 100644 --- a/src/sanitize/asan/suppressions_clang.cfg +++ b/src/sanitize/asan/suppressions_clang.cfg @@ -17,9 +17,14 @@ # Current version assumption: clang-15/16/17. -# XXXHad some issues matching ^ and $ in this one; leaving them out; these are very unlikely to match something -# unintentional. +# This is for LSAN (leak sanitizer), not ASAN proper, but ASAN includes LSAN and is enabled via LSAN_OPTIONS. +# If we do ASAN-proper suppressions, they would be in a separate file. -# XXX -leak:flow::util::Thread_local_state_registry.*this_thread_state -leak:flow::util::Thread_local_state_registry::.*Thread_local_state_registry +# Note for posterity: Have not tried ^ and $ within LSAN suppressions files. We get by without them +# below. (For other sanitizers, it works for some but not others, and docs are spotty and somewhat unreliable +# about such details, so in our experience it's a matter of experimentation.) + +# Thread_local_state_registry has a couple of documented, small leaks that are intentional and acceptable. +# Any application that this class template is likely to need these LSAN suppressions. 
+leak:Thread_local_state_registry*this_thread_state +leak:Thread_local_state_registry*Thread_local_state_registry From 6ea0cb867ca434e303b0d644ae851ecfa83cc5fc Mon Sep 17 00:00:00 2001 From: Yuri Goldfeld Date: Fri, 21 Nov 2025 15:26:10 -0800 Subject: [PATCH 24/37] (cont) (WIP) --- src/flow/util/test/thread_lcl_test.cpp | 17 +++++++++++------ src/sanitize/asan/suppressions_clang.cfg | 4 +++- 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/src/flow/util/test/thread_lcl_test.cpp b/src/flow/util/test/thread_lcl_test.cpp index 555d6f6b5..ae9f9f6d8 100644 --- a/src/flow/util/test/thread_lcl_test.cpp +++ b/src/flow/util/test/thread_lcl_test.cpp @@ -23,6 +23,7 @@ #include #include #include +#include namespace flow::util::test { @@ -31,6 +32,8 @@ namespace { using std::optional; using std::string; +using std::make_shared; +using std::shared_ptr; using flow::test::Test_logger; using Thread_loop = async::Single_thread_task_loop; template @@ -173,28 +176,30 @@ TEST(Thread_local_state_registry, Interface) reg3.emplace(&logger, "testLock"); Thread_loop t1{&logger, "threadLoop1"}; - Task func1 = [&](bool) + auto func1 = make_shared(); + *func1 = [self = func1, ®3, &t1](bool) { bool exp{true}; if (reg3->this_thread_state()->m_do_action.compare_exchange_strong(exp, false, std::memory_order_relaxed)) { s_events += "didAction\n"; } - t1.schedule_from_now(boost::chrono::milliseconds(500), Task{func1}); + t1.schedule_from_now(boost::chrono::milliseconds(500), Task{*self}); }; Thread_loop t2{&logger, "threadLoop2"}; - Task func2 = [&](bool) + auto func2 = make_shared(); + *func2 = [self = func1, ®3, &t2](bool) { bool exp{true}; if (reg3->this_thread_state()->m_do_action.compare_exchange_strong(exp, false, std::memory_order_relaxed)) { s_events += "didAction\n"; } - t2.schedule_from_now(boost::chrono::milliseconds(500), Task{func2}); + t2.schedule_from_now(boost::chrono::milliseconds(500), Task{*self}); }; - t1.start([&]() { func1(false); }); - t2.start([&]() { 
func2(false); }); + t1.start([func = func1]() { (*func)(false); }); + t2.start([func = func2]() { (*func)(false); }); EXPECT_TRUE(s_events.empty()); reg3->while_locked([&](const auto& lock) diff --git a/src/sanitize/asan/suppressions_clang.cfg b/src/sanitize/asan/suppressions_clang.cfg index f9884479f..215b7dbaf 100644 --- a/src/sanitize/asan/suppressions_clang.cfg +++ b/src/sanitize/asan/suppressions_clang.cfg @@ -22,7 +22,9 @@ # Note for posterity: Have not tried ^ and $ within LSAN suppressions files. We get by without them # below. (For other sanitizers, it works for some but not others, and docs are spotty and somewhat unreliable -# about such details, so in our experience it's a matter of experimentation.) +# about such details, so in our experience it's a matter of experimentation.) Reminder: These aren't regexes; +# at best (that we know of) the following works: * (definitely, a-la *nix wildcard), ^ and $ (possibly, a-la +# regex). # Thread_local_state_registry has a couple of documented, small leaks that are intentional and acceptable. # Any application that this class template is likely to need these LSAN suppressions. 
From 2be0175c8f08f2082614207e4986c8fd0d89d813 Mon Sep 17 00:00:00 2001 From: Yuri Goldfeld Date: Fri, 21 Nov 2025 15:40:17 -0800 Subject: [PATCH 25/37] (cont) (WIP) --- .github/workflows/main.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index f95aa178d..521c53521 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -298,6 +298,7 @@ jobs: - build-test-cfg: { id: debug } - build-test-cfg: { id: relwithdebinfo } - build-test-cfg: { id: relwithdebinfo-ubsan } + - build-test-cfg: { id: relwithdebinfo-asan } - build-test-cfg: { id: minsizerel } - cxx-std: { id: cxx17 } compiler: { id: gcc-9 } From 9822b1127c1a5f05179c085e2865d2721421e51e Mon Sep 17 00:00:00 2001 From: Yuri Goldfeld Date: Fri, 21 Nov 2025 16:09:00 -0800 Subject: [PATCH 26/37] (cont) (WIP) --- src/flow/util/test/thread_lcl_test.cpp | 58 +++++++++++++++----------- 1 file changed, 33 insertions(+), 25 deletions(-) diff --git a/src/flow/util/test/thread_lcl_test.cpp b/src/flow/util/test/thread_lcl_test.cpp index ae9f9f6d8..ad6c13520 100644 --- a/src/flow/util/test/thread_lcl_test.cpp +++ b/src/flow/util/test/thread_lcl_test.cpp @@ -24,6 +24,7 @@ #include #include #include +#include namespace flow::util::test { @@ -34,13 +35,20 @@ using std::optional; using std::string; using std::make_shared; using std::shared_ptr; +using std::mutex; +using std::atomic; using flow::test::Test_logger; using Thread_loop = async::Single_thread_task_loop; template using Tl_reg = Thread_local_state_registry; +static mutex s_events_mtx; static string s_events; -static std::atomic s_id{1}; +static atomic s_id{1}; + +void events_clear() { Lock_guard lock; events_clear(); } +void events_set(const string& str) { Lock_guard lock; s_events = str; } +string events() { Lock_guard lock; return s_events; } struct State { @@ -50,7 +58,7 @@ struct State ~State() { - s_events += ostream_op_string("~State/", s_id++, '\n'); + 
events_set(ostream_op_string(events(), "~State/", s_id++, '\n')); } }; struct State2 @@ -61,7 +69,7 @@ struct State2 ~State2() { - s_events += ostream_op_string("~State/", s_id++, '\n'); + events_set(ostream_op_string(events(), "~State/", s_id++, '\n')); } }; @@ -87,7 +95,7 @@ TEST(Thread_local_state_registry, Interface) * - */ s_reg2->set_logger(&logger); - EXPECT_TRUE(s_events.empty()); + EXPECT_TRUE(events().empty()); auto s1 = reg1->this_thread_state(); EXPECT_EQ(s1->m_stuff, "stuff"); @@ -102,8 +110,8 @@ TEST(Thread_local_state_registry, Interface) s_reg2.reset(); reg1.reset(); - EXPECT_EQ(s_events, "~State/1\n~State/2\n"); - s_events.clear(); + EXPECT_EQ(events(), "~State/1\n~State/2\n"); + events_clear(); { optional> reg3; @@ -126,16 +134,16 @@ TEST(Thread_local_state_registry, Interface) auto s2 = reg3->this_thread_state(); EXPECT_EQ(s2, reg3->this_thread_state()); - EXPECT_TRUE(s_events.empty()); + EXPECT_TRUE(events().empty()); t1.stop(); - EXPECT_EQ(s_events, "~State/3\n"); + EXPECT_EQ(events(), "~State/3\n"); t2.stop(); - EXPECT_EQ(s_events, "~State/3\n~State/4\n"); + EXPECT_EQ(events(), "~State/3\n~State/4\n"); } - EXPECT_EQ(s_events, "~State/3\n~State/4\n"); + EXPECT_EQ(events(), "~State/3\n~State/4\n"); } - EXPECT_EQ(s_events, "~State/3\n~State/4\n~State/5\n"); - s_events.clear(); + EXPECT_EQ(events(), "~State/3\n~State/4\n~State/5\n"); + events_clear(); { // Create a couple of `Tl_reg`s of the same type; and of a different type (some internal `static`s exercised). 
@@ -156,18 +164,18 @@ TEST(Thread_local_state_registry, Interface) EXPECT_EQ(reg3b->this_thread_state()->m_x, 3); EXPECT_EQ(reg4b->this_thread_state()->m_x, 4); }); t2.start([&]() { reg3->this_thread_state(); reg4b->this_thread_state(); }); - EXPECT_TRUE(s_events.empty()); + EXPECT_TRUE(events().empty()); t1.stop(); - EXPECT_EQ(s_events, "~State/6\n~State/7\n~State/8\n~State/9\n"); + EXPECT_EQ(events(), "~State/6\n~State/7\n~State/8\n~State/9\n"); reg4b.reset(); - EXPECT_EQ(s_events, "~State/6\n~State/7\n~State/8\n~State/9\n~State/10\n"); + EXPECT_EQ(events(), "~State/6\n~State/7\n~State/8\n~State/9\n~State/10\n"); reg3.reset(); - EXPECT_EQ(s_events, "~State/6\n~State/7\n~State/8\n~State/9\n~State/10\n~State/11\n"); + EXPECT_EQ(events(), "~State/6\n~State/7\n~State/8\n~State/9\n~State/10\n~State/11\n"); t2.stop(); - EXPECT_EQ(s_events, "~State/6\n~State/7\n~State/8\n~State/9\n~State/10\n~State/11\n"); - s_events.clear(); + EXPECT_EQ(events(), "~State/6\n~State/7\n~State/8\n~State/9\n~State/10\n~State/11\n"); + events_clear(); } - EXPECT_TRUE(s_events.empty()); + EXPECT_TRUE(events().empty()); { using Task = async::Scheduled_task; @@ -182,7 +190,7 @@ TEST(Thread_local_state_registry, Interface) bool exp{true}; if (reg3->this_thread_state()->m_do_action.compare_exchange_strong(exp, false, std::memory_order_relaxed)) { - s_events += "didAction\n"; + events_set(events() + "didAction\n"); } t1.schedule_from_now(boost::chrono::milliseconds(500), Task{*self}); }; @@ -193,7 +201,7 @@ TEST(Thread_local_state_registry, Interface) bool exp{true}; if (reg3->this_thread_state()->m_do_action.compare_exchange_strong(exp, false, std::memory_order_relaxed)) { - s_events += "didAction\n"; + events_set(events() + "didAction\n"); } t2.schedule_from_now(boost::chrono::milliseconds(500), Task{*self}); }; @@ -201,7 +209,7 @@ TEST(Thread_local_state_registry, Interface) t1.start([func = func1]() { (*func)(false); }); t2.start([func = func2]() { (*func)(false); }); - 
EXPECT_TRUE(s_events.empty()); + EXPECT_TRUE(events().empty()); reg3->while_locked([&](const auto& lock) { const auto& states = reg3->state_per_thread(lock); @@ -212,11 +220,11 @@ TEST(Thread_local_state_registry, Interface) } }); this_thread::sleep_for(boost::chrono::seconds(2)); - EXPECT_EQ(s_events, "didAction\ndidAction\n"); + EXPECT_EQ(events(), "didAction\ndidAction\n"); this_thread::sleep_for(boost::chrono::seconds(2)); - EXPECT_EQ(s_events, "didAction\ndidAction\n"); + EXPECT_EQ(events(), "didAction\ndidAction\n"); } - EXPECT_EQ(s_events, "didAction\ndidAction\n~State/12\n~State/13\n"); + EXPECT_EQ(events(), "didAction\ndidAction\n~State/12\n~State/13\n"); } // TEST(Thread_local_state_registry, Interface) TEST(Thread_local_state_registry, DISABLED_Advanced) From bc945f7d06abfbbcb70e6cd43d940b8c8da384d0 Mon Sep 17 00:00:00 2001 From: Yuri Goldfeld Date: Fri, 21 Nov 2025 16:18:04 -0800 Subject: [PATCH 27/37] (cont) (WIP) --- src/flow/util/test/thread_lcl_test.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/flow/util/test/thread_lcl_test.cpp b/src/flow/util/test/thread_lcl_test.cpp index ad6c13520..f0f3eac1f 100644 --- a/src/flow/util/test/thread_lcl_test.cpp +++ b/src/flow/util/test/thread_lcl_test.cpp @@ -46,7 +46,7 @@ static mutex s_events_mtx; static string s_events; static atomic s_id{1}; -void events_clear() { Lock_guard lock; events_clear(); } +void events_clear() { Lock_guard lock; s_events.clear(); } void events_set(const string& str) { Lock_guard lock; s_events = str; } string events() { Lock_guard lock; return s_events; } From 0bca9bae7e0f159723934f81314dbe47649b289a Mon Sep 17 00:00:00 2001 From: Yuri Goldfeld Date: Fri, 21 Nov 2025 16:32:35 -0800 Subject: [PATCH 28/37] (cont) (WIP) --- src/flow/util/test/thread_lcl_test.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/flow/util/test/thread_lcl_test.cpp b/src/flow/util/test/thread_lcl_test.cpp index f0f3eac1f..6213737a8 100644 --- 
a/src/flow/util/test/thread_lcl_test.cpp +++ b/src/flow/util/test/thread_lcl_test.cpp @@ -46,9 +46,9 @@ static mutex s_events_mtx; static string s_events; static atomic s_id{1}; -void events_clear() { Lock_guard lock; s_events.clear(); } -void events_set(const string& str) { Lock_guard lock; s_events = str; } -string events() { Lock_guard lock; return s_events; } +void events_clear() { Lock_guard lock{s_events_mtx}; s_events.clear(); } +void events_set(const string& str) { Lock_guard lock{s_events_mtx}; s_events = str; } +string events() { Lock_guard lock{s_events_mtx}; return s_events; } struct State { From 45d800211aadc75459d97b1276d0121cf5614469 Mon Sep 17 00:00:00 2001 From: Yuri Goldfeld Date: Fri, 21 Nov 2025 16:47:23 -0800 Subject: [PATCH 29/37] (cont) (WIP) --- .github/workflows/main.yml | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 521c53521..0aa0c3972 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -289,17 +289,6 @@ jobs: exclude: # Firstly the exclusions from the C++17 sub-matrix (the other being the C++20 sub-matrix). # Again, these exclusions are explained in Flow-IPC workflow counterpart. 
- # XXX - - compiler: { id: gcc-9 } - - compiler: { id: gcc-10 } - - compiler: { id: gcc-11 } - - compiler: { id: gcc-13 } - - build-test-cfg: { id: release } - - build-test-cfg: { id: debug } - - build-test-cfg: { id: relwithdebinfo } - - build-test-cfg: { id: relwithdebinfo-ubsan } - - build-test-cfg: { id: relwithdebinfo-asan } - - build-test-cfg: { id: minsizerel } - cxx-std: { id: cxx17 } compiler: { id: gcc-9 } build-test-cfg: { id: relwithdebinfo-asan } From 23d4dd761ec17302c3db224b6f288ef15735024e Mon Sep 17 00:00:00 2001 From: Yuri Goldfeld Date: Mon, 24 Nov 2025 17:19:37 -0800 Subject: [PATCH 30/37] (cont) --- .github/workflows/main.yml | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 0aa0c3972..d8d86441e 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -396,6 +396,16 @@ jobs: # (Unfortunately cannot refer to earlier-assigned `env.` entries within subsequent ones.) install-dir: ${{ github.workspace }}/install/${{ matrix.build-test-cfg.conan-profile-build-type }}/usr/local # For the remaining env entries please see comments in Flow-IPC workflow counterpart. We use same techniques. + # Update: (As of this writing this is only here, not in Flow-IPC counterpart, but that'll change; might want to + # move the comment there then.) ASAN is somewhat different from the others, as it's really two sanitizers with + # separate suppression files -- ASAN (actual safety checks) and LSAN (leak checks). So far we do *not* need + # any suppressed ASAN warnings (ASAN false positives are known to be rare, so that makes sense), but there are + # at least a couple minor, intentional mem-leaks, so we *do* need to suppress LSAN warnings at times. Therefore, + # until/unless this changes, we will essentially treat LSAN-suppression as *the* suppression for our ASAN runs, + # thus only requiring one suppression file type still. 
If/when that changes, the below will need to be made + # more complicated, so that 2 suppression types per sanitizer are supported as opposed to just 1. Until then, + # it's still simple. So that's why $ASAN_OPTIONS does not mention suppression, but $LSAN_OPTIONS does... and + # it is the suppression file(s) (if any) under various `asan/` dirs in the source tree. san-suppress-cfg-file: ${{ github.workspace }}/install/${{ matrix.build-test-cfg.conan-profile-build-type }}/usr/local/bin/san_suppressions.cfg san-suppress-cfg-in-file1: sanitize/${{ matrix.build-test-cfg.sanitizer-name }}/suppressions_${{ matrix.compiler.name }}.cfg san-suppress-cfg-in-file2: sanitize/${{ matrix.build-test-cfg.sanitizer-name }}/suppressions_${{ matrix.compiler.name }}_${{ matrix.compiler.version }}.cfg @@ -619,13 +629,14 @@ jobs: SUPP_DIR_A=${{ github.workspace }}/src # As of this writing there are TSAN suppressions for this test specifically. TODO: Revisit them; and then this. # Update: Now there are also ASAN (LSAN) suppressions. These are likely permanent as of this writing. + # Reminder: the following construction handles suppression file(s) from *any* relevant sanitizer type (if any). SUPP_DIR_OWN=${{ github.workspace }}/test/suite/unit_test { cat $SUPP_DIR_A/${{ env.san-suppress-cfg-in-file1 }} $SUPP_DIR_A/${{ env.san-suppress-cfg-in-file2 }} \ $SUPP_DIR_OWN/${{ env.san-suppress-cfg-in-file1 }} $SUPP_DIR_OWN/${{ env.san-suppress-cfg-in-file2 }} \ > ${{ env.san-suppress-cfg-file }} 2> /dev/null; } || true ${{ env.setup-tests-env }} ${{ env.setup-run-env }} - # Sensitive benchmarks in this setting should run and be warned about it they "fail," but they should not + # Sensitive benchmarks in this setting should run and be warned about, if they "fail," but they should not # fail the test. 
$RUN_IT --do-not-fail-benchmarks > $OUT_DIR/console.log 2>&1 From ff724ea71f3b972b20d7a7d8c61c3a98e239fa4c Mon Sep 17 00:00:00 2001 From: Yuri Goldfeld Date: Mon, 24 Nov 2025 18:37:02 -0800 Subject: [PATCH 31/37] (cont) --- src/flow/util/basic_blob.hpp | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/flow/util/basic_blob.hpp b/src/flow/util/basic_blob.hpp index b2e124722..9b8c4ea44 100644 --- a/src/flow/util/basic_blob.hpp +++ b/src/flow/util/basic_blob.hpp @@ -1353,8 +1353,8 @@ class Basic_blob * or move-assignment of the Basic_blob; this reduces to Basic_blob::assign() (move overload); which will * do a swap -- that ultimately will move the stored Deleter_raw up to a few times. * - * As of this writing we also manually overwrite `.get_deleter()` it in one case in Basic_blob::reserve_impl(); - * so this is useful for that also. + * As of this writing we also manually overwrite `.get_deleter()` in one case in Basic_blob::reserve_impl(); + * so this is useful for that too. * * @param moved_src * Moved guy. For cleanliness it becomes as-if default-cted (unless it is the same object as `*this`). @@ -2187,7 +2187,7 @@ void Basic_blob::reserve_impl(size_type new_capaci buf_ptr().reset(clear_on_alloc ? (new value_type[new_capacity]()) : (new value_type[new_capacity]), // Careful! *this might be gone if some other share()ing obj is the one that 0s ref-count. [logger_ptr, original_blob = this, new_capacity] - (value_type* buf_ptr) + (value_type* buf_ptr_to_delete) { FLOW_LOG_SET_CONTEXT(logger_ptr, S_LOG_COMPONENT); FLOW_LOG_TRACE("Deallocating internal buffer sized [" << new_capacity << "] originally allocated by " @@ -2195,7 +2195,7 @@ void Basic_blob::reserve_impl(size_type new_capaci "Blob might live at that address now. A message immediately preceding this one should " "indicate the last Blob to give up ownership of the internal buffer."); // Finally just do what the default one would've done, as we've done our custom thing (logging). 
- delete [] buf_ptr; + delete [] buf_ptr_to_delete; }); } else // if (!should_log()): No logging deleter; just delete[] it. @@ -2262,7 +2262,7 @@ void Basic_blob::reserve_impl(size_type new_capaci { /* Conceptually it's quite similar to the S_SHARING case where we do shared_ptr::reset() above. * However there is an API difference that is subtle yet real (albeit only for stateful Allocator_raw): - * Current alloc_raw() was used to allocate *buf_ptr(), so it must be used also to dealloc it. + * Current alloc_raw() was used to allocate *(buf_ptr()), so it must be used also to dealloc it. * unique_ptr::reset() does *not* take a new Deleter_raw; hence if we used it (alone) here it would retain * the alloc_raw() from ction (or possibly last assignment) time -- and if that does not equal current * m_alloc => trouble in make_zero() or dtor. From bb63db9e3295cbeb1493feec1011998e3c6b3c6b Mon Sep 17 00:00:00 2001 From: Yuri Goldfeld Date: Mon, 24 Nov 2025 18:55:59 -0800 Subject: [PATCH 32/37] (cont) --- src/flow/util/basic_blob.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/flow/util/basic_blob.hpp b/src/flow/util/basic_blob.hpp index 9b8c4ea44..12b67dab9 100644 --- a/src/flow/util/basic_blob.hpp +++ b/src/flow/util/basic_blob.hpp @@ -2183,7 +2183,7 @@ void Basic_blob::reserve_impl(size_type new_capaci if (logger_ptr && logger_ptr->should_log(log::Sev::S_TRACE, S_LOG_COMPONENT)) { /* This ensures delete[] call when buf_ptr() ref-count reaches 0. - * As advertised, for performance, the memory is NOT initialized. */ + * As advertised, for performance, the memory is NOT initialized unless so instructed. */ buf_ptr().reset(clear_on_alloc ? (new value_type[new_capacity]()) : (new value_type[new_capacity]), // Careful! *this might be gone if some other share()ing obj is the one that 0s ref-count. 
[logger_ptr, original_blob = this, new_capacity] From 8ff60f9c18cb3347eb6a9c0d1f3944ed4a8f3b9b Mon Sep 17 00:00:00 2001 From: Yuri Goldfeld Date: Tue, 25 Nov 2025 14:34:40 -0800 Subject: [PATCH 33/37] (cont) --- src/flow/util/thread_lcl.hpp | 43 +++++++++++++++++++++--------------- 1 file changed, 25 insertions(+), 18 deletions(-) diff --git a/src/flow/util/thread_lcl.hpp b/src/flow/util/thread_lcl.hpp index d3c8f3105..31e767f92 100644 --- a/src/flow/util/thread_lcl.hpp +++ b/src/flow/util/thread_lcl.hpp @@ -51,27 +51,34 @@ namespace flow::util * disallowing any reset to null and back from null), but these are just happenstance/preference-based. * Likely we'd have just used the Boost guy, if that's all we wanted. * - * The main reason `Thread_local_state_registry` exists is the following feature: - * - If `~Thread_local_state_registry` (a `*this` dtor) executes before a given thread X, that has earlier - * caused the creation of a thread-local `T` (by calling `this->this_thread_state()` from X), then: - * - That dtor, from whichever thread invoked it, deletes that thread-local `T` (for all `T`). - * - Corollary: A related feature is the ability to look at all per-thread data accumulated so far (from any - * thread). See state_per_thread() accessor (+ while_locked()). + * The main reason `Thread_local_state_registry` exists comprises the following (mutually related) features: + * -# The ability to look at all per-thread data accumulated so far (from any + * thread). See state_per_thread() accessor (+ while_locked()). + * -# If `~Thread_local_state_registry` (a `*this` dtor) executes before a given thread X (that has earlier + * caused the creation of a thread-local `T`, by calling `this->this_thread_state()` from X) is joined (exits), + * then: + * - That dtor, from whichever thread invoked it, deletes that thread-local `T` (for all `T`). 
  *
- * So `*this` dtor does the equivalent of standard per-thread cleanup of per-thread data, if it is invoked
- * before such standard per-thread cleanup has run (because the relevant threads have simply not yet exited).
+ * Feature 1 is a clear value-add over `thread_specific_ptr` (or just `static thread_local`). If one can get by
+ * without dealing with the set of per-thread objects from any 1 thread at a given time, that's good; it will involve
+ * far fewer corner cases and worries. Unfortunately it is not always possible to do that. In that case you want
+ * a *registry* of your `Thread_local_state`s; a `*this` provides this.
  *
- * `thread_specific_ptr` does not do that: you must either `.reset()` from each relevant thread, before
- * the `thread_specific_ptr` is itself deleted; or any such thread must exit before (causing an implicit `.reset()`).
- * Nor can one iterate through other threads' data.
+ * As for feature 2: Consider `static thread_specific_ptr` (or `static thread_local`, broadly speaking, without getting
+ * into the formal details of C++ language guarantees as to how such per-thread items are cleaned up). By definition
+ * of `static` the `thread_specific_ptr` will outlive any threads to have been spawned by the time `main()` exits.
+ * Therefore an implicit `.reset()` will execute when extant threads are joined, and each thread-local object will
+ * be cleaned up. No problem! However, a non-`static thread_specific_ptr` offers no such behavior or guarantee:
+ * If the `~thread_specific_ptr` dtor runs in thread X, at most that thread's TL object shall be auto-`.reset()`.
+ * The other extant threads' TL objects will live on (leak). (Nor can one iterate through them; that would be
+ * feature 1.)
  *
- * For this reason most people declare `thread_specific_ptr` either `static` or
- * as a global, as then the `thread_specific_ptr` always outlives the relevant threads, and everything is fine and easy.
- * What if you really must clean resources earlier, when they are no longer necessary, but relevant threads may - * stay around? Then try a `*this`. + * However a `*this` being destroyed in thread X will cause an automatic looping through the extant threads' + * objects (if any) and their cleanup as well. So if you want that, use a `*this` instead of + * non-`static thread_specific_ptr`. * - * As a secondary reason (ignoring the above) `Thread_local_state_registry` has a more straightforward/rigid API - * that enforces certain assumptions/conventions (some of this is mentioned above). These might be appealing + * As a secondary reason (ignoring the above 2 features) `Thread_local_state_registry` has a more straightforward/rigid + * API that enforces certain assumptions/conventions (some of this is mentioned above). These might be appealing * depending on one's taste/reasoning. * * ### How to use ### @@ -89,7 +96,7 @@ namespace flow::util * * A given thread's #Thread_local_state object shall be deleted via `delete Thread_local_state` when one of * the following occurs, whichever happens first: - * - The thread exits. (Deletion occurs shortly before.) + * - The thread exits (is joined). (Deletion occurs shortly before.) * - `*this` is destroyed (in some -- any -- thread, possibly a totally different one; or one of the ones * for which this_thread_state() was called). * From 4dfb4d9e3a00d3dbef52a7d754a35ff0e846837f Mon Sep 17 00:00:00 2001 From: Yuri Goldfeld Date: Tue, 25 Nov 2025 17:45:36 -0800 Subject: [PATCH 34/37] (cont) --- src/flow/util/thread_lcl.hpp | 41 ++++++++++++++++++++++++------------ 1 file changed, 28 insertions(+), 13 deletions(-) diff --git a/src/flow/util/thread_lcl.hpp b/src/flow/util/thread_lcl.hpp index 31e767f92..b919981e0 100644 --- a/src/flow/util/thread_lcl.hpp +++ b/src/flow/util/thread_lcl.hpp @@ -327,6 +327,13 @@ class Thread_local_state_registry : * A regular mutex being locked/unlocked, sans contention, is quite cheap. 
This should more than defeat * the preceding "bad news" bullet. * + * @internal + * + * @todo Add a Flow unit-test or functional-test for the pattern "thread-caching of central canonical state," as + * described in the doc header for Thread_local_state_registry::this_thread_state(). + * + * @endinternal + * * @return See above. Never null. */ Thread_local_state* this_thread_state(); @@ -607,8 +614,8 @@ class Thread_local_state_registry : // Methods. /** - * Called by `thread_specific_ptr` for a given thread's `m_this_thread_state_or_null.get()`, if `*this` dtor has not - * yet destroyed #m_this_thread_state_or_null. With proper synchronization: + * Called by `thread_specific_ptr` for a given thread's `m_this_thread_state_or_null.m_tsp.get()`, + * if `*this` dtor has not yet destroyed #m_this_thread_state_or_null. With proper synchronization: * does `delete ctx->m_state` and `delete ctx` and removes the former from Registry_ctl::m_state_per_thread. * It is possible that the `*this` dtor runs concurrently (if a relevant thread is exiting right around * the time the user chooses to invoke dtor) and manages to `delete ctx->m_state` first; however it will *not* @@ -642,18 +649,18 @@ class Thread_local_state_registry : * * We however declare it as a non-`static` data member. That's different. When #m_this_thread_state_or_null * is destroyed (during `*this` destruction), if a given thread T (that is not the thread in which dtor is - * executing) has called this_thread_state() -- thus has `m_this_thread_state_or_null.get() != nullptr` -- and + * executing) has called this_thread_state() -- thus has `m_this_thread_state_or_null.m_tsp.get() != nullptr` -- and * is currently running, then its #Thread_local_state shall leak. Cleanup functions run only while the owner * `thread_specific_ptr` exists. Boost.thread docs specifically say this. 
* * Therefore, in our case, we can make it `static`: but then any cleanup is deferred until thread exit; - * and while it is maybe not the end of the world, we strive to be better; the whole point of the registry + * and while it is maybe not the end of the world, we strive to be better; a major part of the point of the registry * is to do timely cleanup. So then instead of that we: - * - Keep a non-thread-local registry Registry_ctl::m_state_per_thread of each thread's thread-local - * #Thread_local_state. - * - In dtor iterate through that registry and delete 'em. + * - keep a non-thread-local registry Registry_ctl::m_state_per_thread of each thread's thread-local + * #Thread_local_state; + * - in dtor iterate through that registry and delete 'em. * - * Let `p` stand for `m_this_thread_state_or_null.get()->m_state`: if `p != nullptr`, that alone does not + * Let `p` stand for `m_this_thread_state_or_null.m_tsp.get()->m_state`: if `p != nullptr`, that alone does not * guarantee that `*p` is valid. It is valid if and only if #m_ctl is a live `shared_ptr` (as determined * via `weak_ptr`), and `p` is in Registry_ctl::m_state_per_thread. If #m_ctl is not live * (`weak_ptr::lock()` is null), then `*this` is destroyed or very soon to be destroyed, and its dtor thus @@ -668,8 +675,8 @@ class Thread_local_state_registry : * - By user code, probably following this_thread_state() to obtain `p`. This is safe, because: * It is illegal for them to access `*this`-owned state after destroying `*this`. * - * As for the the stuff in `m_this_thread_state_or_null.get()` other than `p` -- the Tl_context surrounding it -- - * again: see Tl_context doc header. + * As for the the stuff in `m_this_thread_state_or_null.m_tsp.get()` other than `p` -- the Tl_context surrounding + * it -- again: see Tl_context doc header. 
*/ Tsp_wrapper m_this_thread_state_or_null; @@ -752,7 +759,7 @@ class Thread_local_state_registry : * ~~~ * void opportunistically_launch_when_triggered() // Assumes: bool(registry.this_thread_state_or_null()) == true. * { - * T* const this_thread_state = registry.this_thread_state()l + * T* const this_thread_state = registry.this_thread_state(); * if (!missiles_to_launch_polled_shared_state.poll_armed(this_thread_state->m_missile_launch_needed_poll_state)) * { // Fast-path! Nothing to do re. missile-launching. * return; @@ -797,6 +804,13 @@ class Thread_local_state_registry : * calls it will cause a compile-time `static_assert()` fail. As noted earlier using Polled_shared_state, despite * the name, for not any shared state but just the thread-local distributed flag arming/polling = a perfectly * valid approach. + * + * @tparam Shared_state_t + * A single object of this type shall be constructed and can be accessed, whether for reading or writing, + * using Polled_shared_state::while_locked(). It must be constructible via the ctor signature you choose + * to use when constructing `*this` Polled_shared_state ctor (template). The ctor args shall be forwarded + * to the `Shared_state_t` ctor. Note that it is not required to actually use a #Shared_state and + * Polled_shared_state::while_locked(). In that case please let `Shared_state_t` be an empty `struct` type. */ template class Polled_shared_state : @@ -1173,8 +1187,9 @@ template template Polled_shared_state::Polled_shared_state(Ctor_args&&... shared_state_ctor_args) : m_poll_flag_registry(nullptr, "", - []() -> auto { using Atomic_flag = typename decltype(m_poll_flag_registry)::Thread_local_state; - return new Atomic_flag{false}; }), + []() -> auto + { using Atomic_flag = typename decltype(m_poll_flag_registry)::Thread_local_state; + return new Atomic_flag{false}; }), m_shared_state(std::forward(shared_state_ctor_args)...) { // Yep. 
From 71592ec4eee8897168c8fc96842e91fd709a085c Mon Sep 17 00:00:00 2001 From: Yuri Goldfeld Date: Wed, 26 Nov 2025 22:23:17 -0800 Subject: [PATCH 35/37] (cont) --- src/flow/util/basic_blob.hpp | 397 +++++++++++++++++------------------ src/flow/util/blob.hpp | 134 ++++++------ src/flow/util/blob_fwd.hpp | 19 +- 3 files changed, 275 insertions(+), 275 deletions(-) diff --git a/src/flow/util/basic_blob.hpp b/src/flow/util/basic_blob.hpp index 12b67dab9..8b24fd307 100644 --- a/src/flow/util/basic_blob.hpp +++ b/src/flow/util/basic_blob.hpp @@ -265,13 +265,13 @@ struct Clear_on_alloc {}; * and generally allocators for which `pointer` is not simply `value_type*` but rather a fancy-pointer * (see cppreference.com) are correctly supported. (Note this may not be the case for your compiler's * `std::vector`.) - * @tparam S_SHARING_ALLOWED + * @tparam SHARING * If `true`, share() and all derived methods, plus blobs_sharing(), can be instantiated (invoked in compiled * code). If `false` they cannot (`static_assert()` will trip), but the resulting Basic_blob concrete * class will be slightly more performant (internally, a `shared_ptr` becomes instead a `unique_ptr` which * means smaller allocations and no ref-count logic invoked). */ -template +template class Basic_blob { public: @@ -312,8 +312,8 @@ class Basic_blob // Constants. - /// Value of template parameter `S_SHARING_ALLOWED` (for generic programming). - static constexpr bool S_SHARING = S_SHARING_ALLOWED; + /// Value of template parameter `SHARING` (for generic programming). + static constexpr bool S_SHARING = SHARING; /// Special value indicating an unchanged `size_type` value; such as in resize(). static constexpr size_type S_UNCHANGED = size_type(-1); // Same trick as std::string::npos. @@ -1666,8 +1666,8 @@ class Basic_blob // Template implementations. // buf_ptr() initialized to null pointer. n_capacity and m_size remain uninit (meaningless until buf_ptr() changes). 
-template -Basic_blob::Basic_blob(const Allocator_raw& alloc_raw) : +template +Basic_blob::Basic_blob(const Allocator_raw& alloc_raw) : m_alloc_and_buf_ptr(alloc_raw), // Copy allocator; stateless allocator should have size 0 (no-op for the processor). m_capacity(0), // Not necessary, but some compilers will warn in some situations. Fine; it's cheap enough. m_start(0), // Ditto. @@ -1676,8 +1676,8 @@ Basic_blob::Basic_blob(const Allocator_raw& alloc_ // OK. } -template -Basic_blob::Basic_blob +template +Basic_blob::Basic_blob (size_type size, log::Logger* logger_ptr, const Allocator_raw& alloc_raw) : Basic_blob(alloc_raw) // Delegate. @@ -1685,8 +1685,8 @@ Basic_blob::Basic_blob resize(size, 0, logger_ptr); } -template -Basic_blob::Basic_blob +template +Basic_blob::Basic_blob (size_type size, Clear_on_alloc coa_tag, log::Logger* logger_ptr, const Allocator_raw& alloc_raw) : Basic_blob(alloc_raw) // Delegate. @@ -1694,8 +1694,8 @@ Basic_blob::Basic_blob resize(size, coa_tag, 0, logger_ptr); } -template -Basic_blob::Basic_blob(const Basic_blob& src, log::Logger* logger_ptr) : +template +Basic_blob::Basic_blob(const Basic_blob& src, log::Logger* logger_ptr) : // Follow rules established in alloc_raw() doc header: m_alloc_and_buf_ptr(std::allocator_traits::select_on_container_copy_construction(src.alloc_raw())), m_capacity(0), // See comment in first delegated ctor above. @@ -1711,8 +1711,8 @@ Basic_blob::Basic_blob(const Basic_blob& src, log: assign_copy(src.const_buffer(), logger_ptr); } -template -Basic_blob::Basic_blob(Basic_blob&& moved_src, log::Logger* logger_ptr) noexcept : +template +Basic_blob::Basic_blob(Basic_blob&& moved_src, log::Logger* logger_ptr) noexcept : // Follow rules established in alloc_raw() doc header: m_alloc_and_buf_ptr(std::move(moved_src.alloc_raw())), m_capacity(0), // See comment in first delegated ctor above. 
@@ -1724,12 +1724,12 @@ Basic_blob::Basic_blob(Basic_blob&& moved_src, log swap_impl(moved_src, logger_ptr); } -template -Basic_blob::~Basic_blob() = default; +template +Basic_blob::~Basic_blob() = default; -template -Basic_blob& - Basic_blob::assign(const Basic_blob& src, log::Logger* logger_ptr) +template +Basic_blob& + Basic_blob::assign(const Basic_blob& src, log::Logger* logger_ptr) { if (this != &src) { @@ -1790,15 +1790,15 @@ Basic_blob& return *this; } // Basic_blob::assign(copy) -template -Basic_blob& Basic_blob::operator=(const Basic_blob& src) +template +Basic_blob& Basic_blob::operator=(const Basic_blob& src) { return assign(src); } -template -Basic_blob& - Basic_blob::assign(Basic_blob&& moved_src, log::Logger* logger_ptr) noexcept +template +Basic_blob& + Basic_blob::assign(Basic_blob&& moved_src, log::Logger* logger_ptr) noexcept { if (this != &moved_src) { @@ -1827,15 +1827,15 @@ Basic_blob& return *this; } // Basic_blob::assign(move) -template -Basic_blob& - Basic_blob::operator=(Basic_blob&& moved_src) noexcept +template +Basic_blob& + Basic_blob::operator=(Basic_blob&& moved_src) noexcept { return assign(std::move(moved_src)); } -template -void Basic_blob::swap_impl(Basic_blob& other, log::Logger* logger_ptr) noexcept +template +void Basic_blob::swap_impl(Basic_blob& other, log::Logger* logger_ptr) noexcept { using std::swap; @@ -1886,8 +1886,8 @@ void Basic_blob::swap_impl(Basic_blob& other, log: } } // Basic_blob::swap_impl() -template -void Basic_blob::swap(Basic_blob& other, log::Logger* logger_ptr) noexcept +template +void Basic_blob::swap(Basic_blob& other, log::Logger* logger_ptr) noexcept { using std::swap; @@ -1910,18 +1910,18 @@ void Basic_blob::swap(Basic_blob& other, log::Logg swap_impl(other, logger_ptr); } -template -void swap(Basic_blob& blob1, - Basic_blob& blob2, log::Logger* logger_ptr) noexcept +template +void swap(Basic_blob& blob1, + Basic_blob& blob2, log::Logger* logger_ptr) noexcept { return blob1.swap(blob2, logger_ptr); 
} -template -Basic_blob Basic_blob::share(log::Logger* logger_ptr) const +template +Basic_blob Basic_blob::share(log::Logger* logger_ptr) const { static_assert(S_SHARING, - "Do not invoke (and thus instantiate) share() or derived methods unless you set the S_SHARING_ALLOWED " + "Do not invoke (and thus instantiate) share() or derived methods unless you set the SHARING " "template parameter to true. Sharing will be enabled at a small perf cost; see class doc header."); // Note: The guys that call it will cause the same check to occur, since instantiating them will instantiate us. @@ -1945,9 +1945,9 @@ Basic_blob Basic_blob -Basic_blob - Basic_blob::share_after_split_left(size_type lt_size, log::Logger* logger_ptr) +template +Basic_blob + Basic_blob::share_after_split_left(size_type lt_size, log::Logger* logger_ptr) { if (lt_size > size()) { @@ -1969,9 +1969,9 @@ Basic_blob return sharing_blob; } -template -Basic_blob - Basic_blob::share_after_split_right(size_type rt_size, log::Logger* logger_ptr) +template +Basic_blob + Basic_blob::share_after_split_right(size_type rt_size, log::Logger* logger_ptr) { if (rt_size > size()) { @@ -1994,9 +1994,9 @@ Basic_blob return sharing_blob; } -template +template template -void Basic_blob::share_after_split_equally_impl +void Basic_blob::share_after_split_equally_impl (size_type size, bool headless_pool, Emit_blob_func&& emit_blob_func, log::Logger* logger_ptr, Share_after_split_left_func&& share_after_split_left_func) { @@ -2023,11 +2023,11 @@ void Basic_blob::share_after_split_equally_impl } } // Basic_blob::share_after_split_equally_impl() -template +template template -void Basic_blob::share_after_split_equally(size_type size, bool headless_pool, - Emit_blob_func&& emit_blob_func, - log::Logger* logger_ptr) +void Basic_blob::share_after_split_equally(size_type size, bool headless_pool, + Emit_blob_func&& emit_blob_func, + log::Logger* logger_ptr) { share_after_split_equally_impl(size, headless_pool, std::move(emit_blob_func), 
logger_ptr, [this](size_type lt_size, log::Logger* logger_ptr) -> Basic_blob @@ -2036,9 +2036,9 @@ void Basic_blob::share_after_split_equally(size_ty }); } -template +template template -void Basic_blob::share_after_split_equally_emit_seq +void Basic_blob::share_after_split_equally_emit_seq (size_type size, bool headless_pool, Blob_container* out_blobs_ptr, log::Logger* logger_ptr) { // If changing this please see Blob_with_log_context::(). @@ -2050,12 +2050,12 @@ void Basic_blob::share_after_split_equally_emit_se }, logger_ptr); } -template +template template -void Basic_blob::share_after_split_equally_emit_ptr_seq(size_type size, - bool headless_pool, - Blob_ptr_container* out_blobs_ptr, - log::Logger* logger_ptr) +void Basic_blob::share_after_split_equally_emit_ptr_seq(size_type size, + bool headless_pool, + Blob_ptr_container* out_blobs_ptr, + log::Logger* logger_ptr) { // If changing this please see Blob_with_log_context::(). @@ -2070,12 +2070,12 @@ void Basic_blob::share_after_split_equally_emit_pt }, logger_ptr); } -template -bool blobs_sharing(const Basic_blob& blob1, - const Basic_blob& blob2) +template +bool blobs_sharing(const Basic_blob& blob1, + const Basic_blob& blob2) { - static_assert(S_SHARING_ALLOWED, - "blobs_sharing() would only make sense on `Basic_blob`s with S_SHARING_ALLOWED=true. " + static_assert(SHARING, + "blobs_sharing() would only make sense on `Basic_blob`s with SHARING=true. " "Even if we were to allow this to instantiate (compile) it would always return false."); return ((!blob1.zero()) && (!blob2.zero())) // Can't co-own a buffer if doesn't own a buffer. @@ -2086,52 +2086,50 @@ bool blobs_sharing(const Basic_blob& blob1, // @todo Maybe throw in assert(blob1.capacity() == blob2.capacity()), if `true` is being returned. } -template -typename Basic_blob::size_type Basic_blob::size() const +template +typename Basic_blob::size_type Basic_blob::size() const { return zero() ? 
0 : m_size; // Note that zero() may or may not be true if we return 0. } -template -typename Basic_blob::size_type Basic_blob::start() const +template +typename Basic_blob::size_type Basic_blob::start() const { return zero() ? 0 : m_start; // Note that zero() may or may not be true if we return 0. } -template -bool Basic_blob::empty() const +template +bool Basic_blob::empty() const { return size() == 0; // Note that zero() may or may not be true if we return true. } -template -typename Basic_blob::size_type Basic_blob::capacity() const +template +typename Basic_blob::size_type Basic_blob::capacity() const { return zero() ? 0 : m_capacity; // Note that zero() <=> we return non-zero. (m_capacity >= 1 if !zero().) } -template -bool Basic_blob::zero() const +template +bool Basic_blob::zero() const { return !buf_ptr(); } -template -void Basic_blob::reserve(size_type new_capacity, log::Logger* logger_ptr) +template +void Basic_blob::reserve(size_type new_capacity, log::Logger* logger_ptr) { reserve_impl(new_capacity, false, logger_ptr); } -template -void Basic_blob::reserve(size_type new_capacity, Clear_on_alloc, - log::Logger* logger_ptr) +template +void Basic_blob::reserve(size_type new_capacity, Clear_on_alloc, log::Logger* logger_ptr) { reserve_impl(new_capacity, true, logger_ptr); } -template -void Basic_blob::reserve_impl(size_type new_capacity, bool clear_on_alloc, - log::Logger* logger_ptr) +template +void Basic_blob::reserve_impl(size_type new_capacity, bool clear_on_alloc, log::Logger* logger_ptr) { using boost::make_shared_noinit; using boost::make_shared; @@ -2363,23 +2361,23 @@ void Basic_blob::reserve_impl(size_type new_capaci assert(capacity() >= new_capacity); // Promised post-condition. 
} // Basic_blob::reserve() -template -void Basic_blob::resize(size_type new_size, size_type new_start_or_unchanged, - log::Logger* logger_ptr) +template +void Basic_blob::resize(size_type new_size, size_type new_start_or_unchanged, + log::Logger* logger_ptr) { resize_impl(new_size, false, new_start_or_unchanged, logger_ptr); } -template -void Basic_blob::resize(size_type new_size, Clear_on_alloc, - size_type new_start_or_unchanged, log::Logger* logger_ptr) +template +void Basic_blob::resize(size_type new_size, Clear_on_alloc, + size_type new_start_or_unchanged, log::Logger* logger_ptr) { resize_impl(new_size, true, new_start_or_unchanged, logger_ptr); } -template -void Basic_blob::resize_impl(size_type new_size, bool clear_on_alloc, - size_type new_start_or_unchanged, log::Logger* logger_ptr) +template +void Basic_blob::resize_impl(size_type new_size, bool clear_on_alloc, + size_type new_start_or_unchanged, log::Logger* logger_ptr) { auto& new_start = new_start_or_unchanged; if (new_start == S_UNCHANGED) @@ -2408,8 +2406,8 @@ void Basic_blob::resize_impl(size_type new_size, b assert(start() == new_start); } // Basic_blob::resize() -template -void Basic_blob::start_past_prefix(size_type prefix_size) +template +void Basic_blob::start_past_prefix(size_type prefix_size) { resize(((start() + size()) > prefix_size) ? (start() + size() - prefix_size) @@ -2418,23 +2416,23 @@ void Basic_blob::start_past_prefix(size_type prefi // Sanity check: `prefix_size == 0` translates to: resize(start() + size(), 0), as advertised. } -template -void Basic_blob::start_past_prefix_inc(difference_type prefix_size_inc) +template +void Basic_blob::start_past_prefix_inc(difference_type prefix_size_inc) { assert((prefix_size_inc >= 0) || (start() >= size_type(-prefix_size_inc))); start_past_prefix(start() + prefix_size_inc); } -template -void Basic_blob::clear() +template +void Basic_blob::clear() { // Note: start() remains unchanged (as advertised). 
resize(0, 0) can be used if that is unacceptable. resize(0); // It won't log, as it cannot allocate, so no need to pass-through a Logger*. // Note corner case: zero() remains true if was true (and false if was false). } -template -void Basic_blob::make_zero(log::Logger* logger_ptr) +template +void Basic_blob::make_zero(log::Logger* logger_ptr) { /* Could also write more elegantly: `swap(Basic_blob{});`, but following is a bit optimized (while equivalent); * logs better. */ @@ -2443,7 +2441,7 @@ void Basic_blob::make_zero(log::Logger* logger_ptr if (logger_ptr && logger_ptr->should_log(log::Sev::S_TRACE, S_LOG_COMPONENT)) { FLOW_LOG_SET_CONTEXT(logger_ptr, S_LOG_COMPONENT); - if constexpr(S_SHARING_ALLOWED) + if constexpr(SHARING) { FLOW_LOG_TRACE_WITHOUT_CHECKING("Blob [" << this << "] giving up ownership of internal buffer sized " "[" << capacity() << "]; deallocation will immediately follow if no sharing " @@ -2460,10 +2458,9 @@ void Basic_blob::make_zero(log::Logger* logger_ptr } // if (!zero()) } // Basic_blob::make_zero() -template -typename Basic_blob::size_type - Basic_blob::assign_copy(const boost::asio::const_buffer& src, - log::Logger* logger_ptr) +template +typename Basic_blob::size_type + Basic_blob::assign_copy(const boost::asio::const_buffer& src, log::Logger* logger_ptr) { const size_type n = src.size(); @@ -2480,10 +2477,10 @@ typename Basic_blob::size_type return n; } -template -typename Basic_blob::Iterator - Basic_blob::emplace_copy(Const_iterator dest, const boost::asio::const_buffer& src, - log::Logger* logger_ptr) +template +typename Basic_blob::Iterator + Basic_blob::emplace_copy(Const_iterator dest, const boost::asio::const_buffer& src, + log::Logger* logger_ptr) { using std::memcpy; @@ -2542,10 +2539,10 @@ typename Basic_blob::Iterator return dest_it + n; } // Basic_blob::emplace_copy() -template -typename Basic_blob::Const_iterator - Basic_blob::sub_copy(Const_iterator src, const boost::asio::mutable_buffer& dest, - log::Logger* 
logger_ptr) const +template +typename Basic_blob::Const_iterator + Basic_blob::sub_copy(Const_iterator src, const boost::asio::mutable_buffer& dest, + log::Logger* logger_ptr) const { // Code similar to emplace_copy(). Therefore keeping comments light. @@ -2584,9 +2581,9 @@ typename Basic_blob::Const_iterator return src + n; } -template -typename Basic_blob::Iterator - Basic_blob::erase(Const_iterator first, Const_iterator past_last) +template +typename Basic_blob::Iterator + Basic_blob::erase(Const_iterator first, Const_iterator past_last) { using std::memmove; @@ -2620,62 +2617,62 @@ typename Basic_blob::Iterator return dest; } // Basic_blob::erase() -template -typename Basic_blob::value_type const & - Basic_blob::const_front() const +template +typename Basic_blob::value_type const & + Basic_blob::const_front() const { assert(!empty()); return *const_begin(); } -template -typename Basic_blob::value_type - const & Basic_blob::const_back() const +template +typename Basic_blob::value_type + const & Basic_blob::const_back() const { assert(!empty()); return const_end()[-1]; } -template -typename Basic_blob::value_type& - Basic_blob::front() +template +typename Basic_blob::value_type& + Basic_blob::front() { assert(!empty()); return *begin(); } -template -typename Basic_blob::value_type& - Basic_blob::back() +template +typename Basic_blob::value_type& + Basic_blob::back() { assert(!empty()); return end()[-1]; } -template -typename Basic_blob::value_type const & - Basic_blob::front() const +template +typename Basic_blob::value_type const & + Basic_blob::front() const { return const_front(); } -template -typename Basic_blob::value_type const & - Basic_blob::back() const +template +typename Basic_blob::value_type const & + Basic_blob::back() const { return const_back(); } -template -typename Basic_blob::Const_iterator - Basic_blob::const_begin() const +template +typename Basic_blob::Const_iterator + Basic_blob::const_begin() const { return const_cast(this)->begin(); } 
-template -typename Basic_blob::Iterator - Basic_blob::begin() +template +typename Basic_blob::Iterator + Basic_blob::begin() { if (zero()) { @@ -2697,139 +2694,139 @@ typename Basic_blob::Iterator return &(*raw_or_fancy_buf_ptr) + m_start; } -template -typename Basic_blob::Const_iterator - Basic_blob::const_end() const +template +typename Basic_blob::Const_iterator + Basic_blob::const_end() const { return zero() ? const_begin() : (const_begin() + size()); } -template -typename Basic_blob::Iterator - Basic_blob::end() +template +typename Basic_blob::Iterator + Basic_blob::end() { return zero() ? begin() : (begin() + size()); } -template -typename Basic_blob::Const_iterator - Basic_blob::begin() const +template +typename Basic_blob::Const_iterator + Basic_blob::begin() const { return const_begin(); } -template -typename Basic_blob::Const_iterator - Basic_blob::cbegin() const +template +typename Basic_blob::Const_iterator + Basic_blob::cbegin() const { return const_begin(); } -template -typename Basic_blob::Const_iterator - Basic_blob::end() const +template +typename Basic_blob::Const_iterator + Basic_blob::end() const { return const_end(); } -template -typename Basic_blob::Const_iterator - Basic_blob::cend() const +template +typename Basic_blob::Const_iterator + Basic_blob::cend() const { return const_end(); } -template -typename Basic_blob::value_type - const * Basic_blob::const_data() const +template +typename Basic_blob::value_type + const * Basic_blob::const_data() const { return const_begin(); } -template -typename Basic_blob::value_type* - Basic_blob::data() +template +typename Basic_blob::value_type* + Basic_blob::data() { return begin(); } -template -bool Basic_blob::valid_iterator(Const_iterator it) const +template +bool Basic_blob::valid_iterator(Const_iterator it) const { return empty() ? 
(it == const_end()) : in_closed_range(const_begin(), it, const_end()); } -template -bool Basic_blob::derefable_iterator(Const_iterator it) const +template +bool Basic_blob::derefable_iterator(Const_iterator it) const { return empty() ? false : in_closed_open_range(const_begin(), it, const_end()); } -template -typename Basic_blob::Iterator - Basic_blob::iterator_sans_const(Const_iterator it) +template +typename Basic_blob::Iterator + Basic_blob::iterator_sans_const(Const_iterator it) { return const_cast(it); // Can be done without const_cast<> but might as well save some cycles. } -template -boost::asio::const_buffer Basic_blob::const_buffer() const +template +boost::asio::const_buffer Basic_blob::const_buffer() const { return boost::asio::const_buffer{const_data(), size()}; } -template -boost::asio::mutable_buffer Basic_blob::mutable_buffer() +template +boost::asio::mutable_buffer Basic_blob::mutable_buffer() { return boost::asio::mutable_buffer{data(), size()}; } -template -typename Basic_blob::Allocator_raw - Basic_blob::get_allocator() const +template +typename Basic_blob::Allocator_raw + Basic_blob::get_allocator() const { return alloc_raw(); } -template -typename Basic_blob::Buf_ptr& Basic_blob::buf_ptr() +template +typename Basic_blob::Buf_ptr& Basic_blob::buf_ptr() { return m_alloc_and_buf_ptr.second(); } -template -const typename Basic_blob::Buf_ptr& - Basic_blob::buf_ptr() const +template +const typename Basic_blob::Buf_ptr& + Basic_blob::buf_ptr() const { return const_cast(this)->buf_ptr(); } -template -typename Basic_blob::Allocator_raw& Basic_blob::alloc_raw() +template +typename Basic_blob::Allocator_raw& Basic_blob::alloc_raw() { return m_alloc_and_buf_ptr.first(); } -template -const typename Basic_blob::Allocator_raw& - Basic_blob::alloc_raw() const +template +const typename Basic_blob::Allocator_raw& + Basic_blob::alloc_raw() const { return const_cast(this)->alloc_raw(); } -template -Basic_blob::Deleter_raw::Deleter_raw() : +template 
+Basic_blob::Deleter_raw::Deleter_raw() : m_buf_sz(0) { /* It can be left `= default;`, but some gcc versions then complain m_buf_sz may be used uninitialized (not true but * such is life). */ } -template -Basic_blob::Deleter_raw::Deleter_raw(const Allocator_raw& alloc_raw, size_type buf_sz) : +template +Basic_blob::Deleter_raw::Deleter_raw(const Allocator_raw& alloc_raw, size_type buf_sz) : /* Copy allocator; a stateless allocator should have size 0 (no-op for the processor in that case... except * the optional<> registering it has-a-value). */ m_alloc_raw(std::in_place, alloc_raw), @@ -2838,8 +2835,8 @@ Basic_blob::Deleter_raw::Deleter_raw(const Allocat // OK. } -template -Basic_blob::Deleter_raw::Deleter_raw(Deleter_raw&& moved_src) +template +Basic_blob::Deleter_raw::Deleter_raw(Deleter_raw&& moved_src) { /* We advertised our action is as-if we default-ct, then move-assign. While we skipped delegating to default-ctor, * the only difference is that would've initialized m_buf_sz; but the following will just overwrite it anyway. So @@ -2849,12 +2846,12 @@ Basic_blob::Deleter_raw::Deleter_raw(Deleter_raw&& /* Auto-generated copy-ct should be fine; the only conceivable source of trouble might be Allocator_raw copy-ction, * but that must exist for all allocators. 
*/ -template -Basic_blob::Deleter_raw::Deleter_raw(const Deleter_raw&) = default; +template +Basic_blob::Deleter_raw::Deleter_raw(const Deleter_raw&) = default; -template -typename Basic_blob::Deleter_raw& - Basic_blob::Deleter_raw::operator=(Deleter_raw&& moved_src) +template +typename Basic_blob::Deleter_raw& + Basic_blob::Deleter_raw::operator=(Deleter_raw&& moved_src) { using std::swap; @@ -2895,9 +2892,9 @@ typename Basic_blob::Deleter_raw& return *this; } // Basic_blob::Deleter_raw::operator=(&&) -template -typename Basic_blob::Deleter_raw& - Basic_blob::Deleter_raw::operator=(const Deleter_raw& src) +template +typename Basic_blob::Deleter_raw& + Basic_blob::Deleter_raw::operator=(const Deleter_raw& src) { /* Ideally we'd just use `= default;`, but that might not compile, when Allocator_raw has no copy-assignment * (as noted elsewhere this is entirely possible). So basically perform a simpler version of the move-assignment @@ -2924,8 +2921,8 @@ typename Basic_blob::Deleter_raw& return *this; } // Basic_blob::Deleter_raw::operator=(const&) -template -void Basic_blob::Deleter_raw::operator()(Pointer_raw to_delete) +template +void Basic_blob::Deleter_raw::operator()(Pointer_raw to_delete) { // No need to invoke dtor: Allocator_raw::value_type is Basic_blob::value_type, a boring int type with no real dtor. diff --git a/src/flow/util/blob.hpp b/src/flow/util/blob.hpp index 97454c686..210dc00aa 100644 --- a/src/flow/util/blob.hpp +++ b/src/flow/util/blob.hpp @@ -56,7 +56,7 @@ namespace flow::util * Then when those 2 changes became required by some use cases, Basic_blob took the vast majority of what used to * be `Blob` and added those 2 changes. Meanwhile `Blob` was rewritten in terms of Basic_blob in a way that * exactly preserved its behavior (so that no call-site changes for Blob-using code were needed). 
- * Lastly, when `S_SHARING_ALLOWED` template param was added to Basic_blob, `Blob` became a template + * Lastly, when #S_SHARING template param was added to Basic_blob, `Blob` became a template * Blob_with_log_context, while #Blob aliased to `Blob_with_log_context` thus staying functionally * exactly the same as before, minus the share() feature. (`Sharing_blob` was added, aliasing to * `Blob_with_log_context`, being exactly idetical to `Blob` before; the rename was possible due to @@ -73,19 +73,19 @@ namespace flow::util * (See for example ~Blob_with_log_context() and the assignment operators.) * @endinternal * - * @tparam S_SHARING_ALLOWED + * @tparam SHARING * See Basic_blob. */ -template +template class Blob_with_log_context : public log::Log_context, - public Basic_blob, S_SHARING_ALLOWED> + public Basic_blob, SHARING> { public: // Types. /// Short-hand for our main base. - using Base = Basic_blob, S_SHARING_ALLOWED>; + using Base = Basic_blob, SHARING>; /// Short-hand for base member (needed because base access to a template must be qualified otherwise). using value_type = typename Base::value_type; @@ -120,6 +120,10 @@ class Blob_with_log_context : /// Short-hand for base member (needed because base access to a template must be qualified otherwise). static constexpr auto S_UNCHANGED = Base::S_UNCHANGED; + /// Short-hand for base member (needed because base access to a template must be qualified otherwise). + static constexpr auto S_IS_VANILLA_ALLOC = Base::S_IS_VANILLA_ALLOC; + static_assert(S_IS_VANILLA_ALLOC, "We derive from a std::allocator-driven thingie."); + // Constructors/destructor. /** @@ -385,24 +389,24 @@ class Blob_with_log_context : // Template implementations. -template -Blob_with_log_context::Blob_with_log_context(log::Logger* logger_ptr) : +template +Blob_with_log_context::Blob_with_log_context(log::Logger* logger_ptr) : log::Log_context(logger_ptr, Base::S_LOG_COMPONENT) // And default-ct Base{}. { // Nothing else. 
} -template -Blob_with_log_context::Blob_with_log_context(log::Logger* logger_ptr, size_type size) : +template +Blob_with_log_context::Blob_with_log_context(log::Logger* logger_ptr, size_type size) : log::Log_context(logger_ptr, Base::S_LOG_COMPONENT), Base(size, get_logger()) { // Nothing else. } -template -Blob_with_log_context::Blob_with_log_context(log::Logger* logger_ptr, size_type size, +template +Blob_with_log_context::Blob_with_log_context(log::Logger* logger_ptr, size_type size, Clear_on_alloc coa_tag) : log::Log_context(logger_ptr, Base::S_LOG_COMPONENT), Base(size, coa_tag, get_logger()) @@ -410,24 +414,24 @@ Blob_with_log_context::Blob_with_log_context(log::Logger* log // Nothing else. } -template -Blob_with_log_context::Blob_with_log_context(Blob_with_log_context&& moved_src) noexcept : +template +Blob_with_log_context::Blob_with_log_context(Blob_with_log_context&& moved_src) noexcept : log::Log_context(static_cast(std::move(moved_src))), Base(std::move(moved_src), get_logger()) { // Nothing else. } -template -Blob_with_log_context::Blob_with_log_context(const Blob_with_log_context& src) : +template +Blob_with_log_context::Blob_with_log_context(const Blob_with_log_context& src) : log::Log_context(static_cast(src)), Base(src, get_logger()) { // Nothing else. } -template -Blob_with_log_context::~Blob_with_log_context() +template +Blob_with_log_context::~Blob_with_log_context() { /* ~Basic_blob() doesn't log at all -- no way to give a Logger* to it -- but a way to get some potentially * useful logging is to make_zero(). 
It is redundant (which is why ~Basic_blob() does not bother) but in some @@ -435,9 +439,9 @@ Blob_with_log_context::~Blob_with_log_context() make_zero(); } -template -Blob_with_log_context& - Blob_with_log_context::operator=(const Blob_with_log_context& src) +template +Blob_with_log_context& + Blob_with_log_context::operator=(const Blob_with_log_context& src) { using log::Log_context; @@ -446,9 +450,9 @@ Blob_with_log_context& return *this; } -template -Blob_with_log_context& - Blob_with_log_context::operator=(Blob_with_log_context&& moved_src) noexcept +template +Blob_with_log_context& + Blob_with_log_context::operator=(Blob_with_log_context&& moved_src) noexcept { using log::Log_context; @@ -457,8 +461,8 @@ Blob_with_log_context& return *this; } -template -void Blob_with_log_context::swap(Blob_with_log_context& other) noexcept +template +void Blob_with_log_context::swap(Blob_with_log_context& other) noexcept { using log::Log_context; using std::swap; @@ -470,42 +474,42 @@ void Blob_with_log_context::swap(Blob_with_log_context& other } } -template -void swap(Blob_with_log_context& blob1, Blob_with_log_context& blob2) noexcept +template +void swap(Blob_with_log_context& blob1, Blob_with_log_context& blob2) noexcept { return blob1.swap(blob2); } -template -Blob_with_log_context Blob_with_log_context::share() const +template +Blob_with_log_context Blob_with_log_context::share() const { Blob_with_log_context blob{get_logger()}; static_cast(blob) = Base::share(get_logger()); return blob; } -template -Blob_with_log_context - Blob_with_log_context::share_after_split_left(size_type lt_size) +template +Blob_with_log_context + Blob_with_log_context::share_after_split_left(size_type lt_size) { Blob_with_log_context blob{get_logger()}; static_cast(blob) = Base::share_after_split_left(lt_size, get_logger()); return blob; } -template -Blob_with_log_context - Blob_with_log_context::share_after_split_right(size_type rt_size) +template +Blob_with_log_context + 
Blob_with_log_context::share_after_split_right(size_type rt_size) { Blob_with_log_context blob{get_logger()}; static_cast(blob) = Base::share_after_split_right(rt_size, get_logger()); return blob; } -template +template template -void Blob_with_log_context::share_after_split_equally(size_type size, bool headless_pool, - Emit_blob_func&& emit_blob_func) +void Blob_with_log_context::share_after_split_equally(size_type size, bool headless_pool, + Emit_blob_func&& emit_blob_func) { Base::share_after_split_equally_impl(size, headless_pool, std::move(emit_blob_func), get_logger(), [this](size_type lt_size, [[maybe_unused]] log::Logger* logger_ptr) @@ -516,10 +520,10 @@ void Blob_with_log_context::share_after_split_equally(size_ty }); } -template +template template -void Blob_with_log_context::share_after_split_equally_emit_seq(size_type size, bool headless_pool, - Blob_container* out_blobs_ptr) +void Blob_with_log_context::share_after_split_equally_emit_seq(size_type size, bool headless_pool, + Blob_container* out_blobs_ptr) { // Almost copy-pasted from Basic_blob::(). It's short, though, so it seems fine. @todo Revisit. @@ -530,11 +534,11 @@ void Blob_with_log_context::share_after_split_equally_emit_se }); } -template +template template -void Blob_with_log_context::share_after_split_equally_emit_ptr_seq(size_type size, - bool headless_pool, - Blob_ptr_container* out_blobs_ptr) +void Blob_with_log_context::share_after_split_equally_emit_ptr_seq(size_type size, + bool headless_pool, + Blob_ptr_container* out_blobs_ptr) { // Almost copy-pasted from Basic_blob::(). It's short, though, so it seems fine. @todo Revisit. 
@@ -549,54 +553,54 @@ void Blob_with_log_context::share_after_split_equally_emit_pt }); } -template -void Blob_with_log_context::reserve(size_type new_capacity) +template +void Blob_with_log_context::reserve(size_type new_capacity) { Base::reserve(new_capacity, get_logger()); } -template -void Blob_with_log_context::reserve(size_type new_capacity, Clear_on_alloc coa_tag) +template +void Blob_with_log_context::reserve(size_type new_capacity, Clear_on_alloc coa_tag) { Base::reserve(new_capacity, coa_tag, get_logger()); } -template -void Blob_with_log_context::resize(size_type new_size, size_type new_start_or_unchanged) +template +void Blob_with_log_context::resize(size_type new_size, size_type new_start_or_unchanged) { Base::resize(new_size, new_start_or_unchanged, get_logger()); } -template -void Blob_with_log_context::resize(size_type new_size, Clear_on_alloc coa_tag, - size_type new_start_or_unchanged) +template +void Blob_with_log_context::resize(size_type new_size, Clear_on_alloc coa_tag, + size_type new_start_or_unchanged) { Base::resize(new_size, coa_tag, new_start_or_unchanged, get_logger()); } -template -void Blob_with_log_context::make_zero() +template +void Blob_with_log_context::make_zero() { Base::make_zero(get_logger()); } -template -typename Blob_with_log_context::size_type - Blob_with_log_context::assign_copy(const boost::asio::const_buffer& src) +template +typename Blob_with_log_context::size_type + Blob_with_log_context::assign_copy(const boost::asio::const_buffer& src) { return Base::assign_copy(src, get_logger()); } -template -typename Blob_with_log_context::Iterator - Blob_with_log_context::emplace_copy(Const_iterator dest, const boost::asio::const_buffer& src) +template +typename Blob_with_log_context::Iterator + Blob_with_log_context::emplace_copy(Const_iterator dest, const boost::asio::const_buffer& src) { return Base::emplace_copy(dest, src, get_logger()); } -template -typename Blob_with_log_context::Const_iterator - 
Blob_with_log_context::sub_copy(Const_iterator src, const boost::asio::mutable_buffer& dest) const +template +typename Blob_with_log_context::Const_iterator + Blob_with_log_context::sub_copy(Const_iterator src, const boost::asio::mutable_buffer& dest) const { return Base::sub_copy(src, dest, get_logger()); } diff --git a/src/flow/util/blob_fwd.hpp b/src/flow/util/blob_fwd.hpp index e1f79ad3c..12e3f2343 100644 --- a/src/flow/util/blob_fwd.hpp +++ b/src/flow/util/blob_fwd.hpp @@ -27,9 +27,9 @@ namespace flow::util // Find doc headers near the bodies of these compound types. -template, bool S_SHARING_ALLOWED = false> +template, bool SHARING = false> class Basic_blob; -template +template class Blob_with_log_context; struct Clear_on_alloc; @@ -93,9 +93,8 @@ extern const Clear_on_alloc CLEAR_ON_ALLOC; * Object. * @return Whether `blob1` and `blob2` both operate on the same underlying buffer. */ -template -bool blobs_sharing(const Basic_blob& blob1, - const Basic_blob& blob2); +template +bool blobs_sharing(const Basic_blob& blob1, const Basic_blob& blob2); /** * Equivalent to `blob1.swap(blob2)`. @@ -108,9 +107,9 @@ bool blobs_sharing(const Basic_blob& blob1, * @param logger_ptr * The Logger implementation to use in *this* routine (synchronously) only. Null allowed. */ -template -void swap(Basic_blob& blob1, - Basic_blob& blob2, log::Logger* logger_ptr = nullptr) noexcept; +template +void swap(Basic_blob& blob1, + Basic_blob& blob2, log::Logger* logger_ptr = nullptr) noexcept; /** * On top of the similar Basic_blob related function, logs using the stored log context of `blob1`. @@ -121,7 +120,7 @@ void swap(Basic_blob& blob1, * @param blob2 * See super-class related API. 
*/ -template -void swap(Blob_with_log_context& blob1, Blob_with_log_context& blob2) noexcept; +template +void swap(Blob_with_log_context& blob1, Blob_with_log_context& blob2) noexcept; } // namespace flow::util From b586a464ede1a5ec0567c564271912c8a7829769 Mon Sep 17 00:00:00 2001 From: Yuri Goldfeld Date: Tue, 9 Dec 2025 15:09:12 -0800 Subject: [PATCH 36/37] (cont) Bug fix (in new code: Log_context_mt copy/move ctors+assignment, swap; found in code review by echan; for some reason my brain glitched and told me I need not lock a const thing... which is embarrassing). --- src/flow/log/log.cpp | 90 ++++++++++++++++++++----------- src/flow/log/test/log_test.cpp | 7 ++- src/flow/net_flow/peer_socket.hpp | 2 +- 3 files changed, 67 insertions(+), 32 deletions(-) diff --git a/src/flow/log/log.cpp b/src/flow/log/log.cpp index a77388c65..995cb24e6 100644 --- a/src/flow/log/log.cpp +++ b/src/flow/log/log.cpp @@ -219,7 +219,7 @@ Logger* Log_context::get_logger() const Logger* Log_context::set_logger(Logger* logger) { - std::swap(logger, m_logger); + m_logger = logger; return logger; } @@ -250,29 +250,47 @@ Log_context_mt::Log_context_mt(Logger* logger) : } Log_context_mt::Log_context_mt(const Log_context_mt& src) : - Log_context(src) + Log_context() // Eliminate possible warning at tiny (if any) perf cost. { - // Leave m_mutex alone. + // We could just do `operator=(src)`; but to avoid unnecessary locking of this->m_mutex do it manually. + util::Lock_guard lock{src.m_mutex}; + Log_context::operator=(src); } Log_context_mt::Log_context_mt(Log_context_mt&& src) : - Log_context(static_cast(src)) // See below. + Log_context() // Clear *this in preparation for swap. { - using Lock = util::Lock_guard; + using std::swap; // This enables proper ADL. 
- /* We could just do `operator=(std::move(src))`; but to avoid unnecessary locking of this->m_mutex do it manually; - * - lock-free copying from Log_context src onto Log_context *this = already done; - * - so it remains to lock src and nullify it: */ - Lock lock{src.m_mutex}; - static_cast(src).operator=(Log_context{}); + // We could just do `operator=(move(src))`; but to avoid unnecessary locking of this->m_mutex do it manually. + util::Lock_guard lock{src.m_mutex}; + swap(static_cast(*this), + static_cast(src)); } Log_context_mt& Log_context_mt::operator=(const Log_context_mt& src) { + using Lock = util::Lock_guard; + if (&src != this) { - util::Lock_guard lock{m_mutex}; - Log_context::operator=(src); + /* Naively we'd do something close to: + * Lock lock1{m_mutex}; + * Lock lock2{src.m_mutex}; + * Log_context::operator=(src); + * However conceivably this could cause an obscure deadlock for reasons similar to those cited in swap(). As there: + * Seems there's no choice but to lock things piecewise and execute the copy-assignment via a temporary + * intermediary Log_context. */ + + Log_context obj_tmp; + { + Lock lock{src.m_mutex}; + obj_tmp = src; + } + { + Lock lock{m_mutex}; + Log_context::operator=(obj_tmp); + } } return *this; } @@ -280,24 +298,20 @@ Log_context_mt& Log_context_mt::operator=(const Log_context_mt& src) Log_context_mt& Log_context_mt::operator=(Log_context_mt&& src) { using Lock = util::Lock_guard; + using std::swap; // This enables proper ADL. if (&src != this) { - /* Naively we'd do something close to: - * Lock lock1{m_mutex}; - * Lock lock2{src.m_mutex}; - * Log_context::operator=(std::move(src)); - * However conceivably this could cause an obscure deadlock for reasons similar to those cited in swap(). As there: - * Seems there's no choice but to lock things piecewise and execute the move-assignment manually as its 2 - * component ops (copy-assign; then clear `src`), as we do so. 
*/ + // Same deal as in copy ctor; just have to add the clearing of `src` which we do by using swap(L_c&, L_c&). + Log_context obj_tmp; { - Lock lock{m_mutex}; - Log_context::operator=(static_cast(src)); + Lock lock{src.m_mutex}; + swap(obj_tmp, static_cast(src)); } { - Lock lock{src.m_mutex}; - static_cast(src).operator=(Log_context{}); + Lock lock{m_mutex}; + Log_context::operator=(obj_tmp); } } @@ -324,6 +338,9 @@ const Component& Log_context_mt::get_log_component() const void Log_context_mt::swap(Log_context_mt& other) { + using Lock = util::Lock_guard; + using std::swap; // This enables proper ADL. + /* Naively we'd do something close to: * Lock lock1{m_mutex}; * Lock lock2{other.m_mutex}; @@ -333,15 +350,28 @@ void Log_context_mt::swap(Log_context_mt& other) * and * lc_mt2.swap(lc_mt1); * Strange thing to do, but it is legal, and a classic AB-BA deadlock results. - * Seems there's no choice but to lock things piecewise and execute the swap manually as the 3 classic ops, as - * we do so. */ + * Seems there's no choice but to lock things in series and use a temporary intermediary. + * (We could've also let the default std::swap() just do one move-construct and two move-assignments, but + * perf-wise that'd do some unnecessary stuff.) */ - Log_context_mt& obj1 = *this; - Log_context_mt& obj2 = other; + auto& obj1_mt = *this; + auto& obj2_mt = other; + auto& obj1_rw = static_cast(obj1_mt); + auto& obj2_rw = static_cast(obj2_mt); - Log_context_mt obj_tmp{static_cast(obj1)}; - obj1 = static_cast(obj2); // Will lock/unlock obj1.m_mutex. - obj2 = static_cast(obj_tmp); // Will lock/unlock obj2.m_mutex. 
+ Log_context tmp_rw; + { + Lock lock{obj1_mt.m_mutex}; + tmp_rw = obj1_rw; + } + { + Lock lock{obj2_mt.m_mutex}; + swap(tmp_rw, obj2_rw); + } + { + Lock lock{obj1_mt.m_mutex}; + obj1_rw = tmp_rw; + } } // Log_context_mt::swap() void swap(Log_context_mt& val1, Log_context_mt& val2) diff --git a/src/flow/log/test/log_test.cpp b/src/flow/log/test/log_test.cpp index b6660d00c..03ce3d51e 100644 --- a/src/flow/log/test/log_test.cpp +++ b/src/flow/log/test/log_test.cpp @@ -20,6 +20,7 @@ #include "flow/util/util.hpp" #include #include +#include namespace flow::log::test { @@ -27,6 +28,8 @@ namespace flow::log::test namespace { using std::string; +using std::cout; +using std::flush; } // Anonymous namespace // Yes... this is very cheesy... but this is a test, so I don't really care. @@ -52,7 +55,9 @@ TEST(Log_context, Interface) const auto comp1 = Flow_log_component::S_UTIL; const auto comp2 = Flow_log_component::S_LOG; const auto comp0 = Component{}; - EXPECT_TRUE(comp0.empty()); + ASSERT_TRUE(comp0.empty()); + + cout << "Testing type [" << typeid(Log_context_t).name() << "].\n" << flush; // @todo Maybe should implement operator==(Component, Component)? Then use it/test it here? diff --git a/src/flow/net_flow/peer_socket.hpp b/src/flow/net_flow/peer_socket.hpp index 7d069ca3b..742978915 100644 --- a/src/flow/net_flow/peer_socket.hpp +++ b/src/flow/net_flow/peer_socket.hpp @@ -783,7 +783,7 @@ class Peer_socket : /** * The error code that perviously caused state() to become State::S_CLOSED, or success code if state - * is not CLOSED. For example, error::code::S_CONN_RESET_BY_OTHER_SIDE (if was connected) or + * is not CLOSED. For example, error::Code::S_CONN_RESET_BY_OTHER_SIDE (if was connected) or * error::Code::S_CONN_TIMEOUT (if was connecting) * * @return Ditto. 
From b7bd40bbd590ac7decbbcbe37b3cfe91efb557fb Mon Sep 17 00:00:00 2001 From: Yuri Goldfeld Date: Wed, 14 Jan 2026 14:22:22 -0800 Subject: [PATCH 37/37] (cont) Tweaks, all stylistic, based on code review feedback; and a couple minor opportunistic things. --- src/flow/net_flow/detail/low_lvl_packet.cpp | 2 +- src/flow/net_flow/detail/low_lvl_packet.hpp | 2 +- src/flow/net_flow/event_set.hpp | 28 +++++++-------- src/flow/net_flow/node.hpp | 4 +-- src/flow/net_flow/options.hpp | 2 +- src/flow/net_flow/peer_socket.hpp | 4 +-- src/flow/util/basic_blob.hpp | 40 ++++++++++----------- src/flow/util/linked_hash_map.hpp | 13 +++---- src/flow/util/linked_hash_set.hpp | 11 +++--- src/flow/util/thread_lcl.hpp | 33 +++++++++-------- 10 files changed, 66 insertions(+), 73 deletions(-) diff --git a/src/flow/net_flow/detail/low_lvl_packet.cpp b/src/flow/net_flow/detail/low_lvl_packet.cpp index ee85a22e7..b006cd36b 100644 --- a/src/flow/net_flow/detail/low_lvl_packet.cpp +++ b/src/flow/net_flow/detail/low_lvl_packet.cpp @@ -403,7 +403,7 @@ Low_lvl_packet::Ptr Low_lvl_packet::create_from_raw_data_packet(log::Logger* log * - The raw type ID, which indicates what is after the Common Header. * - We'll create the appropriate Low_lvl_packet sub-type object based on this. * - Certain common fields that apply to all. - * - We'll fill those out in the Low_lvl_packet (super-type) area of the the just-created object. + * - We'll fill those out in the Low_lvl_packet (super-type) area of the just-created object. * - Type-specific data: * - We hand off the partially filled object and the remaining buffer to a virtual method that * will appropriately fill the rest of the object. 
*/ diff --git a/src/flow/net_flow/detail/low_lvl_packet.hpp b/src/flow/net_flow/detail/low_lvl_packet.hpp index a4446f01d..7a1fb79a3 100644 --- a/src/flow/net_flow/detail/low_lvl_packet.hpp +++ b/src/flow/net_flow/detail/low_lvl_packet.hpp @@ -1031,7 +1031,7 @@ struct Ack_packet : public Low_lvl_packet * congestion control (like FAST or Vegas) we realized it is important for RTTs (which use the ACK * delay value) to be quite precise (microsecond level or so). Therefore, to be totally safe, we * choose to use the same units as #Fine_duration, which is how we compute all time periods. As - * for the the encoding width, we use 64 bits just in case. + * for the encoding width, we use 64 bits just in case. * * @todo Reconsider the encoding width. If `Ack_delay_time_unit{1}` is a nanosecond, then 32 bits * would support a maximum delay of ~4.1 seconds which is likely fine for most real-world diff --git a/src/flow/net_flow/event_set.hpp b/src/flow/net_flow/event_set.hpp index 8413d68e0..592c03aa7 100644 --- a/src/flow/net_flow/event_set.hpp +++ b/src/flow/net_flow/event_set.hpp @@ -423,7 +423,7 @@ class Event_set : * See flow::Error_code docs for error reporting semantics. Generated codes: * error::Code::S_EVENT_SET_CLOSED. */ - void close(Error_code* err_code = 0); + void close(Error_code* err_code = nullptr); /** * Adds the given socket to the set of sockets we want to know are "ready" by the definition of @@ -444,7 +444,7 @@ class Event_set : * @return `true` if and only if no error occurred (`*err_code` is success). */ template - bool add_wanted_socket(typename Socket::Ptr sock, Event_type ev_type, Error_code* err_code = 0); + bool add_wanted_socket(typename Socket::Ptr sock, Event_type ev_type, Error_code* err_code = nullptr); /** * Opposite of add_wanted_socket(). @@ -461,7 +461,7 @@ class Event_set : * @return `true` if and only if no error occurred (`*err_code` is success). 
*/ template - bool remove_wanted_socket(typename Socket::Ptr sock, Event_type ev_type, Error_code* err_code = 0); + bool remove_wanted_socket(typename Socket::Ptr sock, Event_type ev_type, Error_code* err_code = nullptr); /** * Efficiently exchanges the current set of sockets we want to know are "ready" by the definiton of @@ -508,7 +508,7 @@ class Event_set : * error::Code::S_EVENT_SET_CLOSED, error::Code::S_EVENT_SET_IMMUTABLE_WHEN_WAITING. * @return `true` if and only if no error occurred (`*err_code` is success). */ - bool clear_wanted_sockets(Event_type ev_type, Error_code* err_code = 0); + bool clear_wanted_sockets(Event_type ev_type, Error_code* err_code = nullptr); /** * Returns `true` if and only if at least one wanted event for at least one socket is registered @@ -520,7 +520,7 @@ class Event_set : * @return `true` if there are wanted events; `false` if there are no wanted events (then `*err_code` is * success) or there was an error (`*err_code` is failure; i.e., `bool(*err_code) == true`). */ - bool events_wanted(Error_code* err_code = 0) const; + bool events_wanted(Error_code* err_code = nullptr) const; /** * Checks for all previously described events that currently hold, saves them for retrieval via @@ -535,7 +535,7 @@ class Event_set : * error::Code::S_EVENT_SET_CLOSED, error::Code::S_EVENT_SET_DOUBLE_WAIT_OR_POLL. * @return `true` if and only if no error occurred (`*err_code` is success). */ - bool poll(Error_code* err_code = 0); + bool poll(Error_code* err_code = nullptr); /** * Blocks indefinitely until one or more of the previously described events hold -- or the wait @@ -559,7 +559,7 @@ class Event_set : * error::Code::S_EVENT_SET_DOUBLE_WAIT_OR_POLL, error::Code::S_EVENT_SET_CLOSED. * @return `true` if and only if no error occurred (`*err_code` is success). 
*/ - bool sync_wait(Error_code* err_code = 0); + bool sync_wait(Error_code* err_code = nullptr); /** * Same as the other sync_wait() but will stop waiting if the timeout given as argument @@ -591,7 +591,7 @@ class Event_set : * an error, in particular. */ template - bool sync_wait(const boost::chrono::duration& max_wait, Error_code* err_code = 0); + bool sync_wait(const boost::chrono::duration& max_wait, Error_code* err_code = nullptr); /** * Moves object to State::S_WAITING state, saves the given handler to be executed later (in a different, @@ -640,7 +640,7 @@ class Event_set : * error::Code::S_EVENT_SET_DOUBLE_WAIT_OR_POLL, error::Code::S_EVENT_SET_CLOSED. * @return `true` if and only if no error occurred (`*err_code` is success). */ - bool async_wait(const Event_handler& on_event, Error_code* err_code = 0); + bool async_wait(const Event_handler& on_event, Error_code* err_code = nullptr); /** * Moves object from State::S_WAITING to State::S_INACTIVE, and forgets any handler saved by async_wait(), or does @@ -661,7 +661,7 @@ class Event_set : * @return `true` if and only if no error occurred (`*err_code` is success). In particular, state() * being State::S_INACTIVE when the method starts is not an error. */ - bool async_wait_finish(Error_code* err_code = 0); + bool async_wait_finish(Error_code* err_code = nullptr); /** * Returns `true` if and only if the last wait, if any, detected at least one event. In other @@ -689,7 +689,7 @@ class Event_set : * @return `true` if there are active events; `false` if there are no active events (then `*err_code` is * success) or there was an error (`*err_code` is failure; i.e., `bool(*err_code) == true`). */ - bool events_detected(Error_code* err_code = 0) const; + bool events_detected(Error_code* err_code = nullptr) const; /** * Gets the sockets that satisfy the condition of the given Event_type detected during the last wait. 
@@ -742,7 +742,7 @@ class Event_set : * the author wanted to play around with `any`s instead of haxoring old-school `union`s. * `variant` is much nicer, however, and the dynamic nature of `any` is entirely unnecessary here. */ - bool emit_result_sockets(Sockets* target_set, Event_type ev_type, Error_code* err_code = 0); + bool emit_result_sockets(Sockets* target_set, Event_type ev_type, Error_code* err_code = nullptr); /** * Identical to `emit_result_sockets(&sockets, ev_type, err_code)`, where originally `sockets` is empty and @@ -754,7 +754,7 @@ class Event_set : * Same. * @return Same. */ - bool clear_result_sockets(Event_type ev_type, Error_code* err_code = 0); + bool clear_result_sockets(Event_type ev_type, Error_code* err_code = nullptr); /** * Forgets all sockets stored in this object in any fashion. @@ -764,7 +764,7 @@ class Event_set : * error::Code::S_EVENT_SET_CLOSED, error::Code::S_EVENT_SET_IMMUTABLE_WHEN_WAITING. * @return true if and only if no error occurred (*err_code is success). */ - bool clear(Error_code* err_code = 0); + bool clear(Error_code* err_code = nullptr); private: // Friends. diff --git a/src/flow/net_flow/node.hpp b/src/flow/net_flow/node.hpp index 58774bf4e..8c0e9ffb2 100644 --- a/src/flow/net_flow/node.hpp +++ b/src/flow/net_flow/node.hpp @@ -619,7 +619,7 @@ namespace flow::net_flow * was able to run; thread W had just decided to send that packet over wire in the first place; so there's no * reason to access it until ACK -- much later -- or some kind of socket-wide catastrophe.) All that put * together I dub APPROACH 5. Thus, APPROACH 1 + APPROACH 5 seems like the best idea of all, distilling all - * the trade-offs into the the fastest yet close to simplest approach. + * the trade-offs into the fastest yet close to simplest approach. 
* * @todo More uniform diagnostic logging: There is much diagnostic logging in the * implementation (FLOW_ERROR*(), etc.), but some of it lacks useful info like `sock` or `serv` (i.e., the @@ -994,7 +994,7 @@ class Node : * Network environment simulator to use to simulate (fake) external network conditions * inside the code, e.g., for testing. If 0, no such simulation will occur. Otherwise the * code will add conditions such as loss and latency (in addition to any present naturally) - * and will take ownership of the the passed in pointer (meaning, we will `delete` as we see fit; + * and will take ownership of the passed in pointer (meaning, we will `delete` as we see fit; * and you must never do so from now on). * @param err_code * See flow::Error_code docs for error reporting semantics. error::Code generated: diff --git a/src/flow/net_flow/options.hpp b/src/flow/net_flow/options.hpp index 9e5477497..bf92732f7 100644 --- a/src/flow/net_flow/options.hpp +++ b/src/flow/net_flow/options.hpp @@ -433,7 +433,7 @@ struct Peer_socket_options * 4. Add an ADD_CONFIG_OPTION() line into the proper `struct` setup_config_parsing_helper() by analogy * with other present options. The description string should usually be a copy of the comment * from step 3. - * 5. Add the default value (very important) into the the proper `struct` constructor. + * 5. Add the default value (very important) into the proper `struct` constructor. * Explain the choice of default with a comment. * 6. Is this a static option (step 2)? If so, add static validation (by analogy with present * options) to Node::validate_options() or Node::sock_validate_options(). diff --git a/src/flow/net_flow/peer_socket.hpp b/src/flow/net_flow/peer_socket.hpp index 742978915..898c206c6 100644 --- a/src/flow/net_flow/peer_socket.hpp +++ b/src/flow/net_flow/peer_socket.hpp @@ -1780,7 +1780,7 @@ class Peer_socket : * This gains meaning only in thread W. 
This should NOT be accessed outside of thread W and is * not protected by a mutex. * - * @see Sent_when and Sent_packet::m_sent_when, where if `X` is the the last element of the latter sequence, then + * @see Sent_when and Sent_packet::m_sent_when, where if `X` is the last element of the latter sequence, then * `X.m_sent_time` is the value by which elements in the present map are ordered. However, this only * happens to be the case, because by definition an element * is always placed at the front of the present map (Linked_hash_map), and this order is inductively maintained; @@ -2144,7 +2144,7 @@ class Peer_socket : * Connection timeout scheduled task; fires if the entire initial connection process does not complete within a * certain amount of time. It is started when the SYN or SYN_ACK is sent the very first time (NOT counting * resends), canceled when SYN_ACK or SYN_ACK_ACK (respectively) is received in response to ANY SYN or - * SYN_ACK (respevtively), and fired if the the latter does not occur in time. + * SYN_ACK (respectively), and fired if the latter does not occur in time. * * This gains meaning only in thread W. This should NOT be accessed outside of thread W and is * not protected by a mutex. diff --git a/src/flow/util/basic_blob.hpp b/src/flow/util/basic_blob.hpp index 8b24fd307..ef10ec635 100644 --- a/src/flow/util/basic_blob.hpp +++ b/src/flow/util/basic_blob.hpp @@ -363,12 +363,12 @@ class Basic_blob /** * Constructs blob with `zero() == true`. Note this means no buffer is allocated. * - * @param alloc_raw + * @param alloc_raw_src * Allocator to copy and store in `*this` for all buffer allocations/deallocations. * If #Allocator_raw is stateless, then this has size zero, so nothing is copied at runtime, * and by definition it is to equal `Allocator_raw{}`. 
*/ - Basic_blob(const Allocator_raw& alloc_raw = {}); + Basic_blob(const Allocator_raw& alloc_raw_src = {}); /** * Constructs blob with size() and capacity() equal to the given `size`, and `start() == 0`. Performance note: @@ -387,13 +387,13 @@ class Basic_blob * @param logger_ptr * The Logger implementation to use in *this* routine (synchronously) or asynchronously when TRACE-logging * in the event of buffer dealloc. Null allowed. - * @param alloc_raw + * @param alloc_raw_src * Allocator to copy and store in `*this` for all buffer allocations/deallocations. * If #Allocator_raw is stateless, then this has size zero, so nothing is copied at runtime, * and by definition it is to equal `Allocator_raw{}`. */ explicit Basic_blob(size_type size, log::Logger* logger_ptr = nullptr, - const Allocator_raw& alloc_raw = {}); + const Allocator_raw& alloc_raw_src = {}); /** * Identical to similar-sig ctor except, if `size > 0`, all `size` elements are performantly initialized to zero. @@ -410,11 +410,11 @@ class Basic_blob * See similar ctor. * @param logger_ptr * See similar ctor. - * @param alloc_raw + * @param alloc_raw_src * See similar ctor. */ explicit Basic_blob(size_type size, Clear_on_alloc coa_tag, log::Logger* logger_ptr = nullptr, - const Allocator_raw& alloc_raw = {}); + const Allocator_raw& alloc_raw_src = {}); /** * Move constructor, constructing a blob exactly internally equal to pre-call `moved_src`, while the latter is @@ -1320,12 +1320,12 @@ class Basic_blob * to be passed-in. Many allocators probably don't really need this, as array size is typically recorded * invisibly near the array itself, but formally this is not guaranteed for all allocators. * - * @param alloc_raw + * @param alloc_raw_src * Allocator to copy and store. * @param buf_sz * See above. 
*/ - explicit Deleter_raw(const Allocator_raw& alloc_raw, size_type buf_sz); + explicit Deleter_raw(const Allocator_raw& alloc_raw_src, size_type buf_sz); /** * Move-construction which may be required when we are used in `unique_ptr`. This is equivalent to @@ -1667,8 +1667,8 @@ class Basic_blob // buf_ptr() initialized to null pointer. n_capacity and m_size remain uninit (meaningless until buf_ptr() changes). template -Basic_blob::Basic_blob(const Allocator_raw& alloc_raw) : - m_alloc_and_buf_ptr(alloc_raw), // Copy allocator; stateless allocator should have size 0 (no-op for the processor). +Basic_blob::Basic_blob(const Allocator_raw& alloc_raw_src) : + m_alloc_and_buf_ptr(alloc_raw_src), // Copy allocator; stateless alloc should have size 0 (no-op for the processor). m_capacity(0), // Not necessary, but some compilers will warn in some situations. Fine; it's cheap enough. m_start(0), // Ditto. m_size(0) // Ditto. @@ -1678,30 +1678,26 @@ Basic_blob::Basic_blob(const Allocator_raw& alloc_raw) : template Basic_blob::Basic_blob - (size_type size, log::Logger* logger_ptr, const Allocator_raw& alloc_raw) : + (size_type size, log::Logger* logger_ptr, const Allocator_raw& alloc_raw_src) : - Basic_blob(alloc_raw) // Delegate. + Basic_blob(alloc_raw_src) // Delegate. { resize(size, 0, logger_ptr); } template Basic_blob::Basic_blob - (size_type size, Clear_on_alloc coa_tag, log::Logger* logger_ptr, const Allocator_raw& alloc_raw) : + (size_type size, Clear_on_alloc coa_tag, log::Logger* logger_ptr, const Allocator_raw& alloc_raw_src) : - Basic_blob(alloc_raw) // Delegate. + Basic_blob(alloc_raw_src) // Delegate. { resize(size, coa_tag, 0, logger_ptr); } template Basic_blob::Basic_blob(const Basic_blob& src, log::Logger* logger_ptr) : - // Follow rules established in alloc_raw() doc header: - m_alloc_and_buf_ptr(std::allocator_traits::select_on_container_copy_construction(src.alloc_raw())), - m_capacity(0), // See comment in first delegated ctor above. 
- m_start(0), // Ditto. - m_size(0) // Ditto - + // Follow rules established in alloc_raw() doc header. This is compatible with the delegated-to ctor. + Basic_blob(std::allocator_traits::select_on_container_copy_construction(src.alloc_raw())) { /* What we want to do here, ignoring allocators, is (for concision): `assign(src, logger_ptr);` * However copy-assignment also must do something different w/r/t alloc_raw() than what we had to do above @@ -2826,10 +2822,10 @@ Basic_blob::Deleter_raw::Deleter_raw() : } template -Basic_blob::Deleter_raw::Deleter_raw(const Allocator_raw& alloc_raw, size_type buf_sz) : +Basic_blob::Deleter_raw::Deleter_raw(const Allocator_raw& alloc_raw_src, size_type buf_sz) : /* Copy allocator; a stateless allocator should have size 0 (no-op for the processor in that case... except * the optional<> registering it has-a-value). */ - m_alloc_raw(std::in_place, alloc_raw), + m_alloc_raw(std::in_place, alloc_raw_src), m_buf_sz(buf_sz) // Smart-ptr stores a T*, where T is a trivial-deleter PoD, but we delete an array of Ts: this many. { // OK. diff --git a/src/flow/util/linked_hash_map.hpp b/src/flow/util/linked_hash_map.hpp index 02a239bad..c4a0dcd5b 100644 --- a/src/flow/util/linked_hash_map.hpp +++ b/src/flow/util/linked_hash_map.hpp @@ -594,7 +594,7 @@ class Linked_hash_map Iterator insert_impl(const Value& key_and_mapped); /** - * Simimlar to insert_impl(), except `key_and_mapped` components are `move()`d into `*this` instead of being copied. + * Similar to insert_impl(), except `key_and_mapped` components are `move()`d into `*this` instead of being copied. * * @param key_and_mapped * Same as in insert(). @@ -666,12 +666,7 @@ template Linked_hash_map::Linked_hash_map(size_type n_buckets, const Hash& hasher_obj, const Pred& pred) : - /* @todo Using detail:: like this is technically uncool, but so far all alternatives look worse. - * We blame the somewhat annoying ctor API for unordered_*. */ - m_value_iter_set((n_buckets == size_type(-1)) - ? 
boost::unordered::detail::default_bucket_count - : n_buckets, - hasher_obj, pred) + Linked_hash_map({}, n_buckets, hasher_obj, pred) { // That's all. } @@ -683,8 +678,10 @@ Linked_hash_map::Linked_hash_map(std::initializ const Pred& pred) : // Their initializer_list is meant for a dictionary, but it is perfect for our list of pairs! m_value_list(values), + /* @todo Using detail:: like this is technically uncool, but so far all alternatives look worse. + * We blame the somewhat annoying ctor API for unordered_*. */ m_value_iter_set((n_buckets == size_type(-1)) - ? boost::unordered::detail::default_bucket_count // See @todo above. + ? boost::unordered::detail::default_bucket_count : n_buckets, hasher_obj, pred) { diff --git a/src/flow/util/linked_hash_set.hpp b/src/flow/util/linked_hash_set.hpp index 22d082b1f..849846e31 100644 --- a/src/flow/util/linked_hash_set.hpp +++ b/src/flow/util/linked_hash_set.hpp @@ -470,12 +470,7 @@ template Linked_hash_set::Linked_hash_set(size_type n_buckets, const Hash& hasher_obj, const Pred& pred) : - /* @todo Using detail:: like this is technically uncool, but so far all alternatives look worse. - * We blame the somewhat annoying ctor API for unordered_*. */ - m_value_iter_set((n_buckets == size_type(-1)) - ? boost::unordered::detail::default_bucket_count - : n_buckets, - hasher_obj, pred) + Linked_hash_set({}, n_buckets, hasher_obj, pred) { // That's all. } @@ -487,8 +482,10 @@ Linked_hash_set::Linked_hash_set(std::initializer_listget_logger()}` if and only if `Thread_local_state*` is @@ -91,7 +91,7 @@ namespace flow::util * See also Logging section below. * * From any thread where you need a #Thread_local_state, call `this->this_thread_state()`. The first time - * in a given thread, this shall perform and save `new Thread_local_state{}`; subsequent times it shall return + * in a given thread, this shall perform and save `new Thread_local_state`; subsequent times it shall return * the same pointer. 
(You can also save the pointer and reuse it; just be careful.) * * A given thread's #Thread_local_state object shall be deleted via `delete Thread_local_state` when one of @@ -355,7 +355,7 @@ class Thread_local_state_registry : * You may access the returned data structure, including the #Thread_local_state pointees, in read-only mode. * * You may write to each individual #Thread_local_state pointee. Moreover you are guaranteed (see - * "Thread safety" below) that no while_locked() user is doing the same simultaneously (byt while_locked() + * "Thread safety" below) that no while_locked() user is doing the same simultaneously (by while_locked() * contract). * * If you *do* write to a particular pointee, remember these points: @@ -371,7 +371,7 @@ class Thread_local_state_registry : * #Thread_local_state an `atomic m_do_flush{false}`; set it to `true` (with most-relaxed atomic mode) * via while_locked() + state_per_thread() block when wanting a thread to perform an (e.g.) "flush" action; * and in the owner-thread do checks like: - * `if (this_thread_state()->m_do_flush.compare_exchange_strong(true, false, relaxed) { flush_stuff(); }`. + * `if (this_thread_state()->m_do_flush.compare_exchange_strong(true, false, relaxed)) { flush_stuff(); }`. * It is speedy and easy. * - You could also surround any access, from the proper owner thread, to that `Thread_local_state` pointee * with while_locked(). Again, usually one uses thread-local stuff to avoid such central-locking actions; @@ -489,7 +489,7 @@ class Thread_local_state_registry : * The essential problem is that in cleanup() (which is called by thread X that earlier issued * `Thread_local_state* x` via this_thread_state() if and only if at X exit `*this` still exists, and therefore * so does #m_this_thread_state_or_null) we cannot be sure that `x` isn't being concurrently `delete`d and - * removed from #m_ctl by the (unlikely, but possibly) concurrently executing `*this` dtor. 
To do that + * removed from #m_ctl by the (unlikely but possibly) concurrently executing `*this` dtor. To do that * we must first lock `m_ctl->m_mutex`. However, `*m_ctl` might concurrently disappear! This is perfect * for `weak_ptr`: we can "just" capture a `weak_ptr` of `shared_ptr` #m_ctl and either grab a co-shared-pointer * of `m_ctl` via `weak_ptr::lock()`; or fail to do so which simply means the dtor will do the cleanup anyway. @@ -515,7 +515,7 @@ class Thread_local_state_registry : * and the Tl_context (passed to cleanup() by `thread_specific_ptr`). * * If dtor runs before a given thread exits, then again: simple enough. Dtor can just do (for each thread's stuff) - * what cleanup() what have done; hence for the thread in question it would delete the `Thread_local_state` and + * what cleanup() would have done; hence for the thread in question it would delete the `Thread_local_state` and * `Tl_context` and delete the entry from Registry_ctl::m_state_per_thread. cleanup() will just not run. * * The problems begin in the unlikely but eminently possible, and annoying, scenario wherein they both run at @@ -675,7 +675,7 @@ class Thread_local_state_registry : * - By user code, probably following this_thread_state() to obtain `p`. This is safe, because: * It is illegal for them to access `*this`-owned state after destroying `*this`. * - * As for the the stuff in `m_this_thread_state_or_null.m_tsp.get()` other than `p` -- the Tl_context surrounding + * As for the stuff in `m_this_thread_state_or_null.m_tsp.get()` other than `p` -- the Tl_context surrounding * it -- again: see Tl_context doc header. */ Tsp_wrapper m_this_thread_state_or_null; @@ -1058,17 +1058,19 @@ Thread_local_state_registry::~Thread_local_state_registry( { delete state; /* Careful! We delete `state` (the Thread_local_state) but *not* the Tl_context (we didn't even store - * it in the map) that is actually stored in the thread_specific_ptr m_this_thread_state_or_null. 
+ * it in the map) that is actually stored in the thread_specific_ptr m_this_thread_state_or_null.m_tsp. * See Tl_context doc header for explanation. In short by leaving it alive we leave cleanup() able to * run concurrently with ourselves -- unlikely but possible. */ } /* Subtlety: When m_this_thread_state_or_null is auto-destroyed shortly, it will auto-execute - * m_this_thread_state_or_null.reset() -- in *this* thread only. If in fact this_thread_state() has been - * called in this thread, then it'll try to do cleanup(m_this_thread_state_or_null.get()); nothing good - * can come of that really. We could try to prevent it by doing m_this_thread_state_or_null.reset()... but - * same result. Instead we do the following which simply replaces the stored (now bogus) ptr with null, and - * that's it. We already deleted it, so that's perfect. */ + * m_this_thread_state_or_null.m_tsp.reset() -- in *this* thread only. If in fact this_thread_state() has been + * called in this thread, then it'll try to do cleanup(m_this_thread_state_or_null.m_tsp.get()); nothing good + * can come of that really. We could try to prevent it by doing m_this_thread_state_or_null.m_tsp.reset()... but + * same result. Instead we do the following which simply replaces the stored (now useless) Tl_context* with null, and + * that's it. We already deleted its ->m_state, so that's perfect. (Again: per Tl_context doc header, it is + * intentional that we don't `delete` the release()d `m_this_thread_state_or_null.m_tsp.get()` but only "most" of it, + * namely (it)->m_state.) */ m_this_thread_state_or_null.m_tsp.release(); // After the }, m_ctl is nullified, and lastly m_this_thread_state_or_null is destroyed (a no-op in our context). 
@@ -1091,11 +1093,12 @@ void Thread_local_state_registry::cleanup(Tl_context* ctx) if (!shared_ptr_to_ctl) { /* Relevant Thread_local_state_registry dtor was called late enough to coincide with current thread about to exit - * but not quite late enough for its thread_specific_ptr ->m_this_thread_state_or_null to be destroyed. + * but not quite late enough for its thread_specific_ptr m_this_thread_state_or_null.m_tsp to be destroyed + * (hence we, cleanup(), were called for this thread -- possibly similarly for other thread(s) too). * Its shared_ptr m_ctl did already get destroyed though. So -- we need not worry about cleanup after all. * This is rare and fun, but it is no different from that dtor simply running before this thread exited. * It will be/is cleaning up our stuff (and everything else) -- except the *ctx wrapper itself. So clean that - * up (not actual ctx->m_state payload!); and GTFO. */ + * up (not actual ctx->m_state payload!) -- as T_l_s_r dtor specifically never does -- and GTFO. */ delete ctx; return; }