harsh solution

Artur Mukhamadiev 2025-06-23 16:48:36 +03:00
parent 8d0fd6ccdb
commit ac0d12b90a
5 changed files with 25 additions and 24 deletions

View File

@@ -11,8 +11,8 @@ file(GLOB prj_src src/*)
 target_sources(${PROJECT_NAME} PRIVATE ${prj_src})
-target_compile_options(${PROJECT_NAME} PRIVATE -fsanitize=thread)
-target_link_options(${PROJECT_NAME} PRIVATE -fsanitize=thread)
+# target_compile_options(${PROJECT_NAME} PRIVATE -fsanitize=thread)
+# target_link_options(${PROJECT_NAME} PRIVATE -fsanitize=thread)
 add_subdirectory(bench)
 add_subdirectory(tests)

View File

@@ -12,5 +12,5 @@ target_link_libraries(${NAME} PRIVATE
   ${PROJECT_NAME}
 )
-target_compile_options(${NAME} PRIVATE -fsanitize=thread)
-target_link_options(${NAME} PRIVATE -fsanitize=thread)
+# target_compile_options(${NAME} PRIVATE -fsanitize=thread)
+# target_link_options(${NAME} PRIVATE -fsanitize=thread)

View File

@@ -36,12 +36,15 @@ class Logger {
   template <typename Metric,
             typename = std::enable_if_t<std::is_arithmetic_v<Metric>>>
   void add(const std::string& field, Metric metric) {
-    std::shared_ptr<map_type> locked = active.load(std::memory_order_acquire);
+    refs.fetch_add(1, std::memory_order_release);
+    map_type* locked = active.load(std::memory_order_acquire);
     auto it = locked->find(field);
     if (it == locked->end()) {
+      refs.fetch_sub(1, std::memory_order_release);
       throw configErrorMsg;  // additional 30ns on bench ?
     }
     it->second = metric;
+    refs.fetch_sub(1, std::memory_order_release);
   }

   bool isConfigured() { return configured == CONFIGURED; }
@@ -51,10 +54,10 @@ class Logger {
   enum Configuration { NOT_CONFIGURED, CONFIG_IN_PROGRESS, CONFIGURED };
   std::atomic<int> configured{NOT_CONFIGURED};
   std::unique_ptr<Worker> worker;
-  std::shared_ptr<map_type> m1, m2;
-  std::atomic<std::shared_ptr<map_type>> active;  // impl may use mutex!
-  // std::shared_ptr<map_type> active; // data race without atomic on swap
-  // operation
+  std::unique_ptr<map_type> m1, m2;
+  std::atomic<map_type*> active;  // impl may use mutex!
+  std::atomic<size_t> refs{0};  // degradation on worker side (waiting for no
+                                // one to be in refs section)
 };

 }  // namespace vptyp
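The hunks above swap the std::atomic<std::shared_ptr> for a raw std::atomic<map_type*> plus a manual in-flight counter: add() increments refs before touching the active map and decrements it on every exit path (including the throw), so the worker can tell when no writer still holds the old buffer. A minimal, self-contained sketch of that reader-side protocol (the simplified map_type and the Buffers wrapper are assumptions, and the increment here uses a conservative acq_rel ordering instead of the patch's release):

#include <atomic>
#include <cstddef>
#include <map>
#include <string>

using map_type = std::map<std::string, long>;  // simplified value type

struct Buffers {
  map_type m1, m2;
  std::atomic<map_type*> active{&m1};      // buffer writers should currently use
  std::atomic<std::size_t> refs{0};        // writers currently inside add()
};

// Mirrors Logger::add(): announce entry, work on whatever buffer is active,
// announce exit on every path so the worker's wait can terminate.
inline void add(Buffers& b, const std::string& field, long metric) {
  b.refs.fetch_add(1, std::memory_order_acq_rel);  // "I'm using the active map"
  map_type* locked = b.active.load(std::memory_order_acquire);
  auto it = locked->find(field);
  if (it != locked->end())
    it->second = metric;                           // write into the active buffer
  b.refs.fetch_sub(1, std::memory_order_release);  // "I'm done with it"
}

The decrement on the error path matters: forgetting it would leave refs permanently above zero and wedge the worker's wait loop, which is exactly why the patch adds a fetch_sub before the throw.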

View File

@@ -15,18 +15,18 @@ bool Logger::configure(const std::vector<std::string>& d) {
     m1_ref[key] = 0;
   }
   m2_ref = m1_ref;
-  active = m1;
+  active.store(m1.get());
   configured.store(CONFIGURED);
   return true;
 }

 Logger::Logger()
-    : m1(std::make_shared<map_type>()), m2(std::make_shared<map_type>()) {
+    : m1(std::make_unique<map_type>()), m2(std::make_unique<map_type>()) {
   worker = std::make_unique<Logger::Worker>(*this, std::cout);
 }

 Logger::Logger(std::ostream& out)
-    : m1(std::make_shared<map_type>()), m2(std::make_shared<map_type>()) {
+    : m1(std::make_unique<map_type>()), m2(std::make_unique<map_type>()) {
   worker = std::make_unique<Logger::Worker>(*this, out);
 }
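configure() now publishes the raw pointer with active.store(m1.get()) and only then flips configured to CONFIGURED; because add() bails out unless isConfigured() is true, a caller that observes the flag also observes a valid active pointer. A stripped-down illustration of that publish-then-announce ordering (hypothetical free functions, default seq_cst stores as in the patch):

#include <atomic>
#include <cassert>
#include <map>
#include <string>

using map_type = std::map<std::string, long>;

std::atomic<map_type*> active{nullptr};
std::atomic<int> configured{0};

void configure(map_type& m1) {
  active.store(&m1);    // publish the buffer first (seq_cst by default)
  configured.store(1);  // ...then announce that configuration is done
}

bool add(const std::string& field, long value) {
  if (configured.load() != 1)  // not configured yet: reject the write
    return false;
  map_type* locked = active.load(std::memory_order_acquire);
  assert(locked != nullptr);   // follows from the store order in configure()
  (*locked)[field] = value;
  return true;
}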
@@ -68,26 +68,24 @@ void Logger::Worker::unroll() {
   if (!parent.isConfigured())
     return;
-  auto tmp = parent.active.load(std::memory_order_relaxed);
-  auto toBeActive = tmp == parent.m1 ? parent.m2 : parent.m1;
+  auto tmp = parent.active.load(std::memory_order_acquire);
+  auto toBeActive = tmp == parent.m1.get() ? parent.m2.get() : parent.m1.get();
   parent.active.store(toBeActive, std::memory_order_release);
-  // hmm, seems that we can receive situation here there use_count is less or
-  // equal 2 but in reality we still have instance how? store and load is
-  // tighted, so we have guarantee that this check will be happens before
-  while (tmp.use_count() > 2) {
+  // so we setting up happens before relation with counters
+  while (parent.refs.load(std::memory_order_acquire) > 0) {
     std::this_thread::yield();
   }
-  // it's needed thread fence to guarantee use count change
-  // __tsan_acquire(tmp.use_count());
-  std::atomic_thread_fence(std::memory_order_acquire);
+  // // it's needed thread fence to guarantee use count change
+  // // __tsan_acquire(tmp.use_count());
+  // std::atomic_thread_fence(std::memory_order_acquire);
   // at this place we are guarantee that tmp is only ours or not?
   std::string output;
   bool haveToPush{false};
-  for (auto& element : *tmp.get()) {
+  for (auto& element : *tmp) {
     if (!std::visit([](auto&& i) -> bool { return i; }, element.second))
       continue;
     std::string value =
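On the worker side, unroll() replaces the use_count() watch with a small grace period: swap active to the other buffer, then spin until refs drains to zero, after which the swapped-out map is treated as the worker's private copy. A sketch of that side, reusing the hypothetical Buffers struct from the reader-side sketch above (the flush/reset body is an illustrative stand-in):

#include <thread>

// Swap the buffers and wait out in-flight writers, as in the hunk above.
void unroll(Buffers& b) {
  map_type* tmp = b.active.load(std::memory_order_acquire);
  map_type* toBeActive = (tmp == &b.m1) ? &b.m2 : &b.m1;
  b.active.store(toBeActive, std::memory_order_release);  // new add() calls go to the other map

  // Grace period: writers signal entry/exit through refs.  The acquire load
  // pairs with the release fetch_sub in add(), so once the worker reads zero,
  // the updates those writers made to *tmp are visible here.
  while (b.refs.load(std::memory_order_acquire) > 0)
    std::this_thread::yield();

  // *tmp is now intended to be private to the worker: read it out and reset it.
  for (auto& element : *tmp)
    element.second = 0;
}

The patch's own trailing comment ("at this place we are guarantee that tmp is only ours or not?") still flags the open question of whether a writer can publish its increment only after the worker has already sampled refs; the sketch inherits that caveat rather than resolving it.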

View File

@@ -12,5 +12,5 @@ target_link_libraries(${NAME} PRIVATE
   ${PROJECT_NAME}
 )
-target_compile_options(${NAME} PRIVATE -fsanitize=thread)
-target_link_options(${NAME} PRIVATE -fsanitize=thread)
+# target_compile_options(${NAME} PRIVATE -fsanitize=thread)
+# target_link_options(${NAME} PRIVATE -fsanitize=thread)