From 1957b2dd33b244754cd47db05f831a7627b8031e Mon Sep 17 00:00:00 2001
From: Douglas Rumbaugh
Date: Fri, 25 Apr 2025 12:28:31 -0400
Subject: Scheduler statistics tracking update

The current scheme is really inefficient in terms of retrieval of the
results, but keeps the critical path mostly clear. It's probably worth
doing more organized tracking of the data as it comes in, to avoid an
n^2 statistics generation step at the end.
---
 include/framework/DynamicExtension.h | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

(limited to 'include/framework/DynamicExtension.h')

diff --git a/include/framework/DynamicExtension.h b/include/framework/DynamicExtension.h
index fb82638..0bb1524 100644
--- a/include/framework/DynamicExtension.h
+++ b/include/framework/DynamicExtension.h
@@ -471,7 +471,7 @@ private:
   static void reconstruction(void *arguments) {
     auto args = (ReconstructionArgs *)arguments;
     auto extension = (DynamicExtension *)args->extension;
-    extension->SetThreadAffinity();
+    extension->set_thread_affinity();
 
     static std::atomic cnt = 0;
     size_t recon_id = cnt.fetch_add(1);
@@ -661,6 +661,7 @@ private:
 
   static void async_query(void *arguments) {
     auto *args = (QueryArgs *)arguments;
+    args->extension->set_thread_affinity();
 
     auto version = args->extension->get_active_version();
 
@@ -927,13 +928,12 @@ private:
   }
 
 //#ifdef _GNU_SOURCE
-#if 0
-  void SetThreadAffinity() {
+  void set_thread_affinity() {
     if constexpr (std::same_as) {
       return;
     }
 
-    int core = m_next_core.fetch_add(1) % m_core_cnt;
+    int core = m_next_core.fetch_add(1) % m_config.physical_core_count;
     cpu_set_t mask;
     CPU_ZERO(&mask);
 
@@ -954,8 +954,10 @@ private:
     CPU_SET(core, &mask);
     ::sched_setaffinity(0, sizeof(mask), &mask);
   }
+/*
 #else
-  void SetThreadAffinity() {}
+  void set_thread_affinity() {}
 #endif
+*/
 };
 } // namespace de
--
cgit v1.2.3
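
The hunks above show only fragments of the affinity helper, so a compact
standalone sketch of the pattern may be useful. It assumes, per the diff, an
atomic round-robin counter (m_next_core in the class, a file-scope global
here) and a physical core count (m_config.physical_core_count in the class, a
plain parameter here); the names g_next_core and pin_current_thread are
hypothetical scaffolding added only to make the example self-contained, and
the code is Linux-specific since sched_setaffinity is a Linux API.

// Standalone sketch of the round-robin thread-pinning pattern used by
// set_thread_affinity() in the diff above. g_next_core stands in for
// m_next_core, and the physical_core_count parameter stands in for
// m_config.physical_core_count; both substitutions are assumptions.
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <sched.h>   // cpu_set_t, CPU_ZERO, CPU_SET, sched_setaffinity
#include <atomic>
#include <cstdio>

static std::atomic<int> g_next_core{0};

// Pins the calling thread to the next core in round-robin order.
// Returns the chosen core, or -1 if sched_setaffinity() fails.
static int pin_current_thread(int physical_core_count) {
  int core = g_next_core.fetch_add(1) % physical_core_count;

  cpu_set_t mask;
  CPU_ZERO(&mask);
  CPU_SET(core, &mask);

  // A pid of 0 applies the mask to the calling thread.
  if (::sched_setaffinity(0, sizeof(mask), &mask) != 0) {
    return -1;
  }
  return core;
}

int main() {
  // Each scheduler worker would call this once when it starts, the way
  // reconstruction() and async_query() now call set_thread_affinity().
  int core = pin_current_thread(4); // assume 4 physical cores
  std::printf("pinned to core %d\n", core);
  return 0;
}

The fetch_add/modulo pair is the design point worth noting: each worker
claims a distinct core slot without any locking, which is why both
reconstruction() and async_query() can call set_thread_affinity() from
concurrent threads.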
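
The n^2 step the commit message refers to is not shown in the diff, but the
shape of the fix it proposes is a standard one: bucket measurements under
their reconstruction id as they arrive (the way recon_id is handed out
above), so the final report is one linear pass rather than one scan of the
whole event log per reconstruction. The sketch below is purely illustrative;
SchedulerStats, Event, and every field name are assumptions, not the
framework's actual types.

#include <cstddef>
#include <cstdint>
#include <mutex>
#include <vector>

// Hypothetical per-event record; the framework's real statistics
// structures are not visible in the diff.
struct Event {
  size_t recon_id;     // which reconstruction produced this event
  uint64_t runtime_ns; // example measurement
};

class SchedulerStats {
public:
  // Organized tracking: file each event under its reconstruction id as
  // it arrives, so no quadratic scan is needed at report time.
  void record(const Event &e) {
    std::lock_guard<std::mutex> guard(m_lock);
    if (e.recon_id >= m_buckets.size()) {
      m_buckets.resize(e.recon_id + 1);
    }
    m_buckets[e.recon_id].push_back(e.runtime_ns);
  }

  // Linear-time report: one pass over each bucket, instead of scanning
  // the full event log once per reconstruction (the n^2 scheme the
  // commit message describes).
  std::vector<uint64_t> total_runtime_per_reconstruction() const {
    std::lock_guard<std::mutex> guard(m_lock);
    std::vector<uint64_t> totals(m_buckets.size(), 0);
    for (size_t id = 0; id < m_buckets.size(); id++) {
      for (uint64_t ns : m_buckets[id]) {
        totals[id] += ns;
      }
    }
    return totals;
  }

private:
  mutable std::mutex m_lock;
  std::vector<std::vector<uint64_t>> m_buckets;
};

A real version would likely buffer per thread and merge lazily so the mutex
stays off the hot path, which is the "keeps the critical path mostly clear"
trade-off the commit message mentions.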