| author | Douglas Rumbaugh <dbr4@psu.edu> | 2025-04-25 12:28:31 -0400 |
|---|---|---|
| committer | Douglas Rumbaugh <dbr4@psu.edu> | 2025-04-25 12:28:31 -0400 |
| commit | 1957b2dd33b244754cd47db05f831a7627b8031e (patch) | |
| tree | da438aabd8ff188e2a3c94c69352a36ec243f730 /benchmarks/tail-latency/query_parm_sweep.cpp | |
| parent | 5a3d36fecabc8f220b19dcaea28a78f99b5244af (diff) | |
| download | dynamic-extension-1957b2dd33b244754cd47db05f831a7627b8031e.tar.gz | |
Scheduler statistics tracking update
The current scheme is quite inefficient in terms
of retrieval of the results, but it keeps the
critical path mostly clear. It is probably worth
tracking the data in a more organized way as it
comes in, to avoid an O(n^2) statistics-generation
step at the end.
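A minimal sketch of the more organized tracking suggested above, assuming a hypothetical `SchedStatsAccumulator` (the name and fields are illustrative, not part of the scheduler's actual interface): folding each event into running aggregates as it is recorded keeps reporting to a single cheap pass instead of an O(n^2) scan at the end.

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <limits>

/* Hypothetical accumulator: each event is folded into running aggregates
 * when it is recorded, so the final report is a single merge/print pass
 * rather than an O(n^2) scan over raw records. */
struct SchedStatsAccumulator {
  size_t count = 0;
  uint64_t sum_ns = 0;
  uint64_t min_ns = std::numeric_limits<uint64_t>::max();
  uint64_t max_ns = 0;

  /* Called once per completed event; O(1), so the critical path stays clear. */
  void record(uint64_t latency_ns) {
    count++;
    sum_ns += latency_ns;
    if (latency_ns < min_ns) min_ns = latency_ns;
    if (latency_ns > max_ns) max_ns = latency_ns;
  }

  /* Merge another thread's accumulator; run once per thread at shutdown. */
  void merge(const SchedStatsAccumulator &other) {
    count += other.count;
    sum_ns += other.sum_ns;
    if (other.min_ns < min_ns) min_ns = other.min_ns;
    if (other.max_ns > max_ns) max_ns = other.max_ns;
  }

  void report(FILE *out) const {
    if (count == 0) return;
    fprintf(out, "events=%zu\tavg_ns=%llu\tmin_ns=%llu\tmax_ns=%llu\n", count,
            (unsigned long long)(sum_ns / count), (unsigned long long)min_ns,
            (unsigned long long)max_ns);
  }
};
```

Keeping one such accumulator per worker thread and merging them inside `print_scheduler_statistics()` could preserve the clear critical path while avoiding the quadratic retrieval step.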
Diffstat (limited to 'benchmarks/tail-latency/query_parm_sweep.cpp')
| -rw-r--r-- | benchmarks/tail-latency/query_parm_sweep.cpp | 21 |
1 file changed, 15 insertions, 6 deletions
diff --git a/benchmarks/tail-latency/query_parm_sweep.cpp b/benchmarks/tail-latency/query_parm_sweep.cpp
index 4f11ce0..a91db8c 100644
--- a/benchmarks/tail-latency/query_parm_sweep.cpp
+++ b/benchmarks/tail-latency/query_parm_sweep.cpp
@@ -29,11 +29,11 @@
 typedef de::Record<uint64_t, uint64_t> Rec;
 typedef de::ISAMTree<Rec> Shard;
 typedef de::pl::Query<Shard> Q;
 typedef de::DynamicExtension<Shard, Q, de::DeletePolicy::TOMBSTONE,
-                             de::SerialScheduler>
+                             de::FIFOScheduler>
     Ext;
 typedef Q::Parameters QP;
 typedef de::DEConfiguration<Shard, Q, de::DeletePolicy::TOMBSTONE,
-                            de::SerialScheduler>
+                            de::FIFOScheduler>
     Conf;
 
 std::atomic<size_t> idx;
@@ -103,11 +103,11 @@ int main(int argc, char **argv) {
   auto queries =read_sosd_point_lookups<QP>(q_fname, 100);
 
   size_t buffer_size = 8000;
-  std::vector<size_t> policies = {0, 1, 2};
+  std::vector<size_t> policies = {0};
   std::vector<size_t> thread_counts = {8};
-  std::vector<double> modifiers = {0, .3, .5, .8};
-  std::vector<size_t> scale_factors = {2, 4, 8};
+  std::vector<double> modifiers = {0};
+  std::vector<size_t> scale_factors = {8, 8, 8, 8, 8};
 
   size_t insert_threads = 1;
   size_t query_threads = 1;
@@ -165,11 +165,19 @@ int main(int argc, char **argv) {
   extension->await_version();
 
+  /* run some queries to "warm up" the cache */
+  for (size_t i=0; i<queries.size()*2; i++) {
+    auto q_idx = i % queries.size();
+    auto q = queries[q_idx];
+    auto res = extension->query(std::move(q)).get();
+    total_res.fetch_add(res.size());
+  }
+
   total_query_count.store(50000);
   TIMER_INIT();
   TIMER_START();
   for (size_t i=0; i<total_query_count; i++) {
-    auto q_idx = rand() % queries.size();
+    auto q_idx = i % queries.size();
     auto q = queries[q_idx];
     auto res = extension->query(std::move(q)).get();
     total_res.fetch_add(res.size());
@@ -187,6 +195,7 @@ int main(int argc, char **argv) {
   fprintf(stdout, "%ld\t%ld\t%ld\t%lf\t%ld\t%ld\t%ld\t%ld\n",
           internal_thread_cnt, pol, sf, mod, extension->get_height(),
           extension->get_shard_count(), insert_tput, query_lat);
+  extension->print_scheduler_statistics();
   fflush(stdout);
 
   total_res.store(0);