| field | value | date |
|---|---|---|
| author | Douglas Rumbaugh <dbr4@psu.edu> | 2025-02-17 16:40:11 -0500 |
| committer | Douglas Rumbaugh <dbr4@psu.edu> | 2025-02-17 16:40:11 -0500 |
| commit | f1316e313de5c5286b279cec6ed320cba3eb506f (patch) | |
| tree | c1796e865548eca9bdbd0b7f4bd9fae864e41f71 | |
| parent | a162dd2e3be16ff9cd56f80f09f83c4b4b7bc959 (diff) | |
| download | dynamic-extension-f1316e313de5c5286b279cec6ed320cba3eb506f.tar.gz | |
Average version of mixed-workload bench
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | CMakeLists.txt | 6 |
| -rw-r--r-- | benchmarks/tail-latency/mixed_workload.cpp | 18 |
| -rw-r--r-- | benchmarks/tail-latency/mixed_workload_average.cpp | 176 |

3 files changed, 192 insertions, 8 deletions
```diff
diff --git a/CMakeLists.txt b/CMakeLists.txt
index e053367..3faa60b 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -311,6 +311,12 @@ if (tail_bench)
     target_link_libraries(mixed_workload PUBLIC gsl pthread atomic)
     target_include_directories(mixed_workload PRIVATE include external external/m-tree/cpp external/PGM-index/include external/PLEX/include benchmarks/include external/psudb-common/cpp/include)
     target_link_options(mixed_workload PUBLIC -mcx16)
+
+
+    add_executable(mixed_workload_average ${CMAKE_CURRENT_SOURCE_DIR}/benchmarks/tail-latency/mixed_workload_average.cpp)
+    target_link_libraries(mixed_workload_average PUBLIC gsl pthread atomic)
+    target_include_directories(mixed_workload_average PRIVATE include external external/m-tree/cpp external/PGM-index/include external/PLEX/include benchmarks/include external/psudb-common/cpp/include)
+    target_link_options(mixed_workload_average PUBLIC -mcx16)
 endif()
 
 if (bench)
diff --git a/benchmarks/tail-latency/mixed_workload.cpp b/benchmarks/tail-latency/mixed_workload.cpp
index 517ccd8..f4bfdda 100644
--- a/benchmarks/tail-latency/mixed_workload.cpp
+++ b/benchmarks/tail-latency/mixed_workload.cpp
@@ -43,7 +43,7 @@ size_t query_ratio = 3;
 std::atomic<size_t> total_res = 0;
 size_t reccnt = 0;
 
-size_t thrd_cnt = 0;
+size_t g_thrd_cnt = 0;
 
 void operation_thread(Ext *extension, std::vector<QP> *queries,
                       std::vector<Rec> *records) {
@@ -60,7 +60,7 @@ void operation_thread(Ext *extension, std::vector<QP> *queries,
       auto res = extension->query(std::move(q)).get();
       TIMER_STOP();
 
-      fprintf(stdout, "Q\t%ld\t%ld\n", thrd_cnt, TIMER_RESULT());
+      fprintf(stdout, "Q\t%ld\t%ld\n", g_thrd_cnt, TIMER_RESULT());
 
       total_res.fetch_add(res);
 
@@ -78,7 +78,7 @@ void operation_thread(Ext *extension, std::vector<QP> *queries,
       }
       TIMER_STOP();
 
-      fprintf(stdout, "I\t%ld\t%ld\n", thrd_cnt, TIMER_RESULT());
+      fprintf(stdout, "I\t%ld\t%ld\n", g_thrd_cnt, TIMER_RESULT());
 
       if (idx.load() == reccnt) {
         inserts_done.store(true);
@@ -117,22 +117,22 @@ int main(int argc, char **argv) {
   reccnt = n;
 
   for (auto pol : policies) {
-    for (size_t i = 0; i < thread_counts.size(); i++) {
+    for (auto internal_thread_cnt : thread_counts) {
       auto policy = get_policy<Shard, Q>(sfs[0], buffer_size, pol, n);
       auto config = Conf(std::move(policy));
       config.recon_enable_maint_on_flush = true;
       config.recon_maint_disabled = false;
       config.buffer_flush_trigger = 4000;
-      config.maximum_threads = thread_counts[i];
+      config.maximum_threads = internal_thread_cnt;
 
-      thrd_cnt = thread_counts[i];
+      g_thrd_cnt = internal_thread_cnt;
 
       auto extension = new Ext(std::move(config));
 
       /* warmup structure w/ 10% of records */
       size_t warmup = .1 * n;
-      for (size_t j = 0; j < warmup; j++) {
-        while (!extension->insert(data[j])) {
+      for (size_t k = 0; k < warmup; k++) {
+        while (!extension->insert(data[k])) {
           usleep(1);
         }
       }
@@ -153,6 +153,8 @@ int main(int argc, char **argv) {
       }
 
       fprintf(stderr, "%ld\n", total_res.load());
+      total_res.store(0);
+      inserts_done.store(false);
       delete extension;
     }
   }
diff --git a/benchmarks/tail-latency/mixed_workload_average.cpp b/benchmarks/tail-latency/mixed_workload_average.cpp
new file mode 100644
index 0000000..cbf3a82
--- /dev/null
+++ b/benchmarks/tail-latency/mixed_workload_average.cpp
@@ -0,0 +1,176 @@
+/*
+ *
+ */
+
+#define ENABLE_TIMER
+#define TS_TEST
+
+#include <thread>
+
+#include "framework/scheduling/SerialScheduler.h"
+#include "framework/util/Configuration.h"
+#include "util/types.h"
+#include "file_util.h"
+#include "framework/DynamicExtension.h"
+#include "framework/interface/Record.h"
+#include "framework/scheduling/FIFOScheduler.h"
+#include "query/rangecount.h"
+#include "shard/TrieSpline.h"
+#include "standard_benchmarks.h"
+
+#include "framework/reconstruction/FixedShardCountPolicy.h"
+
+#include <gsl/gsl_rng.h>
+
+#include "psu-util/timer.h"
+
+typedef de::Record<uint64_t, uint64_t> Rec;
+typedef de::TrieSpline<Rec> Shard;
+typedef de::rc::Query<Shard> Q;
+typedef de::DynamicExtension<Shard, Q, de::DeletePolicy::TOMBSTONE,
+                             de::FIFOScheduler>
+    Ext;
+typedef Q::Parameters QP;
+typedef de::DEConfiguration<Shard, Q, de::DeletePolicy::TOMBSTONE,
+                            de::FIFOScheduler>
+    Conf;
+
+std::atomic<size_t> idx;
+std::atomic<bool> inserts_done = false;
+
+size_t query_ratio = 3;
+
+std::atomic<size_t> total_res = 0;
+size_t reccnt = 0;
+
+size_t g_thrd_cnt = 0;
+
+std::atomic<size_t> total_insert_time = 0;
+std::atomic<size_t> total_insert_count = 0;
+std::atomic<size_t> total_query_time = 0;
+std::atomic<size_t> total_query_count = 0;
+
+void operation_thread(Ext *extension, std::vector<QP> *queries,
+                      std::vector<Rec> *records) {
+  TIMER_INIT();
+  while (!inserts_done.load()) {
+    auto type = rand() % 10;
+
+    if (type < 8) {
+      total_query_count.fetch_add(1);
+      auto q_idx = rand() % queries->size();
+
+      auto q = (*queries)[q_idx];
+
+      TIMER_START();
+      auto res = extension->query(std::move(q)).get();
+      TIMER_STOP();
+
+      total_query_time.fetch_add(TIMER_RESULT());
+      total_res.fetch_add(res);
+    } else {
+      TIMER_START();
+      for (size_t i = 0; i < 1000; i++) {
+        auto insert_idx = idx.fetch_add(1);
+        if (insert_idx >= reccnt) {
+          inserts_done.store(true);
+          break;
+        }
+
+        while (!extension->insert((*records)[insert_idx])) {
+          usleep(1);
+        }
+
+        if (idx.load() == reccnt) {
+          inserts_done.store(true);
+        }
+      }
+      TIMER_STOP();
+      total_insert_time.fetch_add(TIMER_RESULT());
+    }
+  }
+}
+
+void usage(char *progname) {
+  fprintf(stderr, "%s reccnt datafile queryfile\n", progname);
+}
+
+int main(int argc, char **argv) {
+
+  if (argc < 4) {
+    usage(argv[0]);
+    exit(EXIT_FAILURE);
+  }
+
+  size_t n = atol(argv[1]);
+  std::string d_fname = std::string(argv[2]);
+  std::string q_fname = std::string(argv[3]);
+
+  auto data = read_sosd_file<Rec>(d_fname, n);
+  auto queries = read_range_queries<QP>(q_fname, .0001);
+
+  std::vector<size_t> sfs = {8}; //, 4, 8, 16, 32, 64, 128, 256, 512, 1024};
+  size_t buffer_size = 8000;
+  std::vector<size_t> policies = {
+    5
+  };
+
+  std::vector<size_t> thread_counts = {1, 2, 4, 8, 16, 32};
+
+  reccnt = n;
+
+  for (auto pol : policies) {
+    for (auto internal_thread_cnt : thread_counts) {
+      auto policy = get_policy<Shard, Q>(sfs[0], buffer_size, pol, n);
+      auto config = Conf(std::move(policy));
+      config.recon_enable_maint_on_flush = true;
+      config.recon_maint_disabled = false;
+      config.buffer_flush_trigger = 4000;
+      config.maximum_threads = internal_thread_cnt;
+
+      g_thrd_cnt = internal_thread_cnt;
+
+      total_insert_time.store(0);
+      total_query_time.store(0);
+      total_query_count.store(0);
+
+      auto extension = new Ext(std::move(config));
+
+      /* warmup structure w/ 10% of records */
+      size_t warmup = .1 * n;
+      for (size_t k = 0; k < warmup; k++) {
+        while (!extension->insert(data[k])) {
+          usleep(1);
+        }
+      }
+
+      extension->await_version();
+
+      idx.store(warmup);
+
+      size_t thrd_cnt = 8;
+      std::thread thrds[thrd_cnt];
+
+      for (size_t i=0; i<thrd_cnt; i++) {
+        thrds[i] = std::thread(operation_thread, extension, &queries, &data);
+      }
+
+      for (size_t i=0; i<thrd_cnt; i++) {
+        thrds[i].join();
+      }
+
+      fprintf(stderr, "%ld\n", total_res.load());
+
+      size_t insert_tput = ((double)(n - warmup) / (double) total_insert_time) *1e9;
+      size_t query_lat = (double) total_query_time.load() / (double) total_query_count.load();
+
+      fprintf(stdout, "%ld\t%ld\t%ld\n", internal_thread_cnt, insert_tput, query_lat);
+
+      total_res.store(0);
+      inserts_done.store(false);
+      delete extension;
+    }
+  }
+
+  fflush(stderr);
+}
```
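For reference, a minimal standalone sketch (not part of this commit) of the averaging that `mixed_workload_average.cpp` reports on stdout: insert throughput in records per second, derived from the nanosecond insert time accumulated across operation threads, and mean query latency in nanoseconds per query. The helper name `report_averages` and its parameters are hypothetical, introduced only to illustrate the math; the benchmark itself computes these values inline in `main()` from the `total_insert_time`, `total_query_time`, and `total_query_count` atomics.

```cpp
#include <cstdio>
#include <cstddef>

/* Hypothetical helper mirroring the per-configuration report in
 * mixed_workload_average.cpp: throughput = records inserted per second
 * (accumulated times are in nanoseconds), latency = mean ns per query. */
void report_averages(size_t thread_cnt, size_t inserted_records,
                     size_t insert_time_ns, size_t query_time_ns,
                     size_t query_cnt) {
  size_t insert_tput =
      ((double)inserted_records / (double)insert_time_ns) * 1e9;
  size_t query_lat = (double)query_time_ns / (double)query_cnt;

  /* One tab-separated row per thread count, matching the benchmark's output. */
  fprintf(stdout, "%ld\t%ld\t%ld\n", thread_cnt, insert_tput, query_lat);
}

int main() {
  /* Example figures (made up): 900,000 records inserted in 3 s of aggregate
   * insert time, 50,000 queries taking 25 s of aggregate query time. */
  report_averages(8, 900000, 3000000000UL, 25000000000UL, 50000);
  return 0;
}
```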