From e02742b07540dd5a9bcbb44dae14856bf10955ed Mon Sep 17 00:00:00 2001
From: Douglas Rumbaugh
Date: Mon, 6 Nov 2023 15:18:53 -0500
Subject: Refactoring progress

---
 external/psudb-common               |   2 +-
 include/framework/interface/Shard.h |   2 +-
 include/query/irs.h                 |   6 +-
 include/query/rangequery.h          |  14 +-
 include/query/wss.h                 | 204 ++++++++++++++++
 include/shard/Alias.h               | 251 ++++++++++++++++++++
 include/shard/ISAMTree.h            | 339 +++++++++++++++++++++++++++
 include/shard/MemISAM.h             | 341 ---------------------------
 include/shard/PGM.h                 | 267 ---------------------
 include/shard/TrieSpline.h          | 184 +--------------
 include/shard/WSS.h                 | 453 ------------------------------------
 tests/alias_tests.cpp               | 391 +++++++++++++++++++++++++++++++
 tests/memisam_tests.cpp             |  69 +++---
 tests/pgm_tests.cpp                 |  33 +--
 tests/triespline_tests.cpp          |  17 +-
 tests/wss_tests.cpp                 | 390 -------------------------------
 16 files changed, 1263 insertions(+), 1700 deletions(-)
 create mode 100644 include/query/wss.h
 create mode 100644 include/shard/Alias.h
 create mode 100644 include/shard/ISAMTree.h
 delete mode 100644 include/shard/MemISAM.h
 delete mode 100644 include/shard/WSS.h
 create mode 100644 tests/alias_tests.cpp
 delete mode 100644 tests/wss_tests.cpp

diff --git a/external/psudb-common b/external/psudb-common
index 7005ad8..b85686b 160000
--- a/external/psudb-common
+++ b/external/psudb-common
@@ -1 +1 @@
-Subproject commit 7005ad856c941d8485843c53a3b08d53ccc3d98e
+Subproject commit b85686b50ab767e5c06eedb975686923aa79dd3d
diff --git a/include/framework/interface/Shard.h b/include/framework/interface/Shard.h
index 40a696b..92cdca0 100644
--- a/include/framework/interface/Shard.h
+++ b/include/framework/interface/Shard.h
@@ -37,6 +37,6 @@ template
 concept SortedShardInterface = ShardInterface && requires(S s, R r, R *rp) {
     {s.lower_bound(r)} -> std::convertible_to;
     {s.upper_bound(r)} -> std::convertible_to;
-}
+};
 
 }
diff --git a/include/query/irs.h b/include/query/irs.h
index 5b09e73..4cb69b0 100644
--- a/include/query/irs.h
+++ b/include/query/irs.h
@@ -44,8 +44,8 @@ public:
     static void *get_query_state(S *shard, void *parms) {
         auto res = new State();
 
-        decltype(R::key) lower_key = ((PARMS *) parms)->lower_bound;
-        decltype(R::key) upper_key = (PARMS *) parms)->upper_bound;
+        decltype(R::key) lower_key = ((Parms *) parms)->lower_bound;
+        decltype(R::key) upper_key = ((Parms *) parms)->upper_bound;
 
         res->lower_bound = shard->get_lower_bound(lower_key);
         res->upper_bound = shard->get_upper_bound(upper_key);
@@ -119,7 +119,7 @@ public:
             normalized_weights.push_back((double) w / (double) total_weight);
         }
 
-        auto shard_alias = Alias(normalized_weights);
+        auto shard_alias = psudb::Alias(normalized_weights);
         for (size_t i=0; i<p->sample_size; i++) {
             auto idx = shard_alias.get(p->rng);
             if (idx == 0) {
diff --git a/include/query/rangequery.h b/include/query/rangequery.h
index f9a34d9..b9ac9db 100644
--- a/include/query/rangequery.h
+++ b/include/query/rangequery.h
@@ -8,6 +8,12 @@
  */
 #pragma once
 
+#include "framework/interface/Record.h"
+#include "framework/interface/Shard.h"
+#include "framework/structure/MutableBuffer.h"
+#include "psu-ds/PriorityQueue.h"
+#include "util/Cursor.h"
+
 namespace de { namespace rq {
 
 template
@@ -27,7 +33,7 @@ struct BufferState {
     size_t cutoff;
 };
 
-template
+template
 class Query {
 public:
     constexpr static bool EARLY_ABORT=false;
@@ -74,7 +80,7 @@ public:
             ptr++;
         }
 
-        while (ptr->rec.key <= p->upper_bound && ptr < shard->m_data + s->stop_idx) {
+        while (ptr->rec.key <= p->upper_bound && ptr < shard->get_data() +
s->stop_idx) { records.emplace_back(*ptr); ptr++; } @@ -101,7 +107,7 @@ public: std::vector>> cursors; cursors.reserve(results.size()); - PriorityQueue> pq(results.size()); + psudb::PriorityQueue> pq(results.size()); size_t total = 0; size_t tmp_n = results.size(); @@ -126,7 +132,7 @@ public: while (pq.size()) { auto now = pq.peek(); - auto next = pq.size() > 1 ? pq.peek(1) : queue_record>{nullptr, 0}; + auto next = pq.size() > 1 ? pq.peek(1) : psudb::queue_record>{nullptr, 0}; if (!now.data->is_tombstone() && next.data != nullptr && now.data->rec == next.data->rec && next.data->is_tombstone()) { diff --git a/include/query/wss.h b/include/query/wss.h new file mode 100644 index 0000000..b8a5d54 --- /dev/null +++ b/include/query/wss.h @@ -0,0 +1,204 @@ +/* + * include/query/rangequery.h + * + * Copyright (C) 2023 Douglas B. Rumbaugh + * + * All rights reserved. Published under the Modified BSD License. + * + */ +#pragma once + +#include "framework/interface/Record.h" +#include "framework/interface/Shard.h" +#include "framework/structure/MutableBuffer.h" + +namespace de { namespace wss { + +template +struct Parms { + size_t sample_size; + gsl_rng *rng; +}; + +template +struct State { + decltype(R::weight) total_weight; + size_t sample_size; + + State() { + total_weight = 0; + } +}; + +template +struct BufferState { + size_t cutoff; + size_t sample_size; + psudb::Alias *alias; + decltype(R::weight) max_weight; + decltype(R::weight) total_weight; + + ~BufferState() { + delete alias; + } +}; + +template +class Query { +public: + constexpr static bool EARLY_ABORT=false; + constexpr static bool SKIP_DELETE_FILTER=false; + + static void *get_query_state(S *shard, void *parms) { + auto res = new State(); + res->total_weight = shard->get_total_weight(); + res->sample_size = 0; + + return res; + } + + static void* get_buffer_query_state(MutableBuffer *buffer, void *parms) { + BufferState *state = new BufferState(); + auto parameters = (Parms*) parms; + if constexpr (Rejection) { + state->cutoff = buffer->get_record_count() - 1; + state->max_weight = buffer->get_max_weight(); + state->total_weight = buffer->get_total_weight(); + return state; + } + + std::vector weights; + + state->cutoff = buffer->get_record_count() - 1; + double total_weight = 0.0; + + for (size_t i = 0; i <= state->cutoff; i++) { + auto rec = buffer->get_data() + i; + weights.push_back(rec->rec.weight); + total_weight += rec->rec.weight; + } + + for (size_t i = 0; i < weights.size(); i++) { + weights[i] = weights[i] / total_weight; + } + + state->alias = new psudb::Alias(weights); + state->total_weight = total_weight; + + return state; + } + + static void process_query_states(void *query_parms, std::vector &shard_states, std::vector &buffer_states) { + auto p = (Parms *) query_parms; + auto bs = (BufferState *) buffer_states[0]; + + std::vector shard_sample_sizes(shard_states.size()+1, 0); + size_t buffer_sz = 0; + + std::vector weights; + weights.push_back(bs->total_weight); + + decltype(R::weight) total_weight = 0; + for (auto &s : shard_states) { + auto state = (State *) s; + total_weight += state->total_weight; + weights.push_back(state->total_weight); + } + + std::vector normalized_weights; + for (auto w : weights) { + normalized_weights.push_back((double) w / (double) total_weight); + } + + auto shard_alias = psudb::Alias(normalized_weights); + for (size_t i=0; isample_size; i++) { + auto idx = shard_alias.get(p->rng); + if (idx == 0) { + buffer_sz++; + } else { + shard_sample_sizes[idx - 1]++; + } + } + + + bs->sample_size = 
buffer_sz; + for (size_t i=0; i *) shard_states[i]; + state->sample_size = shard_sample_sizes[i+1]; + } + } + + static std::vector> query(S *shard, void *q_state, void *parms) { + auto rng = ((Parms *) parms)->rng; + + auto state = (State *) q_state; + auto sample_size = state->sample_size; + + std::vector> result_set; + + if (sample_size == 0) { + return result_set; + } + size_t attempts = 0; + do { + attempts++; + size_t idx = shard->m_alias->get(rng); + result_set.emplace_back(*shard->get_record_at(idx)); + } while (attempts < sample_size); + + return result_set; + } + + static std::vector> buffer_query(MutableBuffer *buffer, void *state, void *parms) { + auto st = (BufferState *) state; + auto p = (Parms *) parms; + + std::vector> result; + result.reserve(st->sample_size); + + if constexpr (Rejection) { + for (size_t i=0; isample_size; i++) { + auto idx = gsl_rng_uniform_int(p->rng, st->cutoff); + auto rec = buffer->get_data() + idx; + + auto test = gsl_rng_uniform(p->rng) * st->max_weight; + + if (test <= rec->rec.weight) { + result.emplace_back(*rec); + } + } + return result; + } + + for (size_t i=0; isample_size; i++) { + auto idx = st->alias->get(p->rng); + result.emplace_back(*(buffer->get_data() + idx)); + } + + return result; + } + + static std::vector merge(std::vector>> &results, void *parms) { + std::vector output; + + for (size_t i=0; i *) state; + delete s; + } + + static void delete_buffer_query_state(void *state) { + auto s = (BufferState *) state; + delete s; + } +}; + +}} diff --git a/include/shard/Alias.h b/include/shard/Alias.h new file mode 100644 index 0000000..b6b16c5 --- /dev/null +++ b/include/shard/Alias.h @@ -0,0 +1,251 @@ +/* + * include/shard/Alias.h + * + * Copyright (C) 2023 Douglas B. Rumbaugh + * Dong Xie + * + * All rights reserved. Published under the Modified BSD License. 
+ * + */ +#pragma once + +#include +#include +#include +#include +#include + +#include "framework/ShardRequirements.h" + +#include "psu-ds/PriorityQueue.h" +#include "util/Cursor.h" +#include "psu-ds/psudb::Alias.h" +#include "psu-ds/BloomFilter.h" +#include "util/bf_config.h" + +using psudb::CACHELINE_SIZE; +using psudb::BloomFilter; +using psudb::PriorityQueue; +using psudb::queue_record; + +namespace de { + +thread_local size_t wss_cancelations = 0; + +template +class Alias { +private: + typedef decltype(R::key) K; + typedef decltype(R::value) V; + typedef decltype(R::weight) W; + +public: + Alias(MutableBuffer* buffer) + : m_reccnt(0), m_tombstone_cnt(0), m_total_weight(0), m_alias(nullptr), m_bf(nullptr) { + + m_alloc_size = (buffer->get_record_count() * sizeof(Wrapped)) + (CACHELINE_SIZE - (buffer->get_record_count() * sizeof(Wrapped)) % CACHELINE_SIZE); + assert(m_alloc_size % CACHELINE_SIZE == 0); + m_data = (Wrapped*)std::aligned_alloc(CACHELINE_SIZE, m_alloc_size); + + m_bf = new BloomFilter(BF_FPR, buffer->get_tombstone_count(), BF_HASH_FUNCS); + + size_t offset = 0; + m_reccnt = 0; + auto base = buffer->get_data(); + auto stop = base + buffer->get_record_count(); + + std::sort(base, stop, std::less>()); + + std::vector weights; + + while (base < stop) { + if (!(base->is_tombstone()) && (base + 1) < stop) { + if (base->rec == (base + 1)->rec && (base + 1)->is_tombstone()) { + base += 2; + wss_cancelations++; + continue; + } + } else if (base->is_deleted()) { + base += 1; + continue; + } + + // FIXME: this shouldn't be necessary, but the tagged record + // bypass doesn't seem to be working on this code-path, so this + // ensures that tagged records from the buffer are able to be + // dropped, eventually. It should only need to be &= 1 + base->header &= 3; + m_data[m_reccnt++] = *base; + m_total_weight+= base->rec.weight; + weights.push_back(base->rec.weight); + + if (m_bf && base->is_tombstone()) { + m_tombstone_cnt++; + m_bf->insert(base->rec); + } + + base++; + } + + if (m_reccnt > 0) { + build_alias_structure(weights); + } + } + + Alias(Alias** shards, size_t len) + : m_reccnt(0), m_tombstone_cnt(0), m_total_weight(0), m_alias(nullptr), m_bf(nullptr) { + std::vector>> cursors; + cursors.reserve(len); + + PriorityQueue> pq(len); + + size_t attemp_reccnt = 0; + size_t tombstone_count = 0; + + for (size_t i = 0; i < len; ++i) { + if (shards[i]) { + auto base = shards[i]->get_data(); + cursors.emplace_back(Cursor{base, base + shards[i]->get_record_count(), 0, shards[i]->get_record_count()}); + attemp_reccnt += shards[i]->get_record_count(); + tombstone_count += shards[i]->get_tombstone_count(); + pq.push(cursors[i].ptr, i); + } else { + cursors.emplace_back(Cursor>{nullptr, nullptr, 0, 0}); + } + } + + m_bf = new BloomFilter(BF_FPR, tombstone_count, BF_HASH_FUNCS); + + m_alloc_size = (attemp_reccnt * sizeof(Wrapped)) + (CACHELINE_SIZE - (attemp_reccnt * sizeof(Wrapped)) % CACHELINE_SIZE); + assert(m_alloc_size % CACHELINE_SIZE == 0); + m_data = (Wrapped*)std::aligned_alloc(CACHELINE_SIZE, m_alloc_size); + + std::vector weights; + + while (pq.size()) { + auto now = pq.peek(); + auto next = pq.size() > 1 ? 
pq.peek(1) : queue_record>{nullptr, 0}; + if (!now.data->is_tombstone() && next.data != nullptr && + now.data->rec == next.data->rec && next.data->is_tombstone()) { + + pq.pop(); pq.pop(); + auto& cursor1 = cursors[now.version]; + auto& cursor2 = cursors[next.version]; + if (advance_cursor>(cursor1)) pq.push(cursor1.ptr, now.version); + if (advance_cursor>(cursor2)) pq.push(cursor2.ptr, next.version); + } else { + auto& cursor = cursors[now.version]; + if (!cursor.ptr->is_deleted()) { + m_data[m_reccnt++] = *cursor.ptr; + m_total_weight += cursor.ptr->rec.weight; + weights.push_back(cursor.ptr->rec.weight); + if (m_bf && cursor.ptr->is_tombstone()) { + ++m_tombstone_cnt; + if (m_bf) m_bf->insert(cursor.ptr->rec); + } + } + pq.pop(); + + if (advance_cursor>(cursor)) pq.push(cursor.ptr, now.version); + } + } + + if (m_reccnt > 0) { + build_alias_structure(weights); + } + } + + ~Alias() { + if (m_data) free(m_data); + if (m_alias) delete m_alias; + if (m_bf) delete m_bf; + + } + + Wrapped *point_lookup(const R &rec, bool filter=false) { + if (filter && !m_bf->lookup(rec)) { + return nullptr; + } + + size_t idx = get_lower_bound(rec.key); + if (idx >= m_reccnt) { + return nullptr; + } + + while (idx < m_reccnt && m_data[idx].rec < rec) ++idx; + + if (m_data[idx].rec == rec) { + return m_data + idx; + } + + return nullptr; + } + + Wrapped* get_data() const { + return m_data; + } + + size_t get_record_count() const { + return m_reccnt; + } + + size_t get_tombstone_count() const { + return m_tombstone_cnt; + } + + const Wrapped* get_record_at(size_t idx) const { + if (idx >= m_reccnt) return nullptr; + return m_data + idx; + } + + + size_t get_memory_usage() { + return m_alloc_size; + } + + size_t get_aux_memory_usage() { + return 0; + } + +private: + + size_t get_lower_bound(const K& key) const { + size_t min = 0; + size_t max = m_reccnt - 1; + + const char * record_key; + while (min < max) { + size_t mid = (min + max) / 2; + + if (key > m_data[mid].rec.key) { + min = mid + 1; + } else { + max = mid; + } + } + + return min; + } + + void build_alias_structure(std::vector &weights) { + + // normalize the weights vector + std::vector norm_weights(weights.size()); + + for (size_t i=0; i* m_data; + psudb::Alias *m_alias; + W m_total_weight; + size_t m_reccnt; + size_t m_tombstone_cnt; + size_t m_group_size; + size_t m_alloc_size; + BloomFilter *m_bf; +}; diff --git a/include/shard/ISAMTree.h b/include/shard/ISAMTree.h new file mode 100644 index 0000000..a610c09 --- /dev/null +++ b/include/shard/ISAMTree.h @@ -0,0 +1,339 @@ +/* + * include/shard/ISAMTree.h + * + * Copyright (C) 2023 Douglas B. Rumbaugh + * Dong Xie + * + * All rights reserved. Published under the Modified BSD License. 
+ * + */ +#pragma once + +#include +#include +#include +#include + +#include "framework/ShardRequirements.h" + +#include "util/bf_config.h" +#include "psu-ds/PriorityQueue.h" +#include "util/Cursor.h" +#include "psu-util/timer.h" + +using psudb::CACHELINE_SIZE; +using psudb::BloomFilter; +using psudb::PriorityQueue; +using psudb::queue_record; +using psudb::Alias; + +namespace de { + +thread_local size_t mrun_cancelations = 0; + +template +class ISAMTree { +private: + +typedef decltype(R::key) K; +typedef decltype(R::value) V; + +constexpr static size_t inmem_isam_node_size = 256; +constexpr static size_t inmem_isam_fanout = inmem_isam_node_size / (sizeof(K) + sizeof(char*)); + +struct InternalNode { + K keys[inmem_isam_fanout]; + char* child[inmem_isam_fanout]; +}; + +constexpr static size_t inmem_isam_leaf_fanout = inmem_isam_node_size / sizeof(R); +constexpr static size_t inmem_isam_node_keyskip = sizeof(K) * inmem_isam_fanout; + +static_assert(sizeof(InternalNode) == inmem_isam_node_size, "node size does not match"); + +public: + ISAMTree(MutableBuffer* buffer) + :m_reccnt(0), m_tombstone_cnt(0), m_isam_nodes(nullptr), m_deleted_cnt(0) { + + m_bf = new BloomFilter(BF_FPR, buffer->get_tombstone_count(), BF_HASH_FUNCS); + + m_alloc_size = (buffer->get_record_count() * sizeof(Wrapped)) + (CACHELINE_SIZE - (buffer->get_record_count() * sizeof(Wrapped)) % CACHELINE_SIZE); + assert(m_alloc_size % CACHELINE_SIZE == 0); + m_data = (Wrapped*)std::aligned_alloc(CACHELINE_SIZE, m_alloc_size); + + TIMER_INIT(); + + size_t offset = 0; + m_reccnt = 0; + auto base = buffer->get_data(); + auto stop = base + buffer->get_record_count(); + + TIMER_START(); + std::sort(base, stop, std::less>()); + TIMER_STOP(); + auto sort_time = TIMER_RESULT(); + + TIMER_START(); + while (base < stop) { + if (!base->is_tombstone() && (base + 1 < stop) + && base->rec == (base + 1)->rec && (base + 1)->is_tombstone()) { + base += 2; + mrun_cancelations++; + continue; + } else if (base->is_deleted()) { + base += 1; + continue; + } + + // FIXME: this shouldn't be necessary, but the tagged record + // bypass doesn't seem to be working on this code-path, so this + // ensures that tagged records from the buffer are able to be + // dropped, eventually. 
It should only need to be &= 1 + base->header &= 3; + m_data[m_reccnt++] = *base; + if (m_bf && base->is_tombstone()) { + ++m_tombstone_cnt; + m_bf->insert(base->rec); + } + + base++; + } + TIMER_STOP(); + auto copy_time = TIMER_RESULT(); + + TIMER_START(); + if (m_reccnt > 0) { + build_internal_levels(); + } + TIMER_STOP(); + auto level_time = TIMER_RESULT(); + } + + ISAMTree(ISAMTree** runs, size_t len) + : m_reccnt(0), m_tombstone_cnt(0), m_deleted_cnt(0), m_isam_nodes(nullptr) { + std::vector>> cursors; + cursors.reserve(len); + + PriorityQueue> pq(len); + + size_t attemp_reccnt = 0; + size_t tombstone_count = 0; + + for (size_t i = 0; i < len; ++i) { + if (runs[i]) { + auto base = runs[i]->get_data(); + cursors.emplace_back(Cursor{base, base + runs[i]->get_record_count(), 0, runs[i]->get_record_count()}); + attemp_reccnt += runs[i]->get_record_count(); + tombstone_count += runs[i]->get_tombstone_count(); + pq.push(cursors[i].ptr, i); + } else { + cursors.emplace_back(Cursor>{nullptr, nullptr, 0, 0}); + } + } + + m_bf = new BloomFilter(BF_FPR, tombstone_count, BF_HASH_FUNCS); + + m_alloc_size = (attemp_reccnt * sizeof(Wrapped)) + (CACHELINE_SIZE - (attemp_reccnt * sizeof(Wrapped)) % CACHELINE_SIZE); + assert(m_alloc_size % CACHELINE_SIZE == 0); + m_data = (Wrapped*)std::aligned_alloc(CACHELINE_SIZE, m_alloc_size); + + size_t offset = 0; + + while (pq.size()) { + auto now = pq.peek(); + auto next = pq.size() > 1 ? pq.peek(1) : queue_record>{nullptr, 0}; + if (!now.data->is_tombstone() && next.data != nullptr && + now.data->rec == next.data->rec && next.data->is_tombstone()) { + + pq.pop(); pq.pop(); + auto& cursor1 = cursors[now.version]; + auto& cursor2 = cursors[next.version]; + if (advance_cursor(cursor1)) pq.push(cursor1.ptr, now.version); + if (advance_cursor(cursor2)) pq.push(cursor2.ptr, next.version); + } else { + auto& cursor = cursors[now.version]; + if (!cursor.ptr->is_deleted()) { + m_data[m_reccnt++] = *cursor.ptr; + if (cursor.ptr->is_tombstone()) { + ++m_tombstone_cnt; + m_bf->insert(cursor.ptr->rec); + } + } + pq.pop(); + + if (advance_cursor(cursor)) pq.push(cursor.ptr, now.version); + } + } + + if (m_reccnt > 0) { + build_internal_levels(); + } + } + + ~ISAMTree() { + if (m_data) free(m_data); + if (m_isam_nodes) free(m_isam_nodes); + if (m_bf) delete m_bf; + } + + Wrapped *point_lookup(const R &rec, bool filter=false) { + if (filter && !m_bf->lookup(rec)) { + return nullptr; + } + + size_t idx = get_lower_bound(rec.key); + if (idx >= m_reccnt) { + return nullptr; + } + + while (idx < m_reccnt && m_data[idx].rec < rec) ++idx; + + if (m_data[idx].rec == rec) { + return m_data + idx; + } + + return nullptr; + } + + Wrapped* get_data() const { + return m_data; + } + + size_t get_record_count() const { + return m_reccnt; + } + + size_t get_tombstone_count() const { + return m_tombstone_cnt; + } + + const Wrapped* get_record_at(size_t idx) const { + return (idx < m_reccnt) ? m_data + idx : nullptr; + } + + size_t get_memory_usage() { + return m_internal_node_cnt * inmem_isam_node_size + m_alloc_size; + } + + size_t get_aux_memory_usage() { + return 0; + } + + size_t get_lower_bound(const K& key) const { + const InternalNode* now = m_root; + while (!is_leaf(reinterpret_cast(now))) { + const InternalNode* next = nullptr; + for (size_t i = 0; i < inmem_isam_fanout - 1; ++i) { + if (now->child[i + 1] == nullptr || key <= now->keys[i]) { + next = reinterpret_cast(now->child[i]); + break; + } + } + + now = next ? 
next : reinterpret_cast(now->child[inmem_isam_fanout - 1]); + } + + const Wrapped* pos = reinterpret_cast*>(now); + while (pos < m_data + m_reccnt && pos->rec.key < key) pos++; + + return pos - m_data; + } + + size_t get_upper_bound(const K& key) const { + const InternalNode* now = m_root; + while (!is_leaf(reinterpret_cast(now))) { + const InternalNode* next = nullptr; + for (size_t i = 0; i < inmem_isam_fanout - 1; ++i) { + if (now->child[i + 1] == nullptr || key < now->keys[i]) { + next = reinterpret_cast(now->child[i]); + break; + } + } + + now = next ? next : reinterpret_cast(now->child[inmem_isam_fanout - 1]); + } + + const Wrapped* pos = reinterpret_cast*>(now); + while (pos < m_data + m_reccnt && pos->rec.key <= key) pos++; + + return pos - m_data; + } + + +private: + void build_internal_levels() { + size_t n_leaf_nodes = m_reccnt / inmem_isam_leaf_fanout + (m_reccnt % inmem_isam_leaf_fanout != 0); + size_t level_node_cnt = n_leaf_nodes; + size_t node_cnt = 0; + do { + level_node_cnt = level_node_cnt / inmem_isam_fanout + (level_node_cnt % inmem_isam_fanout != 0); + node_cnt += level_node_cnt; + } while (level_node_cnt > 1); + + m_alloc_size = (node_cnt * inmem_isam_node_size) + (CACHELINE_SIZE - (node_cnt * inmem_isam_node_size) % CACHELINE_SIZE); + assert(m_alloc_size % CACHELINE_SIZE == 0); + + m_isam_nodes = (InternalNode*)std::aligned_alloc(CACHELINE_SIZE, m_alloc_size); + m_internal_node_cnt = node_cnt; + memset(m_isam_nodes, 0, node_cnt * inmem_isam_node_size); + + InternalNode* current_node = m_isam_nodes; + + const Wrapped* leaf_base = m_data; + const Wrapped* leaf_stop = m_data + m_reccnt; + while (leaf_base < leaf_stop) { + size_t fanout = 0; + for (size_t i = 0; i < inmem_isam_fanout; ++i) { + auto rec_ptr = leaf_base + inmem_isam_leaf_fanout * i; + if (rec_ptr >= leaf_stop) break; + const Wrapped* sep_key = std::min(rec_ptr + inmem_isam_leaf_fanout - 1, leaf_stop - 1); + current_node->keys[i] = sep_key->rec.key; + current_node->child[i] = (char*)rec_ptr; + ++fanout; + } + current_node++; + leaf_base += fanout * inmem_isam_leaf_fanout; + } + + auto level_start = m_isam_nodes; + auto level_stop = current_node; + auto current_level_node_cnt = level_stop - level_start; + while (current_level_node_cnt > 1) { + auto now = level_start; + while (now < level_stop) { + size_t child_cnt = 0; + for (size_t i = 0; i < inmem_isam_fanout; ++i) { + auto node_ptr = now + i; + ++child_cnt; + if (node_ptr >= level_stop) break; + current_node->keys[i] = node_ptr->keys[inmem_isam_fanout - 1]; + current_node->child[i] = (char*)node_ptr; + } + now += child_cnt; + current_node++; + } + level_start = level_stop; + level_stop = current_node; + current_level_node_cnt = level_stop - level_start; + } + + assert(current_level_node_cnt == 1); + m_root = level_start; + } + + bool is_leaf(const char* ptr) const { + return ptr >= (const char*)m_data && ptr < (const char*)(m_data + m_reccnt); + } + + // Members: sorted data, internal ISAM levels, reccnt; + Wrapped* m_data; + psudb::BloomFilter *m_bf; + InternalNode* m_isam_nodes; + InternalNode* m_root; + size_t m_reccnt; + size_t m_tombstone_cnt; + size_t m_internal_node_cnt; + size_t m_deleted_cnt; + size_t m_alloc_size; +}; +} diff --git a/include/shard/MemISAM.h b/include/shard/MemISAM.h deleted file mode 100644 index 6962c19..0000000 --- a/include/shard/MemISAM.h +++ /dev/null @@ -1,341 +0,0 @@ -/* - * include/shard/MemISAM.h - * - * Copyright (C) 2023 Douglas B. Rumbaugh - * Dong Xie - * - * All rights reserved. 
Published under the Modified BSD License. - * - */ -#pragma once - -#include -#include -#include -#include - -#include "framework/ShardRequirements.h" - -#include "util/bf_config.h" -#include "psu-ds/PriorityQueue.h" -#include "util/Cursor.h" -#include "psu-util/timer.h" - -using psudb::CACHELINE_SIZE; -using psudb::BloomFilter; -using psudb::PriorityQueue; -using psudb::queue_record; -using psudb::Alias; - -namespace de { - -thread_local size_t mrun_cancelations = 0; - -template -class MemISAM { -private: - friend class IRSQuery; - friend class IRSQuery; - friend class ISAMRangeQuery; - -typedef decltype(R::key) K; -typedef decltype(R::value) V; - -constexpr static size_t inmem_isam_node_size = 256; -constexpr static size_t inmem_isam_fanout = inmem_isam_node_size / (sizeof(K) + sizeof(char*)); - -struct InMemISAMNode { - K keys[inmem_isam_fanout]; - char* child[inmem_isam_fanout]; -}; - -constexpr static size_t inmem_isam_leaf_fanout = inmem_isam_node_size / sizeof(R); -constexpr static size_t inmem_isam_node_keyskip = sizeof(K) * inmem_isam_fanout; - -static_assert(sizeof(InMemISAMNode) == inmem_isam_node_size, "node size does not match"); - -public: - MemISAM(MutableBuffer* buffer) - :m_reccnt(0), m_tombstone_cnt(0), m_isam_nodes(nullptr), m_deleted_cnt(0) { - - m_bf = new BloomFilter(BF_FPR, buffer->get_tombstone_count(), BF_HASH_FUNCS); - - m_alloc_size = (buffer->get_record_count() * sizeof(Wrapped)) + (CACHELINE_SIZE - (buffer->get_record_count() * sizeof(Wrapped)) % CACHELINE_SIZE); - assert(m_alloc_size % CACHELINE_SIZE == 0); - m_data = (Wrapped*)std::aligned_alloc(CACHELINE_SIZE, m_alloc_size); - - TIMER_INIT(); - - size_t offset = 0; - m_reccnt = 0; - auto base = buffer->get_data(); - auto stop = base + buffer->get_record_count(); - - TIMER_START(); - std::sort(base, stop, std::less>()); - TIMER_STOP(); - auto sort_time = TIMER_RESULT(); - - TIMER_START(); - while (base < stop) { - if (!base->is_tombstone() && (base + 1 < stop) - && base->rec == (base + 1)->rec && (base + 1)->is_tombstone()) { - base += 2; - mrun_cancelations++; - continue; - } else if (base->is_deleted()) { - base += 1; - continue; - } - - // FIXME: this shouldn't be necessary, but the tagged record - // bypass doesn't seem to be working on this code-path, so this - // ensures that tagged records from the buffer are able to be - // dropped, eventually. 
It should only need to be &= 1 - base->header &= 3; - m_data[m_reccnt++] = *base; - if (m_bf && base->is_tombstone()) { - ++m_tombstone_cnt; - m_bf->insert(base->rec); - } - - base++; - } - TIMER_STOP(); - auto copy_time = TIMER_RESULT(); - - TIMER_START(); - if (m_reccnt > 0) { - build_internal_levels(); - } - TIMER_STOP(); - auto level_time = TIMER_RESULT(); - } - - MemISAM(MemISAM** runs, size_t len) - : m_reccnt(0), m_tombstone_cnt(0), m_deleted_cnt(0), m_isam_nodes(nullptr) { - std::vector>> cursors; - cursors.reserve(len); - - PriorityQueue> pq(len); - - size_t attemp_reccnt = 0; - size_t tombstone_count = 0; - - for (size_t i = 0; i < len; ++i) { - if (runs[i]) { - auto base = runs[i]->get_data(); - cursors.emplace_back(Cursor{base, base + runs[i]->get_record_count(), 0, runs[i]->get_record_count()}); - attemp_reccnt += runs[i]->get_record_count(); - tombstone_count += runs[i]->get_tombstone_count(); - pq.push(cursors[i].ptr, i); - } else { - cursors.emplace_back(Cursor>{nullptr, nullptr, 0, 0}); - } - } - - m_bf = new BloomFilter(BF_FPR, tombstone_count, BF_HASH_FUNCS); - - m_alloc_size = (attemp_reccnt * sizeof(Wrapped)) + (CACHELINE_SIZE - (attemp_reccnt * sizeof(Wrapped)) % CACHELINE_SIZE); - assert(m_alloc_size % CACHELINE_SIZE == 0); - m_data = (Wrapped*)std::aligned_alloc(CACHELINE_SIZE, m_alloc_size); - - size_t offset = 0; - - while (pq.size()) { - auto now = pq.peek(); - auto next = pq.size() > 1 ? pq.peek(1) : queue_record>{nullptr, 0}; - if (!now.data->is_tombstone() && next.data != nullptr && - now.data->rec == next.data->rec && next.data->is_tombstone()) { - - pq.pop(); pq.pop(); - auto& cursor1 = cursors[now.version]; - auto& cursor2 = cursors[next.version]; - if (advance_cursor(cursor1)) pq.push(cursor1.ptr, now.version); - if (advance_cursor(cursor2)) pq.push(cursor2.ptr, next.version); - } else { - auto& cursor = cursors[now.version]; - if (!cursor.ptr->is_deleted()) { - m_data[m_reccnt++] = *cursor.ptr; - if (cursor.ptr->is_tombstone()) { - ++m_tombstone_cnt; - m_bf->insert(cursor.ptr->rec); - } - } - pq.pop(); - - if (advance_cursor(cursor)) pq.push(cursor.ptr, now.version); - } - } - - if (m_reccnt > 0) { - build_internal_levels(); - } - } - - ~MemISAM() { - if (m_data) free(m_data); - if (m_isam_nodes) free(m_isam_nodes); - if (m_bf) delete m_bf; - } - - Wrapped *point_lookup(const R &rec, bool filter=false) { - if (filter && !m_bf->lookup(rec)) { - return nullptr; - } - - size_t idx = get_lower_bound(rec.key); - if (idx >= m_reccnt) { - return nullptr; - } - - while (idx < m_reccnt && m_data[idx].rec < rec) ++idx; - - if (m_data[idx].rec == rec) { - return m_data + idx; - } - - return nullptr; - } - - Wrapped* get_data() const { - return m_data; - } - - size_t get_record_count() const { - return m_reccnt; - } - - size_t get_tombstone_count() const { - return m_tombstone_cnt; - } - - const Wrapped* get_record_at(size_t idx) const { - return (idx < m_reccnt) ? m_data + idx : nullptr; - } - - size_t get_memory_usage() { - return m_internal_node_cnt * inmem_isam_node_size + m_alloc_size; - } - - size_t get_aux_memory_usage() { - return 0; - } - -private: - size_t get_lower_bound(const K& key) const { - const InMemISAMNode* now = m_root; - while (!is_leaf(reinterpret_cast(now))) { - const InMemISAMNode* next = nullptr; - for (size_t i = 0; i < inmem_isam_fanout - 1; ++i) { - if (now->child[i + 1] == nullptr || key <= now->keys[i]) { - next = reinterpret_cast(now->child[i]); - break; - } - } - - now = next ? 
next : reinterpret_cast(now->child[inmem_isam_fanout - 1]); - } - - const Wrapped* pos = reinterpret_cast*>(now); - while (pos < m_data + m_reccnt && pos->rec.key < key) pos++; - - return pos - m_data; - } - - size_t get_upper_bound(const K& key) const { - const InMemISAMNode* now = m_root; - while (!is_leaf(reinterpret_cast(now))) { - const InMemISAMNode* next = nullptr; - for (size_t i = 0; i < inmem_isam_fanout - 1; ++i) { - if (now->child[i + 1] == nullptr || key < now->keys[i]) { - next = reinterpret_cast(now->child[i]); - break; - } - } - - now = next ? next : reinterpret_cast(now->child[inmem_isam_fanout - 1]); - } - - const Wrapped* pos = reinterpret_cast*>(now); - while (pos < m_data + m_reccnt && pos->rec.key <= key) pos++; - - return pos - m_data; - } - - void build_internal_levels() { - size_t n_leaf_nodes = m_reccnt / inmem_isam_leaf_fanout + (m_reccnt % inmem_isam_leaf_fanout != 0); - size_t level_node_cnt = n_leaf_nodes; - size_t node_cnt = 0; - do { - level_node_cnt = level_node_cnt / inmem_isam_fanout + (level_node_cnt % inmem_isam_fanout != 0); - node_cnt += level_node_cnt; - } while (level_node_cnt > 1); - - m_alloc_size = (node_cnt * inmem_isam_node_size) + (CACHELINE_SIZE - (node_cnt * inmem_isam_node_size) % CACHELINE_SIZE); - assert(m_alloc_size % CACHELINE_SIZE == 0); - - m_isam_nodes = (InMemISAMNode*)std::aligned_alloc(CACHELINE_SIZE, m_alloc_size); - m_internal_node_cnt = node_cnt; - memset(m_isam_nodes, 0, node_cnt * inmem_isam_node_size); - - InMemISAMNode* current_node = m_isam_nodes; - - const Wrapped* leaf_base = m_data; - const Wrapped* leaf_stop = m_data + m_reccnt; - while (leaf_base < leaf_stop) { - size_t fanout = 0; - for (size_t i = 0; i < inmem_isam_fanout; ++i) { - auto rec_ptr = leaf_base + inmem_isam_leaf_fanout * i; - if (rec_ptr >= leaf_stop) break; - const Wrapped* sep_key = std::min(rec_ptr + inmem_isam_leaf_fanout - 1, leaf_stop - 1); - current_node->keys[i] = sep_key->rec.key; - current_node->child[i] = (char*)rec_ptr; - ++fanout; - } - current_node++; - leaf_base += fanout * inmem_isam_leaf_fanout; - } - - auto level_start = m_isam_nodes; - auto level_stop = current_node; - auto current_level_node_cnt = level_stop - level_start; - while (current_level_node_cnt > 1) { - auto now = level_start; - while (now < level_stop) { - size_t child_cnt = 0; - for (size_t i = 0; i < inmem_isam_fanout; ++i) { - auto node_ptr = now + i; - ++child_cnt; - if (node_ptr >= level_stop) break; - current_node->keys[i] = node_ptr->keys[inmem_isam_fanout - 1]; - current_node->child[i] = (char*)node_ptr; - } - now += child_cnt; - current_node++; - } - level_start = level_stop; - level_stop = current_node; - current_level_node_cnt = level_stop - level_start; - } - - assert(current_level_node_cnt == 1); - m_root = level_start; - } - - bool is_leaf(const char* ptr) const { - return ptr >= (const char*)m_data && ptr < (const char*)(m_data + m_reccnt); - } - - // Members: sorted data, internal ISAM levels, reccnt; - Wrapped* m_data; - psudb::BloomFilter *m_bf; - InMemISAMNode* m_isam_nodes; - InMemISAMNode* m_root; - size_t m_reccnt; - size_t m_tombstone_cnt; - size_t m_internal_node_cnt; - size_t m_deleted_cnt; - size_t m_alloc_size; -}; -} diff --git a/include/shard/PGM.h b/include/shard/PGM.h index 6d76376..6b66b7d 100644 --- a/include/shard/PGM.h +++ b/include/shard/PGM.h @@ -31,34 +31,6 @@ using psudb::Alias; namespace de { -template -struct pgm_range_query_parms { - decltype(R::key) lower_bound; - decltype(R::key) upper_bound; -}; - -template -struct 
PGMPointLookupParms { - decltype(R::key) target_key; -}; - -template -class PGMRangeQuery; - -template -class PGMPointLookup; - -template -struct PGMState { - size_t start_idx; - size_t stop_idx; -}; - -template -struct PGMBufferState { - size_t cutoff; -}; - template class PGM { private: @@ -67,11 +39,6 @@ private: public: - - // FIXME: there has to be a better way to do this - friend class PGMRangeQuery; - friend class PGMPointLookup; - PGM(MutableBuffer* buffer) : m_reccnt(0), m_tombstone_cnt(0) { @@ -80,8 +47,6 @@ public: m_data = (Wrapped*)std::aligned_alloc(CACHELINE_SIZE, m_alloc_size); std::vector keys; - //m_bf = new BloomFilter(BF_FPR, buffer->get_tombstone_count(), BF_HASH_FUNCS); - size_t offset = 0; m_reccnt = 0; auto base = buffer->get_data(); @@ -110,13 +75,6 @@ public: base->header &= 3; m_data[m_reccnt++] = *base; keys.emplace_back(base->rec.key); - - /* - if (m_bf && base->is_tombstone()) { - m_tombstone_cnt++; - m_bf->insert(base->rec); - }*/ - base++; } @@ -148,8 +106,6 @@ public: } } - //m_bf = new BloomFilter(BF_FPR, tombstone_count, BF_HASH_FUNCS); - m_alloc_size = (attemp_reccnt * sizeof(Wrapped)) + (CACHELINE_SIZE - (attemp_reccnt * sizeof(Wrapped)) % CACHELINE_SIZE); assert(m_alloc_size % CACHELINE_SIZE == 0); m_data = (Wrapped*)std::aligned_alloc(CACHELINE_SIZE, m_alloc_size); @@ -172,10 +128,6 @@ public: if (!cursor.ptr->is_deleted()) { m_data[m_reccnt++] = *cursor.ptr; keys.emplace_back(cursor.ptr->rec.key); - /*if (m_bf && cursor.ptr->is_tombstone()) { - ++m_tombstone_cnt; - if (m_bf) m_bf->insert(cursor.ptr->rec); - }*/ } pq.pop(); @@ -190,15 +142,9 @@ public: ~PGM() { if (m_data) free(m_data); - //if (m_bf) delete m_bf; - } Wrapped *point_lookup(const R &rec, bool filter=false) { - //if (filter && !m_bf->lookup(rec)) { - // return nullptr; - //} - size_t idx = get_lower_bound(rec.key); if (idx >= m_reccnt) { return nullptr; @@ -284,219 +230,6 @@ private: K m_max_key; K m_min_key; pgm::PGMIndex m_pgm; - //BloomFilter *m_bf; -}; -template -class PGMPointLookup { -public: - constexpr static bool EARLY_ABORT=false; - constexpr static bool SKIP_DELETE_FILTER=false; - - static void *get_query_state(PGM *ts, void *parms) { - return nullptr; - } - - static void* get_buffer_query_state(MutableBuffer *buffer, void *parms) { - return nullptr; - } - - static void process_query_states(void *query_parms, std::vector &shard_states, void *buff_state) { - return; - } - - static std::vector> query(PGM *ts, void *q_state, void *parms) { - std::vector> records; - auto p = (PGMPointLookupParms *) parms; - auto s = (PGMState *) q_state; - - size_t idx = ts->get_lower_bound(p->target_key); - if (ts->get_record_at(idx)->rec.key == p->target_key) { - records.emplace_back(*ts->get_record_at(idx)); - } - - return records; - } - - static std::vector> buffer_query(MutableBuffer *buffer, void *state, void *parms) { - auto p = (PGMPointLookupParms *) parms; - auto s = (PGMBufferState *) state; - - std::vector> records; - for (size_t i=0; iget_record_count(); i++) { - auto rec = buffer->get_data() + i; - if (rec->rec.key == p->target_key) { - records.emplace_back(*rec); - return records; - } - } - - return records; - } - - static std::vector merge(std::vector>> &results, void *parms) { - std::vector output; - for (size_t i=0 ;i 0) { - output.emplace_back(results[i][0].rec); - return output; - } - } - - return output; - } - - static void delete_query_state(void *state) { - } - - static void delete_buffer_query_state(void *state) { - } }; - - -template -class PGMRangeQuery { -public: - 
constexpr static bool EARLY_ABORT=false; - constexpr static bool SKIP_DELETE_FILTER=false; - - static void *get_query_state(PGM *ts, void *parms) { - auto res = new PGMState(); - auto p = (pgm_range_query_parms *) parms; - - res->start_idx = ts->get_lower_bound(p->lower_bound); - res->stop_idx = ts->get_record_count(); - - return res; - } - - static void* get_buffer_query_state(MutableBuffer *buffer, void *parms) { - auto res = new PGMBufferState(); - res->cutoff = buffer->get_record_count(); - - return res; - } - - static void process_query_states(void *query_parms, std::vector &shard_states, void *buff_state) { - return; - } - - static std::vector> query(PGM *ts, void *q_state, void *parms) { - size_t tot = 0; - //std::vector> records; - auto p = (pgm_range_query_parms *) parms; - auto s = (PGMState *) q_state; - - // if the returned index is one past the end of the - // records for the PGM, then there are not records - // in the index falling into the specified range. - if (s->start_idx == ts->get_record_count()) { - return {}; - } - - auto ptr = ts->get_record_at(s->start_idx); - - // roll the pointer forward to the first record that is - // greater than or equal to the lower bound. - while(ptr->rec.key < p->lower_bound) { - ptr++; - } - - while (ptr->rec.key <= p->upper_bound && ptr < ts->m_data + s->stop_idx) { - if (ptr->is_tombstone()) --tot; - else if (!ptr->is_deleted()) ++tot; - //records.emplace_back(*ptr); - ptr++; - } - - return {Wrapped{0, {tot, 0}}}; - //return records; - } - - static std::vector> buffer_query(MutableBuffer *buffer, void *state, void *parms) { - size_t tot = 0; - auto p = (pgm_range_query_parms *) parms; - auto s = (PGMBufferState *) state; - - //std::vector> records; - for (size_t i=0; icutoff; i++) { - auto rec = buffer->get_data() + i; - if (rec->rec.key >= p->lower_bound && rec->rec.key <= p->upper_bound) { - if (rec->is_tombstone()) --tot; - else if (!rec->is_deleted()) ++tot; - //records.emplace_back(*rec); - } - } - - return {Wrapped{0, {tot, 0}}}; - //return records; - } - - static std::vector merge(std::vector>> &results, void *parms) { - /*std::vector>> cursors; - cursors.reserve(results.size()); - - PriorityQueue> pq(results.size()); - size_t total = 0; - size_t tmp_n = results.size(); - - - for (size_t i = 0; i < tmp_n; ++i) - if (results[i].size() > 0){ - auto base = results[i].data(); - cursors.emplace_back(Cursor{base, base + results[i].size(), 0, results[i].size()}); - assert(i == cursors.size() - 1); - total += results[i].size(); - pq.push(cursors[i].ptr, tmp_n - i - 1); - } else { - cursors.emplace_back(Cursor>{nullptr, nullptr, 0, 0}); - } - - if (total == 0) { - return std::vector(); - } - - std::vector output; - output.reserve(total); - - while (pq.size()) { - auto now = pq.peek(); - auto next = pq.size() > 1 ? 
pq.peek(1) : queue_record>{nullptr, 0}; - if (!now.data->is_tombstone() && next.data != nullptr && - now.data->rec == next.data->rec && next.data->is_tombstone()) { - - pq.pop(); pq.pop(); - auto& cursor1 = cursors[tmp_n - now.version - 1]; - auto& cursor2 = cursors[tmp_n - next.version - 1]; - if (advance_cursor>(cursor1)) pq.push(cursor1.ptr, now.version); - if (advance_cursor>(cursor2)) pq.push(cursor2.ptr, next.version); - } else { - auto& cursor = cursors[tmp_n - now.version - 1]; - if (!now.data->is_tombstone()) output.push_back(cursor.ptr->rec); - pq.pop(); - - if (advance_cursor>(cursor)) pq.push(cursor.ptr, now.version); - } - }*/ - - size_t tot = 0; - for (auto& result: results) - if (result.size() > 0) tot += result[0].rec.key; - - return {{tot, 0}}; - } - - static void delete_query_state(void *state) { - auto s = (PGMState *) state; - delete s; - } - - static void delete_buffer_query_state(void *state) { - auto s = (PGMBufferState *) state; - delete s; - } -}; - -; - } diff --git a/include/shard/TrieSpline.h b/include/shard/TrieSpline.h index a784a38..fdf8edb 100644 --- a/include/shard/TrieSpline.h +++ b/include/shard/TrieSpline.h @@ -30,32 +30,6 @@ using psudb::Alias; namespace de { -template -struct ts_range_query_parms { - decltype(R::key) lower_bound; - decltype(R::key) upper_bound; -}; - -template -class TrieSplineRangeQuery; - -template -struct TrieSplineState { - size_t start_idx; - size_t stop_idx; -}; - -template -struct TrieSplineBufferState { - size_t cutoff; - Alias* alias; - - ~TrieSplineBufferState() { - delete alias; - } - -}; - template class TrieSpline { private: @@ -63,10 +37,6 @@ private: typedef decltype(R::value) V; public: - - // FIXME: there has to be a better way to do this - friend class TrieSplineRangeQuery; - TrieSpline(MutableBuffer* buffer) : m_reccnt(0), m_tombstone_cnt(0) { @@ -254,8 +224,6 @@ public: return 0; } -private: - size_t get_lower_bound(const K& key) const { auto bound = m_ts.GetSearchBound(key); size_t idx = bound.begin; @@ -293,6 +261,8 @@ private: return (m_data[idx].rec.key <= key) ? idx : m_reccnt; } +private: + Wrapped* m_data; size_t m_reccnt; size_t m_tombstone_cnt; @@ -302,154 +272,4 @@ private: ts::TrieSpline m_ts; BloomFilter *m_bf; }; - - -template -class TrieSplineRangeQuery { -public: - constexpr static bool EARLY_ABORT=false; - constexpr static bool SKIP_DELETE_FILTER=true; - - static void *get_query_state(TrieSpline *ts, void *parms) { - auto res = new TrieSplineState(); - auto p = (ts_range_query_parms *) parms; - - res->start_idx = ts->get_lower_bound(p->lower_bound); - res->stop_idx = ts->get_record_count(); - - return res; - } - - static void* get_buffer_query_state(MutableBuffer *buffer, void *parms) { - auto res = new TrieSplineBufferState(); - res->cutoff = buffer->get_record_count(); - - return res; - } - - static void process_query_states(void *query_parms, std::vector &shard_states, void *buff_state) { - return; - } - - static std::vector> query(TrieSpline *ts, void *q_state, void *parms) { - //std::vector> records; - size_t tot = 0; - auto p = (ts_range_query_parms *) parms; - auto s = (TrieSplineState *) q_state; - - // if the returned index is one past the end of the - // records for the TrieSpline, then there are not records - // in the index falling into the specified range. - if (s->start_idx == ts->get_record_count()) { - return {}; - } - - auto ptr = ts->get_record_at(s->start_idx); - - // roll the pointer forward to the first record that is - // greater than or equal to the lower bound. 
- while(ptr->rec.key < p->lower_bound) { - ptr++; - } - - - while (ptr->rec.key <= p->upper_bound && ptr < ts->m_data + s->stop_idx) { - if (ptr->is_tombstone()) --tot; - else if (!ptr->is_deleted()) ++tot; - //records.emplace_back(*ptr); - ptr++; - } - - return {Wrapped{0, {tot, 0}}}; - //return records; - } - - static std::vector> buffer_query(MutableBuffer *buffer, void *state, void *parms) { - size_t tot = 0; - auto p = (ts_range_query_parms *) parms; - auto s = (TrieSplineBufferState *) state; - - //std::vector> records; - for (size_t i=0; icutoff; i++) { - auto rec = buffer->get_data() + i; - if (rec->rec.key >= p->lower_bound && rec->rec.key <= p->upper_bound) { - if (rec->is_tombstone()) --tot; - else if (!rec->is_deleted()) ++tot; - //records.emplace_back(*rec); - } - - } - - return {Wrapped{0, {tot, 0}}}; - //return records; - } - - static std::vector merge(std::vector>> &results, void *parms) { -/* - std::vector>> cursors; - cursors.reserve(results.size()); - - PriorityQueue> pq(results.size()); - size_t total = 0; - size_t tmp_n = results.size(); - - - for (size_t i = 0; i < tmp_n; ++i) - if (results[i].size() > 0){ - auto base = results[i].data(); - cursors.emplace_back(Cursor{base, base + results[i].size(), 0, results[i].size()}); - assert(i == cursors.size() - 1); - total += results[i].size(); - pq.push(cursors[i].ptr, tmp_n - i - 1); - } else { - cursors.emplace_back(Cursor>{nullptr, nullptr, 0, 0}); - } - - if (total == 0) { - return std::vector(); - } - - std::vector output; - output.reserve(total); - - while (pq.size()) { - auto now = pq.peek(); - auto next = pq.size() > 1 ? pq.peek(1) : queue_record>{nullptr, 0}; - if (!now.data->is_tombstone() && next.data != nullptr && - now.data->rec == next.data->rec && next.data->is_tombstone()) { - - pq.pop(); pq.pop(); - auto& cursor1 = cursors[tmp_n - now.version - 1]; - auto& cursor2 = cursors[tmp_n - next.version - 1]; - if (advance_cursor>(cursor1)) pq.push(cursor1.ptr, now.version); - if (advance_cursor>(cursor2)) pq.push(cursor2.ptr, next.version); - } else { - auto& cursor = cursors[tmp_n - now.version - 1]; - if (!now.data->is_tombstone()) output.push_back(cursor.ptr->rec); - pq.pop(); - - if (advance_cursor>(cursor)) pq.push(cursor.ptr, now.version); - } - } - - return output;*/ - - size_t tot = 0; - for (auto& result: results) - if (result.size() > 0) tot += result[0].rec.key; - - return {{tot, 0}}; - } - - static void delete_query_state(void *state) { - auto s = (TrieSplineState *) state; - delete s; - } - - static void delete_buffer_query_state(void *state) { - auto s = (TrieSplineBufferState *) state; - delete s; - } -}; - } diff --git a/include/shard/WSS.h b/include/shard/WSS.h deleted file mode 100644 index 4e3a326..0000000 --- a/include/shard/WSS.h +++ /dev/null @@ -1,453 +0,0 @@ -/* - * include/shard/WSS.h - * - * Copyright (C) 2023 Douglas B. Rumbaugh - * Dong Xie - * - * All rights reserved. Published under the Modified BSD License. 
- * - */ -#pragma once - - -#include -#include -#include -#include -#include - -#include "framework/ShardRequirements.h" - -#include "psu-ds/PriorityQueue.h" -#include "util/Cursor.h" -#include "psu-ds/Alias.h" -#include "psu-ds/BloomFilter.h" -#include "util/bf_config.h" - -using psudb::CACHELINE_SIZE; -using psudb::BloomFilter; -using psudb::PriorityQueue; -using psudb::queue_record; -using psudb::Alias; - -namespace de { - -thread_local size_t wss_cancelations = 0; - -template -struct wss_query_parms { - size_t sample_size; - gsl_rng *rng; -}; - -template -class WSSQuery; - -template -struct WSSState { - decltype(R::weight) total_weight; - size_t sample_size; - - WSSState() { - total_weight = 0; - } -}; - -template -struct WSSBufferState { - size_t cutoff; - size_t sample_size; - Alias* alias; - decltype(R::weight) max_weight; - decltype(R::weight) total_weight; - - ~WSSBufferState() { - delete alias; - } - -}; - -template -class WSS { -private: - typedef decltype(R::key) K; - typedef decltype(R::value) V; - typedef decltype(R::weight) W; - -public: - - // FIXME: there has to be a better way to do this - friend class WSSQuery; - friend class WSSQuery; - - WSS(MutableBuffer* buffer) - : m_reccnt(0), m_tombstone_cnt(0), m_total_weight(0), m_alias(nullptr), m_bf(nullptr) { - - m_alloc_size = (buffer->get_record_count() * sizeof(Wrapped)) + (CACHELINE_SIZE - (buffer->get_record_count() * sizeof(Wrapped)) % CACHELINE_SIZE); - assert(m_alloc_size % CACHELINE_SIZE == 0); - m_data = (Wrapped*)std::aligned_alloc(CACHELINE_SIZE, m_alloc_size); - - m_bf = new BloomFilter(BF_FPR, buffer->get_tombstone_count(), BF_HASH_FUNCS); - - size_t offset = 0; - m_reccnt = 0; - auto base = buffer->get_data(); - auto stop = base + buffer->get_record_count(); - - std::sort(base, stop, std::less>()); - - std::vector weights; - - while (base < stop) { - if (!(base->is_tombstone()) && (base + 1) < stop) { - if (base->rec == (base + 1)->rec && (base + 1)->is_tombstone()) { - base += 2; - wss_cancelations++; - continue; - } - } else if (base->is_deleted()) { - base += 1; - continue; - } - - // FIXME: this shouldn't be necessary, but the tagged record - // bypass doesn't seem to be working on this code-path, so this - // ensures that tagged records from the buffer are able to be - // dropped, eventually. 
It should only need to be &= 1 - base->header &= 3; - m_data[m_reccnt++] = *base; - m_total_weight+= base->rec.weight; - weights.push_back(base->rec.weight); - - if (m_bf && base->is_tombstone()) { - m_tombstone_cnt++; - m_bf->insert(base->rec); - } - - base++; - } - - if (m_reccnt > 0) { - build_alias_structure(weights); - } - } - - WSS(WSS** shards, size_t len) - : m_reccnt(0), m_tombstone_cnt(0), m_total_weight(0), m_alias(nullptr), m_bf(nullptr) { - std::vector>> cursors; - cursors.reserve(len); - - PriorityQueue> pq(len); - - size_t attemp_reccnt = 0; - size_t tombstone_count = 0; - - for (size_t i = 0; i < len; ++i) { - if (shards[i]) { - auto base = shards[i]->get_data(); - cursors.emplace_back(Cursor{base, base + shards[i]->get_record_count(), 0, shards[i]->get_record_count()}); - attemp_reccnt += shards[i]->get_record_count(); - tombstone_count += shards[i]->get_tombstone_count(); - pq.push(cursors[i].ptr, i); - } else { - cursors.emplace_back(Cursor>{nullptr, nullptr, 0, 0}); - } - } - - m_bf = new BloomFilter(BF_FPR, tombstone_count, BF_HASH_FUNCS); - - m_alloc_size = (attemp_reccnt * sizeof(Wrapped)) + (CACHELINE_SIZE - (attemp_reccnt * sizeof(Wrapped)) % CACHELINE_SIZE); - assert(m_alloc_size % CACHELINE_SIZE == 0); - m_data = (Wrapped*)std::aligned_alloc(CACHELINE_SIZE, m_alloc_size); - - std::vector weights; - - while (pq.size()) { - auto now = pq.peek(); - auto next = pq.size() > 1 ? pq.peek(1) : queue_record>{nullptr, 0}; - if (!now.data->is_tombstone() && next.data != nullptr && - now.data->rec == next.data->rec && next.data->is_tombstone()) { - - pq.pop(); pq.pop(); - auto& cursor1 = cursors[now.version]; - auto& cursor2 = cursors[next.version]; - if (advance_cursor>(cursor1)) pq.push(cursor1.ptr, now.version); - if (advance_cursor>(cursor2)) pq.push(cursor2.ptr, next.version); - } else { - auto& cursor = cursors[now.version]; - if (!cursor.ptr->is_deleted()) { - m_data[m_reccnt++] = *cursor.ptr; - m_total_weight += cursor.ptr->rec.weight; - weights.push_back(cursor.ptr->rec.weight); - if (m_bf && cursor.ptr->is_tombstone()) { - ++m_tombstone_cnt; - if (m_bf) m_bf->insert(cursor.ptr->rec); - } - } - pq.pop(); - - if (advance_cursor>(cursor)) pq.push(cursor.ptr, now.version); - } - } - - if (m_reccnt > 0) { - build_alias_structure(weights); - } - } - - ~WSS() { - if (m_data) free(m_data); - if (m_alias) delete m_alias; - if (m_bf) delete m_bf; - - } - - Wrapped *point_lookup(const R &rec, bool filter=false) { - if (filter && !m_bf->lookup(rec)) { - return nullptr; - } - - size_t idx = get_lower_bound(rec.key); - if (idx >= m_reccnt) { - return nullptr; - } - - while (idx < m_reccnt && m_data[idx].rec < rec) ++idx; - - if (m_data[idx].rec == rec) { - return m_data + idx; - } - - return nullptr; - } - - Wrapped* get_data() const { - return m_data; - } - - size_t get_record_count() const { - return m_reccnt; - } - - size_t get_tombstone_count() const { - return m_tombstone_cnt; - } - - const Wrapped* get_record_at(size_t idx) const { - if (idx >= m_reccnt) return nullptr; - return m_data + idx; - } - - - size_t get_memory_usage() { - return m_alloc_size; - } - - size_t get_aux_memory_usage() { - return 0; - } - -private: - - size_t get_lower_bound(const K& key) const { - size_t min = 0; - size_t max = m_reccnt - 1; - - const char * record_key; - while (min < max) { - size_t mid = (min + max) / 2; - - if (key > m_data[mid].rec.key) { - min = mid + 1; - } else { - max = mid; - } - } - - return min; - } - - void build_alias_structure(std::vector &weights) { - - // normalize the 
weights vector - std::vector norm_weights(weights.size()); - - for (size_t i=0; i* m_data; - Alias *m_alias; - W m_total_weight; - size_t m_reccnt; - size_t m_tombstone_cnt; - size_t m_group_size; - size_t m_alloc_size; - BloomFilter *m_bf; -}; - - -template -class WSSQuery { -public: - - constexpr static bool EARLY_ABORT=false; - constexpr static bool SKIP_DELETE_FILTER=false; - - static void *get_query_state(WSS *wss, void *parms) { - auto res = new WSSState(); - res->total_weight = wss->m_total_weight; - res->sample_size = 0; - - return res; - } - - static void* get_buffer_query_state(MutableBuffer *buffer, void *parms) { - WSSBufferState *state = new WSSBufferState(); - auto parameters = (wss_query_parms*) parms; - if constexpr (Rejection) { - state->cutoff = buffer->get_record_count() - 1; - state->max_weight = buffer->get_max_weight(); - state->total_weight = buffer->get_total_weight(); - return state; - } - - std::vector weights; - - state->cutoff = buffer->get_record_count() - 1; - double total_weight = 0.0; - - for (size_t i = 0; i <= state->cutoff; i++) { - auto rec = buffer->get_data() + i; - weights.push_back(rec->rec.weight); - total_weight += rec->rec.weight; - } - - for (size_t i = 0; i < weights.size(); i++) { - weights[i] = weights[i] / total_weight; - } - - state->alias = new Alias(weights); - state->total_weight = total_weight; - - return state; - } - - static void process_query_states(void *query_parms, std::vector &shard_states, void *buff_state) { - auto p = (wss_query_parms *) query_parms; - auto bs = (WSSBufferState *) buff_state; - - std::vector shard_sample_sizes(shard_states.size()+1, 0); - size_t buffer_sz = 0; - - std::vector weights; - weights.push_back(bs->total_weight); - - decltype(R::weight) total_weight = 0; - for (auto &s : shard_states) { - auto state = (WSSState *) s; - total_weight += state->total_weight; - weights.push_back(state->total_weight); - } - - std::vector normalized_weights; - for (auto w : weights) { - normalized_weights.push_back((double) w / (double) total_weight); - } - - auto shard_alias = Alias(normalized_weights); - for (size_t i=0; isample_size; i++) { - auto idx = shard_alias.get(p->rng); - if (idx == 0) { - buffer_sz++; - } else { - shard_sample_sizes[idx - 1]++; - } - } - - - bs->sample_size = buffer_sz; - for (size_t i=0; i *) shard_states[i]; - state->sample_size = shard_sample_sizes[i+1]; - } - } - - static std::vector> query(WSS *wss, void *q_state, void *parms) { - auto rng = ((wss_query_parms *) parms)->rng; - - auto state = (WSSState *) q_state; - auto sample_size = state->sample_size; - - std::vector> result_set; - - if (sample_size == 0) { - return result_set; - } - size_t attempts = 0; - do { - attempts++; - size_t idx = wss->m_alias->get(rng); - result_set.emplace_back(*wss->get_record_at(idx)); - } while (attempts < sample_size); - - return result_set; - } - - static std::vector> buffer_query(MutableBuffer *buffer, void *state, void *parms) { - auto st = (WSSBufferState *) state; - auto p = (wss_query_parms *) parms; - - std::vector> result; - result.reserve(st->sample_size); - - if constexpr (Rejection) { - for (size_t i=0; isample_size; i++) { - auto idx = gsl_rng_uniform_int(p->rng, st->cutoff); - auto rec = buffer->get_data() + idx; - - auto test = gsl_rng_uniform(p->rng) * st->max_weight; - - if (test <= rec->rec.weight) { - result.emplace_back(*rec); - } - } - return result; - } - - for (size_t i=0; isample_size; i++) { - auto idx = st->alias->get(p->rng); - result.emplace_back(*(buffer->get_data() + idx)); - } 
- - return result; - } - - static std::vector merge(std::vector>> &results, void *parms) { - std::vector output; - - for (size_t i=0; i *) state; - delete s; - } - - static void delete_buffer_query_state(void *state) { - auto s = (WSSBufferState *) state; - delete s; - } -}; - -} diff --git a/tests/alias_tests.cpp b/tests/alias_tests.cpp new file mode 100644 index 0000000..b9e678b --- /dev/null +++ b/tests/alias_tests.cpp @@ -0,0 +1,391 @@ +/* + * tests/alias_tests.cpp + * + * Unit tests for Alias shard + * + * Copyright (C) 2023 Douglas Rumbaugh + * Dong Xie + * + * All rights reserved. Published under the Modified BSD License. + * + */ + +#include "shard/Alias.h" +#include "query/wss.h" +#include "testing.h" + +#include + +using namespace de; + +typedef Alias Shard; + +START_TEST(t_mbuffer_init) +{ + auto buffer = new MutableBuffer(1024, 1024); + for (uint64_t i = 512; i > 0; i--) { + uint32_t v = i; + buffer->append({i,v, 1}); + } + + for (uint64_t i = 1; i <= 256; ++i) { + uint32_t v = i; + buffer->append({i, v, 1}, true); + } + + for (uint64_t i = 257; i <= 512; ++i) { + uint32_t v = i + 1; + buffer->append({i, v, 1}); + } + + Shard* shard = new Shard(buffer); + ck_assert_uint_eq(shard->get_record_count(), 512); + + delete buffer; + delete shard; +} + + +START_TEST(t_alias_init) +{ + size_t n = 512; + auto mbuffer1 = create_test_mbuffer(n); + auto mbuffer2 = create_test_mbuffer(n); + auto mbuffer3 = create_test_mbuffer(n); + + auto shard1 = new Shard(mbuffer1); + auto shard2 = new Shard(mbuffer2); + auto shard3 = new Shard(mbuffer3); + + Shard* shards[3] = {shard1, shard2, shard3}; + auto shard4 = new Shard(shards, 3); + + ck_assert_int_eq(shard4->get_record_count(), n * 3); + ck_assert_int_eq(shard4->get_tombstone_count(), 0); + + size_t total_cnt = 0; + size_t shard1_idx = 0; + size_t shard2_idx = 0; + size_t shard3_idx = 0; + + for (size_t i = 0; i < shard4->get_record_count(); ++i) { + auto rec1 = shard1->get_record_at(shard1_idx); + auto rec2 = shard2->get_record_at(shard2_idx); + auto rec3 = shard3->get_record_at(shard3_idx); + + auto cur_rec = shard4->get_record_at(i); + + if (shard1_idx < n && cur_rec->rec == rec1->rec) { + ++shard1_idx; + } else if (shard2_idx < n && cur_rec->rec == rec2->rec) { + ++shard2_idx; + } else if (shard3_idx < n && cur_rec->rec == rec3->rec) { + ++shard3_idx; + } else { + assert(false); + } + } + + delete mbuffer1; + delete mbuffer2; + delete mbuffer3; + + delete shard1; + delete shard2; + delete shard3; + delete shard4; +} + + +START_TEST(t_point_lookup) +{ + size_t n = 10000; + + auto buffer = create_double_seq_mbuffer(n, false); + auto alias = Shard(buffer); + + for (size_t i=0; iget_data() + i); + r.key = rec->rec.key; + r.value = rec->rec.value; + + auto result = alias.point_lookup(r); + ck_assert_ptr_nonnull(result); + ck_assert_int_eq(result->rec.key, r.key); + ck_assert_int_eq(result->rec.value, r.value); + } + + delete buffer; +} +END_TEST + + +START_TEST(t_point_lookup_miss) +{ + size_t n = 10000; + + auto buffer = create_double_seq_mbuffer(n, false); + auto alias = Shard(buffer); + + for (size_t i=n + 100; i<2*n; i++) { + WRec r; + r.key = i; + r.value = i; + + auto result = alias.point_lookup(r); + ck_assert_ptr_null(result); + } + + delete buffer; +} + +START_TEST(t_full_cancelation) +{ + size_t n = 100; + auto buffer = create_double_seq_mbuffer(n, false); + auto buffer_ts = create_double_seq_mbuffer(n, true); + + Shard* shard = new Shard(buffer); + Shard* shard_ts = new Shard(buffer_ts); + + ck_assert_int_eq(shard->get_record_count(), 
n); + ck_assert_int_eq(shard->get_tombstone_count(), 0); + ck_assert_int_eq(shard_ts->get_record_count(), n); + ck_assert_int_eq(shard_ts->get_tombstone_count(), n); + + Shard* shards[] = {shard, shard_ts}; + + Shard* merged = new Shard(shards, 2); + + ck_assert_int_eq(merged->get_tombstone_count(), 0); + ck_assert_int_eq(merged->get_record_count(), 0); + + delete buffer; + delete buffer_ts; + delete shard; + delete shard_ts; + delete merged; +} +END_TEST + + +START_TEST(t_alias_query) +{ + size_t n=1000; + auto buffer = create_weighted_mbuffer(n); + + Shard* shard = new Shard(buffer); + + size_t k = 1000; + + size_t cnt[3] = {0}; + wss:Parms parms = {k}; + parms.rng = gsl_rng_alloc(gsl_rng_mt19937); + + size_t total_samples = 0; + + for (size_t i=0; i<1000; i++) { + auto state = wss::Query::get_query_state(shard, &parms); + ((wss::State *) state)->sample_size = k; + auto result = wss::Query::query(shard, state, &parms); + + total_samples += result.size(); + + for (size_t j=0; j::delete_query_state(state); + } + + ck_assert(roughly_equal(cnt[0], (double) total_samples/4.0, total_samples, .05)); + ck_assert(roughly_equal(cnt[1], (double) total_samples/4.0, total_samples, .05)); + ck_assert(roughly_equal(cnt[2], (double) total_samples/2.0, total_samples, .05)); + + gsl_rng_free(parms.rng); + delete shard; + delete buffer; +} +END_TEST + + +START_TEST(t_alias_query_merge) +{ + size_t n=1000; + auto buffer = create_weighted_mbuffer(n); + + Shard* shard = new Shard(buffer); + + uint64_t lower_key = 0; + uint64_t upper_key = 5; + + size_t k = 1000; + + size_t cnt[3] = {0}; + wss:Parms parms = {k}; + parms.rng = gsl_rng_alloc(gsl_rng_mt19937); + + std::vector>> results(2); + + for (size_t i=0; i<1000; i++) { + auto state1 = wss::Query::get_query_state(shard, &parms); + ((wss::State *) state1)->sample_size = k; + results[0] = wss::Query::query(shard, state1, &parms); + + auto state2 = wss::Query::get_query_state(shard, &parms); + ((wss::State *) state2)->sample_size = k; + results[1] = wss::Query::query(shard, state2, &parms); + + wss::Query::delete_query_state(state1); + wss::Query::delete_query_state(state2); + } + + auto merged = wss::Query::merge(results, nullptr); + + ck_assert_int_eq(merged.size(), 2*k); + for (size_t i=0; i(n); + + uint64_t lower_key = 0; + uint64_t upper_key = 5; + + size_t k = 1000; + + size_t cnt[3] = {0}; + wss:Parms parms = {k}; + parms.rng = gsl_rng_alloc(gsl_rng_mt19937); + + size_t total_samples = 0; + + for (size_t i=0; i<1000; i++) { + auto state = wss::Query::get_buffer_query_state(buffer, &parms); + ((wss::BufferState *) state)->sample_size = k; + auto result = wss::Query::buffer_query(buffer, state, &parms); + total_samples += result.size(); + + for (size_t j=0; j::delete_buffer_query_state(state); + } + + ck_assert(roughly_equal(cnt[0], (double) total_samples/4.0, total_samples, .05)); + ck_assert(roughly_equal(cnt[1], (double) total_samples/4.0, total_samples, .05)); + ck_assert(roughly_equal(cnt[2], (double) total_samples/2.0, total_samples, .05)); + + gsl_rng_free(parms.rng); + delete buffer; +} +END_TEST + + +START_TEST(t_alias_buffer_query_rejection) +{ + size_t n=1000; + auto buffer = create_weighted_mbuffer(n); + + uint64_t lower_key = 0; + uint64_t upper_key = 5; + + size_t k = 1000; + + size_t cnt[3] = {0}; + wss:Parms parms = {k}; + parms.rng = gsl_rng_alloc(gsl_rng_mt19937); + + size_t total_samples = 0; + + for (size_t i=0; i<1000; i++) { + auto state = wss::Query::get_buffer_query_state(buffer, &parms); + ((wss::BufferState *) state)->sample_size 
= k; + auto result = wss::Query::buffer_query(buffer, state, &parms); + + total_samples += result.size(); + + for (size_t j=0; j::delete_buffer_query_state(state); + } + + ck_assert(roughly_equal(cnt[0], (double) total_samples/4.0, total_samples, .1)); + ck_assert(roughly_equal(cnt[1], (double) total_samples/4.0, total_samples, .1)); + ck_assert(roughly_equal(cnt[2], (double) total_samples/2.0, total_samples, .1)); + + gsl_rng_free(parms.rng); + delete buffer; +} +END_TEST + + +Suite *unit_testing() +{ + Suite *unit = suite_create("Alias Shard Unit Testing"); + + TCase *create = tcase_create("de::Alias constructor Testing"); + tcase_add_test(create, t_mbuffer_init); + tcase_add_test(create, t_alias_init); + tcase_set_timeout(create, 100); + suite_add_tcase(unit, create); + + + TCase *tombstone = tcase_create("de:Alias::tombstone cancellation Testing"); + tcase_add_test(tombstone, t_full_cancelation); + suite_add_tcase(unit, tombstone); + + + TCase *lookup = tcase_create("de:Alias:point_lookup Testing"); + tcase_add_test(lookup, t_point_lookup); + tcase_add_test(lookup, t_point_lookup_miss); + suite_add_tcase(unit, lookup); + + + + TCase *sampling = tcase_create("de:Alias::AliasQuery Testing"); + tcase_add_test(sampling, t_alias_query); + tcase_add_test(sampling, t_alias_query_merge); + tcase_add_test(sampling, t_alias_buffer_query_rejection); + tcase_add_test(sampling, t_alias_buffer_query_scan); + suite_add_tcase(unit, sampling); + + return unit; +} + + +int shard_unit_tests() +{ + int failed = 0; + Suite *unit = unit_testing(); + SRunner *unit_shardner = srunner_create(unit); + + srunner_run_all(unit_shardner, CK_NORMAL); + failed = srunner_ntests_failed(unit_shardner); + srunner_free(unit_shardner); + + return failed; +} + + +int main() +{ + int unit_failed = shard_unit_tests(); + + return (unit_failed == 0) ? 
EXIT_SUCCESS : EXIT_FAILURE; +} diff --git a/tests/memisam_tests.cpp b/tests/memisam_tests.cpp index 0ae97dc..d3b8087 100644 --- a/tests/memisam_tests.cpp +++ b/tests/memisam_tests.cpp @@ -1,7 +1,7 @@ /* - * tests/irs_tests.cpp + * tests/isam_tests.cpp * - * Unit tests for MemISAM (Augmented B+Tree) shard + * Unit tests for ISAM Tree shard * * Copyright (C) 2023 Douglas Rumbaugh * Dong Xie @@ -10,14 +10,15 @@ * */ -#include "shard/MemISAM.h" +#include "shard/ISAMTree.h" +#include "query/irs.h" #include "testing.h" #include using namespace de; -typedef MemISAM Shard; +typedef ISAMTree Shard; START_TEST(t_mbuffer_init) { @@ -181,15 +182,15 @@ START_TEST(t_irs_query) size_t k = 100; size_t cnt[3] = {0}; - irs_query_parms parms = {lower_key, upper_key, k}; + irs::Parms parms = {lower_key, upper_key, k}; parms.rng = gsl_rng_alloc(gsl_rng_mt19937); size_t total_samples = 0; for (size_t i=0; i<1000; i++) { - auto state = IRSQuery::get_query_state(&isam, &parms); - ((IRSState *) state)->sample_size = k; - auto result = IRSQuery::query(&isam, state, &parms); + auto state = irs::Query::get_query_state(&isam, &parms); + ((irs::State *) state)->sample_size = k; + auto result = irs::Query::query(&isam, state, &parms); ck_assert_int_eq(result.size(), k); @@ -198,7 +199,7 @@ START_TEST(t_irs_query) ck_assert_int_ge(rec.rec.key, lower_key); } - IRSQuery::delete_query_state(state); + irs::Query::delete_query_state(state); } gsl_rng_free(parms.rng); @@ -220,25 +221,25 @@ START_TEST(t_irs_query_merge) size_t k = 1000; size_t cnt[3] = {0}; - irs_query_parms parms = {lower_key, upper_key, k}; + irs::Parms parms = {lower_key, upper_key, k}; parms.rng = gsl_rng_alloc(gsl_rng_mt19937); std::vector>> results(2); for (size_t i=0; i<1000; i++) { - auto state1 = IRSQuery::get_query_state(&shard, &parms); - ((IRSState *) state1)->sample_size = k; - results[0] = IRSQuery::query(&shard, state1, &parms); + auto state1 = irs::Query::get_query_state(&shard, &parms); + ((irs::State *) state1)->sample_size = k; + results[0] = irs::Query::query(&shard, state1, &parms); - auto state2 = IRSQuery::get_query_state(&shard, &parms); - ((IRSState *) state2)->sample_size = k; - results[1] = IRSQuery::query(&shard, state2, &parms); + auto state2 = irs::Query::get_query_state(&shard, &parms); + ((irs::State *) state2)->sample_size = k; + results[1] = irs::Query::query(&shard, state2, &parms); - IRSQuery::delete_query_state(state1); - IRSQuery::delete_query_state(state2); + irs::Query::delete_query_state(state1); + irs::Query::delete_query_state(state2); } - auto merged = IRSQuery::merge(results, nullptr); + auto merged = irs::Query::merge(results, nullptr); ck_assert_int_eq(merged.size(), 2*k); for (size_t i=0; i parms = {lower_key, upper_key, k}; + irs::Parms parms = {lower_key, upper_key, k}; parms.rng = gsl_rng_alloc(gsl_rng_mt19937); size_t total_samples = 0; for (size_t i=0; i<1000; i++) { - auto state = IRSQuery::get_buffer_query_state(buffer, &parms); - ((IRSBufferState *) state)->sample_size = k; - auto result = IRSQuery::buffer_query(buffer, state, &parms); + auto state = irs::Query::get_buffer_query_state(buffer, &parms); + ((irs::BufferState *) state)->sample_size = k; + auto result = irs::Query::buffer_query(buffer, state, &parms); ck_assert_int_eq(result.size(), k); @@ -280,7 +281,7 @@ START_TEST(t_irs_buffer_query_scan) ck_assert_int_ge(rec.rec.key, lower_key); } - IRSQuery::delete_buffer_query_state(state); + irs::Query::delete_buffer_query_state(state); } gsl_rng_free(parms.rng); @@ -300,15 +301,15 @@ 
START_TEST(t_irs_buffer_query_rejection) size_t k = 10000; size_t cnt[3] = {0}; - irs_query_parms parms = {lower_key, upper_key, k}; + irs::Parms parms = {lower_key, upper_key, k}; parms.rng = gsl_rng_alloc(gsl_rng_mt19937); size_t total_samples = 0; for (size_t i=0; i<1000; i++) { - auto state = IRSQuery::get_buffer_query_state(buffer, &parms); - ((IRSBufferState *) state)->sample_size = k; - auto result = IRSQuery::buffer_query(buffer, state, &parms); + auto state = irs::Query::get_buffer_query_state(buffer, &parms); + ((irs::BufferState *) state)->sample_size = k; + auto result = irs::Query::buffer_query(buffer, state, &parms); ck_assert_int_gt(result.size(), 0); ck_assert_int_le(result.size(), k); @@ -318,7 +319,7 @@ START_TEST(t_irs_buffer_query_rejection) ck_assert_int_ge(rec.rec.key, lower_key); } - IRSQuery::delete_buffer_query_state(state); + irs::Query::delete_buffer_query_state(state); } gsl_rng_free(parms.rng); @@ -329,27 +330,27 @@ END_TEST Suite *unit_testing() { - Suite *unit = suite_create("MemISAM Shard Unit Testing"); + Suite *unit = suite_create("ISAMTree Shard Unit Testing"); - TCase *create = tcase_create("de::MemISAM constructor Testing"); + TCase *create = tcase_create("de::ISAMTree constructor Testing"); tcase_add_test(create, t_mbuffer_init); tcase_add_test(create, t_irs_init); tcase_set_timeout(create, 100); suite_add_tcase(unit, create); - TCase *tombstone = tcase_create("de:MemISAM::tombstone cancellation Testing"); + TCase *tombstone = tcase_create("de:ISAMTree::tombstone cancellation Testing"); tcase_add_test(tombstone, t_full_cancelation); suite_add_tcase(unit, tombstone); - TCase *lookup = tcase_create("de:MemISAM:point_lookup Testing"); + TCase *lookup = tcase_create("de:ISAMTree:point_lookup Testing"); tcase_add_test(lookup, t_point_lookup); tcase_add_test(lookup, t_point_lookup_miss); suite_add_tcase(unit, lookup); - TCase *sampling = tcase_create("de:MemISAM::MemISAMQuery Testing"); + TCase *sampling = tcase_create("de:ISAMTree::ISAMTreeQuery Testing"); tcase_add_test(sampling, t_irs_query); tcase_add_test(sampling, t_irs_query_merge); tcase_add_test(sampling, t_irs_buffer_query_rejection); diff --git a/tests/pgm_tests.cpp b/tests/pgm_tests.cpp index 0552417..1565384 100644 --- a/tests/pgm_tests.cpp +++ b/tests/pgm_tests.cpp @@ -11,6 +11,7 @@ */ #include "shard/PGM.h" +#include "query/rangequery.h" #include "testing.h" #include @@ -144,13 +145,13 @@ START_TEST(t_range_query) auto buffer = create_sequential_mbuffer(100, 1000); auto shard = Shard(buffer); - pgm_range_query_parms parms; + rq::Parms parms; parms.lower_bound = 300; parms.upper_bound = 500; - auto state = PGMRangeQuery::get_query_state(&shard, &parms); - auto result = PGMRangeQuery::query(&shard, state, &parms); - PGMRangeQuery::delete_query_state(state); + auto state = rq::Query::get_query_state(&shard, &parms); + auto result = rq::Query::query(&shard, state, &parms); + rq::Query::delete_query_state(state); ck_assert_int_eq(result.size(), parms.upper_bound - parms.lower_bound + 1); for (size_t i=0; i(100, 1000); - pgm_range_query_parms parms; + rq::Parms parms; parms.lower_bound = 300; parms.upper_bound = 500; - auto state = PGMRangeQuery::get_buffer_query_state(buffer, &parms); - auto result = PGMRangeQuery::buffer_query(buffer, state, &parms); - PGMRangeQuery::delete_buffer_query_state(state); + auto state = rq::Query::get_buffer_query_state(buffer, &parms); + auto result = rq::Query::buffer_query(buffer, state, &parms); + rq::Query::delete_buffer_query_state(state); 
ck_assert_int_eq(result.size(), parms.upper_bound - parms.lower_bound + 1); for (size_t i=0; i parms; + rq::Parms parms; parms.lower_bound = 150; parms.upper_bound = 500; size_t result_size = parms.upper_bound - parms.lower_bound + 1 - 200; - auto state1 = PGMRangeQuery::get_query_state(&shard1, &parms); - auto state2 = PGMRangeQuery::get_query_state(&shard2, &parms); + auto state1 = rq::Query::get_query_state(&shard1, &parms); + auto state2 = rq::Query::get_query_state(&shard2, &parms); std::vector>> results(2); - results[0] = PGMRangeQuery::query(&shard1, state1, &parms); - results[1] = PGMRangeQuery::query(&shard2, state2, &parms); + results[0] = rq::Query::query(&shard1, state1, &parms); + results[1] = rq::Query::query(&shard2, state2, &parms); - PGMRangeQuery::delete_query_state(state1); - PGMRangeQuery::delete_query_state(state2); + rq::Query::delete_query_state(state1); + rq::Query::delete_query_state(state2); ck_assert_int_eq(results[0].size() + results[1].size(), result_size); @@ -221,7 +222,7 @@ START_TEST(t_range_query_merge) } } - auto result = PGMRangeQuery::merge(proc_results, nullptr); + auto result = rq::Query::merge(proc_results, nullptr); std::sort(result.begin(), result.end()); ck_assert_int_eq(result.size(), result_size); diff --git a/tests/triespline_tests.cpp b/tests/triespline_tests.cpp index 6f63961..101f143 100644 --- a/tests/triespline_tests.cpp +++ b/tests/triespline_tests.cpp @@ -13,6 +13,7 @@ #include #include "shard/TrieSpline.h" +#include "query/rangequery.h" #include "testing.h" #include @@ -176,13 +177,13 @@ START_TEST(t_range_query) auto buffer = create_sequential_mbuffer(100, 1000); auto shard = Shard(buffer); - ts_range_query_parms parms; + rq::Parms parms; parms.lower_bound = 300; parms.upper_bound = 500; - auto state = TrieSplineRangeQuery::get_query_state(&shard, &parms); - auto result = TrieSplineRangeQuery::query(&shard, state, &parms); - TrieSplineRangeQuery::delete_query_state(state); + auto state = rq::Query::get_query_state(&shard, &parms); + auto result = rq::Query::query(&shard, state, &parms); + rq::Query::delete_query_state(state); ck_assert_int_eq(result.size(), parms.upper_bound - parms.lower_bound + 1); for (size_t i=0; i(100, 1000); - ts_range_query_parms parms; + rq::Parms parms; parms.lower_bound = 300; parms.upper_bound = 500; - auto state = TrieSplineRangeQuery::get_buffer_query_state(buffer, &parms); - auto result = TrieSplineRangeQuery::buffer_query(buffer, state, &parms); - TrieSplineRangeQuery::delete_buffer_query_state(state); + auto state = rq::Query::get_buffer_query_state(buffer, &parms); + auto result = rq::Query::buffer_query(buffer, state, &parms); + rq::Query::delete_buffer_query_state(state); ck_assert_int_eq(result.size(), parms.upper_bound - parms.lower_bound + 1); for (size_t i=0; i - * Dong Xie - * - * All rights reserved. Published under the Modified BSD License. 
- * - */ - -#include "shard/WSS.h" -#include "testing.h" - -#include - -using namespace de; - -typedef WSS Shard; - -START_TEST(t_mbuffer_init) -{ - auto buffer = new MutableBuffer(1024, 1024); - for (uint64_t i = 512; i > 0; i--) { - uint32_t v = i; - buffer->append({i,v, 1}); - } - - for (uint64_t i = 1; i <= 256; ++i) { - uint32_t v = i; - buffer->append({i, v, 1}, true); - } - - for (uint64_t i = 257; i <= 512; ++i) { - uint32_t v = i + 1; - buffer->append({i, v, 1}); - } - - Shard* shard = new Shard(buffer); - ck_assert_uint_eq(shard->get_record_count(), 512); - - delete buffer; - delete shard; -} - - -START_TEST(t_wss_init) -{ - size_t n = 512; - auto mbuffer1 = create_test_mbuffer(n); - auto mbuffer2 = create_test_mbuffer(n); - auto mbuffer3 = create_test_mbuffer(n); - - auto shard1 = new Shard(mbuffer1); - auto shard2 = new Shard(mbuffer2); - auto shard3 = new Shard(mbuffer3); - - Shard* shards[3] = {shard1, shard2, shard3}; - auto shard4 = new Shard(shards, 3); - - ck_assert_int_eq(shard4->get_record_count(), n * 3); - ck_assert_int_eq(shard4->get_tombstone_count(), 0); - - size_t total_cnt = 0; - size_t shard1_idx = 0; - size_t shard2_idx = 0; - size_t shard3_idx = 0; - - for (size_t i = 0; i < shard4->get_record_count(); ++i) { - auto rec1 = shard1->get_record_at(shard1_idx); - auto rec2 = shard2->get_record_at(shard2_idx); - auto rec3 = shard3->get_record_at(shard3_idx); - - auto cur_rec = shard4->get_record_at(i); - - if (shard1_idx < n && cur_rec->rec == rec1->rec) { - ++shard1_idx; - } else if (shard2_idx < n && cur_rec->rec == rec2->rec) { - ++shard2_idx; - } else if (shard3_idx < n && cur_rec->rec == rec3->rec) { - ++shard3_idx; - } else { - assert(false); - } - } - - delete mbuffer1; - delete mbuffer2; - delete mbuffer3; - - delete shard1; - delete shard2; - delete shard3; - delete shard4; -} - - -START_TEST(t_point_lookup) -{ - size_t n = 10000; - - auto buffer = create_double_seq_mbuffer(n, false); - auto wss = Shard(buffer); - - for (size_t i=0; iget_data() + i); - r.key = rec->rec.key; - r.value = rec->rec.value; - - auto result = wss.point_lookup(r); - ck_assert_ptr_nonnull(result); - ck_assert_int_eq(result->rec.key, r.key); - ck_assert_int_eq(result->rec.value, r.value); - } - - delete buffer; -} -END_TEST - - -START_TEST(t_point_lookup_miss) -{ - size_t n = 10000; - - auto buffer = create_double_seq_mbuffer(n, false); - auto wss = Shard(buffer); - - for (size_t i=n + 100; i<2*n; i++) { - WRec r; - r.key = i; - r.value = i; - - auto result = wss.point_lookup(r); - ck_assert_ptr_null(result); - } - - delete buffer; -} - -START_TEST(t_full_cancelation) -{ - size_t n = 100; - auto buffer = create_double_seq_mbuffer(n, false); - auto buffer_ts = create_double_seq_mbuffer(n, true); - - Shard* shard = new Shard(buffer); - Shard* shard_ts = new Shard(buffer_ts); - - ck_assert_int_eq(shard->get_record_count(), n); - ck_assert_int_eq(shard->get_tombstone_count(), 0); - ck_assert_int_eq(shard_ts->get_record_count(), n); - ck_assert_int_eq(shard_ts->get_tombstone_count(), n); - - Shard* shards[] = {shard, shard_ts}; - - Shard* merged = new Shard(shards, 2); - - ck_assert_int_eq(merged->get_tombstone_count(), 0); - ck_assert_int_eq(merged->get_record_count(), 0); - - delete buffer; - delete buffer_ts; - delete shard; - delete shard_ts; - delete merged; -} -END_TEST - - -START_TEST(t_wss_query) -{ - size_t n=1000; - auto buffer = create_weighted_mbuffer(n); - - Shard* shard = new Shard(buffer); - - size_t k = 1000; - - size_t cnt[3] = {0}; - wss_query_parms parms = {k}; - 
parms.rng = gsl_rng_alloc(gsl_rng_mt19937); - - size_t total_samples = 0; - - for (size_t i=0; i<1000; i++) { - auto state = WSSQuery::get_query_state(shard, &parms); - ((WSSState *) state)->sample_size = k; - auto result = WSSQuery::query(shard, state, &parms); - - total_samples += result.size(); - - for (size_t j=0; j::delete_query_state(state); - } - - ck_assert(roughly_equal(cnt[0], (double) total_samples/4.0, total_samples, .05)); - ck_assert(roughly_equal(cnt[1], (double) total_samples/4.0, total_samples, .05)); - ck_assert(roughly_equal(cnt[2], (double) total_samples/2.0, total_samples, .05)); - - gsl_rng_free(parms.rng); - delete shard; - delete buffer; -} -END_TEST - - -START_TEST(t_wss_query_merge) -{ - size_t n=1000; - auto buffer = create_weighted_mbuffer(n); - - Shard* shard = new Shard(buffer); - - uint64_t lower_key = 0; - uint64_t upper_key = 5; - - size_t k = 1000; - - size_t cnt[3] = {0}; - wss_query_parms parms = {k}; - parms.rng = gsl_rng_alloc(gsl_rng_mt19937); - - std::vector>> results(2); - - for (size_t i=0; i<1000; i++) { - auto state1 = WSSQuery::get_query_state(shard, &parms); - ((WSSState *) state1)->sample_size = k; - results[0] = WSSQuery::query(shard, state1, &parms); - - auto state2 = WSSQuery::get_query_state(shard, &parms); - ((WSSState *) state2)->sample_size = k; - results[1] = WSSQuery::query(shard, state2, &parms); - - WSSQuery::delete_query_state(state1); - WSSQuery::delete_query_state(state2); - } - - auto merged = WSSQuery::merge(results, nullptr); - - ck_assert_int_eq(merged.size(), 2*k); - for (size_t i=0; i(n); - - uint64_t lower_key = 0; - uint64_t upper_key = 5; - - size_t k = 1000; - - size_t cnt[3] = {0}; - wss_query_parms parms = {k}; - parms.rng = gsl_rng_alloc(gsl_rng_mt19937); - - size_t total_samples = 0; - - for (size_t i=0; i<1000; i++) { - auto state = WSSQuery::get_buffer_query_state(buffer, &parms); - ((WSSBufferState *) state)->sample_size = k; - auto result = WSSQuery::buffer_query(buffer, state, &parms); - total_samples += result.size(); - - for (size_t j=0; j::delete_buffer_query_state(state); - } - - ck_assert(roughly_equal(cnt[0], (double) total_samples/4.0, total_samples, .05)); - ck_assert(roughly_equal(cnt[1], (double) total_samples/4.0, total_samples, .05)); - ck_assert(roughly_equal(cnt[2], (double) total_samples/2.0, total_samples, .05)); - - gsl_rng_free(parms.rng); - delete buffer; -} -END_TEST - - -START_TEST(t_wss_buffer_query_rejection) -{ - size_t n=1000; - auto buffer = create_weighted_mbuffer(n); - - uint64_t lower_key = 0; - uint64_t upper_key = 5; - - size_t k = 1000; - - size_t cnt[3] = {0}; - wss_query_parms parms = {k}; - parms.rng = gsl_rng_alloc(gsl_rng_mt19937); - - size_t total_samples = 0; - - for (size_t i=0; i<1000; i++) { - auto state = WSSQuery::get_buffer_query_state(buffer, &parms); - ((WSSBufferState *) state)->sample_size = k; - auto result = WSSQuery::buffer_query(buffer, state, &parms); - - total_samples += result.size(); - - for (size_t j=0; j::delete_buffer_query_state(state); - } - - ck_assert(roughly_equal(cnt[0], (double) total_samples/4.0, total_samples, .1)); - ck_assert(roughly_equal(cnt[1], (double) total_samples/4.0, total_samples, .1)); - ck_assert(roughly_equal(cnt[2], (double) total_samples/2.0, total_samples, .1)); - - gsl_rng_free(parms.rng); - delete buffer; -} -END_TEST - - -Suite *unit_testing() -{ - Suite *unit = suite_create("WSS Shard Unit Testing"); - - TCase *create = tcase_create("de::WSS constructor Testing"); - tcase_add_test(create, t_mbuffer_init); - 
-    tcase_add_test(create, t_wss_init);
-    tcase_set_timeout(create, 100);
-    suite_add_tcase(unit, create);
-
-
-    TCase *tombstone = tcase_create("de:WSS::tombstone cancellation Testing");
-    tcase_add_test(tombstone, t_full_cancelation);
-    suite_add_tcase(unit, tombstone);
-
-
-    TCase *lookup = tcase_create("de:WSS:point_lookup Testing");
-    tcase_add_test(lookup, t_point_lookup);
-    tcase_add_test(lookup, t_point_lookup_miss);
-    suite_add_tcase(unit, lookup);
-
-
-
-    TCase *sampling = tcase_create("de:WSS::WSSQuery Testing");
-    tcase_add_test(sampling, t_wss_query);
-    tcase_add_test(sampling, t_wss_query_merge);
-    tcase_add_test(sampling, t_wss_buffer_query_rejection);
-    tcase_add_test(sampling, t_wss_buffer_query_scan);
-    suite_add_tcase(unit, sampling);
-
-    return unit;
-}
-
-
-int shard_unit_tests()
-{
-    int failed = 0;
-    Suite *unit = unit_testing();
-    SRunner *unit_shardner = srunner_create(unit);
-
-    srunner_run_all(unit_shardner, CK_NORMAL);
-    failed = srunner_ntests_failed(unit_shardner);
-    srunner_free(unit_shardner);
-
-    return failed;
-}
-
-
-int main()
-{
-    int unit_failed = shard_unit_tests();
-
-    return (unit_failed == 0) ? EXIT_SUCCESS : EXIT_FAILURE;
-}
--
cgit v1.2.3
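
Taken together, the new alias_tests.cpp exercises the refactored sampling path end to end: construct an Alias shard from a full MutableBuffer, obtain per-shard state from wss::Query, assign a sample size, draw the sample, and release the state. The sketch below condenses that pattern into a single routine. It is illustrative only: the weighted record type WRec comes from the test harness, and the angle-bracketed template arguments (Alias<WRec>, wss::Parms<WRec>, wss::Query<WRec, Shard>, wss::State<WRec>) are assumptions that are not spelled out in the diff hunks above.

/*
 * Sketch only: approximates the wss::Query usage shown in alias_tests.cpp.
 * WRec, the example_wss_sample() helper, and all template arguments are
 * assumptions for illustration, not part of the patch.
 */
#include "shard/Alias.h"
#include "query/wss.h"
#include "testing.h"                 /* assumed to provide WRec and buffer helpers */

#include <gsl/gsl_rng.h>

using namespace de;

typedef Alias<WRec> Shard;           /* assumed shard instantiation */

static size_t example_wss_sample(MutableBuffer<WRec> *buffer, size_t k)
{
    Shard shard(buffer);                         /* build the shard from a full buffer */

    wss::Parms<WRec> parms;                      /* sample size plus RNG, as in the tests */
    parms.sample_size = k;
    parms.rng = gsl_rng_alloc(gsl_rng_mt19937);

    auto state = wss::Query<WRec, Shard>::get_query_state(&shard, &parms);
    ((wss::State<WRec> *) state)->sample_size = k;   /* the tests assign this directly */

    auto result = wss::Query<WRec, Shard>::query(&shard, state, &parms);
    wss::Query<WRec, Shard>::delete_query_state(state);

    gsl_rng_free(parms.rng);
    return result.size();                        /* number of weighted samples drawn */
}

The tests assign sample_size on the state directly because they drive a single shard in isolation rather than running the query through the full framework.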
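
The PGM and TrieSpline test updates switch to the shared rq::Query interface in the same way. A minimal sketch of that range-query path follows, under the same caveat: the record type Rec, the shard instantiation, and the template arguments on rq::Parms and rq::Query are assumptions rather than text taken from the diff.

/*
 * Sketch only: range query over a single shard via rq::Query, following the
 * updated pgm_tests.cpp / triespline_tests.cpp. Rec is a hypothetical
 * key/value record type; template arguments are assumed.
 */
#include "shard/TrieSpline.h"
#include "query/rangequery.h"

using namespace de;

typedef TrieSpline<Rec> Shard;                  /* assumed shard instantiation */

static size_t example_range_query(Shard &shard, uint64_t lo, uint64_t hi)
{
    rq::Parms<Rec> parms;
    parms.lower_bound = lo;                     /* inclusive bounds, per the tests */
    parms.upper_bound = hi;

    auto state  = rq::Query<Rec, Shard>::get_query_state(&shard, &parms);
    auto result = rq::Query<Rec, Shard>::query(&shard, state, &parms);
    rq::Query<Rec, Shard>::delete_query_state(state);

    return result.size();                       /* densely sequential keys yield hi - lo + 1 records */
}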