From e02742b07540dd5a9bcbb44dae14856bf10955ed Mon Sep 17 00:00:00 2001
From: Douglas Rumbaugh
Date: Mon, 6 Nov 2023 15:18:53 -0500
Subject: Refactoring progress

---
 include/shard/ISAMTree.h | 339 +++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 339 insertions(+)
 create mode 100644 include/shard/ISAMTree.h

(limited to 'include/shard/ISAMTree.h')

diff --git a/include/shard/ISAMTree.h b/include/shard/ISAMTree.h
new file mode 100644
index 0000000..a610c09
--- /dev/null
+++ b/include/shard/ISAMTree.h
@@ -0,0 +1,339 @@
+/*
+ * include/shard/ISAMTree.h
+ *
+ * Copyright (C) 2023 Douglas B. Rumbaugh
+ *                    Dong Xie
+ *
+ * All rights reserved. Published under the Modified BSD License.
+ *
+ */
+#pragma once
+
+#include <vector>
+#include <cassert>
+#include <queue>
+#include <memory>
+
+#include "framework/ShardRequirements.h"
+
+#include "util/bf_config.h"
+#include "psu-ds/PriorityQueue.h"
+#include "util/Cursor.h"
+#include "psu-util/timer.h"
+
+using psudb::CACHELINE_SIZE;
+using psudb::BloomFilter;
+using psudb::PriorityQueue;
+using psudb::queue_record;
+using psudb::Alias;
+
+namespace de {
+
+thread_local size_t mrun_cancelations = 0;
+
+template <RecordInterface R>
+class ISAMTree {
+private:
+
+typedef decltype(R::key) K;
+typedef decltype(R::value) V;
+
+constexpr static size_t inmem_isam_node_size = 256;
+constexpr static size_t inmem_isam_fanout = inmem_isam_node_size / (sizeof(K) + sizeof(char*));
+
+struct InternalNode {
+    K keys[inmem_isam_fanout];
+    char* child[inmem_isam_fanout];
+};
+
+constexpr static size_t inmem_isam_leaf_fanout = inmem_isam_node_size / sizeof(R);
+constexpr static size_t inmem_isam_node_keyskip = sizeof(K) * inmem_isam_fanout;
+
+static_assert(sizeof(InternalNode) == inmem_isam_node_size, "node size does not match");
+
+public:
+    ISAMTree(MutableBuffer<R>* buffer)
+    :m_reccnt(0), m_tombstone_cnt(0), m_isam_nodes(nullptr), m_deleted_cnt(0) {
+
+        m_bf = new BloomFilter<R>(BF_FPR, buffer->get_tombstone_count(), BF_HASH_FUNCS);
+
+        m_alloc_size = (buffer->get_record_count() * sizeof(Wrapped<R>)) + (CACHELINE_SIZE - (buffer->get_record_count() * sizeof(Wrapped<R>)) % CACHELINE_SIZE);
+        assert(m_alloc_size % CACHELINE_SIZE == 0);
+        m_data = (Wrapped<R>*)std::aligned_alloc(CACHELINE_SIZE, m_alloc_size);
+
+        TIMER_INIT();
+
+        size_t offset = 0;
+        m_reccnt = 0;
+        auto base = buffer->get_data();
+        auto stop = base + buffer->get_record_count();
+
+        TIMER_START();
+        std::sort(base, stop, std::less<Wrapped<R>>());
+        TIMER_STOP();
+        auto sort_time = TIMER_RESULT();
+
+        TIMER_START();
+        while (base < stop) {
+            if (!base->is_tombstone() && (base + 1 < stop)
+                && base->rec == (base + 1)->rec && (base + 1)->is_tombstone()) {
+                base += 2;
+                mrun_cancelations++;
+                continue;
+            } else if (base->is_deleted()) {
+                base += 1;
+                continue;
+            }
+
+            // FIXME: this shouldn't be necessary, but the tagged record
+            // bypass doesn't seem to be working on this code-path, so this
+            // ensures that tagged records from the buffer are able to be
+            // dropped, eventually. It should only need to be &= 1.
+            base->header &= 3;
+            m_data[m_reccnt++] = *base;
+            if (m_bf && base->is_tombstone()) {
+                ++m_tombstone_cnt;
+                m_bf->insert(base->rec);
+            }
+
+            base++;
+        }
+        TIMER_STOP();
+        auto copy_time = TIMER_RESULT();
+
+        TIMER_START();
+        if (m_reccnt > 0) {
+            build_internal_levels();
+        }
+        TIMER_STOP();
+        auto level_time = TIMER_RESULT();
+    }
+
+    ISAMTree(ISAMTree** runs, size_t len)
+    : m_reccnt(0), m_tombstone_cnt(0), m_deleted_cnt(0), m_isam_nodes(nullptr) {
+        std::vector<Cursor<Wrapped<R>>> cursors;
+        cursors.reserve(len);
+
+        PriorityQueue<Wrapped<R>> pq(len);
+
+        size_t attempt_reccnt = 0;
+        size_t tombstone_count = 0;
+
+        for (size_t i = 0; i < len; ++i) {
+            if (runs[i]) {
+                auto base = runs[i]->get_data();
+                cursors.emplace_back(Cursor<Wrapped<R>>{base, base + runs[i]->get_record_count(), 0, runs[i]->get_record_count()});
+                attempt_reccnt += runs[i]->get_record_count();
+                tombstone_count += runs[i]->get_tombstone_count();
+                pq.push(cursors[i].ptr, i);
+            } else {
+                cursors.emplace_back(Cursor<Wrapped<R>>{nullptr, nullptr, 0, 0});
+            }
+        }
+
+        m_bf = new BloomFilter<R>(BF_FPR, tombstone_count, BF_HASH_FUNCS);
+
+        m_alloc_size = (attempt_reccnt * sizeof(Wrapped<R>)) + (CACHELINE_SIZE - (attempt_reccnt * sizeof(Wrapped<R>)) % CACHELINE_SIZE);
+        assert(m_alloc_size % CACHELINE_SIZE == 0);
+        m_data = (Wrapped<R>*)std::aligned_alloc(CACHELINE_SIZE, m_alloc_size);
+
+        size_t offset = 0;
+
+        while (pq.size()) {
+            auto now = pq.peek();
+            auto next = pq.size() > 1 ? pq.peek(1) : queue_record<Wrapped<R>>{nullptr, 0};
+            if (!now.data->is_tombstone() && next.data != nullptr &&
+                now.data->rec == next.data->rec && next.data->is_tombstone()) {
+
+                pq.pop(); pq.pop();
+                auto& cursor1 = cursors[now.version];
+                auto& cursor2 = cursors[next.version];
+                if (advance_cursor(cursor1)) pq.push(cursor1.ptr, now.version);
+                if (advance_cursor(cursor2)) pq.push(cursor2.ptr, next.version);
+            } else {
+                auto& cursor = cursors[now.version];
+                if (!cursor.ptr->is_deleted()) {
+                    m_data[m_reccnt++] = *cursor.ptr;
+                    if (cursor.ptr->is_tombstone()) {
+                        ++m_tombstone_cnt;
+                        m_bf->insert(cursor.ptr->rec);
+                    }
+                }
+                pq.pop();
+
+                if (advance_cursor(cursor)) pq.push(cursor.ptr, now.version);
+            }
+        }
+
+        if (m_reccnt > 0) {
+            build_internal_levels();
+        }
+    }
+
+    ~ISAMTree() {
+        if (m_data) free(m_data);
+        if (m_isam_nodes) free(m_isam_nodes);
+        if (m_bf) delete m_bf;
+    }
+
+    Wrapped<R> *point_lookup(const R &rec, bool filter=false) {
+        if (filter && !m_bf->lookup(rec)) {
+            return nullptr;
+        }
+
+        size_t idx = get_lower_bound(rec.key);
+        if (idx >= m_reccnt) {
+            return nullptr;
+        }
+
+        while (idx < m_reccnt && m_data[idx].rec < rec) ++idx;
+
+        if (idx < m_reccnt && m_data[idx].rec == rec) {
+            return m_data + idx;
+        }
+
+        return nullptr;
+    }
+
+    Wrapped<R>* get_data() const {
+        return m_data;
+    }
+
+    size_t get_record_count() const {
+        return m_reccnt;
+    }
+
+    size_t get_tombstone_count() const {
+        return m_tombstone_cnt;
+    }
+
+    const Wrapped<R>* get_record_at(size_t idx) const {
+        return (idx < m_reccnt) ? m_data + idx : nullptr;
+    }
+
+    size_t get_memory_usage() {
+        return m_internal_node_cnt * inmem_isam_node_size + m_alloc_size;
+    }
+
+    size_t get_aux_memory_usage() {
+        return 0;
+    }
+
+    size_t get_lower_bound(const K& key) const {
+        const InternalNode* now = m_root;
+        while (!is_leaf(reinterpret_cast<const char*>(now))) {
+            const InternalNode* next = nullptr;
+            for (size_t i = 0; i < inmem_isam_fanout - 1; ++i) {
+                if (now->child[i + 1] == nullptr || key <= now->keys[i]) {
+                    next = reinterpret_cast<InternalNode*>(now->child[i]);
+                    break;
+                }
+            }
+
+            now = next ? next : reinterpret_cast<const InternalNode*>(now->child[inmem_isam_fanout - 1]);
+        }
+
+        const Wrapped<R>* pos = reinterpret_cast<const Wrapped<R>*>(now);
+        while (pos < m_data + m_reccnt && pos->rec.key < key) pos++;
+
+        return pos - m_data;
+    }
+
+    size_t get_upper_bound(const K& key) const {
+        const InternalNode* now = m_root;
+        while (!is_leaf(reinterpret_cast<const char*>(now))) {
+            const InternalNode* next = nullptr;
+            for (size_t i = 0; i < inmem_isam_fanout - 1; ++i) {
+                if (now->child[i + 1] == nullptr || key < now->keys[i]) {
+                    next = reinterpret_cast<InternalNode*>(now->child[i]);
+                    break;
+                }
+            }
+
+            now = next ? next : reinterpret_cast<const InternalNode*>(now->child[inmem_isam_fanout - 1]);
+        }
+
+        const Wrapped<R>* pos = reinterpret_cast<const Wrapped<R>*>(now);
+        while (pos < m_data + m_reccnt && pos->rec.key <= key) pos++;
+
+        return pos - m_data;
+    }
+
+
+private:
+    void build_internal_levels() {
+        size_t n_leaf_nodes = m_reccnt / inmem_isam_leaf_fanout + (m_reccnt % inmem_isam_leaf_fanout != 0);
+        size_t level_node_cnt = n_leaf_nodes;
+        size_t node_cnt = 0;
+        do {
+            level_node_cnt = level_node_cnt / inmem_isam_fanout + (level_node_cnt % inmem_isam_fanout != 0);
+            node_cnt += level_node_cnt;
+        } while (level_node_cnt > 1);
+
+        m_alloc_size = (node_cnt * inmem_isam_node_size) + (CACHELINE_SIZE - (node_cnt * inmem_isam_node_size) % CACHELINE_SIZE);
+        assert(m_alloc_size % CACHELINE_SIZE == 0);
+
+        m_isam_nodes = (InternalNode*)std::aligned_alloc(CACHELINE_SIZE, m_alloc_size);
+        m_internal_node_cnt = node_cnt;
+        memset(m_isam_nodes, 0, node_cnt * inmem_isam_node_size);
+
+        InternalNode* current_node = m_isam_nodes;
+
+        const Wrapped<R>* leaf_base = m_data;
+        const Wrapped<R>* leaf_stop = m_data + m_reccnt;
+        while (leaf_base < leaf_stop) {
+            size_t fanout = 0;
+            for (size_t i = 0; i < inmem_isam_fanout; ++i) {
+                auto rec_ptr = leaf_base + inmem_isam_leaf_fanout * i;
+                if (rec_ptr >= leaf_stop) break;
+                const Wrapped<R>* sep_key = std::min(rec_ptr + inmem_isam_leaf_fanout - 1, leaf_stop - 1);
+                current_node->keys[i] = sep_key->rec.key;
+                current_node->child[i] = (char*)rec_ptr;
+                ++fanout;
+            }
+            current_node++;
+            leaf_base += fanout * inmem_isam_leaf_fanout;
+        }
+
+        auto level_start = m_isam_nodes;
+        auto level_stop = current_node;
+        auto current_level_node_cnt = level_stop - level_start;
+        while (current_level_node_cnt > 1) {
+            auto now = level_start;
+            while (now < level_stop) {
+                size_t child_cnt = 0;
+                for (size_t i = 0; i < inmem_isam_fanout; ++i) {
+                    auto node_ptr = now + i;
+                    ++child_cnt;
+                    if (node_ptr >= level_stop) break;
+                    current_node->keys[i] = node_ptr->keys[inmem_isam_fanout - 1];
+                    current_node->child[i] = (char*)node_ptr;
+                }
+                now += child_cnt;
+                current_node++;
+            }
+            level_start = level_stop;
+            level_stop = current_node;
+            current_level_node_cnt = level_stop - level_start;
+        }
+
+        assert(current_level_node_cnt == 1);
+        m_root = level_start;
+    }
+
+    bool is_leaf(const char* ptr) const {
+        return ptr >= (const char*)m_data && ptr < (const char*)(m_data + m_reccnt);
+    }
+
+    // Members: sorted data, internal ISAM levels, reccnt
+    Wrapped<R>* m_data;
+    psudb::BloomFilter<R> *m_bf;
+    InternalNode* m_isam_nodes;
+    InternalNode* m_root;
+    size_t m_reccnt;
+    size_t m_tombstone_cnt;
+    size_t m_internal_node_cnt;
+    size_t m_deleted_cnt;
+    size_t m_alloc_size;
+};
+}
-- cgit v1.2.3
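A note on the cancellation pass in the buffer constructor above: because the
buffer is sorted before the scan, a record and its matching tombstone land
adjacent to one another, so a single forward pass can annihilate such pairs
without copying either into the shard. The toy program below illustrates just
that rule on a plain vector; Item and compact() are hypothetical stand-ins
written for this sketch, not framework code.

    #include <cstdio>
    #include <vector>

    // Hypothetical stand-in for Wrapped<R>: a key plus a tombstone flag.
    struct Item {
        int key;
        bool tombstone;
    };

    // Mirrors the constructor's loop: a non-tombstone record immediately
    // followed by a tombstone for the same key cancels with it; everything
    // else (including uncancelled tombstones) is copied through.
    static std::vector<Item> compact(const std::vector<Item> &sorted) {
        std::vector<Item> out;
        size_t i = 0;
        while (i < sorted.size()) {
            if (!sorted[i].tombstone && i + 1 < sorted.size()
                  && sorted[i].key == sorted[i + 1].key && sorted[i + 1].tombstone) {
                i += 2;  // record + tombstone annihilate
                continue;
            }
            out.push_back(sorted[i++]);
        }
        return out;
    }

    int main() {
        // keys sorted, each tombstone ordered directly after its record
        std::vector<Item> buf = {{1, false}, {2, false}, {2, true}, {3, false}};
        printf("%zu records survive\n", compact(buf).size());  // prints: 2 records survive
    }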
From 357cab549c2ed33970562b84ff6f83923742343d Mon Sep 17 00:00:00 2001
From: Douglas Rumbaugh
Date: Tue, 7 Nov 2023 15:34:24 -0500
Subject: Comment and License updates

---
 include/shard/ISAMTree.h | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

(limited to 'include/shard/ISAMTree.h')

diff --git a/include/shard/ISAMTree.h b/include/shard/ISAMTree.h
index a610c09..e11c899 100644
--- a/include/shard/ISAMTree.h
+++ b/include/shard/ISAMTree.h
@@ -4,7 +4,9 @@
  * Copyright (C) 2023 Douglas B. Rumbaugh
  *                    Dong Xie
  *
- * All rights reserved. Published under the Modified BSD License.
+ * Distributed under the Modified BSD License.
+ *
+ * A shard shim around an in-memory ISAM tree.
  *
  */
 #pragma once
-- cgit v1.2.3

From 5a2c378aad3f1a9923db3191ffaa3fb807d392b2 Mon Sep 17 00:00:00 2001
From: Douglas Rumbaugh
Date: Thu, 11 Jan 2024 15:29:51 -0500
Subject: Ported ISAMTree over to new buffer setup

I may still play with the shard from shards constructor, and queries need
some work yet too.
---
 include/shard/ISAMTree.h | 125 ++++++++++++++++++++++++-----------------------
 1 file changed, 65 insertions(+), 60 deletions(-)

(limited to 'include/shard/ISAMTree.h')

diff --git a/include/shard/ISAMTree.h b/include/shard/ISAMTree.h
index e11c899..6b2f6b5 100644
--- a/include/shard/ISAMTree.h
+++ b/include/shard/ISAMTree.h
@@ -13,8 +13,6 @@
 #include <vector>
 #include <cassert>
-#include <queue>
-#include <memory>
 
 #include "framework/ShardRequirements.h"
 
@@ -27,52 +25,54 @@ using psudb::CACHELINE_SIZE;
 using psudb::BloomFilter;
 using psudb::PriorityQueue;
 using psudb::queue_record;
-using psudb::Alias;
 
 namespace de {
 
-thread_local size_t mrun_cancelations = 0;
-
-template <RecordInterface R>
+template <KVPInterface R>
 class ISAMTree {
 private:
 
 typedef decltype(R::key) K;
 typedef decltype(R::value) V;
 
-constexpr static size_t inmem_isam_node_size = 256;
-constexpr static size_t inmem_isam_fanout = inmem_isam_node_size / (sizeof(K) + sizeof(char*));
+constexpr static size_t NODE_SZ = 256;
+constexpr static size_t INTERNAL_FANOUT = NODE_SZ / (sizeof(K) + sizeof(byte*));
 
 struct InternalNode {
-    K keys[inmem_isam_fanout];
-    char* child[inmem_isam_fanout];
+    K keys[INTERNAL_FANOUT];
+    byte* child[INTERNAL_FANOUT];
 };
 
-constexpr static size_t inmem_isam_leaf_fanout = inmem_isam_node_size / sizeof(R);
-constexpr static size_t inmem_isam_node_keyskip = sizeof(K) * inmem_isam_fanout;
-
-static_assert(sizeof(InternalNode) == inmem_isam_node_size, "node size does not match");
+static_assert(sizeof(InternalNode) == NODE_SZ, "node size does not match");
 
-public:
-    ISAMTree(MutableBuffer<R>* buffer)
-    :m_reccnt(0), m_tombstone_cnt(0), m_isam_nodes(nullptr), m_deleted_cnt(0) {
+constexpr static size_t LEAF_FANOUT = NODE_SZ / sizeof(R);
 
-        m_bf = new BloomFilter<R>(BF_FPR, buffer->get_tombstone_count(), BF_HASH_FUNCS);
-
-        m_alloc_size = (buffer->get_record_count() * sizeof(Wrapped<R>)) + (CACHELINE_SIZE - (buffer->get_record_count() * sizeof(Wrapped<R>)) % CACHELINE_SIZE);
-        assert(m_alloc_size % CACHELINE_SIZE == 0);
-        m_data = (Wrapped<R>*)std::aligned_alloc(CACHELINE_SIZE, m_alloc_size);
+public:
+    ISAMTree(BufferView<R> buffer)
+        : m_bf(new BloomFilter<R>(BF_FPR, buffer.get_tombstone_count(), BF_HASH_FUNCS))
+        , m_isam_nodes(nullptr)
+        , m_root(nullptr)
+        , m_reccnt(0)
+        , m_tombstone_cnt(0)
+        , m_internal_node_cnt(0)
+        , m_deleted_cnt(0)
+        , m_alloc_size(0)
+        , m_data(nullptr)
+    {
 
         TIMER_INIT();
 
-        size_t offset = 0;
-        m_reccnt = 0;
-        auto base = buffer->get_data();
-        auto stop = base + buffer->get_record_count();
+        m_alloc_size = psudb::sf_aligned_alloc(CACHELINE_SIZE, buffer.get_record_count() * sizeof(Wrapped<R>), (byte**) &m_data);
 
         TIMER_START();
+        auto temp_buffer = (Wrapped<R> *) psudb::sf_aligned_alloc(CACHELINE_SIZE, buffer.get_record_count() * sizeof(Wrapped<R>));
+        buffer.copy_to_buffer((byte *) temp_buffer);
+
+        auto base = temp_buffer;
+        auto stop = base + buffer.get_record_count();
         std::sort(base, stop, std::less<Wrapped<R>>());
         TIMER_STOP();
+
         auto sort_time = TIMER_RESULT();
 
         TIMER_START();
@@ -80,7 +80,6 @@ public:
             if (!base->is_tombstone() && (base + 1 < stop)
                 && base->rec == (base + 1)->rec && (base + 1)->is_tombstone()) {
                 base += 2;
-                mrun_cancelations++;
                 continue;
             } else if (base->is_deleted()) {
                 base += 1;
@@ -109,10 +108,21 @@ public:
         }
         TIMER_STOP();
         auto level_time = TIMER_RESULT();
+
+        free(temp_buffer);
     }
 
     ISAMTree(ISAMTree** runs, size_t len)
-    : m_reccnt(0), m_tombstone_cnt(0), m_deleted_cnt(0), m_isam_nodes(nullptr) {
+        : m_bf(nullptr)
+        , m_isam_nodes(nullptr)
+        , m_root(nullptr)
+        , m_reccnt(0)
+        , m_tombstone_cnt(0)
+        , m_internal_node_cnt(0)
+        , m_deleted_cnt(0)
+        , m_alloc_size(0)
+        , m_data(nullptr)
+    {
         std::vector<Cursor<Wrapped<R>>> cursors;
         cursors.reserve(len);
@@ -139,8 +149,6 @@ public:
         assert(m_alloc_size % CACHELINE_SIZE == 0);
         m_data = (Wrapped<R>*)std::aligned_alloc(CACHELINE_SIZE, m_alloc_size);
 
-        size_t offset = 0;
-
         while (pq.size()) {
             auto now = pq.peek();
@@ -173,9 +181,9 @@ public:
     }
 
     ~ISAMTree() {
-        if (m_data) free(m_data);
-        if (m_isam_nodes) free(m_isam_nodes);
-        if (m_bf) delete m_bf;
+        free(m_data);
+        free(m_isam_nodes);
+        delete m_bf;
     }
 
     Wrapped<R> *point_lookup(const R &rec, bool filter=false) {
@@ -214,25 +222,25 @@
     }
 
     size_t get_memory_usage() {
-        return m_internal_node_cnt * inmem_isam_node_size + m_alloc_size;
+        return m_alloc_size;
     }
 
     size_t get_aux_memory_usage() {
-        return 0;
+        return m_bf->memory_usage();
     }
 
     size_t get_lower_bound(const K& key) const {
         const InternalNode* now = m_root;
-        while (!is_leaf(reinterpret_cast<const char*>(now))) {
+        while (!is_leaf(reinterpret_cast<const byte*>(now))) {
             const InternalNode* next = nullptr;
-            for (size_t i = 0; i < inmem_isam_fanout - 1; ++i) {
+            for (size_t i = 0; i < INTERNAL_FANOUT - 1; ++i) {
                 if (now->child[i + 1] == nullptr || key <= now->keys[i]) {
                     next = reinterpret_cast<InternalNode*>(now->child[i]);
                     break;
                 }
             }
 
-            now = next ? next : reinterpret_cast<const InternalNode*>(now->child[inmem_isam_fanout - 1]);
+            now = next ? next : reinterpret_cast<const InternalNode*>(now->child[INTERNAL_FANOUT - 1]);
         }
 
         const Wrapped<R>* pos = reinterpret_cast<const Wrapped<R>*>(now);
@@ -243,16 +251,16 @@
 
     size_t get_upper_bound(const K& key) const {
         const InternalNode* now = m_root;
-        while (!is_leaf(reinterpret_cast<const char*>(now))) {
+        while (!is_leaf(reinterpret_cast<const byte*>(now))) {
             const InternalNode* next = nullptr;
-            for (size_t i = 0; i < inmem_isam_fanout - 1; ++i) {
+            for (size_t i = 0; i < INTERNAL_FANOUT - 1; ++i) {
                 if (now->child[i + 1] == nullptr || key < now->keys[i]) {
                     next = reinterpret_cast<InternalNode*>(now->child[i]);
                     break;
                 }
             }
 
-            now = next ? next : reinterpret_cast<const InternalNode*>(now->child[inmem_isam_fanout - 1]);
+            now = next ? next : reinterpret_cast<const InternalNode*>(now->child[INTERNAL_FANOUT - 1]);
         }
 
         const Wrapped<R>* pos = reinterpret_cast<const Wrapped<R>*>(now);
@@ -264,20 +272,17 @@
 
 private:
     void build_internal_levels() {
-        size_t n_leaf_nodes = m_reccnt / inmem_isam_leaf_fanout + (m_reccnt % inmem_isam_leaf_fanout != 0);
+        size_t n_leaf_nodes = m_reccnt / LEAF_FANOUT + (m_reccnt % LEAF_FANOUT != 0);
+
         size_t level_node_cnt = n_leaf_nodes;
         size_t node_cnt = 0;
         do {
-            level_node_cnt = level_node_cnt / inmem_isam_fanout + (level_node_cnt % inmem_isam_fanout != 0);
+            level_node_cnt = level_node_cnt / INTERNAL_FANOUT + (level_node_cnt % INTERNAL_FANOUT != 0);
             node_cnt += level_node_cnt;
         } while (level_node_cnt > 1);
 
-        m_alloc_size = (node_cnt * inmem_isam_node_size) + (CACHELINE_SIZE - (node_cnt * inmem_isam_node_size) % CACHELINE_SIZE);
-        assert(m_alloc_size % CACHELINE_SIZE == 0);
-
-        m_isam_nodes = (InternalNode*)std::aligned_alloc(CACHELINE_SIZE, m_alloc_size);
+        m_alloc_size += psudb::sf_aligned_calloc(CACHELINE_SIZE, node_cnt, NODE_SZ, (byte**) &m_isam_nodes);
         m_internal_node_cnt = node_cnt;
-        memset(m_isam_nodes, 0, node_cnt * inmem_isam_node_size);
 
         InternalNode* current_node = m_isam_nodes;
@@ -285,16 +290,16 @@ private:
         const Wrapped<R>* leaf_stop = m_data + m_reccnt;
         while (leaf_base < leaf_stop) {
             size_t fanout = 0;
-            for (size_t i = 0; i < inmem_isam_fanout; ++i) {
-                auto rec_ptr = leaf_base + inmem_isam_leaf_fanout * i;
+            for (size_t i = 0; i < INTERNAL_FANOUT; ++i) {
+                auto rec_ptr = leaf_base + LEAF_FANOUT * i;
                 if (rec_ptr >= leaf_stop) break;
-                const Wrapped<R>* sep_key = std::min(rec_ptr + inmem_isam_leaf_fanout - 1, leaf_stop - 1);
+                const Wrapped<R>* sep_key = std::min(rec_ptr + LEAF_FANOUT - 1, leaf_stop - 1);
                 current_node->keys[i] = sep_key->rec.key;
-                current_node->child[i] = (char*)rec_ptr;
+                current_node->child[i] = (byte*)rec_ptr;
                 ++fanout;
             }
             current_node++;
-            leaf_base += fanout * inmem_isam_leaf_fanout;
+            leaf_base += fanout * LEAF_FANOUT;
         }
 
         auto level_start = m_isam_nodes;
@@ -304,12 +309,12 @@ private:
             auto now = level_start;
             while (now < level_stop) {
                 size_t child_cnt = 0;
-                for (size_t i = 0; i < inmem_isam_fanout; ++i) {
+                for (size_t i = 0; i < INTERNAL_FANOUT; ++i) {
                     auto node_ptr = now + i;
                     ++child_cnt;
                     if (node_ptr >= level_stop) break;
-                    current_node->keys[i] = node_ptr->keys[inmem_isam_fanout - 1];
-                    current_node->child[i] = (char*)node_ptr;
+                    current_node->keys[i] = node_ptr->keys[INTERNAL_FANOUT - 1];
+                    current_node->child[i] = (byte*)node_ptr;
                 }
                 now += child_cnt;
                 current_node++;
@@ -323,12 +328,10 @@ private:
         m_root = level_start;
     }
 
-    bool is_leaf(const char* ptr) const {
-        return ptr >= (const char*)m_data && ptr < (const char*)(m_data + m_reccnt);
+    bool is_leaf(const byte* ptr) const {
+        return ptr >= (const byte*)m_data && ptr < (const byte*)(m_data + m_reccnt);
     }
 
-    // Members: sorted data, internal ISAM levels, reccnt
-    Wrapped<R>* m_data;
     psudb::BloomFilter<R> *m_bf;
     InternalNode* m_isam_nodes;
     InternalNode* m_root;
@@ -337,5 +340,7 @@ private:
     size_t m_internal_node_cnt;
     size_t m_deleted_cnt;
     size_t m_alloc_size;
+
+    Wrapped<R>* m_data;
 };
 }
-- cgit v1.2.3
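The psudb::sf_aligned_alloc helper that replaces the hand-rolled alignment
arithmetic in this patch is not shown here, but its contract can be read off
the call sites: it takes an alignment and a byte count, hands the allocation
back through an out-parameter, and returns the padded allocation size, which
the caller stores in m_alloc_size. Below is a minimal sketch of what such a
helper plausibly does under those assumptions; it is not the psudb-common
source.

    #include <cstddef>
    #include <cstdlib>

    using std::byte;

    // Assumed behavior only: pad the request up to a multiple of the
    // alignment (std::aligned_alloc requires this), allocate, and report the
    // padded size so the caller can track its true memory footprint. The
    // pre-patch code inlined the same arithmetic by hand:
    //     size + (CACHELINE_SIZE - size % CACHELINE_SIZE)
    // which, note, adds a full alignment unit even when size is already
    // aligned; the assert on a % CACHELINE_SIZE == 0 still holds either way.
    static size_t sketch_aligned_alloc(size_t align, size_t size, byte **ptr) {
        size_t padded = size + (align - size % align);
        *ptr = static_cast<byte*>(std::aligned_alloc(align, padded));
        return padded;
    }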
From 138c793b0a58577713d98c98bb140cf1d9c79bee Mon Sep 17 00:00:00 2001
From: Douglas Rumbaugh
Date: Wed, 17 Jan 2024 18:22:00 -0500
Subject: Multiple concurrency bug fixes

A poorly organized commit with fixes for a variety of bugs that were
causing missing records. The core problems all appear to be fixed, though
there is an outstanding problem with tombstones not being completely
canceled: a very small number are appearing in the wrong order during the
static structure test.
---
 include/shard/ISAMTree.h | 35 ++++++++++++++++++++---------------
 1 file changed, 20 insertions(+), 15 deletions(-)

(limited to 'include/shard/ISAMTree.h')

diff --git a/include/shard/ISAMTree.h b/include/shard/ISAMTree.h
index 6b2f6b5..932e767 100644
--- a/include/shard/ISAMTree.h
+++ b/include/shard/ISAMTree.h
@@ -62,10 +62,13 @@ public:
     {
         TIMER_INIT();
 
-        m_alloc_size = psudb::sf_aligned_alloc(CACHELINE_SIZE, buffer.get_record_count() * sizeof(Wrapped<R>), (byte**) &m_data);
+        m_alloc_size = psudb::sf_aligned_alloc(CACHELINE_SIZE,
+                                               buffer.get_record_count() *
+                                                   sizeof(Wrapped<R>),
+                                               (byte**) &m_data);
 
         TIMER_START();
-        auto temp_buffer = (Wrapped<R> *) psudb::sf_aligned_alloc(CACHELINE_SIZE, buffer.get_record_count() * sizeof(Wrapped<R>));
+        auto temp_buffer = (Wrapped<R> *) psudb::sf_aligned_calloc(CACHELINE_SIZE, buffer.get_record_count(), sizeof(Wrapped<R>));
         buffer.copy_to_buffer((byte *) temp_buffer);
 
         auto base = temp_buffer;
@@ -99,6 +102,7 @@ public:
 
             base++;
         }
+
         TIMER_STOP();
         auto copy_time = TIMER_RESULT();
 
@@ -112,7 +116,7 @@ public:
         free(temp_buffer);
     }
 
-    ISAMTree(ISAMTree** runs, size_t len)
+    ISAMTree(std::vector<ISAMTree*> &shards)
         : m_bf(nullptr)
         , m_isam_nodes(nullptr)
         , m_root(nullptr)
@@ -124,19 +128,19 @@ public:
         , m_data(nullptr)
     {
         std::vector<Cursor<Wrapped<R>>> cursors;
-        cursors.reserve(len);
+        cursors.reserve(shards.size());
 
-        PriorityQueue<Wrapped<R>> pq(len);
+        PriorityQueue<Wrapped<R>> pq(shards.size());
 
         size_t attempt_reccnt = 0;
         size_t tombstone_count = 0;
 
-        for (size_t i = 0; i < len; ++i) {
-            if (runs[i]) {
-                auto base = runs[i]->get_data();
-                cursors.emplace_back(Cursor<Wrapped<R>>{base, base + runs[i]->get_record_count(), 0, runs[i]->get_record_count()});
-                attempt_reccnt += runs[i]->get_record_count();
-                tombstone_count += runs[i]->get_tombstone_count();
+        for (size_t i = 0; i < shards.size(); ++i) {
+            if (shards[i]) {
+                auto base = shards[i]->get_data();
+                cursors.emplace_back(Cursor<Wrapped<R>>{base, base + shards[i]->get_record_count(), 0, shards[i]->get_record_count()});
+                attempt_reccnt += shards[i]->get_record_count();
+                tombstone_count += shards[i]->get_tombstone_count();
                 pq.push(cursors[i].ptr, i);
             } else {
                 cursors.emplace_back(Cursor<Wrapped<R>>{nullptr, nullptr, 0, 0});
@@ -144,10 +148,9 @@ public:
         }
 
         m_bf = new BloomFilter<R>(BF_FPR, tombstone_count, BF_HASH_FUNCS);
-
-        m_alloc_size = (attempt_reccnt * sizeof(Wrapped<R>)) + (CACHELINE_SIZE - (attempt_reccnt * sizeof(Wrapped<R>)) % CACHELINE_SIZE);
-        assert(m_alloc_size % CACHELINE_SIZE == 0);
-        m_data = (Wrapped<R>*)std::aligned_alloc(CACHELINE_SIZE, m_alloc_size);
+        m_alloc_size = psudb::sf_aligned_alloc(CACHELINE_SIZE,
+                                               attempt_reccnt * sizeof(Wrapped<R>),
+                                               (byte **) &m_data);
 
         while (pq.size()) {
             auto now = pq.peek();
@@ -165,6 +168,8 @@ public:
                 if (!cursor.ptr->is_deleted()) {
                     m_data[m_reccnt++] = *cursor.ptr;
                     if (cursor.ptr->is_tombstone()) {
+                        //fprintf(stderr, "ISAM: Tombstone from shard %ld next record from shard %ld\n",
+                        //now.version, next.version);
                         ++m_tombstone_cnt;
                         m_bf->insert(cursor.ptr->rec);
                     }
-- cgit v1.2.3
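With this change the merge constructor takes a vector of shard pointers
rather than a raw array and a length. A hypothetical call site follows;
Rec, shard_a, and shard_b are stand-ins for this fragment, not names from
the framework.

    // Compact two existing shards into one. Null entries are tolerated:
    // the constructor emits an empty cursor for them and the merge simply
    // skips them.
    std::vector<ISAMTree<Rec>*> shards = {shard_a, shard_b, nullptr};
    ISAMTree<Rec> merged(shards);

    // The merged count can be smaller than the sum of the inputs, since
    // record/tombstone pairs cancel during the priority-queue merge.
    assert(merged.get_record_count()
            <= shard_a->get_record_count() + shard_b->get_record_count());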
From 2c5d549b3618b9ea72e6eece4cb4f3da5a6811a8 Mon Sep 17 00:00:00 2001
From: Douglas Rumbaugh
Date: Wed, 7 Feb 2024 13:42:34 -0500
Subject: Fully realized shard concept interface

---
 include/shard/ISAMTree.h | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

(limited to 'include/shard/ISAMTree.h')

diff --git a/include/shard/ISAMTree.h b/include/shard/ISAMTree.h
index 932e767..7de9cb1 100644
--- a/include/shard/ISAMTree.h
+++ b/include/shard/ISAMTree.h
@@ -25,6 +25,7 @@ using psudb::CACHELINE_SIZE;
 using psudb::BloomFilter;
 using psudb::PriorityQueue;
 using psudb::queue_record;
+using psudb::byte;
 
 namespace de {
 
@@ -222,9 +223,6 @@ public:
         return m_tombstone_cnt;
     }
 
-    const Wrapped<R>* get_record_at(size_t idx) const {
-        return (idx < m_reccnt) ? m_data + idx : nullptr;
-    }
 
     size_t get_memory_usage() {
         return m_alloc_size;
@@ -234,6 +232,7 @@ public:
         return m_bf->memory_usage();
     }
 
+    /* SortedShardInterface methods */
     size_t get_lower_bound(const K& key) const {
         const InternalNode* now = m_root;
         while (!is_leaf(reinterpret_cast<const byte*>(now))) {
@@ -274,6 +273,9 @@ public:
         return pos - m_data;
     }
 
+    const Wrapped<R>* get_record_at(size_t idx) const {
+        return (idx < m_reccnt) ? m_data + idx : nullptr;
+    }
 
 private:
     void build_internal_levels() {
-- cgit v1.2.3
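The methods grouped under the SortedShardInterface comment compose directly
into a range scan: get_lower_bound returns the index of the first record with
key >= the probe, and get_upper_bound the index one past the last record with
key <= the probe. A hypothetical scan over [low, high], where shard, low,
high, Rec, and process() are stand-ins for this sketch:

    size_t start = shard.get_lower_bound(low);   // first idx with key >= low
    size_t stop  = shard.get_upper_bound(high);  // first idx with key >  high

    for (size_t idx = start; idx < stop; idx++) {
        const Wrapped<Rec> *wrec = shard.get_record_at(idx);
        if (wrec && !wrec->is_deleted() && !wrec->is_tombstone()) {
            process(wrec->rec);  // hypothetical consumer
        }
    }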
From bd74e27b28bd95267ce50d2e4b6f12b51d9b6aae Mon Sep 17 00:00:00 2001
From: Douglas Rumbaugh
Date: Wed, 7 Feb 2024 17:23:23 -0500
Subject: Cleaned up shard files (except VPTree)

Cleaned up shard implementations, fixed a few bugs, and set up some
tests. There's still some work to be done in creating tests for the
weighted sampling operations for the alias and aug btree shards.
---
 include/shard/ISAMTree.h | 105 +++++-------------
 1 file changed, 11 insertions(+), 94 deletions(-)

(limited to 'include/shard/ISAMTree.h')

diff --git a/include/shard/ISAMTree.h b/include/shard/ISAMTree.h
index 7de9cb1..33ba82f 100644
--- a/include/shard/ISAMTree.h
+++ b/include/shard/ISAMTree.h
@@ -17,9 +17,8 @@
 #include "framework/ShardRequirements.h"
 
 #include "util/bf_config.h"
-#include "psu-ds/PriorityQueue.h"
-#include "util/Cursor.h"
-#include "psu-util/timer.h"
+#include "psu-ds/BloomFilter.h"
+#include "util/SortedMerge.h"
 
 using psudb::CACHELINE_SIZE;
 using psudb::BloomFilter;
 using psudb::PriorityQueue;
 using psudb::queue_record;
@@ -61,60 +60,18 @@ public:
         , m_alloc_size(0)
         , m_data(nullptr)
     {
-        TIMER_INIT();
-
         m_alloc_size = psudb::sf_aligned_alloc(CACHELINE_SIZE,
                                                buffer.get_record_count() *
                                                    sizeof(Wrapped<R>),
                                                (byte**) &m_data);
 
-        TIMER_START();
-        auto temp_buffer = (Wrapped<R> *) psudb::sf_aligned_calloc(CACHELINE_SIZE, buffer.get_record_count(), sizeof(Wrapped<R>));
-        buffer.copy_to_buffer((byte *) temp_buffer);
-
-        auto base = temp_buffer;
-        auto stop = base + buffer.get_record_count();
-        std::sort(base, stop, std::less<Wrapped<R>>());
-        TIMER_STOP();
-
-        auto sort_time = TIMER_RESULT();
-
-        TIMER_START();
-        while (base < stop) {
-            if (!base->is_tombstone() && (base + 1 < stop)
-                && base->rec == (base + 1)->rec && (base + 1)->is_tombstone()) {
-                base += 2;
-                continue;
-            } else if (base->is_deleted()) {
-                base += 1;
-                continue;
-            }
-
-            // FIXME: this shouldn't be necessary, but the tagged record
-            // bypass doesn't seem to be working on this code-path, so this
-            // ensures that tagged records from the buffer are able to be
-            // dropped, eventually. It should only need to be &= 1.
-            base->header &= 3;
-            m_data[m_reccnt++] = *base;
-            if (m_bf && base->is_tombstone()) {
-                ++m_tombstone_cnt;
-                m_bf->insert(base->rec);
-            }
-
-            base++;
-        }
-
-        TIMER_STOP();
-        auto copy_time = TIMER_RESULT();
+        auto res = sorted_array_from_bufferview(std::move(buffer), m_data, m_bf);
+        m_reccnt = res.record_count;
+        m_tombstone_cnt = res.tombstone_count;
 
-        TIMER_START();
         if (m_reccnt > 0) {
             build_internal_levels();
         }
-        TIMER_STOP();
-        auto level_time = TIMER_RESULT();
-
-        free(temp_buffer);
     }
 
     ISAMTree(std::vector<ISAMTree*> &shards)
@@ -128,58 +85,18 @@ public:
         , m_alloc_size(0)
         , m_data(nullptr)
     {
-        std::vector<Cursor<Wrapped<R>>> cursors;
-        cursors.reserve(shards.size());
-
-        PriorityQueue<Wrapped<R>> pq(shards.size());
-
         size_t attempt_reccnt = 0;
         size_t tombstone_count = 0;
-
-        for (size_t i = 0; i < shards.size(); ++i) {
-            if (shards[i]) {
-                auto base = shards[i]->get_data();
-                cursors.emplace_back(Cursor<Wrapped<R>>{base, base + shards[i]->get_record_count(), 0, shards[i]->get_record_count()});
-                attempt_reccnt += shards[i]->get_record_count();
-                tombstone_count += shards[i]->get_tombstone_count();
-                pq.push(cursors[i].ptr, i);
-            } else {
-                cursors.emplace_back(Cursor<Wrapped<R>>{nullptr, nullptr, 0, 0});
-            }
-        }
+        auto cursors = build_cursor_vec<R, ISAMTree>(shards, &attempt_reccnt, &tombstone_count);
 
         m_bf = new BloomFilter<R>(BF_FPR, tombstone_count, BF_HASH_FUNCS);
         m_alloc_size = psudb::sf_aligned_alloc(CACHELINE_SIZE,
                                                attempt_reccnt * sizeof(Wrapped<R>),
                                                (byte **) &m_data);
 
-        while (pq.size()) {
-            auto now = pq.peek();
-            auto next = pq.size() > 1 ? pq.peek(1) : queue_record<Wrapped<R>>{nullptr, 0};
-            if (!now.data->is_tombstone() && next.data != nullptr &&
-                now.data->rec == next.data->rec && next.data->is_tombstone()) {
-
-                pq.pop(); pq.pop();
-                auto& cursor1 = cursors[now.version];
-                auto& cursor2 = cursors[next.version];
-                if (advance_cursor(cursor1)) pq.push(cursor1.ptr, now.version);
-                if (advance_cursor(cursor2)) pq.push(cursor2.ptr, next.version);
-            } else {
-                auto& cursor = cursors[now.version];
-                if (!cursor.ptr->is_deleted()) {
-                    m_data[m_reccnt++] = *cursor.ptr;
-                    if (cursor.ptr->is_tombstone()) {
-                        //fprintf(stderr, "ISAM: Tombstone from shard %ld next record from shard %ld\n",
-                        //now.version, next.version);
-                        ++m_tombstone_cnt;
-                        m_bf->insert(cursor.ptr->rec);
-                    }
-                }
-                pq.pop();
-
-                if (advance_cursor(cursor)) pq.push(cursor.ptr, now.version);
-            }
-        }
+        auto res = sorted_array_merge(cursors, m_data, m_bf);
+        m_reccnt = res.record_count;
+        m_tombstone_cnt = res.tombstone_count;
 
         if (m_reccnt > 0) {
             build_internal_levels();
         }
@@ -225,11 +142,11 @@ public:
     }
 
     size_t get_memory_usage() {
-        return m_alloc_size;
+        return m_alloc_size + m_internal_node_cnt * NODE_SZ;
     }
 
     size_t get_aux_memory_usage() {
-        return m_bf->memory_usage();
+        return (m_bf) ? m_bf->memory_usage() : 0;
     }
 
     /* SortedShardInterface methods */
-- cgit v1.2.3
From 0c4a80b90e1f25b42e00a8af57131040203d2f89 Mon Sep 17 00:00:00 2001
From: Douglas Rumbaugh
Date: Thu, 8 Feb 2024 13:10:29 -0500
Subject: Added compiler fence to block reordering

I'm reasonably certain that this is a compiler bug...
---
 include/shard/ISAMTree.h | 6 ++++++
 1 file changed, 6 insertions(+)

(limited to 'include/shard/ISAMTree.h')

diff --git a/include/shard/ISAMTree.h b/include/shard/ISAMTree.h
index 33ba82f..9458b1f 100644
--- a/include/shard/ISAMTree.h
+++ b/include/shard/ISAMTree.h
@@ -65,6 +65,12 @@ public:
                                                    sizeof(Wrapped<R>),
                                                (byte**) &m_data);
 
+        /*
+         * without this, gcc seems to hoist the building of the array
+         * _above_ its allocation under -O3, resulting in memfaults.
+         */
+        asm volatile ("" ::: "memory");
+
         auto res = sorted_array_from_bufferview(std::move(buffer), m_data, m_bf);
         m_reccnt = res.record_count;
         m_tombstone_cnt = res.tombstone_count;
-- cgit v1.2.3

From 402fc269c0aaa671d84a6d15918735ad4b90e6b2 Mon Sep 17 00:00:00 2001
From: Douglas Rumbaugh
Date: Fri, 9 Feb 2024 12:30:21 -0500
Subject: Comment updates/fixes

---
 include/shard/ISAMTree.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'include/shard/ISAMTree.h')

diff --git a/include/shard/ISAMTree.h b/include/shard/ISAMTree.h
index 9458b1f..3763271 100644
--- a/include/shard/ISAMTree.h
+++ b/include/shard/ISAMTree.h
@@ -8,6 +8,7 @@
  *
  * A shard shim around an in-memory ISAM tree.
  *
+ * TODO: The code in this file is very poorly commented.
  */
 #pragma once
-- cgit v1.2.3
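On the fence itself: asm volatile ("" ::: "memory") emits no instructions; it
is a GCC/Clang compiler barrier whose "memory" clobber forbids the optimizer
from moving loads or stores across it, which is exactly what the constructor
needs between allocating m_data and building into it. A portable spelling of
the same compile-time-only constraint is sketched below; it is offered as an
alternative, not what the patch uses.

    #include <atomic>

    // Constrains compiler reordering without emitting a hardware fence,
    // matching the intent of asm volatile ("" ::: "memory") above.
    inline void compiler_barrier() {
        std::atomic_signal_fence(std::memory_order_seq_cst);
    }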