From 9fe190f5d500e22b0894095e7c917f9c652e0a64 Mon Sep 17 00:00:00 2001
From: Douglas Rumbaugh
Date: Wed, 20 Mar 2024 17:30:14 -0400
Subject: Updates/progress towards succinct trie support

---
 include/framework/interface/Record.h        | 12 +++++++--
 include/framework/structure/MutableBuffer.h |  5 ++--
 include/shard/FSTrie.h                      | 39 +++++++++++++++++++++--------
 3 files changed, 41 insertions(+), 15 deletions(-)

(limited to 'include')

diff --git a/include/framework/interface/Record.h b/include/framework/interface/Record.h
index 5b9f307..f4105c6 100644
--- a/include/framework/interface/Record.h
+++ b/include/framework/interface/Record.h
@@ -97,9 +97,17 @@ template <typename K, typename V>
 struct Record {
     K key;
     V value;
-    uint32_t header = 0;
 
-    inline bool operator<(const Record& other) const {
+    Record &operator=(const Record &other) {
+        this->key = K();
+
+        this->key = other.key;
+        this->value = other.value;
+
+        return *this;
+    }
+
+    inline bool operator<(const Record& other) const {
         return key < other.key || (key == other.key && value < other.value);
     }
 
diff --git a/include/framework/structure/MutableBuffer.h b/include/framework/structure/MutableBuffer.h
index c0c87cc..1ed0200 100644
--- a/include/framework/structure/MutableBuffer.h
+++ b/include/framework/structure/MutableBuffer.h
@@ -50,7 +50,8 @@ public:
         , m_tail(0)
         , m_head({0, 0})
         , m_old_head({high_watermark, 0})
-        , m_data((Wrapped<R> *) psudb::sf_aligned_alloc(CACHELINE_SIZE, m_cap * sizeof(Wrapped<R>)))
+        //, m_data((Wrapped<R> *) psudb::sf_aligned_alloc(CACHELINE_SIZE, m_cap * sizeof(Wrapped<R>)))
+        , m_data(new Wrapped<R>[m_cap]())
         , m_tombstone_filter(new psudb::BloomFilter(BF_FPR, m_hwm, BF_HASH_FUNCS))
         , m_tscnt(0)
         , m_old_tscnt(0)
@@ -61,7 +62,7 @@ public:
     }
 
     ~MutableBuffer() {
-        free(m_data);
+        delete[] m_data;
         delete m_tombstone_filter;
     }
 
diff --git a/include/shard/FSTrie.h b/include/shard/FSTrie.h
index 11a232d..62aa0b7 100644
--- a/include/shard/FSTrie.h
+++ b/include/shard/FSTrie.h
@@ -43,10 +43,9 @@ public:
         , m_reccnt(0)
         , m_alloc_size(0)
     {
-        m_alloc_size = psudb::sf_aligned_alloc(CACHELINE_SIZE,
-                                               buffer.get_record_count() *
-                                                   sizeof(Wrapped<R>),
-                                               (byte**) &m_data);
+        m_data = new Wrapped<R>[buffer.get_record_count()]();
+        m_alloc_size = sizeof(Wrapped<R>) * buffer.get_record_count();
+
         size_t cnt = 0;
         std::vector keys;
         keys.reserve(buffer.get_record_count());
@@ -62,10 +61,15 @@ public:
          * apply tombstone/deleted record filtering, as well as any possible
          * per-record processing that is required by the shard being built.
          */
+        /*
         auto temp_buffer = (Wrapped<R> *) psudb::sf_aligned_calloc(CACHELINE_SIZE, buffer.get_record_count(), sizeof(Wrapped<R>));
-        buffer.copy_to_buffer((byte *) temp_buffer);
+        */
+        auto temp_buffer = new Wrapped<R>[buffer.get_record_count()]();
+        for (size_t i=0; i) {
@@ -88,6 +94,10 @@ public:
             cnt++;
         }
 
+        for (size_t i=0; i) {
@@ -96,7 +106,7 @@ public:
             m_fst.load(keys, values);
         }
 
-        free(temp_buffer);
+        delete[] temp_buffer;
     }
 
     FSTrie(std::vector &shards)
@@ -108,9 +118,8 @@ public:
         size_t tombstone_count = 0;
         auto cursors = build_cursor_vec(shards, &attemp_reccnt, &tombstone_count);
 
-        m_alloc_size = psudb::sf_aligned_alloc(CACHELINE_SIZE,
-                                               attemp_reccnt * sizeof(Wrapped<R>),
-                                               (byte **) &m_data);
+        m_data = new Wrapped<R>[attemp_reccnt]();
+        m_alloc_size = attemp_reccnt * sizeof(Wrapped<R>);
 
         std::vector keys;
         keys.reserve(attemp_reccnt);
@@ -165,6 +174,10 @@ public:
             }
         }
 
+        for (size_t i=0; i 0) {
             m_fst = FST();
             if constexpr (std::is_same_v) {
@@ -176,18 +189,22 @@ public:
     }
 
     ~FSTrie() {
-        free(m_data);
+        delete[] m_data;
     }
 
     Wrapped<R> *point_lookup(const R &rec, bool filter=false) {
         size_t idx;
         bool res;
         if constexpr (std::is_same_v) {
-            res = m_fst.lookup(rec.key.c_str(), rec.key.size(), idx);
+            res = m_fst.lookup((uint8_t*)rec.key.c_str(), rec.key.size(), idx);
         } else {
             res = m_fst.lookup(rec.key, idx);
         }
 
+        if (res && m_data[idx].rec.key != rec.key) {
+            fprintf(stderr, "ERROR!\n");
+        }
+
         if (res) {
             return m_data + idx;
         }
--
cgit v1.2.3
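Context for the allocator changes in this patch: the buffer and shards previously grabbed raw aligned memory with psudb::sf_aligned_alloc/sf_aligned_calloc and released it with free(), which is only safe while Wrapped<R> is trivially copyable and trivially destructible. Once the record key can be a std::string (as a succinct-trie shard needs), the storage has to come from new[] and go back through delete[] so constructors and destructors actually run. Below is a minimal standalone sketch of that distinction; StringRecord and main() are hypothetical placeholders, not code from this repository.

// Illustrative sketch only -- StringRecord is a hypothetical stand-in for a
// record type with a std::string key; none of this is framework code.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

struct StringRecord {
    std::string key;   // non-trivial member: owns heap memory
    uint64_t value;
};

int main() {
    std::vector<StringRecord> source = {{"apple", 1}, {"banana", 2}, {"cherry", 3}};

    /* Roughly the old pattern: allocate raw bytes and treat them as records.
     * No StringRecord objects ever begin their lifetime in that storage, so
     * assigning to data[i].key would be undefined behavior, and free() would
     * never run the std::string destructors:
     *
     *   auto *data = (StringRecord *) std::malloc(source.size() * sizeof(StringRecord));
     */

    /* The pattern the patch moves to: new[]() value-initializes every element,
     * so each std::string is a live, empty object before it is assigned to,
     * and delete[] destroys the elements before releasing the array. */
    auto *data = new StringRecord[source.size()]();
    for (size_t i = 0; i < source.size(); i++) {
        data[i] = source[i];   // well-defined copy assignment, element by element
    }

    for (size_t i = 0; i < source.size(); i++) {
        std::printf("%s -> %llu\n", data[i].key.c_str(),
                    (unsigned long long) data[i].value);
    }

    delete[] data;   // runs ~StringRecord(), reclaiming each key's buffer
    return 0;
}

The same reasoning presumably motivates the other pieces of the patch: buffer.copy_to_buffer((byte *) temp_buffer), a byte-wise copy, is replaced by an element-by-element assignment loop, and Record gains an explicit operator= so that copying a record goes through the key type's own assignment rather than a raw memory copy.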