author     Douglas Rumbaugh <dbr4@psu.edu>    2023-05-09 17:59:37 -0400
committer  Douglas Rumbaugh <dbr4@psu.edu>    2023-05-09 17:59:37 -0400
commit     418e9b079e559c86f3a5b276f712ad2f5d66533c (patch)
tree       943fa44b741a861a85ea43023a49fac112d26dbb
parent     bc403d2d4f81c97a468e92d3b0981590fbe032cc (diff)
download   dynamic-extension-418e9b079e559c86f3a5b276f712ad2f5d66533c.tar.gz
Ported over IRS with unit tests
-rw-r--r--   CMakeLists.txt                        4
-rw-r--r--   include/framework/MutableBuffer.h    29
-rw-r--r--   include/shard/MemISAM.h             363
-rw-r--r--   include/util/Cursor.h                 2
-rw-r--r--   tests/internal_level_tests.cpp        8
-rw-r--r--   tests/memisam_tests.cpp             227
-rw-r--r--   tests/testing.h                      27
-rw-r--r--   tests/wirs_tests.cpp                 14
8 files changed, 650 insertions(+), 24 deletions(-)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index c93f75a..3f551a3 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -49,6 +49,10 @@ if (tests)
target_link_libraries(dynamic_extension_tests PUBLIC gsl check subunit pthread)
target_include_directories(dynamic_extension_tests PRIVATE include)
+ add_executable(memisam_tests ${CMAKE_CURRENT_SOURCE_DIR}/tests/memisam_tests.cpp)
+ target_link_libraries(memisam_tests PUBLIC gsl check subunit pthread)
+ target_include_directories(memisam_tests PRIVATE include)
+
endif()
diff --git a/include/framework/MutableBuffer.h b/include/framework/MutableBuffer.h
index b3acfee..42bc9a7 100644
--- a/include/framework/MutableBuffer.h
+++ b/include/framework/MutableBuffer.h
@@ -14,6 +14,7 @@
#include <cassert>
#include <numeric>
#include <algorithm>
+#include <type_traits>
#include "util/base.h"
#include "util/bf_config.h"
@@ -46,7 +47,32 @@ public:
if (m_tombstone_filter) delete m_tombstone_filter;
}
- int append(const K& key, const V& value, W weight = 1, bool is_tombstone = false) {
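+ // Unweighted append: enabled via SFINAE only when the buffer's weight
+ // type W is void, so records are inserted without a weight argument.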
+ template <typename W_=W,
+ typename =std::enable_if_t<std::is_same<W_, void>::value>>
+ int append(const K& key, const V& value, bool is_tombstone = false) {
+ static_assert(std::is_same<W_, void>::value);
+ if (is_tombstone && m_tombstonecnt + 1 > m_tombstone_cap) return 0;
+
+ int32_t pos = 0;
+ if ((pos = try_advance_tail()) == -1) return 0;
+
+ m_data[pos].key = key;
+ m_data[pos].value = value;
+ m_data[pos].header = ((pos << 2) | (is_tombstone ? 1 : 0));
+
+ if (is_tombstone) {
+ m_tombstonecnt.fetch_add(1);
+ if (m_tombstone_filter) m_tombstone_filter->insert(key);
+ }
+
+ m_weight.fetch_add(1);
+ return 1;
+ }
+
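+ // Weighted append: enabled only when W is a non-void weight type; the
+ // record's weight defaults to 1.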
+ template <typename W_=W,
+ typename = std::enable_if_t<!std::is_same<W_, void>::value>>
+ int append(const K& key, const V& value, W_ weight=1, bool is_tombstone = false) {
+ static_assert(!std::is_same<W_, void>::value);
if (is_tombstone && m_tombstonecnt + 1 > m_tombstone_cap) return 0;
int32_t pos = 0;
@@ -82,6 +108,7 @@ public:
return 1;
}
+
bool truncate() {
m_tombstonecnt.store(0);
m_reccnt.store(0);
diff --git a/include/shard/MemISAM.h b/include/shard/MemISAM.h
new file mode 100644
index 0000000..6d97f95
--- /dev/null
+++ b/include/shard/MemISAM.h
@@ -0,0 +1,363 @@
+/*
+ * include/shard/MemISAM.h
+ *
+ * Copyright (C) 2023 Douglas Rumbaugh <drumbaugh@psu.edu>
+ * Dong Xie <dongx@psu.edu>
+ *
+ * All rights reserved. Published under the Modified BSD License.
+ *
+ */
+#pragma once
+
+#include <vector>
+#include <cassert>
+#include <queue>
+#include <memory>
+#include <string>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <algorithm>
+
+#include "framework/MutableBuffer.h"
+#include "ds/PriorityQueue.h"
+#include "util/Cursor.h"
+#include "util/timer.h"
+
+namespace de {
+
+thread_local size_t mrun_cancelations = 0;
+
+template <typename K, typename V>
+class MemISAM {
+private:
+typedef Record<K, V> Rec;
+
+constexpr static size_t inmem_isam_node_size = 256;
+constexpr static size_t inmem_isam_fanout = inmem_isam_node_size / (sizeof(K) + sizeof(char*));
+
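+ // A fixed-size (256 byte) internal node: separator keys plus raw child
+ // pointers, where a child may be another internal node or a run of
+ // leaf records.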
+struct InMemISAMNode {
+ K keys[inmem_isam_fanout];
+ char* child[inmem_isam_fanout];
+};
+
+constexpr static size_t inmem_isam_leaf_fanout = inmem_isam_node_size / sizeof(Rec);
+constexpr static size_t inmem_isam_node_keyskip = sizeof(K) * inmem_isam_fanout;
+
+static_assert(sizeof(InMemISAMNode) == inmem_isam_node_size, "node size does not match");
+
+
+public:
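+ // Reconstruct a shard from a previously persisted data file, rebuilding
+ // the internal ISAM levels and repopulating the bloom filter with the
+ // keys of any tombstone records.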
+ MemISAM(std::string data_fname, size_t record_cnt, size_t tombstone_cnt, BloomFilter *bf, bool tagging)
+ : m_reccnt(record_cnt), m_tombstone_cnt(tombstone_cnt), m_deleted_cnt(0), m_tagging(tagging) {
+
+ // read the stored data file
+ size_t alloc_size = (record_cnt * sizeof(Rec)) + (CACHELINE_SIZE - (record_cnt * sizeof(Rec)) % CACHELINE_SIZE);
+ assert(alloc_size % CACHELINE_SIZE == 0);
+ m_data = (Rec*)std::aligned_alloc(CACHELINE_SIZE, alloc_size);
+
+ FILE *file = fopen(data_fname.c_str(), "rb");
+ assert(file);
+ auto res = fread(m_data, sizeof(Rec), m_reccnt, file);
+ assert (res == m_reccnt);
+ fclose(file);
+
+ // We can't persist the internal structure itself, as it is built from
+ // raw pointers that are no longer valid once the data is reloaded at a
+ // different address, so we just rebuild it here.
+ this->build_internal_levels();
+
+ // rebuild the bloom filter
+ for (size_t i=0; i<m_reccnt; i++) {
+ auto rec = this->get_record_at(i);
+ if (rec->is_tombstone()) {
+ bf->insert(rec->key);
+ }
+ }
+ }
+
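+ // Build a shard from a mutable buffer: sort the buffer's records, cancel
+ // adjacent record/tombstone pairs (or drop tagged records when tagging is
+ // enabled), and then construct the internal ISAM levels.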
+ MemISAM(MutableBuffer<K,V>* buffer, BloomFilter* bf, bool tagging)
+ :m_reccnt(0), m_tombstone_cnt(0), m_isam_nodes(nullptr), m_deleted_cnt(0), m_tagging(tagging) {
+
+ size_t alloc_size = (buffer->get_record_count() * sizeof(Rec)) + (CACHELINE_SIZE - (buffer->get_record_count() * sizeof(Rec)) % CACHELINE_SIZE);
+ assert(alloc_size % CACHELINE_SIZE == 0);
+ m_data = (Rec*)std::aligned_alloc(CACHELINE_SIZE, alloc_size);
+
+ TIMER_INIT();
+
+ size_t offset = 0;
+ m_reccnt = 0;
+ TIMER_START();
+ Rec* base = buffer->sorted_output();
+ TIMER_STOP();
+
+ auto sort_time = TIMER_RESULT();
+ Rec* stop = base + buffer->get_record_count();
+
+ TIMER_START();
+ while (base < stop) {
+ if (!m_tagging) {
+ if (!base->is_tombstone() && (base + 1 < stop)
+ && base->match(base + 1) && (base + 1)->is_tombstone()) {
+ base += 2;
+ mrun_cancelations++;
+ continue;
+ }
+ } else if (base->get_delete_status()) {
+ base += 1;
+ continue;
+ }
+
+ // Strip the buffer-position bits from the header, keeping only the tombstone flag.
+ base->header &= 1;
+ m_data[m_reccnt++] = *base;
+ if (bf && base->is_tombstone()) {
+ ++m_tombstone_cnt;
+ bf->insert(base->key);
+ }
+
+ base++;
+ }
+ TIMER_STOP();
+ auto copy_time = TIMER_RESULT();
+
+ TIMER_START();
+ if (m_reccnt > 0) {
+ build_internal_levels();
+ }
+ TIMER_STOP();
+ auto level_time = TIMER_RESULT();
+
+ fprintf(stdout, "%ld %ld %ld\n", sort_time, copy_time, level_time);
+ }
+
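+ // Merge several shards into one using a priority queue over per-shard
+ // cursors, cancelling matching record/tombstone pairs as they meet at the
+ // head of the queue (or dropping tagged records when tagging is enabled).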
+ MemISAM(MemISAM** runs, size_t len, BloomFilter* bf, bool tagging)
+ :m_reccnt(0), m_tombstone_cnt(0), m_deleted_cnt(0), m_isam_nodes(nullptr), m_tagging(tagging) {
+ std::vector<Cursor<K,V>> cursors;
+ cursors.reserve(len);
+
+ PriorityQueue<K,V> pq(len);
+
+ size_t attemp_reccnt = 0;
+
+ for (size_t i = 0; i < len; ++i) {
+ if (runs[i]) {
+ auto base = runs[i]->sorted_output();
+ cursors.emplace_back(Cursor<K,V>{base, base + runs[i]->get_record_count(), 0, runs[i]->get_record_count()});
+ attemp_reccnt += runs[i]->get_record_count();
+ pq.push(cursors[i].ptr, i);
+ } else {
+ cursors.emplace_back(Cursor<K,V>{nullptr, nullptr, 0, 0});
+ }
+ }
+
+ size_t alloc_size = (attemp_reccnt * sizeof(Rec)) + (CACHELINE_SIZE - (attemp_reccnt * sizeof(Rec)) % CACHELINE_SIZE);
+ assert(alloc_size % CACHELINE_SIZE == 0);
+ m_data = (Rec*)std::aligned_alloc(CACHELINE_SIZE, alloc_size);
+
+ size_t offset = 0;
+
+ while (pq.size()) {
+ auto now = pq.peek();
+ auto next = pq.size() > 1 ? pq.peek(1) : queue_record<K,V>{nullptr, 0};
+ if (!m_tagging && !now.data->is_tombstone() && next.data != nullptr &&
+ now.data->match(next.data) && next.data->is_tombstone()) {
+
+ pq.pop(); pq.pop();
+ auto& cursor1 = cursors[now.version];
+ auto& cursor2 = cursors[next.version];
+ if (advance_cursor(cursor1)) pq.push(cursor1.ptr, now.version);
+ if (advance_cursor(cursor2)) pq.push(cursor2.ptr, next.version);
+ } else {
+ auto& cursor = cursors[now.version];
+ if (!m_tagging || !cursor.ptr->get_delete_status()) {
+ m_data[m_reccnt++] = *cursor.ptr;
+ if (cursor.ptr->is_tombstone()) {
+ ++m_tombstone_cnt;
+ bf->insert(cursor.ptr->key);
+ }
+ }
+ pq.pop();
+
+ if (advance_cursor(cursor)) pq.push(cursor.ptr, now.version);
+ }
+ }
+
+ if (m_reccnt > 0) {
+ build_internal_levels();
+ }
+ }
+
+ ~MemISAM() {
+ if (m_data) free(m_data);
+ if (m_isam_nodes) free(m_isam_nodes);
+ }
+
+ Rec* sorted_output() const {
+ return m_data;
+ }
+
+ size_t get_record_count() const {
+ return m_reccnt;
+ }
+
+ size_t get_tombstone_count() const {
+ return m_tombstone_cnt;
+ }
+
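+ // Tagged delete: locate the exact key/value pair and set its delete bit
+ // in place; returns false if no matching record exists.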
+ bool delete_record(const K& key, const V& val) {
+ size_t idx = get_lower_bound(key);
+ if (idx >= m_reccnt) {
+ return false;
+ }
+
+ while (idx < m_reccnt && m_data[idx].lt(key, val)) ++idx;
+
+ if (idx < m_reccnt && m_data[idx].match(key, val, false)) {
+ m_data[idx].set_delete_status();
+ m_deleted_cnt++;
+ return true;
+ }
+
+ return false;
+ }
+
+ const Rec* get_record_at(size_t idx) const {
+ return (idx < m_reccnt) ? m_data + idx : nullptr;
+ }
+
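+ // Return the index of the first record whose key is at least the search
+ // key (or the record count if no such record exists).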
+ size_t get_lower_bound(const K& key) const {
+ const InMemISAMNode* now = m_root;
+ while (!is_leaf(reinterpret_cast<const char*>(now))) {
+ const InMemISAMNode* next = nullptr;
+ for (size_t i = 0; i < inmem_isam_fanout - 1; ++i) {
+ if (now->child[i + 1] == nullptr || key <= now->keys[i]) {
+ next = reinterpret_cast<InMemISAMNode*>(now->child[i]);
+ break;
+ }
+ }
+
+ now = next ? next : reinterpret_cast<const InMemISAMNode*>(now->child[inmem_isam_fanout - 1]);
+ }
+
+ const Rec* pos = reinterpret_cast<const Rec*>(now);
+ while (pos < m_data + m_reccnt && pos->key < key) pos++;
+
+ return pos - m_data;
+ }
+
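+ // Return the index of the first record whose key is strictly greater than
+ // the search key (or the record count if no such record exists).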
+ size_t get_upper_bound(const K& key) const {
+ const InMemISAMNode* now = m_root;
+ while (!is_leaf(reinterpret_cast<const char*>(now))) {
+ const InMemISAMNode* next = nullptr;
+ for (size_t i = 0; i < inmem_isam_fanout - 1; ++i) {
+ if (now->child[i + 1] == nullptr || key < now->keys[i]) {
+ next = reinterpret_cast<InMemISAMNode*>(now->child[i]);
+ break;
+ }
+ }
+
+ now = next ? next : reinterpret_cast<const InMemISAMNode*>(now->child[inmem_isam_fanout - 1]);
+ }
+
+ const Rec* pos = reinterpret_cast<const Rec*>(now);
+ while (pos < m_data + m_reccnt && pos->key <= key) pos++;
+
+ return pos - m_data;
+ }
+
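+ // Return true if this shard contains a tombstone for the given key/value
+ // pair.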
+ bool check_tombstone(const K& key, const V& val) const {
+ size_t idx = get_lower_bound(key);
+ if (idx >= m_reccnt) {
+ return false;
+ }
+
+ Rec* ptr = m_data + idx;
+
+ while (ptr < m_data + m_reccnt && ptr->lt(key, val)) ptr++;
+ return ptr < m_data + m_reccnt && ptr->match(key, val, true);
+ }
+
+ size_t get_memory_utilization() {
+ return m_reccnt * sizeof(Rec) + m_internal_node_cnt * inmem_isam_node_size;
+ }
+
+ void persist_to_file(std::string data_fname) {
+ FILE *file = fopen(data_fname.c_str(), "wb");
+ assert(file);
+ fwrite(m_data, sizeof(Rec), m_reccnt, file);
+ fclose(file);
+ }
+
+private:
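+ // Bottom-up construction of the static ISAM structure: build the first
+ // internal level over fixed-size runs of leaf records, then repeatedly
+ // build levels over the previous one until a single root node remains.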
+ void build_internal_levels() {
+ size_t n_leaf_nodes = m_reccnt / inmem_isam_leaf_fanout + (m_reccnt % inmem_isam_leaf_fanout != 0);
+ size_t level_node_cnt = n_leaf_nodes;
+ size_t node_cnt = 0;
+ do {
+ level_node_cnt = level_node_cnt / inmem_isam_fanout + (level_node_cnt % inmem_isam_fanout != 0);
+ node_cnt += level_node_cnt;
+ } while (level_node_cnt > 1);
+
+ size_t alloc_size = (node_cnt * inmem_isam_node_size) + (CACHELINE_SIZE - (node_cnt * inmem_isam_node_size) % CACHELINE_SIZE);
+ assert(alloc_size % CACHELINE_SIZE == 0);
+
+ m_isam_nodes = (InMemISAMNode*)std::aligned_alloc(CACHELINE_SIZE, alloc_size);
+ m_internal_node_cnt = node_cnt;
+ memset(m_isam_nodes, 0, node_cnt * inmem_isam_node_size);
+
+ InMemISAMNode* current_node = m_isam_nodes;
+
+ const Rec* leaf_base = m_data;
+ const Rec* leaf_stop = m_data + m_reccnt;
+ while (leaf_base < leaf_stop) {
+ size_t fanout = 0;
+ for (size_t i = 0; i < inmem_isam_fanout; ++i) {
+ auto rec_ptr = leaf_base + inmem_isam_leaf_fanout * i;
+ if (rec_ptr >= leaf_stop) break;
+ const Rec* sep_key = std::min(rec_ptr + inmem_isam_leaf_fanout - 1, leaf_stop - 1);
+ current_node->keys[i] = sep_key->key;
+ current_node->child[i] = (char*)rec_ptr;
+ ++fanout;
+ }
+ current_node++;
+ leaf_base += fanout * inmem_isam_leaf_fanout;
+ }
+
+ auto level_start = m_isam_nodes;
+ auto level_stop = current_node;
+ auto current_level_node_cnt = level_stop - level_start;
+ while (current_level_node_cnt > 1) {
+ auto now = level_start;
+ while (now < level_stop) {
+ size_t child_cnt = 0;
+ for (size_t i = 0; i < inmem_isam_fanout; ++i) {
+ auto node_ptr = now + i;
+ ++child_cnt;
+ if (node_ptr >= level_stop) break;
+ current_node->keys[i] = node_ptr->keys[inmem_isam_fanout - 1];
+ current_node->child[i] = (char*)node_ptr;
+ }
+ now += child_cnt;
+ current_node++;
+ }
+ level_start = level_stop;
+ level_stop = current_node;
+ current_level_node_cnt = level_stop - level_start;
+ }
+
+ assert(current_level_node_cnt == 1);
+ m_root = level_start;
+ }
+
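+ // A child pointer is a leaf if it points into the sorted record array
+ // rather than into the internal node array.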
+ bool is_leaf(const char* ptr) const {
+ return ptr >= (const char*)m_data && ptr < (const char*)(m_data + m_reccnt);
+ }
+
+ // Members: the sorted record array, the internal ISAM node array, and bookkeeping counters.
+ Rec* m_data;
+ InMemISAMNode* m_isam_nodes;
+ InMemISAMNode* m_root;
+ size_t m_reccnt;
+ size_t m_tombstone_cnt;
+ size_t m_internal_node_cnt;
+ size_t m_deleted_cnt;
+ bool m_tagging;
+};
+
+}
diff --git a/include/util/Cursor.h b/include/util/Cursor.h
index b9093f4..2339800 100644
--- a/include/util/Cursor.h
+++ b/include/util/Cursor.h
@@ -14,7 +14,7 @@
#include "io/PagedFile.h"
namespace de {
-template<typename K, typename V, typename W>
+template<typename K, typename V, typename W=void>
struct Cursor {
Record<K,V,W> *ptr;
const Record<K,V,W> *end;
diff --git a/tests/internal_level_tests.cpp b/tests/internal_level_tests.cpp
index 051bb7c..7e542e6 100644
--- a/tests/internal_level_tests.cpp
+++ b/tests/internal_level_tests.cpp
@@ -20,8 +20,8 @@ using namespace de;
START_TEST(t_memlevel_merge)
{
- auto tbl1 = create_test_mbuffer(100);
- auto tbl2 = create_test_mbuffer(100);
+ auto tbl1 = create_test_mbuffer<uint64_t, uint32_t, uint64_t>(100);
+ auto tbl2 = create_test_mbuffer<uint64_t, uint32_t, uint64_t>(100);
auto base_level = new WeightedLevel(1, 1, false);
base_level->append_mem_table(tbl1, g_rng);
@@ -45,8 +45,8 @@ START_TEST(t_memlevel_merge)
WeightedLevel *create_test_memlevel(size_t reccnt) {
- auto tbl1 = create_test_mbuffer(reccnt/2);
- auto tbl2 = create_test_mbuffer(reccnt/2);
+ auto tbl1 = create_test_mbuffer<uint64_t, uint32_t, uint64_t>(reccnt/2);
+ auto tbl2 = create_test_mbuffer<uint64_t, uint32_t, uint64_t>(reccnt/2);
auto base_level = new WeightedLevel(1, 2, false);
base_level->append_mem_table(tbl1, g_rng);
diff --git a/tests/memisam_tests.cpp b/tests/memisam_tests.cpp
new file mode 100644
index 0000000..f3b80b3
--- /dev/null
+++ b/tests/memisam_tests.cpp
@@ -0,0 +1,227 @@
+
+#include "shard/MemISAM.h"
+#include "framework/InternalLevel.h"
+#include "util/bf_config.h"
+#include "testing.h"
+
+#include <check.h>
+
+using namespace de;
+
+typedef MemISAM<uint64_t, uint32_t> M_ISAM;
+
+START_TEST(t_memtable_init)
+{
+ auto buffer = new UnweightedMBuffer(1024, true, 512, g_rng);
+ for (uint64_t i = 512; i > 0; i--) {
+ uint32_t v = i;
+ buffer->append(i, v);
+ }
+
+ for (uint64_t i = 1; i <= 256; ++i) {
+ uint32_t v = i;
+ buffer->append(i, v, true);
+ }
+
+ for (uint64_t i = 257; i <= 512; ++i) {
+ uint32_t v = i + 1;
+ buffer->append(i, v);
+ }
+
+ BloomFilter* bf = new BloomFilter(BF_FPR, buffer->get_tombstone_count(), BF_HASH_FUNCS, g_rng);
+ M_ISAM* run = new M_ISAM(buffer, bf, false);
+ ck_assert_uint_eq(run->get_record_count(), 512);
+
+ delete bf;
+ delete buffer;
+ delete run;
+}
+END_TEST
+
+START_TEST(t_inmemrun_init)
+{
+ size_t n = 512;
+ auto memtable1 = create_test_mbuffer<uint64_t, uint32_t>(n);
+ auto memtable2 = create_test_mbuffer<uint64_t, uint32_t>(n);
+ auto memtable3 = create_test_mbuffer<uint64_t, uint32_t>(n);
+
+ BloomFilter* bf1 = new BloomFilter(100, BF_HASH_FUNCS, g_rng);
+ BloomFilter* bf2 = new BloomFilter(100, BF_HASH_FUNCS, g_rng);
+ BloomFilter* bf3 = new BloomFilter(100, BF_HASH_FUNCS, g_rng);
+ auto run1 = new M_ISAM(memtable1, bf1, false);
+ auto run2 = new M_ISAM(memtable2, bf2, false);
+ auto run3 = new M_ISAM(memtable3, bf3, false);
+
+ BloomFilter* bf4 = new BloomFilter(100, BF_HASH_FUNCS, g_rng);
+ M_ISAM* runs[3] = {run1, run2, run3};
+ auto run4 = new M_ISAM(runs, 3, bf4, false);
+
+ ck_assert_int_eq(run4->get_record_count(), n * 3);
+ ck_assert_int_eq(run4->get_tombstone_count(), 0);
+
+ size_t total_cnt = 0;
+ size_t run1_idx = 0;
+ size_t run2_idx = 0;
+ size_t run3_idx = 0;
+
+ for (size_t i = 0; i < run4->get_record_count(); ++i) {
+ auto rec1 = run1->get_record_at(run1_idx);
+ auto rec2 = run2->get_record_at(run2_idx);
+ auto rec3 = run3->get_record_at(run3_idx);
+
+ auto cur_rec = run4->get_record_at(i);
+
+ if (run1_idx < n && cur_rec->match(rec1)) {
+ ++run1_idx;
+ } else if (run2_idx < n && cur_rec->match(rec2)) {
+ ++run2_idx;
+ } else if (run3_idx < n && cur_rec->match(rec3)) {
+ ++run3_idx;
+ } else {
+ assert(false);
+ }
+ }
+
+ delete memtable1;
+ delete memtable2;
+ delete memtable3;
+
+ delete bf1;
+ delete run1;
+ delete bf2;
+ delete run2;
+ delete bf3;
+ delete run3;
+ delete bf4;
+ delete run4;
+}
+END_TEST
+
+START_TEST(t_get_lower_bound_index)
+{
+ size_t n = 10000;
+ auto memtable = create_double_seq_mbuffer<uint64_t, uint32_t>(n);
+
+ ck_assert_ptr_nonnull(memtable);
+ BloomFilter* bf = new BloomFilter(100, BF_HASH_FUNCS, g_rng);
+ M_ISAM* run = new M_ISAM(memtable, bf, false);
+
+ ck_assert_int_eq(run->get_record_count(), n);
+ ck_assert_int_eq(run->get_tombstone_count(), 0);
+
+ auto tbl_records = memtable->sorted_output();
+ for (size_t i=0; i<n; i++) {
+ const UnweightedRec *tbl_rec = memtable->get_record_at(i);
+ auto pos = run->get_lower_bound(tbl_rec->key);
+ ck_assert_int_eq(run->get_record_at(pos)->key, tbl_rec->key);
+ ck_assert_int_le(pos, i);
+ }
+
+ delete memtable;
+ delete bf;
+ delete run;
+}
+END_TEST
+
+START_TEST(t_get_upper_bound_index)
+{
+ size_t n = 10000;
+ auto memtable = create_double_seq_mbuffer<uint64_t, uint32_t>(n);
+
+ ck_assert_ptr_nonnull(memtable);
+ BloomFilter* bf = new BloomFilter(100, BF_HASH_FUNCS, g_rng);
+ M_ISAM* run = new M_ISAM(memtable, bf, false);
+
+ ck_assert_int_eq(run->get_record_count(), n);
+ ck_assert_int_eq(run->get_tombstone_count(), 0);
+
+ auto tbl_records = memtable->sorted_output();
+ for (size_t i=0; i<n; i++) {
+ const UnweightedRec *tbl_rec = memtable->get_record_at(i);
+ auto pos = run->get_upper_bound(tbl_rec->key);
+ ck_assert(pos == run->get_record_count() ||
+ run->get_record_at(pos)->key > tbl_rec->key);
+ ck_assert_int_ge(pos, i);
+ }
+
+ delete memtable;
+ delete bf;
+ delete run;
+}
+END_TEST
+
+
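+ // Merging a shard of plain records with a shard holding matching
+ // tombstones should cancel every pair, leaving an empty result.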
+START_TEST(t_full_cancelation)
+{
+ size_t n = 100;
+ auto mtable = create_double_seq_mbuffer<uint64_t, uint32_t>(n, false);
+ auto mtable_ts = create_double_seq_mbuffer<uint64_t, uint32_t>(n, true);
+ BloomFilter* bf1 = new BloomFilter(100, BF_HASH_FUNCS, g_rng);
+ BloomFilter* bf2 = new BloomFilter(100, BF_HASH_FUNCS, g_rng);
+ BloomFilter* bf3 = new BloomFilter(100, BF_HASH_FUNCS, g_rng);
+
+ M_ISAM* run = new M_ISAM(mtable, bf1, false);
+ M_ISAM* run_ts = new M_ISAM(mtable_ts, bf2, false);
+
+ ck_assert_int_eq(run->get_record_count(), n);
+ ck_assert_int_eq(run->get_tombstone_count(), 0);
+ ck_assert_int_eq(run_ts->get_record_count(), n);
+ ck_assert_int_eq(run_ts->get_tombstone_count(), n);
+
+ M_ISAM* runs[] = {run, run_ts};
+
+ M_ISAM* merged = new M_ISAM(runs, 2, bf3, false);
+
+ ck_assert_int_eq(merged->get_tombstone_count(), 0);
+ ck_assert_int_eq(merged->get_record_count(), 0);
+
+ delete mtable;
+ delete mtable_ts;
+ delete bf1;
+ delete bf2;
+ delete bf3;
+ delete run;
+ delete run_ts;
+ delete merged;
+}
+END_TEST
+
+Suite *unit_testing()
+{
+ Suite *unit = suite_create("M_ISAM Unit Testing");
+
+ TCase *create = tcase_create("de::M_ISAM constructor Testing");
+ tcase_add_test(create, t_memtable_init);
+ tcase_add_test(create, t_inmemrun_init);
+ tcase_set_timeout(create, 100);
+ suite_add_tcase(unit, create);
+
+ TCase *bounds = tcase_create("de::M_ISAM::get_{lower,upper}_bound Testing");
+ tcase_add_test(bounds, t_get_lower_bound_index);
+ tcase_add_test(bounds, t_get_upper_bound_index);
+ tcase_set_timeout(bounds, 100);
+ suite_add_tcase(unit, bounds);
+
+ TCase *tombstone = tcase_create("de::M_ISAM::tombstone cancellation Testing");
+ tcase_add_test(tombstone, t_full_cancelation);
+ suite_add_tcase(unit, tombstone);
+
+ return unit;
+}
+
+int run_unit_tests()
+{
+ int failed = 0;
+ Suite *unit = unit_testing();
+ SRunner *unit_runner = srunner_create(unit);
+
+ srunner_run_all(unit_runner, CK_NORMAL);
+ failed = srunner_ntests_failed(unit_runner);
+ srunner_free(unit_runner);
+
+ return failed;
+}
+
+
+int main()
+{
+ int unit_failed = run_unit_tests();
+
+ return (unit_failed == 0) ? EXIT_SUCCESS : EXIT_FAILURE;
+}
diff --git a/tests/testing.h b/tests/testing.h
index d200e8f..062e930 100644
--- a/tests/testing.h
+++ b/tests/testing.h
@@ -72,9 +72,10 @@ static bool roughly_equal(int n1, int n2, size_t mag, double epsilon) {
return ((double) std::abs(n1 - n2) / (double) mag) < epsilon;
}
-static WeightedMBuffer *create_test_mbuffer(size_t cnt)
+template <typename K, typename V, typename W=void>
+static de::MutableBuffer<K,V,W> *create_test_mbuffer(size_t cnt)
{
- auto buffer = new WeightedMBuffer(cnt, true, cnt, g_rng);
+ auto buffer = new de::MutableBuffer<K,V,W>(cnt, true, cnt, g_rng);
for (size_t i = 0; i < cnt; i++) {
uint64_t key = rand();
@@ -86,9 +87,10 @@ static WeightedMBuffer *create_test_mbuffer(size_t cnt)
return buffer;
}
-static WeightedMBuffer *create_test_mbuffer_tombstones(size_t cnt, size_t ts_cnt)
+template <typename K, typename V, typename W=void>
+static de::MutableBuffer<K,V,W> *create_test_mbuffer_tombstones(size_t cnt, size_t ts_cnt)
{
- auto buffer = new WeightedMBuffer(cnt, true, ts_cnt, g_rng);
+ auto buffer = new de::MutableBuffer<K,V,W>(cnt, true, ts_cnt, g_rng);
std::vector<std::pair<uint64_t, uint32_t>> tombstones;
@@ -104,15 +106,17 @@ static WeightedMBuffer *create_test_mbuffer_tombstones(size_t cnt, size_t ts_cnt
}
for (size_t i=0; i<ts_cnt; i++) {
- buffer->append(tombstones[i].first, tombstones[i].second, 1.0, true);
+ if constexpr (std::is_same<W, void>::value) buffer->append(tombstones[i].first, tombstones[i].second, true);
+ else buffer->append(tombstones[i].first, tombstones[i].second, 1, true);
}
return buffer;
}
-static WeightedMBuffer *create_weighted_mbuffer(size_t cnt)
+template <typename K, typename V, typename W=void>
+static de::MutableBuffer<K,V,W> *create_weighted_mbuffer(size_t cnt)
{
- auto buffer = new WeightedMBuffer(cnt, true, cnt, g_rng);
+ static_assert(!std::is_same<W, void>::value);
+ auto buffer = new de::MutableBuffer<K,V,W>(cnt, true, cnt, g_rng);
// Put in half of the count with weight one.
uint64_t key = 1;
@@ -135,22 +139,23 @@ static WeightedMBuffer *create_weighted_mbuffer(size_t cnt)
return buffer;
}
-static WeightedMBuffer *create_double_seq_mbuffer(size_t cnt, bool ts=false)
+template <typename K, typename V, typename W=void>
+static de::MutableBuffer<K,V,W> *create_double_seq_mbuffer(size_t cnt, bool ts=false)
{
- auto buffer = new WeightedMBuffer(cnt, true, cnt, g_rng);
+ auto buffer = new de::MutableBuffer<K,V,W>(cnt, true, cnt, g_rng);
for (size_t i = 0; i < cnt / 2; i++) {
uint64_t key = i;
uint32_t val = i;
- buffer->append(key, val, 1.0, ts);
+ if constexpr (std::is_same<W, void>::value) buffer->append(key, val, ts);
+ else buffer->append(key, val, 1, ts);
}
for (size_t i = 0; i < cnt / 2; i++) {
uint64_t key = i;
uint32_t val = i + 1;
- buffer->append(key, val, 1.0, ts);
+ if constexpr (std::is_same<W, void>::value) buffer->append(key, val, ts);
+ else buffer->append(key, val, 1, ts);
}
return buffer;
diff --git a/tests/wirs_tests.cpp b/tests/wirs_tests.cpp
index fe026d1..ed83d40 100644
--- a/tests/wirs_tests.cpp
+++ b/tests/wirs_tests.cpp
@@ -51,9 +51,9 @@ START_TEST(t_mbuffer_init)
START_TEST(t_wirs_init)
{
size_t n = 512;
- auto mbuffer1 = create_test_mbuffer(n);
- auto mbuffer2 = create_test_mbuffer(n);
- auto mbuffer3 = create_test_mbuffer(n);
+ auto mbuffer1 = create_test_mbuffer<uint64_t, uint32_t, uint64_t>(n);
+ auto mbuffer2 = create_test_mbuffer<uint64_t, uint32_t, uint64_t>(n);
+ auto mbuffer3 = create_test_mbuffer<uint64_t, uint32_t, uint64_t>(n);
BloomFilter* bf1 = new BloomFilter(100, BF_HASH_FUNCS, g_rng);
BloomFilter* bf2 = new BloomFilter(100, BF_HASH_FUNCS, g_rng);
@@ -109,7 +109,7 @@ START_TEST(t_wirs_init)
START_TEST(t_get_lower_bound_index)
{
size_t n = 10000;
- auto mbuffer = create_double_seq_mbuffer(n);
+ auto mbuffer = create_double_seq_mbuffer<uint64_t, uint32_t, uint64_t>(n);
ck_assert_ptr_nonnull(mbuffer);
BloomFilter* bf = new BloomFilter(100, BF_HASH_FUNCS, g_rng);
@@ -135,8 +135,8 @@ START_TEST(t_get_lower_bound_index)
START_TEST(t_full_cancelation)
{
size_t n = 100;
- auto buffer = create_double_seq_mbuffer(n, false);
- auto buffer_ts = create_double_seq_mbuffer(n, true);
+ auto buffer = create_double_seq_mbuffer<uint64_t, uint32_t, uint64_t>(n, false);
+ auto buffer_ts = create_double_seq_mbuffer<uint64_t, uint32_t, uint64_t>(n, true);
BloomFilter* bf1 = new BloomFilter(100, BF_HASH_FUNCS, g_rng);
BloomFilter* bf2 = new BloomFilter(100, BF_HASH_FUNCS, g_rng);
BloomFilter* bf3 = new BloomFilter(100, BF_HASH_FUNCS, g_rng);
@@ -171,7 +171,7 @@ END_TEST
START_TEST(t_weighted_sampling)
{
size_t n=1000;
- auto buffer = create_weighted_mbuffer(n);
+ auto buffer = create_weighted_mbuffer<uint64_t, uint32_t, uint64_t>(n);
BloomFilter* bf = new BloomFilter(100, BF_HASH_FUNCS, g_rng);
Shard* shard = new Shard(buffer, bf, false);