-rw-r--r--   .gitmodules            3
-rw-r--r--   CMakeLists.txt         4
m---------   external/PGM-index     0
-rw-r--r--   include/shard/PGM.h    345
-rw-r--r--   tests/pgm_tests.cpp    217
5 files changed, 569 insertions, 0 deletions
diff --git a/.gitmodules b/.gitmodules
index a80fc10..36b61ae 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,3 +1,6 @@
[submodule "external/PLEX"]
path = external/PLEX
url = git@github.com:stoianmihail/PLEX.git
+[submodule "external/PGM-index"]
+ path = external/PGM-index
+ url = https://github.com/gvinciguerra/PGM-index.git
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 420c19c..fc41db3 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -60,6 +60,10 @@ if (tests)
add_executable(triespline_tests ${CMAKE_CURRENT_SOURCE_DIR}/tests/triespline_tests.cpp)
target_link_libraries(triespline_tests PUBLIC gsl check subunit pthread)
target_include_directories(triespline_tests PRIVATE include external/PLEX/include)
+
+ add_executable(pgm_tests ${CMAKE_CURRENT_SOURCE_DIR}/tests/pgm_tests.cpp)
+ target_link_libraries(pgm_tests PUBLIC gsl check subunit pthread)
+ target_include_directories(pgm_tests PRIVATE include external/PGM-index/include)
endif()
# Benchmark build instructions
diff --git a/external/PGM-index b/external/PGM-index
new file mode 160000
+Subproject commit 4c1bc8e03307b971f31987fa8b01a52da698aa1
diff --git a/include/shard/PGM.h b/include/shard/PGM.h
new file mode 100644
index 0000000..9fad8d0
--- /dev/null
+++ b/include/shard/PGM.h
@@ -0,0 +1,345 @@
+/*
+ * include/shard/PGM.h
+ *
+ * Copyright (C) 2023 Douglas B. Rumbaugh <drumbaugh@psu.edu>
+ *
+ * All rights reserved. Published under the Modified BSD License.
+ *
+ */
+#pragma once
+
+
+#include <vector>
+#include <cassert>
+#include <queue>
+#include <memory>
+#include <concepts>
+
+#include "pgm/pgm_index.hpp"
+#include "ds/PriorityQueue.h"
+#include "util/Cursor.h"
+#include "ds/BloomFilter.h"
+#include "util/bf_config.h"
+#include "framework/MutableBuffer.h"
+#include "framework/RecordInterface.h"
+#include "framework/ShardInterface.h"
+#include "framework/QueryInterface.h"
+
+namespace de {
+
+template <RecordInterface R>
+struct pgm_range_query_parms {
+ decltype(R::key) lower_bound;
+ decltype(R::key) upper_bound;
+};
+
+template <RecordInterface R, bool Rejection>
+class PGMLookup;
+
+template <RecordInterface R>
+class PGMRangeQuery;
+
+template <RecordInterface R>
+struct PGMState {
+ size_t start_idx;
+ size_t stop_idx;
+};
+
+template <RecordInterface R>
+struct PGMBufferState {
+ size_t cutoff;
+ Alias* alias = nullptr; // default to nullptr so the destructor's delete is safe
+ decltype(R::weight) max_weight;
+
+ ~PGMBufferState() {
+ delete alias;
+ }
+
+};
+
+template <RecordInterface R>
+class PGM {
+private:
+ typedef decltype(R::key) K;
+ typedef decltype(R::value) V;
+
+public:
+
+ // FIXME: there has to be a better way to do this
+ friend class PGMLookup<R, true>;
+ friend class PGMLookup<R, false>;
+ friend class PGMRangeQuery<R>;
+
+ PGM(MutableBuffer<R>* buffer)
+ : m_reccnt(0), m_tombstone_cnt(0) {
+
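+ // allocate cache-line-aligned storage for the buffer's records,
+ // padded out to a CACHELINE_SIZE boundary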
+ size_t alloc_size = (buffer->get_record_count() * sizeof(Wrapped<R>)) + (CACHELINE_SIZE - (buffer->get_record_count() * sizeof(Wrapped<R>)) % CACHELINE_SIZE);
+ assert(alloc_size % CACHELINE_SIZE == 0);
+ m_data = (Wrapped<R>*)std::aligned_alloc(CACHELINE_SIZE, alloc_size);
+ std::vector<K> keys;
+
+ m_bf = new BloomFilter<K>(BF_FPR, buffer->get_tombstone_count(), BF_HASH_FUNCS);
+
+ size_t offset = 0;
+ m_reccnt = 0;
+ auto base = buffer->get_data();
+ auto stop = base + buffer->get_record_count();
+
+ std::sort(base, stop, std::less<Wrapped<R>>());
+
+ K min_key = base->rec.key;
+ K max_key = (stop - 1)->rec.key;
+
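+ // single pass over the sorted buffer: a record immediately followed
+ // by its matching tombstone is cancelled, deleted records are
+ // dropped, and everything else is copied into the shard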
+ while (base < stop) {
+ if (!(base->is_tombstone()) && (base + 1) < stop) {
+ if (base->rec == (base + 1)->rec && (base + 1)->is_tombstone()) {
+ base += 2;
+ continue;
+ }
+ } else if (base->is_deleted()) {
+ base += 1;
+ continue;
+ }
+
+ base->header &= 1;
+ m_data[m_reccnt++] = *base;
+ keys.emplace_back(base->rec.key);
+
+ if (m_bf && base->is_tombstone()) {
+ m_tombstone_cnt++;
+ m_bf->insert(base->rec.key);
+ }
+
+ base++;
+ }
+
+ if (m_reccnt > 0) {
+ m_pgm = pgm::PGMIndex<K>(keys);
+ }
+ }
+
+ PGM(PGM** shards, size_t len)
+ : m_reccnt(0), m_tombstone_cnt(0) {
+ std::vector<Cursor<Wrapped<R>>> cursors;
+ cursors.reserve(len);
+
+ PriorityQueue<Wrapped<R>> pq(len);
+
+ size_t attemp_reccnt = 0;
+ size_t tombstone_count = 0;
+
+ for (size_t i = 0; i < len; ++i) {
+ if (shards[i]) {
+ auto base = shards[i]->get_data();
+ cursors.emplace_back(Cursor{base, base + shards[i]->get_record_count(), 0, shards[i]->get_record_count()});
+ attemp_reccnt += shards[i]->get_record_count();
+ tombstone_count += shards[i]->get_tombstone_count();
+ pq.push(cursors[i].ptr, i);
+
+ } else {
+ cursors.emplace_back(Cursor<Wrapped<R>>{nullptr, nullptr, 0, 0});
+ }
+ }
+
+ m_bf = new BloomFilter<K>(BF_FPR, tombstone_count, BF_HASH_FUNCS);
+
+ size_t alloc_size = (attemp_reccnt * sizeof(Wrapped<R>)) + (CACHELINE_SIZE - (attemp_reccnt * sizeof(Wrapped<R>)) % CACHELINE_SIZE);
+ assert(alloc_size % CACHELINE_SIZE == 0);
+ m_data = (Wrapped<R>*)std::aligned_alloc(CACHELINE_SIZE, alloc_size);
+
+ std::vector<K> keys;
+
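+ // k-way merge of the sorted shards through the priority queue; a
+ // record that meets its matching tombstone at the head of the queue
+ // is cancelled, and records marked deleted are dropped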
+ while (pq.size()) {
+ auto now = pq.peek();
+ auto next = pq.size() > 1 ? pq.peek(1) : queue_record<Wrapped<R>>{nullptr, 0};
+ if (!now.data->is_tombstone() && next.data != nullptr &&
+ now.data->rec == next.data->rec && next.data->is_tombstone()) {
+
+ pq.pop(); pq.pop();
+ auto& cursor1 = cursors[now.version];
+ auto& cursor2 = cursors[next.version];
+ if (advance_cursor<Wrapped<R>>(cursor1)) pq.push(cursor1.ptr, now.version);
+ if (advance_cursor<Wrapped<R>>(cursor2)) pq.push(cursor2.ptr, next.version);
+ } else {
+ auto& cursor = cursors[now.version];
+ if (!cursor.ptr->is_deleted()) {
+ m_data[m_reccnt++] = *cursor.ptr;
+ keys.emplace_back(cursor.ptr->rec.key);
+ if (m_bf && cursor.ptr->is_tombstone()) {
+ ++m_tombstone_cnt;
+ m_bf->insert(cursor.ptr->rec.key);
+ }
+ }
+ pq.pop();
+
+ if (advance_cursor<Wrapped<R>>(cursor)) pq.push(cursor.ptr, now.version);
+ }
+ }
+
+ if (m_reccnt > 0) {
+ m_pgm = pgm::PGMIndex<K>(keys);
+ }
+ }
+
+ ~PGM() {
+ if (m_data) free(m_data);
+ if (m_bf) delete m_bf;
+
+ }
+
+ Wrapped<R> *point_lookup(const R &rec, bool filter=false) {
+ if (filter && !m_bf->lookup(rec.key)) {
+ return nullptr;
+ }
+
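+ // get_lower_bound() uses the learned index to locate the first record
+ // with a key not less than the target; from there we scan forward to
+ // the exact (key, value) match, if any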
+ size_t idx = get_lower_bound(rec.key);
+ if (idx >= m_reccnt) {
+ return nullptr;
+ }
+
+ while (idx < m_reccnt && m_data[idx].rec < rec) ++idx;
+
+ if (idx < m_reccnt && m_data[idx].rec == rec) {
+ return m_data + idx;
+ }
+
+ return nullptr;
+ }
+
+ Wrapped<R>* get_data() const {
+ return m_data;
+ }
+
+ size_t get_record_count() const {
+ return m_reccnt;
+ }
+
+ size_t get_tombstone_count() const {
+ return m_tombstone_cnt;
+ }
+
+ const Wrapped<R>* get_record_at(size_t idx) const {
+ if (idx >= m_reccnt) return nullptr;
+ return m_data + idx;
+ }
+
+
+ size_t get_memory_usage() {
+ return 0;
+ }
+
+private:
+
+ // FIXME: depending upon the size of the returned bound,
+ // it may be better to switch between binary search and
+ // linear scan.
+ size_t get_lower_bound(const K& key) const {
+ auto bound = m_pgm.search(key);
+ size_t idx = bound.lo;
+
+ if (idx >= m_reccnt) {
+ return m_reccnt;
+ }
+
+ // if the found location _is_ the key, we're done.
+ if (m_data[idx].rec.key == key) {
+ return idx;
+ }
+
+ // if the found location is larger than the key, we need to
+ // move backwards towards the beginning of the array
+ if (m_data[idx].rec.key > key) {
+ for (ssize_t i=idx; i>=0; i--) {
+ if (m_data[i].rec.key < key) {
+ return i+1;
+ }
+ }
+ // every record before the predicted position is >= key, so the
+ // lower bound is the start of the array
+ return 0;
+ // otherwise, we move forward towards the end and return the first
+ // position whose key is not less than the search key
+ } else {
+ for (size_t i=idx; i<m_reccnt; i++) {
+ if (m_data[i].rec.key >= key) {
+ return i;
+ }
+ }
+ }
+
+ return m_reccnt;
+ }
+
+ Wrapped<R>* m_data;
+ size_t m_reccnt;
+ size_t m_tombstone_cnt;
+ K m_max_key;
+ K m_min_key;
+ pgm::PGMIndex<K> m_pgm;
+ BloomFilter<K> *m_bf;
+};
+
+
+template <RecordInterface R>
+class PGMRangeQuery {
+public:
+ static void *get_query_state(PGM<R> *ts, void *parms) {
+ auto res = new PGMState<R>();
+ auto p = (pgm_range_query_parms<R> *) parms;
+
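+ // the query state is just a scan range over the sorted shard data:
+ // from the first record >= lower_bound through the end of the shard
+ // (the scan terminates early once upper_bound is exceeded)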
+ res->start_idx = ts->get_lower_bound(p->lower_bound);
+ res->stop_idx = ts->get_record_count();
+
+ return res;
+ }
+
+ static void* get_buffer_query_state(MutableBuffer<R> *buffer, void *parms) {
+ auto res = new PGMBufferState<R>();
+ res->cutoff = buffer->get_record_count();
+
+ return res;
+ }
+
+ static std::vector<Wrapped<R>> query(PGM<R> *ts, void *q_state, void *parms) {
+ std::vector<Wrapped<R>> records;
+ auto p = (pgm_range_query_parms<R> *) parms;
+ auto s = (PGMState<R> *) q_state;
+ auto ptr = ts->get_record_at(s->start_idx);
+ size_t i = 0;
+ while (s->start_idx + i < s->stop_idx && ptr[i].rec.key <= p->upper_bound) {
+ records.emplace_back(ptr[i]);
+ i++;
+ }
+
+ return records;
+ }
+
+ static std::vector<Wrapped<R>> buffer_query(MutableBuffer<R> *buffer, void *state, void *parms) {
+ auto p = (pgm_range_query_parms<R> *) parms;
+ auto s = (PGMBufferState<R> *) state;
+
+ // the buffer is unsorted, so scan up to the cutoff and keep in-range records
+ std::vector<Wrapped<R>> records;
+ for (size_t i=0; i<s->cutoff; i++) {
+ auto rec = buffer->get_data() + i;
+ if (rec->rec.key >= p->lower_bound && rec->rec.key <= p->upper_bound) {
+ records.emplace_back(*rec);
+ }
+ }
+ return records;
+ }
+
+ static std::vector<R> merge(std::vector<std::vector<R>> &results) {
+ std::vector<R> output;
+
+ for (size_t i=0; i<results.size(); i++) {
+ for (size_t j=0; j<results[i].size(); j++) {
+ output.emplace_back(results[i][j]);
+ }
+ }
+
+ return output;
+ }
+
+ static void delete_query_state(void *state) {
+ auto s = (PGMState<R> *) state;
+ delete s;
+ }
+
+ static void delete_buffer_query_state(void *state) {
+ auto s = (PGMBufferState<R> *) state;
+ delete s;
+ }
+
+
+ //{q.get_buffer_query_state(p, p)};
+ //{q.buffer_query(p, p)};
+
+};
+
+}
diff --git a/tests/pgm_tests.cpp b/tests/pgm_tests.cpp
new file mode 100644
index 0000000..33979ae
--- /dev/null
+++ b/tests/pgm_tests.cpp
@@ -0,0 +1,217 @@
+/*
+ * tests/pgm_tests.cpp
+ *
+ * Unit tests for PGM (learned index) shard
+ *
+ * Copyright (C) 2023 Douglas Rumbaugh <drumbaugh@psu.edu>
+ * Dong Xie <dongx@psu.edu>
+ *
+ * All rights reserved. Published under the Modified BSD License.
+ *
+ */
+
+#include "shard/PGM.h"
+#include "testing.h"
+
+#include <check.h>
+
+using namespace de;
+
+typedef PGM<Rec> Shard;
+
+START_TEST(t_mbuffer_init)
+{
+ auto buffer = new MutableBuffer<Rec>(1024, true, 1024);
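+ // three passes: 512 records with descending keys, tombstones for
+ // keys 1-256, and new values for keys 257-512; tombstone
+ // cancellation should leave exactly 512 records in the shard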
+ for (uint64_t i = 512; i > 0; i--) {
+ uint32_t v = i;
+ buffer->append({i,v, 1});
+ }
+
+ for (uint64_t i = 1; i <= 256; ++i) {
+ uint32_t v = i;
+ buffer->append({i, v, 1}, true);
+ }
+
+ for (uint64_t i = 257; i <= 512; ++i) {
+ uint32_t v = i + 1;
+ buffer->append({i, v, 1});
+ }
+
+ Shard* shard = new Shard(buffer);
+ ck_assert_uint_eq(shard->get_record_count(), 512);
+
+ delete buffer;
+ delete shard;
+}
+END_TEST
+
+
+START_TEST(t_pgm_init)
+{
+ size_t n = 512;
+ auto mbuffer1 = create_test_mbuffer<Rec>(n);
+ auto mbuffer2 = create_test_mbuffer<Rec>(n);
+ auto mbuffer3 = create_test_mbuffer<Rec>(n);
+
+ auto shard1 = new Shard(mbuffer1);
+ auto shard2 = new Shard(mbuffer2);
+ auto shard3 = new Shard(mbuffer3);
+
+ Shard* shards[3] = {shard1, shard2, shard3};
+ auto shard4 = new Shard(shards, 3);
+
+ ck_assert_int_eq(shard4->get_record_count(), n * 3);
+ ck_assert_int_eq(shard4->get_tombstone_count(), 0);
+
+ size_t total_cnt = 0;
+ size_t shard1_idx = 0;
+ size_t shard2_idx = 0;
+ size_t shard3_idx = 0;
+
+ for (size_t i = 0; i < shard4->get_record_count(); ++i) {
+ auto rec1 = shard1->get_record_at(shard1_idx);
+ auto rec2 = shard2->get_record_at(shard2_idx);
+ auto rec3 = shard3->get_record_at(shard3_idx);
+
+ auto cur_rec = shard4->get_record_at(i);
+
+ if (shard1_idx < n && cur_rec->rec == rec1->rec) {
+ ++shard1_idx;
+ } else if (shard2_idx < n && cur_rec->rec == rec2->rec) {
+ ++shard2_idx;
+ } else if (shard3_idx < n && cur_rec->rec == rec3->rec) {
+ ++shard3_idx;
+ } else {
+ assert(false);
+ }
+ }
+
+ delete mbuffer1;
+ delete mbuffer2;
+ delete mbuffer3;
+
+ delete shard1;
+ delete shard2;
+ delete shard3;
+ delete shard4;
+}
+END_TEST
+
+START_TEST(t_point_lookup)
+{
+ size_t n = 10000;
+
+ auto buffer = create_double_seq_mbuffer<Rec>(n, false);
+ auto shard = Shard(buffer);
+
+ for (size_t i=0; i<n; i++) {
+ Rec r;
+ auto rec = (buffer->get_data() + i);
+ r.key = rec->rec.key;
+ r.value = rec->rec.value;
+
+ auto result = shard.point_lookup(r);
+ ck_assert_ptr_nonnull(result);
+ ck_assert_int_eq(result->rec.key, r.key);
+ ck_assert_int_eq(result->rec.value, r.value);
+ }
+
+ delete buffer;
+}
+END_TEST
+
+
+START_TEST(t_point_lookup_miss)
+{
+ size_t n = 10000;
+
+ auto buffer = create_double_seq_mbuffer<Rec>(n, false);
+ auto shard = Shard(buffer);
+
+ for (size_t i=n + 100; i<2*n; i++) {
+ Rec r;
+ r.key = i;
+ r.value = i;
+
+ auto result = shard.point_lookup(r);
+ ck_assert_ptr_null(result);
+ }
+
+ delete buffer;
+}
+END_TEST
+
+
+START_TEST(t_full_cancelation)
+{
+ size_t n = 100;
+ auto buffer = create_double_seq_mbuffer<Rec>(n, false);
+ auto buffer_ts = create_double_seq_mbuffer<Rec>(n, true);
+
+ Shard* shard = new Shard(buffer);
+ Shard* shard_ts = new Shard(buffer_ts);
+
+ ck_assert_int_eq(shard->get_record_count(), n);
+ ck_assert_int_eq(shard->get_tombstone_count(), 0);
+ ck_assert_int_eq(shard_ts->get_record_count(), n);
+ ck_assert_int_eq(shard_ts->get_tombstone_count(), n);
+
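+ // every record in shard has a matching tombstone in shard_ts, so
+ // merging the two should cancel everything out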
+ Shard* shards[] = {shard, shard_ts};
+
+ Shard* merged = new Shard(shards, 2);
+
+ ck_assert_int_eq(merged->get_tombstone_count(), 0);
+ ck_assert_int_eq(merged->get_record_count(), 0);
+
+ delete buffer;
+ delete buffer_ts;
+ delete shard;
+ delete shard_ts;
+ delete merged;
+}
+END_TEST
+
+
+Suite *unit_testing()
+{
+ Suite *unit = suite_create("PGM Shard Unit Testing");
+
+ TCase *create = tcase_create("de::PGM constructor Testing");
+ tcase_add_test(create, t_mbuffer_init);
+ tcase_add_test(create, t_pgm_init);
+ tcase_set_timeout(create, 100);
+ suite_add_tcase(unit, create);
+
+
+ TCase *tombstone = tcase_create("de::PGM::tombstone cancellation Testing");
+ tcase_add_test(tombstone, t_full_cancelation);
+ suite_add_tcase(unit, tombstone);
+
+
+ TCase *lookup = tcase_create("de::PGM::point_lookup Testing");
+ tcase_add_test(lookup, t_point_lookup);
+ tcase_add_test(lookup, t_point_lookup_miss);
+ suite_add_tcase(unit, lookup);
+
+
+ return unit;
+}
+
+
+int shard_unit_tests()
+{
+ int failed = 0;
+ Suite *unit = unit_testing();
+ SRunner *unit_runner = srunner_create(unit);
+
+ srunner_run_all(unit_runner, CK_NORMAL);
+ failed = srunner_ntests_failed(unit_runner);
+ srunner_free(unit_runner);
+
+ return failed;
+}
+
+
+int main()
+{
+ int unit_failed = shard_unit_tests();
+
+ return (unit_failed == 0) ? EXIT_SUCCESS : EXIT_FAILURE;
+}