 external/psudb-common    |   0
 include/shard/ISAMTree.h | 125
 tests/memisam_tests.cpp  | 123
 tests/testing.h          |  14
 4 files changed, 137 insertions(+), 125 deletions(-)
diff --git a/external/psudb-common b/external/psudb-common
-Subproject d828d283ffe48b3d576843d4d6e8de78bdb18ff
+Subproject 56ce9fdb9eaea076d9f54e564a6018121b05219
diff --git a/include/shard/ISAMTree.h b/include/shard/ISAMTree.h
index e11c899..6b2f6b5 100644
--- a/include/shard/ISAMTree.h
+++ b/include/shard/ISAMTree.h
@@ -13,8 +13,6 @@
#include <vector>
#include <cassert>
-#include <queue>
-#include <memory>
#include "framework/ShardRequirements.h"
@@ -27,52 +25,54 @@ using psudb::CACHELINE_SIZE;
using psudb::BloomFilter;
using psudb::PriorityQueue;
using psudb::queue_record;
-using psudb::Alias;
namespace de {
-thread_local size_t mrun_cancelations = 0;
-
-template <RecordInterface R>
+template <KVPInterface R>
class ISAMTree {
private:
typedef decltype(R::key) K;
typedef decltype(R::value) V;
-constexpr static size_t inmem_isam_node_size = 256;
-constexpr static size_t inmem_isam_fanout = inmem_isam_node_size / (sizeof(K) + sizeof(char*));
+constexpr static size_t NODE_SZ = 256;
+constexpr static size_t INTERNAL_FANOUT = NODE_SZ / (sizeof(K) + sizeof(byte*));
struct InternalNode {
- K keys[inmem_isam_fanout];
- char* child[inmem_isam_fanout];
+ K keys[INTERNAL_FANOUT];
+ byte* child[INTERNAL_FANOUT];
};
-constexpr static size_t inmem_isam_leaf_fanout = inmem_isam_node_size / sizeof(R);
-constexpr static size_t inmem_isam_node_keyskip = sizeof(K) * inmem_isam_fanout;
-
-static_assert(sizeof(InternalNode) == inmem_isam_node_size, "node size does not match");
+static_assert(sizeof(InternalNode) == NODE_SZ, "node size does not match");
-public:
- ISAMTree(MutableBuffer<R>* buffer)
- :m_reccnt(0), m_tombstone_cnt(0), m_isam_nodes(nullptr), m_deleted_cnt(0) {
+constexpr static size_t LEAF_FANOUT = NODE_SZ / sizeof(R);
- m_bf = new BloomFilter<R>(BF_FPR, buffer->get_tombstone_count(), BF_HASH_FUNCS);
-
- m_alloc_size = (buffer->get_record_count() * sizeof(Wrapped<R>)) + (CACHELINE_SIZE - (buffer->get_record_count() * sizeof(Wrapped<R>)) % CACHELINE_SIZE);
- assert(m_alloc_size % CACHELINE_SIZE == 0);
- m_data = (Wrapped<R>*)std::aligned_alloc(CACHELINE_SIZE, m_alloc_size);
+public:
+ ISAMTree(BufferView<R> buffer)
+ : m_bf(new BloomFilter<R>(BF_FPR, buffer.get_tombstone_count(), BF_HASH_FUNCS))
+ , m_isam_nodes(nullptr)
+ , m_root(nullptr)
+ , m_reccnt(0)
+ , m_tombstone_cnt(0)
+ , m_internal_node_cnt(0)
+ , m_deleted_cnt(0)
+ , m_alloc_size(0)
+ , m_data(nullptr)
+ {
TIMER_INIT();
- size_t offset = 0;
- m_reccnt = 0;
- auto base = buffer->get_data();
- auto stop = base + buffer->get_record_count();
+ m_alloc_size = psudb::sf_aligned_alloc(CACHELINE_SIZE, buffer.get_record_count() * sizeof(Wrapped<R>), (byte**) &m_data);
TIMER_START();
+ auto temp_buffer = (Wrapped<R> *) psudb::sf_aligned_alloc(CACHELINE_SIZE, buffer.get_record_count() * sizeof(Wrapped<R>));
+ buffer.copy_to_buffer((byte *) temp_buffer);
+
+ auto base = temp_buffer;
+ auto stop = base + buffer.get_record_count();
std::sort(base, stop, std::less<Wrapped<R>>());
TIMER_STOP();
+
auto sort_time = TIMER_RESULT();
TIMER_START();
@@ -80,7 +80,6 @@ public:
if (!base->is_tombstone() && (base + 1 < stop)
&& base->rec == (base + 1)->rec && (base + 1)->is_tombstone()) {
base += 2;
- mrun_cancelations++;
continue;
} else if (base->is_deleted()) {
base += 1;
@@ -109,10 +108,21 @@ public:
}
TIMER_STOP();
auto level_time = TIMER_RESULT();
+
+ free(temp_buffer);
}
ISAMTree(ISAMTree** runs, size_t len)
- : m_reccnt(0), m_tombstone_cnt(0), m_deleted_cnt(0), m_isam_nodes(nullptr) {
+ : m_bf(nullptr)
+ , m_isam_nodes(nullptr)
+ , m_root(nullptr)
+ , m_reccnt(0)
+ , m_tombstone_cnt(0)
+ , m_internal_node_cnt(0)
+ , m_deleted_cnt(0)
+ , m_alloc_size(0)
+ , m_data(nullptr)
+ {
std::vector<Cursor<Wrapped<R>>> cursors;
cursors.reserve(len);
@@ -139,8 +149,6 @@ public:
assert(m_alloc_size % CACHELINE_SIZE == 0);
m_data = (Wrapped<R>*)std::aligned_alloc(CACHELINE_SIZE, m_alloc_size);
- size_t offset = 0;
-
while (pq.size()) {
auto now = pq.peek();
auto next = pq.size() > 1 ? pq.peek(1) : queue_record<Wrapped<R>>{nullptr, 0};
@@ -173,9 +181,9 @@ public:
}
~ISAMTree() {
- if (m_data) free(m_data);
- if (m_isam_nodes) free(m_isam_nodes);
- if (m_bf) delete m_bf;
+ free(m_data);
+ free(m_isam_nodes);
+ delete m_bf;
}
Wrapped<R> *point_lookup(const R &rec, bool filter=false) {
@@ -214,25 +222,25 @@ public:
}
size_t get_memory_usage() {
- return m_internal_node_cnt * inmem_isam_node_size + m_alloc_size;
+ return m_alloc_size;
}
size_t get_aux_memory_usage() {
- return 0;
+ return m_bf->memory_usage();
}
size_t get_lower_bound(const K& key) const {
const InternalNode* now = m_root;
- while (!is_leaf(reinterpret_cast<const char*>(now))) {
+ while (!is_leaf(reinterpret_cast<const byte*>(now))) {
const InternalNode* next = nullptr;
- for (size_t i = 0; i < inmem_isam_fanout - 1; ++i) {
+ for (size_t i = 0; i < INTERNAL_FANOUT - 1; ++i) {
if (now->child[i + 1] == nullptr || key <= now->keys[i]) {
next = reinterpret_cast<InternalNode*>(now->child[i]);
break;
}
}
- now = next ? next : reinterpret_cast<const InternalNode*>(now->child[inmem_isam_fanout - 1]);
+ now = next ? next : reinterpret_cast<const InternalNode*>(now->child[INTERNAL_FANOUT - 1]);
}
const Wrapped<R>* pos = reinterpret_cast<const Wrapped<R>*>(now);
@@ -243,16 +251,16 @@ public:
size_t get_upper_bound(const K& key) const {
const InternalNode* now = m_root;
- while (!is_leaf(reinterpret_cast<const char*>(now))) {
+ while (!is_leaf(reinterpret_cast<const byte*>(now))) {
const InternalNode* next = nullptr;
- for (size_t i = 0; i < inmem_isam_fanout - 1; ++i) {
+ for (size_t i = 0; i < INTERNAL_FANOUT - 1; ++i) {
if (now->child[i + 1] == nullptr || key < now->keys[i]) {
next = reinterpret_cast<InternalNode*>(now->child[i]);
break;
}
}
- now = next ? next : reinterpret_cast<const InternalNode*>(now->child[inmem_isam_fanout - 1]);
+ now = next ? next : reinterpret_cast<const InternalNode*>(now->child[INTERNAL_FANOUT - 1]);
}
const Wrapped<R>* pos = reinterpret_cast<const Wrapped<R>*>(now);
@@ -264,20 +272,17 @@ public:
private:
void build_internal_levels() {
- size_t n_leaf_nodes = m_reccnt / inmem_isam_leaf_fanout + (m_reccnt % inmem_isam_leaf_fanout != 0);
+ size_t n_leaf_nodes = m_reccnt / LEAF_FANOUT + (m_reccnt % LEAF_FANOUT != 0);
+
size_t level_node_cnt = n_leaf_nodes;
size_t node_cnt = 0;
do {
- level_node_cnt = level_node_cnt / inmem_isam_fanout + (level_node_cnt % inmem_isam_fanout != 0);
+ level_node_cnt = level_node_cnt / INTERNAL_FANOUT + (level_node_cnt % INTERNAL_FANOUT != 0);
node_cnt += level_node_cnt;
} while (level_node_cnt > 1);
- m_alloc_size = (node_cnt * inmem_isam_node_size) + (CACHELINE_SIZE - (node_cnt * inmem_isam_node_size) % CACHELINE_SIZE);
- assert(m_alloc_size % CACHELINE_SIZE == 0);
-
- m_isam_nodes = (InternalNode*)std::aligned_alloc(CACHELINE_SIZE, m_alloc_size);
+ m_alloc_size += psudb::sf_aligned_calloc(CACHELINE_SIZE, node_cnt, NODE_SZ, (byte**) &m_isam_nodes);
m_internal_node_cnt = node_cnt;
- memset(m_isam_nodes, 0, node_cnt * inmem_isam_node_size);
InternalNode* current_node = m_isam_nodes;
@@ -285,16 +290,16 @@ private:
const Wrapped<R>* leaf_stop = m_data + m_reccnt;
while (leaf_base < leaf_stop) {
size_t fanout = 0;
- for (size_t i = 0; i < inmem_isam_fanout; ++i) {
- auto rec_ptr = leaf_base + inmem_isam_leaf_fanout * i;
+ for (size_t i = 0; i < INTERNAL_FANOUT; ++i) {
+ auto rec_ptr = leaf_base + LEAF_FANOUT * i;
if (rec_ptr >= leaf_stop) break;
- const Wrapped<R>* sep_key = std::min(rec_ptr + inmem_isam_leaf_fanout - 1, leaf_stop - 1);
+ const Wrapped<R>* sep_key = std::min(rec_ptr + LEAF_FANOUT - 1, leaf_stop - 1);
current_node->keys[i] = sep_key->rec.key;
- current_node->child[i] = (char*)rec_ptr;
+ current_node->child[i] = (byte*)rec_ptr;
++fanout;
}
current_node++;
- leaf_base += fanout * inmem_isam_leaf_fanout;
+ leaf_base += fanout * LEAF_FANOUT;
}
auto level_start = m_isam_nodes;
@@ -304,12 +309,12 @@ private:
auto now = level_start;
while (now < level_stop) {
size_t child_cnt = 0;
- for (size_t i = 0; i < inmem_isam_fanout; ++i) {
+ for (size_t i = 0; i < INTERNAL_FANOUT; ++i) {
auto node_ptr = now + i;
++child_cnt;
if (node_ptr >= level_stop) break;
- current_node->keys[i] = node_ptr->keys[inmem_isam_fanout - 1];
- current_node->child[i] = (char*)node_ptr;
+ current_node->keys[i] = node_ptr->keys[INTERNAL_FANOUT - 1];
+ current_node->child[i] = (byte*)node_ptr;
}
now += child_cnt;
current_node++;
@@ -323,12 +328,10 @@ private:
m_root = level_start;
}
- bool is_leaf(const char* ptr) const {
- return ptr >= (const char*)m_data && ptr < (const char*)(m_data + m_reccnt);
+ bool is_leaf(const byte* ptr) const {
+ return ptr >= (const byte*)m_data && ptr < (const byte*)(m_data + m_reccnt);
}
- // Members: sorted data, internal ISAM levels, reccnt;
- Wrapped<R>* m_data;
psudb::BloomFilter<R> *m_bf;
InternalNode* m_isam_nodes;
InternalNode* m_root;
@@ -337,5 +340,7 @@ private:
size_t m_internal_node_cnt;
size_t m_deleted_cnt;
size_t m_alloc_size;
+
+ Wrapped<R>* m_data;
};
}
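Note (not part of the change): the shard-construction API above now takes a BufferView by value instead of a MutableBuffer pointer, copies the view's records into its own cache-aligned allocation via psudb::sf_aligned_alloc, and sorts and compacts them there. A minimal sketch of driving the new constructor, modeled on t_mbuffer_init in the test changes that follow and assuming the same includes and using-declarations as tests/memisam_tests.cpp:

    // Sketch only. Rec and MutableBuffer come from the test harness; the two
    // size arguments to MutableBuffer follow the pattern used in the updated
    // tests (their exact semantics are defined by MutableBuffer, not shown here).
    #include "shard/ISAMTree.h"
    #include "testing.h"

    typedef ISAMTree<Rec> Shard;

    static Shard *build_example_shard() {
        auto buffer = new MutableBuffer<Rec>(512, 1024);
        for (uint64_t i = 512; i > 0; i--) {
            uint32_t v = i;
            buffer->append({i, v, 1});
        }

        // The constructor copies the view's records into a temporary buffer,
        // sorts them, drops cancelled record/tombstone pairs, and builds the
        // internal ISAM levels, so the MutableBuffer can be freed afterwards.
        auto shard = new Shard(buffer->get_buffer_view());
        delete buffer;

        return shard;
    }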
diff --git a/tests/memisam_tests.cpp b/tests/memisam_tests.cpp
index 8c499e2..919fd69 100644
--- a/tests/memisam_tests.cpp
+++ b/tests/memisam_tests.cpp
@@ -11,7 +11,7 @@
*/
#include "shard/ISAMTree.h"
-#include "query/irs.h"
+#include "query/rangequery.h"
#include "testing.h"
#include <check.h>
@@ -22,7 +22,7 @@ typedef ISAMTree<Rec> Shard;
START_TEST(t_mbuffer_init)
{
- auto buffer = new MutableBuffer<Rec>(1024, 1024);
+ auto buffer = new MutableBuffer<Rec>(512, 1024);
for (uint64_t i = 512; i > 0; i--) {
uint32_t v = i;
buffer->append({i,v, 1});
@@ -38,7 +38,7 @@ START_TEST(t_mbuffer_init)
buffer->append({i, v, 1});
}
- Shard* shard = new Shard(buffer);
+ Shard* shard = new Shard(buffer->get_buffer_view());
ck_assert_uint_eq(shard->get_record_count(), 512);
delete buffer;
@@ -46,16 +46,16 @@ START_TEST(t_mbuffer_init)
}
-START_TEST(t_irs_init)
+START_TEST(t_rq_init)
{
size_t n = 512;
auto mbuffer1 = create_test_mbuffer<Rec>(n);
auto mbuffer2 = create_test_mbuffer<Rec>(n);
auto mbuffer3 = create_test_mbuffer<Rec>(n);
- auto shard1 = new Shard(mbuffer1);
- auto shard2 = new Shard(mbuffer2);
- auto shard3 = new Shard(mbuffer3);
+ auto shard1 = new Shard(mbuffer1->get_buffer_view());
+ auto shard2 = new Shard(mbuffer2->get_buffer_view());
+ auto shard3 = new Shard(mbuffer3->get_buffer_view());
Shard* shards[3] = {shard1, shard2, shard3};
auto shard4 = new Shard(shards, 3);
@@ -101,18 +101,22 @@ START_TEST(t_point_lookup)
size_t n = 10000;
auto buffer = create_double_seq_mbuffer<Rec>(n, false);
- auto isam = Shard(buffer);
+ auto isam = Shard(buffer->get_buffer_view());
- for (size_t i=0; i<n; i++) {
- Rec r;
- auto rec = (buffer->get_data() + i);
- r.key = rec->rec.key;
- r.value = rec->rec.value;
+ {
+ auto view = buffer->get_buffer_view();
- auto result = isam.point_lookup(r);
- ck_assert_ptr_nonnull(result);
- ck_assert_int_eq(result->rec.key, r.key);
- ck_assert_int_eq(result->rec.value, r.value);
+ for (size_t i=0; i<n; i++) {
+ Rec r;
+ auto rec = view.get(i);
+ r.key = rec->rec.key;
+ r.value = rec->rec.value;
+
+ auto result = isam.point_lookup(r);
+ ck_assert_ptr_nonnull(result);
+ ck_assert_int_eq(result->rec.key, r.key);
+ ck_assert_int_eq(result->rec.value, r.value);
+ }
}
delete buffer;
@@ -125,7 +129,7 @@ START_TEST(t_point_lookup_miss)
size_t n = 10000;
auto buffer = create_double_seq_mbuffer<Rec>(n, false);
- auto isam = Shard(buffer);
+ auto isam = Shard(buffer->get_buffer_view());
for (size_t i=n + 100; i<2*n; i++) {
Rec r;
@@ -146,8 +150,8 @@ START_TEST(t_full_cancelation)
auto buffer = create_double_seq_mbuffer<Rec>(n, false);
auto buffer_ts = create_double_seq_mbuffer<Rec>(n, true);
- Shard* shard = new Shard(buffer);
- Shard* shard_ts = new Shard(buffer_ts);
+ Shard* shard = new Shard(buffer->get_buffer_view());
+ Shard* shard_ts = new Shard(buffer_ts->get_buffer_view());
ck_assert_int_eq(shard->get_record_count(), n);
ck_assert_int_eq(shard->get_tombstone_count(), 0);
@@ -170,11 +174,12 @@ START_TEST(t_full_cancelation)
END_TEST
-START_TEST(t_irs_query)
+/*
+START_TEST(t_rq_query)
{
size_t n=1000;
auto buffer = create_double_seq_mbuffer<Rec>(n);
- auto isam = Shard(buffer);
+ auto isam = Shard(buffer->get_buffer_view());
uint64_t lower_key = 100;
uint64_t upper_key = 250;
@@ -182,15 +187,15 @@ START_TEST(t_irs_query)
size_t k = 100;
size_t cnt[3] = {0};
- irs::Parms<Rec> parms = {lower_key, upper_key, k};
+ rq::Parms<Rec> parms = {lower_key, upper_key, k};
parms.rng = gsl_rng_alloc(gsl_rng_mt19937);
size_t total_samples = 0;
for (size_t i=0; i<1000; i++) {
- auto state = irs::Query<Shard, Rec, false>::get_query_state(&isam, &parms);
- ((irs::State<WRec> *) state)->sample_size = k;
- auto result = irs::Query<Shard, Rec, false>::query(&isam, state, &parms);
+ auto state = rq::Query<Shard, Rec, false>::get_query_state(&isam, &parms);
+ ((rq::State<WRec> *) state)->sample_size = k;
+ auto result = rq::Query<Shard, Rec, false>::query(&isam, state, &parms);
ck_assert_int_eq(result.size(), k);
@@ -199,7 +204,7 @@ START_TEST(t_irs_query)
ck_assert_int_ge(rec.rec.key, lower_key);
}
- irs::Query<Shard, Rec, false>::delete_query_state(state);
+ rq::Query<Shard, Rec, false>::delete_query_state(state);
}
gsl_rng_free(parms.rng);
@@ -208,12 +213,12 @@ START_TEST(t_irs_query)
END_TEST
-START_TEST(t_irs_query_merge)
+START_TEST(t_rq_query_merge)
{
size_t n=1000;
auto buffer = create_double_seq_mbuffer<Rec>(n);
- Shard shard = Shard(buffer);
+ Shard shard = Shard(buffer->get_buffer_view());
uint64_t lower_key = 100;
uint64_t upper_key = 250;
@@ -221,25 +226,25 @@ START_TEST(t_irs_query_merge)
size_t k = 1000;
size_t cnt[3] = {0};
- irs::Parms<Rec> parms = {lower_key, upper_key, k};
+ rq::Parms<Rec> parms = {lower_key, upper_key, k};
parms.rng = gsl_rng_alloc(gsl_rng_mt19937);
std::vector<std::vector<de::Wrapped<Rec>>> results(2);
for (size_t i=0; i<1000; i++) {
- auto state1 = irs::Query<Shard, Rec>::get_query_state(&shard, &parms);
- ((irs::State<WRec> *) state1)->sample_size = k;
- results[0] = irs::Query<Shard, Rec>::query(&shard, state1, &parms);
+ auto state1 = rq::Query<Shard, Rec>::get_query_state(&shard, &parms);
+ ((rq::State<WRec> *) state1)->sample_size = k;
+ results[0] = rq::Query<Shard, Rec>::query(&shard, state1, &parms);
- auto state2 = irs::Query<Shard, Rec>::get_query_state(&shard, &parms);
- ((irs::State<WRec> *) state2)->sample_size = k;
- results[1] = irs::Query<Shard, Rec>::query(&shard, state2, &parms);
+ auto state2 = rq::Query<Shard, Rec>::get_query_state(&shard, &parms);
+ ((rq::State<WRec> *) state2)->sample_size = k;
+ results[1] = rq::Query<Shard, Rec>::query(&shard, state2, &parms);
- irs::Query<Shard, Rec>::delete_query_state(state1);
- irs::Query<Shard, Rec>::delete_query_state(state2);
+ rq::Query<Shard, Rec>::delete_query_state(state1);
+ rq::Query<Shard, Rec>::delete_query_state(state2);
}
- auto merged = irs::Query<Shard, Rec>::merge(results, nullptr);
+ auto merged = rq::Query<Shard, Rec>::merge(results, nullptr);
ck_assert_int_eq(merged.size(), 2*k);
for (size_t i=0; i<merged.size(); i++) {
@@ -253,7 +258,7 @@ START_TEST(t_irs_query_merge)
END_TEST
-START_TEST(t_irs_buffer_query_scan)
+START_TEST(t_rq_buffer_query_scan)
{
size_t n=1000;
auto buffer = create_double_seq_mbuffer<Rec>(n);
@@ -264,15 +269,15 @@ START_TEST(t_irs_buffer_query_scan)
size_t k = 100;
size_t cnt[3] = {0};
- irs::Parms<Rec> parms = {lower_key, upper_key, k};
+ rq::Parms<Rec> parms = {lower_key, upper_key, k};
parms.rng = gsl_rng_alloc(gsl_rng_mt19937);
size_t total_samples = 0;
for (size_t i=0; i<1000; i++) {
- auto state = irs::Query<Shard, Rec, false>::get_buffer_query_state(buffer, &parms);
- ((irs::BufferState<WRec> *) state)->sample_size = k;
- auto result = irs::Query<Shard, Rec, false>::buffer_query(buffer, state, &parms);
+ auto state = rq::Query<Shard, Rec, false>::get_buffer_query_state(buffer, &parms);
+ ((rq::BufferState<WRec> *) state)->sample_size = k;
+ auto result = rq::Query<Shard, Rec, false>::buffer_query(buffer, state, &parms);
ck_assert_int_eq(result.size(), k);
@@ -281,7 +286,7 @@ START_TEST(t_irs_buffer_query_scan)
ck_assert_int_ge(rec.rec.key, lower_key);
}
- irs::Query<Shard, Rec, false>::delete_buffer_query_state(state);
+ rq::Query<Shard, Rec, false>::delete_buffer_query_state(state);
}
gsl_rng_free(parms.rng);
@@ -290,7 +295,7 @@ START_TEST(t_irs_buffer_query_scan)
END_TEST
-START_TEST(t_irs_buffer_query_rejection)
+START_TEST(t_rq_buffer_query_rejection)
{
size_t n=1000;
auto buffer = create_double_seq_mbuffer<Rec>(n);
@@ -301,15 +306,15 @@ START_TEST(t_irs_buffer_query_rejection)
size_t k = 10000;
size_t cnt[3] = {0};
- irs::Parms<Rec> parms = {lower_key, upper_key, k};
+ rq::Parms<Rec> parms = {lower_key, upper_key, k};
parms.rng = gsl_rng_alloc(gsl_rng_mt19937);
size_t total_samples = 0;
for (size_t i=0; i<1000; i++) {
- auto state = irs::Query<Shard, Rec>::get_buffer_query_state(buffer, &parms);
- ((irs::BufferState<WRec> *) state)->sample_size = k;
- auto result = irs::Query<Shard, Rec>::buffer_query(buffer, state, &parms);
+ auto state = rq::Query<Shard, Rec>::get_buffer_query_state(buffer, &parms);
+ ((rq::BufferState<WRec> *) state)->sample_size = k;
+ auto result = rq::Query<Shard, Rec>::buffer_query(buffer, state, &parms);
ck_assert_int_gt(result.size(), 0);
ck_assert_int_le(result.size(), k);
@@ -319,13 +324,14 @@ START_TEST(t_irs_buffer_query_rejection)
ck_assert_int_ge(rec.rec.key, lower_key);
}
- irs::Query<Shard, Rec>::delete_buffer_query_state(state);
+ rq::Query<Shard, Rec>::delete_buffer_query_state(state);
}
gsl_rng_free(parms.rng);
delete buffer;
}
END_TEST
+*/
Suite *unit_testing()
@@ -334,7 +340,7 @@ Suite *unit_testing()
TCase *create = tcase_create("de::ISAMTree constructor Testing");
tcase_add_test(create, t_mbuffer_init);
- tcase_add_test(create, t_irs_init);
+ tcase_add_test(create, t_rq_init);
tcase_set_timeout(create, 100);
suite_add_tcase(unit, create);
@@ -349,14 +355,15 @@ Suite *unit_testing()
tcase_add_test(lookup, t_point_lookup_miss);
suite_add_tcase(unit, lookup);
-
- TCase *sampling = tcase_create("de:ISAMTree::ISAMTreeQuery Testing");
- tcase_add_test(sampling, t_irs_query);
- tcase_add_test(sampling, t_irs_query_merge);
- tcase_add_test(sampling, t_irs_buffer_query_rejection);
- tcase_add_test(sampling, t_irs_buffer_query_scan);
+ /*
+ TCase *sampling = tcase_create("de:ISAMTree::IRS Testing");
+ tcase_add_test(sampling, t_rq_query);
+ tcase_add_test(sampling, t_rq_query_merge);
+ tcase_add_test(sampling, t_rq_buffer_query_rejection);
+ tcase_add_test(sampling, t_rq_buffer_query_scan);
tcase_set_timeout(sampling, 100);
suite_add_tcase(unit, sampling);
+ */
return unit;
}
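Note (not part of the change): the updated t_point_lookup test reads records through a scoped BufferView (view.get(i)) rather than raw pointer arithmetic over buffer->get_data(), so the view's lifetime is bounded by the enclosing block. A condensed sketch of that access pattern, under the same assumptions as above:

    // Condensed from t_point_lookup; keeps the BufferView alive only while
    // records are being read.
    {
        auto view = buffer->get_buffer_view();

        for (size_t i = 0; i < n; i++) {
            Rec r;
            auto rec = view.get(i);      // Wrapped<Rec>* for the i-th record
            r.key = rec->rec.key;
            r.value = rec->rec.value;

            auto result = isam.point_lookup(r);
            ck_assert_ptr_nonnull(result);
        }
    }   // view released here, before the buffer itself is deleted

Scoping the view this way presumably matters because the view holds onto the buffer's contents while it exists; the shard keeps its own copy of the records, so it does not need the view after construction.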
diff --git a/tests/testing.h b/tests/testing.h
index e1d3402..4e660dd 100644
--- a/tests/testing.h
+++ b/tests/testing.h
@@ -77,7 +77,7 @@ static bool roughly_equal(int n1, int n2, size_t mag, double epsilon) {
}
static de::MutableBuffer<PRec> *create_2d_mbuffer(size_t cnt) {
- auto buffer = new de::MutableBuffer<PRec>(cnt, cnt);
+ auto buffer = new de::MutableBuffer<PRec>(cnt/2, cnt);
for (int64_t i=0; i<cnt; i++) {
buffer->append({rand(), rand()});
@@ -87,7 +87,7 @@ static de::MutableBuffer<PRec> *create_2d_mbuffer(size_t cnt) {
}
static de::MutableBuffer<PRec> *create_2d_sequential_mbuffer(size_t cnt) {
- auto buffer = new de::MutableBuffer<PRec>(cnt, cnt);
+ auto buffer = new de::MutableBuffer<PRec>(cnt/2, cnt);
for (int64_t i=0; i<cnt; i++) {
buffer->append({i, i});
}
@@ -98,7 +98,7 @@ static de::MutableBuffer<PRec> *create_2d_sequential_mbuffer(size_t cnt) {
template <de::KVPInterface R>
static de::MutableBuffer<R> *create_test_mbuffer(size_t cnt)
{
- auto buffer = new de::MutableBuffer<R>(cnt, cnt);
+ auto buffer = new de::MutableBuffer<R>(cnt/2, cnt);
R rec;
for (size_t i = 0; i < cnt; i++) {
@@ -119,7 +119,7 @@ template <de::KVPInterface R>
static de::MutableBuffer<R> *create_sequential_mbuffer(decltype(R::key) start, decltype(R::key) stop)
{
size_t cnt = stop - start;
- auto buffer = new de::MutableBuffer<R>(cnt, cnt);
+ auto buffer = new de::MutableBuffer<R>(cnt/2, cnt);
for (size_t i=start; i<stop; i++) {
R rec;
@@ -139,7 +139,7 @@ static de::MutableBuffer<R> *create_sequential_mbuffer(decltype(R::key) start, d
template <de::KVPInterface R>
static de::MutableBuffer<R> *create_test_mbuffer_tombstones(size_t cnt, size_t ts_cnt)
{
- auto buffer = new de::MutableBuffer<R>(cnt, ts_cnt);
+ auto buffer = new de::MutableBuffer<R>(cnt/2, cnt);
std::vector<std::pair<uint64_t, uint32_t>> tombstones;
@@ -171,7 +171,7 @@ template <typename R>
requires de::WeightedRecordInterface<R> && de::KVPInterface<R>
static de::MutableBuffer<R> *create_weighted_mbuffer(size_t cnt)
{
- auto buffer = new de::MutableBuffer<R>(cnt, cnt);
+ auto buffer = new de::MutableBuffer<R>(cnt/2, cnt);
// Put in half of the count with weight one.
for (uint32_t i=0; i< cnt / 2; i++) {
@@ -194,7 +194,7 @@ static de::MutableBuffer<R> *create_weighted_mbuffer(size_t cnt)
template <de::KVPInterface R>
static de::MutableBuffer<R> *create_double_seq_mbuffer(size_t cnt, bool ts=false)
{
- auto buffer = new de::MutableBuffer<R>(cnt, cnt);
+ auto buffer = new de::MutableBuffer<R>(cnt/2, cnt);
for (size_t i = 0; i < cnt / 2; i++) {
R rec;