-rw-r--r--  include/framework/DynamicExtension.h | 15
-rw-r--r--  include/framework/InternalLevel.h    | 41
-rw-r--r--  include/shard/MemISAM.h              | 29
-rw-r--r--  include/shard/WIRS.h                 | 27
-rw-r--r--  tests/internal_level_tests.cpp       |  8
-rw-r--r--  tests/memisam_tests.cpp              | 20
-rw-r--r--  tests/wirs_tests.cpp                 | 22
7 files changed, 77 insertions(+), 85 deletions(-)
diff --git a/include/framework/DynamicExtension.h b/include/framework/DynamicExtension.h
index 1d9ee76..fd42c5f 100644
--- a/include/framework/DynamicExtension.h
+++ b/include/framework/DynamicExtension.h
@@ -359,7 +359,7 @@ public:
}
}
- shards.emplace_back(new Shard(get_buffer(), nullptr, DELETE_TAGGING));
+ shards.emplace_back(new Shard(get_buffer(), nullptr));
Shard *shards_array[shards.size()];
@@ -370,7 +370,7 @@ public:
}
}
- Shard *flattened = new Shard(shards_array, j, nullptr, DELETE_TAGGING);
+ Shard *flattened = new Shard(shards_array, j, nullptr);
for (auto shard : shards) {
delete shard;
@@ -446,7 +446,7 @@ private:
if (new_idx > 0) {
assert(m_levels[new_idx - 1]->get_shard(0)->get_tombstone_count() == 0);
}
- m_levels.emplace_back(new InternalLevel<R>(new_idx, new_shard_cnt, DELETE_TAGGING));
+ m_levels.emplace_back(new InternalLevel<R>(new_idx, new_shard_cnt));
m_last_level_idx++;
return new_idx;
@@ -526,15 +526,14 @@ private:
// merging two memory levels
if (LSM_LEVELING) {
auto tmp = m_levels[base_level];
- m_levels[base_level] = InternalLevel<R>::merge_levels(m_levels[base_level], m_levels[incoming_level],
- DELETE_TAGGING, rng);
+ m_levels[base_level] = InternalLevel<R>::merge_levels(m_levels[base_level], m_levels[incoming_level], rng);
mark_as_unused(tmp);
} else {
m_levels[base_level]->append_merged_shards(m_levels[incoming_level], rng);
}
mark_as_unused(m_levels[incoming_level]);
- m_levels[incoming_level] = new InternalLevel<R>(incoming_level, (LSM_LEVELING) ? 1 : m_scale_factor, DELETE_TAGGING);
+ m_levels[incoming_level] = new InternalLevel<R>(incoming_level, (LSM_LEVELING) ? 1 : m_scale_factor);
}
@@ -543,9 +542,9 @@ private:
if (LSM_LEVELING) {
// FIXME: Kludgey implementation due to interface constraints.
auto old_level = m_levels[0];
- auto temp_level = new InternalLevel<R>(0, 1, DELETE_TAGGING);
+ auto temp_level = new InternalLevel<R>(0, 1);
temp_level->append_mem_table(buffer, rng);
- auto new_level = InternalLevel<R>::merge_levels(old_level, temp_level, DELETE_TAGGING, rng);
+ auto new_level = InternalLevel<R>::merge_levels(old_level, temp_level, rng);
m_levels[0] = new_level;
delete temp_level;
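
Taken together, the DynamicExtension.h hunks are one mechanical change: every shard/level construction and merge call drops its trailing DELETE_TAGGING argument. A header-style sketch of the post-patch InternalLevel surface as these call sites use it (the forward declarations are stand-ins for the real framework types, and the class is collected from the hunks rather than complete):

#include <cstddef>
#include <sys/types.h>  // ssize_t

template <typename R> class MutableBuffer;  // stand-in declarations
class BloomFilter;
struct gsl_rng;                             // stand-in for GSL's RNG type

template <typename R>
class InternalLevel {
public:
    // No trailing `bool tagging` on any of these anymore.
    InternalLevel(ssize_t level_no, size_t shard_cap);
    static InternalLevel *merge_levels(InternalLevel *base_level,
                                       InternalLevel *new_level,
                                       const gsl_rng *rng);
    void append_mem_table(MutableBuffer<R> *buffer, const gsl_rng *rng);
    void append_merged_shards(InternalLevel *level, const gsl_rng *rng);
};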
diff --git a/include/framework/InternalLevel.h b/include/framework/InternalLevel.h
index f0f19da..19bfe9f 100644
--- a/include/framework/InternalLevel.h
+++ b/include/framework/InternalLevel.h
@@ -26,12 +26,13 @@ class InternalLevel {
typedef decltype(R::key) K;
typedef decltype(R::value) V;
+ typedef WIRS<R> Shard;
private:
struct InternalLevelStructure {
InternalLevelStructure(size_t cap)
: m_cap(cap)
- , m_shards(new WIRS<R>*[cap]{nullptr})
+ , m_shards(new Shard*[cap]{nullptr})
, m_bfs(new BloomFilter*[cap]{nullptr}) {}
~InternalLevelStructure() {
@@ -45,72 +46,69 @@ private:
}
size_t m_cap;
- WIRS<R>** m_shards;
+ Shard** m_shards;
BloomFilter** m_bfs;
};
public:
- InternalLevel(ssize_t level_no, size_t shard_cap, bool tagging)
+ InternalLevel(ssize_t level_no, size_t shard_cap)
: m_level_no(level_no), m_shard_cnt(0)
- , m_structure(new InternalLevelStructure(shard_cap))
- , m_tagging(tagging) {}
+ , m_structure(new InternalLevelStructure(shard_cap)) {}
// Create a new memory level that shares `level`'s shards, repurposed as level_no + 1
// WARNING: for leveling only.
- InternalLevel(InternalLevel* level, bool tagging)
+ InternalLevel(InternalLevel* level)
: m_level_no(level->m_level_no + 1), m_shard_cnt(level->m_shard_cnt)
- , m_structure(level->m_structure)
- , m_tagging(tagging) {
+ , m_structure(level->m_structure) {
assert(m_structure->m_cap == 1 && m_shard_cnt == 1);
}
-
~InternalLevel() {}
// WARNING: for leveling only.
// assumes base_level is the level that new_level merges into (base_level has the larger level_no)
- static InternalLevel* merge_levels(InternalLevel* base_level, InternalLevel* new_level, bool tagging, const gsl_rng* rng) {
+ static InternalLevel* merge_levels(InternalLevel* base_level, InternalLevel* new_level, const gsl_rng* rng) {
assert(base_level->m_level_no > new_level->m_level_no || (base_level->m_level_no == 0 && new_level->m_level_no == 0));
- auto res = new InternalLevel(base_level->m_level_no, 1, tagging);
+ auto res = new InternalLevel(base_level->m_level_no, 1);
res->m_shard_cnt = 1;
res->m_structure->m_bfs[0] =
new BloomFilter(BF_FPR,
new_level->get_tombstone_count() + base_level->get_tombstone_count(),
BF_HASH_FUNCS, rng);
- WIRS<R>* shards[2];
+ Shard* shards[2];
shards[0] = base_level->m_structure->m_shards[0];
shards[1] = new_level->m_structure->m_shards[0];
- res->m_structure->m_shards[0] = new WIRS<R>(shards, 2, res->m_structure->m_bfs[0], tagging);
+ res->m_structure->m_shards[0] = new Shard(shards, 2, res->m_structure->m_bfs[0]);
return res;
}
void append_mem_table(MutableBuffer<R>* buffer, const gsl_rng* rng) {
assert(m_shard_cnt < m_structure->m_cap);
m_structure->m_bfs[m_shard_cnt] = new BloomFilter(BF_FPR, buffer->get_tombstone_count(), BF_HASH_FUNCS, rng);
- m_structure->m_shards[m_shard_cnt] = new WIRS<R>(buffer, m_structure->m_bfs[m_shard_cnt], m_tagging);
+ m_structure->m_shards[m_shard_cnt] = new Shard(buffer, m_structure->m_bfs[m_shard_cnt]);
++m_shard_cnt;
}
void append_merged_shards(InternalLevel* level, const gsl_rng* rng) {
assert(m_shard_cnt < m_structure->m_cap);
m_structure->m_bfs[m_shard_cnt] = new BloomFilter(BF_FPR, level->get_tombstone_count(), BF_HASH_FUNCS, rng);
- m_structure->m_shards[m_shard_cnt] = new WIRS<R>(level->m_structure->m_shards, level->m_shard_cnt, m_structure->m_bfs[m_shard_cnt], m_tagging);
+ m_structure->m_shards[m_shard_cnt] = new Shard(level->m_structure->m_shards, level->m_shard_cnt, m_structure->m_bfs[m_shard_cnt]);
++m_shard_cnt;
}
- WIRS<R> *get_merged_shard() {
- WIRS<R> *shards[m_shard_cnt];
+ Shard *get_merged_shard() {
+ Shard *shards[m_shard_cnt];
for (size_t i=0; i<m_shard_cnt; i++) {
shards[i] = (m_structure->m_shards[i]) ? m_structure->m_shards[i] : nullptr;
}
- return new WIRS<R>(shards, m_shard_cnt, nullptr, m_tagging);
+ return new Shard(shards, m_shard_cnt, nullptr);
}
// Append the sample range in order.
- void get_shard_weights(std::vector<uint64_t>& weights, std::vector<std::pair<ShardID, WIRS<R> *>> &shards, std::vector<void*>& shard_states, const K& low, const K& high) {
+ void get_shard_weights(std::vector<uint64_t>& weights, std::vector<std::pair<ShardID, Shard *>> &shards, std::vector<void*>& shard_states, const K& low, const K& high) {
for (size_t i=0; i<m_shard_cnt; i++) {
if (m_structure->m_shards[i]) {
auto shard_state = m_structure->m_shards[i]->get_sample_shard_state(low, high);
@@ -119,7 +117,7 @@ public:
weights.push_back(shard_state->tot_weight);
shard_states.emplace_back(shard_state);
} else {
- WIRS<R>::delete_state(shard_state);
+ Shard::delete_state(shard_state);
}
}
}
@@ -158,7 +156,7 @@ public:
return m_structure->m_shards[shard_no]->get_record_at(idx);
}
- WIRS<R>* get_shard(size_t idx) {
+ Shard* get_shard(size_t idx) {
return m_structure->m_shards[idx];
}
@@ -253,7 +251,6 @@ private:
size_t m_shard_cnt;
size_t m_shard_size_cap;
- bool m_tagging;
std::shared_ptr<InternalLevelStructure> m_structure;
};
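
The typedef introduced at the top of this file is the load-bearing change: InternalLevel now names its shard type exactly once, so every WIRS<R> in the body becomes Shard and swapping in a different shard implementation is a one-line edit. A minimal, self-contained sketch of the pattern (the Level class and empty shard structs here are illustrative, not the framework's):

#include <cstddef>

template <typename R> struct WIRS {};     // stand-in for include/shard/WIRS.h
template <typename R> struct MemISAM {};  // stand-in for include/shard/MemISAM.h

template <typename R>
class Level {
    typedef WIRS<R> Shard;  // change this one line to re-shard the level
public:
    Shard *get_shard(size_t idx) { return m_shards[idx]; }
private:
    Shard **m_shards = nullptr;
};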
diff --git a/include/shard/MemISAM.h b/include/shard/MemISAM.h
index dd2fd85..d1f3bb3 100644
--- a/include/shard/MemISAM.h
+++ b/include/shard/MemISAM.h
@@ -43,8 +43,8 @@ constexpr static size_t inmem_isam_node_keyskip = sizeof(K) * inmem_isam_fanout;
static_assert(sizeof(InMemISAMNode) == inmem_isam_node_size, "node size does not match");
public:
- MemISAM(std::string data_fname, size_t record_cnt, size_t tombstone_cnt, BloomFilter *bf, bool tagging)
- : m_reccnt(record_cnt), m_tombstone_cnt(tombstone_cnt), m_deleted_cnt(0), m_tagging(tagging) {
+ MemISAM(std::string data_fname, size_t record_cnt, size_t tombstone_cnt, BloomFilter *bf)
+ : m_reccnt(record_cnt), m_tombstone_cnt(tombstone_cnt), m_deleted_cnt(0) {
// read the stored data from the file
size_t alloc_size = (record_cnt * sizeof(R)) + (CACHELINE_SIZE - (record_cnt * sizeof(R)) % CACHELINE_SIZE);
@@ -71,8 +71,8 @@ public:
}
}
- MemISAM(MutableBuffer<R>* buffer, BloomFilter* bf, bool tagging)
- :m_reccnt(0), m_tombstone_cnt(0), m_isam_nodes(nullptr), m_deleted_cnt(0), m_tagging(tagging) {
+ MemISAM(MutableBuffer<R>* buffer, BloomFilter* bf)
+ :m_reccnt(0), m_tombstone_cnt(0), m_isam_nodes(nullptr), m_deleted_cnt(0) {
size_t alloc_size = (buffer->get_record_count() * sizeof(R)) + (CACHELINE_SIZE - (buffer->get_record_count() * sizeof(R)) % CACHELINE_SIZE);
assert(alloc_size % CACHELINE_SIZE == 0);
@@ -91,13 +91,11 @@ public:
TIMER_START();
while (base < stop) {
- if (!m_tagging) {
- if (!base->is_tombstone() && (base + 1 < stop)
- && *base == *(base + 1) && (base + 1)->is_tombstone()) {
- base += 2;
- mrun_cancelations++;
- continue;
- }
+ if (!base->is_tombstone() && (base + 1 < stop)
+ && *base == *(base + 1) && (base + 1)->is_tombstone()) {
+ base += 2;
+ mrun_cancelations++;
+ continue;
} else if (base->is_deleted()) {
base += 1;
continue;
@@ -126,8 +124,8 @@ public:
//fprintf(stdout, "%ld %ld %ld\n", sort_time, copy_time, level_time);
}
- MemISAM(MemISAM** runs, size_t len, BloomFilter* bf, bool tagging)
- :m_reccnt(0), m_tombstone_cnt(0), m_deleted_cnt(0), m_isam_nodes(nullptr), m_tagging(tagging) {
+ MemISAM(MemISAM** runs, size_t len, BloomFilter* bf)
+ :m_reccnt(0), m_tombstone_cnt(0), m_deleted_cnt(0), m_isam_nodes(nullptr) {
std::vector<Cursor<R>> cursors;
cursors.reserve(len);
@@ -155,7 +153,7 @@ public:
while (pq.size()) {
auto now = pq.peek();
auto next = pq.size() > 1 ? pq.peek(1) : queue_record<R>{nullptr, 0};
- if (!m_tagging && !now.data->is_tombstone() && next.data != nullptr &&
+ if (!now.data->is_tombstone() && next.data != nullptr &&
*now.data == *next.data && next.data->is_tombstone()) {
pq.pop(); pq.pop();
@@ -165,7 +163,7 @@ public:
if (advance_cursor(cursor2)) pq.push(cursor2.ptr, next.version);
} else {
auto& cursor = cursors[now.version];
- if (!m_tagging || !cursor.ptr->is_deleted()) {
+ if (!cursor.ptr->is_deleted()) {
m_data[m_reccnt++] = *cursor.ptr;
if (cursor.ptr->is_tombstone()) {
++m_tombstone_cnt;
@@ -357,7 +355,6 @@ private:
size_t m_tombstone_cnt;
size_t m_internal_node_cnt;
size_t m_deleted_cnt;
- bool m_tagging;
};
}
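
The loop rewrites in this file (and the matching ones in WIRS.h below) all make the same behavioral change: with m_tagging gone, tombstone cancellation and deleted-record skipping are both always active, instead of being gated on opposite values of the flag. A self-contained sketch of the resulting copy loop over a toy record type (the real code operates on R, which exposes the same is_tombstone/is_deleted/operator== interface; the names Record and compact are hypothetical):

#include <cassert>
#include <cstddef>
#include <vector>

struct Record {
    int  key;
    bool tombstone = false;
    bool deleted   = false;
    bool is_tombstone() const { return tombstone; }
    bool is_deleted() const { return deleted; }
    bool operator==(const Record &o) const { return key == o.key; }
};

// Mirrors the post-patch loop: a record immediately followed by its own
// tombstone cancels out; records tagged deleted are dropped; everything
// else is copied through.
std::vector<Record> compact(const std::vector<Record> &sorted) {
    std::vector<Record> out;
    size_t i = 0;
    while (i < sorted.size()) {
        if (!sorted[i].is_tombstone() && i + 1 < sorted.size()
            && sorted[i] == sorted[i + 1] && sorted[i + 1].is_tombstone()) {
            i += 2;  // record + tombstone annihilate
            continue;
        }
        if (sorted[i].is_deleted()) {
            i += 1;  // tagged-deleted record is dropped
            continue;
        }
        out.push_back(sorted[i++]);
    }
    return out;
}

int main() {
    std::vector<Record> run = {
        {1}, {1, true},    // cancels
        {2, false, true},  // skipped
        {3},               // survives
    };
    assert(compact(run).size() == 1 && compact(run)[0].key == 3);
}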
diff --git a/include/shard/WIRS.h b/include/shard/WIRS.h
index 41766b9..2572caf 100644
--- a/include/shard/WIRS.h
+++ b/include/shard/WIRS.h
@@ -53,9 +53,9 @@ private:
};
public:
- WIRS(MutableBuffer<R>* buffer, BloomFilter* bf, bool tagging)
+ WIRS(MutableBuffer<R>* buffer, BloomFilter* bf)
: m_reccnt(0), m_tombstone_cnt(0), m_deleted_cnt(0), m_total_weight(0), m_rejection_cnt(0),
- m_ts_check_cnt(0), m_tagging(tagging), m_root(nullptr) {
+ m_ts_check_cnt(0), m_root(nullptr) {
size_t alloc_size = (buffer->get_record_count() * sizeof(R)) + (CACHELINE_SIZE - (buffer->get_record_count() * sizeof(R)) % CACHELINE_SIZE);
assert(alloc_size % CACHELINE_SIZE == 0);
@@ -67,13 +67,11 @@ public:
auto stop = base + buffer->get_record_count();
while (base < stop) {
- if (!m_tagging) {
- if (!(base->is_tombstone()) && (base + 1) < stop) {
- if (*base == *(base + 1) && (base + 1)->is_tombstone()) {
- base += 2;
- wirs_cancelations++;
- continue;
- }
+ if (!(base->is_tombstone()) && (base + 1) < stop) {
+ if (*base == *(base + 1) && (base + 1)->is_tombstone()) {
+ base += 2;
+ wirs_cancelations++;
+ continue;
}
} else if (base->is_deleted()) {
base += 1;
@@ -97,9 +95,9 @@ public:
}
}
- WIRS(WIRS** shards, size_t len, BloomFilter* bf, bool tagging)
+ WIRS(WIRS** shards, size_t len, BloomFilter* bf)
: m_reccnt(0), m_tombstone_cnt(0), m_deleted_cnt(0), m_total_weight(0), m_rejection_cnt(0), m_ts_check_cnt(0),
- m_tagging(tagging), m_root(nullptr) {
+ m_root(nullptr) {
std::vector<Cursor<R>> cursors;
cursors.reserve(len);
@@ -125,7 +123,7 @@ public:
while (pq.size()) {
auto now = pq.peek();
auto next = pq.size() > 1 ? pq.peek(1) : queue_record<R>{nullptr, 0};
- if (!m_tagging && !now.data->is_tombstone() && next.data != nullptr &&
+ if (!now.data->is_tombstone() && next.data != nullptr &&
*now.data == *next.data && next.data->is_tombstone()) {
pq.pop(); pq.pop();
@@ -135,7 +133,7 @@ public:
if (advance_cursor<R>(cursor2)) pq.push(cursor2.ptr, next.version);
} else {
auto& cursor = cursors[now.version];
- if (!m_tagging || !cursor.ptr->is_deleted()) {
+ if (!cursor.ptr->is_deleted()) {
m_data[m_reccnt++] = *cursor.ptr;
m_total_weight += cursor.ptr->weight;
if (bf && cursor.ptr->is_tombstone()) {
@@ -295,6 +293,7 @@ public:
return min;
}
+ /*
bool check_delete(K key, V val) {
size_t idx = get_lower_bound(key);
if (idx >= m_reccnt) {
@@ -312,6 +311,7 @@ public:
m_rejection_cnt += result;
return result;
}
+ */
bool check_tombstone(const R& rec) {
m_ts_check_cnt++;
@@ -421,7 +421,6 @@ private:
R* m_data;
std::vector<Alias *> m_alias;
wirs_node<R>* m_root;
- bool m_tagging;
W m_total_weight;
size_t m_reccnt;
size_t m_tombstone_cnt;
diff --git a/tests/internal_level_tests.cpp b/tests/internal_level_tests.cpp
index 7842b01..74d29f0 100644
--- a/tests/internal_level_tests.cpp
+++ b/tests/internal_level_tests.cpp
@@ -23,16 +23,16 @@ START_TEST(t_memlevel_merge)
auto tbl1 = create_test_mbuffer<WRec>(100);
auto tbl2 = create_test_mbuffer<WRec>(100);
- auto base_level = new InternalLevel<WRec>(1, 1, false);
+ auto base_level = new InternalLevel<WRec>(1, 1);
base_level->append_mem_table(tbl1, g_rng);
ck_assert_int_eq(base_level->get_record_cnt(), 100);
- auto merging_level = new InternalLevel<WRec>(0, 1, false);
+ auto merging_level = new InternalLevel<WRec>(0, 1);
merging_level->append_mem_table(tbl2, g_rng);
ck_assert_int_eq(merging_level->get_record_cnt(), 100);
auto old_level = base_level;
- base_level = InternalLevel<WRec>::merge_levels(old_level, merging_level, false, g_rng);
+ base_level = InternalLevel<WRec>::merge_levels(old_level, merging_level, g_rng);
delete old_level;
delete merging_level;
@@ -48,7 +48,7 @@ InternalLevel<WRec> *create_test_memlevel(size_t reccnt) {
auto tbl1 = create_test_mbuffer<WRec>(reccnt/2);
auto tbl2 = create_test_mbuffer<WRec>(reccnt/2);
- auto base_level = new InternalLevel<WRec>(1, 2, false);
+ auto base_level = new InternalLevel<WRec>(1, 2);
base_level->append_mem_table(tbl1, g_rng);
base_level->append_mem_table(tbl2, g_rng);
diff --git a/tests/memisam_tests.cpp b/tests/memisam_tests.cpp
index 1609edf..260b47c 100644
--- a/tests/memisam_tests.cpp
+++ b/tests/memisam_tests.cpp
@@ -30,7 +30,7 @@ START_TEST(t_memtable_init)
}
BloomFilter* bf = new BloomFilter(BF_FPR, buffer->get_tombstone_count(), BF_HASH_FUNCS, g_rng);
- M_ISAM* run = new M_ISAM(buffer, bf, false);
+ M_ISAM* run = new M_ISAM(buffer, bf);
ck_assert_uint_eq(run->get_record_count(), 512);
delete bf;
@@ -48,13 +48,13 @@ START_TEST(t_inmemrun_init)
BloomFilter* bf1 = new BloomFilter(100, BF_HASH_FUNCS, g_rng);
BloomFilter* bf2 = new BloomFilter(100, BF_HASH_FUNCS, g_rng);
BloomFilter* bf3 = new BloomFilter(100, BF_HASH_FUNCS, g_rng);
- auto run1 = new M_ISAM(memtable1, bf1, false);
- auto run2 = new M_ISAM(memtable2, bf2, false);
- auto run3 = new M_ISAM(memtable3, bf3, false);
+ auto run1 = new M_ISAM(memtable1, bf1);
+ auto run2 = new M_ISAM(memtable2, bf2);
+ auto run3 = new M_ISAM(memtable3, bf3);
BloomFilter* bf4 = new BloomFilter(100, BF_HASH_FUNCS, g_rng);
M_ISAM* runs[3] = {run1, run2, run3};
- auto run4 = new M_ISAM(runs, 3, bf4, false);
+ auto run4 = new M_ISAM(runs, 3, bf4);
ck_assert_int_eq(run4->get_record_count(), n * 3);
ck_assert_int_eq(run4->get_tombstone_count(), 0);
@@ -103,7 +103,7 @@ START_TEST(t_get_lower_bound_index)
ck_assert_ptr_nonnull(memtable);
BloomFilter* bf = new BloomFilter(100, BF_HASH_FUNCS, g_rng);
- M_ISAM* run = new M_ISAM(memtable, bf, false);
+ M_ISAM* run = new M_ISAM(memtable, bf);
ck_assert_int_eq(run->get_record_count(), n);
ck_assert_int_eq(run->get_tombstone_count(), 0);
@@ -128,7 +128,7 @@ START_TEST(t_get_upper_bound_index)
ck_assert_ptr_nonnull(memtable);
BloomFilter* bf = new BloomFilter(100, BF_HASH_FUNCS, g_rng);
- M_ISAM* run = new M_ISAM(memtable, bf, false);
+ M_ISAM* run = new M_ISAM(memtable, bf);
ck_assert_int_eq(run->get_record_count(), n);
ck_assert_int_eq(run->get_tombstone_count(), 0);
@@ -157,8 +157,8 @@ START_TEST(t_full_cancelation)
BloomFilter* bf2 = new BloomFilter(100, BF_HASH_FUNCS, g_rng);
BloomFilter* bf3 = new BloomFilter(100, BF_HASH_FUNCS, g_rng);
- M_ISAM* run = new M_ISAM(mtable, bf1, false);
- M_ISAM* run_ts = new M_ISAM(mtable_ts, bf2, false);
+ M_ISAM* run = new M_ISAM(mtable, bf1);
+ M_ISAM* run_ts = new M_ISAM(mtable_ts, bf2);
ck_assert_int_eq(run->get_record_count(), n);
ck_assert_int_eq(run->get_tombstone_count(), 0);
@@ -167,7 +167,7 @@ START_TEST(t_full_cancelation)
M_ISAM* runs[] = {run, run_ts};
- M_ISAM* merged = new M_ISAM(runs, 2, bf3, false);
+ M_ISAM* merged = new M_ISAM(runs, 2, bf3);
ck_assert_int_eq(merged->get_tombstone_count(), 0);
ck_assert_int_eq(merged->get_record_count(), 0);
diff --git a/tests/wirs_tests.cpp b/tests/wirs_tests.cpp
index 673bdca..828362d 100644
--- a/tests/wirs_tests.cpp
+++ b/tests/wirs_tests.cpp
@@ -40,7 +40,7 @@ START_TEST(t_mbuffer_init)
}
BloomFilter* bf = new BloomFilter(BF_FPR, mem_table->get_tombstone_count(), BF_HASH_FUNCS, g_rng);
- Shard* shard = new Shard(mem_table, bf, false);
+ Shard* shard = new Shard(mem_table, bf);
ck_assert_uint_eq(shard->get_record_count(), 512);
delete bf;
@@ -58,13 +58,13 @@ START_TEST(t_wirs_init)
BloomFilter* bf1 = new BloomFilter(100, BF_HASH_FUNCS, g_rng);
BloomFilter* bf2 = new BloomFilter(100, BF_HASH_FUNCS, g_rng);
BloomFilter* bf3 = new BloomFilter(100, BF_HASH_FUNCS, g_rng);
- auto shard1 = new Shard(mbuffer1, bf1, false);
- auto shard2 = new Shard(mbuffer2, bf2, false);
- auto shard3 = new Shard(mbuffer3, bf3, false);
+ auto shard1 = new Shard(mbuffer1, bf1);
+ auto shard2 = new Shard(mbuffer2, bf2);
+ auto shard3 = new Shard(mbuffer3, bf3);
BloomFilter* bf4 = new BloomFilter(100, BF_HASH_FUNCS, g_rng);
Shard* shards[3] = {shard1, shard2, shard3};
- auto shard4 = new Shard(shards, 3, bf4, false);
+ auto shard4 = new Shard(shards, 3, bf4);
ck_assert_int_eq(shard4->get_record_count(), n * 3);
ck_assert_int_eq(shard4->get_tombstone_count(), 0);
@@ -113,7 +113,7 @@ START_TEST(t_get_lower_bound_index)
ck_assert_ptr_nonnull(mbuffer);
BloomFilter* bf = new BloomFilter(100, BF_HASH_FUNCS, g_rng);
- Shard* shard = new Shard(mbuffer, bf, false);
+ Shard* shard = new Shard(mbuffer, bf);
ck_assert_int_eq(shard->get_record_count(), n);
ck_assert_int_eq(shard->get_tombstone_count(), 0);
@@ -141,8 +141,8 @@ START_TEST(t_full_cancelation)
BloomFilter* bf2 = new BloomFilter(100, BF_HASH_FUNCS, g_rng);
BloomFilter* bf3 = new BloomFilter(100, BF_HASH_FUNCS, g_rng);
- Shard* shard = new Shard(buffer, bf1, false);
- Shard* shard_ts = new Shard(buffer_ts, bf2, false);
+ Shard* shard = new Shard(buffer, bf1);
+ Shard* shard_ts = new Shard(buffer_ts, bf2);
ck_assert_int_eq(shard->get_record_count(), n);
ck_assert_int_eq(shard->get_tombstone_count(), 0);
@@ -151,7 +151,7 @@ START_TEST(t_full_cancelation)
Shard* shards[] = {shard, shard_ts};
- Shard* merged = new Shard(shards, 2, bf3, false);
+ Shard* merged = new Shard(shards, 2, bf3);
ck_assert_int_eq(merged->get_tombstone_count(), 0);
ck_assert_int_eq(merged->get_record_count(), 0);
@@ -174,7 +174,7 @@ START_TEST(t_weighted_sampling)
auto buffer = create_weighted_mbuffer<WRec>(n);
BloomFilter* bf = new BloomFilter(100, BF_HASH_FUNCS, g_rng);
- Shard* shard = new Shard(buffer, bf, false);
+ Shard* shard = new Shard(buffer, bf);
uint64_t lower_key = 0;
uint64_t upper_key = 5;
@@ -234,7 +234,7 @@ START_TEST(t_tombstone_check)
}
BloomFilter* bf1 = new BloomFilter(100, BF_HASH_FUNCS, g_rng);
- auto shard = new Shard(buffer, bf1, false);
+ auto shard = new Shard(buffer, bf1);
for (size_t i=0; i<tombstones.size(); i++) {
ck_assert(shard->check_tombstone({tombstones[i].first, tombstones[i].second}));