author     Douglas Rumbaugh <dbr4@psu.edu>    2023-05-17 10:58:57 -0400
committer  Douglas Rumbaugh <dbr4@psu.edu>    2023-05-17 10:58:57 -0400
commit     75a8418b580234521b5fa23340bee959c357acf9 (patch)
tree       5c32d5fb20749a65a642a0d969f7c47adc4691b7 /include
parent     ff000799c3254f52e0beabbe9c62d10c3fc4178e (diff)
download   dynamic-extension-75a8418b580234521b5fa23340bee959c357acf9.tar.gz
Removed unnecessary tagging parameter from shards and levels
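
For a quick before/after view, these are the constructor signatures the patch touches (copied from the hunks below; the trailing bool tagging / DELETE_TAGGING argument is the one removed):

    // include/framework/InternalLevel.h
    InternalLevel(ssize_t level_no, size_t shard_cap, bool tagging);   // before
    InternalLevel(ssize_t level_no, size_t shard_cap);                 // after

    // include/shard/WIRS.h
    WIRS(MutableBuffer<R>* buffer, BloomFilter* bf, bool tagging);     // before
    WIRS(MutableBuffer<R>* buffer, BloomFilter* bf);                   // after

    // include/shard/MemISAM.h
    MemISAM(MutableBuffer<R>* buffer, BloomFilter* bf, bool tagging);  // before
    MemISAM(MutableBuffer<R>* buffer, BloomFilter* bf);                // after

The shard-merging constructors, InternalLevel<R>::merge_levels(), and the corresponding call sites lose the flag in the same way.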
Diffstat (limited to 'include')
-rw-r--r--   include/framework/DynamicExtension.h   15
-rw-r--r--   include/framework/InternalLevel.h       41
-rw-r--r--   include/shard/MemISAM.h                 29
-rw-r--r--   include/shard/WIRS.h                    27
4 files changed, 52 insertions, 60 deletions
diff --git a/include/framework/DynamicExtension.h b/include/framework/DynamicExtension.h
index 1d9ee76..fd42c5f 100644
--- a/include/framework/DynamicExtension.h
+++ b/include/framework/DynamicExtension.h
@@ -359,7 +359,7 @@ public:
}
}
- shards.emplace_back(new Shard(get_buffer(), nullptr, DELETE_TAGGING));
+ shards.emplace_back(new Shard(get_buffer(), nullptr));
Shard *shards_array[shards.size()];
@@ -370,7 +370,7 @@ public:
}
}
- Shard *flattened = new Shard(shards_array, j, nullptr, DELETE_TAGGING);
+ Shard *flattened = new Shard(shards_array, j, nullptr);
for (auto shard : shards) {
delete shard;
@@ -446,7 +446,7 @@ private:
if (new_idx > 0) {
assert(m_levels[new_idx - 1]->get_shard(0)->get_tombstone_count() == 0);
}
- m_levels.emplace_back(new InternalLevel<R>(new_idx, new_shard_cnt, DELETE_TAGGING));
+ m_levels.emplace_back(new InternalLevel<R>(new_idx, new_shard_cnt));
m_last_level_idx++;
return new_idx;
@@ -526,15 +526,14 @@ private:
// merging two memory levels
if (LSM_LEVELING) {
auto tmp = m_levels[base_level];
- m_levels[base_level] = InternalLevel<R>::merge_levels(m_levels[base_level], m_levels[incoming_level],
- DELETE_TAGGING, rng);
+ m_levels[base_level] = InternalLevel<R>::merge_levels(m_levels[base_level], m_levels[incoming_level], rng);
mark_as_unused(tmp);
} else {
m_levels[base_level]->append_merged_shards(m_levels[incoming_level], rng);
}
mark_as_unused(m_levels[incoming_level]);
- m_levels[incoming_level] = new InternalLevel<R>(incoming_level, (LSM_LEVELING) ? 1 : m_scale_factor, DELETE_TAGGING);
+ m_levels[incoming_level] = new InternalLevel<R>(incoming_level, (LSM_LEVELING) ? 1 : m_scale_factor);
}
@@ -543,9 +542,9 @@ private:
if (LSM_LEVELING) {
// FIXME: Kludgey implementation due to interface constraints.
auto old_level = m_levels[0];
- auto temp_level = new InternalLevel<R>(0, 1, DELETE_TAGGING);
+ auto temp_level = new InternalLevel<R>(0, 1);
temp_level->append_mem_table(buffer, rng);
- auto new_level = InternalLevel<R>::merge_levels(old_level, temp_level, DELETE_TAGGING, rng);
+ auto new_level = InternalLevel<R>::merge_levels(old_level, temp_level, rng);
m_levels[0] = new_level;
delete temp_level;
diff --git a/include/framework/InternalLevel.h b/include/framework/InternalLevel.h
index f0f19da..19bfe9f 100644
--- a/include/framework/InternalLevel.h
+++ b/include/framework/InternalLevel.h
@@ -26,12 +26,13 @@ class InternalLevel {
typedef decltype(R::key) K;
typedef decltype(R::value) V;
+ typedef WIRS<R> Shard;
private:
struct InternalLevelStructure {
InternalLevelStructure(size_t cap)
: m_cap(cap)
- , m_shards(new WIRS<R>*[cap]{nullptr})
+ , m_shards(new Shard*[cap]{nullptr})
, m_bfs(new BloomFilter*[cap]{nullptr}) {}
~InternalLevelStructure() {
@@ -45,72 +46,69 @@ private:
}
size_t m_cap;
- WIRS<R>** m_shards;
+ Shard** m_shards;
BloomFilter** m_bfs;
};
public:
- InternalLevel(ssize_t level_no, size_t shard_cap, bool tagging)
+ InternalLevel(ssize_t level_no, size_t shard_cap)
: m_level_no(level_no), m_shard_cnt(0)
- , m_structure(new InternalLevelStructure(shard_cap))
- , m_tagging(tagging) {}
+ , m_structure(new InternalLevelStructure(shard_cap)) {}
// Create a new memory level sharing the shards and repurposing it as previous level_no + 1
// WARNING: for leveling only.
- InternalLevel(InternalLevel* level, bool tagging)
+ InternalLevel(InternalLevel* level)
: m_level_no(level->m_level_no + 1), m_shard_cnt(level->m_shard_cnt)
- , m_structure(level->m_structure)
- , m_tagging(tagging) {
+ , m_structure(level->m_structure) {
assert(m_structure->m_cap == 1 && m_shard_cnt == 1);
}
-
~InternalLevel() {}
// WARNING: for leveling only.
// assuming the base level is the level new level is merging into. (base_level is larger.)
- static InternalLevel* merge_levels(InternalLevel* base_level, InternalLevel* new_level, bool tagging, const gsl_rng* rng) {
+ static InternalLevel* merge_levels(InternalLevel* base_level, InternalLevel* new_level, const gsl_rng* rng) {
assert(base_level->m_level_no > new_level->m_level_no || (base_level->m_level_no == 0 && new_level->m_level_no == 0));
- auto res = new InternalLevel(base_level->m_level_no, 1, tagging);
+ auto res = new InternalLevel(base_level->m_level_no, 1);
res->m_shard_cnt = 1;
res->m_structure->m_bfs[0] =
new BloomFilter(BF_FPR,
new_level->get_tombstone_count() + base_level->get_tombstone_count(),
BF_HASH_FUNCS, rng);
- WIRS<R>* shards[2];
+ Shard* shards[2];
shards[0] = base_level->m_structure->m_shards[0];
shards[1] = new_level->m_structure->m_shards[0];
- res->m_structure->m_shards[0] = new WIRS<R>(shards, 2, res->m_structure->m_bfs[0], tagging);
+ res->m_structure->m_shards[0] = new Shard(shards, 2, res->m_structure->m_bfs[0]);
return res;
}
void append_mem_table(MutableBuffer<R>* buffer, const gsl_rng* rng) {
assert(m_shard_cnt < m_structure->m_cap);
m_structure->m_bfs[m_shard_cnt] = new BloomFilter(BF_FPR, buffer->get_tombstone_count(), BF_HASH_FUNCS, rng);
- m_structure->m_shards[m_shard_cnt] = new WIRS<R>(buffer, m_structure->m_bfs[m_shard_cnt], m_tagging);
+ m_structure->m_shards[m_shard_cnt] = new Shard(buffer, m_structure->m_bfs[m_shard_cnt]);
++m_shard_cnt;
}
void append_merged_shards(InternalLevel* level, const gsl_rng* rng) {
assert(m_shard_cnt < m_structure->m_cap);
m_structure->m_bfs[m_shard_cnt] = new BloomFilter(BF_FPR, level->get_tombstone_count(), BF_HASH_FUNCS, rng);
- m_structure->m_shards[m_shard_cnt] = new WIRS<R>(level->m_structure->m_shards, level->m_shard_cnt, m_structure->m_bfs[m_shard_cnt], m_tagging);
+ m_structure->m_shards[m_shard_cnt] = new Shard(level->m_structure->m_shards, level->m_shard_cnt, m_structure->m_bfs[m_shard_cnt]);
++m_shard_cnt;
}
- WIRS<R> *get_merged_shard() {
- WIRS<R> *shards[m_shard_cnt];
+ Shard *get_merged_shard() {
+ Shard *shards[m_shard_cnt];
for (size_t i=0; i<m_shard_cnt; i++) {
shards[i] = (m_structure->m_shards[i]) ? m_structure->m_shards[i] : nullptr;
}
- return new WIRS<R>(shards, m_shard_cnt, nullptr, m_tagging);
+ return new Shard(shards, m_shard_cnt, nullptr);
}
// Append the sample range in-order.....
- void get_shard_weights(std::vector<uint64_t>& weights, std::vector<std::pair<ShardID, WIRS<R> *>> &shards, std::vector<void*>& shard_states, const K& low, const K& high) {
+ void get_shard_weights(std::vector<uint64_t>& weights, std::vector<std::pair<ShardID, Shard *>> &shards, std::vector<void*>& shard_states, const K& low, const K& high) {
for (size_t i=0; i<m_shard_cnt; i++) {
if (m_structure->m_shards[i]) {
auto shard_state = m_structure->m_shards[i]->get_sample_shard_state(low, high);
@@ -119,7 +117,7 @@ public:
weights.push_back(shard_state->tot_weight);
shard_states.emplace_back(shard_state);
} else {
- WIRS<R>::delete_state(shard_state);
+ Shard::delete_state(shard_state);
}
}
}
@@ -158,7 +156,7 @@ public:
return m_structure->m_shards[shard_no]->get_record_at(idx);
}
- WIRS<R>* get_shard(size_t idx) {
+ Shard* get_shard(size_t idx) {
return m_structure->m_shards[idx];
}
@@ -253,7 +251,6 @@ private:
size_t m_shard_cnt;
size_t m_shard_size_cap;
- bool m_tagging;
std::shared_ptr<InternalLevelStructure> m_structure;
};
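
The typedef WIRS<R> Shard; added at the top of InternalLevel.h is what lets the remaining hunks in this file swap WIRS<R>* for Shard* without changing behavior; a condensed view of the pattern (member names taken from the hunks above):

    typedef WIRS<R> Shard;           // new alias: the level is still backed by WIRS shards

    Shard** m_shards;                // previously WIRS<R>** m_shards;
    Shard*  get_shard(size_t idx);   // previously returned WIRS<R>*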
diff --git a/include/shard/MemISAM.h b/include/shard/MemISAM.h
index dd2fd85..d1f3bb3 100644
--- a/include/shard/MemISAM.h
+++ b/include/shard/MemISAM.h
@@ -43,8 +43,8 @@ constexpr static size_t inmem_isam_node_keyskip = sizeof(K) * inmem_isam_fanout;
static_assert(sizeof(InMemISAMNode) == inmem_isam_node_size, "node size does not match");
public:
- MemISAM(std::string data_fname, size_t record_cnt, size_t tombstone_cnt, BloomFilter *bf, bool tagging)
- : m_reccnt(record_cnt), m_tombstone_cnt(tombstone_cnt), m_deleted_cnt(0), m_tagging(tagging) {
+ MemISAM(std::string data_fname, size_t record_cnt, size_t tombstone_cnt, BloomFilter *bf)
+ : m_reccnt(record_cnt), m_tombstone_cnt(tombstone_cnt), m_deleted_cnt(0) {
// read the stored data file the file
size_t alloc_size = (record_cnt * sizeof(R)) + (CACHELINE_SIZE - (record_cnt * sizeof(R)) % CACHELINE_SIZE);
@@ -71,8 +71,8 @@ public:
}
}
- MemISAM(MutableBuffer<R>* buffer, BloomFilter* bf, bool tagging)
- :m_reccnt(0), m_tombstone_cnt(0), m_isam_nodes(nullptr), m_deleted_cnt(0), m_tagging(tagging) {
+ MemISAM(MutableBuffer<R>* buffer, BloomFilter* bf)
+ :m_reccnt(0), m_tombstone_cnt(0), m_isam_nodes(nullptr), m_deleted_cnt(0) {
size_t alloc_size = (buffer->get_record_count() * sizeof(R)) + (CACHELINE_SIZE - (buffer->get_record_count() * sizeof(R)) % CACHELINE_SIZE);
assert(alloc_size % CACHELINE_SIZE == 0);
@@ -91,13 +91,11 @@ public:
TIMER_START();
while (base < stop) {
- if (!m_tagging) {
- if (!base->is_tombstone() && (base + 1 < stop)
- && *base == *(base + 1) && (base + 1)->is_tombstone()) {
- base += 2;
- mrun_cancelations++;
- continue;
- }
+ if (!base->is_tombstone() && (base + 1 < stop)
+ && *base == *(base + 1) && (base + 1)->is_tombstone()) {
+ base += 2;
+ mrun_cancelations++;
+ continue;
} else if (base->is_deleted()) {
base += 1;
continue;
@@ -126,8 +124,8 @@ public:
//fprintf(stdout, "%ld %ld %ld\n", sort_time, copy_time, level_time);
}
- MemISAM(MemISAM** runs, size_t len, BloomFilter* bf, bool tagging)
- :m_reccnt(0), m_tombstone_cnt(0), m_deleted_cnt(0), m_isam_nodes(nullptr), m_tagging(tagging) {
+ MemISAM(MemISAM** runs, size_t len, BloomFilter* bf)
+ :m_reccnt(0), m_tombstone_cnt(0), m_deleted_cnt(0), m_isam_nodes(nullptr) {
std::vector<Cursor<R>> cursors;
cursors.reserve(len);
@@ -155,7 +153,7 @@ public:
while (pq.size()) {
auto now = pq.peek();
auto next = pq.size() > 1 ? pq.peek(1) : queue_record<R>{nullptr, 0};
- if (!m_tagging && !now.data->is_tombstone() && next.data != nullptr &&
+ if (!now.data->is_tombstone() && next.data != nullptr &&
*now.data == *next.data && next.data->is_tombstone()) {
pq.pop(); pq.pop();
@@ -165,7 +163,7 @@ public:
if (advance_cursor(cursor2)) pq.push(cursor2.ptr, next.version);
} else {
auto& cursor = cursors[now.version];
- if (!m_tagging || !cursor.ptr->is_deleted()) {
+ if (!cursor.ptr->is_deleted()) {
m_data[m_reccnt++] = *cursor.ptr;
if (cursor.ptr->is_tombstone()) {
++m_tombstone_cnt;
@@ -357,7 +355,6 @@ private:
size_t m_tombstone_cnt;
size_t m_internal_node_cnt;
size_t m_deleted_cnt;
- bool m_tagging;
};
}
diff --git a/include/shard/WIRS.h b/include/shard/WIRS.h
index 41766b9..2572caf 100644
--- a/include/shard/WIRS.h
+++ b/include/shard/WIRS.h
@@ -53,9 +53,9 @@ private:
};
public:
- WIRS(MutableBuffer<R>* buffer, BloomFilter* bf, bool tagging)
+ WIRS(MutableBuffer<R>* buffer, BloomFilter* bf)
: m_reccnt(0), m_tombstone_cnt(0), m_deleted_cnt(0), m_total_weight(0), m_rejection_cnt(0),
- m_ts_check_cnt(0), m_tagging(tagging), m_root(nullptr) {
+ m_ts_check_cnt(0), m_root(nullptr) {
size_t alloc_size = (buffer->get_record_count() * sizeof(R)) + (CACHELINE_SIZE - (buffer->get_record_count() * sizeof(R)) % CACHELINE_SIZE);
assert(alloc_size % CACHELINE_SIZE == 0);
@@ -67,13 +67,11 @@ public:
auto stop = base + buffer->get_record_count();
while (base < stop) {
- if (!m_tagging) {
- if (!(base->is_tombstone()) && (base + 1) < stop) {
- if (*base == *(base + 1) && (base + 1)->is_tombstone()) {
- base += 2;
- wirs_cancelations++;
- continue;
- }
+ if (!(base->is_tombstone()) && (base + 1) < stop) {
+ if (*base == *(base + 1) && (base + 1)->is_tombstone()) {
+ base += 2;
+ wirs_cancelations++;
+ continue;
}
} else if (base->is_deleted()) {
base += 1;
@@ -97,9 +95,9 @@ public:
}
}
- WIRS(WIRS** shards, size_t len, BloomFilter* bf, bool tagging)
+ WIRS(WIRS** shards, size_t len, BloomFilter* bf)
: m_reccnt(0), m_tombstone_cnt(0), m_deleted_cnt(0), m_total_weight(0), m_rejection_cnt(0), m_ts_check_cnt(0),
- m_tagging(tagging), m_root(nullptr) {
+ m_root(nullptr) {
std::vector<Cursor<R>> cursors;
cursors.reserve(len);
@@ -125,7 +123,7 @@ public:
while (pq.size()) {
auto now = pq.peek();
auto next = pq.size() > 1 ? pq.peek(1) : queue_record<R>{nullptr, 0};
- if (!m_tagging && !now.data->is_tombstone() && next.data != nullptr &&
+ if (!now.data->is_tombstone() && next.data != nullptr &&
*now.data == *next.data && next.data->is_tombstone()) {
pq.pop(); pq.pop();
@@ -135,7 +133,7 @@ public:
if (advance_cursor<R>(cursor2)) pq.push(cursor2.ptr, next.version);
} else {
auto& cursor = cursors[now.version];
- if (!m_tagging || !cursor.ptr->is_deleted()) {
+ if (!cursor.ptr->is_deleted()) {
m_data[m_reccnt++] = *cursor.ptr;
m_total_weight += cursor.ptr->weight;
if (bf && cursor.ptr->is_tombstone()) {
@@ -295,6 +293,7 @@ public:
return min;
}
+ /*
bool check_delete(K key, V val) {
size_t idx = get_lower_bound(key);
if (idx >= m_reccnt) {
@@ -312,6 +311,7 @@ public:
m_rejection_cnt += result;
return result;
}
+ */
bool check_tombstone(const R& rec) {
m_ts_check_cnt++;
@@ -421,7 +421,6 @@ private:
R* m_data;
std::vector<Alias *> m_alias;
wirs_node<R>* m_root;
- bool m_tagging;
W m_total_weight;
size_t m_reccnt;
size_t m_tombstone_cnt;
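
For reference, the deduplication scan that now runs unconditionally when a shard is built from the mutable buffer, condensed from the MemISAM.h hunk above (record copying, weight accounting, and Bloom-filter insertion are omitted; with the tagging flag gone, tombstone cancellation and tagged-delete filtering are no longer per-shard options):

    while (base < stop) {
        // a record immediately followed by its own tombstone cancels out
        if (!base->is_tombstone() && (base + 1 < stop)
                && *base == *(base + 1) && (base + 1)->is_tombstone()) {
            base += 2;
            continue;
        } else if (base->is_deleted()) {
            // records already tagged as deleted are simply dropped
            base += 1;
            continue;
        }
        // (sketch) everything else is copied into the new shard
        m_data[m_reccnt++] = *base++;
    }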