summaryrefslogtreecommitdiffstats
path: root/include/framework
diff options
context:
space:
mode:
authorDouglas Rumbaugh <dbr4@psu.edu>2023-12-21 17:03:39 -0500
committerDouglas Rumbaugh <dbr4@psu.edu>2023-12-21 17:03:39 -0500
commit24a42e300c96e2815bf20be3f6cce3efee1c4303 (patch)
tree15e376296a1e2f8c2ce1f7e49d3ef5a0e72508a9 /include/framework
parentec9253b8cac4c31a9e20c201108d4804f7b68d71 (diff)
downloaddynamic-extension-24a42e300c96e2815bf20be3f6cce3efee1c4303.tar.gz
ExtensionStructure: adjusted leveling logic to avoid unneeded copies
This also reduces the special-case overhead on shards. Previously, shards had to handle a special case when constructing from two other shards, where the first of the two provided shards could be a nullptr; this caused a number of subtle issues (or outright crashes, in some cases) in existing shard implementations.
Diffstat (limited to 'include/framework')
-rw-r--r--include/framework/structure/ExtensionStructure.h19
1 file changed, 14 insertions, 5 deletions
diff --git a/include/framework/structure/ExtensionStructure.h b/include/framework/structure/ExtensionStructure.h
index 3cd55ac..60016a0 100644
--- a/include/framework/structure/ExtensionStructure.h
+++ b/include/framework/structure/ExtensionStructure.h
@@ -347,13 +347,19 @@ public:
*/
inline void reconstruction(level_index base_level, level_index incoming_level) {
if constexpr (L == LayoutPolicy::LEVELING) {
- auto tmp = m_levels[base_level];
- m_levels[base_level] = InternalLevel<R, Shard, Q>::reconstruction(m_levels[base_level].get(), m_levels[incoming_level].get());
+ /* if the base level has a shard, merge the base and incoming together to make a new one */
+ if (m_levels[base_level]->get_shard_count() > 0) {
+ m_levels[base_level] = InternalLevel<R, Shard, Q>::reconstruction(m_levels[base_level].get(), m_levels[incoming_level].get());
+ /* otherwise, we can just move the incoming to the base */
+ } else {
+ m_levels[base_level] = m_levels[incoming_level];
+ }
} else {
m_levels[base_level]->append_level(m_levels[incoming_level].get());
m_levels[base_level]->finalize();
}
+ /* place a new, empty level where the incoming level used to be */
m_levels[incoming_level] = std::shared_ptr<InternalLevel<R, Shard, Q>>(new InternalLevel<R, Shard, Q>(incoming_level, (L == LayoutPolicy::LEVELING) ? 1 : m_scale_factor));
}
@@ -432,10 +438,13 @@ private:
auto old_level = m_levels[0].get();
auto temp_level = new InternalLevel<R, Shard, Q>(0, 1);
temp_level->append_buffer(buffer);
- auto new_level = InternalLevel<R, Shard, Q>::reconstruction(old_level, temp_level);
- m_levels[0] = new_level;
- delete temp_level;
+ if (old_level->get_shard_count() > 0) {
+ m_levels[0] = InternalLevel<R, Shard, Q>::reconstruction(old_level, temp_level);
+ delete temp_level;
+ } else {
+ m_levels[0] = std::shared_ptr<InternalLevel<R, Shard, Q>>(temp_level);
+ }
} else {
m_levels[0]->append_buffer(buffer);
}