/*
 * include/framework/structure/MutableBuffer.h
 *
 * Copyright (C) 2023 Douglas B. Rumbaugh <drumbaugh@psu.edu>
 *                    Dong Xie <dongx@psu.edu>
 *
 * Distributed under the Modified BSD License.
 *
 * NOTE: Concerning the tombstone count. One possible approach
 * would be to track the number of tombstones below and above the
 * low water mark separately--this would be straightforward to do.
 * Then, if we *require* that the head only advance up to the LWM,
 * we can get a correct view of the number of tombstones in the
 * active buffer at any point in time, and the BufferView will have
 * a good approximation as well (potentially a few tombstones high,
 * if new inserts land between when the tail pointer and the
 * tombstone count are fetched).
 *
 */
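
/*
 * A minimal sketch of that scheme (hypothetical; nothing in this file
 * implements it): keep two counters split at the LWM,
 *
 *     std::atomic<size_t> m_tscnt_below_lwm;  // hypothetical member
 *     std::atomic<size_t> m_tscnt_above_lwm;  // hypothetical member
 *
 * append() would bump whichever counter corresponds to the record's
 * position relative to the LWM, and advance_head() (with new_head
 * capped at the LWM) would zero the "below" counter once those records
 * are retired, leaving the "above" counter as an exact count of the
 * tombstones in the active buffer.
 */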
#pragma once

#include <cstdlib>
#include <atomic>
#include <cassert>
#include <functional>
#include <immintrin.h>

#include "psu-util/alignment.h"
#include "util/bf_config.h"
#include "psu-ds/BloomFilter.h"
#include "framework/interface/Record.h"
#include "framework/structure/BufferView.h"

using psudb::CACHELINE_SIZE;

namespace de {

template <RecordInterface R>
class MutableBuffer {
    friend class BufferView<R>;
public:
    MutableBuffer(size_t low_watermark, size_t high_watermark, size_t capacity=0) 
        : m_lwm(low_watermark)
        , m_hwm(high_watermark)
        , m_cap((capacity == 0) ? 2 * high_watermark : capacity)
        , m_tail(0)
        , m_head(0)
        , m_head_refcnt(0)
        , m_old_head(0)
        , m_old_head_refcnt(0)
        , m_data((Wrapped<R> *) psudb::sf_aligned_alloc(CACHELINE_SIZE, m_cap * sizeof(Wrapped<R>)))
        , m_tombstone_filter(new psudb::BloomFilter<R>(BF_FPR, m_hwm, BF_HASH_FUNCS))
        , m_tscnt(0)
        , m_old_tscnt(0)
        , m_active_head_advance(false) 
    {
        assert(m_cap > m_hwm);
        assert(m_hwm > m_lwm);
    }

    ~MutableBuffer() {
        free(m_data);
        delete m_tombstone_filter;
    }

    int append(const R &rec, bool tombstone=false) {
        int64_t pos;
        if ((pos = try_advance_tail()) == -1) return 0;

        Wrapped<R> wrec;
        wrec.rec = rec;
        wrec.header = 0;
        if (tombstone) wrec.set_tombstone();

        m_data[pos] = wrec;
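        /*
         * stash the record's slot index in the upper header bits; the
         * low two bits are left for record flags (e.g. the tombstone
         * bit set above)
         */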
        m_data[pos].header |= (pos << 2);

        if (tombstone) {
            m_tscnt.fetch_add(1);
            if (m_tombstone_filter) m_tombstone_filter->insert(rec);
        }

        return 1;
    }
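
    /*
     * Example usage (a sketch; `buffer` and `rec` are assumed to be in
     * scope):
     *
     *     if (!buffer.append(rec)) {
     *         // the buffer is at its high watermark; trigger a flush
     *         // or reconstruction before retrying
     *     }
     *
     *     buffer.append(rec, true);  // insert a tombstone for rec
     */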

    bool truncate() {
        m_tscnt.store(0);
        m_tail.store(0);
        if (m_tombstone_filter) m_tombstone_filter->clear();

        return true;
    }

    size_t get_record_count() {
        return m_tail - m_head;
    }
    
    size_t get_capacity() {
        return m_cap;
    }

    bool is_full() {
        return get_record_count() >= m_hwm;
    }

    bool is_at_low_watermark() {
        return get_record_count() >= m_lwm;
    }

    size_t get_tombstone_count() {
        return m_tscnt.load();
    }

    bool delete_record(const R& rec) {
        return get_buffer_view().delete_record(rec);
    }

    bool check_tombstone(const R& rec) {
        return get_buffer_view().check_tombstone(rec);
    }

    size_t get_memory_usage() {
        return m_cap * sizeof(Wrapped<R>);
    }

    size_t get_aux_memory_usage() {
        return m_tombstone_filter->get_memory_usage();
    }

    BufferView<R> get_buffer_view() {
        m_head_refcnt.fetch_add(1);
        auto f = std::bind(release_head_reference, (void *) this, m_head.load());
        return BufferView<R>(m_data, m_cap, m_head.load(), m_tail.load(), m_tscnt.load(), m_tombstone_filter, f);
    }
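
    /*
     * Example usage (a sketch; the view accessors shown are assumptions
     * about BufferView's interface, which is defined elsewhere): the
     * returned view pins the current head until the view is destroyed,
     * at which point release_head_reference() drops the reference.
     *
     *     {
     *         auto view = buffer.get_buffer_view();
     *         for (size_t i = 0; i < view.get_record_count(); i++) {
     *             // process view.get(i) ...
     *         }
     *     }   // the view's destructor releases the head reference
     */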

    /*
     * Advance the buffer head following a reconstruction. The current
     * head and head_refcnt are moved into old_head and old_head_refcnt,
     * and then new_head is assigned to head.
     */
    bool advance_head(size_t new_head) {
        assert(new_head > m_head.load());
        assert(new_head <= m_tail.load());

        /* refuse to advance the head while the old head still has references */
        if (m_old_head_refcnt > 0) {
            return false;
        }

        m_active_head_advance.store(true);

        /*
         * the order here is very important. We first store zero to
         * old_head_refcnt (it should be zero already). Then we move the
         * current head into old_head. From this point onward, reference
         * releases against the retiring head are applied to the old head
         * refcnt. We then fold the current head refcnt into the old head
         * refcnt, ensuring that no outstanding references are dropped.
         * Only after all of this do we install the new head.
         */
        m_old_head_refcnt.store(0);

        m_old_head.store(m_head.load());
        m_old_head_refcnt.fetch_add(m_head_refcnt);

        m_head_refcnt.store(0);
        m_head.store(new_head);

        m_active_head_advance.store(false);
        return true;
    }
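
    /*
     * Sketch of the intended call pattern during a reconstruction
     * (hypothetical caller code; view.get_tail() is an assumed
     * accessor):
     *
     *     auto view = buffer.get_buffer_view();
     *     // ... merge the view's records into a new structure ...
     *     while (!buffer.advance_head(view.get_tail())) {
     *         // the old head still has outstanding references;
     *         // back off and retry
     *     }
     */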

    void set_low_watermark(size_t lwm) {
        assert(lwm < m_hwm);
        m_lwm = lwm;
    }

    size_t get_low_watermark() {
        return m_lwm;
    }

    void set_high_watermark(size_t hwm) {
        assert(hwm > m_lwm);
        assert(hwm < m_cap);
        m_hwm = hwm;
    }

    size_t get_high_watermark() {
        return m_hwm;
    }

    size_t get_tail() {
        return m_tail.load();
    }

    /*
     * Note: this returns the available physical storage capacity,
     * *not* how many more records can be inserted before the
     * HWM is reached.
     */
    size_t get_available_capacity() {
        return m_cap - (m_tail.load() - m_old_head.load());
    }

private:
    int64_t try_advance_tail() {
        size_t old_value = m_tail.load();

        /* if full, fail to advance the tail */
        if (old_value >= m_hwm) {
            return -1;
        }
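
        /*
         * note: on failure, compare_exchange_strong reloads the current
         * tail into old_value, so each retry attempts old_value + 1
         * against the freshly observed tail
         */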

        while (!m_tail.compare_exchange_strong(old_value, old_value+1)) {
            /* if full, stop trying and fail to advance the tail */
            if (m_tail.load() >= m_hwm) {
                return -1;
            }

            _mm_pause();
        }

        return old_value;
    }
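
    /*
     * map a logical offset relative to a given head onto a physical
     * index into the circular buffer
     */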

    size_t to_idx(size_t i, size_t head) {
        return (head + i) % m_cap;
    }

    static void release_head_reference(void *buff, size_t head) {
        MutableBuffer<R> *buffer = (MutableBuffer<R> *) buff;

        /*
         * check the old head first. During a head transition, the head
         * being retired is temporarily assigned to *both* head and
         * old_head. As a result, any refcnt updates during this window
         * should be applied to old_head, even if the head being
         * released also matches the current head.
         */
        if (head == buffer->m_old_head.load()) {
            buffer->m_old_head_refcnt.fetch_sub(1);
            /*
             * if the old head refcnt drops to 0, the retired records
             * can be logically freed by setting old_head = head. Before
             * checking the refcnt, spin until any in-progress head
             * advance completes, to avoid racing with advance_head's
             * updates to the heads and their refcnts.
             */
            while (buffer->m_active_head_advance.load()) {
                _mm_pause();
            }

            if (buffer->m_old_head_refcnt.load() == 0) {
                buffer->m_old_head.store(buffer->m_head.load());
            }
        } else if (head == buffer->m_head.load()) {
            buffer->m_head_refcnt.fetch_sub(1);
        }
    }

    size_t m_lwm;
    size_t m_hwm;
    size_t m_cap;
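
    /*
     * the hot atomics below are each aligned to a 64-byte boundary so
     * that they occupy separate cache lines, avoiding false sharing
     * between concurrently-updated counters
     */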
    
    alignas(64) std::atomic<size_t> m_tail;

    alignas(64) std::atomic<size_t> m_head;
    alignas(64) std::atomic<size_t> m_head_refcnt;

    alignas(64) std::atomic<size_t> m_old_head;
    alignas(64) std::atomic<size_t> m_old_head_refcnt;
    
    Wrapped<R>* m_data;
    psudb::BloomFilter<R>* m_tombstone_filter;
    alignas(64) std::atomic<size_t> m_tscnt;
    size_t m_old_tscnt;

    alignas(64) std::atomic<bool> m_active_head_advance;
};

} // namespace de