summaryrefslogtreecommitdiffstats
path: root/include/framework/scheduling/FIFOScheduler.h
blob: ba62f9e459dc9342b83feab88263ab704c951e7b (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
/*
 * include/framework/scheduling/FIFOScheduler.h
 *
 * Copyright (C) 2023 Douglas B. Rumbaugh <drumbaugh@psu.edu> 
 *
 * Distributed under the Modified BSD License.
 *
 */
#pragma once

#include <atomic>
#include <cassert>
#include <condition_variable>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <future>
#include <memory>
#include <mutex>
#include <queue>
#include <thread>
#include <vector>

#include "util/types.h"
#include "framework/interface/Shard.h"
#include "framework/interface/Query.h"
#include "framework/interface/Record.h"
#include "framework/structure/MutableBuffer.h"
#include "framework/util/Configuration.h"
#include "framework/structure/ExtensionStructure.h"
#include "framework/scheduling/Task.h"

#include "ctpl/ctpl.h"
#include "psu-ds/LockedPriorityQueue.h"

namespace de {


/*
 * Scheduler that dispatches queued jobs onto a fixed-size thread pool.
 * Jobs are enqueued via schedule_job() and dispatched by a dedicated
 * scheduling thread; dispatch order is determined by the Task priority
 * queue's comparator (presumably FIFO by enqueue timestamp — confirm
 * against Task's operator< in framework/scheduling/Task.h).
 */
class FIFOScheduler {
private:
    static const size_t DEFAULT_MAX_THREADS = 8;

public:
    /*
     * @param memory_budget  Advisory memory cap; 0 means effectively
     *                       unlimited. NOTE(review): recorded but never
     *                       enforced anywhere in this class.
     * @param thread_cnt     Worker thread count; 0 selects
     *                       DEFAULT_MAX_THREADS.
     */
    FIFOScheduler(size_t memory_budget, size_t thread_cnt)
      : m_memory_budget((memory_budget) ? memory_budget : UINT64_MAX)
      , m_thrd_cnt((thread_cnt) ? thread_cnt : DEFAULT_MAX_THREADS)
      , m_shutdown(false)
      , m_counter(0)      // was previously left default-constructed (indeterminate pre-C++20)
      , m_used_thrds(0)
      , m_used_memory(0)
    {
        m_sched_thrd = std::thread(&FIFOScheduler::run, this);
        m_thrd_pool.resize(m_thrd_cnt);
    }

    ~FIFOScheduler() {
        shutdown();
        /* safe to join: run() uses a predicated wait, so the shutdown
         * notification cannot be lost even if it fires before the
         * scheduler thread reaches m_cv.wait(). */
        m_sched_thrd.join();
    }

    /*
     * Enqueue a job for execution. `size` is the job's resource size
     * (forwarded to the Task), and `args` is passed to `job` when it runs.
     * Thread-safe; wakes the scheduling thread.
     */
    void schedule_job(std::function<void(void*)> job, size_t size, void *args) {
        std::unique_lock<std::mutex> lk(m_cv_lock);
        size_t ts = m_counter.fetch_add(1);   // monotonic timestamp for FIFO ordering
        m_task_queue.push(Task(size, ts, job, args));

        m_cv.notify_all();
    }

    /*
     * Request shutdown of the scheduling thread. The flag is set while
     * holding m_cv_lock and the condition variable is notified here;
     * combined with the predicated wait in run(), this closes the
     * lost-wakeup race that could previously hang the destructor.
     */
    void shutdown() {
        {
            std::unique_lock<std::mutex> lk(m_cv_lock);
            m_shutdown.store(true);
        }
        m_cv.notify_all();
    }

private:
    psudb::LockedPriorityQueue<Task> m_task_queue;

    size_t m_memory_budget;
    size_t m_thrd_cnt;

    std::atomic<bool> m_shutdown;

    std::atomic<size_t> m_counter;
    std::mutex m_cv_lock;
    std::condition_variable m_cv;

    std::thread m_sched_thrd;
    ctpl::thread_pool m_thrd_pool;

    std::atomic<size_t> m_used_thrds;
    std::atomic<size_t> m_used_memory;

    // Pop the highest-priority task and hand it to the pool.
    // Caller must guarantee the queue is non-empty (run() holds m_cv_lock
    // while checking, and schedule_job() pushes under the same lock).
    void schedule_next() {
        assert(m_task_queue.size() > 0);
        auto t = m_task_queue.pop();
        m_thrd_pool.push(t);
    }

    // Scheduling loop: sleep until there is work AND an idle worker (or a
    // shutdown request), then dispatch as many tasks as there are idle
    // threads. Tasks left queued while all workers are busy are dispatched
    // on the next notification, as in the original design.
    void run() {
        do {
            std::unique_lock<std::mutex> cv_lock(m_cv_lock);
            m_cv.wait(cv_lock, [this] {
                return m_shutdown.load()
                    || (m_task_queue.size() > 0 && m_thrd_pool.n_idle() > 0);
            });

            while (m_task_queue.size() > 0 && m_thrd_pool.n_idle() > 0) {
                schedule_next();
            }
        } while (!m_shutdown.load());
    }
};

}