From e1232357504fd9c8290a09813a88e5b0c8dbfaf0 Mon Sep 17 00:00:00 2001 From: Haosen Wen Date: Mon, 19 Oct 2020 15:53:53 -0400 Subject: [PATCH 01/56] tentatively bring montage init from ChurnTest to MontageHashTable --- src/rideables/MontageHashTable.hpp | 7 ++++++- src/tests/ChurnTest.hpp | 8 ++++---- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/src/rideables/MontageHashTable.hpp b/src/rideables/MontageHashTable.hpp index 8e09bd4c..04bf2015 100644 --- a/src/rideables/MontageHashTable.hpp +++ b/src/rideables/MontageHashTable.hpp @@ -62,7 +62,12 @@ class MontageHashTable : public RMap, public Recoverable{ std::hash hash_fn; Bucket buckets[idxSize]; GlobalTestConfig* gtc; - MontageHashTable(GlobalTestConfig* gtc_):gtc(gtc_){ }; + MontageHashTable(GlobalTestConfig* gtc_):gtc(gtc_){ + // init Persistent allocator + Persistent::init(); + // init epoch system + pds::init(gtc); + }; optional get(K key, int tid){ diff --git a/src/tests/ChurnTest.hpp b/src/tests/ChurnTest.hpp index 780cd526..32085e6d 100644 --- a/src/tests/ChurnTest.hpp +++ b/src/tests/ChurnTest.hpp @@ -109,11 +109,11 @@ void ChurnTest::init(GlobalTestConfig* gtc){ assert(sigaction(SIGUSR1, &sa, NULL) == 0); #endif - // init Persistent allocator - Persistent::init(); + // // init Persistent allocator + // Persistent::init(); - // init epoch system - pds::init(gtc); + // // init epoch system + // pds::init(gtc); getRideable(gtc); From c2c9922c810657fc2fec79ac147ace2ad908fb38 Mon Sep 17 00:00:00 2001 From: Haosen Wen Date: Tue, 20 Oct 2020 11:33:11 -0400 Subject: [PATCH 02/56] added init_thread to Rideable --- src/Rideable.hpp | 2 ++ src/rideables/MontageHashTable.hpp | 5 +++++ src/tests/ChurnTest.hpp | 3 +-- src/tests/MapChurnTest.hpp | 7 +++++-- 4 files changed, 13 insertions(+), 4 deletions(-) diff --git a/src/Rideable.hpp b/src/Rideable.hpp index f99d8b60..50d0b50b 100644 --- a/src/Rideable.hpp +++ b/src/Rideable.hpp @@ -10,9 +10,11 @@ class GlobalTestConfig; +class LocalTestConfig; class 
Rideable{ public: + virtual void init_thread(GlobalTestConfig* gtc, LocalTestConfig* ltc){}; virtual ~Rideable(){}; }; diff --git a/src/rideables/MontageHashTable.hpp b/src/rideables/MontageHashTable.hpp index 04bf2015..26eeca8b 100644 --- a/src/rideables/MontageHashTable.hpp +++ b/src/rideables/MontageHashTable.hpp @@ -67,8 +67,13 @@ class MontageHashTable : public RMap, public Recoverable{ Persistent::init(); // init epoch system pds::init(gtc); + // init main thread + pds::init_thread(0); }; + void init_thread(GlobalTestConfig* gtc, LocalTestConfig* ltc){ + pds::init_thread(ltc->tid); + } optional get(K key, int tid){ size_t idx=hash_fn(key)%idxSize; diff --git a/src/tests/ChurnTest.hpp b/src/tests/ChurnTest.hpp index 32085e6d..a8c889ca 100644 --- a/src/tests/ChurnTest.hpp +++ b/src/tests/ChurnTest.hpp @@ -54,11 +54,11 @@ class ChurnTest : public Test{ ChurnTest(int p_gets, int p_puts, int p_inserts, int p_removes, int range): ChurnTest(p_gets, p_puts, p_inserts, p_removes, range,0){} void init(GlobalTestConfig* gtc); - void parInit(GlobalTestConfig* gtc, LocalTestConfig* ltc); int execute(GlobalTestConfig* gtc, LocalTestConfig* ltc); void cleanup(GlobalTestConfig* gtc); pthread_barrier_t barrier; + virtual void parInit(GlobalTestConfig* gtc, LocalTestConfig* ltc); virtual void getRideable(GlobalTestConfig* gtc) = 0; virtual void doPrefill(GlobalTestConfig* gtc) = 0; virtual void operation(uint64_t key, int op, int tid) = 0; @@ -89,7 +89,6 @@ void ChurnTest::parInit(GlobalTestConfig* gtc, LocalTestConfig* ltc){ if(ltc->tid==0) doPrefill(gtc); #endif - pds::init_thread(ltc->tid); } void ChurnTest::init(GlobalTestConfig* gtc){ diff --git a/src/tests/MapChurnTest.hpp b/src/tests/MapChurnTest.hpp index 7775d5fa..a20fac06 100644 --- a/src/tests/MapChurnTest.hpp +++ b/src/tests/MapChurnTest.hpp @@ -46,6 +46,11 @@ class MapChurnTest : public ChurnTest{ ChurnTest::init(gtc); } + void parInit(GlobalTestConfig* gtc, LocalTestConfig* ltc){ + ChurnTest::parInit(gtc, ltc); + 
m->init_thread(gtc, ltc); + } + void getRideable(GlobalTestConfig* gtc){ Rideable* ptr = gtc->allocRideable(); m = dynamic_cast*>(ptr); @@ -54,7 +59,6 @@ class MapChurnTest : public ChurnTest{ } } void doPrefill(GlobalTestConfig* gtc){ - pds::init_thread(0); if (this->prefill > 0){ /* Wentao: * to avoid repeated k during prefilling, we instead @@ -91,7 +95,6 @@ class MapChurnTest : public ChurnTest{ m->remove(k,tid); } } - }; template From 93c178fe98503b33221c51f4b9b604a464f6286d Mon Sep 17 00:00:00 2001 From: Haosen Wen Date: Wed, 21 Oct 2020 17:09:29 -0400 Subject: [PATCH 03/56] reduce pds::init_thread in MapChurnTest --- src/tests/MapChurnTest.hpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/tests/MapChurnTest.hpp b/src/tests/MapChurnTest.hpp index a20fac06..dd847a1a 100644 --- a/src/tests/MapChurnTest.hpp +++ b/src/tests/MapChurnTest.hpp @@ -47,8 +47,8 @@ class MapChurnTest : public ChurnTest{ } void parInit(GlobalTestConfig* gtc, LocalTestConfig* ltc){ - ChurnTest::parInit(gtc, ltc); m->init_thread(gtc, ltc); + ChurnTest::parInit(gtc, ltc); } void getRideable(GlobalTestConfig* gtc){ @@ -111,7 +111,6 @@ inline std::string MapChurnTest::fromInt(uint64_t v){ template<> inline void MapChurnTest::doPrefill(GlobalTestConfig* gtc){ // randomly prefill until specified amount of keys are successfully inserted - pds::init_thread(0); if (this->prefill > 0){ std::mt19937_64 gen_k(0); // int stride = this->range/this->prefill; From cc0da9b2af93dcae9caadc1b48d229bfd43b15a1 Mon Sep 17 00:00:00 2001 From: Haosen Wen Date: Wed, 21 Oct 2020 17:42:20 -0400 Subject: [PATCH 04/56] moved initializations of Persistent and pds into rideables --- ext/ycsb-tcd/ycsbc.cc | 2 +- src/rideables/MontageGraph.hpp | 11 +++++++++++ src/rideables/MontageLfHashTable.hpp | 15 +++++++++++++-- src/rideables/MontageMSQueue.hpp | 17 ++++++++++++++--- src/rideables/MontageNatarajanTree.hpp | 17 ++++++++++++++--- src/rideables/MontageQueue.hpp | 14 ++++++++++++-- 
src/tests/GraphRecoveryTest.hpp | 8 ++++---- src/tests/GraphTest.hpp | 6 +++--- src/tests/HeapChurnTest.hpp | 11 ++++++----- src/tests/KVTest.hpp | 11 ++++++----- src/tests/MapTest.hpp | 15 ++++++++------- src/tests/QueueChurnTest.hpp | 13 +++++++------ src/tests/QueueTest.hpp | 13 +++++++------ src/tests/RecoverVerifyTest.hpp | 11 ++++++----- src/tests/SetChurnTest.hpp | 2 +- src/tests/TGraphConstructionTest.hpp | 9 +++++---- src/tests/ToyTest.hpp | 1 + src/tests/YCSBTest.hpp | 13 +++++++------ 18 files changed, 126 insertions(+), 63 deletions(-) diff --git a/ext/ycsb-tcd/ycsbc.cc b/ext/ycsb-tcd/ycsbc.cc index 51a3a47e..369e4490 100644 --- a/ext/ycsb-tcd/ycsbc.cc +++ b/ext/ycsb-tcd/ycsbc.cc @@ -131,7 +131,7 @@ int main(const int argc, char *argv[]) { hwloc_get_type_depth(gtc.topology, HWLOC_OBJ_PU)); std::cout<<"initial affinity built"<tid); + } + tVertex** idxToVertex; // Transient set of transient vertices to index map // Thread-safe and does not leak edges diff --git a/src/rideables/MontageLfHashTable.hpp b/src/rideables/MontageLfHashTable.hpp index 617d1b6b..049ca2d2 100644 --- a/src/rideables/MontageLfHashTable.hpp +++ b/src/rideables/MontageLfHashTable.hpp @@ -79,9 +79,20 @@ class MontageLfHashTable : public RMap{ return mixPtrMark(mptr,true); } public: - MontageLfHashTable(int task_num) : tracker(task_num, 100, 1000, true) {}; + MontageLfHashTable(GlobalTestConfig* gtc) : tracker(gtc->task_num, 100, 1000, true) { + // init Persistent allocator + Persistent::init(); + // init epoch system + pds::init(gtc); + // init main thread + pds::init_thread(0); + }; ~MontageLfHashTable(){}; + void init_thread(GlobalTestConfig* gtc, LocalTestConfig* ltc){ + pds::init_thread(ltc->tid); + } + optional get(K key, int tid); optional put(K key, V val, int tid); bool insert(K key, V val, int tid); @@ -92,7 +103,7 @@ class MontageLfHashTable : public RMap{ template class MontageLfHashTableFactory : public RideableFactory{ Rideable* build(GlobalTestConfig* gtc){ - return new 
MontageLfHashTable(gtc->task_num); + return new MontageLfHashTable(gtc); } }; diff --git a/src/rideables/MontageMSQueue.hpp b/src/rideables/MontageMSQueue.hpp index b9460b95..6b2e907d 100644 --- a/src/rideables/MontageMSQueue.hpp +++ b/src/rideables/MontageMSQueue.hpp @@ -54,14 +54,25 @@ class MontageMSQueue : public RQueue{ RCUTracker tracker; public: - MontageMSQueue(int task_num): + MontageMSQueue(GlobalTestConfig* gtc): global_sn(0), head(nullptr), tail(nullptr), - tracker(task_num, 100, 1000, true){ + tracker(gtc->task_num, 100, 1000, true){ + // init Persistent allocator + Persistent::init(); + // init epoch system + pds::init(gtc); + // init main thread + pds::init_thread(0); + Node* dummy = new Node(); head.store(dummy); tail.store(dummy); } + void init_thread(GlobalTestConfig* gtc, LocalTestConfig* ltc){ + pds::init_thread(ltc->tid); + } + ~MontageMSQueue(){}; void enqueue(T val, int tid); @@ -142,7 +153,7 @@ optional MontageMSQueue::dequeue(int tid){ template class MontageMSQueueFactory : public RideableFactory{ Rideable* build(GlobalTestConfig* gtc){ - return new MontageMSQueue(gtc->task_num); + return new MontageMSQueue(gtc); } }; diff --git a/src/rideables/MontageNatarajanTree.hpp b/src/rideables/MontageNatarajanTree.hpp index c01c1d2a..3f571d5a 100644 --- a/src/rideables/MontageNatarajanTree.hpp +++ b/src/rideables/MontageNatarajanTree.hpp @@ -148,15 +148,26 @@ class MontageNatarajanTree : public RMap{ bool cleanup(K key, int tid); // void doRangeQuery(Node& k1, Node& k2, int tid, Node* root, std::map& res); public: - MontageNatarajanTree(int task_num): tracker(task_num, 100, 1000, true){ + MontageNatarajanTree(GlobalTestConfig* gtc): tracker(gtc->task_num, 100, 1000, true){ + // init Persistent allocator + Persistent::init(); + // init epoch system + pds::init(gtc); + // init main thread + pds::init_thread(0); + r.right.store(new Node(inf2)); r.left.store(&s); s.right.store(new Node(inf1)); s.left.store(new Node(inf0)); - records = new 
padded[task_num]{}; + records = new padded[gtc->task_num]{}; }; ~MontageNatarajanTree(){}; + void init_thread(GlobalTestConfig* gtc, LocalTestConfig* ltc){ + pds::init_thread(ltc->tid); + } + optional get(K key, int tid); optional put(K key, V val, int tid); bool insert(K key, V val, int tid); @@ -168,7 +179,7 @@ class MontageNatarajanTree : public RMap{ template class MontageNatarajanTreeFactory : public RideableFactory{ Rideable* build(GlobalTestConfig* gtc){ - return new MontageNatarajanTree(gtc->task_num); + return new MontageNatarajanTree(gtc); } }; diff --git a/src/rideables/MontageQueue.hpp b/src/rideables/MontageQueue.hpp index 9794bba7..69b53278 100644 --- a/src/rideables/MontageQueue.hpp +++ b/src/rideables/MontageQueue.hpp @@ -64,12 +64,22 @@ class MontageQueue : public RQueue{ std::mutex lock; public: - MontageQueue(int task_num): + MontageQueue(GlobalTestConfig* gtc): global_sn(0), head(nullptr), tail(nullptr){ + // init Persistent allocator + Persistent::init(); + // init epoch system + pds::init(gtc); + // init main thread + pds::init_thread(0); } ~MontageQueue(){}; + void init_thread(GlobalTestConfig* gtc, LocalTestConfig* ltc){ + pds::init_thread(ltc->tid); + } + void enqueue(T val, int tid); optional dequeue(int tid); }; @@ -119,7 +129,7 @@ optional MontageQueue::dequeue(int tid){ template class MontageQueueFactory : public RideableFactory{ Rideable* build(GlobalTestConfig* gtc){ - return new MontageQueue(gtc->task_num); + return new MontageQueue(gtc); } }; diff --git a/src/tests/GraphRecoveryTest.hpp b/src/tests/GraphRecoveryTest.hpp index 7cfafffd..e423c2ac 100644 --- a/src/tests/GraphRecoveryTest.hpp +++ b/src/tests/GraphRecoveryTest.hpp @@ -35,8 +35,8 @@ class GraphRecoveryTest : public Test { void init(GlobalTestConfig *gtc) { std::cout << "initializing" << std::endl; - Persistent::init(); - pds::init(gtc); + // Persistent::init(); + // pds::init(gtc); pthread_barrier_init(&pthread_barrier, NULL, gtc->task_num); @@ -59,7 +59,7 @@ class 
GraphRecoveryTest : public Test { errexit("GraphRecoveryTest must be run on Recoverable type object."); } - pds::init_thread(0); + // pds::init_thread(0); /* set interval to inf so this won't be killed by timeout */ gtc->interval = numeric_limits::max(); std::cout << "Finished init func" << std::endl; @@ -101,7 +101,7 @@ class GraphRecoveryTest : public Test { void parInit(GlobalTestConfig *gtc, LocalTestConfig *ltc) { pthread_barrier_wait(&pthread_barrier); auto begin = chrono::high_resolution_clock::now(); - pds::init_thread(ltc->tid); + // pds::init_thread(ltc->tid); // Loop through the files in parallel int num_threads = gtc->task_num; int tid = ltc->tid; diff --git a/src/tests/GraphTest.hpp b/src/tests/GraphTest.hpp index 8b5169d1..c895d824 100644 --- a/src/tests/GraphTest.hpp +++ b/src/tests/GraphTest.hpp @@ -46,8 +46,8 @@ class GraphTest : public Test { } void init(GlobalTestConfig *gtc) { - Persistent::init(); - pds::init(gtc); + // Persistent::init(); + // pds::init(gtc); uint64_t new_ops = total_ops / gtc->task_num; thd_ops = new uint64_t[gtc->task_num]; for (int i = 0; itask_num; i++) { @@ -98,7 +98,7 @@ class GraphTest : public Test { } void parInit(GlobalTestConfig *gtc, LocalTestConfig *ltc) { - pds::init_thread(ltc->tid); + // pds::init_thread(ltc->tid); size_t x = max_verts; size_t numEdges = (x * x) * 0.5; std::random_device rd; diff --git a/src/tests/HeapChurnTest.hpp b/src/tests/HeapChurnTest.hpp index 6484a0d1..23c53c8b 100644 --- a/src/tests/HeapChurnTest.hpp +++ b/src/tests/HeapChurnTest.hpp @@ -29,15 +29,16 @@ class HeapChurnTest : public Test{ inline V fromInt(uint64_t v); void parInit(GlobalTestConfig* gtc, LocalTestConfig* ltc){ - pds::init_thread(ltc->tid); + q->init_thread(gtc, ltc); + // pds::init_thread(ltc->tid); } void init(GlobalTestConfig* gtc){ - // init Persistent allocator - Persistent::init(); + // // init Persistent allocator + // Persistent::init(); - // init epoch system - pds::init(gtc); + // // init epoch system + // 
pds::init(gtc); getRideable(gtc); diff --git a/src/tests/KVTest.hpp b/src/tests/KVTest.hpp index f6bdfc2b..e1a11ad5 100644 --- a/src/tests/KVTest.hpp +++ b/src/tests/KVTest.hpp @@ -39,14 +39,15 @@ class KVTest : public Test{ } } void parInit(GlobalTestConfig* gtc, LocalTestConfig* ltc){ - pds::init_thread(ltc->tid); + m->init_thread(gtc, ltc); + // pds::init_thread(ltc->tid); } void init(GlobalTestConfig* gtc){ - // init Persistent allocator - Persistent::init(); + // // init Persistent allocator + // Persistent::init(); - // init epoch system - pds::init(gtc); + // // init epoch system + // pds::init(gtc); if(gtc->checkEnv("ValueSize")){ val_size = atoi((gtc->getEnv("ValueSize")).c_str()); diff --git a/src/tests/MapTest.hpp b/src/tests/MapTest.hpp index 3b9f4e8b..304f72da 100644 --- a/src/tests/MapTest.hpp +++ b/src/tests/MapTest.hpp @@ -92,11 +92,12 @@ class MapTest : public Test{ inline K fromInt(uint64_t v); void parInit(GlobalTestConfig* gtc, LocalTestConfig* ltc){ + m->init_thread(gtc, ltc); #ifdef PRONTO if(ltc->tid==0) doPrefill(gtc); #endif - pds::init_thread(ltc->tid); + // pds::init_thread(ltc->tid); } void init(GlobalTestConfig* gtc){ #ifdef PRONTO @@ -114,11 +115,11 @@ class MapTest : public Test{ assert(sigaction(SIGSEGV, &sa, NULL) == 0); assert(sigaction(SIGUSR1, &sa, NULL) == 0); #endif - // init Persistent allocator - Persistent::init(); + // // init Persistent allocator + // Persistent::init(); - // init epoch system - pds::init(gtc); + // // init epoch system + // pds::init(gtc); getRideable(gtc); @@ -176,7 +177,7 @@ class MapTest : public Test{ } } void doPrefill(GlobalTestConfig* gtc){ - pds::init_thread(0); + // pds::init_thread(0); if (this->prefill > 0){ /* Wentao: * to avoid repeated k during prefilling, we instead @@ -261,7 +262,7 @@ inline std::string MapTest::fromInt(uint64_t v){ template<> inline void MapTest::doPrefill(GlobalTestConfig* gtc){ // randomly prefill until specified amount of keys are successfully inserted - 
pds::init_thread(0); + // pds::init_thread(0); if (this->prefill > 0){ std::mt19937_64 gen_k(0); // int stride = this->range/this->prefill; diff --git a/src/tests/QueueChurnTest.hpp b/src/tests/QueueChurnTest.hpp index 543315e7..d799fd15 100644 --- a/src/tests/QueueChurnTest.hpp +++ b/src/tests/QueueChurnTest.hpp @@ -65,11 +65,12 @@ class QueueChurnTest : public Test{ } void parInit(GlobalTestConfig* gtc, LocalTestConfig* ltc){ + q->init_thread(gtc, ltc); #ifdef PRONTO if(ltc->tid==0) doPrefill(gtc); #endif - pds::init_thread(ltc->tid); + // pds::init_thread(ltc->tid); } void init(GlobalTestConfig* gtc){ @@ -88,11 +89,11 @@ class QueueChurnTest : public Test{ assert(sigaction(SIGSEGV, &sa, NULL) == 0); assert(sigaction(SIGUSR1, &sa, NULL) == 0); #endif - // init Persistent allocator - Persistent::init(); + // // init Persistent allocator + // Persistent::init(); - // init epoch system - pds::init(gtc); + // // init epoch system + // pds::init(gtc); if(gtc->checkEnv("ValueSize")){ val_size = atoi((gtc->getEnv("ValueSize")).c_str()); @@ -177,7 +178,7 @@ class QueueChurnTest : public Test{ } } void doPrefill(GlobalTestConfig* gtc){ - pds::init_thread(0); + // pds::init_thread(0); if (this->prefill > 0){ int i = 0; while(iprefill){ diff --git a/src/tests/QueueTest.hpp b/src/tests/QueueTest.hpp index 5294aa94..c386ffcd 100644 --- a/src/tests/QueueTest.hpp +++ b/src/tests/QueueTest.hpp @@ -83,11 +83,12 @@ class QueueTest : public Test{ // } void parInit(GlobalTestConfig* gtc, LocalTestConfig* ltc){ + q->init_thread(gtc, ltc); #ifdef PRONTO if(ltc->tid==0) doPrefill(gtc,0); #endif - pds::init_thread(ltc->tid); + // pds::init_thread(ltc->tid); } void init(GlobalTestConfig* gtc){ @@ -106,11 +107,11 @@ class QueueTest : public Test{ assert(sigaction(SIGSEGV, &sa, NULL) == 0); assert(sigaction(SIGUSR1, &sa, NULL) == 0); #endif - // init Persistent allocator - Persistent::init(); + // // init Persistent allocator + // Persistent::init(); - // init epoch system - 
pds::init(gtc); + // // init epoch system + // pds::init(gtc); if(gtc->checkEnv("ValueSize")){ val_size = atoi((gtc->getEnv("ValueSize")).c_str()); @@ -207,7 +208,7 @@ class QueueTest : public Test{ } } void doPrefill(GlobalTestConfig* gtc, int tid){ - pds::init_thread(tid); + // pds::init_thread(tid); if(this->prefill > 0){ int i = 0; for(i = 0; i < this->prefill; i++){ diff --git a/src/tests/RecoverVerifyTest.hpp b/src/tests/RecoverVerifyTest.hpp index 489dbb84..0dfff9e3 100644 --- a/src/tests/RecoverVerifyTest.hpp +++ b/src/tests/RecoverVerifyTest.hpp @@ -33,7 +33,8 @@ class RecoverVerifyTest : public Test{ template void RecoverVerifyTest::parInit(GlobalTestConfig* gtc, LocalTestConfig* ltc){ - pds::init_thread(ltc->tid); + m->init_thread(gtc, ltc); + // pds::init_thread(ltc->tid); } template @@ -41,11 +42,11 @@ void RecoverVerifyTest::init(GlobalTestConfig* gtc){ if (gtc->task_num != 1){ errexit("RecoverVerifyTest only runs on single thread."); } - // init Persistent allocator - Persistent::init(); + // // init Persistent allocator + // Persistent::init(); - // init epoch system - pds::init(gtc); + // // init epoch system + // pds::init(gtc); Rideable* ptr = gtc->allocRideable(); m = dynamic_cast*>(ptr); diff --git a/src/tests/SetChurnTest.hpp b/src/tests/SetChurnTest.hpp index 2ae4130b..92a87dd8 100644 --- a/src/tests/SetChurnTest.hpp +++ b/src/tests/SetChurnTest.hpp @@ -28,7 +28,7 @@ class SetChurnTest : public ChurnTest{ } } void doPrefill(GlobalTestConfig* gtc){ - pds::init_thread(0); + // pds::init_thread(0); // prefill deterministically: if (this->prefill > 0){ /* Wentao: diff --git a/src/tests/TGraphConstructionTest.hpp b/src/tests/TGraphConstructionTest.hpp index c937c6a6..44100aa0 100644 --- a/src/tests/TGraphConstructionTest.hpp +++ b/src/tests/TGraphConstructionTest.hpp @@ -34,8 +34,8 @@ class TGraphConstructionTest : public Test { void init(GlobalTestConfig *gtc) { std::cout << "initializing" << std::endl; - Persistent::init(); - pds::init(gtc); + 
// Persistent::init(); + // pds::init(gtc); uint64_t new_ops = total_ops / gtc->task_num; thd_ops = new uint64_t[gtc->task_num]; for (auto i = 0; itask_num; i++) { @@ -51,7 +51,7 @@ class TGraphConstructionTest : public Test { errexit("TGraphConstructionTest must be run on RGraph type object."); } - pds::init_thread(0); + // pds::init_thread(0); /* set interval to inf so this won't be killed by timeout */ gtc->interval = numeric_limits::max(); std::cout << "Finished init func" << std::endl; @@ -81,7 +81,8 @@ class TGraphConstructionTest : public Test { return 0; } void parInit(GlobalTestConfig *gtc, LocalTestConfig *ltc) { - pds::init_thread(ltc->tid); + // pds::init_thread(ltc->tid); + g->init_thread(gtc, ltc); // Loop through the files in parallel int num_threads = gtc->task_num; int tid = ltc->tid; diff --git a/src/tests/ToyTest.hpp b/src/tests/ToyTest.hpp index 83e52ec9..23625c6b 100644 --- a/src/tests/ToyTest.hpp +++ b/src/tests/ToyTest.hpp @@ -27,6 +27,7 @@ class ToyTest : public Test{ // called by all threads in parallel void parInit(GlobalTestConfig* gtc, LocalTestConfig* ltc){ + t->init_thread(gtc, ltc); // pds::init_thread(ltc->tid); } // runs the test diff --git a/src/tests/YCSBTest.hpp b/src/tests/YCSBTest.hpp index 147239fb..4f2aaa67 100644 --- a/src/tests/YCSBTest.hpp +++ b/src/tests/YCSBTest.hpp @@ -43,14 +43,15 @@ class YCSBTest : public Test{ } } void parInit(GlobalTestConfig* gtc, LocalTestConfig* ltc){ - pds::init_thread(ltc->tid); + m->init_thread(gtc, ltc); + // pds::init_thread(ltc->tid); } void init(GlobalTestConfig* gtc){ - // init Persistent allocator - Persistent::init(); + // // init Persistent allocator + // Persistent::init(); - // init epoch system - pds::init(gtc); + // // init epoch system + // pds::init(gtc); if(gtc->checkEnv("ValueSize")){ val_size = atoi((gtc->getEnv("ValueSize")).c_str()); @@ -120,7 +121,7 @@ class YCSBTest : public Test{ void doPrefill(std::string infile_name, int tid){ std::ifstream infile(infile_name); 
std::string cmd; - pds::init_thread(tid); + // pds::init_thread(tid); while(getline(infile, cmd)){ operation(cmd, tid); From a95f11cbac192d5ab36ba08d5b377fb3ecb021a1 Mon Sep 17 00:00:00 2001 From: Haosen Wen Date: Thu, 22 Oct 2020 15:29:13 -0400 Subject: [PATCH 05/56] fixed graph-related tests --- src/tests/GraphRecoveryTest.hpp | 1 + src/tests/GraphTest.hpp | 1 + 2 files changed, 2 insertions(+) diff --git a/src/tests/GraphRecoveryTest.hpp b/src/tests/GraphRecoveryTest.hpp index e423c2ac..1f055f35 100644 --- a/src/tests/GraphRecoveryTest.hpp +++ b/src/tests/GraphRecoveryTest.hpp @@ -101,6 +101,7 @@ class GraphRecoveryTest : public Test { void parInit(GlobalTestConfig *gtc, LocalTestConfig *ltc) { pthread_barrier_wait(&pthread_barrier); auto begin = chrono::high_resolution_clock::now(); + g->init_thread(gtc, ltc); // pds::init_thread(ltc->tid); // Loop through the files in parallel int num_threads = gtc->task_num; diff --git a/src/tests/GraphTest.hpp b/src/tests/GraphTest.hpp index c895d824..411ce771 100644 --- a/src/tests/GraphTest.hpp +++ b/src/tests/GraphTest.hpp @@ -99,6 +99,7 @@ class GraphTest : public Test { void parInit(GlobalTestConfig *gtc, LocalTestConfig *ltc) { // pds::init_thread(ltc->tid); + g->init_thread(gtc, ltc); size_t x = max_verts; size_t numEdges = (x * x) * 0.5; std::random_device rd; From 6dd4b6df36cf7c4a1e477e7b52eda2997f356998 Mon Sep 17 00:00:00 2001 From: Wentao Cai Date: Thu, 8 Oct 2020 15:01:58 -0400 Subject: [PATCH 06/56] create obj while building ralloc --- ext/ralloc/Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/ext/ralloc/Makefile b/ext/ralloc/Makefile index 5a327bf3..4c7da314 100644 --- a/ext/ralloc/Makefile +++ b/ext/ralloc/Makefile @@ -20,6 +20,7 @@ LIBS = -pthread -lstdc++ -latomic all: libralloc.a ./obj/%.o: ./src/%.cpp + @mkdir -p $(@D) $(CXX) -I $(SRC) -o $@ -c $^ $(CXXFLAGS) libralloc.a:./obj/SizeClass.o ./obj/RegionManager.o ./obj/TCache.o ./obj/BaseMeta.o ./obj/ralloc.o From 
631a0344ab74da088493eeb385552f37a2559f99 Mon Sep 17 00:00:00 2001 From: Wentao Cai Date: Thu, 8 Oct 2020 15:17:15 -0400 Subject: [PATCH 07/56] suppress compiler warning in mod nvm_malloc --- .../ulib-svn/src/base/hash_chain_prot.h | 1 + .../nvm_malloc/ulib-svn/src/base/hash_func.c | 120 +++++++++--------- .../ulib-svn/test/hash_chain_r_bench.cpp | 1 + 3 files changed, 62 insertions(+), 60 deletions(-) diff --git a/ext/mod-single-repo/nvm_malloc/ulib-svn/src/base/hash_chain_prot.h b/ext/mod-single-repo/nvm_malloc/ulib-svn/src/base/hash_chain_prot.h index 61a94f26..8e9d5cc4 100644 --- a/ext/mod-single-repo/nvm_malloc/ulib-svn/src/base/hash_chain_prot.h +++ b/ext/mod-single-repo/nvm_malloc/ulib-svn/src/base/hash_chain_prot.h @@ -289,6 +289,7 @@ chainhash_set_entry_##name##_t *b) \ { \ chainhash_set_entry_##name##_t *head, *tail; \ + head = NULL; \ tail = (chainhash_set_entry_##name##_t *)&head; \ while (a && b) { \ /* if equal, take 'a' -- important for sort stability */ \ diff --git a/ext/mod-single-repo/nvm_malloc/ulib-svn/src/base/hash_func.c b/ext/mod-single-repo/nvm_malloc/ulib-svn/src/base/hash_func.c index f7946014..5a4fe781 100644 --- a/ext/mod-single-repo/nvm_malloc/ulib-svn/src/base/hash_func.c +++ b/ext/mod-single-repo/nvm_malloc/ulib-svn/src/base/hash_func.c @@ -48,12 +48,12 @@ uint64_t hash_fast64(const void *buf, size_t len, uint64_t seed) v = 0; switch (len & 7) { - case 7: v ^= (uint64_t)pc[6] << 48; - case 6: v ^= (uint64_t)pc[5] << 40; - case 5: v ^= (uint64_t)pc[4] << 32; - case 4: v ^= (uint64_t)pc[3] << 24; - case 3: v ^= (uint64_t)pc[2] << 16; - case 2: v ^= (uint64_t)pc[1] << 8; + case 7: v ^= (uint64_t)pc[6] << 48; __attribute__ ((fallthrough)); + case 6: v ^= (uint64_t)pc[5] << 40; __attribute__ ((fallthrough)); + case 5: v ^= (uint64_t)pc[4] << 32; __attribute__ ((fallthrough)); + case 4: v ^= (uint64_t)pc[3] << 24; __attribute__ ((fallthrough)); + case 3: v ^= (uint64_t)pc[2] << 16; __attribute__ ((fallthrough)); + case 2: v ^= 
(uint64_t)pc[1] << 8; __attribute__ ((fallthrough)); case 1: v ^= (uint64_t)pc[0]; v ^= v >> 23; v *= 0x2127599bf4325c37ULL; @@ -95,12 +95,12 @@ uint64_t hash_ferm64(const void *buf, size_t len, uint64_t seed) pc = (const unsigned char*)pos; switch (len & 7) { - case 7: v ^= (uint64_t)pc[6] << 48; - case 6: v ^= (uint64_t)pc[5] << 40; - case 5: v ^= (uint64_t)pc[4] << 32; - case 4: v ^= (uint64_t)pc[3] << 24; - case 3: v ^= (uint64_t)pc[2] << 16; - case 2: v ^= (uint64_t)pc[1] << 8; + case 7: v ^= (uint64_t)pc[6] << 48; __attribute__ ((fallthrough)); + case 6: v ^= (uint64_t)pc[5] << 40; __attribute__ ((fallthrough)); + case 5: v ^= (uint64_t)pc[4] << 32; __attribute__ ((fallthrough)); + case 4: v ^= (uint64_t)pc[3] << 24; __attribute__ ((fallthrough)); + case 3: v ^= (uint64_t)pc[2] << 16; __attribute__ ((fallthrough)); + case 2: v ^= (uint64_t)pc[1] << 8; __attribute__ ((fallthrough)); case 1: v ^= (uint64_t)pc[0]; h ^= v; FER_MIX64(h); @@ -303,16 +303,16 @@ uint32_t hash_jenkins(const void *key, size_t length, uint32_t initval) switch(length) { case 12: c+=k[2]; b+=k[1]; a+=k[0]; break; - case 11: c+=((uint32_t)k8[10])<<16; /* fall through */ - case 10: c+=((uint32_t)k8[9])<<8; /* fall through */ - case 9 : c+=k8[8]; /* fall through */ + case 11: c+=((uint32_t)k8[10])<<16; __attribute__ ((fallthrough));/* fall through */ + case 10: c+=((uint32_t)k8[9])<<8; __attribute__ ((fallthrough));/* fall through */ + case 9 : c+=k8[8]; __attribute__ ((fallthrough));/* fall through */ case 8 : b+=k[1]; a+=k[0]; break; - case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */ - case 6 : b+=((uint32_t)k8[5])<<8; /* fall through */ - case 5 : b+=k8[4]; /* fall through */ + case 7 : b+=((uint32_t)k8[6])<<16; __attribute__ ((fallthrough));/* fall through */ + case 6 : b+=((uint32_t)k8[5])<<8; __attribute__ ((fallthrough));/* fall through */ + case 5 : b+=k8[4]; __attribute__ ((fallthrough));/* fall through */ case 4 : a+=k[0]; break; - case 3 : a+=((uint32_t)k8[2])<<16; /* fall 
through */ - case 2 : a+=((uint32_t)k8[1])<<8; /* fall through */ + case 3 : a+=((uint32_t)k8[2])<<16; __attribute__ ((fallthrough));/* fall through */ + case 2 : a+=((uint32_t)k8[1])<<8; __attribute__ ((fallthrough));/* fall through */ case 1 : a+=k8[0]; break; case 0 : return c; } @@ -342,23 +342,23 @@ uint32_t hash_jenkins(const void *key, size_t length, uint32_t initval) b+=k[2]+(((uint32_t)k[3])<<16); a+=k[0]+(((uint32_t)k[1])<<16); break; - case 11: c+=((uint32_t)k8[10])<<16; /* fall through */ + case 11: c+=((uint32_t)k8[10])<<16; __attribute__ ((fallthrough));/* fall through */ case 10: c+=k[4]; b+=k[2]+(((uint32_t)k[3])<<16); a+=k[0]+(((uint32_t)k[1])<<16); break; - case 9 : c+=k8[8]; /* fall through */ + case 9 : c+=k8[8]; __attribute__ ((fallthrough));/* fall through */ case 8 : b+=k[2]+(((uint32_t)k[3])<<16); a+=k[0]+(((uint32_t)k[1])<<16); break; - case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */ + case 7 : b+=((uint32_t)k8[6])<<16; __attribute__ ((fallthrough));/* fall through */ case 6 : b+=k[2]; a+=k[0]+(((uint32_t)k[1])<<16); break; - case 5 : b+=k8[4]; /* fall through */ + case 5 : b+=k8[4]; __attribute__ ((fallthrough));/* fall through */ case 4 : a+=k[0]+(((uint32_t)k[1])<<16); break; - case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */ + case 3 : a+=((uint32_t)k8[2])<<16; __attribute__ ((fallthrough));/* fall through */ case 2 : a+=k[0]; break; case 1 : a+=k8[0]; @@ -392,17 +392,17 @@ uint32_t hash_jenkins(const void *key, size_t length, uint32_t initval) /*-------------------------------- last block: affect all 32 bits of (c) */ switch(length) /* all the case statements fall through */ { - case 12: c+=((uint32_t)k[11])<<24; - case 11: c+=((uint32_t)k[10])<<16; - case 10: c+=((uint32_t)k[9])<<8; - case 9 : c+=k[8]; - case 8 : b+=((uint32_t)k[7])<<24; - case 7 : b+=((uint32_t)k[6])<<16; - case 6 : b+=((uint32_t)k[5])<<8; - case 5 : b+=k[4]; - case 4 : a+=((uint32_t)k[3])<<24; - case 3 : a+=((uint32_t)k[2])<<16; - case 2 : 
a+=((uint32_t)k[1])<<8; + case 12: c+=((uint32_t)k[11])<<24; __attribute__ ((fallthrough)); + case 11: c+=((uint32_t)k[10])<<16; __attribute__ ((fallthrough)); + case 10: c+=((uint32_t)k[9])<<8; __attribute__ ((fallthrough)); + case 9 : c+=k[8]; __attribute__ ((fallthrough)); + case 8 : b+=((uint32_t)k[7])<<24; __attribute__ ((fallthrough)); + case 7 : b+=((uint32_t)k[6])<<16; __attribute__ ((fallthrough)); + case 6 : b+=((uint32_t)k[5])<<8; __attribute__ ((fallthrough)); + case 5 : b+=k[4]; __attribute__ ((fallthrough)); + case 4 : a+=((uint32_t)k[3])<<24; __attribute__ ((fallthrough)); + case 3 : a+=((uint32_t)k[2])<<16; __attribute__ ((fallthrough)); + case 2 : a+=((uint32_t)k[1])<<8; __attribute__ ((fallthrough)); case 1 : a+=k[0]; break; case 0 : return c; @@ -484,16 +484,16 @@ void hash_jenkins2(const void *key, size_t length, uint32_t *pc, uint32_t *pb) switch(length) { case 12: c+=k[2]; b+=k[1]; a+=k[0]; break; - case 11: c+=((uint32_t)k8[10])<<16; /* fall through */ - case 10: c+=((uint32_t)k8[9])<<8; /* fall through */ - case 9 : c+=k8[8]; /* fall through */ + case 11: c+=((uint32_t)k8[10])<<16; __attribute__ ((fallthrough));/* fall through */ + case 10: c+=((uint32_t)k8[9])<<8; __attribute__ ((fallthrough));/* fall through */ + case 9 : c+=k8[8]; __attribute__ ((fallthrough));/* fall through */ case 8 : b+=k[1]; a+=k[0]; break; - case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */ - case 6 : b+=((uint32_t)k8[5])<<8; /* fall through */ - case 5 : b+=k8[4]; /* fall through */ + case 7 : b+=((uint32_t)k8[6])<<16; __attribute__ ((fallthrough));/* fall through */ + case 6 : b+=((uint32_t)k8[5])<<8; __attribute__ ((fallthrough));/* fall through */ + case 5 : b+=k8[4]; __attribute__ ((fallthrough));/* fall through */ case 4 : a+=k[0]; break; - case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */ - case 2 : a+=((uint32_t)k8[1])<<8; /* fall through */ + case 3 : a+=((uint32_t)k8[2])<<16; __attribute__ ((fallthrough));/* fall through */ + case 2 : 
a+=((uint32_t)k8[1])<<8; __attribute__ ((fallthrough));/* fall through */ case 1 : a+=k8[0]; break; case 0 : *pc=c; *pb=b; return; /* zero length strings require no mixing */ } @@ -523,23 +523,23 @@ void hash_jenkins2(const void *key, size_t length, uint32_t *pc, uint32_t *pb) b+=k[2]+(((uint32_t)k[3])<<16); a+=k[0]+(((uint32_t)k[1])<<16); break; - case 11: c+=((uint32_t)k8[10])<<16; /* fall through */ + case 11: c+=((uint32_t)k8[10])<<16; __attribute__ ((fallthrough));/* fall through */ case 10: c+=k[4]; b+=k[2]+(((uint32_t)k[3])<<16); a+=k[0]+(((uint32_t)k[1])<<16); break; - case 9 : c+=k8[8]; /* fall through */ + case 9 : c+=k8[8]; __attribute__ ((fallthrough));/* fall through */ case 8 : b+=k[2]+(((uint32_t)k[3])<<16); a+=k[0]+(((uint32_t)k[1])<<16); break; - case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */ + case 7 : b+=((uint32_t)k8[6])<<16; __attribute__ ((fallthrough));/* fall through */ case 6 : b+=k[2]; a+=k[0]+(((uint32_t)k[1])<<16); break; - case 5 : b+=k8[4]; /* fall through */ + case 5 : b+=k8[4]; __attribute__ ((fallthrough));/* fall through */ case 4 : a+=k[0]+(((uint32_t)k[1])<<16); break; - case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */ + case 3 : a+=((uint32_t)k8[2])<<16; __attribute__ ((fallthrough));/* fall through */ case 2 : a+=k[0]; break; case 1 : a+=k8[0]; @@ -573,17 +573,17 @@ void hash_jenkins2(const void *key, size_t length, uint32_t *pc, uint32_t *pb) /*-------------------------------- last block: affect all 32 bits of (c) */ switch(length) /* all the case statements fall through */ { - case 12: c+=((uint32_t)k[11])<<24; - case 11: c+=((uint32_t)k[10])<<16; - case 10: c+=((uint32_t)k[9])<<8; - case 9 : c+=k[8]; - case 8 : b+=((uint32_t)k[7])<<24; - case 7 : b+=((uint32_t)k[6])<<16; - case 6 : b+=((uint32_t)k[5])<<8; - case 5 : b+=k[4]; - case 4 : a+=((uint32_t)k[3])<<24; - case 3 : a+=((uint32_t)k[2])<<16; - case 2 : a+=((uint32_t)k[1])<<8; + case 12: c+=((uint32_t)k[11])<<24; __attribute__ ((fallthrough)); + case 11: 
c+=((uint32_t)k[10])<<16; __attribute__ ((fallthrough)); + case 10: c+=((uint32_t)k[9])<<8; __attribute__ ((fallthrough)); + case 9 : c+=k[8]; __attribute__ ((fallthrough)); + case 8 : b+=((uint32_t)k[7])<<24; __attribute__ ((fallthrough)); + case 7 : b+=((uint32_t)k[6])<<16; __attribute__ ((fallthrough)); + case 6 : b+=((uint32_t)k[5])<<8; __attribute__ ((fallthrough)); + case 5 : b+=k[4]; __attribute__ ((fallthrough)); + case 4 : a+=((uint32_t)k[3])<<24; __attribute__ ((fallthrough)); + case 3 : a+=((uint32_t)k[2])<<16; __attribute__ ((fallthrough)); + case 2 : a+=((uint32_t)k[1])<<8; __attribute__ ((fallthrough)); case 1 : a+=k[0]; break; case 0 : *pc=c; *pb=b; return; /* zero length strings require no mixing */ diff --git a/ext/mod-single-repo/nvm_malloc/ulib-svn/test/hash_chain_r_bench.cpp b/ext/mod-single-repo/nvm_malloc/ulib-svn/test/hash_chain_r_bench.cpp index d4340370..733f56b1 100644 --- a/ext/mod-single-repo/nvm_malloc/ulib-svn/test/hash_chain_r_bench.cpp +++ b/ext/mod-single-repo/nvm_malloc/ulib-svn/test/hash_chain_r_bench.cpp @@ -53,6 +53,7 @@ void constant_insert(long ins, long get) for (t = 0; t < get; t++) { chain_hash_map_r::const_iterator it = map.find(myrand()); + (void)it; counter++; } From e7bd2a2171638f01aa33a980abfafc7f390db366 Mon Sep 17 00:00:00 2001 From: Wentao Cai Date: Fri, 9 Oct 2020 10:38:48 -0400 Subject: [PATCH 08/56] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 956bf29a..635df721 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,7 @@ Montage is a system for building fast buffered persistent data structures, developed at the University of Rochester by H. Wen, W. Cai, M. Du, L. Jenkins, B. Valpey, and M. L. Scott. The work has been -submitted to arXiv ([link](https://arxiv.org/abs/2009.701)). You can +submitted to arXiv ([link](https://arxiv.org/abs/2009.13701)). 
You can also find its short version on DISC' 20 as a brief announcement ([link](https://doi.org/10.4230/LIPIcs.DISC.2020.52)). From badf3a892db7158ecf0d314ae480597c0ea00f3e Mon Sep 17 00:00:00 2001 From: "Haosen (Hensen) Wen" Date: Tue, 13 Oct 2020 23:45:30 -0400 Subject: [PATCH 09/56] Update README.md --- src/persist/README.md | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/src/persist/README.md b/src/persist/README.md index fa797ade..73e76b83 100644 --- a/src/persist/README.md +++ b/src/persist/README.md @@ -1,8 +1,4 @@ -This folder contains infrastructures for fast recoverable data structures. For -pseudocode of (the original) epoch system, see: - -https://docs.google.com/document/d/1J_hAxgGEVqVhe89moDAQskpgZQAUYICUoWUUBOYqK1U/edit - +This folder contains infrastructures for Montage. ## Environment variables and values usage: add argument: From a0fdd4ee84cdb4c115e33641ae51b915093800a6 Mon Sep 17 00:00:00 2001 From: "Haosen (Hensen) Wen" Date: Tue, 13 Oct 2020 23:48:35 -0400 Subject: [PATCH 10/56] Update README.md --- src/persist/README.md | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/src/persist/README.md b/src/persist/README.md index 73e76b83..dc7e5ad5 100644 --- a/src/persist/README.md +++ b/src/persist/README.md @@ -23,14 +23,3 @@ to command line. * `CurrEpoch`: per-thread indicator of current epoch on the thread * `EpochLength`: specify epoch length. * `EpochLengthUnit`: specify epoch length unit: `Second` (default) `Millisecond` or `Microsecond`. - -## List of design-space-related experiments - -* independent: write-flush same cache line vs. different cache line -* Typical combinations of PersistStrat + Persister: - * DirWB - * PerEpoch+{Advancer, WorkerThread, SingleDedicated, PerWorkerDedicated} - * BufferedWB+{WorkerThread, SingleDedicated, PerWorkerDedicated} - * BufferedWB's buffer size sensitivity test, with dumping size: full, half or 1. -* EpochAdvancer: Dedicated vs. 
SingleWorker -* EpochLength sensitivity test From 9fce56b85d9ed46de85e0e438c042d4abd01b910 Mon Sep 17 00:00:00 2001 From: Haosen Wen Date: Sun, 25 Oct 2020 22:08:07 -0400 Subject: [PATCH 11/56] put initialization and finalization into Recoverable for MontageHashTable. --- src/persist/Recoverable.hpp | 17 +++++++++++++++++ src/persist/api/pblk_naked.hpp | 1 + src/rideables/MontageHashTable.hpp | 15 +++++---------- src/tests/ChurnTest.hpp | 6 +++--- src/tests/MapChurnTest.hpp | 4 ++++ 5 files changed, 30 insertions(+), 13 deletions(-) diff --git a/src/persist/Recoverable.hpp b/src/persist/Recoverable.hpp index 93630f01..77bb356b 100644 --- a/src/persist/Recoverable.hpp +++ b/src/persist/Recoverable.hpp @@ -1,10 +1,27 @@ #ifndef RECOVERABLE_HPP #define RECOVERABLE_HPP +#include "TestConfig.hpp" // TODO: report recover errors/exceptions class Recoverable{ public: + Recoverable(){} // TODO: get rid of default constructor. + Recoverable(GlobalTestConfig* gtc){ + // init Persistent allocator + Persistent::init(); + // init epoch system + pds::init(gtc); + // init main thread + pds::init_thread(0); + } + ~Recoverable(){ + pds::finalize(); + Persistent::finalize(); + } + void init_thread(GlobalTestConfig*, LocalTestConfig* ltc){ + pds::init_thread(ltc->tid); + } // return num of blocks recovered. virtual int recover(bool simulated = false) = 0; }; diff --git a/src/persist/api/pblk_naked.hpp b/src/persist/api/pblk_naked.hpp index d2f02085..20af9084 100644 --- a/src/persist/api/pblk_naked.hpp +++ b/src/persist/api/pblk_naked.hpp @@ -37,6 +37,7 @@ namespace pds{ inline void finalize(){ delete esys; + esys = nullptr; // for debugging. 
} #define CHECK_EPOCH() ({\ diff --git a/src/rideables/MontageHashTable.hpp b/src/rideables/MontageHashTable.hpp index 26eeca8b..15b7dc00 100644 --- a/src/rideables/MontageHashTable.hpp +++ b/src/rideables/MontageHashTable.hpp @@ -50,7 +50,9 @@ class MontageHashTable : public RMap, public Recoverable{ payload = payload->set_val(v); } ~ListNode(){ - PDELETE(payload); + if (payload){ + PDELETE(payload); + } } }__attribute__((aligned(CACHELINE_SIZE))); struct Bucket{ @@ -62,17 +64,10 @@ class MontageHashTable : public RMap, public Recoverable{ std::hash hash_fn; Bucket buckets[idxSize]; GlobalTestConfig* gtc; - MontageHashTable(GlobalTestConfig* gtc_):gtc(gtc_){ - // init Persistent allocator - Persistent::init(); - // init epoch system - pds::init(gtc); - // init main thread - pds::init_thread(0); - }; + MontageHashTable(GlobalTestConfig* gtc_): Recoverable(gtc_), gtc(gtc_){}; void init_thread(GlobalTestConfig* gtc, LocalTestConfig* ltc){ - pds::init_thread(ltc->tid); + Recoverable::init_thread(gtc, ltc); } optional get(K key, int tid){ diff --git a/src/tests/ChurnTest.hpp b/src/tests/ChurnTest.hpp index a8c889ca..7e57f503 100644 --- a/src/tests/ChurnTest.hpp +++ b/src/tests/ChurnTest.hpp @@ -55,9 +55,9 @@ class ChurnTest : public Test{ ChurnTest(p_gets, p_puts, p_inserts, p_removes, range,0){} void init(GlobalTestConfig* gtc); int execute(GlobalTestConfig* gtc, LocalTestConfig* ltc); - void cleanup(GlobalTestConfig* gtc); pthread_barrier_t barrier; + virtual void cleanup(GlobalTestConfig* gtc); virtual void parInit(GlobalTestConfig* gtc, LocalTestConfig* ltc); virtual void getRideable(GlobalTestConfig* gtc) = 0; virtual void doPrefill(GlobalTestConfig* gtc) = 0; @@ -179,8 +179,8 @@ void ChurnTest::cleanup(GlobalTestConfig* gtc){ Savitar_core_finalize(); pthread_mutex_destroy(&snapshot_lock); #endif - pds::finalize(); - Persistent::finalize(); + // pds::finalize(); + // Persistent::finalize(); } #ifdef PRONTO diff --git a/src/tests/MapChurnTest.hpp 
b/src/tests/MapChurnTest.hpp index dd847a1a..c8f16494 100644 --- a/src/tests/MapChurnTest.hpp +++ b/src/tests/MapChurnTest.hpp @@ -95,6 +95,10 @@ class MapChurnTest : public ChurnTest{ m->remove(k,tid); } } + void cleanup(GlobalTestConfig* gtc){ + ChurnTest::cleanup(gtc); + delete m; + } }; template From b8e7cf451f3821e611e7da0fce5eaec645a62df1 Mon Sep 17 00:00:00 2001 From: Haosen Wen Date: Mon, 26 Oct 2020 19:03:26 -0400 Subject: [PATCH 12/56] put initialization and finalization into Recoverable --- src/persist/Recoverable.hpp | 2 +- src/rideables/MontageGraph.hpp | 19 ++++++++----------- src/rideables/MontageLfHashTable.hpp | 18 +++++++++--------- src/rideables/MontageMSQueue.hpp | 18 +++++++++--------- src/rideables/MontageNatarajanTree.hpp | 20 ++++++++++---------- src/rideables/MontageQueue.hpp | 18 +++++++++--------- 6 files changed, 46 insertions(+), 49 deletions(-) diff --git a/src/persist/Recoverable.hpp b/src/persist/Recoverable.hpp index 77bb356b..80d344d6 100644 --- a/src/persist/Recoverable.hpp +++ b/src/persist/Recoverable.hpp @@ -2,11 +2,11 @@ #define RECOVERABLE_HPP #include "TestConfig.hpp" +#include "pblk_naked.hpp" // TODO: report recover errors/exceptions class Recoverable{ public: - Recoverable(){} // TODO: get rid of default constructor. 
Recoverable(GlobalTestConfig* gtc){ // init Persistent allocator Persistent::init(); diff --git a/src/rideables/MontageGraph.hpp b/src/rideables/MontageGraph.hpp index f3bcb6e1..f672222f 100644 --- a/src/rideables/MontageGraph.hpp +++ b/src/rideables/MontageGraph.hpp @@ -63,7 +63,7 @@ class MontageGraph : public RGraph, public Recoverable{ }; class tVertex { public: - Vertex *payload; + Vertex *payload = nullptr; int id; // cached id uint64_t seqNumber; @@ -83,7 +83,11 @@ class MontageGraph : public RGraph, public Recoverable{ this->id = p->get_unsafe_id(); } - ~tVertex() { PDELETE(payload); } + ~tVertex() { + if (payload){ + PDELETE(payload); + } + } void set_lbl(int l) { payload = payload->set_lbl(l); @@ -117,14 +121,7 @@ class MontageGraph : public RGraph, public Recoverable{ Relation *e; }; - MontageGraph(GlobalTestConfig* gtc) { - // init Persistent allocator - Persistent::init(); - // init epoch system - pds::init(gtc); - // init main thread - pds::init_thread(0); - + MontageGraph(GlobalTestConfig* gtc) : Recoverable(gtc) { BEGIN_OP_AUTOEND(); idxToVertex = new tVertex*[numVertices]; // Initialize... 
@@ -134,7 +131,7 @@ class MontageGraph : public RGraph, public Recoverable{ } void init_thread(GlobalTestConfig* gtc, LocalTestConfig* ltc){ - pds::init_thread(ltc->tid); + Recoverable::init_thread(gtc, ltc); } tVertex** idxToVertex; // Transient set of transient vertices to index map diff --git a/src/rideables/MontageLfHashTable.hpp b/src/rideables/MontageLfHashTable.hpp index 049ca2d2..44c3630f 100644 --- a/src/rideables/MontageLfHashTable.hpp +++ b/src/rideables/MontageLfHashTable.hpp @@ -16,9 +16,10 @@ #include "RMap.hpp" #include "RCUTracker.hpp" #include "CustomTypes.hpp" +#include "Recoverable.hpp" template -class MontageLfHashTable : public RMap{ +class MontageLfHashTable : public RMap, Recoverable{ public: class Payload : public PBlk{ GENERATE_FIELD(K, key, Payload); @@ -79,18 +80,17 @@ class MontageLfHashTable : public RMap{ return mixPtrMark(mptr,true); } public: - MontageLfHashTable(GlobalTestConfig* gtc) : tracker(gtc->task_num, 100, 1000, true) { - // init Persistent allocator - Persistent::init(); - // init epoch system - pds::init(gtc); - // init main thread - pds::init_thread(0); + MontageLfHashTable(GlobalTestConfig* gtc) : Recoverable(gtc), tracker(gtc->task_num, 100, 1000, true) { }; ~MontageLfHashTable(){}; void init_thread(GlobalTestConfig* gtc, LocalTestConfig* ltc){ - pds::init_thread(ltc->tid); + Recoverable::init_thread(gtc, ltc); + } + + int recover(bool simulated){ + errexit("recover of MontageLfHashTable not implemented."); + return 0; } optional get(K key, int tid); diff --git a/src/rideables/MontageMSQueue.hpp b/src/rideables/MontageMSQueue.hpp index 6b2e907d..bb82207c 100644 --- a/src/rideables/MontageMSQueue.hpp +++ b/src/rideables/MontageMSQueue.hpp @@ -9,12 +9,13 @@ #include "RQueue.hpp" #include "RCUTracker.hpp" #include "CustomTypes.hpp" +#include "Recoverable.hpp" #include "persist_struct_api.hpp" using namespace pds; template -class MontageMSQueue : public RQueue{ +class MontageMSQueue : public RQueue, Recoverable{ public: 
class Payload : public PBlk{ GENERATE_FIELD(T, val, Payload); @@ -55,14 +56,8 @@ class MontageMSQueue : public RQueue{ public: MontageMSQueue(GlobalTestConfig* gtc): - global_sn(0), head(nullptr), tail(nullptr), + Recoverable(gtc), global_sn(0), head(nullptr), tail(nullptr), tracker(gtc->task_num, 100, 1000, true){ - // init Persistent allocator - Persistent::init(); - // init epoch system - pds::init(gtc); - // init main thread - pds::init_thread(0); Node* dummy = new Node(); head.store(dummy); @@ -70,7 +65,12 @@ class MontageMSQueue : public RQueue{ } void init_thread(GlobalTestConfig* gtc, LocalTestConfig* ltc){ - pds::init_thread(ltc->tid); + Recoverable::init_thread(gtc, ltc); + } + + int recover(bool simulated){ + errexit("recover of MontageMSQueue not implemented."); + return 0; } ~MontageMSQueue(){}; diff --git a/src/rideables/MontageNatarajanTree.hpp b/src/rideables/MontageNatarajanTree.hpp index 3f571d5a..4089fcc2 100644 --- a/src/rideables/MontageNatarajanTree.hpp +++ b/src/rideables/MontageNatarajanTree.hpp @@ -9,12 +9,13 @@ #include "RMap.hpp" #include "RCUTracker.hpp" #include "CustomTypes.hpp" +#include "Recoverable.hpp" #include "persist_struct_api.hpp" using namespace pds; template -class MontageNatarajanTree : public RMap{ +class MontageNatarajanTree : public RMap, public Recoverable{ public: class Payload : public PBlk{ GENERATE_FIELD(K, key, Payload); @@ -148,14 +149,8 @@ class MontageNatarajanTree : public RMap{ bool cleanup(K key, int tid); // void doRangeQuery(Node& k1, Node& k2, int tid, Node* root, std::map& res); public: - MontageNatarajanTree(GlobalTestConfig* gtc): tracker(gtc->task_num, 100, 1000, true){ - // init Persistent allocator - Persistent::init(); - // init epoch system - pds::init(gtc); - // init main thread - pds::init_thread(0); - + MontageNatarajanTree(GlobalTestConfig* gtc): + Recoverable(gtc), tracker(gtc->task_num, 100, 1000, true){ r.right.store(new Node(inf2)); r.left.store(&s); s.right.store(new Node(inf1)); @@ -165,7 
+160,12 @@ class MontageNatarajanTree : public RMap{ ~MontageNatarajanTree(){}; void init_thread(GlobalTestConfig* gtc, LocalTestConfig* ltc){ - pds::init_thread(ltc->tid); + Recoverable::init_thread(gtc, ltc); + } + + int recover(bool simulated){ + errexit("recover of MontageNatarajanTree not implemented."); + return 0; } optional get(K key, int tid); diff --git a/src/rideables/MontageQueue.hpp b/src/rideables/MontageQueue.hpp index 69b53278..af724f72 100644 --- a/src/rideables/MontageQueue.hpp +++ b/src/rideables/MontageQueue.hpp @@ -9,13 +9,14 @@ #include "RQueue.hpp" #include "RCUTracker.hpp" #include "CustomTypes.hpp" +#include "Recoverable.hpp" #include "persist_struct_api.hpp" #include using namespace pds; template -class MontageQueue : public RQueue{ +class MontageQueue : public RQueue, public Recoverable{ public: class Payload : public PBlk{ GENERATE_FIELD(T, val, Payload); @@ -65,19 +66,18 @@ class MontageQueue : public RQueue{ public: MontageQueue(GlobalTestConfig* gtc): - global_sn(0), head(nullptr), tail(nullptr){ - // init Persistent allocator - Persistent::init(); - // init epoch system - pds::init(gtc); - // init main thread - pds::init_thread(0); + Recoverable(gtc), global_sn(0), head(nullptr), tail(nullptr){ } ~MontageQueue(){}; void init_thread(GlobalTestConfig* gtc, LocalTestConfig* ltc){ - pds::init_thread(ltc->tid); + Recoverable::init_thread(gtc, ltc); + } + + int recover(bool simulated){ + errexit("recover of MontageQueue not implemented."); + return 0; } void enqueue(T val, int tid); From 8d55256324e14b5d7d0750099af2c37ef69158c2 Mon Sep 17 00:00:00 2001 From: Haosen Wen Date: Mon, 26 Oct 2020 19:50:03 -0400 Subject: [PATCH 13/56] removed EpochSysVerifyTest --- src/main.cpp | 1 - src/persist/EpochSys.hpp | 5 - src/persist/Recoverable.hpp | 2 + src/tests/EpochSysVerifyTest.hpp | 152 ------------------------------- 4 files changed, 2 insertions(+), 158 deletions(-) delete mode 100644 src/tests/EpochSysVerifyTest.hpp diff --git a/src/main.cpp 
b/src/main.cpp index 29f72f0a..79f0b6dd 100644 --- a/src/main.cpp +++ b/src/main.cpp @@ -71,7 +71,6 @@ #include "MapTest.hpp" #include "MapChurnTest.hpp" #ifndef MNEMOSYNE -#include "EpochSysVerifyTest.hpp" #include "RecoverVerifyTest.hpp" #include "GraphRecoveryTest.hpp" #include "TGraphConstructionTest.hpp" diff --git a/src/persist/EpochSys.hpp b/src/persist/EpochSys.hpp index 2e474c11..fc4afa0f 100644 --- a/src/persist/EpochSys.hpp +++ b/src/persist/EpochSys.hpp @@ -13,9 +13,6 @@ #include "HarnessUtils.hpp" #include "Persistent.hpp" #include "persist_utils.hpp" -// #include "EpochSysVerifyTest.hpp" - -class EpochSysVerifyTest; namespace pds{ @@ -70,7 +67,6 @@ enum PBlkType {INIT, ALLOC, UPDATE, DELETE, RECLAIMED, EPOCH, OWNED}; // class PBlk{ class PBlk : public Persistent{ friend class EpochSys; - friend class EpochSysVerifyTest; static UIDGenerator uid_generator; protected: // Wentao: the first word should NOT be any persistent value for @@ -277,7 +273,6 @@ class EpochSys{ void on_end_transaction(EpochSys* esys, uint64_t c){} }; - friend class EpochSysVerifyTest; private: // persistent fields: diff --git a/src/persist/Recoverable.hpp b/src/persist/Recoverable.hpp index 80d344d6..a62d03c9 100644 --- a/src/persist/Recoverable.hpp +++ b/src/persist/Recoverable.hpp @@ -2,10 +2,12 @@ #define RECOVERABLE_HPP #include "TestConfig.hpp" +#include "EpochSys.hpp" #include "pblk_naked.hpp" // TODO: report recover errors/exceptions class Recoverable{ + pds::EpochSys* _esys = nullptr; public: Recoverable(GlobalTestConfig* gtc){ // init Persistent allocator diff --git a/src/tests/EpochSysVerifyTest.hpp b/src/tests/EpochSysVerifyTest.hpp deleted file mode 100644 index b05d23bf..00000000 --- a/src/tests/EpochSysVerifyTest.hpp +++ /dev/null @@ -1,152 +0,0 @@ -#ifndef EPOCHSYSVERIFYTEST_HPP -#define EPOCHSYSVERIFYTEST_HPP - -#include "TestConfig.hpp" -#include "EpochSys.hpp" -#include "Persistent.hpp" -#include "persist_struct_api.hpp" - -using namespace pds; -class Foo : public 
PBlk{ -public: - void persist() {} - int bar; - Foo(int b): bar(b){} -}; - -class EpochSysVerifyTest : public Test{ -public: - EpochSys* esys; - void init(GlobalTestConfig* gtc); - void parInit(GlobalTestConfig* gtc, LocalTestConfig* ltc); - int execute(GlobalTestConfig* gtc, LocalTestConfig* ltc); - void cleanup(GlobalTestConfig* gtc); - - EpochSysVerifyTest(); - void test(int line, bool r, string hint); -}; - -EpochSysVerifyTest::EpochSysVerifyTest(){ - -} - -void EpochSysVerifyTest::init(GlobalTestConfig* gtc){ - Persistent::init(); - if (gtc->verbose){ - std::cout<<"EpochSysVerifyTest is currently ignoring rideables."<task_num != 1){ - errexit("EpochSysVerify currently runs on only 1 thread."); - } - pds::init(gtc); - // esys = new EpochSys(); - esys = pds::esys; -} - -void EpochSysVerifyTest::parInit(GlobalTestConfig* gtc, LocalTestConfig* ltc){ - pds::init_thread(ltc->tid); -} - -void EpochSysVerifyTest::test(int line, bool r, string hint){ - std::cout<<"line:"<tid == 0); - // // So let's test the epoch system on a single thread: - // { - // // basic transaction operations: - // esys->reset(); - // uint64_t e1 = esys->begin_transaction(); - // // this begin_transaction should return 0. - // TEST(e1 == 0, e1); - // uint64_t e2 = esys->begin_transaction(); - // // this begin_transaction should also return 0. - // TEST(e2 == 0, e2); - // // active_transaction of epoch 0 should have 2 in it. - // TEST(esys->active_transactions[0].ui.load() == 2, esys->active_transactions[0].ui.load()); - // esys->end_transaction(e1); - // esys->end_transaction(e2); - // // active_transaction of epoch 0 should have 0 in it. 
- // TEST(esys->active_transactions[0].ui.load() == 0, esys->active_transactions[0].ui.load()); - // esys->reset(); - // }; - // { - // esys->reset(); - // // pblk operations and epoch advance: - // esys->advance_epoch(0); - // TEST(esys->global_epoch->load() == 1, esys->global_epoch->load()); - // Foo* foo = new Foo(1); - // uint64_t e1 = esys->begin_transaction(); - // foo = esys->register_alloc_pblk(foo, e1); - // // to_be_persisted of epoch e1 (1) should have size 1. - // TEST(esys->to_be_persisted[e1%4].size() == 1, esys->to_be_persisted[e1%4].size()); - // // the epoch of foo should be e1. - // TEST(foo->epoch == e1, foo->epoch); - // Foo* bar = esys->openwrite_pblk(foo, e1); - // esys->register_update_pblk(bar, e1); - // // openwrite_pblk should return the same copy. - // TEST(bar == foo, (uint64_t)foo); - // esys->advance_epoch(0); - // // epoch should remain the same, as we're assuming epoch is still 0 when we call advance_wpoch. - // TEST(esys->global_epoch->load() == e1, esys->global_epoch->load()); - - // esys->advance_epoch(e1); - // // epoch should be 2, as the epoch used to be e1 (1). - // TEST(esys->global_epoch->load() == e1+1, esys->global_epoch->load()); - // // begin a new transaction. - // uint64_t e2 = esys->begin_transaction(); - // // e2 should be 2. - // TEST(e2 == 2, e2); - // foo = esys->openwrite_pblk(bar, e2); - // esys->register_update_pblk(foo, e2); - // // now foo should be different from bar, since it's re-opened in e2. - // TEST(foo != bar, (uint64_t)foo); - // // bar should be put in e2's to-be-freed list. - // TEST(esys->to_be_freed[e2%4].size() == 1, esys->to_be_freed[e2%4].size()); - // // foo should be put in e2's to-be-persisted list. - // TEST(esys->to_be_persisted[e2%4].size() == 1, esys->to_be_persisted[e2%4].size()); - // // the following openwrite should throw an exception, since it's already e2. 
- // bool throwed = false; - // try{ - // foo = esys->openwrite_pblk(foo, e1); - // } catch (OldSeeNewException& e){ - // throwed = true; - // } - // TEST(throwed, e1); - // esys->end_transaction(e2); - // esys->end_transaction(e1); - - // esys->advance_epoch(e2); - // // now epoch should be 3. e1's persist list should be empty. - // TEST(esys->to_be_persisted[e1%4].size() == 0, esys->to_be_persisted[e1%4].size()); - // esys->advance_epoch(e2+1); - // // now epoch should be 3. e2's persist list should be empty. - // TEST(esys->to_be_persisted[e2%4].size() == 0, esys->to_be_persisted[e2%4].size()); - // // e2's free list should not be empty. - // TEST(esys->to_be_freed[e2%4].size() != 0, esys->to_be_freed[e2%4].size()); - // esys->advance_epoch(e2+2); - // // now epoch should be 4. e2's free list should be empty. - // TEST(esys->to_be_freed[e2%4].size() == 0, esys->to_be_freed[e2%4].size()); - - // esys->reset(); - // }; - - return 0; -} - -void EpochSysVerifyTest::cleanup(GlobalTestConfig* gtc){ - -} - - - -#endif \ No newline at end of file From 17ab8acdd98e0a146519058e307d115a711035a3 Mon Sep 17 00:00:00 2001 From: Haosen Wen Date: Thu, 29 Oct 2020 17:39:51 -0400 Subject: [PATCH 14/56] moved epoch advancers to hpp and cpp files. 
--- src/persist/EpochAdvancers.cpp | 85 ++++++++++++++++++++ src/persist/EpochAdvancers.hpp | 71 +++++++++++++++++ src/persist/EpochSys.hpp | 137 +-------------------------------- 3 files changed, 157 insertions(+), 136 deletions(-) create mode 100644 src/persist/EpochAdvancers.cpp create mode 100644 src/persist/EpochAdvancers.hpp diff --git a/src/persist/EpochAdvancers.cpp b/src/persist/EpochAdvancers.cpp new file mode 100644 index 00000000..f37fe945 --- /dev/null +++ b/src/persist/EpochAdvancers.cpp @@ -0,0 +1,85 @@ +#include "EpochSys.hpp" +#include "EpochAdvancers.hpp" + +using namespace pds; + +SingleThreadEpochAdvancer::SingleThreadEpochAdvancer(GlobalTestConfig* gtc){ + trans_cnts = new padded[gtc->task_num]; + for (int i = 0; i < gtc->task_num; i++){ + trans_cnts[i].ui = 0; + } +} +void SingleThreadEpochAdvancer::set_epoch_freq(int epoch_power){ + epoch_threshold = 0x1ULL << epoch_power; +} +void SingleThreadEpochAdvancer::set_help_freq(int help_power){ + help_threshold = 0x1ULL << help_power; +} +void SingleThreadEpochAdvancer::on_end_transaction(EpochSys* esys, uint64_t c){ + assert(_tid != -1); + trans_cnts[_tid].ui++; + if (_tid == 0){ + // only a single thread can advance epochs. 
+ if (trans_cnts[_tid].ui % epoch_threshold == 0){ + esys->advance_epoch(c); + } + } +} + + +void GlobalCounterEpochAdvancer::set_epoch_freq(int epoch_power){ + epoch_threshold = 0x1ULL << epoch_power; +} +void GlobalCounterEpochAdvancer::set_help_freq(int help_power){ + help_threshold = 0x1ULL << help_power; +} +void GlobalCounterEpochAdvancer::on_end_transaction(EpochSys* esys, uint64_t c){ + uint64_t curr_cnt = trans_cnt.fetch_add(1, std::memory_order_acq_rel); + if (curr_cnt % epoch_threshold == 0){ + esys->advance_epoch(c); + } else if (curr_cnt % help_threshold == 0){ + esys->help_local(); + } +} + + +DedicatedEpochAdvancer::DedicatedEpochAdvancer(GlobalTestConfig* gtc, EpochSys* es):esys(es){ + if (gtc->checkEnv("EpochLength")){ + epoch_length = stoi(gtc->getEnv("EpochLength")); + } else { + epoch_length = 100*1000; + } + if (gtc->checkEnv("EpochLengthUnit")){ + std::string env_unit = gtc->getEnv("EpochLengthUnit"); + if (env_unit == "Second"){ + epoch_length *= 1000000; + } else if (env_unit == "Millisecond"){ + epoch_length *= 1000; + } else if (env_unit == "Microsecond"){ + // do nothing. 
+ } else { + errexit("time unit not supported."); + } + } + started.store(false); + advancer_thread = std::move(std::thread(&DedicatedEpochAdvancer::advancer, this)); + started.store(true); +} + +void DedicatedEpochAdvancer::advancer(){ + while(!started.load()){} + while(started.load()){ + esys->advance_epoch_dedicated(); + std::this_thread::sleep_for(std::chrono::microseconds(epoch_length)); + } + // std::cout<<"advancer_thread terminating..."< +#include +#include "TestConfig.hpp" +#include "ConcurrentPrimitives.hpp" + +class EpochSys; + +///////////////////// +// Epoch Advancers // +///////////////////// + +class EpochAdvancer{ +public: + virtual void set_epoch_freq(int epoch_freq) = 0; + virtual void set_help_freq(int help_freq) = 0; + virtual void on_end_transaction(EpochSys* esys, uint64_t c) = 0; + virtual ~EpochAdvancer(){} +}; + +class SingleThreadEpochAdvancer : public EpochAdvancer{ + // uint64_t trans_cnt; + padded* trans_cnts; + uint64_t epoch_threshold = 0x1ULL << 19; + uint64_t help_threshold = 0x1ULL << 6; +public: + SingleThreadEpochAdvancer(GlobalTestConfig* gtc); + void set_epoch_freq(int epoch_power); + void set_help_freq(int help_power); + void on_end_transaction(EpochSys* esys, uint64_t c); +}; + +class GlobalCounterEpochAdvancer : public EpochAdvancer{ + std::atomic trans_cnt; + uint64_t epoch_threshold = 0x1ULL << 14; + uint64_t help_threshold = 0x1ULL; +public: + // GlobalCounterEpochAdvancer(); + void set_epoch_freq(int epoch_power); + void set_help_freq(int help_power); + void on_end_transaction(EpochSys* esys, uint64_t c); +}; + +class DedicatedEpochAdvancer : public EpochAdvancer{ + EpochSys* esys; + std::thread advancer_thread; + std::atomic started; + uint64_t epoch_length = 100*1000; + void advancer(); +public: + DedicatedEpochAdvancer(GlobalTestConfig* gtc, EpochSys* es); + ~DedicatedEpochAdvancer(); + void set_epoch_freq(int epoch_interval){} + void set_help_freq(int help_interval){} + void on_end_transaction(EpochSys* esys, 
uint64_t c){ + // do nothing here. + } +}; + +class NoEpochAdvancer : public EpochAdvancer{ + // an epoch advancer that does absolutely nothing. +public: + // GlobalCounterEpochAdvancer(); + void set_epoch_freq(int epoch_power){} + void set_help_freq(int help_power){} + void on_end_transaction(EpochSys* esys, uint64_t c){} +}; + +#endif \ No newline at end of file diff --git a/src/persist/EpochSys.hpp b/src/persist/EpochSys.hpp index 5f15b4d2..0bb4a44d 100644 --- a/src/persist/EpochSys.hpp +++ b/src/persist/EpochSys.hpp @@ -139,145 +139,10 @@ class PBlkArray : public PBlk{ #include "PerThreadContainers.hpp" #include "ToBePersistedContainers.hpp" #include "ToBeFreedContainers.hpp" +#include "EpochAdvancers.hpp" class EpochSys{ - ///////////////////// - // Epoch Advancers // - ///////////////////// - - class EpochAdvancer{ - public: - virtual void set_epoch_freq(int epoch_freq) = 0; - virtual void set_help_freq(int help_freq) = 0; - virtual void on_end_transaction(EpochSys* esys, uint64_t c) = 0; - virtual ~EpochAdvancer(){} - }; - - class SingleThreadEpochAdvancer : public EpochAdvancer{ - // uint64_t trans_cnt; - padded* trans_cnts; - uint64_t epoch_threshold = 0x1ULL << 19; - uint64_t help_threshold = 0x1ULL << 6; - public: - SingleThreadEpochAdvancer(GlobalTestConfig* gtc){ - trans_cnts = new padded[gtc->task_num]; - for (int i = 0; i < gtc->task_num; i++){ - trans_cnts[i].ui = 0; - } - } - void set_epoch_freq(int epoch_power){ - epoch_threshold = 0x1ULL << epoch_power; - } - void set_help_freq(int help_power){ - help_threshold = 0x1ULL << help_power; - } - void on_end_transaction(EpochSys* esys, uint64_t c){ - assert(_tid != -1); - trans_cnts[_tid].ui++; - if (_tid == 0){ - // only a single thread can advance epochs. 
- if (trans_cnts[_tid].ui % epoch_threshold == 0){ - esys->advance_epoch(c); - } - // else if (trans_cnts[_tid].ui % help_threshold == 0){ - // esys->help_local(); - // } - } - // else { - // if (trans_cnts[_tid].ui % help_threshold == 0){ - // esys->help_local(); - // } - // } - } - }; - - class GlobalCounterEpochAdvancer : public EpochAdvancer{ - std::atomic trans_cnt; - uint64_t epoch_threshold = 0x1ULL << 14; - uint64_t help_threshold = 0x1ULL; - public: - // GlobalCounterEpochAdvancer(); - void set_epoch_freq(int epoch_power){ - epoch_threshold = 0x1ULL << epoch_power; - } - void set_help_freq(int help_power){ - help_threshold = 0x1ULL << help_power; - } - void on_end_transaction(EpochSys* esys, uint64_t c){ - uint64_t curr_cnt = trans_cnt.fetch_add(1, std::memory_order_acq_rel); - if (curr_cnt % epoch_threshold == 0){ - esys->advance_epoch(c); - } else if (curr_cnt % help_threshold == 0){ - esys->help_local(); - } - } - }; - - class DedicatedEpochAdvancer : public EpochAdvancer{ - EpochSys* esys; - std::thread advancer_thread; - std::atomic started; - uint64_t epoch_length = 100*1000; - void advancer(){ - while(!started.load()){} - while(started.load()){ - esys->advance_epoch_dedicated(); - std::this_thread::sleep_for(std::chrono::microseconds(epoch_length)); - } - // std::cout<<"advancer_thread terminating..."<checkEnv("EpochLength")){ - epoch_length = stoi(gtc->getEnv("EpochLength")); - } else { - epoch_length = 100*1000; - } - if (gtc->checkEnv("EpochLengthUnit")){ - std::string env_unit = gtc->getEnv("EpochLengthUnit"); - if (env_unit == "Second"){ - epoch_length *= 1000000; - } else if (env_unit == "Millisecond"){ - epoch_length *= 1000; - } else if (env_unit == "Microsecond"){ - // do nothing. 
- } else { - errexit("time unit not supported."); - } - } - - started.store(false); - advancer_thread = std::move(std::thread(&DedicatedEpochAdvancer::advancer, this)); - started.store(true); - } - ~DedicatedEpochAdvancer(){ - // std::cout<<"terminating advancer_thread"< Date: Thu, 29 Oct 2020 17:52:07 -0400 Subject: [PATCH 15/56] moved UIDGenerator to utils. --- src/persist/EpochSys.hpp | 21 --------------------- src/persist/persist_utils.hpp | 21 +++++++++++++++++++++ 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/src/persist/EpochSys.hpp b/src/persist/EpochSys.hpp index 0bb4a44d..cc14368c 100644 --- a/src/persist/EpochSys.hpp +++ b/src/persist/EpochSys.hpp @@ -39,27 +39,6 @@ struct OldSeeNewException : public std::exception { } }; -class UIDGenerator{ - padded* curr_ids; -public: - void init(uint64_t task_num){ - uint64_t buf = task_num-1; - int shift = 64; - uint64_t max = 1; - for (; buf != 0; buf >>= 1){ - shift--; - max <<= 1; - } - curr_ids = new padded[max]; - for (uint64_t i = 0; i < max; i++){ - curr_ids[i].ui = i << shift; - } - } - uint64_t get_id(int tid){ - return curr_ids[tid].ui++; - } -}; - class EpochSys; enum PBlkType {INIT, ALLOC, UPDATE, DELETE, RECLAIMED, EPOCH, OWNED}; diff --git a/src/persist/persist_utils.hpp b/src/persist/persist_utils.hpp index 741e9847..24573df2 100644 --- a/src/persist/persist_utils.hpp +++ b/src/persist/persist_utils.hpp @@ -6,6 +6,27 @@ #include #include +class UIDGenerator{ + padded* curr_ids; +public: + void init(uint64_t task_num){ + uint64_t buf = task_num-1; + int shift = 64; + uint64_t max = 1; + for (; buf != 0; buf >>= 1){ + shift--; + max <<= 1; + } + curr_ids = new padded[max]; + for (uint64_t i = 0; i < max; i++){ + curr_ids[i].ui = i << shift; + } + } + uint64_t get_id(int tid){ + return curr_ids[tid].ui++; + } +}; + // A single-threaded circular buffer that grows exponentially // when populated and never shrinks (for now). 
// The buffer always allocates new spaces in chunks: the key From ac3fc356bd6d1274a01d1e2b4dd681be106a1ba8 Mon Sep 17 00:00:00 2001 From: Haosen Wen Date: Fri, 30 Oct 2020 16:41:53 -0400 Subject: [PATCH 16/56] pulled out inclusions from namespace; put _tid into EpochSys as static thread_local --- src/persist/EpochAdvancers.cpp | 8 +- src/persist/EpochAdvancers.hpp | 4 + src/persist/EpochSys.cpp | 3 +- src/persist/EpochSys.hpp | 78 +++--- src/persist/PString.hpp | 14 +- src/persist/PerThreadContainers.hpp | 61 ++--- src/persist/ToBeFreedContainers.cpp | 89 +++++++ src/persist/ToBeFreedContainers.hpp | 177 +++++--------- src/persist/ToBePersistedContainers.cpp | 263 ++++++++++++++++++++ src/persist/ToBePersistedContainers.hpp | 313 ++++-------------------- src/persist/TransactionTrackers.cpp | 163 ++++++++++++ src/persist/TransactionTrackers.hpp | 192 ++++----------- src/persist/api/pblk_naked.hpp | 90 +++---- src/persist/common_macros.hpp | 18 ++ src/persist/persist_config.hpp | 2 - src/persist/persist_utils.hpp | 27 +- src/rideables/MontageMSQueue.hpp | 4 +- src/utils/DCSS.hpp | 10 +- src/utils/Persistent.hpp | 3 + 19 files changed, 840 insertions(+), 679 deletions(-) create mode 100644 src/persist/ToBeFreedContainers.cpp create mode 100644 src/persist/ToBePersistedContainers.cpp create mode 100644 src/persist/TransactionTrackers.cpp create mode 100644 src/persist/common_macros.hpp delete mode 100644 src/persist/persist_config.hpp diff --git a/src/persist/EpochAdvancers.cpp b/src/persist/EpochAdvancers.cpp index f37fe945..6a53073c 100644 --- a/src/persist/EpochAdvancers.cpp +++ b/src/persist/EpochAdvancers.cpp @@ -16,11 +16,11 @@ void SingleThreadEpochAdvancer::set_help_freq(int help_power){ help_threshold = 0x1ULL << help_power; } void SingleThreadEpochAdvancer::on_end_transaction(EpochSys* esys, uint64_t c){ - assert(_tid != -1); - trans_cnts[_tid].ui++; - if (_tid == 0){ + assert(EpochSys::tid != -1); + trans_cnts[EpochSys::tid].ui++; + if (EpochSys::tid == 0){ 
// only a single thread can advance epochs. - if (trans_cnts[_tid].ui % epoch_threshold == 0){ + if (trans_cnts[EpochSys::tid].ui % epoch_threshold == 0){ esys->advance_epoch(c); } } diff --git a/src/persist/EpochAdvancers.hpp b/src/persist/EpochAdvancers.hpp index 93c48703..1c387c46 100644 --- a/src/persist/EpochAdvancers.hpp +++ b/src/persist/EpochAdvancers.hpp @@ -6,6 +6,8 @@ #include "TestConfig.hpp" #include "ConcurrentPrimitives.hpp" +namespace pds{ + class EpochSys; ///////////////////// @@ -68,4 +70,6 @@ class NoEpochAdvancer : public EpochAdvancer{ void on_end_transaction(EpochSys* esys, uint64_t c){} }; +} + #endif \ No newline at end of file diff --git a/src/persist/EpochSys.cpp b/src/persist/EpochSys.cpp index e2d9ead7..b950f26c 100644 --- a/src/persist/EpochSys.cpp +++ b/src/persist/EpochSys.cpp @@ -5,13 +5,14 @@ #include namespace pds{ - __thread int _tid = -1; + // __thread int _tid = -1; EpochSys* esys = nullptr; padded* epochs = nullptr; SysMode sys_mode = ONLINE; // std::atomic abort_cnt(0); // std::atomic total_cnt(0); + thread_local int EpochSys::tid = -1; UIDGenerator PBlk::uid_generator; padded* local_descs = nullptr; diff --git a/src/persist/EpochSys.hpp b/src/persist/EpochSys.hpp index cc14368c..87f429f3 100644 --- a/src/persist/EpochSys.hpp +++ b/src/persist/EpochSys.hpp @@ -14,20 +14,16 @@ #include "Persistent.hpp" #include "persist_utils.hpp" -namespace pds{ - -#define ASSERT_DERIVE(der, base)\ - static_assert(std::is_convertible::value,\ - #der " must inherit " #base " as public"); - -#define ASSERT_COPY(t)\ - static_assert(std::is_copy_constructible::value,\ - "type" #t "requires copying"); +#include "common_macros.hpp" +#include "TransactionTrackers.hpp" +#include "PerThreadContainers.hpp" +#include "ToBePersistedContainers.hpp" +#include "ToBeFreedContainers.hpp" +#include "EpochAdvancers.hpp" -#define INIT_EPOCH 3 -#define NULL_EPOCH 0 +namespace pds{ -extern __thread int _tid; +// extern __thread int _tid; enum SysMode {ONLINE, 
RECOVER}; @@ -51,16 +47,16 @@ class PBlk : public Persistent{ // Wentao: the first word should NOT be any persistent value for // epoch-system-level recovery (i.e., epoch), as Ralloc repurposes the first // word for block free list, which may interfere with the recovery. - // Currently we use (transient) payload as the first word. If we decide to + // Currently we use (transient) "reserved" as the first word. If we decide to // remove this field, we need to either prepend another dummy word, or // change the block free list in Ralloc. - // only used in transient headers. - PBlk* payload; + // transient. + void* _reserved; uint64_t epoch = NULL_EPOCH; PBlkType blktype = INIT; - uint64_t owner_id = 0; + uint64_t owner_id = 0; // TODO: make consider abandon this field and use id all the time. uint64_t id = 0; pptr retire = nullptr; // bool persisted = false; // For debug purposes. Might not be needed at the end. @@ -77,12 +73,11 @@ class PBlk : public Persistent{ static void init(int task_num){ uid_generator.init(task_num); } - // PBlk(uint64_t e): epoch(e), persisted(false){} - PBlk(): epoch(NULL_EPOCH), blktype(INIT), owner_id(0), id(uid_generator.get_id(_tid)), retire(nullptr){} - // PBlk(bool is_data): blktype(is_data?DATA:INIT), id(uid_generator.get_id(tid)) {} + // id gets inited by EpochSys instance. + PBlk(): epoch(NULL_EPOCH), blktype(INIT), owner_id(0), retire(nullptr){} + // id gets inited by EpochSys instance. PBlk(const PBlk* owner): - blktype(OWNED), owner_id(owner->blktype==OWNED? owner->owner_id : owner->id), - id(uid_generator.get_id(_tid)) {} + blktype(OWNED), owner_id(owner->blktype==OWNED? owner->owner_id : owner->id) {} PBlk(const PBlk& oth): blktype(oth.blktype==OWNED? 
OWNED:INIT), owner_id(oth.owner_id), id(oth.id) {} inline uint64_t get_id() {return id;} virtual pptr get_data() {return nullptr;} @@ -107,18 +102,14 @@ class PBlkArray : public PBlk{ T* content; //transient ptr inline size_t get_size()const{return size;} }; -// class PArray : public PBlk{ -// T* content; -// // TODO: set constructor private. use EpochSys::alloc_array -// // to get memory from Ralloc and in-place new PArray in the front, and -// // in-place new all T objects in the array (with exception for 1-word T's). -// }; -#include "TransactionTrackers.hpp" -#include "PerThreadContainers.hpp" -#include "ToBePersistedContainers.hpp" -#include "ToBeFreedContainers.hpp" -#include "EpochAdvancers.hpp" +struct Epoch : public PBlk{ + std::atomic global_epoch; + void persist(){} + Epoch(){ + global_epoch.store(NULL_EPOCH, std::memory_order_relaxed); + } +}; class EpochSys{ @@ -127,6 +118,10 @@ class EpochSys{ Epoch* epoch_container = nullptr; std::atomic* global_epoch = nullptr; + // semi-persistent fields: + // TODO: set a periodic-updated persistent boundary to recover to. + UIDGenerator uid_generator; + // transient fields: TransactionTracker* trans_tracker = nullptr; ToBePersistContainer* to_be_persisted = nullptr; @@ -135,15 +130,17 @@ class EpochSys{ GlobalTestConfig* gtc = nullptr; int task_num; - // static __thread int tid; bool consistent_increment(std::atomic& counter, const uint64_t c); public: + /* static */ + static thread_local int tid; + std::mutex dedicated_epoch_advancer_lock; - EpochSys(GlobalTestConfig* _gtc) : gtc(_gtc) { + EpochSys(GlobalTestConfig* _gtc) : uid_generator(_gtc->task_num), gtc(_gtc) { reset(); // TODO: change to recover() later on. 
} @@ -185,11 +182,11 @@ class EpochSys{ } void simulate_crash(){ - if(pds::_tid==0){ + if(tid==0){ delete epoch_advancer; epoch_advancer = nullptr; } - Persistent::simulate_crash(pds::_tid); + Persistent::simulate_crash(tid); } //////////////// // Operations // @@ -302,7 +299,10 @@ T* EpochSys::register_alloc_pblk(T* b, uint64_t c){ if (blk->blktype == INIT){ blk->blktype = ALLOC; } - // to_be_persisted[c%4].push(blk); + if (blk->id == 0){ + blk->id = uid_generator.get_id(tid); + } + to_be_persisted->register_persist(blk, c); PBlk* data = blk->get_data(); if (data){ @@ -323,7 +323,7 @@ PBlkArray* EpochSys::alloc_pblk_array(size_t s, uint64_t c){ new (p) T(); p++; } - ret->epoch = c; + register_alloc_pblk(ret); // temporarily removed the following persist: // we have to persist it after modifications anyways. // to_be_persisted->register_persist(ret, c); @@ -341,7 +341,7 @@ PBlkArray* EpochSys::alloc_pblk_array(PBlk* owner, size_t s, uint64_t c){ new (p) T(); p++; } - ret->epoch = c; + register_alloc_pblk(ret); // temporarily removed the following persist: // we have to persist it after modifications anyways. // to_be_persisted->register_persist(ret, c); diff --git a/src/persist/PString.hpp b/src/persist/PString.hpp index b20143b8..13739a9f 100644 --- a/src/persist/PString.hpp +++ b/src/persist/PString.hpp @@ -8,7 +8,7 @@ #include "pptr.hpp" using namespace pds; -extern __thread int pds::_tid; +// extern __thread int pds::_tid; // class PString : public PBlk{ // pptr> char_array; @@ -16,12 +16,12 @@ extern __thread int pds::_tid; // public: // // TODO: it's kind of cheating to use epochs[] here... 
// PString(PBlk* owner, const std::string& str) : PBlk(owner), -// char_array(esys->alloc_pblk_array(owner, str.size()+1, epochs[_tid].ui)) { +// char_array(esys->alloc_pblk_array(owner, str.size()+1, epochs[EpochSys::tid].ui)) { // memcpy(char_array->content, str.c_str(), str.size()+1); -// esys->register_update_pblk(char_array, epochs[_tid].ui); +// esys->register_update_pblk(char_array, epochs[EpochSys::tid].ui); // } // PString(const PString& oth): PBlk(oth), -// char_array(esys->copy_pblk_array(oth.char_array, epochs[_tid].ui)) {} +// char_array(esys->copy_pblk_array(oth.char_array, epochs[EpochSys::tid].ui)) {} // pptr get_data(){ // return pptr(char_array); @@ -35,15 +35,15 @@ extern __thread int pds::_tid; // PString& operator = (const PString &oth){ //assignment // // if(char_array!=nullptr) // char_array being null is impossible // PDELETE_DATA((PBlkArray*)char_array); -// char_array = esys->copy_pblk_array(oth.char_array, epochs[_tid].ui); +// char_array = esys->copy_pblk_array(oth.char_array, epochs[EpochSys::tid].ui); // return *this; // } // PString& operator=(const std::string& str){ // PDELETE_DATA((PBlkArray*)char_array); -// char_array = esys->alloc_pblk_array(this, str.size()+1, epochs[_tid].ui); +// char_array = esys->alloc_pblk_array(this, str.size()+1, epochs[EpochSys::tid].ui); // memcpy(char_array->content, str.c_str(), str.size()+1); -// esys->register_update_pblk(char_array, epochs[_tid].ui); +// esys->register_update_pblk(char_array, epochs[EpochSys::tid].ui); // return *this; // } diff --git a/src/persist/PerThreadContainers.hpp b/src/persist/PerThreadContainers.hpp index 0a730485..5903af68 100644 --- a/src/persist/PerThreadContainers.hpp +++ b/src/persist/PerThreadContainers.hpp @@ -1,6 +1,12 @@ #ifndef PERTHREADCONTAINERS_HPP #define PERTHREADCONTAINERS_HPP +#include + +#include "ConcurrentPrimitives.hpp" +#include "persist_utils.hpp" + +namespace pds{ /////////////////////////// // Concurrent Containers // /////////////////////////// 
@@ -8,10 +14,9 @@ template class PerThreadContainer{ public: - virtual void push(T x, uint64_t c) = 0; + virtual void push(T x, int tid, uint64_t c) = 0; virtual void pop_all(void (*func)(T& x), uint64_t c) = 0; - virtual void pop_all_local(void (*func)(T& x), uint64_t c) = 0; - virtual bool try_pop_local(void (*func)(T& x), uint64_t c) = 0; + virtual bool try_pop_local(void (*func)(T& x), int tid, uint64_t c) = 0; virtual void pop_all_local(void (*func)(T& x), int tid, uint64_t c) = 0; virtual void clear() = 0; virtual ~PerThreadContainer(){} @@ -26,18 +31,12 @@ class CircBufferContainer: public PerThreadContainer{ containers[i].ui = new PerThreadCircBuffer(task_num); } } - void push(T x, uint64_t c){ - containers[c%4].ui->push(x, _tid); + void push(T x, int tid, uint64_t c){ + containers[c%4].ui->push(x, tid); } void pop_all(void (*func)(T& x), uint64_t c){ containers[c%4].ui->pop_all(func); } - void pop_all_local(void (*func)(T& x), uint64_t c){ - containers[c%4].ui->pop_all_local(func, _tid); - } - bool try_pop_local(void (*func)(T& x), uint64_t c){ - return containers[c%4].ui->try_pop_local(func, _tid); - } void pop_all_local(void (*func)(T& x), int tid, uint64_t c){ assert(tid != -1); containers[c%4].ui->pop_all_local(func, tid); @@ -62,21 +61,15 @@ class FixedCircBufferContainer: public PerThreadContainer{ containers[i].ui = new PerThreadFixedCircBuffer(task_num, cap); } } - void push(T x, uint64_t c){ - containers[c%4].ui->push(x, _tid); + void push(T x, int tid, uint64_t c){ + containers[c%4].ui->push(x, tid); } - bool try_push(T x, uint64_t c){ - return containers[c%4].ui->try_push(x, _tid); + bool try_push(T x, int tid, uint64_t c){ + return containers[c%4].ui->try_push(x, tid); } void pop_all(void (*func)(T& x), uint64_t c){ containers[c%4].ui->pop_all(func); } - void pop_all_local(void (*func)(T& x), uint64_t c){ - containers[c%4].ui->pop_all_local(func, _tid); - } - bool try_pop_local(void (*func)(T& x), uint64_t c){ - return 
containers[c%4].ui->try_pop_local(func, _tid); - } void pop_all_local(void (*func)(T& x), int tid, uint64_t c){ assert(tid != -1); containers[c%4].ui->pop_all_local(func, tid); @@ -85,8 +78,8 @@ class FixedCircBufferContainer: public PerThreadContainer{ assert(tid != -1); return containers[c%4].ui->try_pop_local(func, tid); } - bool full_local(uint64_t c){ - return containers[c%4].ui->full_local(_tid); + bool full_local(int tid, uint64_t c){ + return containers[c%4].ui->full_local(tid); } void clear(){ for (int i = 0; i < 4; i++){ @@ -104,18 +97,12 @@ class VectorContainer: public PerThreadContainer{ containers[i].ui = new PerThreadVector(task_num); } } - void push(T x, uint64_t c){ - containers[c%4].ui->push(x, _tid); + void push(T x, int tid, uint64_t c){ + containers[c%4].ui->push(x, tid); } void pop_all(void (*func)(T& x), uint64_t c){ containers[c%4].ui->pop_all(func); } - void pop_all_local(void (*func)(T& x), uint64_t c){ - containers[c%4].ui->pop_all_local(func, _tid); - } - bool try_pop_local(void (*func)(T& x), uint64_t c){ - return containers[c%4].ui->try_pop_local(func, _tid); - } void pop_all_local(void (*func)(T& x), int tid, uint64_t c){ assert(tid != -1); containers[c%4].ui->pop_all_local(func, tid); @@ -140,18 +127,12 @@ class HashSetContainer: public PerThreadContainer{ containers[i].ui = new PerThreadHashSet(task_num); } } - void push(T x, uint64_t c){ - containers[c%4].ui->push(x, _tid); + void push(T x, int tid, uint64_t c){ + containers[c%4].ui->push(x, tid); } void pop_all(void (*func)(T& x), uint64_t c){ containers[c%4].ui->pop_all(func); } - void pop_all_local(void (*func)(T& x), uint64_t c){ - containers[c%4].ui->pop_all_local(func, _tid); - } - bool try_pop_local(void (*func)(T& x), uint64_t c){ - return containers[c%4].ui->try_pop_local(func, _tid); - } void pop_all_local(void (*func)(T& x), int tid, uint64_t c){ assert(tid != -1); containers[c%4].ui->pop_all_local(func, tid); @@ -167,4 +148,6 @@ class HashSetContainer: public 
PerThreadContainer{ } }; +} + #endif \ No newline at end of file diff --git a/src/persist/ToBeFreedContainers.cpp b/src/persist/ToBeFreedContainers.cpp new file mode 100644 index 00000000..8bdc4e18 --- /dev/null +++ b/src/persist/ToBeFreedContainers.cpp @@ -0,0 +1,89 @@ +#include "ToBeFreedContainers.hpp" + +#include "EpochSys.hpp" + +using namespace pds; + +void PerThreadFreedContainer::do_free(PBlk*& x){ + delete x; +} +PerThreadFreedContainer::PerThreadFreedContainer(GlobalTestConfig* gtc): task_num(gtc->task_num){ + container = new VectorContainer(gtc->task_num); + threadEpoch = new padded[gtc->task_num]; + locks = new padded[gtc->task_num]; + for(int i = 0; i < gtc->task_num; i++){ + threadEpoch[i] = NULL_EPOCH; + } +} +PerThreadFreedContainer::~PerThreadFreedContainer(){ + delete container; +} +void PerThreadFreedContainer::free_on_new_epoch(uint64_t c){ + /* there are 3 possilibities: + 1. thread's previous transaction epoch is c, in this case, just return + 2. thread's previous transaction epoch is c-1, in this case, free the retired blocks in epoch c-2, and update the thread's + most recent transaction epoch number + 3. thread's previous transaction epoch is smaller than c-1, in this case, just return, because epoch advanver has already + freed all the blocks from 2 epochs ago, then update the thread's most recent transaction epoch number + So we need to keep the to_be_free->help_free(c-2) in epoch_advancer. 
*/ + + if( c == threadEpoch[EpochSys::tid] -1){ + std::lock_guard lk(locks[EpochSys::tid].ui); + help_free_local(c - 2); + threadEpoch[EpochSys::tid] = c; + }else if( c < threadEpoch[EpochSys::tid] -1){ + threadEpoch[EpochSys::tid] = c; + } +} +void PerThreadFreedContainer::register_free(PBlk* blk, uint64_t c){ + // container[c%4].ui->push(blk, EpochSys::tid); + container->push(blk, EpochSys::tid, c); +} +void PerThreadFreedContainer::help_free(uint64_t c){ + // try to get all the locks, spin when unable to get the target lock while holding all acquired locks + // optimization? + for(int i = 0; i < task_num; i++){ + while(!locks[i].ui.try_lock()){} + } + + container->pop_all(&do_free, c); + + for(int i = 0; i < task_num; i++){ + locks[i].ui.unlock(); + } +} +void PerThreadFreedContainer::help_free_local(uint64_t c){ + container->pop_all_local(&do_free, EpochSys::tid, c); +} +void PerThreadFreedContainer::clear(){ + container->clear(); +} + + +void PerEpochFreedContainer::do_free(PBlk*& x){ + delete x; +} +PerEpochFreedContainer::PerEpochFreedContainer(GlobalTestConfig* gtc){ + container = new VectorContainer(gtc->task_num); + // container = new HashSetContainer(gtc->task_num); +} +PerEpochFreedContainer::~PerEpochFreedContainer(){ + delete container; +} +void PerEpochFreedContainer::register_free(PBlk* blk, uint64_t c){ + // container[c%4].ui->push(blk, EpochSys::tid); + container->push(blk, EpochSys::tid, c); +} +void PerEpochFreedContainer::help_free(uint64_t c){ + container->pop_all(&do_free, c); +} +void PerEpochFreedContainer::help_free_local(uint64_t c){ + container->pop_all_local(&do_free, EpochSys::tid, c); +} +void PerEpochFreedContainer::clear(){ + container->clear(); +} + +void NoToBeFreedContainer::register_free(PBlk* blk, uint64_t c){ + delete blk; +} diff --git a/src/persist/ToBeFreedContainers.hpp b/src/persist/ToBeFreedContainers.hpp index e4331ad5..b10411f2 100644 --- a/src/persist/ToBeFreedContainers.hpp +++ b/src/persist/ToBeFreedContainers.hpp 
@@ -1,136 +1,75 @@ #ifndef TO_BE_FREED_CONTAINERS_HPP #define TO_BE_FREED_CONTAINERS_HPP +#include + +#include "TestConfig.hpp" +#include "PerThreadContainers.hpp" + /////////////////////////// // To-be-free Containers // /////////////////////////// - class ToBeFreedContainer{ - public: - virtual void register_free(PBlk* blk, uint64_t c) {}; - virtual void help_free(uint64_t c) {}; - virtual void help_free_local(uint64_t c) {}; - virtual void clear() = 0; - virtual void free_on_new_epoch(uint64_t c){}; - virtual ~ToBeFreedContainer(){} - }; +namespace pds{ - class PerThreadFreedContainer : public ToBeFreedContainer{ - PerThreadContainer* container = nullptr; - padded* threadEpoch; - padded* locks = nullptr; - int task_num; - static void do_free(PBlk*& x){ - delete x; - } - public: - PerThreadFreedContainer(){} - PerThreadFreedContainer(GlobalTestConfig* gtc): task_num(gtc->task_num){ - container = new VectorContainer(gtc->task_num); - threadEpoch = new padded[gtc->task_num]; - locks = new padded[gtc->task_num]; - for(int i = 0; i < gtc->task_num; i++){ - threadEpoch[i] = NULL_EPOCH; - } - } - ~PerThreadFreedContainer(){ - delete container; - } +class PBlk; - void free_on_new_epoch(uint64_t c){ - /* there are 3 possilibities: - 1. thread's previous transaction epoch is c, in this case, just return - 2. thread's previous transaction epoch is c-1, in this case, free the retired blocks in epoch c-2, and update the thread's - most recent transaction epoch number - 3. thread's previous transaction epoch is smaller than c-1, in this case, just return, because epoch advanver has already - freed all the blocks from 2 epochs ago, then update the thread's most recent transaction epoch number - So we need to keep the to_be_free->help_free(c-2) in epoch_advancer. 
*/ +class ToBeFreedContainer{ +public: + virtual void register_free(PBlk* blk, uint64_t c) {}; + virtual void help_free(uint64_t c) {}; + virtual void help_free_local(uint64_t c) {}; + virtual void clear() = 0; + virtual void free_on_new_epoch(uint64_t c){}; + virtual ~ToBeFreedContainer(){} +}; - if( c == threadEpoch[_tid] -1){ - std::lock_guard lk(locks[_tid].ui); - help_free_local(c - 2); - threadEpoch[_tid] = c; - }else if( c < threadEpoch[_tid] -1){ - threadEpoch[_tid] = c; - } - } +class PerThreadFreedContainer : public ToBeFreedContainer{ + PerThreadContainer* container = nullptr; + padded* threadEpoch; + padded* locks = nullptr; + int task_num; + static void do_free(PBlk*& x); +public: + PerThreadFreedContainer(){} + PerThreadFreedContainer(GlobalTestConfig* gtc); + ~PerThreadFreedContainer(); - void register_free(PBlk* blk, uint64_t c){ - // container[c%4].ui->push(blk, _tid); - container->push(blk, c); - } - void help_free(uint64_t c){ - // try to get all the locks, spin when unable to get the target lock while holding all acquired locks - // optimization? 
- for(int i = 0; i < task_num; i++){ - while(!locks[i].ui.try_lock()){} - } + void free_on_new_epoch(uint64_t c); - container->pop_all(&do_free, c); - - for(int i = 0; i < task_num; i++){ - locks[i].ui.unlock(); - } - } - void help_free_local(uint64_t c){ - container->pop_all_local(&do_free, c); - } - void clear(){ - container->clear(); - } - }; + void register_free(PBlk* blk, uint64_t c); + void help_free(uint64_t c); + void help_free_local(uint64_t c); + void clear(); +}; - class PerEpochFreedContainer : public ToBeFreedContainer{ - PerThreadContainer* container = nullptr; - static void do_free(PBlk*& x){ - delete x; - } - public: - PerEpochFreedContainer(){ - // errexit("DO NOT USE DEFAULT CONSTRUCTOR OF ToBeFreedContainer"); - } - PerEpochFreedContainer(GlobalTestConfig* gtc){ - container = new VectorContainer(gtc->task_num); - // container = new HashSetContainer(gtc->task_num); - } - ~PerEpochFreedContainer(){ - delete container; - } - void free_on_new_epoch(uint64_t c){} - void register_free(PBlk* blk, uint64_t c){ - // container[c%4].ui->push(blk, _tid); - container->push(blk, c); - } - void help_free(uint64_t c){ - container->pop_all(&do_free, c); - } - void help_free_local(uint64_t c){ - container->pop_all_local(&do_free, c); - } - void clear(){ - container->clear(); - } - }; +class PerEpochFreedContainer : public ToBeFreedContainer{ + PerThreadContainer* container = nullptr; + static void do_free(PBlk*& x); +public: + PerEpochFreedContainer(){ + // errexit("DO NOT USE DEFAULT CONSTRUCTOR OF ToBeFreedContainer"); + } + PerEpochFreedContainer(GlobalTestConfig* gtc); + ~PerEpochFreedContainer(); + void free_on_new_epoch(uint64_t c){} + void register_free(PBlk* blk, uint64_t c); + void help_free(uint64_t c); + void help_free_local(uint64_t c); + void clear(); +}; - class NoToBeFreedContainer : public ToBeFreedContainer{ - // A to-be-freed container that does absolutely nothing. 
- public: - NoToBeFreedContainer(){} - virtual void register_free(PBlk* blk, uint64_t c){ - delete blk; - } - void free_on_new_epoch(uint64_t c){} - virtual void help_free(uint64_t c){} - virtual void help_free_local(uint64_t c){} - virtual void clear(){} - }; +class NoToBeFreedContainer : public ToBeFreedContainer{ + // A to-be-freed container that does absolutely nothing. +public: + NoToBeFreedContainer(){} + virtual void register_free(PBlk* blk, uint64_t c); + void free_on_new_epoch(uint64_t c){} + virtual void help_free(uint64_t c){} + virtual void help_free_local(uint64_t c){} + virtual void clear(){} +}; - struct Epoch : public PBlk{ - std::atomic global_epoch; - void persist(){} - Epoch(){ - global_epoch.store(NULL_EPOCH, std::memory_order_relaxed); - } - }; +} #endif diff --git a/src/persist/ToBePersistedContainers.cpp b/src/persist/ToBePersistedContainers.cpp new file mode 100644 index 00000000..da008a7f --- /dev/null +++ b/src/persist/ToBePersistedContainers.cpp @@ -0,0 +1,263 @@ +#include "ToBePersistedContainers.hpp" +#include "EpochSys.hpp" + +using namespace pds; + +void PerEpoch::AdvancerPersister::persist_epoch(uint64_t c){ + con->container->pop_all(&do_persist, c); +} +void PerEpoch::PerThreadDedicatedWait::persister_main(int worker_id){ + // pin this thread to hyperthreads of worker threads. + hwloc_set_cpubind(gtc->topology, + persister_affinities[worker_id]->cpuset,HWLOC_CPUBIND_THREAD); + // spin until signaled to destruct. + int last_signal = 0; + int curr_signal = 0; + int curr_epoch = NULL_EPOCH; + while(!exit){ + // wait on worker (tid == worker_id) thread's signal. 
+ std::unique_lock lck(signal.bell); + while(last_signal == curr_signal && !exit){ + curr_signal = signal.curr; + signal.ring.wait(lck); + curr_epoch = signal.epoch; + } + last_signal = curr_signal; + // dumps + con->container->pop_all_local(&do_persist, worker_id, curr_epoch); + } +} +PerEpoch::PerThreadDedicatedWait::PerThreadDedicatedWait(PerEpoch* _con, GlobalTestConfig* _gtc) : + Persister(_con), gtc(_gtc) { + // re-build worker thread affinity that pin current threads to individual cores + gtc->affinities.clear(); + gtc->buildPerCoreAffinity(gtc->affinities, 0); + // build affinities that pin persisters to hyperthreads of worker threads + gtc->buildPerCoreAffinity(persister_affinities, 1); + // init environment + exit.store(false, std::memory_order_relaxed); + // spawn threads + for (int i = 0; i < gtc->task_num; i++){ + persisters.push_back(std::move( + std::thread(&PerThreadDedicatedWait::persister_main, this, i))); + } +} +PerEpoch::PerThreadDedicatedWait::~PerThreadDedicatedWait(){ + // signal exit of worker threads. + exit.store(true, std::memory_order_release); + { + std::unique_lock lck(signal.bell); + signal.curr++; + } + signal.ring.notify_all(); + // join threads + for (auto i = persisters.begin(); i != persisters.end(); i++){ + if (i->joinable()){ + i->join(); + } + } +} +void PerEpoch::do_persist(std::pair& addr_size){ + persist_func::clwb_range_nofence( + addr_size.first, addr_size.second); +} +void PerEpoch::PerThreadDedicatedWait::persist_epoch(uint64_t c){ + // notify hyperthreads. 
+ { + std::unique_lock lck(signal.bell); + signal.curr++; + signal.epoch = c; + } + signal.ring.notify_all(); +} + +void PerEpoch::register_persist(PBlk* blk, uint64_t c){ + if (c == NULL_EPOCH){ + errexit("registering persist of epoch NULL."); + } + size_t sz = RP_malloc_size(blk); + container->push(std::make_pair((char*)blk, (size_t)sz), EpochSys::tid, c); +} +void PerEpoch::register_persist_raw(PBlk* blk, uint64_t c){ + container->push(std::make_pair((char*)blk, 1), EpochSys::tid, c); +} +void PerEpoch::persist_epoch(uint64_t c){ + persister->persist_epoch(c); +} +void PerEpoch::clear(){ + container->clear(); +} + +void BufferedWB::PerThreadDedicatedWait::persister_main(int worker_id){ + // pin this thread to hyperthreads of worker threads. + hwloc_set_cpubind(gtc->topology, + persister_affinities[worker_id]->cpuset,HWLOC_CPUBIND_THREAD); + // spin until signaled to destruct. + int last_signal = 0; + int curr_signal = 0; + uint64_t curr_epoch = NULL_EPOCH; + + while(!exit){ + // wait on worker (tid == worker_id) thread's signal. 
+ std::unique_lock lck(signals[worker_id].bell); + while(last_signal == curr_signal && !exit){ + curr_signal = signals[worker_id].curr; + signals[worker_id].ring.wait(lck); + curr_epoch = signals[worker_id].epoch; + } + last_signal = curr_signal; + // dumps + for (int i = 0; i < con->dump_size; i++){ + con->container->try_pop_local(&do_persist, worker_id, curr_epoch); + } + } +} +BufferedWB::PerThreadDedicatedWait::PerThreadDedicatedWait(BufferedWB* _con, GlobalTestConfig* _gtc) : + Persister(_con), gtc(_gtc) { + // re-build worker thread affinity that pin current threads to individual cores + gtc->affinities.clear(); + gtc->buildPerCoreAffinity(gtc->affinities, 0); + // build affinities that pin persisters to hyperthreads of worker threads + gtc->buildPerCoreAffinity(persister_affinities, 1); + // init environment + exit.store(false, std::memory_order_relaxed); + signals = new Signal[gtc->task_num]; + // spawn threads + for (int i = 0; i < gtc->task_num; i++){ + persisters.push_back(std::move( + std::thread(&PerThreadDedicatedWait::persister_main, this, i))); + } +} +BufferedWB::PerThreadDedicatedWait::~PerThreadDedicatedWait(){ + // signal exit of worker threads. + exit.store(true, std::memory_order_release); + for (int i = 0; i < gtc->task_num; i++){ + // TODO: lock here? + signals[i].curr++; + signals[i].ring.notify_one(); + } + // join threads + for (auto i = persisters.begin(); i != persisters.end(); i++){ + if (i->joinable()){ + // std::cout<<"joining thread."<join(); + // std::cout<<"joined."< lck(signals[EpochSys::tid].bell); + signals[EpochSys::tid].curr++; + signals[EpochSys::tid].epoch = c; + } + signals[EpochSys::tid].ring.notify_one(); +} + +void BufferedWB::PerThreadDedicatedBusy::persister_main(int worker_id){ + // pin this thread to hyperthreads of worker threads. + hwloc_set_cpubind(gtc->topology, + persister_affinities[worker_id]->cpuset,HWLOC_CPUBIND_THREAD); + // spin until signaled to destruct. 
+ int last_signal = 0; + int curr_signal = 0; + uint64_t curr_epoch = NULL_EPOCH; + while(!exit){ + // wait on worker (tid == worker_id) thread's signal. + while(true){ + if (exit.load(std::memory_order_acquire)){ + return; + } + curr_signal = signals[worker_id].curr.load(std::memory_order_acquire); + if (curr_signal != last_signal){ + break; + } + } + curr_epoch = signals[worker_id].epoch; + signals[worker_id].ack.fetch_add(1, std::memory_order_release); + last_signal = curr_signal; + // dumps + for (int i = 0; i < con->dump_size; i++){ + con->container->try_pop_local(&do_persist, worker_id, curr_epoch); + } + } +} +BufferedWB::PerThreadDedicatedBusy::PerThreadDedicatedBusy(BufferedWB* _con, GlobalTestConfig* _gtc) : + Persister(_con), gtc(_gtc) { + // re-build worker thread affinity that pin current threads to individual cores + gtc->affinities.clear(); + gtc->buildPerCoreAffinity(gtc->affinities, 0); + // build affinities that pin persisters to hyperthreads of worker threads + gtc->buildPerCoreAffinity(persister_affinities, 1); + // init environment + exit.store(false, std::memory_order_relaxed); + signals = new Signal[gtc->task_num]; + // spawn threads + for (int i = 0; i < gtc->task_num; i++){ + signals[i].curr.store(0, std::memory_order_relaxed); + signals[i].ack.store(0, std::memory_order_relaxed); + persisters.push_back(std::move( + std::thread(&PerThreadDedicatedBusy::persister_main, this, i))); + } +} +BufferedWB::PerThreadDedicatedBusy::~PerThreadDedicatedBusy(){ + // signal exit of worker threads. + exit.store(true, std::memory_order_release); + // join threads + for (auto i = persisters.begin(); i != persisters.end(); i++){ + if (i->joinable()){ + i->join(); + } + } + delete signals; +} +void BufferedWB::PerThreadDedicatedBusy::help_persist_local(uint64_t c){ + // notify hyperthread. + signals[EpochSys::tid].epoch = c; + int prev = signals[EpochSys::tid].curr.fetch_add(1, std::memory_order_release); + // make sure the persister gets the correct epoch. 
+ while(prev == signals[EpochSys::tid].ack.load(std::memory_order_acquire)); +} +void BufferedWB::WorkerThreadPersister::help_persist_local(uint64_t c){ + for (int i = 0; i < con->dump_size; i++){ + con->container->try_pop_local(&do_persist, EpochSys::tid, c); + } +} +void BufferedWB::do_persist(std::pair& addr_size){ + persist_func::clwb_range_nofence( + addr_size.first, addr_size.second); +} +void BufferedWB::dump(uint64_t c){ + for (int i = 0; i < dump_size; i++){ + container->try_pop_local(&do_persist, EpochSys::tid, c); + } +} +void BufferedWB::push(std::pair entry, uint64_t c){ + while (!container->try_push(entry, EpochSys::tid, c)){// in case other thread(s) are doing write-backs. + persister->help_persist_local(c); + } +} +void BufferedWB::register_persist(PBlk* blk, uint64_t c){ + if (c == NULL_EPOCH){ + errexit("registering persist of epoch NULL."); + } + size_t sz = RP_malloc_size(blk); + push(std::make_pair((char*)blk, (size_t)sz), c); + +} +void BufferedWB::register_persist_raw(PBlk* blk, uint64_t c){ + if (c == NULL_EPOCH){ + errexit("registering persist of epoch NULL."); + } + push(std::make_pair((char*)blk, 1), c); +} +void BufferedWB::persist_epoch(uint64_t c){ // NOTE: this is not thread-safe. 
+ for (int i = 0; i < task_num; i++){ + container->pop_all_local(&do_persist, i, c); + } +} +void BufferedWB::clear(){ + container->clear(); +} \ No newline at end of file diff --git a/src/persist/ToBePersistedContainers.hpp b/src/persist/ToBePersistedContainers.hpp index 0e6e4c0a..924e1edf 100644 --- a/src/persist/ToBePersistedContainers.hpp +++ b/src/persist/ToBePersistedContainers.hpp @@ -1,6 +1,24 @@ #ifndef TO_BE_PERSISTED_CONTAINERS_HPP #define TO_BE_PERSISTED_CONTAINERS_HPP +#include +#include +#include +#include + +#include "TestConfig.hpp" +#include "ConcurrentPrimitives.hpp" +#include "PersistFunc.hpp" +#include "PerThreadContainers.hpp" +#include "persist_utils.hpp" +#include "common_macros.hpp" +#include "Persistent.hpp" + +namespace pds{ + +class PBlk; +class EpochSys; + template struct PairHash{ size_t operator () (const std::pair &x) const{ @@ -36,9 +54,7 @@ class PerEpoch : public ToBePersistContainer{ class AdvancerPersister : public Persister{ public: AdvancerPersister(PerEpoch* _con): Persister(_con){} - void persist_epoch(uint64_t c){ - con->container->pop_all(&do_persist, c); - } + void persist_epoch(uint64_t c); }; class PerThreadDedicatedWait : public Persister{ @@ -47,82 +63,24 @@ class PerEpoch : public ToBePersistContainer{ std::condition_variable ring; int curr = 0; uint64_t epoch = INIT_EPOCH; - }__attribute__((aligned(CACHELINE_SIZE))); + }__attribute__((aligned(CACHE_LINE_SIZE))); GlobalTestConfig* gtc; std::vector persisters; std::vector persister_affinities; - atomic exit; + std::atomic exit; Signal signal; // TODO: explain in comment what's going on here. - void persister_main(int worker_id){ - // pin this thread to hyperthreads of worker threads. - hwloc_set_cpubind(gtc->topology, - persister_affinities[worker_id]->cpuset,HWLOC_CPUBIND_THREAD); - // spin until signaled to destruct. - int last_signal = 0; - int curr_signal = 0; - int curr_epoch = NULL_EPOCH; - while(!exit){ - // wait on worker (tid == worker_id) thread's signal. 
- std::unique_lock lck(signal.bell); - while(last_signal == curr_signal && !exit){ - curr_signal = signal.curr; - signal.ring.wait(lck); - curr_epoch = signal.epoch; - } - last_signal = curr_signal; - // dumps - con->container->pop_all_local(&do_persist, worker_id, curr_epoch); - } - } + void persister_main(int worker_id); public: - PerThreadDedicatedWait(PerEpoch* _con, GlobalTestConfig* _gtc) : Persister(_con), gtc(_gtc) { - // re-build worker thread affinity that pin current threads to individual cores - gtc->affinities.clear(); - gtc->buildPerCoreAffinity(gtc->affinities, 0); - // build affinities that pin persisters to hyperthreads of worker threads - gtc->buildPerCoreAffinity(persister_affinities, 1); - // init environment - exit.store(false, std::memory_order_relaxed); - // spawn threads - for (int i = 0; i < gtc->task_num; i++){ - persisters.push_back(std::move( - std::thread(&PerThreadDedicatedWait::persister_main, this, i))); - } - } - ~PerThreadDedicatedWait(){ - // signal exit of worker threads. - exit.store(true, std::memory_order_release); - { - std::unique_lock lck(signal.bell); - signal.curr++; - } - signal.ring.notify_all(); - // join threads - for (auto i = persisters.begin(); i != persisters.end(); i++){ - if (i->joinable()){ - i->join(); - } - } - } - void persist_epoch(uint64_t c){ - // notify hyperthreads. 
- { - std::unique_lock lck(signal.bell); - signal.curr++; - signal.epoch = c; - } - signal.ring.notify_all(); - } + PerThreadDedicatedWait(PerEpoch* _con, GlobalTestConfig* _gtc); + ~PerThreadDedicatedWait(); + void persist_epoch(uint64_t c); }; PerThreadContainer>* container = nullptr; Persister* persister = nullptr; - static void do_persist(std::pair& addr_size){ - persist_func::clwb_range_nofence( - addr_size.first, addr_size.second); - } + static void do_persist(std::pair& addr_size); public: PerEpoch(GlobalTestConfig* gtc){ if (gtc->checkEnv("Container")){ @@ -152,34 +110,21 @@ class PerEpoch : public ToBePersistContainer{ } else { persister = new AdvancerPersister(this); } - } ~PerEpoch(){ delete persister; delete container; } - void register_persist(PBlk* blk, uint64_t c){ - if (c == NULL_EPOCH){ - errexit("registering persist of epoch NULL."); - } - size_t sz = RP_malloc_size(blk); - container->push(std::make_pair((char*)blk, (size_t)sz), c); - } - void register_persist_raw(PBlk* blk, uint64_t c){ - container->push(std::make_pair((char*)blk, 1), c); - } - void persist_epoch(uint64_t c){ - persister->persist_epoch(c); - } - void clear(){ - container->clear(); - } + void register_persist(PBlk* blk, uint64_t c); + void register_persist_raw(PBlk* blk, uint64_t c); + void persist_epoch(uint64_t c); + void clear(); }; class DirWB : public ToBePersistContainer{ public: void register_persist(PBlk* blk, uint64_t c){ - persist_func::clwb_range_nofence(blk, RP_malloc_size(blk)); + persist_func::clwb_range_nofence(blk, Persistent::get_malloc_size(blk)); } void persist_epoch(uint64_t c){} void clear(){} @@ -200,81 +145,19 @@ class BufferedWB : public ToBePersistContainer{ std::condition_variable ring; int curr = 0; uint64_t epoch = INIT_EPOCH; - }__attribute__((aligned(CACHELINE_SIZE))); + }__attribute__((aligned(CACHE_LINE_SIZE))); GlobalTestConfig* gtc; std::vector persisters; std::vector persister_affinities; - atomic exit; + std::atomic exit; Signal* signals; // 
TODO: explain in comment what's going on here. - void persister_main(int worker_id){ - // pin this thread to hyperthreads of worker threads. - hwloc_set_cpubind(gtc->topology, - persister_affinities[worker_id]->cpuset,HWLOC_CPUBIND_THREAD); - // spin until signaled to destruct. - int last_signal = 0; - int curr_signal = 0; - uint64_t curr_epoch = NULL_EPOCH; - - while(!exit){ - // wait on worker (tid == worker_id) thread's signal. - std::unique_lock lck(signals[worker_id].bell); - while(last_signal == curr_signal && !exit){ - curr_signal = signals[worker_id].curr; - signals[worker_id].ring.wait(lck); - curr_epoch = signals[worker_id].epoch; - } - last_signal = curr_signal; - // dumps - for (int i = 0; i < con->dump_size; i++){ - con->container->try_pop_local(&do_persist, worker_id, curr_epoch); - } - } - } + void persister_main(int worker_id); public: - PerThreadDedicatedWait(BufferedWB* _con, GlobalTestConfig* _gtc) : Persister(_con), gtc(_gtc) { - // re-build worker thread affinity that pin current threads to individual cores - gtc->affinities.clear(); - gtc->buildPerCoreAffinity(gtc->affinities, 0); - // build affinities that pin persisters to hyperthreads of worker threads - gtc->buildPerCoreAffinity(persister_affinities, 1); - // init environment - exit.store(false, std::memory_order_relaxed); - signals = new Signal[gtc->task_num]; - // spawn threads - for (int i = 0; i < gtc->task_num; i++){ - persisters.push_back(std::move( - std::thread(&PerThreadDedicatedWait::persister_main, this, i))); - } - } - ~PerThreadDedicatedWait(){ - // signal exit of worker threads. - exit.store(true, std::memory_order_release); - for (int i = 0; i < gtc->task_num; i++){ - // TODO: lock here? 
- signals[i].curr++; - signals[i].ring.notify_one(); - } - // join threads - for (auto i = persisters.begin(); i != persisters.end(); i++){ - if (i->joinable()){ - // std::cout<<"joining thread."<join(); - // std::cout<<"joined."< lck(signals[_tid].bell); - signals[_tid].curr++; - signals[_tid].epoch = c; - } - signals[_tid].ring.notify_one(); - } + PerThreadDedicatedWait(BufferedWB* _con, GlobalTestConfig* _gtc); + ~PerThreadDedicatedWait(); + void help_persist_local(uint64_t c); }; class PerThreadDedicatedBusy : public Persister{ @@ -282,88 +165,25 @@ class BufferedWB : public ToBePersistContainer{ std::atomic curr; std::atomic ack; uint64_t epoch = INIT_EPOCH; - }__attribute__((aligned(CACHELINE_SIZE))); + }__attribute__((aligned(CACHE_LINE_SIZE))); GlobalTestConfig* gtc; std::vector persisters; std::vector persister_affinities; - atomic exit; + std::atomic exit; Signal* signals; // TODO: explain in comment what's going on here. - void persister_main(int worker_id){ - // pin this thread to hyperthreads of worker threads. - hwloc_set_cpubind(gtc->topology, - persister_affinities[worker_id]->cpuset,HWLOC_CPUBIND_THREAD); - // spin until signaled to destruct. - int last_signal = 0; - int curr_signal = 0; - uint64_t curr_epoch = NULL_EPOCH; - while(!exit){ - // wait on worker (tid == worker_id) thread's signal. 
- while(true){ - if (exit.load(std::memory_order_acquire)){ - return; - } - curr_signal = signals[worker_id].curr.load(std::memory_order_acquire); - if (curr_signal != last_signal){ - break; - } - } - curr_epoch = signals[worker_id].epoch; - signals[worker_id].ack.fetch_add(1, std::memory_order_release); - last_signal = curr_signal; - // dumps - for (int i = 0; i < con->dump_size; i++){ - con->container->try_pop_local(&do_persist, worker_id, curr_epoch); - } - } - } + void persister_main(int worker_id); public: - PerThreadDedicatedBusy(BufferedWB* _con, GlobalTestConfig* _gtc) : Persister(_con), gtc(_gtc) { - // re-build worker thread affinity that pin current threads to individual cores - gtc->affinities.clear(); - gtc->buildPerCoreAffinity(gtc->affinities, 0); - // build affinities that pin persisters to hyperthreads of worker threads - gtc->buildPerCoreAffinity(persister_affinities, 1); - // init environment - exit.store(false, std::memory_order_relaxed); - signals = new Signal[gtc->task_num]; - // spawn threads - for (int i = 0; i < gtc->task_num; i++){ - signals[i].curr.store(0, std::memory_order_relaxed); - signals[i].ack.store(0, std::memory_order_relaxed); - persisters.push_back(std::move( - std::thread(&PerThreadDedicatedBusy::persister_main, this, i))); - } - } - ~PerThreadDedicatedBusy(){ - // signal exit of worker threads. - exit.store(true, std::memory_order_release); - // join threads - for (auto i = persisters.begin(); i != persisters.end(); i++){ - if (i->joinable()){ - i->join(); - } - } - delete signals; - } - void help_persist_local(uint64_t c){ - // notify hyperthread. - signals[_tid].epoch = c; - int prev = signals[_tid].curr.fetch_add(1, std::memory_order_release); - // make sure the persister gets the correct epoch. 
- while(prev == signals[_tid].ack.load(std::memory_order_acquire)); - } + PerThreadDedicatedBusy(BufferedWB* _con, GlobalTestConfig* _gtc); + ~PerThreadDedicatedBusy(); + void help_persist_local(uint64_t c); }; class WorkerThreadPersister : public Persister{ public: WorkerThreadPersister(BufferedWB* _con) : Persister(_con) {} - void help_persist_local(uint64_t c){ - for (int i = 0; i < con->dump_size; i++){ - con->container->try_pop_local(&do_persist, c); - } - } + void help_persist_local(uint64_t c); }; FixedCircBufferContainer>* container = nullptr; @@ -374,15 +194,8 @@ class BufferedWB : public ToBePersistContainer{ int task_num; int buffer_size = 2048; int dump_size = 1024; - static void do_persist(std::pair& addr_size){ - persist_func::clwb_range_nofence( - addr_size.first, addr_size.second); - } - void dump(uint64_t c){ - for (int i = 0; i < dump_size; i++){ - container->try_pop_local(&do_persist, c); - } - } + static void do_persist(std::pair& addr_size); + void dump(uint64_t c); public: BufferedWB (GlobalTestConfig* _gtc): gtc(_gtc), task_num(_gtc->task_num){ if (gtc->checkEnv("BufferSize")){ @@ -419,33 +232,11 @@ class BufferedWB : public ToBePersistContainer{ delete counters; delete persister; } - void push(std::pair entry, uint64_t c){ - while (!container->try_push(entry, c)){// in case other thread(s) are doing write-backs. - persister->help_persist_local(c); - } - } - void register_persist(PBlk* blk, uint64_t c){ - if (c == NULL_EPOCH){ - errexit("registering persist of epoch NULL."); - } - size_t sz = RP_malloc_size(blk); - push(std::make_pair((char*)blk, (size_t)sz), c); - - } - void register_persist_raw(PBlk* blk, uint64_t c){ - if (c == NULL_EPOCH){ - errexit("registering persist of epoch NULL."); - } - push(std::make_pair((char*)blk, 1), c); - } - void persist_epoch(uint64_t c){ // NOTE: this is not thread-safe. 
- for (int i = 0; i < task_num; i++){ - container->pop_all_local(&do_persist, i, c); - } - } - void clear(){ - container->clear(); - } + void push(std::pair entry, uint64_t c); + void register_persist(PBlk* blk, uint64_t c); + void register_persist_raw(PBlk* blk, uint64_t c); + void persist_epoch(uint64_t c); + void clear(); }; class NoToBePersistContainer : public ToBePersistContainer{ @@ -456,4 +247,6 @@ class NoToBePersistContainer : public ToBePersistContainer{ void clear(){} }; +} + #endif \ No newline at end of file diff --git a/src/persist/TransactionTrackers.cpp b/src/persist/TransactionTrackers.cpp new file mode 100644 index 00000000..e570dd3e --- /dev/null +++ b/src/persist/TransactionTrackers.cpp @@ -0,0 +1,163 @@ +#include "TransactionTrackers.hpp" +#include "EpochSys.hpp" + +namespace pds{ + +bool PerEpochTransactionTracker::consistent_set(uint64_t target, uint64_t c){ + assert(EpochSys::tid != -1); + curr_epochs[EpochSys::tid].ui.store(target, std::memory_order_seq_cst); // fence + if (c == global_epoch->load(std::memory_order_acquire)){ + return true; + } else { + curr_epochs[EpochSys::tid].ui.store(NULL_EPOCH, std::memory_order_seq_cst); // TODO: double-check this fence. 
+ return false; + } +} +PerEpochTransactionTracker::PerEpochTransactionTracker(atomic* ge, int tn): TransactionTracker(ge), task_num(tn){ + curr_epochs = new paddedAtomic[task_num]; + for (int i = 0; i < task_num; i++){ + curr_epochs[i].ui.store(NULL_EPOCH); + } +} +bool PerEpochTransactionTracker::consistent_register_active(uint64_t target, uint64_t c){ + return consistent_set(target, c); +} +bool PerEpochTransactionTracker::consistent_register_bookkeeping(uint64_t target, uint64_t c){ + return consistent_set(target, c); +} +void PerEpochTransactionTracker::unregister_active(uint64_t target){ + assert(EpochSys::tid != -1); + curr_epochs[EpochSys::tid].ui.store(NULL_EPOCH, std::memory_order_seq_cst); +} +void PerEpochTransactionTracker::unregister_bookkeeping(uint64_t target){ + assert(EpochSys::tid != -1); + curr_epochs[EpochSys::tid].ui.store(NULL_EPOCH, std::memory_order_seq_cst); +} +bool PerEpochTransactionTracker::no_active(uint64_t target){ + for (int i = 0; i < task_num; i++){ + uint64_t curr_epoch = curr_epochs[i].ui.load(std::memory_order_acquire); + if (target == curr_epoch && curr_epoch != NULL_EPOCH){ + // std::cout<<"target:"<& counter, const uint64_t c){ + counter.fetch_add(1, std::memory_order_seq_cst); + if (c == global_epoch->load(std::memory_order_seq_cst)){ + return true; + } else { + counter.fetch_sub(1, std::memory_order_seq_cst); + return false; + } +} +AtomicTransactionTracker::AtomicTransactionTracker(atomic* ge): TransactionTracker(ge){ + for (int i = 0; i < 4; i++){ + active_transactions[i].ui.store(0, std::memory_order_relaxed); + bookkeeping_transactions[i].ui.store(0, std::memory_order_relaxed); + } +} +bool AtomicTransactionTracker::consistent_register_active(uint64_t target, uint64_t c){ + return consistent_increment(active_transactions[target%4].ui, c); +} +bool AtomicTransactionTracker::consistent_register_bookkeeping(uint64_t target, uint64_t c){ + return consistent_increment(bookkeeping_transactions[target%4].ui, c); +} +void 
AtomicTransactionTracker::unregister_active(uint64_t target){ + active_transactions[target%4].ui.fetch_sub(1, std::memory_order_seq_cst); +} +void AtomicTransactionTracker::unregister_bookkeeping(uint64_t target){ + bookkeeping_transactions[target%4].ui.fetch_sub(1, std::memory_order_seq_cst); +} +bool AtomicTransactionTracker::no_active(uint64_t target){ + return (active_transactions[target%4].ui.load(std::memory_order_seq_cst) == 0); +} +bool AtomicTransactionTracker::no_bookkeeping(uint64_t target){ + return (bookkeeping_transactions[target%4].ui.load(std::memory_order_seq_cst) == 0); +} + + +void NoFenceTransactionTracker::set_register(paddedAtomic* indicators){ + assert(EpochSys::tid != -1); + indicators[EpochSys::tid].ui.store(true, std::memory_order_release); +} +void NoFenceTransactionTracker::set_unregister(paddedAtomic* indicators){ + assert(EpochSys::tid != -1); + indicators[EpochSys::tid].ui.store(false, std::memory_order_release); +} +bool NoFenceTransactionTracker::consistent_register(paddedAtomic* indicators, const uint64_t c){ + set_register(indicators); + if (c == global_epoch->load(std::memory_order_acquire)){ + return true; + } else { + // Hs: I guess we don't ever need a fence here. 
+ assert(EpochSys::tid != -1); + indicators[EpochSys::tid].ui.store(false, std::memory_order_release); + return false; + } +} +bool NoFenceTransactionTracker::all_false(paddedAtomic* indicators){ + for (int i = 0; i < task_num; i++){ + if (indicators[i].ui.load(std::memory_order_acquire) == true){ + return false; + } + } + return true; +} +NoFenceTransactionTracker::NoFenceTransactionTracker(atomic* ge, int tn): + TransactionTracker(ge), task_num(tn){ + for (int i = 0; i < 4; i++){ + active_transactions[i].ui = new paddedAtomic[task_num]; + bookkeeping_transactions[i].ui = new paddedAtomic[task_num]; + for (int j = 0; j < task_num; j++){ + active_transactions[i].ui[j].ui.store(false); + bookkeeping_transactions[i].ui[j].ui.store(false); + } + } +} +bool NoFenceTransactionTracker::consistent_register_active(uint64_t target, uint64_t c){ + return consistent_register(active_transactions[target%4].ui, c); +} +bool NoFenceTransactionTracker::consistent_register_bookkeeping(uint64_t target, uint64_t c){ + return consistent_register(bookkeeping_transactions[target%4].ui, c); +} +void NoFenceTransactionTracker::unregister_active(uint64_t target){ + set_unregister(active_transactions[target%4].ui); +} +void NoFenceTransactionTracker::unregister_bookkeeping(uint64_t target){ + set_unregister(bookkeeping_transactions[target%4].ui); +} +bool NoFenceTransactionTracker::no_active(uint64_t target){ + return all_false(active_transactions[target%4].ui); +} +bool NoFenceTransactionTracker::no_bookkeeping(uint64_t target){ + return all_false(bookkeeping_transactions[target%4].ui); +} + +void FenceBeginTransactionTracker::set_register(paddedAtomic* indicators){ + assert(EpochSys::tid != -1); + indicators[EpochSys::tid].ui.store(true, std::memory_order_seq_cst); +} +FenceBeginTransactionTracker::FenceBeginTransactionTracker(atomic* ge, int task_num): + NoFenceTransactionTracker(ge, task_num){} + +void FenceEndTransactionTracker::set_unregister(paddedAtomic* indicators){ + 
assert(EpochSys::tid != -1); + indicators[EpochSys::tid].ui.store(false, std::memory_order_seq_cst); +} +FenceEndTransactionTracker::FenceEndTransactionTracker(atomic* ge, int task_num): + NoFenceTransactionTracker(ge, task_num){} + +} \ No newline at end of file diff --git a/src/persist/TransactionTrackers.hpp b/src/persist/TransactionTrackers.hpp index 9b336adf..27f9a378 100644 --- a/src/persist/TransactionTrackers.hpp +++ b/src/persist/TransactionTrackers.hpp @@ -1,14 +1,21 @@ #ifndef TRANSACTION_TRACKERS #define TRANSACTION_TRACKERS +#include +#include + +#include "ConcurrentPrimitives.hpp" + +namespace pds{ + ////////////////////////// // Transaction Trackers // ////////////////////////// class TransactionTracker{ public: - atomic* global_epoch = nullptr; - TransactionTracker(atomic* ge): global_epoch(ge){} + std::atomic* global_epoch = nullptr; + TransactionTracker(std::atomic* ge): global_epoch(ge){} virtual bool consistent_register_active(uint64_t target, uint64_t c) = 0; virtual bool consistent_register_bookkeeping(uint64_t target, uint64_t c) = 0; virtual void unregister_active(uint64_t target) = 0; @@ -22,61 +29,22 @@ class PerEpochTransactionTracker: public TransactionTracker{ paddedAtomic* curr_epochs; int task_num; - bool consistent_set(uint64_t target, uint64_t c){ - assert(_tid != -1); - curr_epochs[_tid].ui.store(target, std::memory_order_seq_cst); // fence - if (c == global_epoch->load(std::memory_order_acquire)){ - return true; - } else { - curr_epochs[_tid].ui.store(NULL_EPOCH, std::memory_order_seq_cst); // TODO: double-check this fence. 
- return false; - } - } + bool consistent_set(uint64_t target, uint64_t c); public: - PerEpochTransactionTracker(atomic* ge, int tn): TransactionTracker(ge), task_num(tn){ - curr_epochs = new paddedAtomic[task_num]; - for (int i = 0; i < task_num; i++){ - curr_epochs[i].ui.store(NULL_EPOCH); - } - } - bool consistent_register_active(uint64_t target, uint64_t c){ - return consistent_set(target, c); - } - bool consistent_register_bookkeeping(uint64_t target, uint64_t c){ - return consistent_set(target, c); - } - void unregister_active(uint64_t target){ - assert(_tid != -1); - curr_epochs[_tid].ui.store(NULL_EPOCH, std::memory_order_seq_cst); - } - void unregister_bookkeeping(uint64_t target){ - assert(_tid != -1); - curr_epochs[_tid].ui.store(NULL_EPOCH, std::memory_order_seq_cst); - } - bool no_active(uint64_t target){ - for (int i = 0; i < task_num; i++){ - uint64_t curr_epoch = curr_epochs[i].ui.load(std::memory_order_acquire); - if (target == curr_epoch && curr_epoch != NULL_EPOCH){ - // std::cout<<"target:"<* ge, int tn); + bool consistent_register_active(uint64_t target, uint64_t c); + bool consistent_register_bookkeeping(uint64_t target, uint64_t c); + void unregister_active(uint64_t target); + void unregister_bookkeeping(uint64_t target); + bool no_active(uint64_t target); + bool no_bookkeeping(uint64_t target); + void finalize(); }; class NoTransactionTracker : public TransactionTracker{ // a transaction counter that does absolutely nothing. 
public: - NoTransactionTracker(atomic* ge): TransactionTracker(ge){} + NoTransactionTracker(std::atomic* ge): TransactionTracker(ge){} bool consistent_register_active(uint64_t target, uint64_t c){ return true; } @@ -96,120 +64,46 @@ class AtomicTransactionTracker : public TransactionTracker{ paddedAtomic active_transactions[4]; paddedAtomic bookkeeping_transactions[4]; - bool consistent_increment(std::atomic& counter, const uint64_t c){ - counter.fetch_add(1, std::memory_order_seq_cst); - if (c == global_epoch->load(std::memory_order_seq_cst)){ - return true; - } else { - counter.fetch_sub(1, std::memory_order_seq_cst); - return false; - } - } + bool consistent_increment(std::atomic& counter, const uint64_t c); public: - AtomicTransactionTracker(atomic* ge): TransactionTracker(ge){ - for (int i = 0; i < 4; i++){ - active_transactions[i].ui.store(0, std::memory_order_relaxed); - bookkeeping_transactions[i].ui.store(0, std::memory_order_relaxed); - } - } - bool consistent_register_active(uint64_t target, uint64_t c){ - return consistent_increment(active_transactions[target%4].ui, c); - } - bool consistent_register_bookkeeping(uint64_t target, uint64_t c){ - return consistent_increment(bookkeeping_transactions[target%4].ui, c); - } - void unregister_active(uint64_t target){ - active_transactions[target%4].ui.fetch_sub(1, std::memory_order_seq_cst); - } - void unregister_bookkeeping(uint64_t target){ - bookkeeping_transactions[target%4].ui.fetch_sub(1, std::memory_order_seq_cst); - } - bool no_active(uint64_t target){ - return (active_transactions[target%4].ui.load(std::memory_order_seq_cst) == 0); - } - bool no_bookkeeping(uint64_t target){ - return (bookkeeping_transactions[target%4].ui.load(std::memory_order_seq_cst) == 0); - } + AtomicTransactionTracker(std::atomic* ge); + bool consistent_register_active(uint64_t target, uint64_t c); + bool consistent_register_bookkeeping(uint64_t target, uint64_t c); + void unregister_active(uint64_t target); + void 
unregister_bookkeeping(uint64_t target); + bool no_active(uint64_t target); + bool no_bookkeeping(uint64_t target); }; class NoFenceTransactionTracker : public TransactionTracker{ padded*> active_transactions[4]; padded*> bookkeeping_transactions[4]; int task_num; - virtual void set_register(paddedAtomic* indicators){ - assert(_tid != -1); - indicators[_tid].ui.store(true, std::memory_order_release); - } - virtual void set_unregister(paddedAtomic* indicators){ - assert(_tid != -1); - indicators[_tid].ui.store(false, std::memory_order_release); - } - bool consistent_register(paddedAtomic* indicators, const uint64_t c){ - set_register(indicators); - if (c == global_epoch->load(std::memory_order_acquire)){ - return true; - } else { - // Hs: I guess we don't ever need a fence here. - assert(_tid != -1); - indicators[_tid].ui.store(false, std::memory_order_release); - return false; - } - } - bool all_false(paddedAtomic* indicators){ - for (int i = 0; i < task_num; i++){ - if (indicators[i].ui.load(std::memory_order_acquire) == true){ - return false; - } - } - return true; - } + virtual void set_register(paddedAtomic* indicators); + virtual void set_unregister(paddedAtomic* indicators); + bool consistent_register(paddedAtomic* indicators, const uint64_t c); + bool all_false(paddedAtomic* indicators); public: - NoFenceTransactionTracker(atomic* ge, int tn): TransactionTracker(ge), task_num(tn){ - for (int i = 0; i < 4; i++){ - active_transactions[i].ui = new paddedAtomic[task_num]; - bookkeeping_transactions[i].ui = new paddedAtomic[task_num]; - for (int j = 0; j < task_num; j++){ - active_transactions[i].ui[j].ui.store(false); - bookkeeping_transactions[i].ui[j].ui.store(false); - } - } - } - bool consistent_register_active(uint64_t target, uint64_t c){ - return consistent_register(active_transactions[target%4].ui, c); - } - bool consistent_register_bookkeeping(uint64_t target, uint64_t c){ - return consistent_register(bookkeeping_transactions[target%4].ui, c); - } - 
virtual void unregister_active(uint64_t target){ - set_unregister(active_transactions[target%4].ui); - } - virtual void unregister_bookkeeping(uint64_t target){ - set_unregister(bookkeeping_transactions[target%4].ui); - } - bool no_active(uint64_t target){ - return all_false(active_transactions[target%4].ui); - } - bool no_bookkeeping(uint64_t target){ - return all_false(bookkeeping_transactions[target%4].ui); - } + NoFenceTransactionTracker(std::atomic* ge, int tn); + bool consistent_register_active(uint64_t target, uint64_t c); + bool consistent_register_bookkeeping(uint64_t target, uint64_t c); + virtual void unregister_active(uint64_t target); + virtual void unregister_bookkeeping(uint64_t target); + bool no_active(uint64_t target); + bool no_bookkeeping(uint64_t target); }; class FenceBeginTransactionTracker : public NoFenceTransactionTracker{ - virtual void set_register(paddedAtomic* indicators){ - assert(_tid != -1); - indicators[_tid].ui.store(true, std::memory_order_seq_cst); - } + virtual void set_register(paddedAtomic* indicators); public: - FenceBeginTransactionTracker(atomic* ge, int task_num): NoFenceTransactionTracker(ge, task_num){} + FenceBeginTransactionTracker(std::atomic* ge, int task_num); }; class FenceEndTransactionTracker : public NoFenceTransactionTracker{ - virtual void set_unregister(paddedAtomic* indicators){ - assert(_tid != -1); - indicators[_tid].ui.store(false, std::memory_order_seq_cst); - } + virtual void set_unregister(paddedAtomic* indicators); public: - FenceEndTransactionTracker(atomic* ge, int task_num): NoFenceTransactionTracker(ge, task_num){} + FenceEndTransactionTracker(std::atomic* ge, int task_num); }; +} #endif \ No newline at end of file diff --git a/src/persist/api/pblk_naked.hpp b/src/persist/api/pblk_naked.hpp index 25f08925..a1346bf1 100644 --- a/src/persist/api/pblk_naked.hpp +++ b/src/persist/api/pblk_naked.hpp @@ -13,15 +13,15 @@ namespace pds{ extern EpochSys* esys; extern padded* epochs; - extern __thread int 
_tid; + // extern __thread int _tid; extern padded* local_descs; inline void init(GlobalTestConfig* gtc){ // here we assume that pds::init is called before pds::init_thread, hence the assertion. // if this assertion triggers, note that the order may be reversed. Evaluation needed. - assert(_tid == -1); - if (_tid == -1){ - _tid = 0; + assert(EpochSys::tid == -1); + if (EpochSys::tid == -1){ + EpochSys::tid = 0; } sys_mode = ONLINE; PBlk::init(gtc->task_num); @@ -34,7 +34,7 @@ namespace pds{ } inline void init_thread(int id) { - _tid = id; + EpochSys::tid = id; // esys->init_thread(id); } @@ -44,37 +44,37 @@ namespace pds{ } #define CHECK_EPOCH() ({\ - esys->check_epoch(epochs[_tid].ui);\ + esys->check_epoch(epochs[EpochSys::tid].ui);\ }) #define BEGIN_OP( ... ) ({ \ - assert(epochs[_tid].ui == NULL_EPOCH);\ - epochs[_tid].ui = esys->begin_transaction();\ + assert(epochs[EpochSys::tid].ui == NULL_EPOCH);\ + epochs[EpochSys::tid].ui = esys->begin_transaction();\ std::vector __blks = { __VA_ARGS__ };\ for (auto b = __blks.begin(); b != __blks.end(); b++){\ - esys->register_alloc_pblk(*b, epochs[_tid].ui);\ + esys->register_alloc_pblk(*b, epochs[EpochSys::tid].ui);\ }\ - assert(epochs[_tid].ui != NULL_EPOCH); }) + assert(epochs[EpochSys::tid].ui != NULL_EPOCH); }) // end current operation by reducing transaction count of our epoch. // if our operation is already aborted, do nothing. #define END_OP ({\ - if (epochs[_tid].ui != NULL_EPOCH){ \ - esys->end_transaction(epochs[_tid].ui);\ - epochs[_tid].ui = NULL_EPOCH;} }) + if (epochs[EpochSys::tid].ui != NULL_EPOCH){ \ + esys->end_transaction(epochs[EpochSys::tid].ui);\ + epochs[EpochSys::tid].ui = NULL_EPOCH;} }) // end current operation by reducing transaction count of our epoch. // if our operation is already aborted, do nothing. 
#define END_READONLY_OP ({\ - if (epochs[_tid].ui != NULL_EPOCH){ \ - esys->end_readonly_transaction(epochs[_tid].ui);\ - epochs[_tid].ui = NULL_EPOCH;} }) + if (epochs[EpochSys::tid].ui != NULL_EPOCH){ \ + esys->end_readonly_transaction(epochs[EpochSys::tid].ui);\ + epochs[EpochSys::tid].ui = NULL_EPOCH;} }) // end current epoch and not move towards next epoch in esys. #define ABORT_OP ({ \ - assert(epochs[_tid].ui != NULL_EPOCH);\ - esys->abort_transaction(epochs[_tid].ui);\ - epochs[_tid].ui = NULL_EPOCH;}) + assert(epochs[EpochSys::tid].ui != NULL_EPOCH);\ + esys->abort_transaction(epochs[EpochSys::tid].ui);\ + epochs[EpochSys::tid].ui = NULL_EPOCH;}) class EpochHolder{ @@ -100,14 +100,14 @@ namespace pds{ EpochHolderReadOnly __holder; #define PNEW(t, ...) ({\ - epochs[_tid].ui == NULL_EPOCH ? \ + epochs[EpochSys::tid].ui == NULL_EPOCH ? \ new t( __VA_ARGS__ ) : \ - esys->register_alloc_pblk(new t( __VA_ARGS__ ), epochs[_tid].ui);}) + esys->register_alloc_pblk(new t( __VA_ARGS__ ), epochs[EpochSys::tid].ui);}) #define PDELETE(b) ({\ if (sys_mode == ONLINE) {\ - assert(epochs[_tid].ui != NULL_EPOCH);\ - esys->free_pblk(b, epochs[_tid].ui);}}) + assert(epochs[EpochSys::tid].ui != NULL_EPOCH);\ + esys->free_pblk(b, epochs[EpochSys::tid].ui);}}) #define PDELETE_DATA(b) ({\ if (sys_mode == ONLINE) {\ @@ -115,16 +115,16 @@ namespace pds{ }}) #define PRETIRE(b) ({\ - assert(epochs[_tid].ui != NULL_EPOCH);\ - esys->retire_pblk(b, epochs[_tid].ui);\ + assert(epochs[EpochSys::tid].ui != NULL_EPOCH);\ + esys->retire_pblk(b, epochs[EpochSys::tid].ui);\ }) #define PRECLAIM(b) ({\ - if (epochs[_tid].ui == NULL_EPOCH){\ + if (epochs[EpochSys::tid].ui == NULL_EPOCH){\ BEGIN_OP_AUTOEND();\ - esys->reclaim_pblk(b, epochs[_tid].ui);\ + esys->reclaim_pblk(b, epochs[EpochSys::tid].ui);\ } else {\ - esys->reclaim_pblk(b, epochs[_tid].ui);\ + esys->reclaim_pblk(b, epochs[EpochSys::tid].ui);\ }\ }) @@ -142,29 +142,29 @@ namespace pds{ public:\ /* get method open a pblk for read. 
*/\ t TOKEN_CONCAT(get_, n)() const{\ - assert(epochs[_tid].ui != NULL_EPOCH);\ - return esys->openread_pblk(this, epochs[_tid].ui)->TOKEN_CONCAT(m_, n);\ + assert(epochs[EpochSys::tid].ui != NULL_EPOCH);\ + return esys->openread_pblk(this, epochs[EpochSys::tid].ui)->TOKEN_CONCAT(m_, n);\ }\ /* get method open a pblk for read. Allows old-see-new reads. */\ t TOKEN_CONCAT(get_unsafe_, n)() const{\ - if(epochs[_tid].ui != NULL_EPOCH)\ - return esys->openread_pblk_unsafe(this, epochs[_tid].ui)->TOKEN_CONCAT(m_, n);\ + if(epochs[EpochSys::tid].ui != NULL_EPOCH)\ + return esys->openread_pblk_unsafe(this, epochs[EpochSys::tid].ui)->TOKEN_CONCAT(m_, n);\ else\ return TOKEN_CONCAT(m_, n);\ }\ /* set method open a pblk for write. return a new copy when necessary */\ template \ T* TOKEN_CONCAT(set_, n)(const in_type& TOKEN_CONCAT(tmp_, n)){\ - assert(epochs[_tid].ui != NULL_EPOCH);\ - auto ret = esys->openwrite_pblk(this, epochs[_tid].ui);\ + assert(epochs[EpochSys::tid].ui != NULL_EPOCH);\ + auto ret = esys->openwrite_pblk(this, epochs[EpochSys::tid].ui);\ ret->TOKEN_CONCAT(m_, n) = TOKEN_CONCAT(tmp_, n);\ - esys->register_update_pblk(ret, epochs[_tid].ui);\ + esys->register_update_pblk(ret, epochs[EpochSys::tid].ui);\ return ret;\ }\ /* set the field by the parameter. called only outside BEGIN_OP and END_OP */\ template \ void TOKEN_CONCAT(set_unsafe_, n)(const in_type& TOKEN_CONCAT(tmp_, n)){\ - assert(epochs[_tid].ui == NULL_EPOCH);\ + assert(epochs[EpochSys::tid].ui == NULL_EPOCH);\ TOKEN_CONCAT(m_, n) = TOKEN_CONCAT(tmp_, n);\ } @@ -178,20 +178,20 @@ namespace pds{ t TOKEN_CONCAT(m_, n)[s];\ /* get method open a pblk for read. */\ t TOKEN_CONCAT(get_, n)(int i) const{\ - assert(epochs[_tid].ui != NULL_EPOCH);\ - return esys->openread_pblk(this, epochs[_tid].ui)->TOKEN_CONCAT(m_, n)[i];\ + assert(epochs[EpochSys::tid].ui != NULL_EPOCH);\ + return esys->openread_pblk(this, epochs[EpochSys::tid].ui)->TOKEN_CONCAT(m_, n)[i];\ }\ /* get method open a pblk for read. 
Allows old-see-new reads. */\ t TOKEN_CONCAT(get_unsafe_, n)(int i) const{\ - assert(epochs[_tid].ui != NULL_EPOCH);\ - return esys->openread_pblk_unsafe(this, epochs[_tid].ui)->TOKEN_CONCAT(m_, n)[i];\ + assert(epochs[EpochSys::tid].ui != NULL_EPOCH);\ + return esys->openread_pblk_unsafe(this, epochs[EpochSys::tid].ui)->TOKEN_CONCAT(m_, n)[i];\ }\ /* set method open a pblk for write. return a new copy when necessary */\ T* TOKEN_CONCAT(set_, n)(int i, t TOKEN_CONCAT(tmp_, n)){\ - assert(epochs[_tid].ui != NULL_EPOCH);\ - auto ret = esys->openwrite_pblk(this, epochs[_tid].ui);\ + assert(epochs[EpochSys::tid].ui != NULL_EPOCH);\ + auto ret = esys->openwrite_pblk(this, epochs[EpochSys::tid].ui);\ ret->TOKEN_CONCAT(m_, n)[i] = TOKEN_CONCAT(tmp_, n);\ - esys->register_update_pblk(ret, epochs[_tid].ui);\ + esys->register_update_pblk(ret, epochs[EpochSys::tid].ui);\ return ret;\ } @@ -228,10 +228,10 @@ namespace pds{ // template // static T* alloc(size_t s, uint64_t head_id){ - // assert(epochs[_tid].ui != NULL_EPOCH); + // assert(epochs[EpochSys::tid].ui != NULL_EPOCH); // T* ret = static_cast(RP_malloc(sizeof(T) + s)); // new (ret) T(data, s); - // esys->register_alloc_pdata(ret, epochs[_tid].ui, head_id); + // esys->register_alloc_pdata(ret, epochs[EpochSys::tid].ui, head_id); // return ret; // } // }; diff --git a/src/persist/common_macros.hpp b/src/persist/common_macros.hpp new file mode 100644 index 00000000..0f194f7a --- /dev/null +++ b/src/persist/common_macros.hpp @@ -0,0 +1,18 @@ +#ifndef COMMON_MACROS_HPP +#define COMMON_MACROS_HPP + +namespace pds{ +#define INIT_EPOCH 3 +#define NULL_EPOCH 0 + +#define ASSERT_DERIVE(der, base)\ + static_assert(std::is_convertible::value,\ + #der " must inherit " #base " as public"); + +#define ASSERT_COPY(t)\ + static_assert(std::is_copy_constructible::value,\ + "type" #t "requires copying"); + +} + +#endif \ No newline at end of file diff --git a/src/persist/persist_config.hpp b/src/persist/persist_config.hpp deleted file 
mode 100644 index 8f3a4add..00000000 --- a/src/persist/persist_config.hpp +++ /dev/null @@ -1,2 +0,0 @@ -// Config file containing macro defs: - diff --git a/src/persist/persist_utils.hpp b/src/persist/persist_utils.hpp index 24573df2..62eda403 100644 --- a/src/persist/persist_utils.hpp +++ b/src/persist/persist_utils.hpp @@ -2,13 +2,24 @@ #define PERSIST_UTILS_HPP #include "ConcurrentPrimitives.hpp" -#include -#include -#include +#include "HarnessUtils.hpp" +#include +#include +#include +#include class UIDGenerator{ - padded* curr_ids; + padded* curr_ids = nullptr; public: + UIDGenerator(){} + UIDGenerator(uint64_t task_num){ + init(task_num); + } + ~UIDGenerator(){ + if (curr_ids){ + delete curr_ids; + } + } void init(uint64_t task_num){ uint64_t buf = task_num-1; int shift = 64; @@ -17,7 +28,9 @@ class UIDGenerator{ shift--; max <<= 1; } - curr_ids = new padded[max]; + if (!curr_ids){ + curr_ids = new padded[max]; + } for (uint64_t i = 0; i < max; i++){ curr_ids[i].ui = i << shift; } @@ -119,7 +132,7 @@ class CircBuffer{ void clear(){ head.ui = tail.ui; } -}__attribute__((aligned(CACHELINE_SIZE))); +}__attribute__((aligned(CACHE_LINE_SIZE))); // a group of per-thread circular buffer // NOTE: The container is NOT thread safe. @@ -247,7 +260,7 @@ class FixedCircBuffer{ head.ui = 0; tail.ui = 0; } -}__attribute__((aligned(CACHELINE_SIZE))); +}__attribute__((aligned(CACHE_LINE_SIZE))); // a group of per-thread fixed-size circular buffer // NOTE: this is designed for single-consumer pattern only. The container is NOT thread safe. 
diff --git a/src/rideables/MontageMSQueue.hpp b/src/rideables/MontageMSQueue.hpp index 2bbb12d3..9418c60e 100644 --- a/src/rideables/MontageMSQueue.hpp +++ b/src/rideables/MontageMSQueue.hpp @@ -97,7 +97,7 @@ void MontageMSQueue::enqueue(T v, int tid){ // directly set m_sn and BEGIN_OP will flush it new_node->set_sn(s); BEGIN_OP(); - new_node->payload->set_epoch(epochs[_tid].ui); + new_node->payload->set_epoch(epochs[EpochSys::tid].ui); /* set_sn must happen before PDELETE of payload since it's * before linearization point. * Also, this must set sn in place since we still remain in @@ -105,7 +105,7 @@ void MontageMSQueue::enqueue(T v, int tid){ */ // new_node->set_sn(s); if((cur_tail->next).CAS_verify(next, new_node)){ - esys->register_alloc_pblk(new_node->payload, epochs[_tid].ui); + esys->register_alloc_pblk(new_node->payload, epochs[EpochSys::tid].ui); END_OP; break; } diff --git a/src/utils/DCSS.hpp b/src/utils/DCSS.hpp index a2ea0012..d718410a 100644 --- a/src/utils/DCSS.hpp +++ b/src/utils/DCSS.hpp @@ -331,17 +331,17 @@ bool atomic_nbptr_t::CAS_verify(nbptr_t expected, const T& desired){ // now r.cnt must be ..00, and r.cnt+1 is ..01, which means "nbptr // contains a descriptor" and "a descriptor is in progress" assert((r.cnt & 3UL) == 0UL); - new (&local_descs[_tid].ui) sc_desc_t(r.cnt+1, + new (&local_descs[EpochSys::tid].ui) sc_desc_t(r.cnt+1, reinterpret_cast(this), expected.val, reinterpret_cast(desired), - epochs[_tid].ui); - nbptr_t new_r(reinterpret_cast(&local_descs[_tid].ui), r.cnt+1); + epochs[EpochSys::tid].ui); + nbptr_t new_r(reinterpret_cast(&local_descs[EpochSys::tid].ui), r.cnt+1); if(!nbptr.compare_exchange_strong(r,new_r)){ return false; } - local_descs[_tid].ui.try_complete(esys, reinterpret_cast(this)); - if(local_descs[_tid].ui.committed()) return true; + local_descs[EpochSys::tid].ui.try_complete(esys, reinterpret_cast(this)); + if(local_descs[EpochSys::tid].ui.committed()) return true; else return false; } diff --git 
a/src/utils/Persistent.hpp b/src/utils/Persistent.hpp index 3e98561b..45f865ac 100644 --- a/src/utils/Persistent.hpp +++ b/src/utils/Persistent.hpp @@ -43,6 +43,9 @@ class Persistent { // pm_close(); RP_close(); } + static size_t get_malloc_size(void* ptr){ + return RP_malloc_size(ptr); + } // n: number of iterators it's going to return static std::vector recover(int n){ char* heap_prefix = (char*) malloc(L_cuserid+6); From e50a37145a5e2b839e3e7be7bf59ae928b14d140 Mon Sep 17 00:00:00 2001 From: Haosen Wen Date: Fri, 30 Oct 2020 21:04:25 -0400 Subject: [PATCH 17/56] put global variables into EpochSys class --- src/persist/EpochSys.cpp | 10 +- src/persist/EpochSys.hpp | 106 +++---------- src/persist/PersistStructs.cpp | 22 +++ src/persist/PersistStructs.hpp | 255 +++++++++++++++++++++++++++++++ src/persist/api/pblk_naked.hpp | 123 ++++++--------- src/rideables/MontageMSQueue.hpp | 4 +- src/utils/DCSS.hpp | 186 ++-------------------- 7 files changed, 354 insertions(+), 352 deletions(-) create mode 100644 src/persist/PersistStructs.cpp create mode 100644 src/persist/PersistStructs.hpp diff --git a/src/persist/EpochSys.cpp b/src/persist/EpochSys.cpp index b950f26c..1e9eb142 100644 --- a/src/persist/EpochSys.cpp +++ b/src/persist/EpochSys.cpp @@ -5,17 +5,9 @@ #include namespace pds{ - // __thread int _tid = -1; EpochSys* esys = nullptr; - padded* epochs = nullptr; - SysMode sys_mode = ONLINE; - // std::atomic abort_cnt(0); - // std::atomic total_cnt(0); - - thread_local int EpochSys::tid = -1; - UIDGenerator PBlk::uid_generator; - padded* local_descs = nullptr; + thread_local int EpochSys::tid = -1; void EpochSys::parse_env(){ if (to_be_persisted){ diff --git a/src/persist/EpochSys.hpp b/src/persist/EpochSys.hpp index 87f429f3..28e4ea9e 100644 --- a/src/persist/EpochSys.hpp +++ b/src/persist/EpochSys.hpp @@ -15,6 +15,7 @@ #include "persist_utils.hpp" #include "common_macros.hpp" +#include "PersistStructs.hpp" #include "TransactionTrackers.hpp" #include 
"PerThreadContainers.hpp" #include "ToBePersistedContainers.hpp" @@ -23,96 +24,9 @@ namespace pds{ -// extern __thread int _tid; - enum SysMode {ONLINE, RECOVER}; -extern SysMode sys_mode; - -struct OldSeeNewException : public std::exception { - const char * what () const throw () { - return "OldSeeNewException not handled."; - } -}; - -class EpochSys; - -enum PBlkType {INIT, ALLOC, UPDATE, DELETE, RECLAIMED, EPOCH, OWNED}; - -// class PBlk{ -class PBlk : public Persistent{ - friend class EpochSys; - static UIDGenerator uid_generator; -protected: - // Wentao: the first word should NOT be any persistent value for - // epoch-system-level recovery (i.e., epoch), as Ralloc repurposes the first - // word for block free list, which may interfere with the recovery. - // Currently we use (transient) "reserved" as the first word. If we decide to - // remove this field, we need to either prepend another dummy word, or - // change the block free list in Ralloc. - - // transient. - void* _reserved; - - uint64_t epoch = NULL_EPOCH; - PBlkType blktype = INIT; - uint64_t owner_id = 0; // TODO: make consider abandon this field and use id all the time. - uint64_t id = 0; - pptr retire = nullptr; - // bool persisted = false; // For debug purposes. Might not be needed at the end. - - // void call_persist(){ // For debug purposes. Might not be needed at the end. - // persist(); - // persisted = true; - // } -public: - void set_epoch(uint64_t e){ - // only for testing - epoch=e; - } - static void init(int task_num){ - uid_generator.init(task_num); - } - // id gets inited by EpochSys instance. - PBlk(): epoch(NULL_EPOCH), blktype(INIT), owner_id(0), retire(nullptr){} - // id gets inited by EpochSys instance. - PBlk(const PBlk* owner): - blktype(OWNED), owner_id(owner->blktype==OWNED? owner->owner_id : owner->id) {} - PBlk(const PBlk& oth): blktype(oth.blktype==OWNED? 
OWNED:INIT), owner_id(oth.owner_id), id(oth.id) {} - inline uint64_t get_id() {return id;} - virtual pptr get_data() {return nullptr;} - virtual ~PBlk(){ - // Wentao: we need to zeroize epoch and flush it, avoiding it left after free - epoch = NULL_EPOCH; - // persist_func::clwb(&epoch); - } -}; - -template -class PBlkArray : public PBlk{ - friend class EpochSys; - size_t size; - // NOTE: see EpochSys::alloc_pblk_array() for its sementical allocators. - PBlkArray(): PBlk(){} - PBlkArray(PBlk* owner) : PBlk(owner), content((T*)((char*)this + sizeof(PBlkArray))){} -public: - PBlkArray(const PBlkArray& oth): PBlk(oth), size(oth.size), - content((T*)((char*)this + sizeof(PBlkArray))){} - virtual ~PBlkArray(){}; - T* content; //transient ptr - inline size_t get_size()const{return size;} -}; - -struct Epoch : public PBlk{ - std::atomic global_epoch; - void persist(){} - Epoch(){ - global_epoch.store(NULL_EPOCH, std::memory_order_relaxed); - } -}; - class EpochSys{ - private: // persistent fields: Epoch* epoch_container = nullptr; @@ -137,10 +51,24 @@ class EpochSys{ /* static */ static thread_local int tid; - + std::mutex dedicated_epoch_advancer_lock; + /* public members for API only */ + // current epoch of each thread. + padded* epochs = nullptr; + // local descriptors for DCSS + // TODO: maybe put this into a derived class for NB data structures? + padded* local_descs = nullptr; + // system mode that toggles on/off PDELETE for recovery purpose. + SysMode sys_mode = ONLINE; + EpochSys(GlobalTestConfig* _gtc) : uid_generator(_gtc->task_num), gtc(_gtc) { + epochs = new padded[gtc->task_num]; + for(int i = 0; i < gtc->task_num; i++){ + epochs[i].ui = NULL_EPOCH; + } + local_descs = new padded[gtc->task_num]; reset(); // TODO: change to recover() later on. 
} @@ -160,6 +88,8 @@ class EpochSys{ delete trans_tracker; delete to_be_persisted; delete to_be_freed; + delete epochs; + delete local_descs; } void parse_env(); diff --git a/src/persist/PersistStructs.cpp b/src/persist/PersistStructs.cpp new file mode 100644 index 00000000..9254b298 --- /dev/null +++ b/src/persist/PersistStructs.cpp @@ -0,0 +1,22 @@ +#include "PersistStructs.hpp" +#include "EpochSys.hpp" + +namespace pds{ + +void sc_desc_t::try_complete(EpochSys* esys, uint64_t addr){ + nbptr_t _d = nbptr.load(); + int ret = 0; + if(_d.val!=addr) return; + if(in_progress(_d)){ + if(esys->check_epoch(cas_epoch)){ + ret = 2; + ret |= commit(_d); + } else { + ret = 4; + ret |= abort(_d); + } + } + cleanup(_d); +} + +} \ No newline at end of file diff --git a/src/persist/PersistStructs.hpp b/src/persist/PersistStructs.hpp new file mode 100644 index 00000000..ef6d2fe5 --- /dev/null +++ b/src/persist/PersistStructs.hpp @@ -0,0 +1,255 @@ +#ifndef PERSIST_STRUCTS_HPP +#define PERSIST_STRUCTS_HPP + +// TODO: this may not be a good file name, +// as some structures are actually transient. + +#include +#include +#include +#include + +#include "Persistent.hpp" +#include "common_macros.hpp" + +namespace pds{ + struct OldSeeNewException : public std::exception { + const char * what () const throw () { + return "OldSeeNewException not handled."; + } + }; + + enum PBlkType {INIT, ALLOC, UPDATE, DELETE, RECLAIMED, EPOCH, OWNED}; + + class EpochSys; + + //////////////////////////// + // PBlk-related structurs // + //////////////////////////// + + class PBlk : public Persistent{ + friend class EpochSys; + protected: + // Wentao: the first word should NOT be any persistent value for + // epoch-system-level recovery (i.e., epoch), as Ralloc repurposes the first + // word for block free list, which may interfere with the recovery. + // Currently we use (transient) "reserved" as the first word. 
If we decide to + // remove this field, we need to either prepend another dummy word, or + // change the block free list in Ralloc. + + // transient. + void* _reserved; + + uint64_t epoch = NULL_EPOCH; + PBlkType blktype = INIT; + uint64_t owner_id = 0; // TODO: make consider abandon this field and use id all the time. + uint64_t id = 0; + pptr retire = nullptr; + // bool persisted = false; // For debug purposes. Might not be needed at the end. + + // void call_persist(){ // For debug purposes. Might not be needed at the end. + // persist(); + // persisted = true; + // } + public: + void set_epoch(uint64_t e){ + // only for testing + epoch=e; + } + // id gets inited by EpochSys instance. + PBlk(): epoch(NULL_EPOCH), blktype(INIT), owner_id(0), retire(nullptr){} + // id gets inited by EpochSys instance. + PBlk(const PBlk* owner): + blktype(OWNED), owner_id(owner->blktype==OWNED? owner->owner_id : owner->id) {} + PBlk(const PBlk& oth): blktype(oth.blktype==OWNED? OWNED:INIT), owner_id(oth.owner_id), id(oth.id) {} + inline uint64_t get_id() {return id;} + virtual pptr get_data() {return nullptr;} + virtual ~PBlk(){ + // Wentao: we need to zeroize epoch and flush it, avoiding it left after free + epoch = NULL_EPOCH; + // persist_func::clwb(&epoch); + } + }; + + template + class PBlkArray : public PBlk{ + friend class EpochSys; + size_t size; + // NOTE: see EpochSys::alloc_pblk_array() for its sementical allocators. 
+ PBlkArray(): PBlk(){} + PBlkArray(PBlk* owner) : PBlk(owner), content((T*)((char*)this + sizeof(PBlkArray))){} + public: + PBlkArray(const PBlkArray& oth): PBlk(oth), size(oth.size), + content((T*)((char*)this + sizeof(PBlkArray))){} + virtual ~PBlkArray(){}; + T* content; //transient ptr + inline size_t get_size()const{return size;} + }; + + struct Epoch : public PBlk{ + std::atomic global_epoch; + void persist(){} + Epoch(){ + global_epoch.store(NULL_EPOCH, std::memory_order_relaxed); + } + }; + + //////////////////////////////////////// + // counted pointer-related structures // + //////////////////////////////////////// + + struct EpochVerifyException : public std::exception { + const char * what () const throw () { + return "Epoch in which operation wants to linearize has passed; retry required."; + } + }; + + struct sc_desc_t; + template + class atomic_nbptr_t; + class nbptr_t{ + template + friend class atomic_nbptr_t; + inline bool is_desc() const { + return (cnt & 3UL) == 1UL; + } + inline sc_desc_t* get_desc() const { + assert(is_desc()); + return reinterpret_cast(val); + } + public: + uint64_t val; + uint64_t cnt; + template + inline T get_val() const { + static_assert(sizeof(T) == sizeof(uint64_t), "sizes do not match"); + return reinterpret_cast(val); + } + nbptr_t(uint64_t v, uint64_t c) : val(v), cnt(c) {}; + nbptr_t() : nbptr_t(0, 0) {}; + + inline bool operator==(const nbptr_t & b) const{ + return val==b.val && cnt==b.cnt; + } + inline bool operator!=(const nbptr_t & b) const{ + return !operator==(b); + } + }__attribute__((aligned(16))); + + extern EpochSys* esys; + + template + class atomic_nbptr_t{ + static_assert(sizeof(T) == sizeof(uint64_t), "sizes do not match"); + public: + // for cnt in nbptr: + // desc: ....01 + // real val: ....00 + std::atomic nbptr; + nbptr_t load(); + nbptr_t load_verify(); + inline T load_val(){ + return reinterpret_cast(load().val); + } + bool CAS_verify(nbptr_t expected, const T& desired); + inline bool 
CAS_verify(nbptr_t expected, const nbptr_t& desired){ + return CAS_verify(expected,desired.get_val()); + } + // CAS doesn't check epoch nor cnt + bool CAS(nbptr_t expected, const T& desired); + inline bool CAS(nbptr_t expected, const nbptr_t& desired){ + return CAS(expected,desired.get_val()); + } + void store(const T& desired); + inline void store(const nbptr_t& desired){ + store(desired.get_val()); + } + atomic_nbptr_t(const T& v) : nbptr(nbptr_t(reinterpret_cast(v), 0)){}; + atomic_nbptr_t() : atomic_nbptr_t(T()){}; + }; + + struct sc_desc_t{ + private: + // for cnt in nbptr: + // in progress: ....01 + // committed: ....10 + // aborted: ....11 + std::atomic nbptr; + const uint64_t old_val; + const uint64_t new_val; + const uint64_t cas_epoch; + inline bool abort(nbptr_t _d){ + // bring cnt from ..01 to ..11 + nbptr_t expected (_d.val, (_d.cnt & ~0x3UL) | 1UL); // in progress + nbptr_t desired(expected); + desired.cnt += 2; + return nbptr.compare_exchange_strong(expected, desired); + } + inline bool commit(nbptr_t _d){ + // bring cnt from ..01 to ..10 + nbptr_t expected (_d.val, (_d.cnt & ~0x3UL) | 1UL); // in progress + nbptr_t desired(expected); + desired.cnt += 1; + return nbptr.compare_exchange_strong(expected, desired); + } + inline bool committed(nbptr_t _d) const { + return (_d.cnt & 0x3UL) == 2UL; + } + inline bool in_progress(nbptr_t _d) const { + return (_d.cnt & 0x3UL) == 1UL; + } + inline bool match(nbptr_t old_d, nbptr_t new_d) const { + return ((old_d.cnt & ~0x3UL) == (new_d.cnt & ~0x3UL)) && + (old_d.val == new_d.val); + } + void cleanup(nbptr_t old_d){ + // must be called after desc is aborted or committed + nbptr_t new_d = nbptr.load(); + if(!match(old_d,new_d)) return; + assert(!in_progress(new_d)); + nbptr_t expected(reinterpret_cast(this),(new_d.cnt & ~0x3UL) | 1UL); + if(committed(new_d)) { + // bring cnt from ..10 to ..00 + reinterpret_cast*>( + new_d.val)->nbptr.compare_exchange_strong( + expected, + nbptr_t(new_val,new_d.cnt + 2)); + } 
else { + //aborted + // bring cnt from ..11 to ..00 + reinterpret_cast*>( + new_d.val)->nbptr.compare_exchange_strong( + expected, + nbptr_t(old_val,new_d.cnt + 1)); + } + } + public: + inline bool committed() const { + return committed(nbptr.load()); + } + inline bool in_progress() const { + return in_progress(nbptr.load()); + } + // TODO: try_complete used to be inline. Try to make it inline again when refactoring is finished. + // Hs: consider moving this into EpochSys if having trouble templatizing. + void try_complete(EpochSys* esys, uint64_t addr); + sc_desc_t( uint64_t c, uint64_t a, uint64_t o, + uint64_t n, uint64_t e) : + nbptr(nbptr_t(a,c)), old_val(o), new_val(n), cas_epoch(e){}; + sc_desc_t() : sc_desc_t(0,0,0,0,0){}; + }; + + template + void atomic_nbptr_t::store(const T& desired){ + // this function must be used only when there's no data race + nbptr_t r = nbptr.load(); + nbptr_t new_r(reinterpret_cast(desired),r.cnt); + nbptr.store(new_r); + } + + + + +} + +#endif \ No newline at end of file diff --git a/src/persist/api/pblk_naked.hpp b/src/persist/api/pblk_naked.hpp index a1346bf1..3c80035f 100644 --- a/src/persist/api/pblk_naked.hpp +++ b/src/persist/api/pblk_naked.hpp @@ -4,7 +4,7 @@ #include "TestConfig.hpp" #include "EpochSys.hpp" #include "ConcurrentPrimitives.hpp" -#include "DCSS.hpp" + #include // This api is inspired by object-based RSTM's api. @@ -12,9 +12,6 @@ namespace pds{ extern EpochSys* esys; - extern padded* epochs; - // extern __thread int _tid; - extern padded* local_descs; inline void init(GlobalTestConfig* gtc){ // here we assume that pds::init is called before pds::init_thread, hence the assertion. 
@@ -23,13 +20,7 @@ namespace pds{ if (EpochSys::tid == -1){ EpochSys::tid = 0; } - sys_mode = ONLINE; - PBlk::init(gtc->task_num); - epochs = new padded[gtc->task_num]; - local_descs = new padded[gtc->task_num]; - for(int i = 0; i < gtc->task_num; i++){ - epochs[i].ui = NULL_EPOCH; - } + esys->sys_mode = ONLINE; esys = new EpochSys(gtc); } @@ -44,37 +35,37 @@ namespace pds{ } #define CHECK_EPOCH() ({\ - esys->check_epoch(epochs[EpochSys::tid].ui);\ + esys->check_epoch(esys->epochs[EpochSys::tid].ui);\ }) #define BEGIN_OP( ... ) ({ \ - assert(epochs[EpochSys::tid].ui == NULL_EPOCH);\ - epochs[EpochSys::tid].ui = esys->begin_transaction();\ + assert(esys->epochs[EpochSys::tid].ui == NULL_EPOCH);\ + esys->epochs[EpochSys::tid].ui = esys->begin_transaction();\ std::vector __blks = { __VA_ARGS__ };\ for (auto b = __blks.begin(); b != __blks.end(); b++){\ - esys->register_alloc_pblk(*b, epochs[EpochSys::tid].ui);\ + esys->register_alloc_pblk(*b, esys->epochs[EpochSys::tid].ui);\ }\ - assert(epochs[EpochSys::tid].ui != NULL_EPOCH); }) + assert(esys->epochs[EpochSys::tid].ui != NULL_EPOCH); }) // end current operation by reducing transaction count of our epoch. // if our operation is already aborted, do nothing. #define END_OP ({\ - if (epochs[EpochSys::tid].ui != NULL_EPOCH){ \ - esys->end_transaction(epochs[EpochSys::tid].ui);\ - epochs[EpochSys::tid].ui = NULL_EPOCH;} }) + if (esys->epochs[EpochSys::tid].ui != NULL_EPOCH){ \ + esys->end_transaction(esys->epochs[EpochSys::tid].ui);\ + esys->epochs[EpochSys::tid].ui = NULL_EPOCH;} }) // end current operation by reducing transaction count of our epoch. // if our operation is already aborted, do nothing. 
#define END_READONLY_OP ({\ - if (epochs[EpochSys::tid].ui != NULL_EPOCH){ \ - esys->end_readonly_transaction(epochs[EpochSys::tid].ui);\ - epochs[EpochSys::tid].ui = NULL_EPOCH;} }) + if (esys->epochs[EpochSys::tid].ui != NULL_EPOCH){ \ + esys->end_readonly_transaction(esys->epochs[EpochSys::tid].ui);\ + esys->epochs[EpochSys::tid].ui = NULL_EPOCH;} }) // end current epoch and not move towards next epoch in esys. #define ABORT_OP ({ \ - assert(epochs[EpochSys::tid].ui != NULL_EPOCH);\ - esys->abort_transaction(epochs[EpochSys::tid].ui);\ - epochs[EpochSys::tid].ui = NULL_EPOCH;}) + assert(esys->epochs[EpochSys::tid].ui != NULL_EPOCH);\ + esys->abort_transaction(esys->epochs[EpochSys::tid].ui);\ + esys->epochs[EpochSys::tid].ui = NULL_EPOCH;}) class EpochHolder{ @@ -100,31 +91,31 @@ namespace pds{ EpochHolderReadOnly __holder; #define PNEW(t, ...) ({\ - epochs[EpochSys::tid].ui == NULL_EPOCH ? \ + esys->epochs[EpochSys::tid].ui == NULL_EPOCH ? \ new t( __VA_ARGS__ ) : \ - esys->register_alloc_pblk(new t( __VA_ARGS__ ), epochs[EpochSys::tid].ui);}) + esys->register_alloc_pblk(new t( __VA_ARGS__ ), esys->epochs[EpochSys::tid].ui);}) #define PDELETE(b) ({\ - if (sys_mode == ONLINE) {\ - assert(epochs[EpochSys::tid].ui != NULL_EPOCH);\ - esys->free_pblk(b, epochs[EpochSys::tid].ui);}}) + if (esys->sys_mode == ONLINE) {\ + assert(esys->epochs[EpochSys::tid].ui != NULL_EPOCH);\ + esys->free_pblk(b, esys->epochs[EpochSys::tid].ui);}}) #define PDELETE_DATA(b) ({\ - if (sys_mode == ONLINE) {\ + if (esys->sys_mode == ONLINE) {\ delete(b);\ }}) #define PRETIRE(b) ({\ - assert(epochs[EpochSys::tid].ui != NULL_EPOCH);\ - esys->retire_pblk(b, epochs[EpochSys::tid].ui);\ + assert(esys->epochs[EpochSys::tid].ui != NULL_EPOCH);\ + esys->retire_pblk(b, esys->epochs[EpochSys::tid].ui);\ }) #define PRECLAIM(b) ({\ - if (epochs[EpochSys::tid].ui == NULL_EPOCH){\ + if (esys->epochs[EpochSys::tid].ui == NULL_EPOCH){\ BEGIN_OP_AUTOEND();\ - esys->reclaim_pblk(b, 
epochs[EpochSys::tid].ui);\ + esys->reclaim_pblk(b, esys->epochs[EpochSys::tid].ui);\ } else {\ - esys->reclaim_pblk(b, epochs[EpochSys::tid].ui);\ + esys->reclaim_pblk(b, esys->epochs[EpochSys::tid].ui);\ }\ }) @@ -142,29 +133,29 @@ namespace pds{ public:\ /* get method open a pblk for read. */\ t TOKEN_CONCAT(get_, n)() const{\ - assert(epochs[EpochSys::tid].ui != NULL_EPOCH);\ - return esys->openread_pblk(this, epochs[EpochSys::tid].ui)->TOKEN_CONCAT(m_, n);\ + assert(esys->epochs[EpochSys::tid].ui != NULL_EPOCH);\ + return esys->openread_pblk(this, esys->epochs[EpochSys::tid].ui)->TOKEN_CONCAT(m_, n);\ }\ /* get method open a pblk for read. Allows old-see-new reads. */\ t TOKEN_CONCAT(get_unsafe_, n)() const{\ - if(epochs[EpochSys::tid].ui != NULL_EPOCH)\ - return esys->openread_pblk_unsafe(this, epochs[EpochSys::tid].ui)->TOKEN_CONCAT(m_, n);\ + if(esys->epochs[EpochSys::tid].ui != NULL_EPOCH)\ + return esys->openread_pblk_unsafe(this, esys->epochs[EpochSys::tid].ui)->TOKEN_CONCAT(m_, n);\ else\ return TOKEN_CONCAT(m_, n);\ }\ /* set method open a pblk for write. return a new copy when necessary */\ template \ T* TOKEN_CONCAT(set_, n)(const in_type& TOKEN_CONCAT(tmp_, n)){\ - assert(epochs[EpochSys::tid].ui != NULL_EPOCH);\ - auto ret = esys->openwrite_pblk(this, epochs[EpochSys::tid].ui);\ + assert(esys->epochs[EpochSys::tid].ui != NULL_EPOCH);\ + auto ret = esys->openwrite_pblk(this, esys->epochs[EpochSys::tid].ui);\ ret->TOKEN_CONCAT(m_, n) = TOKEN_CONCAT(tmp_, n);\ - esys->register_update_pblk(ret, epochs[EpochSys::tid].ui);\ + esys->register_update_pblk(ret, esys->epochs[EpochSys::tid].ui);\ return ret;\ }\ /* set the field by the parameter. 
called only outside BEGIN_OP and END_OP */\ template \ void TOKEN_CONCAT(set_unsafe_, n)(const in_type& TOKEN_CONCAT(tmp_, n)){\ - assert(epochs[EpochSys::tid].ui == NULL_EPOCH);\ + assert(esys->epochs[EpochSys::tid].ui == NULL_EPOCH);\ TOKEN_CONCAT(m_, n) = TOKEN_CONCAT(tmp_, n);\ } @@ -178,20 +169,20 @@ namespace pds{ t TOKEN_CONCAT(m_, n)[s];\ /* get method open a pblk for read. */\ t TOKEN_CONCAT(get_, n)(int i) const{\ - assert(epochs[EpochSys::tid].ui != NULL_EPOCH);\ - return esys->openread_pblk(this, epochs[EpochSys::tid].ui)->TOKEN_CONCAT(m_, n)[i];\ + assert(esys->epochs[EpochSys::tid].ui != NULL_EPOCH);\ + return esys->openread_pblk(this, esys->epochs[EpochSys::tid].ui)->TOKEN_CONCAT(m_, n)[i];\ }\ /* get method open a pblk for read. Allows old-see-new reads. */\ t TOKEN_CONCAT(get_unsafe_, n)(int i) const{\ - assert(epochs[EpochSys::tid].ui != NULL_EPOCH);\ - return esys->openread_pblk_unsafe(this, epochs[EpochSys::tid].ui)->TOKEN_CONCAT(m_, n)[i];\ + assert(esys->epochs[EpochSys::tid].ui != NULL_EPOCH);\ + return esys->openread_pblk_unsafe(this, esys->epochs[EpochSys::tid].ui)->TOKEN_CONCAT(m_, n)[i];\ }\ /* set method open a pblk for write. 
return a new copy when necessary */\ T* TOKEN_CONCAT(set_, n)(int i, t TOKEN_CONCAT(tmp_, n)){\ - assert(epochs[EpochSys::tid].ui != NULL_EPOCH);\ - auto ret = esys->openwrite_pblk(this, epochs[EpochSys::tid].ui);\ + assert(esys->epochs[EpochSys::tid].ui != NULL_EPOCH);\ + auto ret = esys->openwrite_pblk(this, esys->epochs[EpochSys::tid].ui);\ ret->TOKEN_CONCAT(m_, n)[i] = TOKEN_CONCAT(tmp_, n);\ - esys->register_update_pblk(ret, epochs[EpochSys::tid].ui);\ + esys->register_update_pblk(ret, esys->epochs[EpochSys::tid].ui);\ return ret;\ } @@ -204,37 +195,11 @@ namespace pds{ } inline void recover_mode(){ - pds::sys_mode = RECOVER; + esys->sys_mode = RECOVER; } inline void online_mode(){ - pds::sys_mode = ONLINE; + esys->sys_mode = ONLINE; } - - // class PBlk : public PBlkBase{ - // friend class EpochSys; - // public: - // PBlk():PBlkBase(false){} - // PBlk(const PBlk& oth):PBlkBase(oth){} - // virtual ~PBlk() {} - // }; - - // class PData : public PBlk{ - // friend class EpochSys; - // public: - // PData():PBlk(true) {} - // PData(const PData& oth):PBlk(oth){} - // virtual ~PData() {} - - // template - // static T* alloc(size_t s, uint64_t head_id){ - // assert(epochs[EpochSys::tid].ui != NULL_EPOCH); - // T* ret = static_cast(RP_malloc(sizeof(T) + s)); - // new (ret) T(data, s); - // esys->register_alloc_pdata(ret, epochs[EpochSys::tid].ui, head_id); - // return ret; - // } - // }; - } #endif diff --git a/src/rideables/MontageMSQueue.hpp b/src/rideables/MontageMSQueue.hpp index 9418c60e..8a04c932 100644 --- a/src/rideables/MontageMSQueue.hpp +++ b/src/rideables/MontageMSQueue.hpp @@ -97,7 +97,7 @@ void MontageMSQueue::enqueue(T v, int tid){ // directly set m_sn and BEGIN_OP will flush it new_node->set_sn(s); BEGIN_OP(); - new_node->payload->set_epoch(epochs[EpochSys::tid].ui); + new_node->payload->set_epoch(esys->epochs[EpochSys::tid].ui); /* set_sn must happen before PDELETE of payload since it's * before linearization point. 
* Also, this must set sn in place since we still remain in @@ -105,7 +105,7 @@ void MontageMSQueue::enqueue(T v, int tid){ */ // new_node->set_sn(s); if((cur_tail->next).CAS_verify(next, new_node)){ - esys->register_alloc_pblk(new_node->payload, epochs[EpochSys::tid].ui); + esys->register_alloc_pblk(new_node->payload, esys->epochs[EpochSys::tid].ui); END_OP; break; } diff --git a/src/utils/DCSS.hpp b/src/utils/DCSS.hpp index d718410a..8f4cbd7b 100644 --- a/src/utils/DCSS.hpp +++ b/src/utils/DCSS.hpp @@ -43,173 +43,11 @@ // #include "rtm.hpp" #include +#include "PersistStructs.hpp" #include "ConcurrentPrimitives.hpp" #include "EpochSys.hpp" namespace pds{ -struct EpochVerifyException : public std::exception { - const char * what () const throw () { - return "Epoch in which operation wants to linearize has passed; retry required."; - } -}; - -struct sc_desc_t; -template -class atomic_nbptr_t; -class nbptr_t{ - template - friend class atomic_nbptr_t; - inline bool is_desc() const { - return (cnt & 3UL) == 1UL; - } - inline sc_desc_t* get_desc() const { - assert(is_desc()); - return reinterpret_cast(val); - } -public: - uint64_t val; - uint64_t cnt; - template - inline T get_val() const { - static_assert(sizeof(T) == sizeof(uint64_t), "sizes do not match"); - return reinterpret_cast(val); - } - nbptr_t(uint64_t v, uint64_t c) : val(v), cnt(c) {}; - nbptr_t() : nbptr_t(0, 0) {}; - - inline bool operator==(const nbptr_t & b) const{ - return val==b.val && cnt==b.cnt; - } - inline bool operator!=(const nbptr_t & b) const{ - return !operator==(b); - } -}__attribute__((aligned(16))); - -extern padded* local_descs; -extern EpochSys* esys; -extern padded* epochs; - -template -class atomic_nbptr_t{ - static_assert(sizeof(T) == sizeof(uint64_t), "sizes do not match"); -public: - // for cnt in nbptr: - // desc: ....01 - // real val: ....00 - std::atomic nbptr; - nbptr_t load(); - nbptr_t load_verify(); - inline T load_val(){ - return reinterpret_cast(load().val); - } - bool 
CAS_verify(nbptr_t expected, const T& desired); - inline bool CAS_verify(nbptr_t expected, const nbptr_t& desired){ - return CAS_verify(expected,desired.get_val()); - } - // CAS doesn't check epoch nor cnt - bool CAS(nbptr_t expected, const T& desired); - inline bool CAS(nbptr_t expected, const nbptr_t& desired){ - return CAS(expected,desired.get_val()); - } - void store(const T& desired); - inline void store(const nbptr_t& desired){ - store(desired.get_val()); - } - atomic_nbptr_t(const T& v) : nbptr(nbptr_t(reinterpret_cast(v), 0)){}; - atomic_nbptr_t() : atomic_nbptr_t(T()){}; -}; - -struct sc_desc_t{ -private: - // for cnt in nbptr: - // in progress: ....01 - // committed: ....10 - // aborted: ....11 - std::atomic nbptr; - const uint64_t old_val; - const uint64_t new_val; - const uint64_t cas_epoch; - inline bool abort(nbptr_t _d){ - // bring cnt from ..01 to ..11 - nbptr_t expected (_d.val, (_d.cnt & ~0x3UL) | 1UL); // in progress - nbptr_t desired(expected); - desired.cnt += 2; - return nbptr.compare_exchange_strong(expected, desired); - } - inline bool commit(nbptr_t _d){ - // bring cnt from ..01 to ..10 - nbptr_t expected (_d.val, (_d.cnt & ~0x3UL) | 1UL); // in progress - nbptr_t desired(expected); - desired.cnt += 1; - return nbptr.compare_exchange_strong(expected, desired); - } - inline bool committed(nbptr_t _d) const { - return (_d.cnt & 0x3UL) == 2UL; - } - inline bool in_progress(nbptr_t _d) const { - return (_d.cnt & 0x3UL) == 1UL; - } - inline bool match(nbptr_t old_d, nbptr_t new_d) const { - return ((old_d.cnt & ~0x3UL) == (new_d.cnt & ~0x3UL)) && - (old_d.val == new_d.val); - } - void cleanup(nbptr_t old_d){ - // must be called after desc is aborted or committed - nbptr_t new_d = nbptr.load(); - if(!match(old_d,new_d)) return; - assert(!in_progress(new_d)); - nbptr_t expected(reinterpret_cast(this),(new_d.cnt & ~0x3UL) | 1UL); - if(committed(new_d)) { - // bring cnt from ..10 to ..00 - reinterpret_cast*>( - 
new_d.val)->nbptr.compare_exchange_strong( - expected, - nbptr_t(new_val,new_d.cnt + 2)); - } else { - //aborted - // bring cnt from ..11 to ..00 - reinterpret_cast*>( - new_d.val)->nbptr.compare_exchange_strong( - expected, - nbptr_t(old_val,new_d.cnt + 1)); - } - } -public: - inline bool committed() const { - return committed(nbptr.load()); - } - inline bool in_progress() const { - return in_progress(nbptr.load()); - } - inline void try_complete(EpochSys* esys, uint64_t addr){ - nbptr_t _d = nbptr.load(); - int ret = 0; - if(_d.val!=addr) return; - if(in_progress(_d)){ - if(esys->check_epoch(cas_epoch)){ - ret = 2; - ret |= commit(_d); - } else { - ret = 4; - ret |= abort(_d); - } - } - cleanup(_d); - } - sc_desc_t( uint64_t c, uint64_t a, uint64_t o, - uint64_t n, uint64_t e) : - nbptr(nbptr_t(a,c)), old_val(o), new_val(n), cas_epoch(e){}; - sc_desc_t() : sc_desc_t(0,0,0,0,0){}; -}; - -template -void atomic_nbptr_t::store(const T& desired){ - // this function must be used only when there's no data race - nbptr_t r = nbptr.load(); - nbptr_t new_r(reinterpret_cast(desired),r.cnt); - nbptr.store(new_r); -} - #ifdef VISIBLE_READ // implementation of load and cas for visible reads @@ -226,11 +64,11 @@ nbptr_t atomic_nbptr_t::load(){ template nbptr_t atomic_nbptr_t::load_verify(){ - assert(epochs[_tid].ui != NULL_EPOCH); + assert(esys->epochs[EpochSys::_tid].ui != NULL_EPOCH); nbptr_t r; while(true){ r = nbptr.load(); - if(esys->check_epoch(epochs[_tid].ui)){ + if(esys->check_epoch(esys->epochs[EpochSys::_tid].ui)){ nbptr_t ret(r.val,r.cnt+1); if(nbptr.compare_exchange_strong(r, ret)){ return r; @@ -243,8 +81,8 @@ nbptr_t atomic_nbptr_t::load_verify(){ template bool atomic_nbptr_t::CAS_verify(nbptr_t expected, const T& desired){ - assert(epochs[_tid].ui != NULL_EPOCH); - if(esys->check_epoch(epochs[_tid].ui)){ + assert(esys->epochs[EpochSys::_tid].ui != NULL_EPOCH); + if(esys->check_epoch(esys->epochs[EpochSys::_tid].ui)){ nbptr_t 
new_r(reinterpret_cast(desired),expected.cnt+1); return nbptr.compare_exchange_strong(expected, new_r); } else { @@ -287,7 +125,7 @@ nbptr_t atomic_nbptr_t::load_verify(){ template bool atomic_nbptr_t::CAS_verify(nbptr_t expected, const T& desired){ - assert(epochs[_tid].ui != NULL_EPOCH); + assert(esys->epochs[EpochSys::_tid].ui != NULL_EPOCH); // total_cnt.fetch_add(1); #ifdef USE_TSX unsigned status = _xbegin(); @@ -296,7 +134,7 @@ bool atomic_nbptr_t::CAS_verify(nbptr_t expected, const T& desired){ if(!r.is_desc()){ if( r.cnt!=expected.cnt || r.val!=expected.val || - !esys->check_epoch(epochs[_tid].ui)){ + !esys->check_epoch(esys->epochs[EpochSys::_tid].ui)){ _xend(); return false; } else { @@ -331,17 +169,17 @@ bool atomic_nbptr_t::CAS_verify(nbptr_t expected, const T& desired){ // now r.cnt must be ..00, and r.cnt+1 is ..01, which means "nbptr // contains a descriptor" and "a descriptor is in progress" assert((r.cnt & 3UL) == 0UL); - new (&local_descs[EpochSys::tid].ui) sc_desc_t(r.cnt+1, + new (&esys->local_descs[EpochSys::tid].ui) sc_desc_t(r.cnt+1, reinterpret_cast(this), expected.val, reinterpret_cast(desired), - epochs[EpochSys::tid].ui); - nbptr_t new_r(reinterpret_cast(&local_descs[EpochSys::tid].ui), r.cnt+1); + esys->epochs[EpochSys::tid].ui); + nbptr_t new_r(reinterpret_cast(&esys->local_descs[EpochSys::tid].ui), r.cnt+1); if(!nbptr.compare_exchange_strong(r,new_r)){ return false; } - local_descs[EpochSys::tid].ui.try_complete(esys, reinterpret_cast(this)); - if(local_descs[EpochSys::tid].ui.committed()) return true; + esys->local_descs[EpochSys::tid].ui.try_complete(esys, reinterpret_cast(this)); + if(esys->local_descs[EpochSys::tid].ui.committed()) return true; else return false; } From 3afe2c5e92ae70e25bd2e297a87f7ec2d35de56f Mon Sep 17 00:00:00 2001 From: Haosen Wen Date: Tue, 3 Nov 2020 16:47:24 -0500 Subject: [PATCH 18/56] trivial --- src/persist/EpochSys.hpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/persist/EpochSys.hpp 
b/src/persist/EpochSys.hpp index 28e4ea9e..cc42cdaf 100644 --- a/src/persist/EpochSys.hpp +++ b/src/persist/EpochSys.hpp @@ -45,8 +45,6 @@ class EpochSys{ GlobalTestConfig* gtc = nullptr; int task_num; - bool consistent_increment(std::atomic& counter, const uint64_t c); - public: /* static */ From f1025ed4c5fe2fe187b9260faa6da2dbaa901c3b Mon Sep 17 00:00:00 2001 From: Haosen Wen Date: Tue, 3 Nov 2020 23:42:10 -0500 Subject: [PATCH 19/56] debug --- src/persist/PersistStructs.hpp | 6 +++--- src/persist/api/pblk_naked.hpp | 1 - src/rideables/MontageMSQueue.hpp | 2 +- src/utils/DCSS.hpp | 12 ++++++------ 4 files changed, 10 insertions(+), 11 deletions(-) diff --git a/src/persist/PersistStructs.hpp b/src/persist/PersistStructs.hpp index ef6d2fe5..663314d1 100644 --- a/src/persist/PersistStructs.hpp +++ b/src/persist/PersistStructs.hpp @@ -23,9 +23,9 @@ namespace pds{ class EpochSys; - //////////////////////////// - // PBlk-related structurs // - //////////////////////////// + ///////////////////////////// + // PBlk-related structures // + ///////////////////////////// class PBlk : public Persistent{ friend class EpochSys; diff --git a/src/persist/api/pblk_naked.hpp b/src/persist/api/pblk_naked.hpp index 3c80035f..0a3365bc 100644 --- a/src/persist/api/pblk_naked.hpp +++ b/src/persist/api/pblk_naked.hpp @@ -20,7 +20,6 @@ namespace pds{ if (EpochSys::tid == -1){ EpochSys::tid = 0; } - esys->sys_mode = ONLINE; esys = new EpochSys(gtc); } diff --git a/src/rideables/MontageMSQueue.hpp b/src/rideables/MontageMSQueue.hpp index 8a04c932..a2dd4f90 100644 --- a/src/rideables/MontageMSQueue.hpp +++ b/src/rideables/MontageMSQueue.hpp @@ -35,7 +35,7 @@ class MontageMSQueue : public RQueue, Recoverable{ Node(): next(nullptr), payload(nullptr){}; Node(T v): next(nullptr), payload(PNEW(Payload, v)){ - assert(epochs[_tid].ui == NULL_EPOCH); + assert(esys->epochs[EpochSys::tid].ui == NULL_EPOCH); }; void set_sn(uint64_t s){ diff --git a/src/utils/DCSS.hpp b/src/utils/DCSS.hpp index 
8f4cbd7b..1bdaaead 100644 --- a/src/utils/DCSS.hpp +++ b/src/utils/DCSS.hpp @@ -64,11 +64,11 @@ nbptr_t atomic_nbptr_t::load(){ template nbptr_t atomic_nbptr_t::load_verify(){ - assert(esys->epochs[EpochSys::_tid].ui != NULL_EPOCH); + assert(esys->epochs[EpochSys::tid].ui != NULL_EPOCH); nbptr_t r; while(true){ r = nbptr.load(); - if(esys->check_epoch(esys->epochs[EpochSys::_tid].ui)){ + if(esys->check_epoch(esys->epochs[EpochSys::tid].ui)){ nbptr_t ret(r.val,r.cnt+1); if(nbptr.compare_exchange_strong(r, ret)){ return r; @@ -81,8 +81,8 @@ nbptr_t atomic_nbptr_t::load_verify(){ template bool atomic_nbptr_t::CAS_verify(nbptr_t expected, const T& desired){ - assert(esys->epochs[EpochSys::_tid].ui != NULL_EPOCH); - if(esys->check_epoch(esys->epochs[EpochSys::_tid].ui)){ + assert(esys->epochs[EpochSys::tid].ui != NULL_EPOCH); + if(esys->check_epoch(esys->epochs[EpochSys::tid].ui)){ nbptr_t new_r(reinterpret_cast(desired),expected.cnt+1); return nbptr.compare_exchange_strong(expected, new_r); } else { @@ -125,7 +125,7 @@ nbptr_t atomic_nbptr_t::load_verify(){ template bool atomic_nbptr_t::CAS_verify(nbptr_t expected, const T& desired){ - assert(esys->epochs[EpochSys::_tid].ui != NULL_EPOCH); + assert(esys->epochs[EpochSys::tid].ui != NULL_EPOCH); // total_cnt.fetch_add(1); #ifdef USE_TSX unsigned status = _xbegin(); @@ -134,7 +134,7 @@ bool atomic_nbptr_t::CAS_verify(nbptr_t expected, const T& desired){ if(!r.is_desc()){ if( r.cnt!=expected.cnt || r.val!=expected.val || - !esys->check_epoch(esys->epochs[EpochSys::_tid].ui)){ + !esys->check_epoch(esys->epochs[EpochSys::tid].ui)){ _xend(); return false; } else { From 6ec058ddca924e4d94193cf35873657ff9c5f608 Mon Sep 17 00:00:00 2001 From: Haosen Wen Date: Tue, 3 Nov 2020 23:48:04 -0500 Subject: [PATCH 20/56] replaced pds finalization with rideable reclamation --- src/tests/GraphRecoveryTest.hpp | 3 +-- src/tests/GraphTest.hpp | 3 +-- src/tests/MapTest.hpp | 3 +-- src/tests/QueueChurnTest.hpp | 3 +-- src/tests/QueueTest.hpp 
| 3 +-- src/tests/RecoverVerifyTest.hpp | 3 +-- src/tests/TGraphConstructionTest.hpp | 3 +-- src/tests/YCSBTest.hpp | 3 +-- 8 files changed, 8 insertions(+), 16 deletions(-) diff --git a/src/tests/GraphRecoveryTest.hpp b/src/tests/GraphRecoveryTest.hpp index 1f055f35..281d74e1 100644 --- a/src/tests/GraphRecoveryTest.hpp +++ b/src/tests/GraphRecoveryTest.hpp @@ -160,8 +160,7 @@ class GraphRecoveryTest : public Test { } void cleanup(GlobalTestConfig *gtc) { - pds::finalize(); - Persistent::finalize(); + delete g; } }; #endif diff --git a/src/tests/GraphTest.hpp b/src/tests/GraphTest.hpp index 411ce771..d72329d2 100644 --- a/src/tests/GraphTest.hpp +++ b/src/tests/GraphTest.hpp @@ -93,8 +93,7 @@ class GraphTest : public Test { } void cleanup(GlobalTestConfig *gtc) { - pds::finalize(); - Persistent::finalize(); + delete g; } void parInit(GlobalTestConfig *gtc, LocalTestConfig *ltc) { diff --git a/src/tests/MapTest.hpp b/src/tests/MapTest.hpp index 304f72da..9c4a4b0e 100644 --- a/src/tests/MapTest.hpp +++ b/src/tests/MapTest.hpp @@ -242,8 +242,7 @@ class MapTest : public Test{ Savitar_core_finalize(); pthread_mutex_destroy(&snapshot_lock); #endif - pds::finalize(); - Persistent::finalize(); + delete m; } }; diff --git a/src/tests/QueueChurnTest.hpp b/src/tests/QueueChurnTest.hpp index d799fd15..11cc03e0 100644 --- a/src/tests/QueueChurnTest.hpp +++ b/src/tests/QueueChurnTest.hpp @@ -167,8 +167,7 @@ class QueueChurnTest : public Test{ Savitar_core_finalize(); pthread_mutex_destroy(&snapshot_lock); #endif - pds::finalize(); - Persistent::finalize(); + delete q; } void getRideable(GlobalTestConfig* gtc){ Rideable* ptr = gtc->allocRideable(); diff --git a/src/tests/QueueTest.hpp b/src/tests/QueueTest.hpp index c386ffcd..63af25ef 100644 --- a/src/tests/QueueTest.hpp +++ b/src/tests/QueueTest.hpp @@ -197,8 +197,7 @@ class QueueTest : public Test{ Savitar_core_finalize(); pthread_mutex_destroy(&snapshot_lock); #endif - pds::finalize(); - Persistent::finalize(); + delete q; } 
void getRideable(GlobalTestConfig* gtc){ Rideable* ptr = gtc->allocRideable(); diff --git a/src/tests/RecoverVerifyTest.hpp b/src/tests/RecoverVerifyTest.hpp index 0dfff9e3..b2f5be98 100644 --- a/src/tests/RecoverVerifyTest.hpp +++ b/src/tests/RecoverVerifyTest.hpp @@ -138,8 +138,7 @@ int RecoverVerifyTest::execute(GlobalTestConfig* gtc, LocalTestConfig* ltc) template void RecoverVerifyTest::cleanup(GlobalTestConfig* gtc){ - pds::finalize(); - Persistent::finalize(); + delete m; } #endif diff --git a/src/tests/TGraphConstructionTest.hpp b/src/tests/TGraphConstructionTest.hpp index 44100aa0..d4ccbe1c 100644 --- a/src/tests/TGraphConstructionTest.hpp +++ b/src/tests/TGraphConstructionTest.hpp @@ -113,8 +113,7 @@ class TGraphConstructionTest : public Test { } void cleanup(GlobalTestConfig *gtc) { - pds::finalize(); - Persistent::finalize(); + delete g; } }; #endif diff --git a/src/tests/YCSBTest.hpp b/src/tests/YCSBTest.hpp index 4f2aaa67..b7640265 100644 --- a/src/tests/YCSBTest.hpp +++ b/src/tests/YCSBTest.hpp @@ -141,8 +141,7 @@ class YCSBTest : public Test{ return ops; } void cleanup(GlobalTestConfig* gtc){ - pds::finalize(); - Persistent::finalize(); + delete m; for(int i=0;itask_num;i++){ delete traces[i]; } From 45aa3c4970f3f3e0e3bac92f481fa1ba0dee1051 Mon Sep 17 00:00:00 2001 From: Haosen Wen Date: Mon, 9 Nov 2020 08:26:27 -0500 Subject: [PATCH 21/56] moved begin_op/end_op logic into EpochSys class --- src/persist/EpochSys.hpp | 33 +++++++++++++++++++++++++++++++++ src/persist/api/pblk_naked.hpp | 20 ++++---------------- 2 files changed, 37 insertions(+), 16 deletions(-) diff --git a/src/persist/EpochSys.hpp b/src/persist/EpochSys.hpp index cc42cdaf..ab5fa729 100644 --- a/src/persist/EpochSys.hpp +++ b/src/persist/EpochSys.hpp @@ -116,6 +116,39 @@ class EpochSys{ } Persistent::simulate_crash(tid); } + + ///////// + // API // + ///////// + + void begin_op(std::vector& blks){ + assert(epochs[tid].ui == NULL_EPOCH); + epochs[tid].ui = esys->begin_transaction(); + 
for (auto b = blks.begin(); b != blks.end(); b++){ + register_alloc_pblk(*b, epochs[tid].ui); + } + } + + void end_op(){ + if (epochs[tid].ui != NULL_EPOCH){ + end_transaction(epochs[tid].ui); + epochs[tid].ui = NULL_EPOCH; + } + } + + void end_readonly_op(){ + if (epochs[tid].ui != NULL_EPOCH){ + end_readonly_transaction(epochs[tid].ui); + epochs[tid].ui = NULL_EPOCH; + } + } + + void abort_op(){ + assert(epochs[tid].ui != NULL_EPOCH); + abort_transaction(epochs[tid].ui); + epochs[tid].ui = NULL_EPOCH; + } + //////////////// // Operations // //////////////// diff --git a/src/persist/api/pblk_naked.hpp b/src/persist/api/pblk_naked.hpp index 0a3365bc..dd53ddd6 100644 --- a/src/persist/api/pblk_naked.hpp +++ b/src/persist/api/pblk_naked.hpp @@ -38,34 +38,22 @@ namespace pds{ }) #define BEGIN_OP( ... ) ({ \ - assert(esys->epochs[EpochSys::tid].ui == NULL_EPOCH);\ - esys->epochs[EpochSys::tid].ui = esys->begin_transaction();\ std::vector __blks = { __VA_ARGS__ };\ - for (auto b = __blks.begin(); b != __blks.end(); b++){\ - esys->register_alloc_pblk(*b, esys->epochs[EpochSys::tid].ui);\ - }\ - assert(esys->epochs[EpochSys::tid].ui != NULL_EPOCH); }) + esys->begin_op(__blks);}) // end current operation by reducing transaction count of our epoch. // if our operation is already aborted, do nothing. #define END_OP ({\ - if (esys->epochs[EpochSys::tid].ui != NULL_EPOCH){ \ - esys->end_transaction(esys->epochs[EpochSys::tid].ui);\ - esys->epochs[EpochSys::tid].ui = NULL_EPOCH;} }) + esys->end_op(); }) // end current operation by reducing transaction count of our epoch. // if our operation is already aborted, do nothing. #define END_READONLY_OP ({\ - if (esys->epochs[EpochSys::tid].ui != NULL_EPOCH){ \ - esys->end_readonly_transaction(esys->epochs[EpochSys::tid].ui);\ - esys->epochs[EpochSys::tid].ui = NULL_EPOCH;} }) + esys->end_readonly_op(); }) // end current epoch and not move towards next epoch in esys. 
#define ABORT_OP ({ \ - assert(esys->epochs[EpochSys::tid].ui != NULL_EPOCH);\ - esys->abort_transaction(esys->epochs[EpochSys::tid].ui);\ - esys->epochs[EpochSys::tid].ui = NULL_EPOCH;}) - + esys->abort_op(); }) class EpochHolder{ public: From 505bae3bc26f0c67611d78caa2ead974678d4325 Mon Sep 17 00:00:00 2001 From: Haosen Wen Date: Mon, 9 Nov 2020 17:59:27 -0500 Subject: [PATCH 22/56] added PBlk constructor that takes a Recoverable* as argument --- src/persist/PersistStructs.cpp | 5 +++++ src/persist/PersistStructs.hpp | 6 +++++- src/persist/Recoverable.cpp | 23 +++++++++++++++++++++++ src/persist/Recoverable.hpp | 20 +++++--------------- 4 files changed, 38 insertions(+), 16 deletions(-) create mode 100644 src/persist/Recoverable.cpp diff --git a/src/persist/PersistStructs.cpp b/src/persist/PersistStructs.cpp index 9254b298..6d7d10a3 100644 --- a/src/persist/PersistStructs.cpp +++ b/src/persist/PersistStructs.cpp @@ -1,8 +1,13 @@ #include "PersistStructs.hpp" #include "EpochSys.hpp" +#include "Recoverable.hpp" namespace pds{ +PBlk::PBlk(Recoverable* ds){ + ds->register_alloc_pblk(this); +} + void sc_desc_t::try_complete(EpochSys* esys, uint64_t addr){ nbptr_t _d = nbptr.load(); int ret = 0; diff --git a/src/persist/PersistStructs.hpp b/src/persist/PersistStructs.hpp index 663314d1..29f76f54 100644 --- a/src/persist/PersistStructs.hpp +++ b/src/persist/PersistStructs.hpp @@ -12,6 +12,8 @@ #include "Persistent.hpp" #include "common_macros.hpp" +class Recoverable; + namespace pds{ struct OldSeeNewException : public std::exception { const char * what () const throw () { @@ -22,6 +24,7 @@ namespace pds{ enum PBlkType {INIT, ALLOC, UPDATE, DELETE, RECLAIMED, EPOCH, OWNED}; class EpochSys; + ///////////////////////////// // PBlk-related structures // @@ -58,8 +61,9 @@ namespace pds{ } // id gets inited by EpochSys instance. PBlk(): epoch(NULL_EPOCH), blktype(INIT), owner_id(0), retire(nullptr){} + PBlk(Recoverable* ds); // id gets inited by EpochSys instance. 
- PBlk(const PBlk* owner): + PBlk(const PBlk* owner): blktype(OWNED), owner_id(owner->blktype==OWNED? owner->owner_id : owner->id) {} PBlk(const PBlk& oth): blktype(oth.blktype==OWNED? OWNED:INIT), owner_id(oth.owner_id), id(oth.id) {} inline uint64_t get_id() {return id;} diff --git a/src/persist/Recoverable.cpp b/src/persist/Recoverable.cpp new file mode 100644 index 00000000..2f313306 --- /dev/null +++ b/src/persist/Recoverable.cpp @@ -0,0 +1,23 @@ +#include "Recoverable.hpp" + +Recoverable::Recoverable(GlobalTestConfig* gtc){ + // init Persistent allocator + Persistent::init(); + // init epoch system + pds::init(gtc); + // init main thread + pds::init_thread(0); + + // TODO: replace this with _esys initialization. + _esys = pds::esys; +} +Recoverable::~Recoverable(){ + pds::finalize(); + Persistent::finalize(); +} +void Recoverable::init_thread(GlobalTestConfig*, LocalTestConfig* ltc){ + pds::init_thread(ltc->tid); +} +void Recoverable::register_alloc_pblk(pds::PBlk* pblk){ + _esys->register_alloc_pblk(pblk, _esys->epochs[pds::EpochSys::tid].ui); +} \ No newline at end of file diff --git a/src/persist/Recoverable.hpp b/src/persist/Recoverable.hpp index a62d03c9..32505719 100644 --- a/src/persist/Recoverable.hpp +++ b/src/persist/Recoverable.hpp @@ -9,23 +9,13 @@ class Recoverable{ pds::EpochSys* _esys = nullptr; public: - Recoverable(GlobalTestConfig* gtc){ - // init Persistent allocator - Persistent::init(); - // init epoch system - pds::init(gtc); - // init main thread - pds::init_thread(0); - } - ~Recoverable(){ - pds::finalize(); - Persistent::finalize(); - } - void init_thread(GlobalTestConfig*, LocalTestConfig* ltc){ - pds::init_thread(ltc->tid); - } + Recoverable(GlobalTestConfig* gtc); + ~Recoverable(); + void init_thread(GlobalTestConfig*, LocalTestConfig* ltc); + void register_alloc_pblk(pds::PBlk* pblk); // return num of blocks recovered. 
virtual int recover(bool simulated = false) = 0; }; + #endif \ No newline at end of file From 3ac4ddb90f4d9c6cb728134bc6179694d86d54a6 Mon Sep 17 00:00:00 2001 From: Haosen Wen Date: Mon, 9 Nov 2020 21:12:07 -0500 Subject: [PATCH 23/56] put PNEW & PDELETE logic into esys --- src/persist/EpochSys.hpp | 34 +++++++++++++++++++++++++++++----- src/persist/api/pblk_naked.hpp | 18 ++++++++---------- 2 files changed, 37 insertions(+), 15 deletions(-) diff --git a/src/persist/EpochSys.hpp b/src/persist/EpochSys.hpp index ab5fa729..5750d5e6 100644 --- a/src/persist/EpochSys.hpp +++ b/src/persist/EpochSys.hpp @@ -52,12 +52,14 @@ class EpochSys{ std::mutex dedicated_epoch_advancer_lock; - /* public members for API only */ + /* public members for API */ // current epoch of each thread. padded* epochs = nullptr; // local descriptors for DCSS // TODO: maybe put this into a derived class for NB data structures? padded* local_descs = nullptr; + // containers for pending allocations + padded>* pending_allocs = nullptr; // system mode that toggles on/off PDELETE for recovery purpose. SysMode sys_mode = ONLINE; @@ -67,6 +69,7 @@ class EpochSys{ epochs[i].ui = NULL_EPOCH; } local_descs = new padded[gtc->task_num]; + pending_allocs = new padded>[gtc->task_num]; reset(); // TODO: change to recover() later on. 
} @@ -121,12 +124,14 @@ class EpochSys{ // API // ///////// - void begin_op(std::vector& blks){ + void begin_op(){ assert(epochs[tid].ui == NULL_EPOCH); epochs[tid].ui = esys->begin_transaction(); - for (auto b = blks.begin(); b != blks.end(); b++){ + for (auto b = pending_allocs[tid].ui.begin(); + b != pending_allocs[tid].ui.end(); b++){ register_alloc_pblk(*b, epochs[tid].ui); } + pending_allocs[tid].ui.clear(); } void end_op(){ @@ -149,6 +154,24 @@ class EpochSys{ epochs[tid].ui = NULL_EPOCH; } + template + void pdelete(T* b){ + ASSERT_DERIVE(T, PBlk); + ASSERT_COPY(T); + + if (sys_mode == ONLINE){ + if (epochs[tid].ui != NULL_EPOCH){ + free_pblk(b, epochs[tid].ui); + } else { + if (b->epoch == NULL_EPOCH){ + assert(pending_allocs[tid].ui.find(b) != pending_allocs[tid].ui.end()); + pending_allocs[tid].ui.erase(b); + } + delete b; + } + } + } + //////////////// // Operations // //////////////// @@ -246,13 +269,14 @@ T* EpochSys::register_alloc_pblk(T* b, uint64_t c){ // "requires copying"); ASSERT_DERIVE(T, PBlk); ASSERT_COPY(T); - + + PBlk* blk = b; if (c == NULL_EPOCH){ // register alloc before BEGIN_OP, return. Will be done by // the BEGIN_OP that calls this again with a non-NULL c. + pending_allocs[tid].ui.insert(blk); return b; } - PBlk* blk = b; blk->epoch = c; // Wentao: It's possible that payload is registered multiple times assert(blk->blktype == INIT || blk->blktype == OWNED || diff --git a/src/persist/api/pblk_naked.hpp b/src/persist/api/pblk_naked.hpp index dd53ddd6..1c4073c3 100644 --- a/src/persist/api/pblk_naked.hpp +++ b/src/persist/api/pblk_naked.hpp @@ -3,6 +3,7 @@ #include "TestConfig.hpp" #include "EpochSys.hpp" +#include "Recoverable.hpp" #include "ConcurrentPrimitives.hpp" #include @@ -37,23 +38,23 @@ namespace pds{ esys->check_epoch(esys->epochs[EpochSys::tid].ui);\ }) + // TODO: get rid of arguments in rideables. #define BEGIN_OP( ... 
) ({ \ - std::vector __blks = { __VA_ARGS__ };\ - esys->begin_op(__blks);}) + esys->begin_op();}) // end current operation by reducing transaction count of our epoch. // if our operation is already aborted, do nothing. #define END_OP ({\ - esys->end_op(); }) + esys->end_op(); }) // end current operation by reducing transaction count of our epoch. // if our operation is already aborted, do nothing. #define END_READONLY_OP ({\ - esys->end_readonly_op(); }) + esys->end_readonly_op(); }) // end current epoch and not move towards next epoch in esys. #define ABORT_OP ({ \ - esys->abort_op(); }) + esys->abort_op(); }) class EpochHolder{ public: @@ -77,15 +78,12 @@ namespace pds{ BEGIN_OP( __VA_ARGS__ );\ EpochHolderReadOnly __holder; + // TODO: replace this with just new(Recoverable* ds, ... ) #define PNEW(t, ...) ({\ - esys->epochs[EpochSys::tid].ui == NULL_EPOCH ? \ - new t( __VA_ARGS__ ) : \ esys->register_alloc_pblk(new t( __VA_ARGS__ ), esys->epochs[EpochSys::tid].ui);}) #define PDELETE(b) ({\ - if (esys->sys_mode == ONLINE) {\ - assert(esys->epochs[EpochSys::tid].ui != NULL_EPOCH);\ - esys->free_pblk(b, esys->epochs[EpochSys::tid].ui);}}) + esys->pdelete(b);}) #define PDELETE_DATA(b) ({\ if (esys->sys_mode == ONLINE) {\ From 5099e47765a7707f619e12bb10610a9cc7f0fefc Mon Sep 17 00:00:00 2001 From: Haosen Wen Date: Mon, 9 Nov 2020 21:19:56 -0500 Subject: [PATCH 24/56] put PRETIRE & PRECLAIM logic into esys --- src/persist/EpochSys.hpp | 24 ++++++++++++++++++++++++ src/persist/api/pblk_naked.hpp | 12 ++---------- 2 files changed, 26 insertions(+), 10 deletions(-) diff --git a/src/persist/EpochSys.hpp b/src/persist/EpochSys.hpp index 5750d5e6..8a4fa052 100644 --- a/src/persist/EpochSys.hpp +++ b/src/persist/EpochSys.hpp @@ -172,6 +172,30 @@ class EpochSys{ } } + template + void pretire(T* b){ + ASSERT_DERIVE(T, PBlk); + ASSERT_COPY(T); + + assert(eochs[tid].ui != NULL_EPOCH); + retire_pblk(b, epochs[tid].ui); + + } + + template + void preclaim(T* b){ + ASSERT_DERIVE(T, 
PBlk); + ASSERT_COPY(T); + + if (epochs[tid].ui == NULL_EPOCH){ + begin_op(); + } + reclaim_pblk(b, epochs[tid].ui); + if (epochs[tid].ui == NULL_EPOCH){ + end_op(); + } + } + //////////////// // Operations // //////////////// diff --git a/src/persist/api/pblk_naked.hpp b/src/persist/api/pblk_naked.hpp index 1c4073c3..118eb3ea 100644 --- a/src/persist/api/pblk_naked.hpp +++ b/src/persist/api/pblk_naked.hpp @@ -91,18 +91,10 @@ namespace pds{ }}) #define PRETIRE(b) ({\ - assert(esys->epochs[EpochSys::tid].ui != NULL_EPOCH);\ - esys->retire_pblk(b, esys->epochs[EpochSys::tid].ui);\ - }) + esys->pretire(b);}) #define PRECLAIM(b) ({\ - if (esys->epochs[EpochSys::tid].ui == NULL_EPOCH){\ - BEGIN_OP_AUTOEND();\ - esys->reclaim_pblk(b, esys->epochs[EpochSys::tid].ui);\ - } else {\ - esys->reclaim_pblk(b, esys->epochs[EpochSys::tid].ui);\ - }\ - }) + esys->preclaim(b);}) // macro for concatenating two tokens into a new token #define TOKEN_CONCAT(a,b) a ## b From 4baef018345c598fc9d1abddf388ceaefa6cd0f7 Mon Sep 17 00:00:00 2001 From: Haosen Wen Date: Wed, 11 Nov 2020 21:59:40 -0500 Subject: [PATCH 25/56] made PNEW, getter and setter work with esys instance --- src/persist/EpochSys.cpp | 2 +- src/persist/EpochSys.hpp | 31 +++++++----- src/persist/PersistStructs.cpp | 4 -- src/persist/PersistStructs.hpp | 1 - src/persist/Recoverable.cpp | 3 -- src/persist/Recoverable.hpp | 3 +- src/persist/api/pblk_naked.hpp | 62 +++++++++++++++++++---- src/persist/pnew.hpp | 89 ++++++++++++++++++++++++++++++++++ 8 files changed, 163 insertions(+), 32 deletions(-) create mode 100644 src/persist/pnew.hpp diff --git a/src/persist/EpochSys.cpp b/src/persist/EpochSys.cpp index 1e9eb142..fc917dc3 100644 --- a/src/persist/EpochSys.cpp +++ b/src/persist/EpochSys.cpp @@ -129,8 +129,8 @@ namespace pds{ do{ ret = global_epoch->load(std::memory_order_seq_cst); } while(!trans_tracker->consistent_register_active(ret, ret)); - //to_be_freed->free_on_new_epoch(ret); to_be_freed->free_on_new_epoch(ret); + 
return ret; } diff --git a/src/persist/EpochSys.hpp b/src/persist/EpochSys.hpp index 8a4fa052..b40e8fbc 100644 --- a/src/persist/EpochSys.hpp +++ b/src/persist/EpochSys.hpp @@ -95,7 +95,6 @@ class EpochSys{ void parse_env(); - // reset the epoch system. Maybe put it in the constructor later on. void reset(){ task_num = gtc->task_num; if (!epoch_container){ @@ -105,11 +104,6 @@ class EpochSys{ } global_epoch->store(INIT_EPOCH, std::memory_order_relaxed); parse_env(); - - // if (uid_generator){ - // delete uid_generator; - // } - // uid_generator = new UIDGenerator(gtc->task_num); } void simulate_crash(){ @@ -125,13 +119,14 @@ class EpochSys{ ///////// void begin_op(){ - assert(epochs[tid].ui == NULL_EPOCH); + assert(epochs[tid].ui == NULL_EPOCH); epochs[tid].ui = esys->begin_transaction(); + // TODO: any room for optimization here? + // TODO: put pending_allocs-related stuff into operations? for (auto b = pending_allocs[tid].ui.begin(); b != pending_allocs[tid].ui.end(); b++){ register_alloc_pblk(*b, epochs[tid].ui); } - pending_allocs[tid].ui.clear(); } void end_op(){ @@ -139,6 +134,7 @@ class EpochSys{ end_transaction(epochs[tid].ui); epochs[tid].ui = NULL_EPOCH; } + pending_allocs[tid].ui.clear(); } void end_readonly_op(){ @@ -146,6 +142,7 @@ class EpochSys{ end_readonly_transaction(epochs[tid].ui); epochs[tid].ui = NULL_EPOCH; } + assert(pending_allocs[tid].ui.empty()); } void abort_op(){ @@ -179,7 +176,6 @@ class EpochSys{ assert(eochs[tid].ui != NULL_EPOCH); retire_pblk(b, epochs[tid].ui); - } template @@ -196,6 +192,18 @@ class EpochSys{ } } + // pnew is in a separate file since there are a bunch of them. + // add more as needed. 
+ #include "pnew.hpp" + + void recover_mode(){ + sys_mode = RECOVER; // PDELETE -> nop + } + + void online_mode(){ + sys_mode = ONLINE; + } + //////////////// // Operations // //////////////// @@ -296,8 +304,9 @@ T* EpochSys::register_alloc_pblk(T* b, uint64_t c){ PBlk* blk = b; if (c == NULL_EPOCH){ - // register alloc before BEGIN_OP, return. Will be done by - // the BEGIN_OP that calls this again with a non-NULL c. + // register alloc before BEGIN_OP, put it into pending_allocs bucket and + // return. Will be done by the BEGIN_OP that calls this again with a + // non-NULL c. pending_allocs[tid].ui.insert(blk); return b; } diff --git a/src/persist/PersistStructs.cpp b/src/persist/PersistStructs.cpp index 6d7d10a3..8e4a6583 100644 --- a/src/persist/PersistStructs.cpp +++ b/src/persist/PersistStructs.cpp @@ -4,10 +4,6 @@ namespace pds{ -PBlk::PBlk(Recoverable* ds){ - ds->register_alloc_pblk(this); -} - void sc_desc_t::try_complete(EpochSys* esys, uint64_t addr){ nbptr_t _d = nbptr.load(); int ret = 0; diff --git a/src/persist/PersistStructs.hpp b/src/persist/PersistStructs.hpp index 29f76f54..25e61004 100644 --- a/src/persist/PersistStructs.hpp +++ b/src/persist/PersistStructs.hpp @@ -61,7 +61,6 @@ namespace pds{ } // id gets inited by EpochSys instance. PBlk(): epoch(NULL_EPOCH), blktype(INIT), owner_id(0), retire(nullptr){} - PBlk(Recoverable* ds); // id gets inited by EpochSys instance. PBlk(const PBlk* owner): blktype(OWNED), owner_id(owner->blktype==OWNED? 
owner->owner_id : owner->id) {} diff --git a/src/persist/Recoverable.cpp b/src/persist/Recoverable.cpp index 2f313306..cb4644be 100644 --- a/src/persist/Recoverable.cpp +++ b/src/persist/Recoverable.cpp @@ -17,7 +17,4 @@ Recoverable::~Recoverable(){ } void Recoverable::init_thread(GlobalTestConfig*, LocalTestConfig* ltc){ pds::init_thread(ltc->tid); -} -void Recoverable::register_alloc_pblk(pds::PBlk* pblk){ - _esys->register_alloc_pblk(pblk, _esys->epochs[pds::EpochSys::tid].ui); } \ No newline at end of file diff --git a/src/persist/Recoverable.hpp b/src/persist/Recoverable.hpp index 32505719..45bee97f 100644 --- a/src/persist/Recoverable.hpp +++ b/src/persist/Recoverable.hpp @@ -7,12 +7,11 @@ // TODO: report recover errors/exceptions class Recoverable{ - pds::EpochSys* _esys = nullptr; public: + pds::EpochSys* _esys = nullptr; Recoverable(GlobalTestConfig* gtc); ~Recoverable(); void init_thread(GlobalTestConfig*, LocalTestConfig* ltc); - void register_alloc_pblk(pds::PBlk* pblk); // return num of blocks recovered. virtual int recover(bool simulated = false) = 0; }; diff --git a/src/persist/api/pblk_naked.hpp b/src/persist/api/pblk_naked.hpp index 118eb3ea..d7e7f7af 100644 --- a/src/persist/api/pblk_naked.hpp +++ b/src/persist/api/pblk_naked.hpp @@ -71,30 +71,34 @@ namespace pds{ }; #define BEGIN_OP_AUTOEND( ... ) \ - BEGIN_OP( __VA_ARGS__ );\ + BEGIN_OP();\ EpochHolder __holder; #define BEGIN_READONLY_OP_AUTOEND( ... ) \ - BEGIN_OP( __VA_ARGS__ );\ + BEGIN_OP();\ EpochHolderReadOnly __holder; - // TODO: replace this with just new(Recoverable* ds, ... ) #define PNEW(t, ...) ({\ - esys->register_alloc_pblk(new t( __VA_ARGS__ ), esys->epochs[EpochSys::tid].ui);}) + esys->pnew(__VA_ARGS__ );}) #define PDELETE(b) ({\ esys->pdelete(b);}) + #define PRETIRE(b) ({\ + esys->pretire(b);}) + + #define PRECLAIM(b) ({\ + esys->preclaim(b);}) + + // Hs: This is for "owned" PBlk's, currently not used in code base. 
+ // may be useful for "data" blocks like dynamically-sized + // persistent String payload. #define PDELETE_DATA(b) ({\ if (esys->sys_mode == ONLINE) {\ delete(b);\ }}) - #define PRETIRE(b) ({\ - esys->pretire(b);}) - #define PRECLAIM(b) ({\ - esys->preclaim(b);}) // macro for concatenating two tokens into a new token #define TOKEN_CONCAT(a,b) a ## b @@ -109,11 +113,21 @@ namespace pds{ t TOKEN_CONCAT(m_, n);\ public:\ /* get method open a pblk for read. */\ + t TOKEN_CONCAT(get_, n)(Recoverable* ds) const{\ + assert(ds->_esys->epochs[EpochSys::tid].ui != NULL_EPOCH);\ + return ds->_esys->openread_pblk(this, esys->epochs[EpochSys::tid].ui)->TOKEN_CONCAT(m_, n);\ + }\ t TOKEN_CONCAT(get_, n)() const{\ assert(esys->epochs[EpochSys::tid].ui != NULL_EPOCH);\ return esys->openread_pblk(this, esys->epochs[EpochSys::tid].ui)->TOKEN_CONCAT(m_, n);\ }\ /* get method open a pblk for read. Allows old-see-new reads. */\ + t TOKEN_CONCAT(get_unsafe_, n)(Recoverable* ds) const{\ + if(ds->_esys->epochs[EpochSys::tid].ui != NULL_EPOCH)\ + return ds->_esys->openread_pblk_unsafe(this, ds->_esys->epochs[EpochSys::tid].ui)->TOKEN_CONCAT(m_, n);\ + else\ + return TOKEN_CONCAT(m_, n);\ + }\ t TOKEN_CONCAT(get_unsafe_, n)() const{\ if(esys->epochs[EpochSys::tid].ui != NULL_EPOCH)\ return esys->openread_pblk_unsafe(this, esys->epochs[EpochSys::tid].ui)->TOKEN_CONCAT(m_, n);\ @@ -122,6 +136,14 @@ namespace pds{ }\ /* set method open a pblk for write. 
return a new copy when necessary */\ template \ + T* TOKEN_CONCAT(set_, n)(Recoverable* ds, const in_type& TOKEN_CONCAT(tmp_, n)){\ + assert(ds->_esys->epochs[EpochSys::tid].ui != NULL_EPOCH);\ + auto ret = ds->_esys->openwrite_pblk(this, ds->_esys->epochs[EpochSys::tid].ui);\ + ret->TOKEN_CONCAT(m_, n) = TOKEN_CONCAT(tmp_, n);\ + ds->_esys->register_update_pblk(ret, ds->_esys->epochs[EpochSys::tid].ui);\ + return ret;\ + }\ + template \ T* TOKEN_CONCAT(set_, n)(const in_type& TOKEN_CONCAT(tmp_, n)){\ assert(esys->epochs[EpochSys::tid].ui != NULL_EPOCH);\ auto ret = esys->openwrite_pblk(this, esys->epochs[EpochSys::tid].ui);\ @@ -131,6 +153,11 @@ namespace pds{ }\ /* set the field by the parameter. called only outside BEGIN_OP and END_OP */\ template \ + void TOKEN_CONCAT(set_unsafe_, n)(Recoverable* ds, const in_type& TOKEN_CONCAT(tmp_, n)){\ + assert(ds->_esys->epochs[EpochSys::tid].ui == NULL_EPOCH);\ + TOKEN_CONCAT(m_, n) = TOKEN_CONCAT(tmp_, n);\ + }\ + template \ void TOKEN_CONCAT(set_unsafe_, n)(const in_type& TOKEN_CONCAT(tmp_, n)){\ assert(esys->epochs[EpochSys::tid].ui == NULL_EPOCH);\ TOKEN_CONCAT(m_, n) = TOKEN_CONCAT(tmp_, n);\ @@ -145,16 +172,31 @@ namespace pds{ protected:\ t TOKEN_CONCAT(m_, n)[s];\ /* get method open a pblk for read. */\ + t TOKEN_CONCAT(get_, n)(Recoverable* ds, int i) const{\ + assert(ds->_esys->epochs[EpochSys::tid].ui != NULL_EPOCH);\ + return ds->_esys->openread_pblk(this, ds->_esys->epochs[EpochSys::tid].ui)->TOKEN_CONCAT(m_, n)[i];\ + }\ t TOKEN_CONCAT(get_, n)(int i) const{\ assert(esys->epochs[EpochSys::tid].ui != NULL_EPOCH);\ return esys->openread_pblk(this, esys->epochs[EpochSys::tid].ui)->TOKEN_CONCAT(m_, n)[i];\ }\ /* get method open a pblk for read. Allows old-see-new reads. 
*/\ + t TOKEN_CONCAT(get_unsafe_, n)(Recoverable* ds, int i) const{\ + assert(ds->_esys->epochs[EpochSys::tid].ui != NULL_EPOCH);\ + return ds->_esys->openread_pblk_unsafe(this, ds->_esys->epochs[EpochSys::tid].ui)->TOKEN_CONCAT(m_, n)[i];\ + }\ t TOKEN_CONCAT(get_unsafe_, n)(int i) const{\ assert(esys->epochs[EpochSys::tid].ui != NULL_EPOCH);\ return esys->openread_pblk_unsafe(this, esys->epochs[EpochSys::tid].ui)->TOKEN_CONCAT(m_, n)[i];\ }\ /* set method open a pblk for write. return a new copy when necessary */\ + T* TOKEN_CONCAT(set_, n)(Recoverable* ds, int i, t TOKEN_CONCAT(tmp_, n)){\ + assert(ds->_esys->epochs[EpochSys::tid].ui != NULL_EPOCH);\ + auto ret = ds->_esys->openwrite_pblk(this, ds->_esys->epochs[EpochSys::tid].ui);\ + ret->TOKEN_CONCAT(m_, n)[i] = TOKEN_CONCAT(tmp_, n);\ + ds->_esys->register_update_pblk(ret, ds->_esys->epochs[EpochSys::tid].ui);\ + return ret;\ + }\ T* TOKEN_CONCAT(set_, n)(int i, t TOKEN_CONCAT(tmp_, n)){\ assert(esys->epochs[EpochSys::tid].ui != NULL_EPOCH);\ auto ret = esys->openwrite_pblk(this, esys->epochs[EpochSys::tid].ui);\ @@ -172,11 +214,11 @@ namespace pds{ } inline void recover_mode(){ - esys->sys_mode = RECOVER; + esys->recover_mode(); } inline void online_mode(){ - esys->sys_mode = ONLINE; + esys->online_mode(); } } #endif diff --git a/src/persist/pnew.hpp b/src/persist/pnew.hpp new file mode 100644 index 00000000..bf395c47 --- /dev/null +++ b/src/persist/pnew.hpp @@ -0,0 +1,89 @@ +// NOTE: don't include this file elsewhere! +// this is supposed to be a part of EpochSys.hpp + +// TODO: replace `new` operator of T with +// per-heap allocation and placement new. 
+ +template +T* pnew(){ + T* ret = new T(); + register_alloc_pblk(ret, epochs[tid].ui); + return ret; +} + +template +T* pnew(T1 a1){ + T* ret = new T(a1); + register_alloc_pblk(ret, epochs[tid].ui); + return ret; +} + +template +T* pnew(T1 a1, T2 a2){ + T* ret = new T(a1, a2); + register_alloc_pblk(ret, epochs[tid].ui); + return ret; +} + +template +T* pnew(T1 a1, T2 a2, T3 a3){ + T* ret = new T(a1, a2, a3); + register_alloc_pblk(ret, epochs[tid].ui); + return ret; +} + +template +T* pnew(T1 a1, T2 a2, T3 a3, T4 a4){ + T* ret = new T(a1, a2, a3, a4); + register_alloc_pblk(ret, epochs[tid].ui); + return ret; +} + +template +T* pnew(T1 a1, T2 a2, T3 a3, T4 a4, T5 a5){ + T* ret = new T(a1, a2, a3, a4, a5); + register_alloc_pblk(ret, epochs[tid].ui); + return ret; +} + +template +T* pnew(T1 a1, T2 a2, T3 a3, T4 a4, T5 a5, T6 a6){ + T* ret = new T(a1, a2, a3, a4, a5, a6); + register_alloc_pblk(ret, epochs[tid].ui); + return ret; +} + +template +T* pnew(T1 a1, T2 a2, T3 a3, T4 a4, T5 a5, T6 a6, T7 a7){ + T* ret = new T(a1, a2, a3, a4, a5, a6, a7); + register_alloc_pblk(ret, epochs[tid].ui); + return ret; +} + +template +T* pnew(T1 a1, T2 a2, T3 a3, T4 a4, T5 a5, T6 a6, T7 a7, T8 a8){ + T* ret = new T(a1, a2, a3, a4, a5, a6, a7, a8); + register_alloc_pblk(ret, epochs[tid].ui); + return ret; +} + +template +T* pnew(T1 a1, T2 a2, T3 a3, T4 a4, T5 a5, T6 a6, T7 a7, T8 a8, T9 a9){ + T* ret = new T(a1, a2, a3, a4, a5, a6, a7, a8, a9); + register_alloc_pblk(ret, epochs[tid].ui); + return ret; +} + +template +T* pnew(T1 a1, T2 a2, T3 a3, T4 a4, T5 a5, T6 a6, T7 a7, T8 a8, T9 a9, T10 a10){ + T* ret = new T(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10); + register_alloc_pblk(ret, epochs[tid].ui); + return ret; +} + +// add more as needed. 
\ No newline at end of file From 0a6f6ba5f428d82a7be2c597cc7fdc58ce674641 Mon Sep 17 00:00:00 2001 From: Haosen Wen Date: Thu, 12 Nov 2020 17:01:32 -0500 Subject: [PATCH 26/56] got all other APIs into Recoverable.hpp --- src/persist/EpochSys.hpp | 43 ++++++-- src/persist/Recoverable.hpp | 192 ++++++++++++++++++++++++++++++++- src/persist/api/pblk_naked.hpp | 132 +---------------------- src/persist/pnew.hpp | 24 ++--- 4 files changed, 240 insertions(+), 151 deletions(-) diff --git a/src/persist/EpochSys.hpp b/src/persist/EpochSys.hpp index b40e8fbc..2acb4301 100644 --- a/src/persist/EpochSys.hpp +++ b/src/persist/EpochSys.hpp @@ -118,6 +118,10 @@ class EpochSys{ // API // ///////// + bool check_epoch(){ + return check_epoch(epochs[tid].ui); + } + void begin_op(){ assert(epochs[tid].ui == NULL_EPOCH); epochs[tid].ui = esys->begin_transaction(); @@ -171,18 +175,12 @@ class EpochSys{ template void pretire(T* b){ - ASSERT_DERIVE(T, PBlk); - ASSERT_COPY(T); - assert(eochs[tid].ui != NULL_EPOCH); retire_pblk(b, epochs[tid].ui); } template void preclaim(T* b){ - ASSERT_DERIVE(T, PBlk); - ASSERT_COPY(T); - if (epochs[tid].ui == NULL_EPOCH){ begin_op(); } @@ -192,9 +190,36 @@ class EpochSys{ } } - // pnew is in a separate file since there are a bunch of them. - // add more as needed. 
- #include "pnew.hpp" + template + T* register_alloc_pblk(T* b){ + return register_alloc_pblk(b, epochs[tid].ui); + } + + template + void register_update_pblk(T* b){ + register_update_pblk(b, epochs[tid].ui); + } + + template + const T* openread_pblk(const T* b){ + assert(epochs[tid].ui != NULL_EPOCH); + return openread_pblk(b, epochs[tid].ui); + } + + template + const T* openread_pblk_unsafe(const T* b){ + if (epochs[tid].ui != NULL_EPOCH){ + return openread_pblk_unsafe(b, epochs[tid].ui); + } else { + return b; + } + } + + template + T* openwrite_pblk(T* b){ + assert(epochs[tid].ui != NULL_EPOCH); + return openwrite_pblk(b, epochs[tid].ui); + } void recover_mode(){ sys_mode = RECOVER; // PDELETE -> nop diff --git a/src/persist/Recoverable.hpp b/src/persist/Recoverable.hpp index 45bee97f..90a9982b 100644 --- a/src/persist/Recoverable.hpp +++ b/src/persist/Recoverable.hpp @@ -9,12 +9,200 @@ class Recoverable{ public: pds::EpochSys* _esys = nullptr; + // return num of blocks recovered. + virtual int recover(bool simulated = false) = 0; Recoverable(GlobalTestConfig* gtc); ~Recoverable(); + void init_thread(GlobalTestConfig*, LocalTestConfig* ltc); - // return num of blocks recovered. 
- virtual int recover(bool simulated = false) = 0; + bool check_epoch(){ + return _esys->check_epoch(); + } + void begin_op(){ + _esys->begin_op(); + } + void end_op(){ + _esys->end_op(); + } + void end_readonly_op(){ + _esys->end_readonly_op(); + } + void abort_op(){ + _esys->abort_op(); + } + class MontageOpHolder{ + pds::EpochSys* esys_; + public: + MontageOpHolder(Recoverable* ds): esys_(ds->_esys){ + esys_->begin_op(); + } + MontageOpHolder(pds::EpochSys* _esys): esys_(_esys){ + esys_->begin_op(); + } + MontageOpHolder(): esys_(pds::esys){ + esys_->begin_op(); + } + ~MontageOpHolder(){ + esys_->end_op(); + } + }; + class MontageOpHolderReadOnly{ + pds::EpochSys* esys_; + public: + MontageOpHolderReadOnly(Recoverable* ds): esys_(ds->_esys){ + esys_->begin_op(); + } + MontageOpHolderReadOnly(pds::EpochSys* _esys): esys_(_esys){ + esys_->begin_op(); + } + MontageOpHolderReadOnly(): esys_(pds::esys){ + esys_->begin_op(); + } + ~MontageOpHolderReadOnly(){ + esys_->end_readonly_op(); + } + }; + + // pnew is in a separate file since there are a bunch of them. + // add more as needed. 
+ #include "pnew.hpp" + + template + T* register_update_pblk(T* b){ + return _esys->register_update_pblk(b); + } + template + void pdelete(T* b){ + _esys->pdelete(b); + } + template + void pretire(T* b){ + _esys->pretire(b); + } + template + void preclaim(T* b){ + _esys->pdelete(b); + } + template + const T* openread_pblk(const T* b){ + return _esys->openread_pblk(b); + } + template + const T* openread_pblk_unsafe(const T* b){ + return _esys->openread_pblk_unsafe(b); + } + template + T* openwrite_pblk(T* b){ + return _esys->openwrite_pblk(b); + } + std::unordered_map* recover(const int rec_thd=10){ + return _esys->recover(rec_thd); + } + void recover_mode(){ + _esys->recover_mode(); + } + void online_mode(){ + _esys->online_mode(); + } + void flush(){ + _esys->flush(); + } }; +///////////////////////////// +// field generation macros // +///////////////////////////// + +// macro for concatenating two tokens into a new token +#define TOKEN_CONCAT(a,b) a ## b + +/** + * using the type t and the name n, generate a protected declaration for the + * field, as well as public getters and setters + */ +#define GENERATE_FIELD(t, n, T)\ +/* declare the field, with its name prefixed by m_ */\ +protected:\ + t TOKEN_CONCAT(m_, n);\ +public:\ +/* get method open a pblk for read. */\ +t TOKEN_CONCAT(get_, n)(Recoverable* ds) const{\ + return ds->openread_pblk(this)->TOKEN_CONCAT(m_, n);\ +}\ +t TOKEN_CONCAT(get_, n)() const{\ + return pds::esys->openread_pblk(this)->TOKEN_CONCAT(m_, n);\ +}\ +/* get method open a pblk for read. Allows old-see-new reads. */\ +t TOKEN_CONCAT(get_unsafe_, n)(Recoverable* ds) const{\ + return ds->openread_pblk_unsafe(this)->TOKEN_CONCAT(m_, n);\ +}\ +t TOKEN_CONCAT(get_unsafe_, n)() const{\ + return pds::esys->openread_pblk_unsafe(this)->TOKEN_CONCAT(m_, n);\ +}\ +/* set method open a pblk for write. 
return a new copy when necessary */\ +template \ +T* TOKEN_CONCAT(set_, n)(Recoverable* ds, const in_type& TOKEN_CONCAT(tmp_, n)){\ + assert(ds->epochs[EpochSys::tid].ui != NULL_EPOCH);\ + auto ret = ds->openwrite_pblk(this);\ + ret->TOKEN_CONCAT(m_, n) = TOKEN_CONCAT(tmp_, n);\ + ds->register_update_pblk(ret);\ + return ret;\ +}\ +template \ +T* TOKEN_CONCAT(set_, n)(const in_type& TOKEN_CONCAT(tmp_, n)){\ + assert(pds::esys->epochs[EpochSys::tid].ui != NULL_EPOCH);\ + auto ret = pds::esys->openwrite_pblk(this);\ + ret->TOKEN_CONCAT(m_, n) = TOKEN_CONCAT(tmp_, n);\ + pds::esys->register_update_pblk(ret);\ + return ret;\ +}\ +/* set the field by the parameter. called only outside BEGIN_OP and END_OP */\ +template \ +void TOKEN_CONCAT(set_unsafe_, n)(Recoverable* ds, const in_type& TOKEN_CONCAT(tmp_, n)){\ + TOKEN_CONCAT(m_, n) = TOKEN_CONCAT(tmp_, n);\ +}\ +template \ +void TOKEN_CONCAT(set_unsafe_, n)(const in_type& TOKEN_CONCAT(tmp_, n)){\ + TOKEN_CONCAT(m_, n) = TOKEN_CONCAT(tmp_, n);\ +} + +/** + * using the type t, the name n and length s, generate a protected + * declaration for the field, as well as public getters and setters + */ +#define GENERATE_ARRAY(t, n, s, T)\ +/* declare the field, with its name prefixed by m_ */\ +protected:\ + t TOKEN_CONCAT(m_, n)[s];\ +/* get method open a pblk for read. */\ +t TOKEN_CONCAT(get_, n)(Recoverable* ds, int i) const{\ + return ds->openread_pblk(this)->TOKEN_CONCAT(m_, n)[i];\ +}\ +t TOKEN_CONCAT(get_, n)(int i) const{\ + return pds::esys->openread_pblk(this)->TOKEN_CONCAT(m_, n)[i];\ +}\ +/* get method open a pblk for read. Allows old-see-new reads. */\ +t TOKEN_CONCAT(get_unsafe_, n)(Recoverable* ds, int i) const{\ + return ds->openread_pblk_unsafe(this)->TOKEN_CONCAT(m_, n)[i];\ +}\ +t TOKEN_CONCAT(get_unsafe_, n)(int i) const{\ + return pds::esys->openread_pblk_unsafe(this)->TOKEN_CONCAT(m_, n)[i];\ +}\ +/* set method open a pblk for write. 
return a new copy when necessary */\ +T* TOKEN_CONCAT(set_, n)(Recoverable* ds, int i, t TOKEN_CONCAT(tmp_, n)){\ + assert(ds->epochs[EpochSys::tid].ui != NULL_EPOCH);\ + auto ret = ds->openwrite_pblk(this);\ + ret->TOKEN_CONCAT(m_, n)[i] = TOKEN_CONCAT(tmp_, n);\ + ds->register_update_pblk(ret);\ + return ret;\ +}\ +T* TOKEN_CONCAT(set_, n)(int i, t TOKEN_CONCAT(tmp_, n)){\ + assert(pds::esys->epochs[EpochSys::tid].ui != NULL_EPOCH);\ + auto ret = pds::esys->openwrite_pblk(this);\ + ret->TOKEN_CONCAT(m_, n)[i] = TOKEN_CONCAT(tmp_, n);\ + pds::esys->register_update_pblk(ret);\ + return ret;\ +} + #endif \ No newline at end of file diff --git a/src/persist/api/pblk_naked.hpp b/src/persist/api/pblk_naked.hpp index d7e7f7af..7a4588ed 100644 --- a/src/persist/api/pblk_naked.hpp +++ b/src/persist/api/pblk_naked.hpp @@ -35,8 +35,7 @@ namespace pds{ } #define CHECK_EPOCH() ({\ - esys->check_epoch(esys->epochs[EpochSys::tid].ui);\ - }) + esys->check_epoch();}) // TODO: get rid of arguments in rideables. #define BEGIN_OP( ... ) ({ \ @@ -56,30 +55,14 @@ namespace pds{ #define ABORT_OP ({ \ esys->abort_op(); }) - class EpochHolder{ - public: - ~EpochHolder(){ - END_OP; - } - }; - - class EpochHolderReadOnly{ - public: - ~EpochHolderReadOnly(){ - END_READONLY_OP; - } - }; - #define BEGIN_OP_AUTOEND( ... ) \ - BEGIN_OP();\ - EpochHolder __holder; + Recoverable::MontageOpHolder __holder; #define BEGIN_READONLY_OP_AUTOEND( ... ) \ - BEGIN_OP();\ - EpochHolderReadOnly __holder; + Recoverable::MontageOpHolderReadOnly __holder; #define PNEW(t, ...) 
({\ - esys->pnew(__VA_ARGS__ );}) + esys->register_alloc_pblk(new t(__VA_ARGS__));}) #define PDELETE(b) ({\ esys->pdelete(b);}) @@ -98,113 +81,6 @@ namespace pds{ delete(b);\ }}) - - - // macro for concatenating two tokens into a new token - #define TOKEN_CONCAT(a,b) a ## b - - /** - * using the type t and the name n, generate a protected declaration for the - * field, as well as public getters and setters - */ - #define GENERATE_FIELD(t, n, T)\ - /* declare the field, with its name prefixed by m_ */\ - protected:\ - t TOKEN_CONCAT(m_, n);\ - public:\ - /* get method open a pblk for read. */\ - t TOKEN_CONCAT(get_, n)(Recoverable* ds) const{\ - assert(ds->_esys->epochs[EpochSys::tid].ui != NULL_EPOCH);\ - return ds->_esys->openread_pblk(this, esys->epochs[EpochSys::tid].ui)->TOKEN_CONCAT(m_, n);\ - }\ - t TOKEN_CONCAT(get_, n)() const{\ - assert(esys->epochs[EpochSys::tid].ui != NULL_EPOCH);\ - return esys->openread_pblk(this, esys->epochs[EpochSys::tid].ui)->TOKEN_CONCAT(m_, n);\ - }\ - /* get method open a pblk for read. Allows old-see-new reads. */\ - t TOKEN_CONCAT(get_unsafe_, n)(Recoverable* ds) const{\ - if(ds->_esys->epochs[EpochSys::tid].ui != NULL_EPOCH)\ - return ds->_esys->openread_pblk_unsafe(this, ds->_esys->epochs[EpochSys::tid].ui)->TOKEN_CONCAT(m_, n);\ - else\ - return TOKEN_CONCAT(m_, n);\ - }\ - t TOKEN_CONCAT(get_unsafe_, n)() const{\ - if(esys->epochs[EpochSys::tid].ui != NULL_EPOCH)\ - return esys->openread_pblk_unsafe(this, esys->epochs[EpochSys::tid].ui)->TOKEN_CONCAT(m_, n);\ - else\ - return TOKEN_CONCAT(m_, n);\ - }\ - /* set method open a pblk for write. 
return a new copy when necessary */\ - template \ - T* TOKEN_CONCAT(set_, n)(Recoverable* ds, const in_type& TOKEN_CONCAT(tmp_, n)){\ - assert(ds->_esys->epochs[EpochSys::tid].ui != NULL_EPOCH);\ - auto ret = ds->_esys->openwrite_pblk(this, ds->_esys->epochs[EpochSys::tid].ui);\ - ret->TOKEN_CONCAT(m_, n) = TOKEN_CONCAT(tmp_, n);\ - ds->_esys->register_update_pblk(ret, ds->_esys->epochs[EpochSys::tid].ui);\ - return ret;\ - }\ - template \ - T* TOKEN_CONCAT(set_, n)(const in_type& TOKEN_CONCAT(tmp_, n)){\ - assert(esys->epochs[EpochSys::tid].ui != NULL_EPOCH);\ - auto ret = esys->openwrite_pblk(this, esys->epochs[EpochSys::tid].ui);\ - ret->TOKEN_CONCAT(m_, n) = TOKEN_CONCAT(tmp_, n);\ - esys->register_update_pblk(ret, esys->epochs[EpochSys::tid].ui);\ - return ret;\ - }\ - /* set the field by the parameter. called only outside BEGIN_OP and END_OP */\ - template \ - void TOKEN_CONCAT(set_unsafe_, n)(Recoverable* ds, const in_type& TOKEN_CONCAT(tmp_, n)){\ - assert(ds->_esys->epochs[EpochSys::tid].ui == NULL_EPOCH);\ - TOKEN_CONCAT(m_, n) = TOKEN_CONCAT(tmp_, n);\ - }\ - template \ - void TOKEN_CONCAT(set_unsafe_, n)(const in_type& TOKEN_CONCAT(tmp_, n)){\ - assert(esys->epochs[EpochSys::tid].ui == NULL_EPOCH);\ - TOKEN_CONCAT(m_, n) = TOKEN_CONCAT(tmp_, n);\ - } - - /** - * using the type t, the name n and length s, generate a protected - * declaration for the field, as well as public getters and setters - */ - #define GENERATE_ARRAY(t, n, s, T)\ - /* declare the field, with its name prefixed by m_ */\ - protected:\ - t TOKEN_CONCAT(m_, n)[s];\ - /* get method open a pblk for read. 
*/\ - t TOKEN_CONCAT(get_, n)(Recoverable* ds, int i) const{\ - assert(ds->_esys->epochs[EpochSys::tid].ui != NULL_EPOCH);\ - return ds->_esys->openread_pblk(this, ds->_esys->epochs[EpochSys::tid].ui)->TOKEN_CONCAT(m_, n)[i];\ - }\ - t TOKEN_CONCAT(get_, n)(int i) const{\ - assert(esys->epochs[EpochSys::tid].ui != NULL_EPOCH);\ - return esys->openread_pblk(this, esys->epochs[EpochSys::tid].ui)->TOKEN_CONCAT(m_, n)[i];\ - }\ - /* get method open a pblk for read. Allows old-see-new reads. */\ - t TOKEN_CONCAT(get_unsafe_, n)(Recoverable* ds, int i) const{\ - assert(ds->_esys->epochs[EpochSys::tid].ui != NULL_EPOCH);\ - return ds->_esys->openread_pblk_unsafe(this, ds->_esys->epochs[EpochSys::tid].ui)->TOKEN_CONCAT(m_, n)[i];\ - }\ - t TOKEN_CONCAT(get_unsafe_, n)(int i) const{\ - assert(esys->epochs[EpochSys::tid].ui != NULL_EPOCH);\ - return esys->openread_pblk_unsafe(this, esys->epochs[EpochSys::tid].ui)->TOKEN_CONCAT(m_, n)[i];\ - }\ - /* set method open a pblk for write. return a new copy when necessary */\ - T* TOKEN_CONCAT(set_, n)(Recoverable* ds, int i, t TOKEN_CONCAT(tmp_, n)){\ - assert(ds->_esys->epochs[EpochSys::tid].ui != NULL_EPOCH);\ - auto ret = ds->_esys->openwrite_pblk(this, ds->_esys->epochs[EpochSys::tid].ui);\ - ret->TOKEN_CONCAT(m_, n)[i] = TOKEN_CONCAT(tmp_, n);\ - ds->_esys->register_update_pblk(ret, ds->_esys->epochs[EpochSys::tid].ui);\ - return ret;\ - }\ - T* TOKEN_CONCAT(set_, n)(int i, t TOKEN_CONCAT(tmp_, n)){\ - assert(esys->epochs[EpochSys::tid].ui != NULL_EPOCH);\ - auto ret = esys->openwrite_pblk(this, esys->epochs[EpochSys::tid].ui);\ - ret->TOKEN_CONCAT(m_, n)[i] = TOKEN_CONCAT(tmp_, n);\ - esys->register_update_pblk(ret, esys->epochs[EpochSys::tid].ui);\ - return ret;\ - } - inline std::unordered_map* recover(const int rec_thd=10){ return esys->recover(rec_thd); } diff --git a/src/persist/pnew.hpp b/src/persist/pnew.hpp index bf395c47..1808780a 100644 --- a/src/persist/pnew.hpp +++ b/src/persist/pnew.hpp @@ -1,5 +1,5 @@ // NOTE: 
don't include this file elsewhere! -// this is supposed to be a part of EpochSys.hpp +// this is supposed to be a part of Recoverable.hpp // TODO: replace `new` operator of T with // per-heap allocation and placement new. @@ -7,42 +7,42 @@ template T* pnew(){ T* ret = new T(); - register_alloc_pblk(ret, epochs[tid].ui); + _esys->register_alloc_pblk(ret); return ret; } template T* pnew(T1 a1){ T* ret = new T(a1); - register_alloc_pblk(ret, epochs[tid].ui); + _esys->register_alloc_pblk(ret); return ret; } template T* pnew(T1 a1, T2 a2){ T* ret = new T(a1, a2); - register_alloc_pblk(ret, epochs[tid].ui); + _esys->register_alloc_pblk(ret); return ret; } template T* pnew(T1 a1, T2 a2, T3 a3){ T* ret = new T(a1, a2, a3); - register_alloc_pblk(ret, epochs[tid].ui); + _esys->register_alloc_pblk(ret); return ret; } template T* pnew(T1 a1, T2 a2, T3 a3, T4 a4){ T* ret = new T(a1, a2, a3, a4); - register_alloc_pblk(ret, epochs[tid].ui); + _esys->register_alloc_pblk(ret); return ret; } template T* pnew(T1 a1, T2 a2, T3 a3, T4 a4, T5 a5){ T* ret = new T(a1, a2, a3, a4, a5); - register_alloc_pblk(ret, epochs[tid].ui); + _esys->register_alloc_pblk(ret); return ret; } @@ -50,7 +50,7 @@ template T* pnew(T1 a1, T2 a2, T3 a3, T4 a4, T5 a5, T6 a6){ T* ret = new T(a1, a2, a3, a4, a5, a6); - register_alloc_pblk(ret, epochs[tid].ui); + _esys->register_alloc_pblk(ret); return ret; } @@ -58,7 +58,7 @@ template T* pnew(T1 a1, T2 a2, T3 a3, T4 a4, T5 a5, T6 a6, T7 a7){ T* ret = new T(a1, a2, a3, a4, a5, a6, a7); - register_alloc_pblk(ret, epochs[tid].ui); + _esys->register_alloc_pblk(ret); return ret; } @@ -66,7 +66,7 @@ template T* pnew(T1 a1, T2 a2, T3 a3, T4 a4, T5 a5, T6 a6, T7 a7, T8 a8){ T* ret = new T(a1, a2, a3, a4, a5, a6, a7, a8); - register_alloc_pblk(ret, epochs[tid].ui); + _esys->register_alloc_pblk(ret); return ret; } @@ -74,7 +74,7 @@ template T* pnew(T1 a1, T2 a2, T3 a3, T4 a4, T5 a5, T6 a6, T7 a7, T8 a8, T9 a9){ T* ret = new T(a1, a2, a3, a4, a5, a6, a7, a8, a9); - 
register_alloc_pblk(ret, epochs[tid].ui); + _esys->register_alloc_pblk(ret); return ret; } @@ -82,7 +82,7 @@ template T* pnew(T1 a1, T2 a2, T3 a3, T4 a4, T5 a5, T6 a6, T7 a7, T8 a8, T9 a9, T10 a10){ T* ret = new T(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10); - register_alloc_pblk(ret, epochs[tid].ui); + _esys->register_alloc_pblk(ret); return ret; } From 10241de4fcc10293082b5832e74a2acc3a3e792f Mon Sep 17 00:00:00 2001 From: Haosen Wen Date: Thu, 12 Nov 2020 17:27:45 -0500 Subject: [PATCH 27/56] made MontageHashTable use new API --- src/persist/Recoverable.hpp | 6 +++--- src/rideables/MontageHashTable.hpp | 25 +++++++++++++------------ 2 files changed, 16 insertions(+), 15 deletions(-) diff --git a/src/persist/Recoverable.hpp b/src/persist/Recoverable.hpp index 90a9982b..9467ef8e 100644 --- a/src/persist/Recoverable.hpp +++ b/src/persist/Recoverable.hpp @@ -7,8 +7,8 @@ // TODO: report recover errors/exceptions class Recoverable{ -public: pds::EpochSys* _esys = nullptr; +public: // return num of blocks recovered. 
virtual int recover(bool simulated = false) = 0; Recoverable(GlobalTestConfig* gtc); @@ -68,8 +68,8 @@ class Recoverable{ #include "pnew.hpp" template - T* register_update_pblk(T* b){ - return _esys->register_update_pblk(b); + void register_update_pblk(T* b){ + _esys->register_update_pblk(b); } template void pdelete(T* b){ diff --git a/src/rideables/MontageHashTable.hpp b/src/rideables/MontageHashTable.hpp index 15b7dc00..d521c7ee 100644 --- a/src/rideables/MontageHashTable.hpp +++ b/src/rideables/MontageHashTable.hpp @@ -27,31 +27,32 @@ class MontageHashTable : public RMap, public Recoverable{ }__attribute__((aligned(CACHELINE_SIZE))); struct ListNode{ + MontageHashTable* ds; // Transient-to-persistent pointer Payload* payload = nullptr; // Transient-to-transient pointers ListNode* next = nullptr; ListNode(){} - ListNode(K key, V val){ - payload = PNEW(Payload, key, val); + ListNode(MontageHashTable* ds_, K key, V val): ds(ds_){ + payload = ds->pnew(key, val); } ListNode(Payload* _payload) : payload(_payload) {} // for recovery K get_key(){ assert(payload!=nullptr && "payload shouldn't be null"); // old-see-new never happens for locking ds - return (K)payload->get_unsafe_key(); + return (K)payload->get_unsafe_key(ds); } V get_val(){ assert(payload!=nullptr && "payload shouldn't be null"); - return (V)payload->get_unsafe_val(); + return (V)payload->get_unsafe_val(ds); } void set_val(V v){ assert(payload!=nullptr && "payload shouldn't be null"); - payload = payload->set_val(v); + payload = payload->set_val(ds, v); } ~ListNode(){ if (payload){ - PDELETE(payload); + ds->pdelete(payload); } } }__attribute__((aligned(CACHELINE_SIZE))); @@ -74,7 +75,7 @@ class MontageHashTable : public RMap, public Recoverable{ size_t idx=hash_fn(key)%idxSize; // while(true){ std::lock_guard lk(buckets[idx].lock); - BEGIN_OP_AUTOEND(); + MontageOpHolderReadOnly(this); // try{ ListNode* curr = buckets[idx].head.next; while(curr){ @@ -92,10 +93,10 @@ class MontageHashTable : public RMap, 
public Recoverable{ optional put(K key, V val, int tid){ size_t idx=hash_fn(key)%idxSize; - ListNode* new_node = new ListNode(key, val); + ListNode* new_node = new ListNode(this, key, val); // while(true){ std::lock_guard lk(buckets[idx].lock); - BEGIN_OP_AUTOEND(new_node->payload); + MontageOpHolder(this); // try{ ListNode* curr = buckets[idx].head.next; ListNode* prev = &buckets[idx].head; @@ -125,10 +126,10 @@ class MontageHashTable : public RMap, public Recoverable{ bool insert(K key, V val, int tid){ size_t idx=hash_fn(key)%idxSize; - ListNode* new_node = new ListNode(key, val); + ListNode* new_node = new ListNode(this, key, val); // while(true){ std::lock_guard lk(buckets[idx].lock); - BEGIN_OP_AUTOEND(new_node->payload); + MontageOpHolder(this); // try{ ListNode* curr = buckets[idx].head.next; ListNode* prev = &buckets[idx].head; @@ -163,7 +164,7 @@ class MontageHashTable : public RMap, public Recoverable{ size_t idx=hash_fn(key)%idxSize; // while(true){ std::lock_guard lk(buckets[idx].lock); - BEGIN_OP_AUTOEND(); + MontageOpHolder(this); // try{ ListNode* curr = buckets[idx].head.next; ListNode* prev = &buckets[idx].head; From 0f53b42914598f9d46fc06b6ac372f0d27c5e9a6 Mon Sep 17 00:00:00 2001 From: Haosen Wen Date: Thu, 12 Nov 2020 19:39:57 -0500 Subject: [PATCH 28/56] put dcss-related code into EpochSys --- src/persist/EpochSys.cpp | 17 +- src/persist/EpochSys.hpp | 352 ++++++++++++++++++++++++++++++++- src/persist/PersistStructs.cpp | 23 --- src/persist/PersistStructs.hpp | 161 --------------- src/persist/Recoverable.cpp | 6 +- src/persist/Recoverable.hpp | 1 + src/utils/DCSS.hpp | 200 ------------------- unit_test/dcss.cpp | 3 +- 8 files changed, 374 insertions(+), 389 deletions(-) delete mode 100644 src/persist/PersistStructs.cpp delete mode 100644 src/utils/DCSS.hpp diff --git a/src/persist/EpochSys.cpp b/src/persist/EpochSys.cpp index fc917dc3..c82954d0 100644 --- a/src/persist/EpochSys.cpp +++ b/src/persist/EpochSys.cpp @@ -1,5 +1,4 @@ #include 
"EpochSys.hpp" -#include "DCSS.hpp" #include #include @@ -120,6 +119,22 @@ namespace pds{ // } } + void sc_desc_t::try_complete(EpochSys* esys, uint64_t addr){ + nbptr_t _d = nbptr.load(); + int ret = 0; + if(_d.val!=addr) return; + if(in_progress(_d)){ + if(esys->check_epoch(cas_epoch)){ + ret = 2; + ret |= commit(_d); + } else { + ret = 4; + ret |= abort(_d); + } + } + cleanup(_d); + } + bool EpochSys::check_epoch(uint64_t c){ return c == global_epoch->load(std::memory_order_seq_cst); } diff --git a/src/persist/EpochSys.hpp b/src/persist/EpochSys.hpp index 2acb4301..fe02ae49 100644 --- a/src/persist/EpochSys.hpp +++ b/src/persist/EpochSys.hpp @@ -24,6 +24,195 @@ namespace pds{ +class EpochSys; + +extern EpochSys* esys; + +//////////////////////////////////////// +// counted pointer-related structures // +//////////////////////////////////////// + +/* + * Macro VISIBLE_READ determines which version of API will be used. + * Macro USE_TSX determines whether TSX (Intel HTM) will be used. + * + * We highly recommend you to use default invisible read version, + * since it doesn't need you to handle EpochVerifyException and you + * can call just load rather than load_verify throughout your program + * + * We provides following double-compare-single-swap (DCSS) API for + * nonblocking data structures to use: + * + * atomic_nbptr_t: atomic double word for storing pointers + * that point to nodes, which link payloads in. 
It contains following + * functions: + * + * store(T val): + * store 64-bit long data without sync; cnt doesn't increment + * + * store(nbptr_t d): store(d.val) + * + * nbptr_t load(): + * load nbptr without verifying epoch + * + * nbptr_t load_verify(): + * load nbptr and verify epoch, used as lin point; + * for invisible reads this won't verify epoch + * + * bool CAS(nbptr_t expected, T desired): + * CAS in desired value and increment cnt if expected + * matches current nbptr + * + * bool CAS_verify(nbptr_t expected, T desired): + * CAS in desired value and increment cnt if expected + * matches current nbptr and global epoch doesn't change + * since BEGIN_OP + */ + +struct EpochVerifyException : public std::exception { + const char * what () const throw () { + return "Epoch in which operation wants to linearize has passed; retry required."; + } +}; + +struct sc_desc_t; + +template +class atomic_nbptr_t; +class nbptr_t{ + template + friend class atomic_nbptr_t; + inline bool is_desc() const { + return (cnt & 3UL) == 1UL; + } + inline sc_desc_t* get_desc() const { + assert(is_desc()); + return reinterpret_cast(val); + } +public: + uint64_t val; + uint64_t cnt; + template + inline T get_val() const { + static_assert(sizeof(T) == sizeof(uint64_t), "sizes do not match"); + return reinterpret_cast(val); + } + nbptr_t(uint64_t v, uint64_t c) : val(v), cnt(c) {}; + nbptr_t() : nbptr_t(0, 0) {}; + + inline bool operator==(const nbptr_t & b) const{ + return val==b.val && cnt==b.cnt; + } + inline bool operator!=(const nbptr_t & b) const{ + return !operator==(b); + } +}__attribute__((aligned(16))); + +template +class atomic_nbptr_t{ + static_assert(sizeof(T) == sizeof(uint64_t), "sizes do not match"); +public: + // for cnt in nbptr: + // desc: ....01 + // real val: ....00 + std::atomic nbptr; + nbptr_t load(); + nbptr_t load_verify(); + inline T load_val(){ + return reinterpret_cast(load().val); + } + bool CAS_verify(nbptr_t expected, const T& desired); + inline bool 
CAS_verify(nbptr_t expected, const nbptr_t& desired){ + return CAS_verify(expected,desired.get_val()); + } + // CAS doesn't check epoch nor cnt + bool CAS(nbptr_t expected, const T& desired); + inline bool CAS(nbptr_t expected, const nbptr_t& desired){ + return CAS(expected,desired.get_val()); + } + void store(const T& desired); + inline void store(const nbptr_t& desired){ + store(desired.get_val()); + } + atomic_nbptr_t(const T& v) : nbptr(nbptr_t(reinterpret_cast(v), 0)){}; + atomic_nbptr_t() : atomic_nbptr_t(T()){}; +}; + +struct sc_desc_t{ +private: + // for cnt in nbptr: + // in progress: ....01 + // committed: ....10 + // aborted: ....11 + std::atomic nbptr; + const uint64_t old_val; + const uint64_t new_val; + const uint64_t cas_epoch; + inline bool abort(nbptr_t _d){ + // bring cnt from ..01 to ..11 + nbptr_t expected (_d.val, (_d.cnt & ~0x3UL) | 1UL); // in progress + nbptr_t desired(expected); + desired.cnt += 2; + return nbptr.compare_exchange_strong(expected, desired); + } + inline bool commit(nbptr_t _d){ + // bring cnt from ..01 to ..10 + nbptr_t expected (_d.val, (_d.cnt & ~0x3UL) | 1UL); // in progress + nbptr_t desired(expected); + desired.cnt += 1; + return nbptr.compare_exchange_strong(expected, desired); + } + inline bool committed(nbptr_t _d) const { + return (_d.cnt & 0x3UL) == 2UL; + } + inline bool in_progress(nbptr_t _d) const { + return (_d.cnt & 0x3UL) == 1UL; + } + inline bool match(nbptr_t old_d, nbptr_t new_d) const { + return ((old_d.cnt & ~0x3UL) == (new_d.cnt & ~0x3UL)) && + (old_d.val == new_d.val); + } + void cleanup(nbptr_t old_d){ + // must be called after desc is aborted or committed + nbptr_t new_d = nbptr.load(); + if(!match(old_d,new_d)) return; + assert(!in_progress(new_d)); + nbptr_t expected(reinterpret_cast(this),(new_d.cnt & ~0x3UL) | 1UL); + if(committed(new_d)) { + // bring cnt from ..10 to ..00 + reinterpret_cast*>( + new_d.val)->nbptr.compare_exchange_strong( + expected, + nbptr_t(new_val,new_d.cnt + 2)); + } else { 
+ //aborted + // bring cnt from ..11 to ..00 + reinterpret_cast*>( + new_d.val)->nbptr.compare_exchange_strong( + expected, + nbptr_t(old_val,new_d.cnt + 1)); + } + } +public: + inline bool committed() const { + return committed(nbptr.load()); + } + inline bool in_progress() const { + return in_progress(nbptr.load()); + } + // TODO: try_complete used to be inline. Try to make it inline again when refactoring is finished. + void try_complete(EpochSys* esys, uint64_t addr); + + sc_desc_t( uint64_t c, uint64_t a, uint64_t o, + uint64_t n, uint64_t e) : + nbptr(nbptr_t(a,c)), old_val(o), new_val(n), cas_epoch(e){}; + sc_desc_t() : sc_desc_t(0,0,0,0,0){}; +}; + +////////////////// +// Epoch System // +////////////////// + enum SysMode {ONLINE, RECOVER}; class EpochSys{ @@ -118,13 +307,17 @@ class EpochSys{ // API // ///////// + static void init_thread(int _tid){ + EpochSys::tid = _tid; + } + bool check_epoch(){ return check_epoch(epochs[tid].ui); } void begin_op(){ assert(epochs[tid].ui == NULL_EPOCH); - epochs[tid].ui = esys->begin_transaction(); + epochs[tid].ui = begin_transaction(); // TODO: any room for optimization here? // TODO: put pending_allocs-related stuff into operations? 
for (auto b = pending_allocs[tid].ui.begin(); @@ -551,6 +744,163 @@ T* EpochSys::openwrite_pblk(T* b, uint64_t c){ return b; } +template +void atomic_nbptr_t::store(const T& desired){ + // this function must be used only when there's no data race + nbptr_t r = nbptr.load(); + nbptr_t new_r(reinterpret_cast(desired),r.cnt); + nbptr.store(new_r); +} + +#ifdef VISIBLE_READ +// implementation of load and cas for visible reads + +template +nbptr_t atomic_nbptr_t::load(){ + nbptr_t r; + while(true){ + r = nbptr.load(); + nbptr_t ret(r.val,r.cnt+1); + if(nbptr.compare_exchange_strong(r, ret)) + return ret; + } +} + +template +nbptr_t atomic_nbptr_t::load_verify(){ + assert(esys->epochs[EpochSys::tid].ui != NULL_EPOCH); + nbptr_t r; + while(true){ + r = nbptr.load(); + if(esys->check_epoch(esys->epochs[EpochSys::tid].ui)){ + nbptr_t ret(r.val,r.cnt+1); + if(nbptr.compare_exchange_strong(r, ret)){ + return r; + } + } else { + throw EpochVerifyException(); + } + } +} + +template +bool atomic_nbptr_t::CAS_verify(nbptr_t expected, const T& desired){ + assert(esys->epochs[EpochSys::tid].ui != NULL_EPOCH); + if(esys->check_epoch(esys->epochs[EpochSys::tid].ui)){ + nbptr_t new_r(reinterpret_cast(desired),expected.cnt+1); + return nbptr.compare_exchange_strong(expected, new_r); + } else { + return false; + } +} + +template +bool atomic_nbptr_t::CAS(nbptr_t expected, const T& desired){ + nbptr_t new_r(reinterpret_cast(desired),expected.cnt+1); + return nbptr.compare_exchange_strong(expected, new_r); +} + +#else /* !VISIBLE_READ */ +/* implementation of load and cas for invisible reads */ + +template +nbptr_t atomic_nbptr_t::load(){ + nbptr_t r; + do { + r = nbptr.load(); + if(r.is_desc()) { + sc_desc_t* D = r.get_desc(); + D->try_complete(esys, reinterpret_cast(this)); + } + } while(r.is_desc()); + return r; +} + +template +nbptr_t atomic_nbptr_t::load_verify(){ + // invisible read doesn't need to verify epoch even if it's a + // linearization point + // this saves users from 
catching EpochVerifyException + return load(); +} + +// extern std::atomic abort_cnt; +// extern std::atomic total_cnt; + +template +bool atomic_nbptr_t::CAS_verify(nbptr_t expected, const T& desired){ + assert(esys->epochs[EpochSys::tid].ui != NULL_EPOCH); + // total_cnt.fetch_add(1); +#ifdef USE_TSX + unsigned status = _xbegin(); + if (status == _XBEGIN_STARTED) { + nbptr_t r = nbptr.load(); + if(!r.is_desc()){ + if( r.cnt!=expected.cnt || + r.val!=expected.val || + !esys->check_epoch(esys->epochs[EpochSys::tid].ui)){ + _xend(); + return false; + } else { + nbptr_t new_r (reinterpret_cast(desired), r.cnt+4); + nbptr.store(new_r); + _xend(); + return true; + } + } else { + // we only help complete descriptor, but not retry + _xend(); + r.get_desc()->try_complete(esys, reinterpret_cast(this)); + return false; + } + // execution won't reach here; program should have returned + assert(0); + } +#endif + // txn fails; fall back routine + // abort_cnt.fetch_add(1); + nbptr_t r = nbptr.load(); + if(r.is_desc()){ + sc_desc_t* D = r.get_desc(); + D->try_complete(esys, reinterpret_cast(this)); + return false; + } else { + if( r.cnt!=expected.cnt || + r.val!=expected.val) { + return false; + } + } + // now r.cnt must be ..00, and r.cnt+1 is ..01, which means "nbptr + // contains a descriptor" and "a descriptor is in progress" + assert((r.cnt & 3UL) == 0UL); + new (&esys->local_descs[EpochSys::tid].ui) sc_desc_t(r.cnt+1, + reinterpret_cast(this), + expected.val, + reinterpret_cast(desired), + esys->epochs[EpochSys::tid].ui); + nbptr_t new_r(reinterpret_cast(&esys->local_descs[EpochSys::tid].ui), r.cnt+1); + if(!nbptr.compare_exchange_strong(r,new_r)){ + return false; + } + esys->local_descs[EpochSys::tid].ui.try_complete(esys, reinterpret_cast(this)); + if(esys->local_descs[EpochSys::tid].ui.committed()) return true; + else return false; +} + +template +bool atomic_nbptr_t::CAS(nbptr_t expected, const T& desired){ + // CAS doesn't check epoch; just cas ptr to desired, with 
cnt+=4 + assert(!expected.is_desc()); + nbptr_t new_r(reinterpret_cast(desired), expected.cnt + 4); + if(!nbptr.compare_exchange_strong(expected,new_r)){ + return false; + } + return true; +} + +#endif /* !VISIBLE_READ */ + + } #endif \ No newline at end of file diff --git a/src/persist/PersistStructs.cpp b/src/persist/PersistStructs.cpp deleted file mode 100644 index 8e4a6583..00000000 --- a/src/persist/PersistStructs.cpp +++ /dev/null @@ -1,23 +0,0 @@ -#include "PersistStructs.hpp" -#include "EpochSys.hpp" -#include "Recoverable.hpp" - -namespace pds{ - -void sc_desc_t::try_complete(EpochSys* esys, uint64_t addr){ - nbptr_t _d = nbptr.load(); - int ret = 0; - if(_d.val!=addr) return; - if(in_progress(_d)){ - if(esys->check_epoch(cas_epoch)){ - ret = 2; - ret |= commit(_d); - } else { - ret = 4; - ret |= abort(_d); - } - } - cleanup(_d); -} - -} \ No newline at end of file diff --git a/src/persist/PersistStructs.hpp b/src/persist/PersistStructs.hpp index 25e61004..3f4378f5 100644 --- a/src/persist/PersistStructs.hpp +++ b/src/persist/PersistStructs.hpp @@ -1,9 +1,6 @@ #ifndef PERSIST_STRUCTS_HPP #define PERSIST_STRUCTS_HPP -// TODO: this may not be a good file name, -// as some structures are actually transient. 
- #include #include #include @@ -12,8 +9,6 @@ #include "Persistent.hpp" #include "common_macros.hpp" -class Recoverable; - namespace pds{ struct OldSeeNewException : public std::exception { const char * what () const throw () { @@ -97,162 +92,6 @@ namespace pds{ } }; - //////////////////////////////////////// - // counted pointer-related structures // - //////////////////////////////////////// - - struct EpochVerifyException : public std::exception { - const char * what () const throw () { - return "Epoch in which operation wants to linearize has passed; retry required."; - } - }; - - struct sc_desc_t; - template - class atomic_nbptr_t; - class nbptr_t{ - template - friend class atomic_nbptr_t; - inline bool is_desc() const { - return (cnt & 3UL) == 1UL; - } - inline sc_desc_t* get_desc() const { - assert(is_desc()); - return reinterpret_cast(val); - } - public: - uint64_t val; - uint64_t cnt; - template - inline T get_val() const { - static_assert(sizeof(T) == sizeof(uint64_t), "sizes do not match"); - return reinterpret_cast(val); - } - nbptr_t(uint64_t v, uint64_t c) : val(v), cnt(c) {}; - nbptr_t() : nbptr_t(0, 0) {}; - - inline bool operator==(const nbptr_t & b) const{ - return val==b.val && cnt==b.cnt; - } - inline bool operator!=(const nbptr_t & b) const{ - return !operator==(b); - } - }__attribute__((aligned(16))); - - extern EpochSys* esys; - - template - class atomic_nbptr_t{ - static_assert(sizeof(T) == sizeof(uint64_t), "sizes do not match"); - public: - // for cnt in nbptr: - // desc: ....01 - // real val: ....00 - std::atomic nbptr; - nbptr_t load(); - nbptr_t load_verify(); - inline T load_val(){ - return reinterpret_cast(load().val); - } - bool CAS_verify(nbptr_t expected, const T& desired); - inline bool CAS_verify(nbptr_t expected, const nbptr_t& desired){ - return CAS_verify(expected,desired.get_val()); - } - // CAS doesn't check epoch nor cnt - bool CAS(nbptr_t expected, const T& desired); - inline bool CAS(nbptr_t expected, const nbptr_t& 
desired){ - return CAS(expected,desired.get_val()); - } - void store(const T& desired); - inline void store(const nbptr_t& desired){ - store(desired.get_val()); - } - atomic_nbptr_t(const T& v) : nbptr(nbptr_t(reinterpret_cast(v), 0)){}; - atomic_nbptr_t() : atomic_nbptr_t(T()){}; - }; - - struct sc_desc_t{ - private: - // for cnt in nbptr: - // in progress: ....01 - // committed: ....10 - // aborted: ....11 - std::atomic nbptr; - const uint64_t old_val; - const uint64_t new_val; - const uint64_t cas_epoch; - inline bool abort(nbptr_t _d){ - // bring cnt from ..01 to ..11 - nbptr_t expected (_d.val, (_d.cnt & ~0x3UL) | 1UL); // in progress - nbptr_t desired(expected); - desired.cnt += 2; - return nbptr.compare_exchange_strong(expected, desired); - } - inline bool commit(nbptr_t _d){ - // bring cnt from ..01 to ..10 - nbptr_t expected (_d.val, (_d.cnt & ~0x3UL) | 1UL); // in progress - nbptr_t desired(expected); - desired.cnt += 1; - return nbptr.compare_exchange_strong(expected, desired); - } - inline bool committed(nbptr_t _d) const { - return (_d.cnt & 0x3UL) == 2UL; - } - inline bool in_progress(nbptr_t _d) const { - return (_d.cnt & 0x3UL) == 1UL; - } - inline bool match(nbptr_t old_d, nbptr_t new_d) const { - return ((old_d.cnt & ~0x3UL) == (new_d.cnt & ~0x3UL)) && - (old_d.val == new_d.val); - } - void cleanup(nbptr_t old_d){ - // must be called after desc is aborted or committed - nbptr_t new_d = nbptr.load(); - if(!match(old_d,new_d)) return; - assert(!in_progress(new_d)); - nbptr_t expected(reinterpret_cast(this),(new_d.cnt & ~0x3UL) | 1UL); - if(committed(new_d)) { - // bring cnt from ..10 to ..00 - reinterpret_cast*>( - new_d.val)->nbptr.compare_exchange_strong( - expected, - nbptr_t(new_val,new_d.cnt + 2)); - } else { - //aborted - // bring cnt from ..11 to ..00 - reinterpret_cast*>( - new_d.val)->nbptr.compare_exchange_strong( - expected, - nbptr_t(old_val,new_d.cnt + 1)); - } - } - public: - inline bool committed() const { - return 
committed(nbptr.load()); - } - inline bool in_progress() const { - return in_progress(nbptr.load()); - } - // TODO: try_complete used to be inline. Try to make it inline again when refactoring is finished. - // Hs: consider moving this into EpochSys if having trouble templatizing. - void try_complete(EpochSys* esys, uint64_t addr); - sc_desc_t( uint64_t c, uint64_t a, uint64_t o, - uint64_t n, uint64_t e) : - nbptr(nbptr_t(a,c)), old_val(o), new_val(n), cas_epoch(e){}; - sc_desc_t() : sc_desc_t(0,0,0,0,0){}; - }; - - template - void atomic_nbptr_t::store(const T& desired){ - // this function must be used only when there's no data race - nbptr_t r = nbptr.load(); - nbptr_t new_r(reinterpret_cast(desired),r.cnt); - nbptr.store(new_r); - } - - - - } #endif \ No newline at end of file diff --git a/src/persist/Recoverable.cpp b/src/persist/Recoverable.cpp index cb4644be..6135c963 100644 --- a/src/persist/Recoverable.cpp +++ b/src/persist/Recoverable.cpp @@ -16,5 +16,9 @@ Recoverable::~Recoverable(){ Persistent::finalize(); } void Recoverable::init_thread(GlobalTestConfig*, LocalTestConfig* ltc){ - pds::init_thread(ltc->tid); + _esys->init_thread(ltc->tid); +} + +void Recoverable::init_thread(int tid){ + _esys->init_thread(tid); } \ No newline at end of file diff --git a/src/persist/Recoverable.hpp b/src/persist/Recoverable.hpp index 9467ef8e..6bf5775e 100644 --- a/src/persist/Recoverable.hpp +++ b/src/persist/Recoverable.hpp @@ -15,6 +15,7 @@ class Recoverable{ ~Recoverable(); void init_thread(GlobalTestConfig*, LocalTestConfig* ltc); + void init_thread(int tid); bool check_epoch(){ return _esys->check_epoch(); } diff --git a/src/utils/DCSS.hpp b/src/utils/DCSS.hpp deleted file mode 100644 index 1bdaaead..00000000 --- a/src/utils/DCSS.hpp +++ /dev/null @@ -1,200 +0,0 @@ -/* - * Macro VISIBLE_READ determines which version of API will be used. - * Macro USE_TSX determines whether TSX (Intel HTM) will be used. 
- * - * We highly recommend you to use default invisible read version, - * since it doesn't need you to handle EpochVerifyException and you - * can call just load rather than load_verify throughout your program - * - * We provides following double-compare-single-swap (DCSS) API for - * nonblocking data structures to use: - * - * atomic_nbptr_t: atomic double word for storing pointers - * that point to nodes, which link payloads in. It contains following - * functions: - * - * store(T val): - * store 64-bit long data without sync; cnt doesn't increment - * - * store(nbptr_t d): store(d.val) - * - * nbptr_t load(): - * load nbptr without verifying epoch - * - * nbptr_t load_verify(): - * load nbptr and verify epoch, used as lin point; - * for invisible reads this won't verify epoch - * - * bool CAS(nbptr_t expected, T desired): - * CAS in desired value and increment cnt if expected - * matches current nbptr - * - * bool CAS_verify(nbptr_t expected, T desired): - * CAS in desired value and increment cnt if expected - * matches current nbptr and global epoch doesn't change - * since BEGIN_OP - */ - -#ifndef DCSS_HPP -#define DCSS_HPP -#include - -#include - -// #include "rtm.hpp" -#include -#include "PersistStructs.hpp" -#include "ConcurrentPrimitives.hpp" -#include "EpochSys.hpp" -namespace pds{ - -#ifdef VISIBLE_READ -// implementation of load and cas for visible reads - -template -nbptr_t atomic_nbptr_t::load(){ - nbptr_t r; - while(true){ - r = nbptr.load(); - nbptr_t ret(r.val,r.cnt+1); - if(nbptr.compare_exchange_strong(r, ret)) - return ret; - } -} - -template -nbptr_t atomic_nbptr_t::load_verify(){ - assert(esys->epochs[EpochSys::tid].ui != NULL_EPOCH); - nbptr_t r; - while(true){ - r = nbptr.load(); - if(esys->check_epoch(esys->epochs[EpochSys::tid].ui)){ - nbptr_t ret(r.val,r.cnt+1); - if(nbptr.compare_exchange_strong(r, ret)){ - return r; - } - } else { - throw EpochVerifyException(); - } - } -} - -template -bool atomic_nbptr_t::CAS_verify(nbptr_t expected, 
const T& desired){ - assert(esys->epochs[EpochSys::tid].ui != NULL_EPOCH); - if(esys->check_epoch(esys->epochs[EpochSys::tid].ui)){ - nbptr_t new_r(reinterpret_cast(desired),expected.cnt+1); - return nbptr.compare_exchange_strong(expected, new_r); - } else { - return false; - } -} - -template -bool atomic_nbptr_t::CAS(nbptr_t expected, const T& desired){ - nbptr_t new_r(reinterpret_cast(desired),expected.cnt+1); - return nbptr.compare_exchange_strong(expected, new_r); -} - -#else /* !VISIBLE_READ */ -/* implementation of load and cas for invisible reads */ - -template -nbptr_t atomic_nbptr_t::load(){ - nbptr_t r; - do { - r = nbptr.load(); - if(r.is_desc()) { - sc_desc_t* D = r.get_desc(); - D->try_complete(esys, reinterpret_cast(this)); - } - } while(r.is_desc()); - return r; -} - -template -nbptr_t atomic_nbptr_t::load_verify(){ - // invisible read doesn't need to verify epoch even if it's a - // linearization point - // this saves users from catching EpochVerifyException - return load(); -} - -// extern std::atomic abort_cnt; -// extern std::atomic total_cnt; - -template -bool atomic_nbptr_t::CAS_verify(nbptr_t expected, const T& desired){ - assert(esys->epochs[EpochSys::tid].ui != NULL_EPOCH); - // total_cnt.fetch_add(1); -#ifdef USE_TSX - unsigned status = _xbegin(); - if (status == _XBEGIN_STARTED) { - nbptr_t r = nbptr.load(); - if(!r.is_desc()){ - if( r.cnt!=expected.cnt || - r.val!=expected.val || - !esys->check_epoch(esys->epochs[EpochSys::tid].ui)){ - _xend(); - return false; - } else { - nbptr_t new_r (reinterpret_cast(desired), r.cnt+4); - nbptr.store(new_r); - _xend(); - return true; - } - } else { - // we only help complete descriptor, but not retry - _xend(); - r.get_desc()->try_complete(esys, reinterpret_cast(this)); - return false; - } - // execution won't reach here; program should have returned - assert(0); - } -#endif - // txn fails; fall back routine - // abort_cnt.fetch_add(1); - nbptr_t r = nbptr.load(); - if(r.is_desc()){ - sc_desc_t* D = 
r.get_desc(); - D->try_complete(esys, reinterpret_cast(this)); - return false; - } else { - if( r.cnt!=expected.cnt || - r.val!=expected.val) { - return false; - } - } - // now r.cnt must be ..00, and r.cnt+1 is ..01, which means "nbptr - // contains a descriptor" and "a descriptor is in progress" - assert((r.cnt & 3UL) == 0UL); - new (&esys->local_descs[EpochSys::tid].ui) sc_desc_t(r.cnt+1, - reinterpret_cast(this), - expected.val, - reinterpret_cast(desired), - esys->epochs[EpochSys::tid].ui); - nbptr_t new_r(reinterpret_cast(&esys->local_descs[EpochSys::tid].ui), r.cnt+1); - if(!nbptr.compare_exchange_strong(r,new_r)){ - return false; - } - esys->local_descs[EpochSys::tid].ui.try_complete(esys, reinterpret_cast(this)); - if(esys->local_descs[EpochSys::tid].ui.committed()) return true; - else return false; -} - -template -bool atomic_nbptr_t::CAS(nbptr_t expected, const T& desired){ - // CAS doesn't check epoch; just cas ptr to desired, with cnt+=4 - assert(!expected.is_desc()); - nbptr_t new_r(reinterpret_cast(desired), expected.cnt + 4); - if(!nbptr.compare_exchange_strong(expected,new_r)){ - return false; - } - return true; -} - -#endif /* !VISIBLE_READ */ - -} -#endif \ No newline at end of file diff --git a/unit_test/dcss.cpp b/unit_test/dcss.cpp index f0e0d4d2..d4549f16 100644 --- a/unit_test/dcss.cpp +++ b/unit_test/dcss.cpp @@ -1,6 +1,5 @@ #include "Persistent.hpp" -#include "persist_struct_api.hpp" -#include "DCSS.hpp" +#include "Recoverable.hpp" #include "TestConfig.hpp" #include #include From 5e198ffef9fc778b78dd6f47474367cbf0c23265 Mon Sep 17 00:00:00 2001 From: Haosen Wen Date: Thu, 12 Nov 2020 20:07:34 -0500 Subject: [PATCH 29/56] moved dcss-related code into Recoverable.hpp/cpp --- src/persist/EpochSys.cpp | 16 -- src/persist/EpochSys.hpp | 341 +-------------------------- src/persist/PersistStructs.hpp | 180 ++++++++++++++ src/persist/Recoverable.cpp | 20 ++ src/persist/Recoverable.hpp | 159 +++++++++++++ src/rideables/MontageHashTable.hpp | 8 +- 
src/rideables/MontageLfHashTable.hpp | 1 - src/rideables/MontageMSQueue.hpp | 1 - 8 files changed, 365 insertions(+), 361 deletions(-) diff --git a/src/persist/EpochSys.cpp b/src/persist/EpochSys.cpp index c82954d0..3e8d6e57 100644 --- a/src/persist/EpochSys.cpp +++ b/src/persist/EpochSys.cpp @@ -119,22 +119,6 @@ namespace pds{ // } } - void sc_desc_t::try_complete(EpochSys* esys, uint64_t addr){ - nbptr_t _d = nbptr.load(); - int ret = 0; - if(_d.val!=addr) return; - if(in_progress(_d)){ - if(esys->check_epoch(cas_epoch)){ - ret = 2; - ret |= commit(_d); - } else { - ret = 4; - ret |= abort(_d); - } - } - cleanup(_d); - } - bool EpochSys::check_epoch(uint64_t c){ return c == global_epoch->load(std::memory_order_seq_cst); } diff --git a/src/persist/EpochSys.hpp b/src/persist/EpochSys.hpp index fe02ae49..c80eac81 100644 --- a/src/persist/EpochSys.hpp +++ b/src/persist/EpochSys.hpp @@ -24,197 +24,14 @@ namespace pds{ -class EpochSys; - -extern EpochSys* esys; - -//////////////////////////////////////// -// counted pointer-related structures // -//////////////////////////////////////// - -/* - * Macro VISIBLE_READ determines which version of API will be used. - * Macro USE_TSX determines whether TSX (Intel HTM) will be used. - * - * We highly recommend you to use default invisible read version, - * since it doesn't need you to handle EpochVerifyException and you - * can call just load rather than load_verify throughout your program - * - * We provides following double-compare-single-swap (DCSS) API for - * nonblocking data structures to use: - * - * atomic_nbptr_t: atomic double word for storing pointers - * that point to nodes, which link payloads in. 
It contains following - * functions: - * - * store(T val): - * store 64-bit long data without sync; cnt doesn't increment - * - * store(nbptr_t d): store(d.val) - * - * nbptr_t load(): - * load nbptr without verifying epoch - * - * nbptr_t load_verify(): - * load nbptr and verify epoch, used as lin point; - * for invisible reads this won't verify epoch - * - * bool CAS(nbptr_t expected, T desired): - * CAS in desired value and increment cnt if expected - * matches current nbptr - * - * bool CAS_verify(nbptr_t expected, T desired): - * CAS in desired value and increment cnt if expected - * matches current nbptr and global epoch doesn't change - * since BEGIN_OP - */ - -struct EpochVerifyException : public std::exception { - const char * what () const throw () { - return "Epoch in which operation wants to linearize has passed; retry required."; - } -}; - -struct sc_desc_t; - -template -class atomic_nbptr_t; -class nbptr_t{ - template - friend class atomic_nbptr_t; - inline bool is_desc() const { - return (cnt & 3UL) == 1UL; - } - inline sc_desc_t* get_desc() const { - assert(is_desc()); - return reinterpret_cast(val); - } -public: - uint64_t val; - uint64_t cnt; - template - inline T get_val() const { - static_assert(sizeof(T) == sizeof(uint64_t), "sizes do not match"); - return reinterpret_cast(val); - } - nbptr_t(uint64_t v, uint64_t c) : val(v), cnt(c) {}; - nbptr_t() : nbptr_t(0, 0) {}; - - inline bool operator==(const nbptr_t & b) const{ - return val==b.val && cnt==b.cnt; - } - inline bool operator!=(const nbptr_t & b) const{ - return !operator==(b); - } -}__attribute__((aligned(16))); - -template -class atomic_nbptr_t{ - static_assert(sizeof(T) == sizeof(uint64_t), "sizes do not match"); -public: - // for cnt in nbptr: - // desc: ....01 - // real val: ....00 - std::atomic nbptr; - nbptr_t load(); - nbptr_t load_verify(); - inline T load_val(){ - return reinterpret_cast(load().val); - } - bool CAS_verify(nbptr_t expected, const T& desired); - inline bool 
CAS_verify(nbptr_t expected, const nbptr_t& desired){ - return CAS_verify(expected,desired.get_val()); - } - // CAS doesn't check epoch nor cnt - bool CAS(nbptr_t expected, const T& desired); - inline bool CAS(nbptr_t expected, const nbptr_t& desired){ - return CAS(expected,desired.get_val()); - } - void store(const T& desired); - inline void store(const nbptr_t& desired){ - store(desired.get_val()); - } - atomic_nbptr_t(const T& v) : nbptr(nbptr_t(reinterpret_cast(v), 0)){}; - atomic_nbptr_t() : atomic_nbptr_t(T()){}; -}; - -struct sc_desc_t{ -private: - // for cnt in nbptr: - // in progress: ....01 - // committed: ....10 - // aborted: ....11 - std::atomic nbptr; - const uint64_t old_val; - const uint64_t new_val; - const uint64_t cas_epoch; - inline bool abort(nbptr_t _d){ - // bring cnt from ..01 to ..11 - nbptr_t expected (_d.val, (_d.cnt & ~0x3UL) | 1UL); // in progress - nbptr_t desired(expected); - desired.cnt += 2; - return nbptr.compare_exchange_strong(expected, desired); - } - inline bool commit(nbptr_t _d){ - // bring cnt from ..01 to ..10 - nbptr_t expected (_d.val, (_d.cnt & ~0x3UL) | 1UL); // in progress - nbptr_t desired(expected); - desired.cnt += 1; - return nbptr.compare_exchange_strong(expected, desired); - } - inline bool committed(nbptr_t _d) const { - return (_d.cnt & 0x3UL) == 2UL; - } - inline bool in_progress(nbptr_t _d) const { - return (_d.cnt & 0x3UL) == 1UL; - } - inline bool match(nbptr_t old_d, nbptr_t new_d) const { - return ((old_d.cnt & ~0x3UL) == (new_d.cnt & ~0x3UL)) && - (old_d.val == new_d.val); - } - void cleanup(nbptr_t old_d){ - // must be called after desc is aborted or committed - nbptr_t new_d = nbptr.load(); - if(!match(old_d,new_d)) return; - assert(!in_progress(new_d)); - nbptr_t expected(reinterpret_cast(this),(new_d.cnt & ~0x3UL) | 1UL); - if(committed(new_d)) { - // bring cnt from ..10 to ..00 - reinterpret_cast*>( - new_d.val)->nbptr.compare_exchange_strong( - expected, - nbptr_t(new_val,new_d.cnt + 2)); - } else { 
- //aborted - // bring cnt from ..11 to ..00 - reinterpret_cast*>( - new_d.val)->nbptr.compare_exchange_strong( - expected, - nbptr_t(old_val,new_d.cnt + 1)); - } - } -public: - inline bool committed() const { - return committed(nbptr.load()); - } - inline bool in_progress() const { - return in_progress(nbptr.load()); - } - // TODO: try_complete used to be inline. Try to make it inline again when refactoring is finished. - void try_complete(EpochSys* esys, uint64_t addr); - - sc_desc_t( uint64_t c, uint64_t a, uint64_t o, - uint64_t n, uint64_t e) : - nbptr(nbptr_t(a,c)), old_val(o), new_val(n), cas_epoch(e){}; - sc_desc_t() : sc_desc_t(0,0,0,0,0){}; -}; - ////////////////// // Epoch System // ////////////////// enum SysMode {ONLINE, RECOVER}; +struct sc_desc_t; + class EpochSys{ private: // persistent fields: @@ -744,161 +561,7 @@ T* EpochSys::openwrite_pblk(T* b, uint64_t c){ return b; } -template -void atomic_nbptr_t::store(const T& desired){ - // this function must be used only when there's no data race - nbptr_t r = nbptr.load(); - nbptr_t new_r(reinterpret_cast(desired),r.cnt); - nbptr.store(new_r); -} - -#ifdef VISIBLE_READ -// implementation of load and cas for visible reads - -template -nbptr_t atomic_nbptr_t::load(){ - nbptr_t r; - while(true){ - r = nbptr.load(); - nbptr_t ret(r.val,r.cnt+1); - if(nbptr.compare_exchange_strong(r, ret)) - return ret; - } -} - -template -nbptr_t atomic_nbptr_t::load_verify(){ - assert(esys->epochs[EpochSys::tid].ui != NULL_EPOCH); - nbptr_t r; - while(true){ - r = nbptr.load(); - if(esys->check_epoch(esys->epochs[EpochSys::tid].ui)){ - nbptr_t ret(r.val,r.cnt+1); - if(nbptr.compare_exchange_strong(r, ret)){ - return r; - } - } else { - throw EpochVerifyException(); - } - } -} - -template -bool atomic_nbptr_t::CAS_verify(nbptr_t expected, const T& desired){ - assert(esys->epochs[EpochSys::tid].ui != NULL_EPOCH); - if(esys->check_epoch(esys->epochs[EpochSys::tid].ui)){ - nbptr_t 
new_r(reinterpret_cast(desired),expected.cnt+1); - return nbptr.compare_exchange_strong(expected, new_r); - } else { - return false; - } -} - -template -bool atomic_nbptr_t::CAS(nbptr_t expected, const T& desired){ - nbptr_t new_r(reinterpret_cast(desired),expected.cnt+1); - return nbptr.compare_exchange_strong(expected, new_r); -} - -#else /* !VISIBLE_READ */ -/* implementation of load and cas for invisible reads */ - -template -nbptr_t atomic_nbptr_t::load(){ - nbptr_t r; - do { - r = nbptr.load(); - if(r.is_desc()) { - sc_desc_t* D = r.get_desc(); - D->try_complete(esys, reinterpret_cast(this)); - } - } while(r.is_desc()); - return r; -} - -template -nbptr_t atomic_nbptr_t::load_verify(){ - // invisible read doesn't need to verify epoch even if it's a - // linearization point - // this saves users from catching EpochVerifyException - return load(); -} - -// extern std::atomic abort_cnt; -// extern std::atomic total_cnt; - -template -bool atomic_nbptr_t::CAS_verify(nbptr_t expected, const T& desired){ - assert(esys->epochs[EpochSys::tid].ui != NULL_EPOCH); - // total_cnt.fetch_add(1); -#ifdef USE_TSX - unsigned status = _xbegin(); - if (status == _XBEGIN_STARTED) { - nbptr_t r = nbptr.load(); - if(!r.is_desc()){ - if( r.cnt!=expected.cnt || - r.val!=expected.val || - !esys->check_epoch(esys->epochs[EpochSys::tid].ui)){ - _xend(); - return false; - } else { - nbptr_t new_r (reinterpret_cast(desired), r.cnt+4); - nbptr.store(new_r); - _xend(); - return true; - } - } else { - // we only help complete descriptor, but not retry - _xend(); - r.get_desc()->try_complete(esys, reinterpret_cast(this)); - return false; - } - // execution won't reach here; program should have returned - assert(0); - } -#endif - // txn fails; fall back routine - // abort_cnt.fetch_add(1); - nbptr_t r = nbptr.load(); - if(r.is_desc()){ - sc_desc_t* D = r.get_desc(); - D->try_complete(esys, reinterpret_cast(this)); - return false; - } else { - if( r.cnt!=expected.cnt || - r.val!=expected.val) { 
- return false; - } - } - // now r.cnt must be ..00, and r.cnt+1 is ..01, which means "nbptr - // contains a descriptor" and "a descriptor is in progress" - assert((r.cnt & 3UL) == 0UL); - new (&esys->local_descs[EpochSys::tid].ui) sc_desc_t(r.cnt+1, - reinterpret_cast(this), - expected.val, - reinterpret_cast(desired), - esys->epochs[EpochSys::tid].ui); - nbptr_t new_r(reinterpret_cast(&esys->local_descs[EpochSys::tid].ui), r.cnt+1); - if(!nbptr.compare_exchange_strong(r,new_r)){ - return false; - } - esys->local_descs[EpochSys::tid].ui.try_complete(esys, reinterpret_cast(this)); - if(esys->local_descs[EpochSys::tid].ui.committed()) return true; - else return false; -} - -template -bool atomic_nbptr_t::CAS(nbptr_t expected, const T& desired){ - // CAS doesn't check epoch; just cas ptr to desired, with cnt+=4 - assert(!expected.is_desc()); - nbptr_t new_r(reinterpret_cast(desired), expected.cnt + 4); - if(!nbptr.compare_exchange_strong(expected,new_r)){ - return false; - } - return true; -} -#endif /* !VISIBLE_READ */ } diff --git a/src/persist/PersistStructs.hpp b/src/persist/PersistStructs.hpp index 3f4378f5..d3a2b1a4 100644 --- a/src/persist/PersistStructs.hpp +++ b/src/persist/PersistStructs.hpp @@ -92,6 +92,186 @@ namespace pds{ } }; + //////////////////////////////////////// + // counted pointer-related structures // + //////////////////////////////////////// + + /* + * Macro VISIBLE_READ determines which version of API will be used. + * Macro USE_TSX determines whether TSX (Intel HTM) will be used. + * + * We highly recommend you to use default invisible read version, + * since it doesn't need you to handle EpochVerifyException and you + * can call just load rather than load_verify throughout your program + * + * We provides following double-compare-single-swap (DCSS) API for + * nonblocking data structures to use: + * + * atomic_nbptr_t: atomic double word for storing pointers + * that point to nodes, which link payloads in. 
It contains following + * functions: + * + * store(T val): + * store 64-bit long data without sync; cnt doesn't increment + * + * store(nbptr_t d): store(d.val) + * + * nbptr_t load(): + * load nbptr without verifying epoch + * + * nbptr_t load_verify(): + * load nbptr and verify epoch, used as lin point; + * for invisible reads this won't verify epoch + * + * bool CAS(nbptr_t expected, T desired): + * CAS in desired value and increment cnt if expected + * matches current nbptr + * + * bool CAS_verify(nbptr_t expected, T desired): + * CAS in desired value and increment cnt if expected + * matches current nbptr and global epoch doesn't change + * since BEGIN_OP + */ + + struct EpochVerifyException : public std::exception { + const char * what () const throw () { + return "Epoch in which operation wants to linearize has passed; retry required."; + } + }; + + struct sc_desc_t; + + template + class atomic_nbptr_t; + class nbptr_t{ + template + friend class atomic_nbptr_t; + inline bool is_desc() const { + return (cnt & 3UL) == 1UL; + } + inline sc_desc_t* get_desc() const { + assert(is_desc()); + return reinterpret_cast(val); + } + public: + uint64_t val; + uint64_t cnt; + template + inline T get_val() const { + static_assert(sizeof(T) == sizeof(uint64_t), "sizes do not match"); + return reinterpret_cast(val); + } + nbptr_t(uint64_t v, uint64_t c) : val(v), cnt(c) {}; + nbptr_t() : nbptr_t(0, 0) {}; + + inline bool operator==(const nbptr_t & b) const{ + return val==b.val && cnt==b.cnt; + } + inline bool operator!=(const nbptr_t & b) const{ + return !operator==(b); + } + }__attribute__((aligned(16))); + + template + class atomic_nbptr_t{ + static_assert(sizeof(T) == sizeof(uint64_t), "sizes do not match"); + public: + // for cnt in nbptr: + // desc: ....01 + // real val: ....00 + std::atomic nbptr; + nbptr_t load(); + nbptr_t load_verify(); + inline T load_val(){ + return reinterpret_cast(load().val); + } + bool CAS_verify(nbptr_t expected, const T& desired); + inline 
bool CAS_verify(nbptr_t expected, const nbptr_t& desired){ + return CAS_verify(expected,desired.get_val()); + } + // CAS doesn't check epoch nor cnt + bool CAS(nbptr_t expected, const T& desired); + inline bool CAS(nbptr_t expected, const nbptr_t& desired){ + return CAS(expected,desired.get_val()); + } + void store(const T& desired); + inline void store(const nbptr_t& desired){ + store(desired.get_val()); + } + atomic_nbptr_t(const T& v) : nbptr(nbptr_t(reinterpret_cast(v), 0)){}; + atomic_nbptr_t() : atomic_nbptr_t(T()){}; + }; + + struct sc_desc_t{ + private: + // for cnt in nbptr: + // in progress: ....01 + // committed: ....10 + // aborted: ....11 + std::atomic nbptr; + const uint64_t old_val; + const uint64_t new_val; + const uint64_t cas_epoch; + inline bool abort(nbptr_t _d){ + // bring cnt from ..01 to ..11 + nbptr_t expected (_d.val, (_d.cnt & ~0x3UL) | 1UL); // in progress + nbptr_t desired(expected); + desired.cnt += 2; + return nbptr.compare_exchange_strong(expected, desired); + } + inline bool commit(nbptr_t _d){ + // bring cnt from ..01 to ..10 + nbptr_t expected (_d.val, (_d.cnt & ~0x3UL) | 1UL); // in progress + nbptr_t desired(expected); + desired.cnt += 1; + return nbptr.compare_exchange_strong(expected, desired); + } + inline bool committed(nbptr_t _d) const { + return (_d.cnt & 0x3UL) == 2UL; + } + inline bool in_progress(nbptr_t _d) const { + return (_d.cnt & 0x3UL) == 1UL; + } + inline bool match(nbptr_t old_d, nbptr_t new_d) const { + return ((old_d.cnt & ~0x3UL) == (new_d.cnt & ~0x3UL)) && + (old_d.val == new_d.val); + } + void cleanup(nbptr_t old_d){ + // must be called after desc is aborted or committed + nbptr_t new_d = nbptr.load(); + if(!match(old_d,new_d)) return; + assert(!in_progress(new_d)); + nbptr_t expected(reinterpret_cast(this),(new_d.cnt & ~0x3UL) | 1UL); + if(committed(new_d)) { + // bring cnt from ..10 to ..00 + reinterpret_cast*>( + new_d.val)->nbptr.compare_exchange_strong( + expected, + nbptr_t(new_val,new_d.cnt + 2)); + 
} else { + //aborted + // bring cnt from ..11 to ..00 + reinterpret_cast*>( + new_d.val)->nbptr.compare_exchange_strong( + expected, + nbptr_t(old_val,new_d.cnt + 1)); + } + } + public: + inline bool committed() const { + return committed(nbptr.load()); + } + inline bool in_progress() const { + return in_progress(nbptr.load()); + } + // TODO: try_complete used to be inline. Try to make it inline again when refactoring is finished. + void try_complete(pds::EpochSys* esys, uint64_t addr); + + sc_desc_t( uint64_t c, uint64_t a, uint64_t o, + uint64_t n, uint64_t e) : + nbptr(nbptr_t(a,c)), old_val(o), new_val(n), cas_epoch(e){}; + sc_desc_t() : sc_desc_t(0,0,0,0,0){}; + }; } #endif \ No newline at end of file diff --git a/src/persist/Recoverable.cpp b/src/persist/Recoverable.cpp index 6135c963..a80683aa 100644 --- a/src/persist/Recoverable.cpp +++ b/src/persist/Recoverable.cpp @@ -21,4 +21,24 @@ void Recoverable::init_thread(GlobalTestConfig*, LocalTestConfig* ltc){ void Recoverable::init_thread(int tid){ _esys->init_thread(tid); +} + +namespace pds{ + + void sc_desc_t::try_complete(pds::EpochSys* esys, uint64_t addr){ + nbptr_t _d = nbptr.load(); + int ret = 0; + if(_d.val!=addr) return; + if(in_progress(_d)){ + if(pds::esys->check_epoch(cas_epoch)){ + ret = 2; + ret |= commit(_d); + } else { + ret = 4; + ret |= abort(_d); + } + } + cleanup(_d); + } + } \ No newline at end of file diff --git a/src/persist/Recoverable.hpp b/src/persist/Recoverable.hpp index 6bf5775e..ffb100f2 100644 --- a/src/persist/Recoverable.hpp +++ b/src/persist/Recoverable.hpp @@ -206,4 +206,163 @@ T* TOKEN_CONCAT(set_, n)(int i, t TOKEN_CONCAT(tmp_, n)){\ } +namespace pds{ + + template + void atomic_nbptr_t::store(const T& desired){ + // this function must be used only when there's no data race + nbptr_t r = nbptr.load(); + nbptr_t new_r(reinterpret_cast(desired),r.cnt); + nbptr.store(new_r); + } + +#ifdef VISIBLE_READ + // implementation of load and cas for visible reads + + template + nbptr_t 
atomic_nbptr_t::load(){ + nbptr_t r; + while(true){ + r = nbptr.load(); + nbptr_t ret(r.val,r.cnt+1); + if(nbptr.compare_exchange_strong(r, ret)) + return ret; + } + } + + template + nbptr_t atomic_nbptr_t::load_verify(){ + assert(pds::esys->epochs[pds::EpochSys::tid].ui != NULL_EPOCH); + nbptr_t r; + while(true){ + r = nbptr.load(); + if(pds::esys->check_epoch(pds::esys->epochs[pds::EpochSys::tid].ui)){ + nbptr_t ret(r.val,r.cnt+1); + if(nbptr.compare_exchange_strong(r, ret)){ + return r; + } + } else { + throw EpochVerifyException(); + } + } + } + + template + bool atomic_nbptr_t::CAS_verify(nbptr_t expected, const T& desired){ + assert(pds::esys->epochs[pds::EpochSys::tid].ui != NULL_EPOCH); + if(pds::esys->check_epoch(pds::esys->epochs[pds::EpochSys::tid].ui)){ + nbptr_t new_r(reinterpret_cast(desired),expected.cnt+1); + return nbptr.compare_exchange_strong(expected, new_r); + } else { + return false; + } + } + + template + bool atomic_nbptr_t::CAS(nbptr_t expected, const T& desired){ + nbptr_t new_r(reinterpret_cast(desired),expected.cnt+1); + return nbptr.compare_exchange_strong(expected, new_r); + } + +#else /* !VISIBLE_READ */ + /* implementation of load and cas for invisible reads */ + + template + nbptr_t atomic_nbptr_t::load(){ + nbptr_t r; + do { + r = nbptr.load(); + if(r.is_desc()) { + sc_desc_t* D = r.get_desc(); + D->try_complete(pds::esys, reinterpret_cast(this)); + } + } while(r.is_desc()); + return r; + } + + template + nbptr_t atomic_nbptr_t::load_verify(){ + // invisible read doesn't need to verify epoch even if it's a + // linearization point + // this saves users from catching EpochVerifyException + return load(); + } + + // extern std::atomic abort_cnt; + // extern std::atomic total_cnt; + + template + bool atomic_nbptr_t::CAS_verify(nbptr_t expected, const T& desired){ + assert(pds::esys->epochs[pds::EpochSys::tid].ui != NULL_EPOCH); + // total_cnt.fetch_add(1); +#ifdef USE_TSX + unsigned status = _xbegin(); + if (status == _XBEGIN_STARTED) 
{ + nbptr_t r = nbptr.load(); + if(!r.is_desc()){ + if( r.cnt!=expected.cnt || + r.val!=expected.val || + !pds::esys->check_epoch(pds::esys->epochs[pds::EpochSys::tid].ui)){ + _xend(); + return false; + } else { + nbptr_t new_r (reinterpret_cast(desired), r.cnt+4); + nbptr.store(new_r); + _xend(); + return true; + } + } else { + // we only help complete descriptor, but not retry + _xend(); + r.get_desc()->try_complete(pds::esys, reinterpret_cast(this)); + return false; + } + // execution won't reach here; program should have returned + assert(0); + } +#endif + // txn fails; fall back routine + // abort_cnt.fetch_add(1); + nbptr_t r = nbptr.load(); + if(r.is_desc()){ + sc_desc_t* D = r.get_desc(); + D->try_complete(pds::esys, reinterpret_cast(this)); + return false; + } else { + if( r.cnt!=expected.cnt || + r.val!=expected.val) { + return false; + } + } + // now r.cnt must be ..00, and r.cnt+1 is ..01, which means "nbptr + // contains a descriptor" and "a descriptor is in progress" + assert((r.cnt & 3UL) == 0UL); + new (&pds::esys->local_descs[pds::EpochSys::tid].ui) sc_desc_t(r.cnt+1, + reinterpret_cast(this), + expected.val, + reinterpret_cast(desired), + pds::esys->epochs[pds::EpochSys::tid].ui); + nbptr_t new_r(reinterpret_cast(&pds::esys->local_descs[pds::EpochSys::tid].ui), r.cnt+1); + if(!nbptr.compare_exchange_strong(r,new_r)){ + return false; + } + pds::esys->local_descs[pds::EpochSys::tid].ui.try_complete(pds::esys, reinterpret_cast(this)); + if(pds::esys->local_descs[pds::EpochSys::tid].ui.committed()) return true; + else return false; + } + + template + bool atomic_nbptr_t::CAS(nbptr_t expected, const T& desired){ + // CAS doesn't check epoch; just cas ptr to desired, with cnt+=4 + assert(!expected.is_desc()); + nbptr_t new_r(reinterpret_cast(desired), expected.cnt + 4); + if(!nbptr.compare_exchange_strong(expected,new_r)){ + return false; + } + return true; + } + +#endif /* !VISIBLE_READ */ +} // namespace pds + #endif \ No newline at end of file diff 
--git a/src/rideables/MontageHashTable.hpp b/src/rideables/MontageHashTable.hpp index d521c7ee..4cdb0834 100644 --- a/src/rideables/MontageHashTable.hpp +++ b/src/rideables/MontageHashTable.hpp @@ -205,10 +205,10 @@ class MontageHashTable : public RMap, public Recoverable{ int recover(bool simulated){ if (simulated){ - pds::recover_mode(); // PDELETE --> noop + recover_mode(); // PDELETE --> noop // clear transient structures. clear(); - pds::online_mode(); // re-enable PDELETE. + online_mode(); // re-enable PDELETE. } int rec_cnt = 0; @@ -217,7 +217,7 @@ class MontageHashTable : public RMap, public Recoverable{ rec_thd = stoi(gtc->getEnv("RecoverThread")); } auto begin = chrono::high_resolution_clock::now(); - std::unordered_map* recovered = pds::recover(rec_thd); + std::unordered_map* recovered = Recoverable::recover(rec_thd); auto end = chrono::high_resolution_clock::now(); auto dur = end - begin; auto dur_ms = std::chrono::duration_cast(dur).count(); @@ -237,7 +237,7 @@ class MontageHashTable : public RMap, public Recoverable{ begin = chrono::high_resolution_clock::now(); #pragma omp parallel num_threads(rec_thd) { - pds::init_thread(omp_get_thread_num()); + Recoverable::init_thread(omp_get_thread_num()); #pragma omp for for(size_t i = 0; i < payloadVector.size(); ++i){ //re-insert payload. 
diff --git a/src/rideables/MontageLfHashTable.hpp b/src/rideables/MontageLfHashTable.hpp index 32ef2105..e017698d 100644 --- a/src/rideables/MontageLfHashTable.hpp +++ b/src/rideables/MontageLfHashTable.hpp @@ -17,7 +17,6 @@ #include "RCUTracker.hpp" #include "CustomTypes.hpp" #include "Recoverable.hpp" -#include "DCSS.hpp" template class MontageLfHashTable : public RMap, Recoverable{ diff --git a/src/rideables/MontageMSQueue.hpp b/src/rideables/MontageMSQueue.hpp index a2dd4f90..78e9a9e8 100644 --- a/src/rideables/MontageMSQueue.hpp +++ b/src/rideables/MontageMSQueue.hpp @@ -11,7 +11,6 @@ #include "CustomTypes.hpp" #include "Recoverable.hpp" #include "persist_struct_api.hpp" -#include "DCSS.hpp" using namespace pds; From 0f1aa2e4aecb39c05d3dd083b46acccd3a6f05eb Mon Sep 17 00:00:00 2001 From: Haosen Wen Date: Fri, 13 Nov 2020 15:48:08 -0500 Subject: [PATCH 30/56] use new API in MontageLfHashTable and MontageMSQueue --- Makefile | 2 +- src/persist/EpochSys.hpp | 16 +++---- src/persist/PersistStructs.hpp | 12 +++-- src/persist/Recoverable.cpp | 5 +- src/persist/Recoverable.hpp | 57 ++++++++++++++--------- src/rideables/MontageLfHashTable.hpp | 69 +++++++++++++++------------- src/rideables/MontageMSQueue.hpp | 29 ++++++------ 7 files changed, 107 insertions(+), 83 deletions(-) diff --git a/Makefile b/Makefile index c25807e6..dda58bfb 100644 --- a/Makefile +++ b/Makefile @@ -118,7 +118,7 @@ ARCHIVEDIR:=./lib # -since we do pattern matching between this list and the # source files, the file path specified must be the same # type (absolute or relative) -EXECUTABLES:= ./src/main.cpp ./unit_test/dcss.cpp +EXECUTABLES:= ./src/main.cpp # A list of source files contained in the # source directory to exclude from the build diff --git a/src/persist/EpochSys.hpp b/src/persist/EpochSys.hpp index c80eac81..fe255ca2 100644 --- a/src/persist/EpochSys.hpp +++ b/src/persist/EpochSys.hpp @@ -58,23 +58,22 @@ class EpochSys{ std::mutex dedicated_epoch_advancer_lock; - /* public members 
for API */ + // system mode that toggles on/off PDELETE for recovery purpose. + SysMode sys_mode = ONLINE; + + /* public members for API */ // TODO: put these into Recoverable // current epoch of each thread. padded* epochs = nullptr; - // local descriptors for DCSS - // TODO: maybe put this into a derived class for NB data structures? - padded* local_descs = nullptr; // containers for pending allocations padded>* pending_allocs = nullptr; - // system mode that toggles on/off PDELETE for recovery purpose. - SysMode sys_mode = ONLINE; + EpochSys(GlobalTestConfig* _gtc) : uid_generator(_gtc->task_num), gtc(_gtc) { epochs = new padded[gtc->task_num]; for(int i = 0; i < gtc->task_num; i++){ epochs[i].ui = NULL_EPOCH; } - local_descs = new padded[gtc->task_num]; + pending_allocs = new padded>[gtc->task_num]; reset(); // TODO: change to recover() later on. } @@ -96,7 +95,6 @@ class EpochSys{ delete to_be_persisted; delete to_be_freed; delete epochs; - delete local_descs; } void parse_env(); @@ -124,6 +122,8 @@ class EpochSys{ // API // ///////// + // TODO: put these into Recoverable. 
+ static void init_thread(int _tid){ EpochSys::tid = _tid; } diff --git a/src/persist/PersistStructs.hpp b/src/persist/PersistStructs.hpp index d3a2b1a4..1e2a804a 100644 --- a/src/persist/PersistStructs.hpp +++ b/src/persist/PersistStructs.hpp @@ -9,6 +9,8 @@ #include "Persistent.hpp" #include "common_macros.hpp" +class Recoverable; + namespace pds{ struct OldSeeNewException : public std::exception { const char * what () const throw () { @@ -180,12 +182,12 @@ namespace pds{ // desc: ....01 // real val: ....00 std::atomic nbptr; - nbptr_t load(); - nbptr_t load_verify(); - inline T load_val(){ + nbptr_t load(Recoverable* ds); + nbptr_t load_verify(Recoverable* ds); + inline T load_val(Recoverable* ds){ return reinterpret_cast(load().val); } - bool CAS_verify(nbptr_t expected, const T& desired); + bool CAS_verify(Recoverable* ds, nbptr_t expected, const T& desired); inline bool CAS_verify(nbptr_t expected, const nbptr_t& desired){ return CAS_verify(expected,desired.get_val()); } @@ -265,7 +267,7 @@ namespace pds{ return in_progress(nbptr.load()); } // TODO: try_complete used to be inline. Try to make it inline again when refactoring is finished. - void try_complete(pds::EpochSys* esys, uint64_t addr); + void try_complete(Recoverable* ds, uint64_t addr); sc_desc_t( uint64_t c, uint64_t a, uint64_t o, uint64_t n, uint64_t e) : diff --git a/src/persist/Recoverable.cpp b/src/persist/Recoverable.cpp index a80683aa..9a5b7d2d 100644 --- a/src/persist/Recoverable.cpp +++ b/src/persist/Recoverable.cpp @@ -8,6 +8,7 @@ Recoverable::Recoverable(GlobalTestConfig* gtc){ // init main thread pds::init_thread(0); + local_descs = new padded[gtc->task_num]; // TODO: replace this with _esys initialization. 
_esys = pds::esys; } @@ -25,12 +26,12 @@ void Recoverable::init_thread(int tid){ namespace pds{ - void sc_desc_t::try_complete(pds::EpochSys* esys, uint64_t addr){ + void sc_desc_t::try_complete(Recoverable* ds, uint64_t addr){ nbptr_t _d = nbptr.load(); int ret = 0; if(_d.val!=addr) return; if(in_progress(_d)){ - if(pds::esys->check_epoch(cas_epoch)){ + if(ds->check_epoch(cas_epoch)){ ret = 2; ret |= commit(_d); } else { diff --git a/src/persist/Recoverable.hpp b/src/persist/Recoverable.hpp index ffb100f2..dfe7ae41 100644 --- a/src/persist/Recoverable.hpp +++ b/src/persist/Recoverable.hpp @@ -7,7 +7,15 @@ // TODO: report recover errors/exceptions class Recoverable{ + // TODO: get rid of these. + template friend class pds::atomic_nbptr_t; + friend class pds::nbptr_t; + pds::EpochSys* _esys = nullptr; + + // local descriptors for DCSS + // TODO: maybe put this into a derived class for NB data structures? + padded* local_descs = nullptr; public: // return num of blocks recovered. virtual int recover(bool simulated = false) = 0; @@ -19,6 +27,9 @@ class Recoverable{ bool check_epoch(){ return _esys->check_epoch(); } + bool check_epoch(uint64_t c){ + return _esys->check_epoch(c); + } void begin_op(){ _esys->begin_op(); } @@ -108,6 +119,10 @@ class Recoverable{ void flush(){ _esys->flush(); } + + pds::sc_desc_t* get_dcss_desc(){ + return &local_descs[pds::EpochSys::tid].ui; + } }; ///////////////////////////// @@ -220,7 +235,7 @@ namespace pds{ // implementation of load and cas for visible reads template - nbptr_t atomic_nbptr_t::load(){ + nbptr_t atomic_nbptr_t::load(Recoverable* ds){ nbptr_t r; while(true){ r = nbptr.load(); @@ -231,12 +246,12 @@ namespace pds{ } template - nbptr_t atomic_nbptr_t::load_verify(){ - assert(pds::esys->epochs[pds::EpochSys::tid].ui != NULL_EPOCH); + nbptr_t atomic_nbptr_t::load_verify(Recoverable* ds){ + assert(ds->_esys->epochs[pds::EpochSys::tid].ui != NULL_EPOCH); nbptr_t r; while(true){ r = nbptr.load(); - 
if(pds::esys->check_epoch(pds::esys->epochs[pds::EpochSys::tid].ui)){ + if(ds->_esys->check_epoch(ds->_esys->epochs[pds::EpochSys::tid].ui)){ nbptr_t ret(r.val,r.cnt+1); if(nbptr.compare_exchange_strong(r, ret)){ return r; @@ -248,9 +263,9 @@ namespace pds{ } template - bool atomic_nbptr_t::CAS_verify(nbptr_t expected, const T& desired){ - assert(pds::esys->epochs[pds::EpochSys::tid].ui != NULL_EPOCH); - if(pds::esys->check_epoch(pds::esys->epochs[pds::EpochSys::tid].ui)){ + bool atomic_nbptr_t::CAS_verify(Recoverable* ds, nbptr_t expected, const T& desired){ + assert(ds->_esys->epochs[pds::EpochSys::tid].ui != NULL_EPOCH); + if(ds->_esys->check_epoch(ds->_esys->epochs[pds::EpochSys::tid].ui)){ nbptr_t new_r(reinterpret_cast(desired),expected.cnt+1); return nbptr.compare_exchange_strong(expected, new_r); } else { @@ -268,32 +283,32 @@ namespace pds{ /* implementation of load and cas for invisible reads */ template - nbptr_t atomic_nbptr_t::load(){ + nbptr_t atomic_nbptr_t::load(Recoverable* ds){ nbptr_t r; do { r = nbptr.load(); if(r.is_desc()) { sc_desc_t* D = r.get_desc(); - D->try_complete(pds::esys, reinterpret_cast(this)); + D->try_complete(ds, reinterpret_cast(this)); } } while(r.is_desc()); return r; } template - nbptr_t atomic_nbptr_t::load_verify(){ + nbptr_t atomic_nbptr_t::load_verify(Recoverable* ds){ // invisible read doesn't need to verify epoch even if it's a // linearization point // this saves users from catching EpochVerifyException - return load(); + return load(ds); } // extern std::atomic abort_cnt; // extern std::atomic total_cnt; template - bool atomic_nbptr_t::CAS_verify(nbptr_t expected, const T& desired){ - assert(pds::esys->epochs[pds::EpochSys::tid].ui != NULL_EPOCH); + bool atomic_nbptr_t::CAS_verify(Recoverable* ds, nbptr_t expected, const T& desired){ + assert(ds->_esys->epochs[pds::EpochSys::tid].ui != NULL_EPOCH); // total_cnt.fetch_add(1); #ifdef USE_TSX unsigned status = _xbegin(); @@ -302,7 +317,7 @@ namespace pds{ 
if(!r.is_desc()){ if( r.cnt!=expected.cnt || r.val!=expected.val || - !pds::esys->check_epoch(pds::esys->epochs[pds::EpochSys::tid].ui)){ + !ds->check_epoch()){ _xend(); return false; } else { @@ -314,7 +329,7 @@ namespace pds{ } else { // we only help complete descriptor, but not retry _xend(); - r.get_desc()->try_complete(pds::esys, reinterpret_cast(this)); + r.get_desc()->try_complete(ds, reinterpret_cast(this)); return false; } // execution won't reach here; program should have returned @@ -326,7 +341,7 @@ namespace pds{ nbptr_t r = nbptr.load(); if(r.is_desc()){ sc_desc_t* D = r.get_desc(); - D->try_complete(pds::esys, reinterpret_cast(this)); + D->try_complete(ds, reinterpret_cast(this)); return false; } else { if( r.cnt!=expected.cnt || @@ -337,17 +352,17 @@ namespace pds{ // now r.cnt must be ..00, and r.cnt+1 is ..01, which means "nbptr // contains a descriptor" and "a descriptor is in progress" assert((r.cnt & 3UL) == 0UL); - new (&pds::esys->local_descs[pds::EpochSys::tid].ui) sc_desc_t(r.cnt+1, + new (ds->get_dcss_desc()) sc_desc_t(r.cnt+1, reinterpret_cast(this), expected.val, reinterpret_cast(desired), - pds::esys->epochs[pds::EpochSys::tid].ui); - nbptr_t new_r(reinterpret_cast(&pds::esys->local_descs[pds::EpochSys::tid].ui), r.cnt+1); + ds->_esys->epochs[pds::EpochSys::tid].ui); + nbptr_t new_r(reinterpret_cast(ds->get_dcss_desc()), r.cnt+1); if(!nbptr.compare_exchange_strong(r,new_r)){ return false; } - pds::esys->local_descs[pds::EpochSys::tid].ui.try_complete(pds::esys, reinterpret_cast(this)); - if(pds::esys->local_descs[pds::EpochSys::tid].ui.committed()) return true; + ds->get_dcss_desc()->try_complete(ds, reinterpret_cast(this)); + if(ds->get_dcss_desc()->committed()) return true; else return false; } diff --git a/src/rideables/MontageLfHashTable.hpp b/src/rideables/MontageLfHashTable.hpp index e017698d..5387aa34 100644 --- a/src/rideables/MontageLfHashTable.hpp +++ b/src/rideables/MontageLfHashTable.hpp @@ -40,23 +40,28 @@ class 
MontageLfHashTable : public RMap, Recoverable{ }; struct Node{ + MontageLfHashTable* ds; K key; MarkPtr next; Payload* payload;// TODO: does it have to be atomic? - Node(K k, V v, Node* n):key(k),next(n),payload(PNEW(Payload,k,v)){}; + Node(MontageLfHashTable* ds_, K k, V v, Node* n): + ds(ds_),key(k),next(n),payload(ds_->pnew(k,v)){}; ~Node(){ - PRECLAIM(payload); + ds->preclaim(payload); } void rm_payload(){ // call it before END_OP but after linearization point assert(payload!=nullptr && "payload shouldn't be null"); - PRETIRE(payload); + ds->pretire(payload); } V get_val(){ // call it within BEGIN_OP and END_OP assert(payload!=nullptr && "payload shouldn't be null"); - return (V)payload->get_val(); + return (V)payload->get_val(ds); + } + V get_unsafe_val(){ + return (V)payload->get_unsafe_val(ds); } }; std::hash hash_fn; @@ -119,8 +124,8 @@ optional MontageLfHashTable::get(K key, int tid) { tracker.start_op(tid); // hold epoch from advancing so that the node we find won't be deleted if(findNode(prev,curr,next,key,tid)) { - BEGIN_OP_AUTOEND(); - res=curr.get_val()->get_val();//never old see new as we find node before BEGIN_OP + MontageOpHolder(this); + res=curr.get_val()->get_unsafe_val();//never old see new as we find node before BEGIN_OP } tracker.end_op(tid); @@ -134,18 +139,18 @@ optional MontageLfHashTable::put(K key, V val, int tid) { MarkPtr* prev=nullptr; nbptr_t curr; nbptr_t next; - tmpNode = new Node(key, val, nullptr); + tmpNode = new Node(this, key, val, nullptr); tracker.start_op(tid); while(true) { if(findNode(prev,curr,next,key,tid)) { // exists; replace tmpNode->next.ptr.store(curr); - BEGIN_OP(tmpNode->payload); + begin_op(); res=curr.get_val()->get_val(); - if(prev->ptr.CAS_verify(curr,tmpNode)) { + if(prev->ptr.CAS_verify(this,curr,tmpNode)) { curr.get_val()->rm_payload(); - END_OP; + end_op(); // mark curr; since findNode only finds the first node >= key, it's ok to have duplicated keys temporarily 
while(!curr.get_val()->next.ptr.CAS(next,setMark(next))); if(tmpNode->next.ptr.CAS(curr,next)) { @@ -155,18 +160,18 @@ optional MontageLfHashTable::put(K key, V val, int tid) { } break; } - ABORT_OP; + abort_op(); } else { //does not exist; insert. res={}; tmpNode->next.ptr.store(curr); - BEGIN_OP(tmpNode->payload); - if(prev->ptr.CAS_verify(curr,tmpNode)) { - END_OP; + begin_op(); + if(prev->ptr.CAS_verify(this,curr,tmpNode)) { + end_op(); break; } - ABORT_OP; + abort_op(); } } tracker.end_op(tid); @@ -181,7 +186,7 @@ bool MontageLfHashTable::insert(K key, V val, int tid){ MarkPtr* prev=nullptr; nbptr_t curr; nbptr_t next; - tmpNode = new Node(key, val, nullptr); + tmpNode = new Node(this, key, val, nullptr); tracker.start_op(tid); while(true) { @@ -193,13 +198,13 @@ bool MontageLfHashTable::insert(K key, V val, int tid){ else { //does not exist, insert. tmpNode->next.ptr.store(curr); - BEGIN_OP(tmpNode->payload); - if(prev->ptr.CAS_verify(curr,tmpNode)) { - END_OP; + begin_op(); + if(prev->ptr.CAS_verify(this,curr,tmpNode)) { + end_op(); res=true; break; } - ABORT_OP; + abort_op(); } } tracker.end_op(tid); @@ -220,14 +225,14 @@ optional MontageLfHashTable::remove(K key, int tid) { res={}; break; } - BEGIN_OP(); + begin_op(); res=curr.get_val()->get_val(); - if(!curr.get_val()->next.ptr.CAS_verify(next,setMark(next))) { - ABORT_OP; + if(!curr.get_val()->next.ptr.CAS_verify(this,next,setMark(next))) { + abort_op(); continue; } curr.get_val()->rm_payload(); - END_OP; + end_op(); if(prev->ptr.CAS(curr,next)) { tracker.retire(curr.get_val(),tid); } else { @@ -247,17 +252,17 @@ optional MontageLfHashTable::replace(K key, V val, int tid) { MarkPtr* prev=nullptr; nbptr_t curr; nbptr_t next; - tmpNode = new Node(key, val, nullptr); + tmpNode = new Node(this, key, val, nullptr); tracker.start_op(tid); while(true){ if(findNode(prev,curr,next,key,tid)){ tmpNode->next.ptr.store(curr); - BEGIN_OP(tmpNode->payload); + abort_op(); res=curr.get_val()->get_val(); - 
if(prev->ptr.CAS_verify(curr,tmpNode)){ + if(prev->ptr.CAS_verify(this,curr,tmpNode)){ curr.get_val()->rm_payload(); - END_OP; + end_op(); // mark curr; since findNode only finds the first node >= key, it's ok to have duplicated keys temporarily while(!curr.get_val()->next.ptr.CAS(next,setMark(next))); if(tmpNode->next.ptr.CAS(curr,next)) { @@ -267,7 +272,7 @@ optional MontageLfHashTable::replace(K key, V val, int tid) { } break; } - ABORT_OP; + abort_op(); } else{//does not exist res={}; @@ -285,15 +290,15 @@ bool MontageLfHashTable::findNode(MarkPtr* &prev, nbptr_t &curr, nbptr_t &n size_t idx=hash_fn(key)%idxSize; bool cmark=false; prev=&buckets[idx].ui; - curr=getPtr(prev->ptr.load()); + curr=getPtr(prev->ptr.load(this)); while(true){//to lock old and curr if(curr.get_val()==nullptr) return false; - next=curr.get_val()->next.ptr.load(); + next=curr.get_val()->next.ptr.load(this); cmark=getMark(next); next=getPtr(next); auto ckey=curr.get_val()->key; - if(prev->ptr.load()!=curr) break;//retry + if(prev->ptr.load(this)!=curr) break;//retry if(!cmark) { if(ckey>=key) return ckey==key; prev=&(curr.get_val()->next); diff --git a/src/rideables/MontageMSQueue.hpp b/src/rideables/MontageMSQueue.hpp index 78e9a9e8..df0d76c7 100644 --- a/src/rideables/MontageMSQueue.hpp +++ b/src/rideables/MontageMSQueue.hpp @@ -29,20 +29,23 @@ class MontageMSQueue : public RQueue, Recoverable{ private: struct Node{ + MontageMSQueue* ds; atomic_nbptr_t next; Payload* payload; Node(): next(nullptr), payload(nullptr){}; - Node(T v): next(nullptr), payload(PNEW(Payload, v)){ + Node(MontageMSQueue* ds_, T v): ds(ds_), next(nullptr), payload(ds_->pnew(v)){ assert(esys->epochs[EpochSys::tid].ui == NULL_EPOCH); }; void set_sn(uint64_t s){ assert(payload!=nullptr && "payload shouldn't be null"); - payload->set_unsafe_sn(s); + payload->set_unsafe_sn(ds,s); } - ~Node(){ - PRECLAIM(payload); + ~Node(){ + if (payload){ + ds->preclaim(payload); + } } }; @@ -95,20 +98,18 @@ void 
MontageMSQueue::enqueue(T v, int tid){ if(next.get_val() == nullptr) { // directly set m_sn and BEGIN_OP will flush it new_node->set_sn(s); - BEGIN_OP(); - new_node->payload->set_epoch(esys->epochs[EpochSys::tid].ui); + begin_op(); /* set_sn must happen before PDELETE of payload since it's * before linearization point. * Also, this must set sn in place since we still remain in * the same epoch. */ // new_node->set_sn(s); - if((cur_tail->next).CAS_verify(next, new_node)){ - esys->register_alloc_pblk(new_node->payload, esys->epochs[EpochSys::tid].ui); - END_OP; + if((cur_tail->next).CAS_verify(this, next, new_node)){ + end_op(); break; } - ABORT_OP; + abort_op(); } else { tail.compare_exchange_strong(cur_tail, next.get_val()); // try to swing tail to next node } @@ -136,17 +137,17 @@ optional MontageMSQueue::dequeue(int tid){ } tail.compare_exchange_strong(cur_tail, next); // tail is falling behind; try to update } else { - BEGIN_OP(); + begin_op(); Payload* payload = next->payload;// get payload for PDELETE if(head.CAS_verify(cur_head, next)){ res = (T)payload->get_val();// old see new is impossible - PRETIRE(payload); // semantically we are removing next from queue - END_OP; + pretire(payload); // semantically we are removing next from queue + end_op(); cur_head.get_val()->payload = payload; // let payload have same lifetime as dummy node tracker.retire(cur_head.get_val(), tid); break; } - ABORT_OP; + abort_op(); } } } From 07f30389faec42dfde6a9688f8a87897f3281477 Mon Sep 17 00:00:00 2001 From: Haosen Wen Date: Fri, 13 Nov 2020 16:09:26 -0500 Subject: [PATCH 31/56] use new API in MontageGraph --- src/persist/Recoverable.hpp | 2 +- src/rideables/MontageGraph.hpp | 43 +++++++++++++++--------------- src/rideables/MontageHashTable.hpp | 2 +- 3 files changed, 24 insertions(+), 23 deletions(-) diff --git a/src/persist/Recoverable.hpp b/src/persist/Recoverable.hpp index dfe7ae41..caf78f14 100644 --- a/src/persist/Recoverable.hpp +++ b/src/persist/Recoverable.hpp @@ -107,7 
+107,7 @@ class Recoverable{ T* openwrite_pblk(T* b){ return _esys->openwrite_pblk(b); } - std::unordered_map* recover(const int rec_thd=10){ + std::unordered_map* recover_pblks(const int rec_thd=10){ return _esys->recover(rec_thd); } void recover_mode(){ diff --git a/src/rideables/MontageGraph.hpp b/src/rideables/MontageGraph.hpp index f672222f..15ac8313 100644 --- a/src/rideables/MontageGraph.hpp +++ b/src/rideables/MontageGraph.hpp @@ -63,6 +63,7 @@ class MontageGraph : public RGraph, public Recoverable{ }; class tVertex { public: + MontageGraph* ds; Vertex *payload = nullptr; int id; // cached id @@ -73,11 +74,11 @@ class MontageGraph : public RGraph, public Recoverable{ std::mutex lck; - tVertex(int id, int lbl) { - payload = PNEW(Vertex, id, lbl); + tVertex(MontageGraph* ds_, int id, int lbl): ds(ds_) { + payload = ds->pnew(id, lbl); this->id = id; } - tVertex(Vertex* p) { + tVertex(MontageGraph* ds_, Vertex* p): ds(ds_) { // Use this method for recovery to avoid having to call PNEW when the block already exists. payload = p; this->id = p->get_unsafe_id(); @@ -85,7 +86,7 @@ class MontageGraph : public RGraph, public Recoverable{ ~tVertex() { if (payload){ - PDELETE(payload); + ds->pdelete(payload); } } @@ -122,11 +123,11 @@ class MontageGraph : public RGraph, public Recoverable{ }; MontageGraph(GlobalTestConfig* gtc) : Recoverable(gtc) { - BEGIN_OP_AUTOEND(); + MontageOpHolder(this); idxToVertex = new tVertex*[numVertices]; // Initialize... 
for (size_t i = 0; i < numVertices; i++) { - idxToVertex[i] = new tVertex(i, -1); + idxToVertex[i] = new tVertex(this, i, -1); } } @@ -138,13 +139,13 @@ class MontageGraph : public RGraph, public Recoverable{ // Thread-safe and does not leak edges void clear() { - // BEGIN_OP_AUTOEND(); + // MontageOpHolder(this); for (size_t i = 0; i < numVertices; i++) { idxToVertex[i]->lock(); } for (size_t i = 0; i < numVertices; i++) { for (Relation *r : idxToVertex[i]->adjacency_list) { - PDELETE(r); + pdelete(r); } idxToVertex[i]->adjacency_list.clear(); idxToVertex[i]->dest_list.clear(); @@ -174,7 +175,7 @@ class MontageGraph : public RGraph, public Recoverable{ } { - BEGIN_OP_AUTOEND(); + MontageOpHolder(this); Relation* r = PNEW(Relation,v1, v2, weight); v1->adjacency_list.insert(r); v2->dest_list.insert(r); @@ -199,7 +200,7 @@ class MontageGraph : public RGraph, public Recoverable{ // We utilize `get_unsafe` API because the Relation destination and vertex id will not change at all. v->lock(); { - BEGIN_OP_AUTOEND(); + MontageOpHolder(this); if (std::any_of(v->adjacency_list.begin(), v->adjacency_list.end(), [=] (Relation *r) { return r->get_unsafe_dest() == v2; })) { retval = true; @@ -229,7 +230,7 @@ class MontageGraph : public RGraph, public Recoverable{ } { - BEGIN_OP_AUTOEND(); + MontageOpHolder(this); // Scan v1 for an edge containing v2 in its adjacency list... 
Relation *rdel = nullptr; for (Relation *r : v1->adjacency_list) { @@ -242,7 +243,7 @@ class MontageGraph : public RGraph, public Recoverable{ if (rdel){ v2->dest_list.erase(rdel); - PDELETE(rdel); + pdelete(rdel); } } @@ -264,7 +265,7 @@ class MontageGraph : public RGraph, public Recoverable{ * @param l The new label for the node */ bool set_lbl(int id, int l) { - BEGIN_OP_AUTOEND(); + MontageOpHolder(this); tVertex *v = idxToVertex[id]; v->lock(); v->set_lbl(l); @@ -280,7 +281,7 @@ class MontageGraph : public RGraph, public Recoverable{ * @param w the new weight value */ bool set_weight(int src, int dest, int w) { - BEGIN_OP_AUTOEND(); + MontageOpHolder(this); bool retval = false; /* tVertex *v = idxToVertex[src]; @@ -298,19 +299,19 @@ class MontageGraph : public RGraph, public Recoverable{ int recover(bool simulated) { if (simulated) { - pds::recover_mode(); + recover_mode(); delete idxToVertex; idxToVertex = new tVertex*[numVertices]; #pragma omp parallel for for (size_t i = 0; i < numVertices; i++) { idxToVertex[i] = nullptr; } - pds::online_mode(); + online_mode(); } int block_cnt = 0; auto begin = chrono::high_resolution_clock::now(); - std::unordered_map* recovered = pds::recover(); + std::unordered_map* recovered = recover_pblks(); auto end = chrono::high_resolution_clock::now(); auto dur = end - begin; auto dur_ms = std::chrono::duration_cast(dur).count(); @@ -320,7 +321,7 @@ class MontageGraph : public RGraph, public Recoverable{ std::vector relationVector; std::vector vertexVector; { - BEGIN_OP_AUTOEND(); + MontageOpHolder(this); for (auto itr = recovered->begin(); itr != recovered->end(); ++itr) { // iterate through all recovered blocks. Sort the blocks into vectors containing the different // payloads to be iterated over later. @@ -365,7 +366,7 @@ class MontageGraph : public RGraph, public Recoverable{ std::cerr << "Somehow recovered vertex " << id << " twice!" 
<< std::endl; continue; } - tVertex* new_node = new tVertex(vertexVector[i]); + tVertex* new_node = new tVertex(this, vertexVector[i]); idxToVertex[id] = new_node; } } @@ -528,9 +529,9 @@ class MontageGraph : public RGraph, public Recoverable{ v->adjacency_list.clear(); v->dest_list.clear(); { - BEGIN_OP_AUTOEND() + MontageOpHolder(this); for (Relation *r : garbageList) { - PDELETE(r); + pdelete(r); } } diff --git a/src/rideables/MontageHashTable.hpp b/src/rideables/MontageHashTable.hpp index 4cdb0834..8130e5e9 100644 --- a/src/rideables/MontageHashTable.hpp +++ b/src/rideables/MontageHashTable.hpp @@ -217,7 +217,7 @@ class MontageHashTable : public RMap, public Recoverable{ rec_thd = stoi(gtc->getEnv("RecoverThread")); } auto begin = chrono::high_resolution_clock::now(); - std::unordered_map* recovered = Recoverable::recover(rec_thd); + std::unordered_map* recovered = recover_pblks(rec_thd); auto end = chrono::high_resolution_clock::now(); auto dur = end - begin; auto dur_ms = std::chrono::duration_cast(dur).count(); From b2716a6be1945707389d10350f51afc31d3146bf Mon Sep 17 00:00:00 2001 From: Haosen Wen Date: Fri, 13 Nov 2020 16:25:40 -0500 Subject: [PATCH 32/56] use new API in MontageNatarajanTree --- src/rideables/MontageNatarajanTree.hpp | 70 ++++++++++++++------------ 1 file changed, 38 insertions(+), 32 deletions(-) diff --git a/src/rideables/MontageNatarajanTree.hpp b/src/rideables/MontageNatarajanTree.hpp index 4089fcc2..1cde7ee7 100644 --- a/src/rideables/MontageNatarajanTree.hpp +++ b/src/rideables/MontageNatarajanTree.hpp @@ -30,20 +30,23 @@ class MontageNatarajanTree : public RMap, public Recoverable{ /* transient structs */ enum Level { finite = -1, inf0 = 0, inf1 = 1, inf2 = 2}; struct Node{ + MontageNatarajanTree* ds; Level level; std::atomic left; std::atomic right; K key; Payload* payload;// TODO: does it have to be atomic? 
- Node(K k, V val, Node* l=nullptr, Node* r=nullptr):level(finite),left(l),right(r),key(k),payload(PNEW(Payload, key, val)){ }; - Node(Level lev, Node* l=nullptr, Node* r=nullptr):level(lev),left(l),right(r),key(),payload(nullptr){ + Node(MontageNatarajanTree* ds_, K k, V val, Node* l=nullptr, Node* r=nullptr): + ds(ds_), level(finite),left(l),right(r),key(k),payload(PNEW(Payload, key, val)){ }; + Node(MontageNatarajanTree* ds_, Level lev, Node* l=nullptr, Node* r=nullptr): + ds(ds_), level(lev),left(l),right(r),key(),payload(nullptr){ assert(lev != finite && "use constructor with another signature for concrete nodes!"); }; ~Node(){ if(payload!=nullptr){ // this is a leaf - PRECLAIM(payload); + ds->preclaim(payload); } } @@ -51,12 +54,15 @@ class MontageNatarajanTree : public RMap, public Recoverable{ // call it before END_OP but after linearization point assert(level == finite); assert(payload!=nullptr && "payload shouldn't be null"); - PRETIRE(payload); + ds->pretire(payload); } - V get_val(){ + V get_val(MontageNatarajanTree* ds){ // call it within BEGIN_OP and END_OP assert(payload!=nullptr && "payload shouldn't be null"); - return (V)payload->get_val(); + return (V)payload->get_val(ds); + } + V get_unsafe_val(MontageNatarajanTree* ds){ + return (V)payload->get_unsafe_val(ds); } //not thread-safe void set(K k, Node* l=nullptr, Node* r=nullptr){ @@ -82,8 +88,8 @@ class MontageNatarajanTree : public RMap, public Recoverable{ /* variables */ RCUTracker tracker; const V defV{}; - Node r{inf2}; - Node s{inf1}; + Node r{this,inf2}; + Node s{this,inf1}; padded* records; const size_t GET_POINTER_BITS = 0xfffffffffffffffc;//for machine 64-bit or less. 
@@ -151,10 +157,10 @@ class MontageNatarajanTree : public RMap, public Recoverable{ public: MontageNatarajanTree(GlobalTestConfig* gtc): Recoverable(gtc), tracker(gtc->task_num, 100, 1000, true){ - r.right.store(new Node(inf2)); + r.right.store(new Node(this,inf2)); r.left.store(&s); - s.right.store(new Node(inf1)); - s.left.store(new Node(inf0)); + s.right.store(new Node(this,inf1)); + s.left.store(new Node(this,inf0)); records = new padded[gtc->task_num]{}; }; ~MontageNatarajanTree(){}; @@ -293,8 +299,8 @@ optional MontageNatarajanTree::get(K key, int tid){ seek(key,tid); leaf=getPtr(seekRecord->leaf); if(nodeEqual(key,leaf)){ - BEGIN_OP_AUTOEND(); - res = leaf->get_val();//never old see new as we find node before BEGIN_OP + MontageOpHolder(this); + res = leaf->get_unsafe_val(this);//never old see new as we find node before BEGIN_OP } tracker.end_op(tid); @@ -306,8 +312,8 @@ optional MontageNatarajanTree::put(K key, V val, int tid){ optional res={}; SeekRecord* seekRecord=&(records[tid].ui); - Node* newInternal=new Node(inf2); - Node* newLeaf=new Node(key,val); + Node* newInternal=new Node(this,inf2); + Node* newLeaf=new Node(this,key,val); Node* parent=nullptr; Node* leaf=nullptr; @@ -344,14 +350,14 @@ optional MontageNatarajanTree::put(K key, V val, int tid){ newInternal->set(std::max(key,leaf->key),newLeft,newRight); Node* tmpExpected=getPtr(leaf); - BEGIN_OP(newLeaf->payload); + begin_op(); if(childAddr->compare_exchange_strong(tmpExpected,getPtr(newInternal))){ - END_OP; + end_op(); res={}; break;//insertion succeeds } else{//fails; help conflicting delete operation - ABORT_OP; + abort_op(); Node* tmpChild=childAddr->load(); if(getPtr(tmpChild)==leaf && (getFlg(tmpChild)||getTg(tmpChild))) cleanup(key,tid); @@ -363,16 +369,16 @@ optional MontageNatarajanTree::put(K key, V val, int tid){ childAddr=&(parent->left); else childAddr=&(parent->right); - BEGIN_OP(newLeaf->payload); - res=leaf->get_val(); + begin_op(); + res=leaf->get_val(this); 
if(childAddr->compare_exchange_strong(leaf,newLeaf)){ leaf->rm_payload(); - END_OP; + end_op(); delete(newInternal);// this is always local so no need to use tracker tracker.retire(leaf,tid); break; } - ABORT_OP; + abort_op(); } } @@ -386,8 +392,8 @@ bool MontageNatarajanTree::insert(K key, V val, int tid){ bool res=false; SeekRecord* seekRecord=&(records[tid].ui); - Node* newInternal=new Node(inf2); - Node* newLeaf=new Node(key,val); + Node* newInternal=new Node(this,inf2); + Node* newLeaf=new Node(this,key,val); Node* parent=nullptr; Node* leaf=nullptr; @@ -424,14 +430,14 @@ bool MontageNatarajanTree::insert(K key, V val, int tid){ newInternal->set(std::max(key,leaf->key),newLeft,newRight); Node* tmpExpected=getPtr(leaf); - BEGIN_OP(newLeaf->payload); + begin_op(); if(childAddr->compare_exchange_strong(tmpExpected,getPtr(newInternal))){ - END_OP; + end_op(); res=true; break; } else{//fails; help conflicting delete operation - ABORT_OP; + abort_op(); Node* tmpChild=childAddr->load(); if(getPtr(tmpChild)==leaf && (getFlg(tmpChild)||getTg(tmpChild))) cleanup(key,tid); @@ -477,8 +483,8 @@ optional MontageNatarajanTree::remove(K key, int tid){ } Node* tmpExpected=leaf; - BEGIN_OP(); - res=leaf->get_val(); + begin_op(); + res=leaf->get_val(this); if(childAddr->compare_exchange_strong(tmpExpected, mixPtrFlgTg(tmpExpected,true,false))){ /* @@ -486,12 +492,12 @@ optional MontageNatarajanTree::remove(K key, int tid){ * before the leaf is found which is of course before that BEGIN_OP */ leaf->rm_payload(); - END_OP; + end_op(); injecting=false; if(cleanup(key,tid)) break; } else{ - ABORT_OP; + abort_op(); Node* tmpChild=childAddr->load(); if(getPtr(tmpChild)==leaf && (getFlg(tmpChild)||getTg(tmpChild))) cleanup(key,tid); @@ -516,7 +522,7 @@ optional MontageNatarajanTree::replace(K key, V val, int tid){ optional res={}; // SeekRecord* seekRecord=&(records[tid].ui); - // Node* newLeaf=new Node(key,val); + // Node* newLeaf=new Node(this,key,val); // Node* parent=nullptr; // 
Node* leaf=nullptr; From f77693a4275266a54ae7966225f63641c9b2921e Mon Sep 17 00:00:00 2001 From: Haosen Wen Date: Fri, 13 Nov 2020 16:31:25 -0500 Subject: [PATCH 33/56] trivial --- src/rideables/MontageGraph.hpp | 6 +++--- src/rideables/MontageNatarajanTree.hpp | 10 +++++----- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/rideables/MontageGraph.hpp b/src/rideables/MontageGraph.hpp index 15ac8313..b9d7964f 100644 --- a/src/rideables/MontageGraph.hpp +++ b/src/rideables/MontageGraph.hpp @@ -81,7 +81,7 @@ class MontageGraph : public RGraph, public Recoverable{ tVertex(MontageGraph* ds_, Vertex* p): ds(ds_) { // Use this method for recovery to avoid having to call PNEW when the block already exists. payload = p; - this->id = p->get_unsafe_id(); + this->id = p->get_unsafe_id(ds); } ~tVertex() { @@ -91,10 +91,10 @@ class MontageGraph : public RGraph, public Recoverable{ } void set_lbl(int l) { - payload = payload->set_lbl(l); + payload = payload->set_lbl(ds, l); } int get_lbl() { - return payload->get_lbl(); + return payload->get_lbl(ds); } // Immutable diff --git a/src/rideables/MontageNatarajanTree.hpp b/src/rideables/MontageNatarajanTree.hpp index 1cde7ee7..11271eca 100644 --- a/src/rideables/MontageNatarajanTree.hpp +++ b/src/rideables/MontageNatarajanTree.hpp @@ -56,12 +56,12 @@ class MontageNatarajanTree : public RMap, public Recoverable{ assert(payload!=nullptr && "payload shouldn't be null"); ds->pretire(payload); } - V get_val(MontageNatarajanTree* ds){ + V get_val(){ // call it within BEGIN_OP and END_OP assert(payload!=nullptr && "payload shouldn't be null"); return (V)payload->get_val(ds); } - V get_unsafe_val(MontageNatarajanTree* ds){ + V get_unsafe_val(){ return (V)payload->get_unsafe_val(ds); } //not thread-safe @@ -300,7 +300,7 @@ optional MontageNatarajanTree::get(K key, int tid){ leaf=getPtr(seekRecord->leaf); if(nodeEqual(key,leaf)){ MontageOpHolder(this); - res = leaf->get_unsafe_val(this);//never old see new as we find node 
before BEGIN_OP + res = leaf->get_unsafe_val();//never old see new as we find node before BEGIN_OP } tracker.end_op(tid); @@ -370,7 +370,7 @@ optional MontageNatarajanTree::put(K key, V val, int tid){ else childAddr=&(parent->right); begin_op(); - res=leaf->get_val(this); + res=leaf->get_val(); if(childAddr->compare_exchange_strong(leaf,newLeaf)){ leaf->rm_payload(); end_op(); @@ -484,7 +484,7 @@ optional MontageNatarajanTree::remove(K key, int tid){ Node* tmpExpected=leaf; begin_op(); - res=leaf->get_val(this); + res=leaf->get_val(); if(childAddr->compare_exchange_strong(tmpExpected, mixPtrFlgTg(tmpExpected,true,false))){ /* From 0073eacedf941adfa698c05d4848b1955afc62b2 Mon Sep 17 00:00:00 2001 From: Haosen Wen Date: Fri, 13 Nov 2020 16:35:34 -0500 Subject: [PATCH 34/56] use new API in MontageQueue --- src/rideables/MontageQueue.hpp | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/src/rideables/MontageQueue.hpp b/src/rideables/MontageQueue.hpp index af724f72..e1bae9ce 100644 --- a/src/rideables/MontageQueue.hpp +++ b/src/rideables/MontageQueue.hpp @@ -30,13 +30,15 @@ class MontageQueue : public RQueue, public Recoverable{ private: struct Node{ + MontageQueue* ds; Node* next; Payload* payload; T val; // for debug purpose Node(): next(nullptr), payload(nullptr){}; // Node(): next(nullptr){}; - Node(T v, uint64_t n=0): next(nullptr), payload(PNEW(Payload, v, n)), val(v){}; + Node(MontageQueue* ds_, T v, uint64_t n=0): + ds(ds_), next(nullptr), payload(ds_->pnew(v, n)), val(v){}; // Node(T v, uint64_t n): next(nullptr), val(v){}; void set_sn(uint64_t s){ @@ -46,11 +48,11 @@ class MontageQueue : public RQueue, public Recoverable{ T get_val(){ assert(payload!=nullptr && "payload shouldn't be null"); // old-see-new never happens for locking ds - return (T)payload->get_unsafe_val(); + return (T)payload->get_unsafe_val(ds); // return val; } ~Node(){ - PDELETE(payload); + ds->pdelete(payload); } }; @@ -86,12 +88,12 @@ class MontageQueue : 
public RQueue, public Recoverable{ template void MontageQueue::enqueue(T val, int tid){ - Node* new_node = new Node(val); + Node* new_node = new Node(this, val); std::lock_guard lk(lock); // no read or write so impossible to have old see new exception new_node->set_sn(global_sn); global_sn++; - BEGIN_OP_AUTOEND(new_node->payload); + MontageOpHolder(this); if(tail == nullptr) { head = tail = new_node; return; @@ -105,7 +107,7 @@ optional MontageQueue::dequeue(int tid){ optional res = {}; // while(true){ lock.lock(); - BEGIN_OP_AUTOEND(); + MontageOpHolder(this); // try { if(head == nullptr) { lock.unlock(); From e44c2d33b314735b434a29e95cd239851ff42f17 Mon Sep 17 00:00:00 2001 From: Haosen Wen Date: Fri, 13 Nov 2020 16:41:11 -0500 Subject: [PATCH 35/56] removed global PBlk getters and setters && trivial fixes --- src/persist/Recoverable.hpp | 32 -------------------------------- src/rideables/MontageGraph.hpp | 24 ++++++++++++------------ src/rideables/MontageQueue.hpp | 2 +- 3 files changed, 13 insertions(+), 45 deletions(-) diff --git a/src/persist/Recoverable.hpp b/src/persist/Recoverable.hpp index caf78f14..c319c004 100644 --- a/src/persist/Recoverable.hpp +++ b/src/persist/Recoverable.hpp @@ -145,16 +145,10 @@ public:\ t TOKEN_CONCAT(get_, n)(Recoverable* ds) const{\ return ds->openread_pblk(this)->TOKEN_CONCAT(m_, n);\ }\ -t TOKEN_CONCAT(get_, n)() const{\ - return pds::esys->openread_pblk(this)->TOKEN_CONCAT(m_, n);\ -}\ /* get method open a pblk for read. Allows old-see-new reads. */\ t TOKEN_CONCAT(get_unsafe_, n)(Recoverable* ds) const{\ return ds->openread_pblk_unsafe(this)->TOKEN_CONCAT(m_, n);\ }\ -t TOKEN_CONCAT(get_unsafe_, n)() const{\ - return pds::esys->openread_pblk_unsafe(this)->TOKEN_CONCAT(m_, n);\ -}\ /* set method open a pblk for write. 
return a new copy when necessary */\ template \ T* TOKEN_CONCAT(set_, n)(Recoverable* ds, const in_type& TOKEN_CONCAT(tmp_, n)){\ @@ -164,22 +158,10 @@ T* TOKEN_CONCAT(set_, n)(Recoverable* ds, const in_type& TOKEN_CONCAT(tmp_, n)){ ds->register_update_pblk(ret);\ return ret;\ }\ -template \ -T* TOKEN_CONCAT(set_, n)(const in_type& TOKEN_CONCAT(tmp_, n)){\ - assert(pds::esys->epochs[EpochSys::tid].ui != NULL_EPOCH);\ - auto ret = pds::esys->openwrite_pblk(this);\ - ret->TOKEN_CONCAT(m_, n) = TOKEN_CONCAT(tmp_, n);\ - pds::esys->register_update_pblk(ret);\ - return ret;\ -}\ /* set the field by the parameter. called only outside BEGIN_OP and END_OP */\ template \ void TOKEN_CONCAT(set_unsafe_, n)(Recoverable* ds, const in_type& TOKEN_CONCAT(tmp_, n)){\ TOKEN_CONCAT(m_, n) = TOKEN_CONCAT(tmp_, n);\ -}\ -template \ -void TOKEN_CONCAT(set_unsafe_, n)(const in_type& TOKEN_CONCAT(tmp_, n)){\ - TOKEN_CONCAT(m_, n) = TOKEN_CONCAT(tmp_, n);\ } /** @@ -194,16 +176,10 @@ protected:\ t TOKEN_CONCAT(get_, n)(Recoverable* ds, int i) const{\ return ds->openread_pblk(this)->TOKEN_CONCAT(m_, n)[i];\ }\ -t TOKEN_CONCAT(get_, n)(int i) const{\ - return pds::esys->openread_pblk(this)->TOKEN_CONCAT(m_, n)[i];\ -}\ /* get method open a pblk for read. Allows old-see-new reads. */\ t TOKEN_CONCAT(get_unsafe_, n)(Recoverable* ds, int i) const{\ return ds->openread_pblk_unsafe(this)->TOKEN_CONCAT(m_, n)[i];\ }\ -t TOKEN_CONCAT(get_unsafe_, n)(int i) const{\ - return pds::esys->openread_pblk_unsafe(this)->TOKEN_CONCAT(m_, n)[i];\ -}\ /* set method open a pblk for write. 
return a new copy when necessary */\ T* TOKEN_CONCAT(set_, n)(Recoverable* ds, int i, t TOKEN_CONCAT(tmp_, n)){\ assert(ds->epochs[EpochSys::tid].ui != NULL_EPOCH);\ @@ -211,16 +187,8 @@ T* TOKEN_CONCAT(set_, n)(Recoverable* ds, int i, t TOKEN_CONCAT(tmp_, n)){\ ret->TOKEN_CONCAT(m_, n)[i] = TOKEN_CONCAT(tmp_, n);\ ds->register_update_pblk(ret);\ return ret;\ -}\ -T* TOKEN_CONCAT(set_, n)(int i, t TOKEN_CONCAT(tmp_, n)){\ - assert(pds::esys->epochs[EpochSys::tid].ui != NULL_EPOCH);\ - auto ret = pds::esys->openwrite_pblk(this);\ - ret->TOKEN_CONCAT(m_, n)[i] = TOKEN_CONCAT(tmp_, n);\ - pds::esys->register_update_pblk(ret);\ - return ret;\ } - namespace pds{ template diff --git a/src/rideables/MontageGraph.hpp b/src/rideables/MontageGraph.hpp index b9d7964f..429ab55e 100644 --- a/src/rideables/MontageGraph.hpp +++ b/src/rideables/MontageGraph.hpp @@ -202,7 +202,7 @@ class MontageGraph : public RGraph, public Recoverable{ { MontageOpHolder(this); if (std::any_of(v->adjacency_list.begin(), v->adjacency_list.end(), - [=] (Relation *r) { return r->get_unsafe_dest() == v2; })) { + [=] (Relation *r) { return r->get_unsafe_dest(this) == v2; })) { retval = true; } } @@ -234,7 +234,7 @@ class MontageGraph : public RGraph, public Recoverable{ // Scan v1 for an edge containing v2 in its adjacency list... Relation *rdel = nullptr; for (Relation *r : v1->adjacency_list) { - if (r->get_unsafe_dest() == v2->get_id()) { + if (r->get_unsafe_dest(this) == v2->get_id()) { rdel = r; v1->adjacency_list.erase(r); break; @@ -331,7 +331,7 @@ class MontageGraph : public RGraph, public Recoverable{ // Should these be parallel? I'm not sure.. 
BasePayload* b = reinterpret_cast(itr->second); - switch (b->get_unsafe_tag()) { + switch (b->get_unsafe_tag(this)) { case 0: { Vertex* v = reinterpret_cast(itr->second); @@ -346,7 +346,7 @@ class MontageGraph : public RGraph, public Recoverable{ } default: { - std::cerr << "Found bad tag " << b->get_unsafe_tag() << std::endl; + std::cerr << "Found bad tag " << b->get_unsafe_tag(this) << std::endl; } } } @@ -361,7 +361,7 @@ class MontageGraph : public RGraph, public Recoverable{ pds::init_thread(omp_get_thread_num()); #pragma omp for for (size_t i = 0; i < vertexVector.size(); ++i) { - int id = vertexVector[i]->get_unsafe_id(); + int id = vertexVector[i]->get_unsafe_id(this); if (idxToVertex[id] != nullptr) { std::cerr << "Somehow recovered vertex " << id << " twice!" << std::endl; continue; @@ -393,8 +393,8 @@ class MontageGraph : public RGraph, public Recoverable{ #pragma omp for for (size_t i = 0; i < relationVector.size(); ++i) { Relation *e = relationVector[i]; - int id1 = e->get_unsafe_src(); - int id2 = e->get_unsafe_dest(); + int id1 = e->get_unsafe_src(this); + int id2 = e->get_unsafe_dest(this); RelationWrapper item = { id1, id2, e }; if (id1 < 0 || (size_t) id1 >= numVertices || id2 < 0 || (size_t) id2 >= numVertices) { std::cerr << "Found a relation with a bad edge: (" << id1 << "," << id2 << ")" << std::endl; @@ -469,10 +469,10 @@ class MontageGraph : public RGraph, public Recoverable{ v->lock(); uint64_t seq = v->seqNumber; for (Relation *r : v->adjacency_list) { - vertices.push_back(r->get_unsafe_dest()); + vertices.push_back(r->get_unsafe_dest(this)); } for (Relation *r : v->dest_list) { - vertices.push_back(r->get_unsafe_src()); + vertices.push_back(r->get_unsafe_src(this)); } vertices.push_back(id); @@ -504,7 +504,7 @@ class MontageGraph : public RGraph, public Recoverable{ std::vector toRemoveList; for (Relation *r : _v->adjacency_list) { - if (r->get_unsafe_src() == id) { + if (r->get_unsafe_src(this) == id) { toRemoveList.push_back(r); } } @@ 
-515,7 +515,7 @@ class MontageGraph : public RGraph, public Recoverable{ toRemoveList.clear(); for (Relation *r : _v->dest_list) { - if (r->get_unsafe_dest() == id) { + if (r->get_unsafe_dest(this) == id) { toRemoveList.push_back(r); } } @@ -548,7 +548,7 @@ class MontageGraph : public RGraph, public Recoverable{ void for_each_edge(int v, std::function fn) { idxToVertex[v]->lock(); for (Relation *r : idxToVertex[v]->adjacency_list) { - if (!fn(r->get_unsafe_dest())) { + if (!fn(r->get_unsafe_dest(this))) { break; } } diff --git a/src/rideables/MontageQueue.hpp b/src/rideables/MontageQueue.hpp index e1bae9ce..8d034432 100644 --- a/src/rideables/MontageQueue.hpp +++ b/src/rideables/MontageQueue.hpp @@ -43,7 +43,7 @@ class MontageQueue : public RQueue, public Recoverable{ void set_sn(uint64_t s){ assert(payload!=nullptr && "payload shouldn't be null"); - payload->set_unsafe_sn(s); + payload->set_unsafe_sn(ds, s); } T get_val(){ assert(payload!=nullptr && "payload shouldn't be null"); From 522a8080bf41b31507a37b0f75f782a4af96a465 Mon Sep 17 00:00:00 2001 From: Haosen Wen Date: Fri, 13 Nov 2020 17:29:31 -0500 Subject: [PATCH 36/56] file orginaze && cleanups --- ext/threadcached/include/memcached.h | 2 +- ext/ycsb-tcd/ycsbc.cc | 2 +- src/persist/EpochSys.cpp | 2 - src/persist/PString.hpp | 1 - src/persist/{ => api}/Recoverable.cpp | 15 +++--- src/persist/{ => api}/Recoverable.hpp | 10 ++-- src/persist/api/montage_global_api.cpp | 5 ++ ...{pblk_naked.hpp => montage_global_api.hpp} | 47 ++++++++++--------- src/persist/api/persist_struct_api.hpp | 6 --- src/rideables/DLGraph.hpp | 2 - src/rideables/HOHHashTable.hpp | 34 ++++++++------ src/rideables/MODHashTable.hpp | 1 - src/rideables/MODQueue.hpp | 1 - src/rideables/MontageGraph.hpp | 8 ++-- src/rideables/MontageHashTable.hpp | 1 - src/rideables/MontageMSQueue.hpp | 1 - src/rideables/MontageNatarajanTree.hpp | 3 +- src/rideables/MontageQueue.hpp | 2 +- src/rideables/NVMGraph.hpp | 2 +- src/rideables/PriorityQueue.hpp | 24 
+++++----- src/rideables/TGraph.hpp | 1 - src/rideables/Toy.hpp | 10 ++-- src/rideables/UnbalancedTree.hpp | 37 +++++++++------ src/tests/ChurnTest.hpp | 9 ---- src/tests/GraphRecoveryTest.hpp | 8 +--- src/tests/GraphTest.hpp | 27 ++++++----- src/tests/HeapChurnTest.hpp | 1 - src/tests/QueueChurnTest.hpp | 1 - src/tests/QueueTest.hpp | 1 - src/tests/RecoverVerifyTest.hpp | 8 +--- 30 files changed, 125 insertions(+), 147 deletions(-) rename src/persist/{ => api}/Recoverable.cpp (86%) rename src/persist/{ => api}/Recoverable.hpp (98%) create mode 100644 src/persist/api/montage_global_api.cpp rename src/persist/api/{pblk_naked.hpp => montage_global_api.hpp} (67%) delete mode 100644 src/persist/api/persist_struct_api.hpp diff --git a/ext/threadcached/include/memcached.h b/ext/threadcached/include/memcached.h index 8a3721a2..e742720c 100644 --- a/ext/threadcached/include/memcached.h +++ b/ext/threadcached/include/memcached.h @@ -398,7 +398,7 @@ extern unsigned stats_id; * Structure for storing items within memcached. 
*/ #ifdef MONTAGE -#include "persist_struct_api.hpp" +#include "montage_global_api.hpp" using namespace pds; struct item : public PBlk{ diff --git a/ext/ycsb-tcd/ycsbc.cc b/ext/ycsb-tcd/ycsbc.cc index 369e4490..911ec13b 100644 --- a/ext/ycsb-tcd/ycsbc.cc +++ b/ext/ycsb-tcd/ycsbc.cc @@ -23,7 +23,7 @@ using namespace std; #ifdef MONTAGE #include "TestConfig.hpp" -#include "persist_struct_api.hpp" +#include "montage_global_api.hpp" #endif /* MONTAGE */ void UsageMessage(const char *command); diff --git a/src/persist/EpochSys.cpp b/src/persist/EpochSys.cpp index 3e8d6e57..d793d431 100644 --- a/src/persist/EpochSys.cpp +++ b/src/persist/EpochSys.cpp @@ -4,8 +4,6 @@ #include namespace pds{ - EpochSys* esys = nullptr; - thread_local int EpochSys::tid = -1; void EpochSys::parse_env(){ diff --git a/src/persist/PString.hpp b/src/persist/PString.hpp index 13739a9f..93e76752 100644 --- a/src/persist/PString.hpp +++ b/src/persist/PString.hpp @@ -4,7 +4,6 @@ #include #include -#include "persist_struct_api.hpp" #include "pptr.hpp" using namespace pds; diff --git a/src/persist/Recoverable.cpp b/src/persist/api/Recoverable.cpp similarity index 86% rename from src/persist/Recoverable.cpp rename to src/persist/api/Recoverable.cpp index 9a5b7d2d..5e6cfd89 100644 --- a/src/persist/Recoverable.cpp +++ b/src/persist/api/Recoverable.cpp @@ -2,18 +2,17 @@ Recoverable::Recoverable(GlobalTestConfig* gtc){ // init Persistent allocator + // TODO: put this into EpochSys. Persistent::init(); - // init epoch system - pds::init(gtc); - // init main thread - pds::init_thread(0); - + local_descs = new padded[gtc->task_num]; - // TODO: replace this with _esys initialization. 
- _esys = pds::esys; + // init main thread + pds::EpochSys::init_thread(0); + // init epoch system + _esys = new pds::EpochSys(gtc); } Recoverable::~Recoverable(){ - pds::finalize(); + delete _esys; Persistent::finalize(); } void Recoverable::init_thread(GlobalTestConfig*, LocalTestConfig* ltc){ diff --git a/src/persist/Recoverable.hpp b/src/persist/api/Recoverable.hpp similarity index 98% rename from src/persist/Recoverable.hpp rename to src/persist/api/Recoverable.hpp index c319c004..41438fd8 100644 --- a/src/persist/Recoverable.hpp +++ b/src/persist/api/Recoverable.hpp @@ -3,7 +3,6 @@ #include "TestConfig.hpp" #include "EpochSys.hpp" -#include "pblk_naked.hpp" // TODO: report recover errors/exceptions class Recoverable{ @@ -51,9 +50,6 @@ class Recoverable{ MontageOpHolder(pds::EpochSys* _esys): esys_(_esys){ esys_->begin_op(); } - MontageOpHolder(): esys_(pds::esys){ - esys_->begin_op(); - } ~MontageOpHolder(){ esys_->end_op(); } @@ -67,9 +63,6 @@ class Recoverable{ MontageOpHolderReadOnly(pds::EpochSys* _esys): esys_(_esys){ esys_->begin_op(); } - MontageOpHolderReadOnly(): esys_(pds::esys){ - esys_->begin_op(); - } ~MontageOpHolderReadOnly(){ esys_->end_readonly_op(); } @@ -119,6 +112,9 @@ class Recoverable{ void flush(){ _esys->flush(); } + void simulate_crash(){ + _esys->simulate_crash(); + } pds::sc_desc_t* get_dcss_desc(){ return &local_descs[pds::EpochSys::tid].ui; diff --git a/src/persist/api/montage_global_api.cpp b/src/persist/api/montage_global_api.cpp new file mode 100644 index 00000000..29a29499 --- /dev/null +++ b/src/persist/api/montage_global_api.cpp @@ -0,0 +1,5 @@ +#include "montage_global_api.hpp" + +namespace pds{ + EpochSys* global_esys = nullptr; +} \ No newline at end of file diff --git a/src/persist/api/pblk_naked.hpp b/src/persist/api/montage_global_api.hpp similarity index 67% rename from src/persist/api/pblk_naked.hpp rename to src/persist/api/montage_global_api.hpp index 7a4588ed..21ca2795 100644 --- a/src/persist/api/pblk_naked.hpp 
+++ b/src/persist/api/montage_global_api.hpp @@ -1,5 +1,5 @@ -#ifndef PBLK_NAKED_HPP -#define PBLK_NAKED_HPP +#ifndef PERSIST_STRUCT_API_HPP +#define PERSIST_STRUCT_API_HPP #include "TestConfig.hpp" #include "EpochSys.hpp" @@ -12,7 +12,7 @@ namespace pds{ - extern EpochSys* esys; + extern EpochSys* global_esys; inline void init(GlobalTestConfig* gtc){ // here we assume that pds::init is called before pds::init_thread, hence the assertion. @@ -21,39 +21,39 @@ namespace pds{ if (EpochSys::tid == -1){ EpochSys::tid = 0; } - esys = new EpochSys(gtc); + global_esys = new EpochSys(gtc); } inline void init_thread(int id) { EpochSys::tid = id; - // esys->init_thread(id); + // global_esys->init_thread(id); } inline void finalize(){ - delete esys; - esys = nullptr; // for debugging. + delete global_esys; + global_esys = nullptr; // for debugging. } #define CHECK_EPOCH() ({\ - esys->check_epoch();}) + global_esys->check_epoch();}) // TODO: get rid of arguments in rideables. #define BEGIN_OP( ... ) ({ \ - esys->begin_op();}) + global_esys->begin_op();}) // end current operation by reducing transaction count of our epoch. // if our operation is already aborted, do nothing. #define END_OP ({\ - esys->end_op(); }) + global_esys->end_op(); }) // end current operation by reducing transaction count of our epoch. // if our operation is already aborted, do nothing. #define END_READONLY_OP ({\ - esys->end_readonly_op(); }) + global_esys->end_readonly_op(); }) - // end current epoch and not move towards next epoch in esys. + // end current epoch and not move towards next epoch in global_esys. #define ABORT_OP ({ \ - esys->abort_op(); }) + global_esys->abort_op(); }) #define BEGIN_OP_AUTOEND( ... ) \ Recoverable::MontageOpHolder __holder; @@ -62,39 +62,40 @@ namespace pds{ Recoverable::MontageOpHolderReadOnly __holder; #define PNEW(t, ...) 
({\ - esys->register_alloc_pblk(new t(__VA_ARGS__));}) + global_esys->register_alloc_pblk(new t(__VA_ARGS__));}) #define PDELETE(b) ({\ - esys->pdelete(b);}) + global_esys->pdelete(b);}) #define PRETIRE(b) ({\ - esys->pretire(b);}) + global_esys->pretire(b);}) #define PRECLAIM(b) ({\ - esys->preclaim(b);}) + global_esys->preclaim(b);}) // Hs: This is for "owned" PBlk's, currently not used in code base. // may be useful for "data" blocks like dynamically-sized // persistent String payload. #define PDELETE_DATA(b) ({\ - if (esys->sys_mode == ONLINE) {\ + if (global_esys->sys_mode == ONLINE) {\ delete(b);\ }}) inline std::unordered_map* recover(const int rec_thd=10){ - return esys->recover(rec_thd); + return global_esys->recover(rec_thd); } inline void flush(){ - esys->flush(); + global_esys->flush(); } inline void recover_mode(){ - esys->recover_mode(); + global_esys->recover_mode(); } inline void online_mode(){ - esys->online_mode(); + global_esys->online_mode(); } } -#endif + +#endif \ No newline at end of file diff --git a/src/persist/api/persist_struct_api.hpp b/src/persist/api/persist_struct_api.hpp deleted file mode 100644 index 2a451bf5..00000000 --- a/src/persist/api/persist_struct_api.hpp +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef PERSIST_STRUCT_API_HPP -#define PERSIST_STRUCT_API_HPP - -#include "pblk_naked.hpp" - -#endif \ No newline at end of file diff --git a/src/rideables/DLGraph.hpp b/src/rideables/DLGraph.hpp index f67edc55..c1f22154 100644 --- a/src/rideables/DLGraph.hpp +++ b/src/rideables/DLGraph.hpp @@ -19,7 +19,6 @@ #include #include #include "RCUTracker.hpp" -#include "persist_struct_api.hpp" #define DLGRAPH_FLUSH(addr) asm volatile ("clflush (%0)" :: "r"(addr)) #define DLGRAPH_FLUSHOPT(addr) asm volatile ("clflushopt (%0)" :: "r"(addr)) @@ -154,7 +153,6 @@ class DLGraph : public RGraph { // Thread-safe and does not leak edges void clear() { - // BEGIN_OP_AUTOEND(); for (size_t i = 0; i < numVertices; i++) { idxToVertex[i]->lock(); } diff --git 
a/src/rideables/HOHHashTable.hpp b/src/rideables/HOHHashTable.hpp index d50acec1..d9ba6052 100644 --- a/src/rideables/HOHHashTable.hpp +++ b/src/rideables/HOHHashTable.hpp @@ -3,7 +3,7 @@ #include "TestConfig.hpp" #include "RMap.hpp" -#include "persist_struct_api.hpp" +#include "Recoverable.hpp" #include "CustomTypes.hpp" #include "ConcurrentPrimitives.hpp" #include @@ -11,7 +11,7 @@ using namespace pds; template -class HOHHashTable : public RMap{ +class HOHHashTable : public RMap, public Recoverable{ public: class Payload : public PBlk{ @@ -25,45 +25,49 @@ class HOHHashTable : public RMap{ }; struct ListNode{ + HOHHashTable* ds; // Transient-to-persistent pointer Payload* payload = nullptr; // Transient-to-transient pointers ListNode* next = nullptr; std::mutex lock; ListNode(){} - ListNode(K key, V val){ - payload = PNEW(Payload, key, val); + ListNode(HOHHashTable* ds_, K key, V val): ds(ds_){ + payload = ds->pnew(key, val); } K get_key(){ assert(payload!=nullptr && "payload shouldn't be null"); - return (K)payload->get_key(); + return (K)payload->get_key(ds); } V get_val(){ assert(payload!=nullptr && "payload shouldn't be null"); - return (V)payload->get_val(); + return (V)payload->get_val(ds); } void set_val(V v){ assert(payload!=nullptr && "payload shouldn't be null"); - payload = payload->set_val(v); + payload = payload->set_val(ds, v); } ~ListNode(){ - PDELETE(payload); + ds->pdelete(payload); } }; std::hash hash_fn; padded* buckets[idxSize]; - HOHHashTable(GlobalTestConfig* gtc){ + HOHHashTable(GlobalTestConfig* gtc): Recoverable(gtc){ for(size_t i = 0; i < idxSize; i++){ buckets[i] = new padded(); } } + int recover(bool simulated){ + errexit("recover of HOHHashTable not implemented"); + } optional get(K key, int tid){ size_t idx=hash_fn(key)%idxSize; while(true){ - BEGIN_OP_AUTOEND(); + MontageOpHolder(this); try{ HOHLockHolder holder; holder.hold(&buckets[idx]->ui.lock); @@ -84,9 +88,9 @@ class HOHHashTable : public RMap{ optional put(K key, V val, int 
tid){ size_t idx=hash_fn(key)%idxSize; - ListNode* new_node = new ListNode(key, val); + ListNode* new_node = new ListNode(this, key, val); while(true){ - BEGIN_OP_AUTOEND(new_node->payload); + MontageOpHolder(this); try{ HOHLockHolder holder; holder.hold(&buckets[idx]->ui.lock); @@ -119,9 +123,9 @@ class HOHHashTable : public RMap{ bool insert(K key, V val, int tid){ size_t idx=hash_fn(key)%idxSize; - ListNode* new_node = new ListNode(key, val); + ListNode* new_node = new ListNode(this, key, val); while(true){ - BEGIN_OP_AUTOEND(new_node->payload); + MontageOpHolder(this); try{ HOHLockHolder holder; holder.hold(&buckets[idx]->ui.lock); @@ -158,7 +162,7 @@ class HOHHashTable : public RMap{ optional remove(K key, int tid){ size_t idx=hash_fn(key)%idxSize; while(true){ - BEGIN_OP_AUTOEND(); + MontageOpHolder(this); try{ HOHLockHolder holder; holder.hold(&buckets[idx]->ui.lock); diff --git a/src/rideables/MODHashTable.hpp b/src/rideables/MODHashTable.hpp index b2fd7ee7..1b1313fb 100644 --- a/src/rideables/MODHashTable.hpp +++ b/src/rideables/MODHashTable.hpp @@ -6,7 +6,6 @@ #include "TestConfig.hpp" #include "RMap.hpp" -#include "persist_struct_api.hpp" #include "CustomTypes.hpp" #include "ConcurrentPrimitives.hpp" #include diff --git a/src/rideables/MODQueue.hpp b/src/rideables/MODQueue.hpp index 050ad7ed..3ae6c28f 100644 --- a/src/rideables/MODQueue.hpp +++ b/src/rideables/MODQueue.hpp @@ -6,7 +6,6 @@ #include "TestConfig.hpp" #include "RQueue.hpp" -#include "persist_struct_api.hpp" #include "CustomTypes.hpp" #include "ConcurrentPrimitives.hpp" #include diff --git a/src/rideables/MontageGraph.hpp b/src/rideables/MontageGraph.hpp index 429ab55e..8638b339 100644 --- a/src/rideables/MontageGraph.hpp +++ b/src/rideables/MontageGraph.hpp @@ -17,7 +17,7 @@ #include #include #include -#include "persist_struct_api.hpp" +#include "Recoverable.hpp" #include #include @@ -176,7 +176,7 @@ class MontageGraph : public RGraph, public Recoverable{ { MontageOpHolder(this); - Relation* 
r = PNEW(Relation,v1, v2, weight); + Relation* r = pnew(v1, v2, weight); v1->adjacency_list.insert(r); v2->dest_list.insert(r); } @@ -358,7 +358,7 @@ class MontageGraph : public RGraph, public Recoverable{ begin = chrono::high_resolution_clock::now(); #pragma omp parallel { - pds::init_thread(omp_get_thread_num()); + Recoverable::init_thread(omp_get_thread_num()); #pragma omp for for (size_t i = 0; i < vertexVector.size(); ++i) { int id = vertexVector[i]->get_unsafe_id(this); @@ -389,7 +389,7 @@ class MontageGraph : public RGraph, public Recoverable{ #pragma omp parallel { int tid = omp_get_thread_num(); - pds::init_thread(tid); + Recoverable::init_thread(tid); #pragma omp for for (size_t i = 0; i < relationVector.size(); ++i) { Relation *e = relationVector[i]; diff --git a/src/rideables/MontageHashTable.hpp b/src/rideables/MontageHashTable.hpp index 8130e5e9..750d3649 100644 --- a/src/rideables/MontageHashTable.hpp +++ b/src/rideables/MontageHashTable.hpp @@ -3,7 +3,6 @@ #include "TestConfig.hpp" #include "RMap.hpp" -#include "persist_struct_api.hpp" #include "CustomTypes.hpp" #include "ConcurrentPrimitives.hpp" #include "Recoverable.hpp" diff --git a/src/rideables/MontageMSQueue.hpp b/src/rideables/MontageMSQueue.hpp index df0d76c7..8941876d 100644 --- a/src/rideables/MontageMSQueue.hpp +++ b/src/rideables/MontageMSQueue.hpp @@ -10,7 +10,6 @@ #include "RCUTracker.hpp" #include "CustomTypes.hpp" #include "Recoverable.hpp" -#include "persist_struct_api.hpp" using namespace pds; diff --git a/src/rideables/MontageNatarajanTree.hpp b/src/rideables/MontageNatarajanTree.hpp index 11271eca..8f112df7 100644 --- a/src/rideables/MontageNatarajanTree.hpp +++ b/src/rideables/MontageNatarajanTree.hpp @@ -10,7 +10,6 @@ #include "RCUTracker.hpp" #include "CustomTypes.hpp" #include "Recoverable.hpp" -#include "persist_struct_api.hpp" using namespace pds; @@ -38,7 +37,7 @@ class MontageNatarajanTree : public RMap, public Recoverable{ Payload* payload;// TODO: does it have to be 
atomic? Node(MontageNatarajanTree* ds_, K k, V val, Node* l=nullptr, Node* r=nullptr): - ds(ds_), level(finite),left(l),right(r),key(k),payload(PNEW(Payload, key, val)){ }; + ds(ds_), level(finite),left(l),right(r),key(k),payload(ds_->pnew(key, val)){ }; Node(MontageNatarajanTree* ds_, Level lev, Node* l=nullptr, Node* r=nullptr): ds(ds_), level(lev),left(l),right(r),key(),payload(nullptr){ assert(lev != finite && "use constructor with another signature for concrete nodes!"); diff --git a/src/rideables/MontageQueue.hpp b/src/rideables/MontageQueue.hpp index 8d034432..bda3f507 100644 --- a/src/rideables/MontageQueue.hpp +++ b/src/rideables/MontageQueue.hpp @@ -10,7 +10,7 @@ #include "RCUTracker.hpp" #include "CustomTypes.hpp" #include "Recoverable.hpp" -#include "persist_struct_api.hpp" +#include "Recoverable.hpp" #include using namespace pds; diff --git a/src/rideables/NVMGraph.hpp b/src/rideables/NVMGraph.hpp index ca170254..c042a654 100644 --- a/src/rideables/NVMGraph.hpp +++ b/src/rideables/NVMGraph.hpp @@ -17,7 +17,7 @@ #include #include #include "RCUTracker.hpp" -#include "persist_struct_api.hpp" +#include "Recoverable.hpp" using namespace pds; diff --git a/src/rideables/PriorityQueue.hpp b/src/rideables/PriorityQueue.hpp index ff11731d..d5a03d74 100644 --- a/src/rideables/PriorityQueue.hpp +++ b/src/rideables/PriorityQueue.hpp @@ -9,13 +9,13 @@ #include "ConcurrentPrimitives.hpp" #include "RCUTracker.hpp" #include "CustomTypes.hpp" -#include "persist_struct_api.hpp" +#include "Recoverable.hpp" #include "HeapQueue.hpp" using namespace pds; //Wentao: TODO to fix later template -class PriorityQueue : public HeapQueue{ +class PriorityQueue : public HeapQueue, public Recoverable{ public: class Payload : public PBlk{ GENERATE_FIELD(K, key, Payload); @@ -29,25 +29,27 @@ class PriorityQueue : public HeapQueue{ private: struct Node{ + PriorityQueue* ds; K key; Node* next; Payload* payload; Node():key(0), next(nullptr), payload(nullptr){}; - Node(K k, V val): key(k), 
next(nullptr), payload(PNEW(Payload, k, val)){}; + Node(PriorityQueue* ds_, K k, V val): + ds(ds_), key(k), next(nullptr), payload(ds->pnew(k, val)){}; V get_val(){ assert(payload != nullptr && "payload shouldn't be null"); - return (V)payload->get_val(); + return (V)payload->get_val(ds); } void set_sn(uint64_t s){ assert(payload != nullptr && "payload shouldn't be null"); - payload->set_sn(s); + payload->set_sn(ds,s); } ~Node(){ - PDELETE(payload); + ds->pdelete(payload); } }; @@ -70,7 +72,7 @@ class PriorityQueue : public HeapQueue{ template void PriorityQueue::enqueue(K key, V val, int tid){ - Node* new_node = new Node(key, val); + Node* new_node = new Node(this, key, val); std::unique_lock lock(mtx); if(head->next == nullptr){ head->next = new_node; @@ -88,9 +90,9 @@ void PriorityQueue::enqueue(K key, V val, int tid){ } } uint64_t s = global_sn.fetch_add(1); - BEGIN_OP(new_node->payload); + begin_op(); new_node->set_sn(s); - END_OP; + end_op(); } template @@ -102,10 +104,10 @@ optional PriorityQueue::dequeue(int tid){ }else{ Node* target = head->next; head->next = target->next; - BEGIN_OP(); + begin_op(); res = (V)target->payload->get_val(); delete(target); - END_OP; + end_op(); } return res; } diff --git a/src/rideables/TGraph.hpp b/src/rideables/TGraph.hpp index eda9aa2b..6eb0f304 100644 --- a/src/rideables/TGraph.hpp +++ b/src/rideables/TGraph.hpp @@ -18,7 +18,6 @@ #include #include #include "RCUTracker.hpp" -#include "persist_struct_api.hpp" using namespace pds; diff --git a/src/rideables/Toy.hpp b/src/rideables/Toy.hpp index f5a7c2f3..2713bac1 100644 --- a/src/rideables/Toy.hpp +++ b/src/rideables/Toy.hpp @@ -1,9 +1,9 @@ #ifndef TOY_HPP #define TOY_HPP -#include "TestConfig.hpp" -#include "persist_struct_api.hpp" #include +#include "TestConfig.hpp" + using namespace pds; @@ -25,9 +25,9 @@ class Toy : public Rideable{ } void run(int tid){ - Payload* p = PNEW(Payload, 1, 1); - BEGIN_OP(p, p); - END_OP; + // Payload* p = PNEW(Payload, 1, 1); + // BEGIN_OP(p, 
p); + // END_OP; } void run_parallel(int tid){ diff --git a/src/rideables/UnbalancedTree.hpp b/src/rideables/UnbalancedTree.hpp index 521e6597..cb835b47 100644 --- a/src/rideables/UnbalancedTree.hpp +++ b/src/rideables/UnbalancedTree.hpp @@ -3,7 +3,7 @@ #include "TestConfig.hpp" #include "RMap.hpp" -#include "persist_struct_api.hpp" +#include "Recoverable.hpp" #include "CustomTypes.hpp" #include #include @@ -11,7 +11,7 @@ using namespace pds; template -class UnbalancedTree : public RMap{ +class UnbalancedTree : public RMap, public Recoverable{ const optional NONE = {}; // to prevent compiler warnings. TODO: switch to std::optional<>. public: class Payload : public PBlk{ @@ -26,6 +26,7 @@ class UnbalancedTree : public RMap{ }; struct TreeNode{ + UnbalancedTree* ds; // Transient-to-persistent pointer Payload* payload = nullptr; // Transient-to-transient pointers @@ -34,43 +35,49 @@ class UnbalancedTree : public RMap{ std::mutex lock; - TreeNode(K key, V val){ - payload = PNEW(Payload, key, val); + TreeNode(UnbalancedTree* ds_, K key, V val): ds(ds_){ + payload = ds->pnew(key, val); } K get_key(){ assert(payload!=nullptr && "payload shouldn't be null"); - return (K)payload->get_key(); + return (K)payload->get_key(ds); } V get_val(){ assert(payload!=nullptr && "payload shouldn't be null"); - return (V)payload->get_val(); + return (V)payload->get_val(ds); } int get_deleted(){ assert(payload!=nullptr && "payload shouldn't be null"); - return (int)payload->get_deleted(); + return (int)payload->get_deleted(ds); } void set_val(V v){ assert(payload!=nullptr && "payload shouldn't be null"); - payload = payload->set_val(v); + payload = payload->set_val(ds, v); } void set_deleted(int d){ assert(payload!=nullptr && "payload shouldn't be null"); - payload = payload->set_deleted(d); + payload = payload->set_deleted(ds, d); } ~TreeNode(){ - PDELETE(payload); + ds->pdelete(payload); } }; TreeNode* root = nullptr; - UnbalancedTree(GlobalTestConfig* gtc){ + 
UnbalancedTree(GlobalTestConfig* gtc): Recoverable(gtc){ root = nullptr; } + int recover(bool simulated){ + errexit("recover of UnbalancedTree not implemented"); + return 0; + } + + optional get(K key, int tid){ while(true){ - BEGIN_OP_AUTOEND(); + MontageOpHolder(this); if (!root){ return NONE; } else { @@ -111,7 +118,7 @@ class UnbalancedTree : public RMap{ optional put(K key, V val, int tid){ while(true){ - BEGIN_OP_AUTOEND(); + MontageOpHolder(this); if (!root){ root = new TreeNode(key, val); } else { @@ -157,7 +164,7 @@ class UnbalancedTree : public RMap{ bool insert(K key, V val, int tid){ while(true){ - BEGIN_OP_AUTOEND(); + MontageOpHolder(this); if (!root){ root = new TreeNode(key, val); return true; @@ -207,7 +214,7 @@ class UnbalancedTree : public RMap{ optional remove(K key, int tid){ while(true){ - BEGIN_OP_AUTOEND(); + MontageOpHolder(this); if (!root){ return NONE; } else { diff --git a/src/tests/ChurnTest.hpp b/src/tests/ChurnTest.hpp index 7e57f503..bf606aa2 100644 --- a/src/tests/ChurnTest.hpp +++ b/src/tests/ChurnTest.hpp @@ -4,7 +4,6 @@ #include "TestConfig.hpp" #include "AllocatorMacro.hpp" #include "Persistent.hpp" -#include "persist_struct_api.hpp" class ChurnTest : public Test{ #ifdef PRONTO @@ -108,12 +107,6 @@ void ChurnTest::init(GlobalTestConfig* gtc){ assert(sigaction(SIGUSR1, &sa, NULL) == 0); #endif - // // init Persistent allocator - // Persistent::init(); - - // // init epoch system - // pds::init(gtc); - getRideable(gtc); if(gtc->verbose){ @@ -179,8 +172,6 @@ void ChurnTest::cleanup(GlobalTestConfig* gtc){ Savitar_core_finalize(); pthread_mutex_destroy(&snapshot_lock); #endif - // pds::finalize(); - // Persistent::finalize(); } #ifdef PRONTO diff --git a/src/tests/GraphRecoveryTest.hpp b/src/tests/GraphRecoveryTest.hpp index 281d74e1..db1ef45b 100644 --- a/src/tests/GraphRecoveryTest.hpp +++ b/src/tests/GraphRecoveryTest.hpp @@ -35,8 +35,6 @@ class GraphRecoveryTest : public Test { void init(GlobalTestConfig *gtc) { std::cout << 
"initializing" << std::endl; - // Persistent::init(); - // pds::init(gtc); pthread_barrier_init(&pthread_barrier, NULL, gtc->task_num); @@ -59,7 +57,6 @@ class GraphRecoveryTest : public Test { errexit("GraphRecoveryTest must be run on Recoverable type object."); } - // pds::init_thread(0); /* set interval to inf so this won't be killed by timeout */ gtc->interval = numeric_limits::max(); std::cout << "Finished init func" << std::endl; @@ -102,7 +99,6 @@ class GraphRecoveryTest : public Test { pthread_barrier_wait(&pthread_barrier); auto begin = chrono::high_resolution_clock::now(); g->init_thread(gtc, ltc); - // pds::init_thread(ltc->tid); // Loop through the files in parallel int num_threads = gtc->task_num; int tid = ltc->tid; @@ -128,10 +124,10 @@ class GraphRecoveryTest : public Test { int tid = ltc->tid; if (tid == 0){ - pds::flush(); + rec->flush(); } pthread_barrier_wait(&pthread_barrier); - pds::esys->simulate_crash(); + rec->simulate_crash(); if (tid == 0){ std::cout<<"crashed."< #include #include "RGraph.hpp" +#include "Recoverable.hpp" #include -void ErdosRenyi(RGraph *g, int numVertices, double p=0.5) { - size_t x = numVertices; - size_t numEdges = (x * x) * p; - #pragma omp parallel - { - std::mt19937_64 gen_p(std::chrono::system_clock::now().time_since_epoch().count() + omp_get_thread_num()); - pds::init_thread(omp_get_thread_num()); - #pragma omp for - for (size_t i = 0; i < numEdges; i++) { - g->add_edge(gen_p() % numVertices, gen_p() % numVertices, 1); - } - } -} +// void ErdosRenyi(RGraph *g, int numVertices, double p=0.5) { +// size_t x = numVertices; +// size_t numEdges = (x * x) * p; +// #pragma omp parallel +// { +// std::mt19937_64 gen_p(std::chrono::system_clock::now().time_since_epoch().count() + omp_get_thread_num()); +// Recoverable::init_thread(omp_get_thread_num()); +// #pragma omp for +// for (size_t i = 0; i < numEdges; i++) { +// g->add_edge(gen_p() % numVertices, gen_p() % numVertices, 1); +// } +// } +// } class GraphTest : public 
Test { public: diff --git a/src/tests/HeapChurnTest.hpp b/src/tests/HeapChurnTest.hpp index 23c53c8b..b16a96a7 100644 --- a/src/tests/HeapChurnTest.hpp +++ b/src/tests/HeapChurnTest.hpp @@ -6,7 +6,6 @@ #include "AllocatorMacro.hpp" #include "Persistent.hpp" -#include "persist_struct_api.hpp" #include "TestConfig.hpp" #include "HeapQueue.hpp" diff --git a/src/tests/QueueChurnTest.hpp b/src/tests/QueueChurnTest.hpp index 11cc03e0..27008f09 100644 --- a/src/tests/QueueChurnTest.hpp +++ b/src/tests/QueueChurnTest.hpp @@ -7,7 +7,6 @@ #include "AllocatorMacro.hpp" #include "Persistent.hpp" -#include "persist_struct_api.hpp" #include "TestConfig.hpp" #include "RQueue.hpp" diff --git a/src/tests/QueueTest.hpp b/src/tests/QueueTest.hpp index 63af25ef..26c4f6e0 100644 --- a/src/tests/QueueTest.hpp +++ b/src/tests/QueueTest.hpp @@ -7,7 +7,6 @@ #include "AllocatorMacro.hpp" #include "Persistent.hpp" -#include "persist_struct_api.hpp" #include "TestConfig.hpp" #include "RQueue.hpp" #include diff --git a/src/tests/RecoverVerifyTest.hpp b/src/tests/RecoverVerifyTest.hpp index b2f5be98..b36ab779 100644 --- a/src/tests/RecoverVerifyTest.hpp +++ b/src/tests/RecoverVerifyTest.hpp @@ -9,7 +9,6 @@ #include "TestConfig.hpp" #include "AllocatorMacro.hpp" #include "Persistent.hpp" -#include "persist_struct_api.hpp" #include "Recoverable.hpp" template @@ -34,7 +33,6 @@ class RecoverVerifyTest : public Test{ template void RecoverVerifyTest::parInit(GlobalTestConfig* gtc, LocalTestConfig* ltc){ m->init_thread(gtc, ltc); - // pds::init_thread(ltc->tid); } template @@ -43,10 +41,8 @@ void RecoverVerifyTest::init(GlobalTestConfig* gtc){ errexit("RecoverVerifyTest only runs on single thread."); } // // init Persistent allocator - // Persistent::init(); // // init epoch system - // pds::init(gtc); Rideable* ptr = gtc->allocRideable(); m = dynamic_cast*>(ptr); @@ -113,9 +109,9 @@ int RecoverVerifyTest::execute(GlobalTestConfig* gtc, LocalTestConfig* ltc) auto dur_ms = 
std::chrono::duration_cast(dur).count(); std::cout<<"insert finished. Spent "<< dur_ms << "ms" <flush(); std::cout<<"epochsys flushed."<simulate_crash(); + rec->simulate_crash(); std::cout<<"crashed."<recover(true); std::cout<<"recover returned."< Date: Fri, 13 Nov 2020 17:43:12 -0500 Subject: [PATCH 37/56] clean up --- Makefile | 2 +- src/persist/api/Recoverable.cpp | 12 +++++++----- src/tests/GraphTest.hpp | 3 --- src/tests/HeapChurnTest.hpp | 6 ------ src/tests/KVTest.hpp | 6 ------ src/tests/MapTest.hpp | 8 -------- src/tests/QueueChurnTest.hpp | 7 ------- src/tests/QueueTest.hpp | 7 ------- src/tests/SetChurnTest.hpp | 1 - src/tests/TGraphConstructionTest.hpp | 4 ---- src/tests/ToyTest.hpp | 2 -- src/tests/YCSBTest.hpp | 6 ------ unit_test/dcss.cpp | 1 + 13 files changed, 9 insertions(+), 56 deletions(-) diff --git a/Makefile b/Makefile index dda58bfb..67fcba62 100644 --- a/Makefile +++ b/Makefile @@ -118,7 +118,7 @@ ARCHIVEDIR:=./lib # -since we do pattern matching between this list and the # source files, the file path specified must be the same # type (absolute or relative) -EXECUTABLES:= ./src/main.cpp +EXECUTABLES:= ./src/main.cpp #./unit_test/dcss.cpp # A list of source files contained in the # source directory to exclude from the build diff --git a/src/persist/api/Recoverable.cpp b/src/persist/api/Recoverable.cpp index 5e6cfd89..d4402486 100644 --- a/src/persist/api/Recoverable.cpp +++ b/src/persist/api/Recoverable.cpp @@ -27,15 +27,17 @@ namespace pds{ void sc_desc_t::try_complete(Recoverable* ds, uint64_t addr){ nbptr_t _d = nbptr.load(); - int ret = 0; + // int ret = 0; if(_d.val!=addr) return; if(in_progress(_d)){ if(ds->check_epoch(cas_epoch)){ - ret = 2; - ret |= commit(_d); + // ret = 2; + // ret |= commit(_d); + commit(_d); } else { - ret = 4; - ret |= abort(_d); + // ret = 4; + // ret |= abort(_d); + abort(_d); } } cleanup(_d); diff --git a/src/tests/GraphTest.hpp b/src/tests/GraphTest.hpp index c9b9e9e2..9b56a9bc 100644 --- 
a/src/tests/GraphTest.hpp +++ b/src/tests/GraphTest.hpp @@ -47,8 +47,6 @@ class GraphTest : public Test { } void init(GlobalTestConfig *gtc) { - // Persistent::init(); - // pds::init(gtc); uint64_t new_ops = total_ops / gtc->task_num; thd_ops = new uint64_t[gtc->task_num]; for (int i = 0; itask_num; i++) { @@ -98,7 +96,6 @@ class GraphTest : public Test { } void parInit(GlobalTestConfig *gtc, LocalTestConfig *ltc) { - // pds::init_thread(ltc->tid); g->init_thread(gtc, ltc); size_t x = max_verts; size_t numEdges = (x * x) * 0.5; diff --git a/src/tests/HeapChurnTest.hpp b/src/tests/HeapChurnTest.hpp index b16a96a7..1862c2ef 100644 --- a/src/tests/HeapChurnTest.hpp +++ b/src/tests/HeapChurnTest.hpp @@ -29,15 +29,9 @@ class HeapChurnTest : public Test{ void parInit(GlobalTestConfig* gtc, LocalTestConfig* ltc){ q->init_thread(gtc, ltc); - // pds::init_thread(ltc->tid); } void init(GlobalTestConfig* gtc){ - // // init Persistent allocator - // Persistent::init(); - - // // init epoch system - // pds::init(gtc); getRideable(gtc); diff --git a/src/tests/KVTest.hpp b/src/tests/KVTest.hpp index e1a11ad5..13fcc6ea 100644 --- a/src/tests/KVTest.hpp +++ b/src/tests/KVTest.hpp @@ -40,14 +40,8 @@ class KVTest : public Test{ } void parInit(GlobalTestConfig* gtc, LocalTestConfig* ltc){ m->init_thread(gtc, ltc); - // pds::init_thread(ltc->tid); } void init(GlobalTestConfig* gtc){ - // // init Persistent allocator - // Persistent::init(); - - // // init epoch system - // pds::init(gtc); if(gtc->checkEnv("ValueSize")){ val_size = atoi((gtc->getEnv("ValueSize")).c_str()); diff --git a/src/tests/MapTest.hpp b/src/tests/MapTest.hpp index 9c4a4b0e..25f5d291 100644 --- a/src/tests/MapTest.hpp +++ b/src/tests/MapTest.hpp @@ -97,7 +97,6 @@ class MapTest : public Test{ if(ltc->tid==0) doPrefill(gtc); #endif - // pds::init_thread(ltc->tid); } void init(GlobalTestConfig* gtc){ #ifdef PRONTO @@ -115,11 +114,6 @@ class MapTest : public Test{ assert(sigaction(SIGSEGV, &sa, NULL) == 0); 
assert(sigaction(SIGUSR1, &sa, NULL) == 0); #endif - // // init Persistent allocator - // Persistent::init(); - - // // init epoch system - // pds::init(gtc); getRideable(gtc); @@ -177,7 +171,6 @@ class MapTest : public Test{ } } void doPrefill(GlobalTestConfig* gtc){ - // pds::init_thread(0); if (this->prefill > 0){ /* Wentao: * to avoid repeated k during prefilling, we instead @@ -261,7 +254,6 @@ inline std::string MapTest::fromInt(uint64_t v){ template<> inline void MapTest::doPrefill(GlobalTestConfig* gtc){ // randomly prefill until specified amount of keys are successfully inserted - // pds::init_thread(0); if (this->prefill > 0){ std::mt19937_64 gen_k(0); // int stride = this->range/this->prefill; diff --git a/src/tests/QueueChurnTest.hpp b/src/tests/QueueChurnTest.hpp index 27008f09..21a32fca 100644 --- a/src/tests/QueueChurnTest.hpp +++ b/src/tests/QueueChurnTest.hpp @@ -69,7 +69,6 @@ class QueueChurnTest : public Test{ if(ltc->tid==0) doPrefill(gtc); #endif - // pds::init_thread(ltc->tid); } void init(GlobalTestConfig* gtc){ @@ -88,11 +87,6 @@ class QueueChurnTest : public Test{ assert(sigaction(SIGSEGV, &sa, NULL) == 0); assert(sigaction(SIGUSR1, &sa, NULL) == 0); #endif - // // init Persistent allocator - // Persistent::init(); - - // // init epoch system - // pds::init(gtc); if(gtc->checkEnv("ValueSize")){ val_size = atoi((gtc->getEnv("ValueSize")).c_str()); @@ -176,7 +170,6 @@ class QueueChurnTest : public Test{ } } void doPrefill(GlobalTestConfig* gtc){ - // pds::init_thread(0); if (this->prefill > 0){ int i = 0; while(iprefill){ diff --git a/src/tests/QueueTest.hpp b/src/tests/QueueTest.hpp index 26c4f6e0..a24df93e 100644 --- a/src/tests/QueueTest.hpp +++ b/src/tests/QueueTest.hpp @@ -87,7 +87,6 @@ class QueueTest : public Test{ if(ltc->tid==0) doPrefill(gtc,0); #endif - // pds::init_thread(ltc->tid); } void init(GlobalTestConfig* gtc){ @@ -106,11 +105,6 @@ class QueueTest : public Test{ assert(sigaction(SIGSEGV, &sa, NULL) == 0); 
assert(sigaction(SIGUSR1, &sa, NULL) == 0); #endif - // // init Persistent allocator - // Persistent::init(); - - // // init epoch system - // pds::init(gtc); if(gtc->checkEnv("ValueSize")){ val_size = atoi((gtc->getEnv("ValueSize")).c_str()); @@ -206,7 +200,6 @@ class QueueTest : public Test{ } } void doPrefill(GlobalTestConfig* gtc, int tid){ - // pds::init_thread(tid); if(this->prefill > 0){ int i = 0; for(i = 0; i < this->prefill; i++){ diff --git a/src/tests/SetChurnTest.hpp b/src/tests/SetChurnTest.hpp index 92a87dd8..bafca770 100644 --- a/src/tests/SetChurnTest.hpp +++ b/src/tests/SetChurnTest.hpp @@ -28,7 +28,6 @@ class SetChurnTest : public ChurnTest{ } } void doPrefill(GlobalTestConfig* gtc){ - // pds::init_thread(0); // prefill deterministically: if (this->prefill > 0){ /* Wentao: diff --git a/src/tests/TGraphConstructionTest.hpp b/src/tests/TGraphConstructionTest.hpp index d4ccbe1c..71607c76 100644 --- a/src/tests/TGraphConstructionTest.hpp +++ b/src/tests/TGraphConstructionTest.hpp @@ -34,8 +34,6 @@ class TGraphConstructionTest : public Test { void init(GlobalTestConfig *gtc) { std::cout << "initializing" << std::endl; - // Persistent::init(); - // pds::init(gtc); uint64_t new_ops = total_ops / gtc->task_num; thd_ops = new uint64_t[gtc->task_num]; for (auto i = 0; itask_num; i++) { @@ -51,7 +49,6 @@ class TGraphConstructionTest : public Test { errexit("TGraphConstructionTest must be run on RGraph type object."); } - // pds::init_thread(0); /* set interval to inf so this won't be killed by timeout */ gtc->interval = numeric_limits::max(); std::cout << "Finished init func" << std::endl; @@ -81,7 +78,6 @@ class TGraphConstructionTest : public Test { return 0; } void parInit(GlobalTestConfig *gtc, LocalTestConfig *ltc) { - // pds::init_thread(ltc->tid); g->init_thread(gtc, ltc); // Loop through the files in parallel int num_threads = gtc->task_num; diff --git a/src/tests/ToyTest.hpp b/src/tests/ToyTest.hpp index 23625c6b..44840e65 100644 --- 
a/src/tests/ToyTest.hpp +++ b/src/tests/ToyTest.hpp @@ -17,7 +17,6 @@ class ToyTest : public Test{ // if (!t){ // errexit("ToyTest must be run on Toy."); // } - // pds::init(gtc); Persistent::init(); } @@ -28,7 +27,6 @@ class ToyTest : public Test{ // called by all threads in parallel void parInit(GlobalTestConfig* gtc, LocalTestConfig* ltc){ t->init_thread(gtc, ltc); - // pds::init_thread(ltc->tid); } // runs the test // returns number of operations completed by that thread diff --git a/src/tests/YCSBTest.hpp b/src/tests/YCSBTest.hpp index b7640265..ed208863 100644 --- a/src/tests/YCSBTest.hpp +++ b/src/tests/YCSBTest.hpp @@ -44,14 +44,8 @@ class YCSBTest : public Test{ } void parInit(GlobalTestConfig* gtc, LocalTestConfig* ltc){ m->init_thread(gtc, ltc); - // pds::init_thread(ltc->tid); } void init(GlobalTestConfig* gtc){ - // // init Persistent allocator - // Persistent::init(); - - // // init epoch system - // pds::init(gtc); if(gtc->checkEnv("ValueSize")){ val_size = atoi((gtc->getEnv("ValueSize")).c_str()); diff --git a/unit_test/dcss.cpp b/unit_test/dcss.cpp index d4549f16..894fb859 100644 --- a/unit_test/dcss.cpp +++ b/unit_test/dcss.cpp @@ -1,6 +1,7 @@ #include "Persistent.hpp" #include "Recoverable.hpp" #include "TestConfig.hpp" +#include "montage_global_api.hpp" #include #include #include From 5734b8b3882eae050e2832edb380a5aeb768e92a Mon Sep 17 00:00:00 2001 From: Wentao Cai Date: Sat, 14 Nov 2020 00:21:53 -0500 Subject: [PATCH 38/56] replace nbptr with lin_var --- src/persist/PersistStructs.hpp | 108 +++++++++++++-------------- src/persist/api/Recoverable.cpp | 2 +- src/persist/api/Recoverable.hpp | 74 +++++++++--------- src/rideables/MontageLfHashTable.hpp | 38 +++++----- src/rideables/MontageMSQueue.hpp | 8 +- unit_test/dcss.cpp | 2 +- 6 files changed, 116 insertions(+), 116 deletions(-) diff --git a/src/persist/PersistStructs.hpp b/src/persist/PersistStructs.hpp index 1e2a804a..c8a2b174 100644 --- a/src/persist/PersistStructs.hpp +++ 
b/src/persist/PersistStructs.hpp @@ -109,29 +109,29 @@ namespace pds{ * We provides following double-compare-single-swap (DCSS) API for * nonblocking data structures to use: * - * atomic_nbptr_t: atomic double word for storing pointers + * atomic_lin_var: atomic double word for storing pointers * that point to nodes, which link payloads in. It contains following * functions: * * store(T val): * store 64-bit long data without sync; cnt doesn't increment * - * store(nbptr_t d): store(d.val) + * store(lin_var d): store(d.val) * - * nbptr_t load(): - * load nbptr without verifying epoch + * lin_var load(): + * load var without verifying epoch * - * nbptr_t load_verify(): - * load nbptr and verify epoch, used as lin point; + * lin_var load_verify(): + * load var and verify epoch, used as lin point; * for invisible reads this won't verify epoch * - * bool CAS(nbptr_t expected, T desired): + * bool CAS(lin_var expected, T desired): * CAS in desired value and increment cnt if expected - * matches current nbptr + * matches current var * - * bool CAS_verify(nbptr_t expected, T desired): + * bool CAS_verify(lin_var expected, T desired): * CAS in desired value and increment cnt if expected - * matches current nbptr and global epoch doesn't change + * matches current var and global epoch doesn't change * since BEGIN_OP */ @@ -144,10 +144,10 @@ namespace pds{ struct sc_desc_t; template - class atomic_nbptr_t; - class nbptr_t{ + class atomic_lin_var; + class lin_var{ template - friend class atomic_nbptr_t; + friend class atomic_lin_var; inline bool is_desc() const { return (cnt & 3UL) == 1UL; } @@ -163,115 +163,115 @@ namespace pds{ static_assert(sizeof(T) == sizeof(uint64_t), "sizes do not match"); return reinterpret_cast(val); } - nbptr_t(uint64_t v, uint64_t c) : val(v), cnt(c) {}; - nbptr_t() : nbptr_t(0, 0) {}; + lin_var(uint64_t v, uint64_t c) : val(v), cnt(c) {}; + lin_var() : lin_var(0, 0) {}; - inline bool operator==(const nbptr_t & b) const{ + inline bool 
operator==(const lin_var & b) const{ return val==b.val && cnt==b.cnt; } - inline bool operator!=(const nbptr_t & b) const{ + inline bool operator!=(const lin_var & b) const{ return !operator==(b); } }__attribute__((aligned(16))); template - class atomic_nbptr_t{ + class atomic_lin_var{ static_assert(sizeof(T) == sizeof(uint64_t), "sizes do not match"); public: - // for cnt in nbptr: + // for cnt in var: // desc: ....01 // real val: ....00 - std::atomic nbptr; - nbptr_t load(Recoverable* ds); - nbptr_t load_verify(Recoverable* ds); + std::atomic var; + lin_var load(Recoverable* ds); + lin_var load_verify(Recoverable* ds); inline T load_val(Recoverable* ds){ return reinterpret_cast(load().val); } - bool CAS_verify(Recoverable* ds, nbptr_t expected, const T& desired); - inline bool CAS_verify(nbptr_t expected, const nbptr_t& desired){ + bool CAS_verify(Recoverable* ds, lin_var expected, const T& desired); + inline bool CAS_verify(lin_var expected, const lin_var& desired){ return CAS_verify(expected,desired.get_val()); } // CAS doesn't check epoch nor cnt - bool CAS(nbptr_t expected, const T& desired); - inline bool CAS(nbptr_t expected, const nbptr_t& desired){ + bool CAS(lin_var expected, const T& desired); + inline bool CAS(lin_var expected, const lin_var& desired){ return CAS(expected,desired.get_val()); } void store(const T& desired); - inline void store(const nbptr_t& desired){ + inline void store(const lin_var& desired){ store(desired.get_val()); } - atomic_nbptr_t(const T& v) : nbptr(nbptr_t(reinterpret_cast(v), 0)){}; - atomic_nbptr_t() : atomic_nbptr_t(T()){}; + atomic_lin_var(const T& v) : var(lin_var(reinterpret_cast(v), 0)){}; + atomic_lin_var() : atomic_lin_var(T()){}; }; struct sc_desc_t{ private: - // for cnt in nbptr: + // for cnt in var: // in progress: ....01 // committed: ....10 // aborted: ....11 - std::atomic nbptr; + std::atomic var; const uint64_t old_val; const uint64_t new_val; const uint64_t cas_epoch; - inline bool abort(nbptr_t _d){ + 
inline bool abort(lin_var _d){ // bring cnt from ..01 to ..11 - nbptr_t expected (_d.val, (_d.cnt & ~0x3UL) | 1UL); // in progress - nbptr_t desired(expected); + lin_var expected (_d.val, (_d.cnt & ~0x3UL) | 1UL); // in progress + lin_var desired(expected); desired.cnt += 2; - return nbptr.compare_exchange_strong(expected, desired); + return var.compare_exchange_strong(expected, desired); } - inline bool commit(nbptr_t _d){ + inline bool commit(lin_var _d){ // bring cnt from ..01 to ..10 - nbptr_t expected (_d.val, (_d.cnt & ~0x3UL) | 1UL); // in progress - nbptr_t desired(expected); + lin_var expected (_d.val, (_d.cnt & ~0x3UL) | 1UL); // in progress + lin_var desired(expected); desired.cnt += 1; - return nbptr.compare_exchange_strong(expected, desired); + return var.compare_exchange_strong(expected, desired); } - inline bool committed(nbptr_t _d) const { + inline bool committed(lin_var _d) const { return (_d.cnt & 0x3UL) == 2UL; } - inline bool in_progress(nbptr_t _d) const { + inline bool in_progress(lin_var _d) const { return (_d.cnt & 0x3UL) == 1UL; } - inline bool match(nbptr_t old_d, nbptr_t new_d) const { + inline bool match(lin_var old_d, lin_var new_d) const { return ((old_d.cnt & ~0x3UL) == (new_d.cnt & ~0x3UL)) && (old_d.val == new_d.val); } - void cleanup(nbptr_t old_d){ + void cleanup(lin_var old_d){ // must be called after desc is aborted or committed - nbptr_t new_d = nbptr.load(); + lin_var new_d = var.load(); if(!match(old_d,new_d)) return; assert(!in_progress(new_d)); - nbptr_t expected(reinterpret_cast(this),(new_d.cnt & ~0x3UL) | 1UL); + lin_var expected(reinterpret_cast(this),(new_d.cnt & ~0x3UL) | 1UL); if(committed(new_d)) { // bring cnt from ..10 to ..00 - reinterpret_cast*>( - new_d.val)->nbptr.compare_exchange_strong( + reinterpret_cast*>( + new_d.val)->var.compare_exchange_strong( expected, - nbptr_t(new_val,new_d.cnt + 2)); + lin_var(new_val,new_d.cnt + 2)); } else { //aborted // bring cnt from ..11 to ..00 - reinterpret_cast*>( - 
new_d.val)->nbptr.compare_exchange_strong( + reinterpret_cast*>( + new_d.val)->var.compare_exchange_strong( expected, - nbptr_t(old_val,new_d.cnt + 1)); + lin_var(old_val,new_d.cnt + 1)); } } public: inline bool committed() const { - return committed(nbptr.load()); + return committed(var.load()); } inline bool in_progress() const { - return in_progress(nbptr.load()); + return in_progress(var.load()); } // TODO: try_complete used to be inline. Try to make it inline again when refactoring is finished. void try_complete(Recoverable* ds, uint64_t addr); sc_desc_t( uint64_t c, uint64_t a, uint64_t o, uint64_t n, uint64_t e) : - nbptr(nbptr_t(a,c)), old_val(o), new_val(n), cas_epoch(e){}; + var(lin_var(a,c)), old_val(o), new_val(n), cas_epoch(e){}; sc_desc_t() : sc_desc_t(0,0,0,0,0){}; }; } diff --git a/src/persist/api/Recoverable.cpp b/src/persist/api/Recoverable.cpp index d4402486..8d5c865f 100644 --- a/src/persist/api/Recoverable.cpp +++ b/src/persist/api/Recoverable.cpp @@ -26,7 +26,7 @@ void Recoverable::init_thread(int tid){ namespace pds{ void sc_desc_t::try_complete(Recoverable* ds, uint64_t addr){ - nbptr_t _d = nbptr.load(); + lin_var _d = var.load(); // int ret = 0; if(_d.val!=addr) return; if(in_progress(_d)){ diff --git a/src/persist/api/Recoverable.hpp b/src/persist/api/Recoverable.hpp index 41438fd8..138cc8cd 100644 --- a/src/persist/api/Recoverable.hpp +++ b/src/persist/api/Recoverable.hpp @@ -7,8 +7,8 @@ class Recoverable{ // TODO: get rid of these. 
- template friend class pds::atomic_nbptr_t; - friend class pds::nbptr_t; + template friend class pds::atomic_lin_var; + friend class pds::lin_var; pds::EpochSys* _esys = nullptr; @@ -188,36 +188,36 @@ T* TOKEN_CONCAT(set_, n)(Recoverable* ds, int i, t TOKEN_CONCAT(tmp_, n)){\ namespace pds{ template - void atomic_nbptr_t::store(const T& desired){ + void atomic_lin_var::store(const T& desired){ // this function must be used only when there's no data race - nbptr_t r = nbptr.load(); - nbptr_t new_r(reinterpret_cast(desired),r.cnt); - nbptr.store(new_r); + lin_var r = var.load(); + lin_var new_r(reinterpret_cast(desired),r.cnt); + var.store(new_r); } #ifdef VISIBLE_READ // implementation of load and cas for visible reads template - nbptr_t atomic_nbptr_t::load(Recoverable* ds){ - nbptr_t r; + lin_var atomic_lin_var::load(Recoverable* ds){ + lin_var r; while(true){ - r = nbptr.load(); - nbptr_t ret(r.val,r.cnt+1); - if(nbptr.compare_exchange_strong(r, ret)) + r = var.load(); + lin_var ret(r.val,r.cnt+1); + if(var.compare_exchange_strong(r, ret)) return ret; } } template - nbptr_t atomic_nbptr_t::load_verify(Recoverable* ds){ + lin_var atomic_lin_var::load_verify(Recoverable* ds){ assert(ds->_esys->epochs[pds::EpochSys::tid].ui != NULL_EPOCH); - nbptr_t r; + lin_var r; while(true){ - r = nbptr.load(); + r = var.load(); if(ds->_esys->check_epoch(ds->_esys->epochs[pds::EpochSys::tid].ui)){ - nbptr_t ret(r.val,r.cnt+1); - if(nbptr.compare_exchange_strong(r, ret)){ + lin_var ret(r.val,r.cnt+1); + if(var.compare_exchange_strong(r, ret)){ return r; } } else { @@ -227,30 +227,30 @@ namespace pds{ } template - bool atomic_nbptr_t::CAS_verify(Recoverable* ds, nbptr_t expected, const T& desired){ + bool atomic_lin_var::CAS_verify(Recoverable* ds, lin_var expected, const T& desired){ assert(ds->_esys->epochs[pds::EpochSys::tid].ui != NULL_EPOCH); if(ds->_esys->check_epoch(ds->_esys->epochs[pds::EpochSys::tid].ui)){ - nbptr_t new_r(reinterpret_cast(desired),expected.cnt+1); - 
return nbptr.compare_exchange_strong(expected, new_r); + lin_var new_r(reinterpret_cast(desired),expected.cnt+1); + return var.compare_exchange_strong(expected, new_r); } else { return false; } } template - bool atomic_nbptr_t::CAS(nbptr_t expected, const T& desired){ - nbptr_t new_r(reinterpret_cast(desired),expected.cnt+1); - return nbptr.compare_exchange_strong(expected, new_r); + bool atomic_lin_var::CAS(lin_var expected, const T& desired){ + lin_var new_r(reinterpret_cast(desired),expected.cnt+1); + return var.compare_exchange_strong(expected, new_r); } #else /* !VISIBLE_READ */ /* implementation of load and cas for invisible reads */ template - nbptr_t atomic_nbptr_t::load(Recoverable* ds){ - nbptr_t r; + lin_var atomic_lin_var::load(Recoverable* ds){ + lin_var r; do { - r = nbptr.load(); + r = var.load(); if(r.is_desc()) { sc_desc_t* D = r.get_desc(); D->try_complete(ds, reinterpret_cast(this)); @@ -260,7 +260,7 @@ namespace pds{ } template - nbptr_t atomic_nbptr_t::load_verify(Recoverable* ds){ + lin_var atomic_lin_var::load_verify(Recoverable* ds){ // invisible read doesn't need to verify epoch even if it's a // linearization point // this saves users from catching EpochVerifyException @@ -271,13 +271,13 @@ namespace pds{ // extern std::atomic total_cnt; template - bool atomic_nbptr_t::CAS_verify(Recoverable* ds, nbptr_t expected, const T& desired){ + bool atomic_lin_var::CAS_verify(Recoverable* ds, lin_var expected, const T& desired){ assert(ds->_esys->epochs[pds::EpochSys::tid].ui != NULL_EPOCH); // total_cnt.fetch_add(1); #ifdef USE_TSX unsigned status = _xbegin(); if (status == _XBEGIN_STARTED) { - nbptr_t r = nbptr.load(); + lin_var r = var.load(); if(!r.is_desc()){ if( r.cnt!=expected.cnt || r.val!=expected.val || @@ -285,8 +285,8 @@ namespace pds{ _xend(); return false; } else { - nbptr_t new_r (reinterpret_cast(desired), r.cnt+4); - nbptr.store(new_r); + lin_var new_r (reinterpret_cast(desired), r.cnt+4); + var.store(new_r); _xend(); return true; } 
@@ -302,7 +302,7 @@ namespace pds{ #endif // txn fails; fall back routine // abort_cnt.fetch_add(1); - nbptr_t r = nbptr.load(); + lin_var r = var.load(); if(r.is_desc()){ sc_desc_t* D = r.get_desc(); D->try_complete(ds, reinterpret_cast(this)); @@ -313,7 +313,7 @@ namespace pds{ return false; } } - // now r.cnt must be ..00, and r.cnt+1 is ..01, which means "nbptr + // now r.cnt must be ..00, and r.cnt+1 is ..01, which means "var // contains a descriptor" and "a descriptor is in progress" assert((r.cnt & 3UL) == 0UL); new (ds->get_dcss_desc()) sc_desc_t(r.cnt+1, @@ -321,8 +321,8 @@ namespace pds{ expected.val, reinterpret_cast(desired), ds->_esys->epochs[pds::EpochSys::tid].ui); - nbptr_t new_r(reinterpret_cast(ds->get_dcss_desc()), r.cnt+1); - if(!nbptr.compare_exchange_strong(r,new_r)){ + lin_var new_r(reinterpret_cast(ds->get_dcss_desc()), r.cnt+1); + if(!var.compare_exchange_strong(r,new_r)){ return false; } ds->get_dcss_desc()->try_complete(ds, reinterpret_cast(this)); @@ -331,11 +331,11 @@ namespace pds{ } template - bool atomic_nbptr_t::CAS(nbptr_t expected, const T& desired){ + bool atomic_lin_var::CAS(lin_var expected, const T& desired){ // CAS doesn't check epoch; just cas ptr to desired, with cnt+=4 assert(!expected.is_desc()); - nbptr_t new_r(reinterpret_cast(desired), expected.cnt + 4); - if(!nbptr.compare_exchange_strong(expected,new_r)){ + lin_var new_r(reinterpret_cast(desired), expected.cnt + 4); + if(!var.compare_exchange_strong(expected,new_r)){ return false; } return true; diff --git a/src/rideables/MontageLfHashTable.hpp b/src/rideables/MontageLfHashTable.hpp index 5387aa34..12f587f1 100644 --- a/src/rideables/MontageLfHashTable.hpp +++ b/src/rideables/MontageLfHashTable.hpp @@ -34,7 +34,7 @@ class MontageLfHashTable : public RMap, Recoverable{ struct Node; struct MarkPtr{ - atomic_nbptr_t ptr; + atomic_lin_var ptr; MarkPtr(Node* n):ptr(n){}; MarkPtr():ptr(nullptr){}; }; @@ -67,21 +67,21 @@ class MontageLfHashTable : public RMap, Recoverable{ 
std::hash hash_fn; const int idxSize=1000000;//number of buckets for hash table padded* buckets=new padded[idxSize]{}; - bool findNode(MarkPtr* &prev, nbptr_t &curr, nbptr_t &next, K key, int tid); + bool findNode(MarkPtr* &prev, lin_var &curr, lin_var &next, K key, int tid); RCUTracker tracker; const uint64_t MARK_MASK = ~0x1; - inline nbptr_t getPtr(const nbptr_t& d){ - return nbptr_t(d.val & MARK_MASK, d.cnt); + inline lin_var getPtr(const lin_var& d){ + return lin_var(d.val & MARK_MASK, d.cnt); } - inline bool getMark(const nbptr_t& d){ + inline bool getMark(const lin_var& d){ return (bool)(d.val & 1); } - inline nbptr_t mixPtrMark(const nbptr_t& d, bool mk){ - return nbptr_t(d.val | mk, d.cnt); + inline lin_var mixPtrMark(const lin_var& d, bool mk){ + return lin_var(d.val | mk, d.cnt); } - inline Node* setMark(const nbptr_t& d){ + inline Node* setMark(const lin_var& d){ return reinterpret_cast(d.val | 1); } public: @@ -118,8 +118,8 @@ template optional MontageLfHashTable::get(K key, int tid) { optional res={}; MarkPtr* prev=nullptr; - nbptr_t curr; - nbptr_t next; + lin_var curr; + lin_var next; tracker.start_op(tid); // hold epoch from advancing so that the node we find won't be deleted @@ -137,8 +137,8 @@ optional MontageLfHashTable::put(K key, V val, int tid) { optional res={}; Node* tmpNode = nullptr; MarkPtr* prev=nullptr; - nbptr_t curr; - nbptr_t next; + lin_var curr; + lin_var next; tmpNode = new Node(this, key, val, nullptr); tracker.start_op(tid); @@ -184,8 +184,8 @@ bool MontageLfHashTable::insert(K key, V val, int tid){ bool res=false; Node* tmpNode = nullptr; MarkPtr* prev=nullptr; - nbptr_t curr; - nbptr_t next; + lin_var curr; + lin_var next; tmpNode = new Node(this, key, val, nullptr); tracker.start_op(tid); @@ -216,8 +216,8 @@ template optional MontageLfHashTable::remove(K key, int tid) { optional res={}; MarkPtr* prev=nullptr; - nbptr_t curr; - nbptr_t next; + lin_var curr; + lin_var next; tracker.start_op(tid); while(true) { @@ -250,8 +250,8 
@@ optional MontageLfHashTable::replace(K key, V val, int tid) { optional res={}; Node* tmpNode = nullptr; MarkPtr* prev=nullptr; - nbptr_t curr; - nbptr_t next; + lin_var curr; + lin_var next; tmpNode = new Node(this, key, val, nullptr); tracker.start_op(tid); @@ -285,7 +285,7 @@ optional MontageLfHashTable::replace(K key, V val, int tid) { } template -bool MontageLfHashTable::findNode(MarkPtr* &prev, nbptr_t &curr, nbptr_t &next, K key, int tid){ +bool MontageLfHashTable::findNode(MarkPtr* &prev, lin_var &curr, lin_var &next, K key, int tid){ while(true){ size_t idx=hash_fn(key)%idxSize; bool cmark=false; diff --git a/src/rideables/MontageMSQueue.hpp b/src/rideables/MontageMSQueue.hpp index 8941876d..6ab4040b 100644 --- a/src/rideables/MontageMSQueue.hpp +++ b/src/rideables/MontageMSQueue.hpp @@ -29,7 +29,7 @@ class MontageMSQueue : public RQueue, Recoverable{ private: struct Node{ MontageMSQueue* ds; - atomic_nbptr_t next; + atomic_lin_var next; Payload* payload; Node(): next(nullptr), payload(nullptr){}; @@ -53,7 +53,7 @@ class MontageMSQueue : public RQueue, Recoverable{ private: // dequeue pops node from head - atomic_nbptr_t head; + atomic_lin_var head; // enqueue pushes node to tail std::atomic tail; RCUTracker tracker; @@ -92,7 +92,7 @@ void MontageMSQueue::enqueue(T v, int tid){ // Node* cur_head = head.load(); cur_tail = tail.load(); uint64_t s = global_sn.fetch_add(1); - nbptr_t next = cur_tail->next.load(); + lin_var next = cur_tail->next.load(); if(cur_tail == tail.load()){ if(next.get_val() == nullptr) { // directly set m_sn and BEGIN_OP will flush it @@ -123,7 +123,7 @@ optional MontageMSQueue::dequeue(int tid){ optional res = {}; tracker.start_op(tid); while(true){ - nbptr_t cur_head = head.load(); + lin_var cur_head = head.load(); Node* cur_tail = tail.load(); Node* next = cur_head.get_val()->next.load_val(); diff --git a/unit_test/dcss.cpp b/unit_test/dcss.cpp index 894fb859..48714161 100644 --- a/unit_test/dcss.cpp +++ b/unit_test/dcss.cpp @@ 
-14,7 +14,7 @@ namespace dcas{ const int THREAD_NUM = 1; const int CNT_UPPER = 100000; - atomic_nbptr_t d; + atomic_lin_var d; atomic real; pthread_barrier_t pthread_barrier; void barrier() From 204c67fccfe7b0628c477d4b8b48d22ebb4d524c Mon Sep 17 00:00:00 2001 From: Wentao Cai Date: Sat, 14 Nov 2020 01:19:56 -0500 Subject: [PATCH 39/56] optimize pnew. see what you think --- src/persist/pnew.hpp | 85 +++----------------------------------------- 1 file changed, 5 insertions(+), 80 deletions(-) diff --git a/src/persist/pnew.hpp b/src/persist/pnew.hpp index 1808780a..6b6a72ac 100644 --- a/src/persist/pnew.hpp +++ b/src/persist/pnew.hpp @@ -3,87 +3,12 @@ // TODO: replace `new` operator of T with // per-heap allocation and placement new. - -template -T* pnew(){ - T* ret = new T(); - _esys->register_alloc_pblk(ret); - return ret; -} - -template -T* pnew(T1 a1){ - T* ret = new T(a1); - _esys->register_alloc_pblk(ret); - return ret; -} - -template -T* pnew(T1 a1, T2 a2){ - T* ret = new T(a1, a2); - _esys->register_alloc_pblk(ret); - return ret; -} - -template -T* pnew(T1 a1, T2 a2, T3 a3){ - T* ret = new T(a1, a2, a3); - _esys->register_alloc_pblk(ret); - return ret; -} - -template -T* pnew(T1 a1, T2 a2, T3 a3, T4 a4){ - T* ret = new T(a1, a2, a3, a4); - _esys->register_alloc_pblk(ret); - return ret; -} - -template -T* pnew(T1 a1, T2 a2, T3 a3, T4 a4, T5 a5){ - T* ret = new T(a1, a2, a3, a4, a5); - _esys->register_alloc_pblk(ret); - return ret; -} - -template -T* pnew(T1 a1, T2 a2, T3 a3, T4 a4, T5 a5, T6 a6){ - T* ret = new T(a1, a2, a3, a4, a5, a6); - _esys->register_alloc_pblk(ret); - return ret; -} - -template -T* pnew(T1 a1, T2 a2, T3 a3, T4 a4, T5 a5, T6 a6, T7 a7){ - T* ret = new T(a1, a2, a3, a4, a5, a6, a7); - _esys->register_alloc_pblk(ret); - return ret; -} - -template -T* pnew(T1 a1, T2 a2, T3 a3, T4 a4, T5 a5, T6 a6, T7 a7, T8 a8){ - T* ret = new T(a1, a2, a3, a4, a5, a6, a7, a8); - _esys->register_alloc_pblk(ret); - return ret; -} - -template -T* pnew(T1 a1, 
T2 a2, T3 a3, T4 a4, T5 a5, T6 a6, T7 a7, T8 a8, T9 a9){ - T* ret = new T(a1, a2, a3, a4, a5, a6, a7, a8, a9); - _esys->register_alloc_pblk(ret); - return ret; -} - -template -T* pnew(T1 a1, T2 a2, T3 a3, T4 a4, T5 a5, T6 a6, T7 a7, T8 a8, T9 a9, T10 a10){ - T* ret = new T(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10); +template +T* pnew(Types... args) +{ + T* ret = new T(args...); _esys->register_alloc_pblk(ret); return ret; -} +} // add more as needed. \ No newline at end of file From d56b26c4a7a8853b04fb3ea7f5e9c79ec87fc997 Mon Sep 17 00:00:00 2001 From: Haosen Wen Date: Sat, 14 Nov 2020 21:13:21 -0500 Subject: [PATCH 40/56] added rideable deletions in tests --- src/tests/HeapChurnTest.hpp | 2 +- src/tests/KVTest.hpp | 2 +- src/tests/SetChurnTest.hpp | 4 ++++ src/tests/ToyTest.hpp | 3 +++ 4 files changed, 9 insertions(+), 2 deletions(-) diff --git a/src/tests/HeapChurnTest.hpp b/src/tests/HeapChurnTest.hpp index 1862c2ef..734ba631 100644 --- a/src/tests/HeapChurnTest.hpp +++ b/src/tests/HeapChurnTest.hpp @@ -86,7 +86,7 @@ class HeapChurnTest : public Test{ } void cleanup(GlobalTestConfig* gtc){ - Persistent::finalize(); + delete g; } void getRideable(GlobalTestConfig* gtc){ Rideable* ptr = gtc->allocRideable(); diff --git a/src/tests/KVTest.hpp b/src/tests/KVTest.hpp index 13fcc6ea..651de401 100644 --- a/src/tests/KVTest.hpp +++ b/src/tests/KVTest.hpp @@ -90,7 +90,7 @@ class KVTest : public Test{ return ops; } void cleanup(GlobalTestConfig* gtc){ - Persistent::finalize(); + delete m; for(int i=0;itask_num;i++){ delete traces[i]; } diff --git a/src/tests/SetChurnTest.hpp b/src/tests/SetChurnTest.hpp index bafca770..148e6930 100644 --- a/src/tests/SetChurnTest.hpp +++ b/src/tests/SetChurnTest.hpp @@ -63,6 +63,10 @@ class SetChurnTest : public ChurnTest{ s->remove(k,tid); } } + void cleanup(GlobalTestConfig* gtc){ + ChurnTest::cleanup(gtc); + delete s; + } }; diff --git a/src/tests/ToyTest.hpp b/src/tests/ToyTest.hpp index 44840e65..df436d0a 100644 --- 
a/src/tests/ToyTest.hpp +++ b/src/tests/ToyTest.hpp @@ -213,6 +213,9 @@ class ToyTest : public Test{ return ops; } + void cleanup(GlobalTestConfig* gtc){ + delete t; + } }; From 9738c3325bc193b1a31f23916f9a01d4df64f2fe Mon Sep 17 00:00:00 2001 From: Haosen Wen Date: Sat, 14 Nov 2020 21:21:06 -0500 Subject: [PATCH 41/56] got rid of pnew.hpp --- src/persist/api/Recoverable.hpp | 14 +++++++++----- src/persist/pnew.hpp | 14 -------------- 2 files changed, 9 insertions(+), 19 deletions(-) delete mode 100644 src/persist/pnew.hpp diff --git a/src/persist/api/Recoverable.hpp b/src/persist/api/Recoverable.hpp index 138cc8cd..9ae965f5 100644 --- a/src/persist/api/Recoverable.hpp +++ b/src/persist/api/Recoverable.hpp @@ -67,11 +67,15 @@ class Recoverable{ esys_->end_readonly_op(); } }; - - // pnew is in a separate file since there are a bunch of them. - // add more as needed. - #include "pnew.hpp" - + // TODO: replace `new` operator of T with + // per-heap allocation and placement new. + template + T* pnew(Types... args) + { + T* ret = new T(args...); + _esys->register_alloc_pblk(ret); + return ret; + } template void register_update_pblk(T* b){ _esys->register_update_pblk(b); diff --git a/src/persist/pnew.hpp b/src/persist/pnew.hpp deleted file mode 100644 index 6b6a72ac..00000000 --- a/src/persist/pnew.hpp +++ /dev/null @@ -1,14 +0,0 @@ -// NOTE: don't include this file elsewhere! -// this is supposed to be a part of Recoverable.hpp - -// TODO: replace `new` operator of T with -// per-heap allocation and placement new. -template -T* pnew(Types... args) -{ - T* ret = new T(args...); - _esys->register_alloc_pblk(ret); - return ret; -} - -// add more as needed. 
\ No newline at end of file From 848f940adab691061fe46c499fe7b2b1bdfbc612 Mon Sep 17 00:00:00 2001 From: Haosen Wen Date: Sat, 14 Nov 2020 22:41:21 -0500 Subject: [PATCH 42/56] moved stuff in PersistStructs.hpp into EpochSys.hpp and Recoverable.hpp --- src/persist/EpochSys.hpp | 83 +++++++++- src/persist/PersistStructs.hpp | 279 -------------------------------- src/persist/api/Recoverable.hpp | 185 +++++++++++++++++++++ 3 files changed, 267 insertions(+), 280 deletions(-) delete mode 100644 src/persist/PersistStructs.hpp diff --git a/src/persist/EpochSys.hpp b/src/persist/EpochSys.hpp index fe255ca2..b76e619d 100644 --- a/src/persist/EpochSys.hpp +++ b/src/persist/EpochSys.hpp @@ -15,7 +15,6 @@ #include "persist_utils.hpp" #include "common_macros.hpp" -#include "PersistStructs.hpp" #include "TransactionTrackers.hpp" #include "PerThreadContainers.hpp" #include "ToBePersistedContainers.hpp" @@ -24,6 +23,88 @@ namespace pds{ +struct OldSeeNewException : public std::exception { + const char * what () const throw () { + return "OldSeeNewException not handled."; + } +}; + +enum PBlkType {INIT, ALLOC, UPDATE, DELETE, RECLAIMED, EPOCH, OWNED}; + +class EpochSys; + + +///////////////////////////// +// PBlk-related structures // +///////////////////////////// + +class PBlk : public Persistent{ + friend class EpochSys; +protected: + // Wentao: the first word should NOT be any persistent value for + // epoch-system-level recovery (i.e., epoch), as Ralloc repurposes the first + // word for block free list, which may interfere with the recovery. + // Currently we use (transient) "reserved" as the first word. If we decide to + // remove this field, we need to either prepend another dummy word, or + // change the block free list in Ralloc. + + // transient. + void* _reserved; + + uint64_t epoch = NULL_EPOCH; + PBlkType blktype = INIT; + uint64_t owner_id = 0; // TODO: make consider abandon this field and use id all the time. 
+ uint64_t id = 0; + pptr retire = nullptr; + // bool persisted = false; // For debug purposes. Might not be needed at the end. + + // void call_persist(){ // For debug purposes. Might not be needed at the end. + // persist(); + // persisted = true; + // } +public: + void set_epoch(uint64_t e){ + // only for testing + epoch=e; + } + // id gets inited by EpochSys instance. + PBlk(): epoch(NULL_EPOCH), blktype(INIT), owner_id(0), retire(nullptr){} + // id gets inited by EpochSys instance. + PBlk(const PBlk* owner): + blktype(OWNED), owner_id(owner->blktype==OWNED? owner->owner_id : owner->id) {} + PBlk(const PBlk& oth): blktype(oth.blktype==OWNED? OWNED:INIT), owner_id(oth.owner_id), id(oth.id) {} + inline uint64_t get_id() {return id;} + virtual pptr get_data() {return nullptr;} + virtual ~PBlk(){ + // Wentao: we need to zeroize epoch and flush it, avoiding it left after free + epoch = NULL_EPOCH; + // persist_func::clwb(&epoch); + } +}; + +template +class PBlkArray : public PBlk{ + friend class EpochSys; + size_t size; + // NOTE: see EpochSys::alloc_pblk_array() for its sementical allocators. 
+ PBlkArray(): PBlk(){} + PBlkArray(PBlk* owner) : PBlk(owner), content((T*)((char*)this + sizeof(PBlkArray))){} +public: + PBlkArray(const PBlkArray& oth): PBlk(oth), size(oth.size), + content((T*)((char*)this + sizeof(PBlkArray))){} + virtual ~PBlkArray(){}; + T* content; //transient ptr + inline size_t get_size()const{return size;} +}; + +struct Epoch : public PBlk{ + std::atomic global_epoch; + void persist(){} + Epoch(){ + global_epoch.store(NULL_EPOCH, std::memory_order_relaxed); + } +}; + ////////////////// // Epoch System // ////////////////// diff --git a/src/persist/PersistStructs.hpp b/src/persist/PersistStructs.hpp deleted file mode 100644 index c8a2b174..00000000 --- a/src/persist/PersistStructs.hpp +++ /dev/null @@ -1,279 +0,0 @@ -#ifndef PERSIST_STRUCTS_HPP -#define PERSIST_STRUCTS_HPP - -#include -#include -#include -#include - -#include "Persistent.hpp" -#include "common_macros.hpp" - -class Recoverable; - -namespace pds{ - struct OldSeeNewException : public std::exception { - const char * what () const throw () { - return "OldSeeNewException not handled."; - } - }; - - enum PBlkType {INIT, ALLOC, UPDATE, DELETE, RECLAIMED, EPOCH, OWNED}; - - class EpochSys; - - - ///////////////////////////// - // PBlk-related structures // - ///////////////////////////// - - class PBlk : public Persistent{ - friend class EpochSys; - protected: - // Wentao: the first word should NOT be any persistent value for - // epoch-system-level recovery (i.e., epoch), as Ralloc repurposes the first - // word for block free list, which may interfere with the recovery. - // Currently we use (transient) "reserved" as the first word. If we decide to - // remove this field, we need to either prepend another dummy word, or - // change the block free list in Ralloc. - - // transient. - void* _reserved; - - uint64_t epoch = NULL_EPOCH; - PBlkType blktype = INIT; - uint64_t owner_id = 0; // TODO: make consider abandon this field and use id all the time. 
- uint64_t id = 0; - pptr retire = nullptr; - // bool persisted = false; // For debug purposes. Might not be needed at the end. - - // void call_persist(){ // For debug purposes. Might not be needed at the end. - // persist(); - // persisted = true; - // } - public: - void set_epoch(uint64_t e){ - // only for testing - epoch=e; - } - // id gets inited by EpochSys instance. - PBlk(): epoch(NULL_EPOCH), blktype(INIT), owner_id(0), retire(nullptr){} - // id gets inited by EpochSys instance. - PBlk(const PBlk* owner): - blktype(OWNED), owner_id(owner->blktype==OWNED? owner->owner_id : owner->id) {} - PBlk(const PBlk& oth): blktype(oth.blktype==OWNED? OWNED:INIT), owner_id(oth.owner_id), id(oth.id) {} - inline uint64_t get_id() {return id;} - virtual pptr get_data() {return nullptr;} - virtual ~PBlk(){ - // Wentao: we need to zeroize epoch and flush it, avoiding it left after free - epoch = NULL_EPOCH; - // persist_func::clwb(&epoch); - } - }; - - template - class PBlkArray : public PBlk{ - friend class EpochSys; - size_t size; - // NOTE: see EpochSys::alloc_pblk_array() for its sementical allocators. - PBlkArray(): PBlk(){} - PBlkArray(PBlk* owner) : PBlk(owner), content((T*)((char*)this + sizeof(PBlkArray))){} - public: - PBlkArray(const PBlkArray& oth): PBlk(oth), size(oth.size), - content((T*)((char*)this + sizeof(PBlkArray))){} - virtual ~PBlkArray(){}; - T* content; //transient ptr - inline size_t get_size()const{return size;} - }; - - struct Epoch : public PBlk{ - std::atomic global_epoch; - void persist(){} - Epoch(){ - global_epoch.store(NULL_EPOCH, std::memory_order_relaxed); - } - }; - - //////////////////////////////////////// - // counted pointer-related structures // - //////////////////////////////////////// - - /* - * Macro VISIBLE_READ determines which version of API will be used. - * Macro USE_TSX determines whether TSX (Intel HTM) will be used. 
- * - * We highly recommend you to use default invisible read version, - * since it doesn't need you to handle EpochVerifyException and you - * can call just load rather than load_verify throughout your program - * - * We provides following double-compare-single-swap (DCSS) API for - * nonblocking data structures to use: - * - * atomic_lin_var: atomic double word for storing pointers - * that point to nodes, which link payloads in. It contains following - * functions: - * - * store(T val): - * store 64-bit long data without sync; cnt doesn't increment - * - * store(lin_var d): store(d.val) - * - * lin_var load(): - * load var without verifying epoch - * - * lin_var load_verify(): - * load var and verify epoch, used as lin point; - * for invisible reads this won't verify epoch - * - * bool CAS(lin_var expected, T desired): - * CAS in desired value and increment cnt if expected - * matches current var - * - * bool CAS_verify(lin_var expected, T desired): - * CAS in desired value and increment cnt if expected - * matches current var and global epoch doesn't change - * since BEGIN_OP - */ - - struct EpochVerifyException : public std::exception { - const char * what () const throw () { - return "Epoch in which operation wants to linearize has passed; retry required."; - } - }; - - struct sc_desc_t; - - template - class atomic_lin_var; - class lin_var{ - template - friend class atomic_lin_var; - inline bool is_desc() const { - return (cnt & 3UL) == 1UL; - } - inline sc_desc_t* get_desc() const { - assert(is_desc()); - return reinterpret_cast(val); - } - public: - uint64_t val; - uint64_t cnt; - template - inline T get_val() const { - static_assert(sizeof(T) == sizeof(uint64_t), "sizes do not match"); - return reinterpret_cast(val); - } - lin_var(uint64_t v, uint64_t c) : val(v), cnt(c) {}; - lin_var() : lin_var(0, 0) {}; - - inline bool operator==(const lin_var & b) const{ - return val==b.val && cnt==b.cnt; - } - inline bool operator!=(const lin_var & b) const{ - return 
!operator==(b); - } - }__attribute__((aligned(16))); - - template - class atomic_lin_var{ - static_assert(sizeof(T) == sizeof(uint64_t), "sizes do not match"); - public: - // for cnt in var: - // desc: ....01 - // real val: ....00 - std::atomic var; - lin_var load(Recoverable* ds); - lin_var load_verify(Recoverable* ds); - inline T load_val(Recoverable* ds){ - return reinterpret_cast(load().val); - } - bool CAS_verify(Recoverable* ds, lin_var expected, const T& desired); - inline bool CAS_verify(lin_var expected, const lin_var& desired){ - return CAS_verify(expected,desired.get_val()); - } - // CAS doesn't check epoch nor cnt - bool CAS(lin_var expected, const T& desired); - inline bool CAS(lin_var expected, const lin_var& desired){ - return CAS(expected,desired.get_val()); - } - void store(const T& desired); - inline void store(const lin_var& desired){ - store(desired.get_val()); - } - atomic_lin_var(const T& v) : var(lin_var(reinterpret_cast(v), 0)){}; - atomic_lin_var() : atomic_lin_var(T()){}; - }; - - struct sc_desc_t{ - private: - // for cnt in var: - // in progress: ....01 - // committed: ....10 - // aborted: ....11 - std::atomic var; - const uint64_t old_val; - const uint64_t new_val; - const uint64_t cas_epoch; - inline bool abort(lin_var _d){ - // bring cnt from ..01 to ..11 - lin_var expected (_d.val, (_d.cnt & ~0x3UL) | 1UL); // in progress - lin_var desired(expected); - desired.cnt += 2; - return var.compare_exchange_strong(expected, desired); - } - inline bool commit(lin_var _d){ - // bring cnt from ..01 to ..10 - lin_var expected (_d.val, (_d.cnt & ~0x3UL) | 1UL); // in progress - lin_var desired(expected); - desired.cnt += 1; - return var.compare_exchange_strong(expected, desired); - } - inline bool committed(lin_var _d) const { - return (_d.cnt & 0x3UL) == 2UL; - } - inline bool in_progress(lin_var _d) const { - return (_d.cnt & 0x3UL) == 1UL; - } - inline bool match(lin_var old_d, lin_var new_d) const { - return ((old_d.cnt & ~0x3UL) == (new_d.cnt 
& ~0x3UL)) && - (old_d.val == new_d.val); - } - void cleanup(lin_var old_d){ - // must be called after desc is aborted or committed - lin_var new_d = var.load(); - if(!match(old_d,new_d)) return; - assert(!in_progress(new_d)); - lin_var expected(reinterpret_cast(this),(new_d.cnt & ~0x3UL) | 1UL); - if(committed(new_d)) { - // bring cnt from ..10 to ..00 - reinterpret_cast*>( - new_d.val)->var.compare_exchange_strong( - expected, - lin_var(new_val,new_d.cnt + 2)); - } else { - //aborted - // bring cnt from ..11 to ..00 - reinterpret_cast*>( - new_d.val)->var.compare_exchange_strong( - expected, - lin_var(old_val,new_d.cnt + 1)); - } - } - public: - inline bool committed() const { - return committed(var.load()); - } - inline bool in_progress() const { - return in_progress(var.load()); - } - // TODO: try_complete used to be inline. Try to make it inline again when refactoring is finished. - void try_complete(Recoverable* ds, uint64_t addr); - - sc_desc_t( uint64_t c, uint64_t a, uint64_t o, - uint64_t n, uint64_t e) : - var(lin_var(a,c)), old_val(o), new_val(n), cas_epoch(e){}; - sc_desc_t() : sc_desc_t(0,0,0,0,0){}; - }; -} - -#endif \ No newline at end of file diff --git a/src/persist/api/Recoverable.hpp b/src/persist/api/Recoverable.hpp index 9ae965f5..62c11f8a 100644 --- a/src/persist/api/Recoverable.hpp +++ b/src/persist/api/Recoverable.hpp @@ -5,6 +5,191 @@ #include "EpochSys.hpp" // TODO: report recover errors/exceptions +class Recoverable; + +namespace pds{ + //////////////////////////////////////// + // counted pointer-related structures // + //////////////////////////////////////// + + /* + * Macro VISIBLE_READ determines which version of API will be used. + * Macro USE_TSX determines whether TSX (Intel HTM) will be used. 
+ * + * We highly recommend you to use default invisible read version, + * since it doesn't need you to handle EpochVerifyException and you + * can call just load rather than load_verify throughout your program + * + * We provides following double-compare-single-swap (DCSS) API for + * nonblocking data structures to use: + * + * atomic_lin_var: atomic double word for storing pointers + * that point to nodes, which link payloads in. It contains following + * functions: + * + * store(T val): + * store 64-bit long data without sync; cnt doesn't increment + * + * store(lin_var d): store(d.val) + * + * lin_var load(): + * load var without verifying epoch + * + * lin_var load_verify(): + * load var and verify epoch, used as lin point; + * for invisible reads this won't verify epoch + * + * bool CAS(lin_var expected, T desired): + * CAS in desired value and increment cnt if expected + * matches current var + * + * bool CAS_verify(lin_var expected, T desired): + * CAS in desired value and increment cnt if expected + * matches current var and global epoch doesn't change + * since BEGIN_OP + */ + + struct EpochVerifyException : public std::exception { + const char * what () const throw () { + return "Epoch in which operation wants to linearize has passed; retry required."; + } + }; + + struct sc_desc_t; + + template + class atomic_lin_var; + class lin_var{ + template + friend class atomic_lin_var; + inline bool is_desc() const { + return (cnt & 3UL) == 1UL; + } + inline sc_desc_t* get_desc() const { + assert(is_desc()); + return reinterpret_cast(val); + } + public: + uint64_t val; + uint64_t cnt; + template + inline T get_val() const { + static_assert(sizeof(T) == sizeof(uint64_t), "sizes do not match"); + return reinterpret_cast(val); + } + lin_var(uint64_t v, uint64_t c) : val(v), cnt(c) {}; + lin_var() : lin_var(0, 0) {}; + + inline bool operator==(const lin_var & b) const{ + return val==b.val && cnt==b.cnt; + } + inline bool operator!=(const lin_var & b) const{ + return 
!operator==(b); + } + }__attribute__((aligned(16))); + + template + class atomic_lin_var{ + static_assert(sizeof(T) == sizeof(uint64_t), "sizes do not match"); + public: + // for cnt in var: + // desc: ....01 + // real val: ....00 + std::atomic var; + lin_var load(Recoverable* ds); + lin_var load_verify(Recoverable* ds); + inline T load_val(Recoverable* ds){ + return reinterpret_cast(load().val); + } + bool CAS_verify(Recoverable* ds, lin_var expected, const T& desired); + inline bool CAS_verify(lin_var expected, const lin_var& desired){ + return CAS_verify(expected,desired.get_val()); + } + // CAS doesn't check epoch nor cnt + bool CAS(lin_var expected, const T& desired); + inline bool CAS(lin_var expected, const lin_var& desired){ + return CAS(expected,desired.get_val()); + } + void store(const T& desired); + inline void store(const lin_var& desired){ + store(desired.get_val()); + } + atomic_lin_var(const T& v) : var(lin_var(reinterpret_cast(v), 0)){}; + atomic_lin_var() : atomic_lin_var(T()){}; + }; + + struct sc_desc_t{ + private: + // for cnt in var: + // in progress: ....01 + // committed: ....10 + // aborted: ....11 + std::atomic var; + const uint64_t old_val; + const uint64_t new_val; + const uint64_t cas_epoch; + inline bool abort(lin_var _d){ + // bring cnt from ..01 to ..11 + lin_var expected (_d.val, (_d.cnt & ~0x3UL) | 1UL); // in progress + lin_var desired(expected); + desired.cnt += 2; + return var.compare_exchange_strong(expected, desired); + } + inline bool commit(lin_var _d){ + // bring cnt from ..01 to ..10 + lin_var expected (_d.val, (_d.cnt & ~0x3UL) | 1UL); // in progress + lin_var desired(expected); + desired.cnt += 1; + return var.compare_exchange_strong(expected, desired); + } + inline bool committed(lin_var _d) const { + return (_d.cnt & 0x3UL) == 2UL; + } + inline bool in_progress(lin_var _d) const { + return (_d.cnt & 0x3UL) == 1UL; + } + inline bool match(lin_var old_d, lin_var new_d) const { + return ((old_d.cnt & ~0x3UL) == (new_d.cnt 
& ~0x3UL)) && + (old_d.val == new_d.val); + } + void cleanup(lin_var old_d){ + // must be called after desc is aborted or committed + lin_var new_d = var.load(); + if(!match(old_d,new_d)) return; + assert(!in_progress(new_d)); + lin_var expected(reinterpret_cast(this),(new_d.cnt & ~0x3UL) | 1UL); + if(committed(new_d)) { + // bring cnt from ..10 to ..00 + reinterpret_cast*>( + new_d.val)->var.compare_exchange_strong( + expected, + lin_var(new_val,new_d.cnt + 2)); + } else { + //aborted + // bring cnt from ..11 to ..00 + reinterpret_cast*>( + new_d.val)->var.compare_exchange_strong( + expected, + lin_var(old_val,new_d.cnt + 1)); + } + } + public: + inline bool committed() const { + return committed(var.load()); + } + inline bool in_progress() const { + return in_progress(var.load()); + } + // TODO: try_complete used to be inline. Try to make it inline again when refactoring is finished. + void try_complete(Recoverable* ds, uint64_t addr); + + sc_desc_t( uint64_t c, uint64_t a, uint64_t o, + uint64_t n, uint64_t e) : + var(lin_var(a,c)), old_val(o), new_val(n), cas_epoch(e){}; + sc_desc_t() : sc_desc_t(0,0,0,0,0){}; + }; +} + class Recoverable{ // TODO: get rid of these. template friend class pds::atomic_lin_var; From 6e841d8873a1783a93d8bdbb079adcc44db762bc Mon Sep 17 00:00:00 2001 From: Haosen Wen Date: Sun, 15 Nov 2020 13:58:11 -0500 Subject: [PATCH 43/56] removed friend classes in Recoverable --- src/persist/api/Recoverable.hpp | 23 +++++++++++------------ src/persist/api/montage_global_api.hpp | 2 -- 2 files changed, 11 insertions(+), 14 deletions(-) diff --git a/src/persist/api/Recoverable.hpp b/src/persist/api/Recoverable.hpp index 62c11f8a..df3eb659 100644 --- a/src/persist/api/Recoverable.hpp +++ b/src/persist/api/Recoverable.hpp @@ -191,10 +191,6 @@ namespace pds{ } class Recoverable{ - // TODO: get rid of these. 
- template friend class pds::atomic_lin_var; - friend class pds::lin_var; - pds::EpochSys* _esys = nullptr; // local descriptors for DCSS @@ -308,6 +304,9 @@ class Recoverable{ pds::sc_desc_t* get_dcss_desc(){ return &local_descs[pds::EpochSys::tid].ui; } + uint64_t get_local_epoch(){ + return _esys->epochs[pds::EpochSys::tid].ui; + } }; ///////////////////////////// @@ -337,7 +336,7 @@ t TOKEN_CONCAT(get_unsafe_, n)(Recoverable* ds) const{\ /* set method open a pblk for write. return a new copy when necessary */\ template \ T* TOKEN_CONCAT(set_, n)(Recoverable* ds, const in_type& TOKEN_CONCAT(tmp_, n)){\ - assert(ds->epochs[EpochSys::tid].ui != NULL_EPOCH);\ + assert(ds->get_local_epoch() != NULL_EPOCH);\ auto ret = ds->openwrite_pblk(this);\ ret->TOKEN_CONCAT(m_, n) = TOKEN_CONCAT(tmp_, n);\ ds->register_update_pblk(ret);\ @@ -367,7 +366,7 @@ t TOKEN_CONCAT(get_unsafe_, n)(Recoverable* ds, int i) const{\ }\ /* set method open a pblk for write. return a new copy when necessary */\ T* TOKEN_CONCAT(set_, n)(Recoverable* ds, int i, t TOKEN_CONCAT(tmp_, n)){\ - assert(ds->epochs[EpochSys::tid].ui != NULL_EPOCH);\ + assert(ds->get_local_epoch() != NULL_EPOCH);\ auto ret = ds->openwrite_pblk(this);\ ret->TOKEN_CONCAT(m_, n)[i] = TOKEN_CONCAT(tmp_, n);\ ds->register_update_pblk(ret);\ @@ -400,11 +399,11 @@ namespace pds{ template lin_var atomic_lin_var::load_verify(Recoverable* ds){ - assert(ds->_esys->epochs[pds::EpochSys::tid].ui != NULL_EPOCH); + assert(ds->get_local_epoch() != NULL_EPOCH); lin_var r; while(true){ r = var.load(); - if(ds->_esys->check_epoch(ds->_esys->epochs[pds::EpochSys::tid].ui)){ + if(ds->_esys->check_epoch()){ lin_var ret(r.val,r.cnt+1); if(var.compare_exchange_strong(r, ret)){ return r; @@ -417,8 +416,8 @@ namespace pds{ template bool atomic_lin_var::CAS_verify(Recoverable* ds, lin_var expected, const T& desired){ - assert(ds->_esys->epochs[pds::EpochSys::tid].ui != NULL_EPOCH); - 
if(ds->_esys->check_epoch(ds->_esys->epochs[pds::EpochSys::tid].ui)){ + assert(ds->get_local_epoch() != NULL_EPOCH); + if(ds->_esys->check_epoch()){ lin_var new_r(reinterpret_cast(desired),expected.cnt+1); return var.compare_exchange_strong(expected, new_r); } else { @@ -461,7 +460,7 @@ namespace pds{ template bool atomic_lin_var::CAS_verify(Recoverable* ds, lin_var expected, const T& desired){ - assert(ds->_esys->epochs[pds::EpochSys::tid].ui != NULL_EPOCH); + assert(ds->get_local_epoch() != NULL_EPOCH); // total_cnt.fetch_add(1); #ifdef USE_TSX unsigned status = _xbegin(); @@ -509,7 +508,7 @@ namespace pds{ reinterpret_cast(this), expected.val, reinterpret_cast(desired), - ds->_esys->epochs[pds::EpochSys::tid].ui); + ds->get_local_epoch()); lin_var new_r(reinterpret_cast(ds->get_dcss_desc()), r.cnt+1); if(!var.compare_exchange_strong(r,new_r)){ return false; diff --git a/src/persist/api/montage_global_api.hpp b/src/persist/api/montage_global_api.hpp index 21ca2795..72bca494 100644 --- a/src/persist/api/montage_global_api.hpp +++ b/src/persist/api/montage_global_api.hpp @@ -8,8 +8,6 @@ #include -// This api is inspired by object-based RSTM's api. - namespace pds{ extern EpochSys* global_esys; From dbd1801931c49b61c403d164873069d3e125ebbe Mon Sep 17 00:00:00 2001 From: Wentao Cai Date: Sun, 15 Nov 2020 16:12:28 -0500 Subject: [PATCH 44/56] elaborate on comment of register_alloc_pblk --- src/persist/EpochSys.hpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/persist/EpochSys.hpp b/src/persist/EpochSys.hpp index b76e619d..4da8da25 100644 --- a/src/persist/EpochSys.hpp +++ b/src/persist/EpochSys.hpp @@ -344,6 +344,9 @@ class EpochSys{ void validate_access(const PBlk* b, uint64_t c); // register the allocation of a PBlk during a transaction. + // called for new blocks at both pnew (holding them in + // pending_allocs) and begin_op (registering them with the + // acquired epoch). 
template T* register_alloc_pblk(T* b, uint64_t c); From b5d73951c914c8ccdceb4f72cf5b2f1facd8cad5 Mon Sep 17 00:00:00 2001 From: Haosen Wen Date: Sun, 15 Nov 2020 19:59:16 -0500 Subject: [PATCH 45/56] putting API-related logic into Recoverable and global api header --- src/persist/EpochSys.hpp | 148 +------------------------ src/persist/api/Recoverable.cpp | 13 ++- src/persist/api/Recoverable.hpp | 109 ++++++++++++------ src/persist/api/montage_global_api.cpp | 2 +- src/persist/api/montage_global_api.hpp | 68 +++++++----- 5 files changed, 137 insertions(+), 203 deletions(-) diff --git a/src/persist/EpochSys.hpp b/src/persist/EpochSys.hpp index b76e619d..c5064b76 100644 --- a/src/persist/EpochSys.hpp +++ b/src/persist/EpochSys.hpp @@ -32,7 +32,7 @@ struct OldSeeNewException : public std::exception { enum PBlkType {INIT, ALLOC, UPDATE, DELETE, RECLAIMED, EPOCH, OWNED}; class EpochSys; - +class Recoverable; ///////////////////////////// // PBlk-related structures // @@ -40,6 +40,7 @@ class EpochSys; class PBlk : public Persistent{ friend class EpochSys; + friend class Recoverable; protected: // Wentao: the first word should NOT be any persistent value for // epoch-system-level recovery (i.e., epoch), as Ralloc repurposes the first @@ -142,20 +143,7 @@ class EpochSys{ // system mode that toggles on/off PDELETE for recovery purpose. SysMode sys_mode = ONLINE; - /* public members for API */ // TODO: put these into Recoverable - // current epoch of each thread. - padded* epochs = nullptr; - // containers for pending allocations - padded>* pending_allocs = nullptr; - - EpochSys(GlobalTestConfig* _gtc) : uid_generator(_gtc->task_num), gtc(_gtc) { - epochs = new padded[gtc->task_num]; - for(int i = 0; i < gtc->task_num; i++){ - epochs[i].ui = NULL_EPOCH; - } - - pending_allocs = new padded>[gtc->task_num]; reset(); // TODO: change to recover() later on. 
} @@ -175,7 +163,6 @@ class EpochSys{ delete trans_tracker; delete to_be_persisted; delete to_be_freed; - delete epochs; } void parse_env(); @@ -199,131 +186,14 @@ class EpochSys{ Persistent::simulate_crash(tid); } - ///////// - // API // - ///////// - - // TODO: put these into Recoverable. + //////////////// + // Operations // + //////////////// static void init_thread(int _tid){ EpochSys::tid = _tid; } - bool check_epoch(){ - return check_epoch(epochs[tid].ui); - } - - void begin_op(){ - assert(epochs[tid].ui == NULL_EPOCH); - epochs[tid].ui = begin_transaction(); - // TODO: any room for optimization here? - // TODO: put pending_allocs-related stuff into operations? - for (auto b = pending_allocs[tid].ui.begin(); - b != pending_allocs[tid].ui.end(); b++){ - register_alloc_pblk(*b, epochs[tid].ui); - } - } - - void end_op(){ - if (epochs[tid].ui != NULL_EPOCH){ - end_transaction(epochs[tid].ui); - epochs[tid].ui = NULL_EPOCH; - } - pending_allocs[tid].ui.clear(); - } - - void end_readonly_op(){ - if (epochs[tid].ui != NULL_EPOCH){ - end_readonly_transaction(epochs[tid].ui); - epochs[tid].ui = NULL_EPOCH; - } - assert(pending_allocs[tid].ui.empty()); - } - - void abort_op(){ - assert(epochs[tid].ui != NULL_EPOCH); - abort_transaction(epochs[tid].ui); - epochs[tid].ui = NULL_EPOCH; - } - - template - void pdelete(T* b){ - ASSERT_DERIVE(T, PBlk); - ASSERT_COPY(T); - - if (sys_mode == ONLINE){ - if (epochs[tid].ui != NULL_EPOCH){ - free_pblk(b, epochs[tid].ui); - } else { - if (b->epoch == NULL_EPOCH){ - assert(pending_allocs[tid].ui.find(b) != pending_allocs[tid].ui.end()); - pending_allocs[tid].ui.erase(b); - } - delete b; - } - } - } - - template - void pretire(T* b){ - assert(eochs[tid].ui != NULL_EPOCH); - retire_pblk(b, epochs[tid].ui); - } - - template - void preclaim(T* b){ - if (epochs[tid].ui == NULL_EPOCH){ - begin_op(); - } - reclaim_pblk(b, epochs[tid].ui); - if (epochs[tid].ui == NULL_EPOCH){ - end_op(); - } - } - - template - T* register_alloc_pblk(T* 
b){ - return register_alloc_pblk(b, epochs[tid].ui); - } - - template - void register_update_pblk(T* b){ - register_update_pblk(b, epochs[tid].ui); - } - - template - const T* openread_pblk(const T* b){ - assert(epochs[tid].ui != NULL_EPOCH); - return openread_pblk(b, epochs[tid].ui); - } - - template - const T* openread_pblk_unsafe(const T* b){ - if (epochs[tid].ui != NULL_EPOCH){ - return openread_pblk_unsafe(b, epochs[tid].ui); - } else { - return b; - } - } - - template - T* openwrite_pblk(T* b){ - assert(epochs[tid].ui != NULL_EPOCH); - return openwrite_pblk(b, epochs[tid].ui); - } - - void recover_mode(){ - sys_mode = RECOVER; // PDELETE -> nop - } - - void online_mode(){ - sys_mode = ONLINE; - } - - //////////////// - // Operations // - //////////////// - // check if global is the same as c. bool check_epoch(uint64_t c); @@ -419,13 +289,7 @@ T* EpochSys::register_alloc_pblk(T* b, uint64_t c){ ASSERT_COPY(T); PBlk* blk = b; - if (c == NULL_EPOCH){ - // register alloc before BEGIN_OP, put it into pending_allocs bucket and - // return. Will be done by the BEGIN_OP that calls this again with a - // non-NULL c. - pending_allocs[tid].ui.insert(blk); - return b; - } + assert(c != NULL_EPOCH); blk->epoch = c; // Wentao: It's possible that payload is registered multiple times assert(blk->blktype == INIT || blk->blktype == OWNED || diff --git a/src/persist/api/Recoverable.cpp b/src/persist/api/Recoverable.cpp index 8d5c865f..a2690e70 100644 --- a/src/persist/api/Recoverable.cpp +++ b/src/persist/api/Recoverable.cpp @@ -4,7 +4,11 @@ Recoverable::Recoverable(GlobalTestConfig* gtc){ // init Persistent allocator // TODO: put this into EpochSys. 
Persistent::init(); - + epochs = new padded[gtc->task_num]; + for(int i = 0; i < gtc->task_num; i++){ + epochs[i].ui = NULL_EPOCH; + } + pending_allocs = new padded>[gtc->task_num]; local_descs = new padded[gtc->task_num]; // init main thread pds::EpochSys::init_thread(0); @@ -13,14 +17,17 @@ Recoverable::Recoverable(GlobalTestConfig* gtc){ } Recoverable::~Recoverable(){ delete _esys; + delete local_descs; + delete pending_allocs; + delete epochs; Persistent::finalize(); } void Recoverable::init_thread(GlobalTestConfig*, LocalTestConfig* ltc){ - _esys->init_thread(ltc->tid); + EpochSys::init_thread(ltc->tid); } void Recoverable::init_thread(int tid){ - _esys->init_thread(tid); + EpochSys::init_thread(tid); } namespace pds{ diff --git a/src/persist/api/Recoverable.hpp b/src/persist/api/Recoverable.hpp index df3eb659..397693bf 100644 --- a/src/persist/api/Recoverable.hpp +++ b/src/persist/api/Recoverable.hpp @@ -192,7 +192,11 @@ namespace pds{ class Recoverable{ pds::EpochSys* _esys = nullptr; - + + // current epoch of each thread. + padded* epochs = nullptr; + // containers for pending allocations + padded>* pending_allocs = nullptr; // local descriptors for DCSS // TODO: maybe put this into a derived class for NB data structures? padded* local_descs = nullptr; @@ -205,94 +209,135 @@ class Recoverable{ void init_thread(GlobalTestConfig*, LocalTestConfig* ltc); void init_thread(int tid); bool check_epoch(){ - return _esys->check_epoch(); + return _esys->check_epoch(epochs[pds::EpochSys::tid].ui); } bool check_epoch(uint64_t c){ return _esys->check_epoch(c); } void begin_op(){ - _esys->begin_op(); + assert(epochs[pds::EpochSys::tid].ui == NULL_EPOCH); + epochs[pds::EpochSys::tid].ui = _esys->begin_transaction(); + // TODO: any room for optimization here? + // TODO: put pending_allocs-related stuff into operations? 
+ for (auto b = pending_allocs[pds::EpochSys::tid].ui.begin(); + b != pending_allocs[pds::EpochSys::tid].ui.end(); b++){ + _esys->register_alloc_pblk(*b, epochs[pds::EpochSys::tid].ui); + } } void end_op(){ - _esys->end_op(); + if (epochs[pds::EpochSys::tid].ui != NULL_EPOCH){ + _esys->end_transaction(epochs[pds::EpochSys::tid].ui); + epochs[pds::EpochSys::tid].ui = NULL_EPOCH; + } + pending_allocs[pds::EpochSys::tid].ui.clear(); } void end_readonly_op(){ - _esys->end_readonly_op(); + if (epochs[pds::EpochSys::tid].ui != NULL_EPOCH){ + _esys->end_readonly_transaction(epochs[pds::EpochSys::tid].ui); + epochs[pds::EpochSys::tid].ui = NULL_EPOCH; + } + assert(pending_allocs[pds::EpochSys::tid].ui.empty()); } void abort_op(){ - _esys->abort_op(); + assert(epochs[pds::EpochSys::tid].ui != NULL_EPOCH); + _esys->abort_transaction(epochs[pds::EpochSys::tid].ui); + epochs[pds::EpochSys::tid].ui = NULL_EPOCH; } class MontageOpHolder{ - pds::EpochSys* esys_; + Recoverable* ds = nullptr; public: - MontageOpHolder(Recoverable* ds): esys_(ds->_esys){ - esys_->begin_op(); - } - MontageOpHolder(pds::EpochSys* _esys): esys_(_esys){ - esys_->begin_op(); + MontageOpHolder(Recoverable* ds_): ds(ds_){ + ds->begin_op(); } ~MontageOpHolder(){ - esys_->end_op(); + ds->end_op(); } }; class MontageOpHolderReadOnly{ - pds::EpochSys* esys_; + Recoverable* ds = nullptr; public: - MontageOpHolderReadOnly(Recoverable* ds): esys_(ds->_esys){ - esys_->begin_op(); - } - MontageOpHolderReadOnly(pds::EpochSys* _esys): esys_(_esys){ - esys_->begin_op(); + MontageOpHolderReadOnly(Recoverable* ds_): ds(ds_){ + ds->begin_op(); } ~MontageOpHolderReadOnly(){ - esys_->end_readonly_op(); + ds->end_readonly_op(); } }; // TODO: replace `new` operator of T with // per-heap allocation and placement new. template T* pnew(Types... 
args) - { + { T* ret = new T(args...); - _esys->register_alloc_pblk(ret); + if (epochs[pds::EpochSys::tid].ui == NULL_EPOCH){ + _esys->pending_allocs[EpochSys::tid].ui.insert(ret); + } else { + _esys->register_alloc_pblk(ret, epochs[pds::EpochSys::tid].ui); + } return ret; } template void register_update_pblk(T* b){ - _esys->register_update_pblk(b); + _esys->register_update_pblk(b, epochs[pds::EpochSys::tid].ui); } template void pdelete(T* b){ - _esys->pdelete(b); + ASSERT_DERIVE(T, pds::PBlk); + ASSERT_COPY(T); + + if (sys_mode == pds::ONLINE){ + if (epochs[pds::EpochSys::tid].ui != NULL_EPOCH){ + _esys->free_pblk(b, epochs[pds::EpochSys::tid].ui); + } else { + if (b->epoch == NULL_EPOCH){ + assert(pending_allocs[pds::EpochSys::tid].ui.find(b) != pending_allocs[pds::EpochSys::tid].ui.end()); + pending_allocs[pds::EpochSys::tid].ui.erase(b); + } + delete b; + } + } } template void pretire(T* b){ - _esys->pretire(b); + assert(eochs[pds::EpochSys::tid].ui != NULL_EPOCH); + _esys->retire_pblk(b, epochs[pds::EpochSys::tid].ui); } template void preclaim(T* b){ - _esys->pdelete(b); + if (epochs[pds::EpochSys::tid].ui == NULL_EPOCH){ + begin_op(); + } + _esys->reclaim_pblk(b, epochs[pds::EpochSys::tid].ui); + if (epochs[pds::EpochSys::tid].ui == NULL_EPOCH){ + end_op(); + } } template const T* openread_pblk(const T* b){ - return _esys->openread_pblk(b); + assert(epochs[pds::EpochSys::tid].ui != NULL_EPOCH); + return _esys->openread_pblk(b, epochs[pds::EpochSys::tid].ui); } template const T* openread_pblk_unsafe(const T* b){ - return _esys->openread_pblk_unsafe(b); + if (epochs[pds::EpochSys::tid].ui != NULL_EPOCH){ + return _esys->openread_pblk_unsafe(b, epochs[pds::EpochSys::tid].ui); + } else { + return b; + } } template T* openwrite_pblk(T* b){ - return _esys->openwrite_pblk(b); + assert(epochs[pds::EpochSys::tid].ui != NULL_EPOCH); + return _esys->openwrite_pblk(b, epochs[pds::EpochSys::tid].ui); } std::unordered_map* recover_pblks(const int rec_thd=10){ return 
_esys->recover(rec_thd); } void recover_mode(){ - _esys->recover_mode(); + _esys->sys_mode = pds::RECOVER; // PDELETE -> nop } void online_mode(){ - _esys->online_mode(); + _esys->sys_mode = pds::ONLINE; } void flush(){ _esys->flush(); @@ -305,7 +350,7 @@ class Recoverable{ return &local_descs[pds::EpochSys::tid].ui; } uint64_t get_local_epoch(){ - return _esys->epochs[pds::EpochSys::tid].ui; + return epochs[pds::EpochSys::tid].ui; } }; diff --git a/src/persist/api/montage_global_api.cpp b/src/persist/api/montage_global_api.cpp index 29a29499..a6c7586e 100644 --- a/src/persist/api/montage_global_api.cpp +++ b/src/persist/api/montage_global_api.cpp @@ -1,5 +1,5 @@ #include "montage_global_api.hpp" namespace pds{ - EpochSys* global_esys = nullptr; + GlobalRecoverable* global_recoverable = nullptr; } \ No newline at end of file diff --git a/src/persist/api/montage_global_api.hpp b/src/persist/api/montage_global_api.hpp index 72bca494..74fdb23f 100644 --- a/src/persist/api/montage_global_api.hpp +++ b/src/persist/api/montage_global_api.hpp @@ -9,8 +9,22 @@ #include namespace pds{ + class GlobalRecoverable: public Recoverable{ + std::unordered_map* recovered_pblks = nullptr; + public: + GlobalRecoverable(GlobalTestConfig* gtc): Recoverable(gtc){} + ~GlobalRecoverable(){ + if (recovered_pblks){ + delete recovered_pblks; + } + } + std::unordered_map* get_recovered(){ + return recovered_pblks; + } + }; - extern EpochSys* global_esys; + extern GlobalRecoverable* global_recoverable; + inline void init(GlobalTestConfig* gtc){ // here we assume that pds::init is called before pds::init_thread, hence the assertion. 
@@ -19,80 +33,84 @@ namespace pds{ if (EpochSys::tid == -1){ EpochSys::tid = 0; } - global_esys = new EpochSys(gtc); + global_recoverable = new GlobalRecoverable(gtc); } inline void init_thread(int id) { EpochSys::tid = id; - // global_esys->init_thread(id); + // esys_global->init_thread(id); } inline void finalize(){ - delete global_esys; - global_esys = nullptr; // for debugging. + delete global_recoverable; + global_recoverable = nullptr; // for debugging. } #define CHECK_EPOCH() ({\ - global_esys->check_epoch();}) + global_recoverable->check_epoch();}) + + #define CHECK_EPOCH(c) ({\ + global_recoverable->check_epoch(c);}) // TODO: get rid of arguments in rideables. #define BEGIN_OP( ... ) ({ \ - global_esys->begin_op();}) + global_recoverable->begin_op();}) // end current operation by reducing transaction count of our epoch. // if our operation is already aborted, do nothing. #define END_OP ({\ - global_esys->end_op(); }) + global_recoverable->end_op();}) // end current operation by reducing transaction count of our epoch. // if our operation is already aborted, do nothing. #define END_READONLY_OP ({\ - global_esys->end_readonly_op(); }) + global_recoverable->end_readonly_op();}) - // end current epoch and not move towards next epoch in global_esys. + // end current epoch and not move towards next epoch in global_recoverable. #define ABORT_OP ({ \ - global_esys->abort_op(); }) + global_recoverable->abort_op();}) #define BEGIN_OP_AUTOEND( ... ) \ - Recoverable::MontageOpHolder __holder; + Recoverable::MontageOpHolder __holder(global_recoverable); #define BEGIN_READONLY_OP_AUTOEND( ... ) \ - Recoverable::MontageOpHolderReadOnly __holder; + Recoverable::MontageOpHolderReadOnly __holder_readonly(global_recoverable); #define PNEW(t, ...) 
({\ - global_esys->register_alloc_pblk(new t(__VA_ARGS__));}) + global_recoverable->pnew(__VA_ARGS__));}) #define PDELETE(b) ({\ - global_esys->pdelete(b);}) + global_recoverable->pdelete(b);}) #define PRETIRE(b) ({\ - global_esys->pretire(b);}) + global_recoverable->pretire(b);}) #define PRECLAIM(b) ({\ - global_esys->preclaim(b);}) + global_recoverable->preclaim(b);}) // Hs: This is for "owned" PBlk's, currently not used in code base. // may be useful for "data" blocks like dynamically-sized // persistent String payload. - #define PDELETE_DATA(b) ({\ - if (global_esys->sys_mode == ONLINE) {\ - delete(b);\ - }}) + // #define PDELETE_DATA(b) ({\ + // if (global_recoverable->sys_mode == ONLINE) {\ + // delete(b);\ + // }}) inline std::unordered_map* recover(const int rec_thd=10){ - return global_esys->recover(rec_thd); + global_recoverable->recover(rec_thd); + return global_recoverable->get_recovered(); } inline void flush(){ - global_esys->flush(); + global_recoverable->flush(); } inline void recover_mode(){ - global_esys->recover_mode(); + global_recoverable->recover_mode(); } inline void online_mode(){ - global_esys->online_mode(); + global_recoverable->online_mode(); } } From c31f373e5b2eac3947dea026f371d77fbca8fc19 Mon Sep 17 00:00:00 2001 From: Wentao Cai Date: Sun, 15 Nov 2020 21:56:22 -0500 Subject: [PATCH 46/56] reset epochs of blocks in pending_allocs during abort_op --- src/persist/api/Recoverable.hpp | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/persist/api/Recoverable.hpp b/src/persist/api/Recoverable.hpp index bd86fe3a..bee38abb 100644 --- a/src/persist/api/Recoverable.hpp +++ b/src/persist/api/Recoverable.hpp @@ -240,6 +240,17 @@ class Recoverable{ } void abort_op(){ assert(epochs[pds::EpochSys::tid].ui != NULL_EPOCH); + // TODO: any room for optimization here? 
+ for (auto b = pending_allocs[tid].ui.begin(); + b != pending_allocs[tid].ui.end(); b++){ + // reset epochs registered in pending blocks + (*b)->epoch = NULL_EPOCH; + // TODO: is get_data() still in use? + PBlk* data = (*b)->get_data(); + if (data){ + data->epoch = NULL_EPOCH; + } + } _esys->abort_transaction(epochs[pds::EpochSys::tid].ui); epochs[pds::EpochSys::tid].ui = NULL_EPOCH; } From 3dd98dd32c3f983382638cef25128708c16d804a Mon Sep 17 00:00:00 2001 From: Wentao Cai Date: Sun, 15 Nov 2020 22:21:32 -0500 Subject: [PATCH 47/56] add reset_alloc_pblk in EpochSys ensure epoch is NULL before registering pblk in pending_allocs --- src/persist/EpochSys.hpp | 23 ++++++++++++++++++++--- src/persist/api/Recoverable.hpp | 12 ++++-------- 2 files changed, 24 insertions(+), 11 deletions(-) diff --git a/src/persist/EpochSys.hpp b/src/persist/EpochSys.hpp index 75652362..b28c1588 100644 --- a/src/persist/EpochSys.hpp +++ b/src/persist/EpochSys.hpp @@ -224,6 +224,9 @@ class EpochSys{ template T* register_alloc_pblk(T* b, uint64_t c); + template + T* reset_alloc_pblk(T* b); + template PBlkArray* alloc_pblk_array(size_t s, uint64_t c); @@ -298,9 +301,7 @@ T* EpochSys::register_alloc_pblk(T* b, uint64_t c){ PBlk* blk = b; assert(c != NULL_EPOCH); blk->epoch = c; - // Wentao: It's possible that payload is registered multiple times - assert(blk->blktype == INIT || blk->blktype == OWNED || - blk->blktype == ALLOC); + assert(blk->blktype == INIT || blk->blktype == OWNED); if (blk->blktype == INIT){ blk->blktype = ALLOC; } @@ -316,6 +317,22 @@ T* EpochSys::register_alloc_pblk(T* b, uint64_t c){ return b; } +template +T* EpochSys::reset_alloc_pblk(T* b){ + ASSERT_DERIVE(T, PBlk); + ASSERT_COPY(T); + PBlk* blk = b; + blk->epoch = NULL_EPOCH; + assert(blk->blktype == ALLOC); + blk->blktype = INIT; + PBlk* data = blk->get_data(); + if (data){ + reset_alloc_pblk(data); + } + return b; +} + + template PBlkArray* EpochSys::alloc_pblk_array(size_t s, uint64_t c){ PBlkArray* ret = 
static_cast*>( diff --git a/src/persist/api/Recoverable.hpp b/src/persist/api/Recoverable.hpp index bee38abb..f5808e3d 100644 --- a/src/persist/api/Recoverable.hpp +++ b/src/persist/api/Recoverable.hpp @@ -221,6 +221,7 @@ class Recoverable{ // TODO: put pending_allocs-related stuff into operations? for (auto b = pending_allocs[pds::EpochSys::tid].ui.begin(); b != pending_allocs[pds::EpochSys::tid].ui.end(); b++){ + assert((*b)->epoch == NULL_EPOCH); _esys->register_alloc_pblk(*b, epochs[pds::EpochSys::tid].ui); } } @@ -241,15 +242,10 @@ class Recoverable{ void abort_op(){ assert(epochs[pds::EpochSys::tid].ui != NULL_EPOCH); // TODO: any room for optimization here? - for (auto b = pending_allocs[tid].ui.begin(); - b != pending_allocs[tid].ui.end(); b++){ + for (auto b = pending_allocs[pds::EpochSys::tid].ui.begin(); + b != pending_allocs[pds::EpochSys::tid].ui.end(); b++){ // reset epochs registered in pending blocks - (*b)->epoch = NULL_EPOCH; - // TODO: is get_data() still in use? - PBlk* data = (*b)->get_data(); - if (data){ - data->epoch = NULL_EPOCH; - } + _esys->reset_alloc_pblk(*b); } _esys->abort_transaction(epochs[pds::EpochSys::tid].ui); epochs[pds::EpochSys::tid].ui = NULL_EPOCH; From be27552142261d0372831ef207cdf010706db231 Mon Sep 17 00:00:00 2001 From: Wentao Cai Date: Sun, 15 Nov 2020 22:37:51 -0500 Subject: [PATCH 48/56] minor fix --- src/persist/api/Recoverable.hpp | 4 ++-- src/rideables/MontageLfHashTable.hpp | 6 ++++-- src/rideables/MontageMSQueue.hpp | 2 +- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/src/persist/api/Recoverable.hpp b/src/persist/api/Recoverable.hpp index f5808e3d..de3dfe06 100644 --- a/src/persist/api/Recoverable.hpp +++ b/src/persist/api/Recoverable.hpp @@ -221,7 +221,7 @@ class Recoverable{ // TODO: put pending_allocs-related stuff into operations? 
for (auto b = pending_allocs[pds::EpochSys::tid].ui.begin(); b != pending_allocs[pds::EpochSys::tid].ui.end(); b++){ - assert((*b)->epoch == NULL_EPOCH); + assert((*b)->get_epoch() == NULL_EPOCH); _esys->register_alloc_pblk(*b, epochs[pds::EpochSys::tid].ui); } } @@ -306,7 +306,7 @@ class Recoverable{ } template void pretire(T* b){ - assert(eochs[pds::EpochSys::tid].ui != NULL_EPOCH); + assert(epochs[pds::EpochSys::tid].ui != NULL_EPOCH); _esys->retire_pblk(b, epochs[pds::EpochSys::tid].ui); } template diff --git a/src/rideables/MontageLfHashTable.hpp b/src/rideables/MontageLfHashTable.hpp index 12f587f1..e43bc88b 100644 --- a/src/rideables/MontageLfHashTable.hpp +++ b/src/rideables/MontageLfHashTable.hpp @@ -45,7 +45,9 @@ class MontageLfHashTable : public RMap, Recoverable{ MarkPtr next; Payload* payload;// TODO: does it have to be atomic? Node(MontageLfHashTable* ds_, K k, V v, Node* n): - ds(ds_),key(k),next(n),payload(ds_->pnew(k,v)){}; + ds(ds_),key(k),next(n),payload(ds_->pnew(k,v)){ + // assert(ds->epochs[pds::EpochSys::tid].ui == NULL_EPOCH); + }; ~Node(){ ds->preclaim(payload); } @@ -258,7 +260,7 @@ optional MontageLfHashTable::replace(K key, V val, int tid) { while(true){ if(findNode(prev,curr,next,key,tid)){ tmpNode->next.ptr.store(curr); - abort_op(); + begin_op(); res=curr.get_val()->get_val(); if(prev->ptr.CAS_verify(this,curr,tmpNode)){ curr.get_val()->rm_payload(); diff --git a/src/rideables/MontageMSQueue.hpp b/src/rideables/MontageMSQueue.hpp index 6ab4040b..a5bcd6ba 100644 --- a/src/rideables/MontageMSQueue.hpp +++ b/src/rideables/MontageMSQueue.hpp @@ -34,7 +34,7 @@ class MontageMSQueue : public RQueue, Recoverable{ Node(): next(nullptr), payload(nullptr){}; Node(MontageMSQueue* ds_, T v): ds(ds_), next(nullptr), payload(ds_->pnew(v)){ - assert(esys->epochs[EpochSys::tid].ui == NULL_EPOCH); + // assert(ds->epochs[EpochSys::tid].ui == NULL_EPOCH); }; void set_sn(uint64_t s){ From a05c494b35b82133c2fe5911c37a01bdd16a7c2b Mon Sep 17 00:00:00 2001 
From: Haosen Wen Date: Sun, 15 Nov 2020 23:27:14 -0500 Subject: [PATCH 49/56] debug --- src/persist/api/Recoverable.hpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/persist/api/Recoverable.hpp b/src/persist/api/Recoverable.hpp index de3dfe06..7373471c 100644 --- a/src/persist/api/Recoverable.hpp +++ b/src/persist/api/Recoverable.hpp @@ -224,6 +224,7 @@ class Recoverable{ assert((*b)->get_epoch() == NULL_EPOCH); _esys->register_alloc_pblk(*b, epochs[pds::EpochSys::tid].ui); } + assert(epochs[pds::EpochSys::tid].ui != NULL_EPOCH); } void end_op(){ if (epochs[pds::EpochSys::tid].ui != NULL_EPOCH){ @@ -315,7 +316,7 @@ class Recoverable{ begin_op(); } _esys->reclaim_pblk(b, epochs[pds::EpochSys::tid].ui); - if (epochs[pds::EpochSys::tid].ui == NULL_EPOCH){ + if (epochs[pds::EpochSys::tid].ui != NULL_EPOCH){ end_op(); } } From 3c78531dbab87779839b7593465eaec9d24090e6 Mon Sep 17 00:00:00 2001 From: Wentao Cai Date: Sun, 15 Nov 2020 23:28:59 -0500 Subject: [PATCH 50/56] minor fix --- src/persist/api/Recoverable.hpp | 8 +++++--- src/rideables/MontageMSQueue.hpp | 10 +++++----- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/src/persist/api/Recoverable.hpp b/src/persist/api/Recoverable.hpp index de3dfe06..b59ff367 100644 --- a/src/persist/api/Recoverable.hpp +++ b/src/persist/api/Recoverable.hpp @@ -99,11 +99,11 @@ namespace pds{ lin_var load(Recoverable* ds); lin_var load_verify(Recoverable* ds); inline T load_val(Recoverable* ds){ - return reinterpret_cast(load().val); + return reinterpret_cast(load(ds).val); } bool CAS_verify(Recoverable* ds, lin_var expected, const T& desired); - inline bool CAS_verify(lin_var expected, const lin_var& desired){ - return CAS_verify(expected,desired.get_val()); + inline bool CAS_verify(Recoverable* ds, lin_var expected, const lin_var& desired){ + return CAS_verify(ds, expected,desired.get_val()); } // CAS doesn't check epoch nor cnt bool CAS(lin_var expected, const T& desired); @@ -226,6 +226,7 @@ 
class Recoverable{ } } void end_op(){ + assert(epochs[pds::EpochSys::tid].ui != NULL_EPOCH); if (epochs[pds::EpochSys::tid].ui != NULL_EPOCH){ _esys->end_transaction(epochs[pds::EpochSys::tid].ui); epochs[pds::EpochSys::tid].ui = NULL_EPOCH; @@ -233,6 +234,7 @@ class Recoverable{ pending_allocs[pds::EpochSys::tid].ui.clear(); } void end_readonly_op(){ + assert(epochs[pds::EpochSys::tid].ui != NULL_EPOCH); if (epochs[pds::EpochSys::tid].ui != NULL_EPOCH){ _esys->end_readonly_transaction(epochs[pds::EpochSys::tid].ui); epochs[pds::EpochSys::tid].ui = NULL_EPOCH; diff --git a/src/rideables/MontageMSQueue.hpp b/src/rideables/MontageMSQueue.hpp index a5bcd6ba..eee46c50 100644 --- a/src/rideables/MontageMSQueue.hpp +++ b/src/rideables/MontageMSQueue.hpp @@ -92,7 +92,7 @@ void MontageMSQueue::enqueue(T v, int tid){ // Node* cur_head = head.load(); cur_tail = tail.load(); uint64_t s = global_sn.fetch_add(1); - lin_var next = cur_tail->next.load(); + lin_var next = cur_tail->next.load(this); if(cur_tail == tail.load()){ if(next.get_val() == nullptr) { // directly set m_sn and BEGIN_OP will flush it @@ -123,11 +123,11 @@ optional MontageMSQueue::dequeue(int tid){ optional res = {}; tracker.start_op(tid); while(true){ - lin_var cur_head = head.load(); + lin_var cur_head = head.load(this); Node* cur_tail = tail.load(); - Node* next = cur_head.get_val()->next.load_val(); + Node* next = cur_head.get_val()->next.load_val(this); - if(cur_head == head.load()){ + if(cur_head == head.load(this)){ if(cur_head.get_val() == cur_tail){ // queue is empty if(next == nullptr) { @@ -138,7 +138,7 @@ optional MontageMSQueue::dequeue(int tid){ } else { begin_op(); Payload* payload = next->payload;// get payload for PDELETE - if(head.CAS_verify(cur_head, next)){ + if(head.CAS_verify(this, cur_head, next)){ res = (T)payload->get_val();// old see new is impossible pretire(payload); // semantically we are removing next from queue end_op(); From 7a3d5c735e769f3527bc082dd338b72a4b1240b8 Mon Sep 17 
00:00:00 2001 From: Wentao Cai Date: Sun, 15 Nov 2020 23:33:26 -0500 Subject: [PATCH 51/56] debug montage msq --- src/rideables/MontageMSQueue.hpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rideables/MontageMSQueue.hpp b/src/rideables/MontageMSQueue.hpp index eee46c50..73dbf006 100644 --- a/src/rideables/MontageMSQueue.hpp +++ b/src/rideables/MontageMSQueue.hpp @@ -85,7 +85,7 @@ class MontageMSQueue : public RQueue, Recoverable{ template void MontageMSQueue::enqueue(T v, int tid){ - Node* new_node = new Node(v); + Node* new_node = new Node(this,v); Node* cur_tail = nullptr; tracker.start_op(tid); while(true){ @@ -139,7 +139,7 @@ optional MontageMSQueue::dequeue(int tid){ begin_op(); Payload* payload = next->payload;// get payload for PDELETE if(head.CAS_verify(this, cur_head, next)){ - res = (T)payload->get_val();// old see new is impossible + res = (T)payload->get_val(this);// old see new is impossible pretire(payload); // semantically we are removing next from queue end_op(); cur_head.get_val()->payload = payload; // let payload have same lifetime as dummy node From a5d972715bd13d2e06cedc63d70c4e036a20b435 Mon Sep 17 00:00:00 2001 From: Wentao Cai Date: Sun, 15 Nov 2020 23:45:59 -0500 Subject: [PATCH 52/56] add Persistent::init() and finalize() in all persistent rideables --- src/main.cpp | 2 +- src/rideables/DLGraph.hpp | 5 +++++ src/rideables/DaliUnorderedMap.hpp | 2 ++ src/rideables/FriedmanQueue.hpp | 5 ++++- src/rideables/HashTable.hpp | 4 ++++ src/rideables/NVMGraph.hpp | 5 +++++ src/rideables/NVMSOFTHashTable.hpp | 7 ++++++- src/rideables/PLockfreeHashTable.hpp | 8 ++++++-- src/rideables/SOFTHashTable.hpp | 7 ++++++- 9 files changed, 39 insertions(+), 6 deletions(-) diff --git a/src/main.cpp b/src/main.cpp index 79f0b6dd..f9d3f351 100644 --- a/src/main.cpp +++ b/src/main.cpp @@ -88,7 +88,7 @@ int main(int argc, char *argv[]) /* queues */ // gtc.addRideableOption(new MSQueueFactory(), "MSQueue");//transient // 
gtc.addRideableOption(new FriedmanQueueFactory(), "FriedmanQueue");//comparison - // gtc.addRideableOption(new MontageMSQueueFactory(), "MontageMSQueue"); + gtc.addRideableOption(new MontageMSQueueFactory(), "MontageMSQueue"); #if !defined(MNEMOSYNE) and !defined(PRONTO) gtc.addRideableOption(new QueueFactory(), "TransientQueue"); gtc.addRideableOption(new QueueFactory(), "TransientQueue"); diff --git a/src/rideables/DLGraph.hpp b/src/rideables/DLGraph.hpp index c1f22154..d6ad3b7b 100644 --- a/src/rideables/DLGraph.hpp +++ b/src/rideables/DLGraph.hpp @@ -141,6 +141,7 @@ class DLGraph : public RGraph { }; DLGraph(GlobalTestConfig* gtc) { + Persistent::init(); idxToVertex = new tVertex*[numVertices]; // Initialize... for (size_t i = 0; i < numVertices; i++) { @@ -148,6 +149,10 @@ class DLGraph : public RGraph { } } + ~DLGraph(){ + Persistent::finalize(); + } + tVertex** idxToVertex; // Transient set of transient vertices to index map diff --git a/src/rideables/DaliUnorderedMap.hpp b/src/rideables/DaliUnorderedMap.hpp index 852e5c8a..0907809c 100644 --- a/src/rideables/DaliUnorderedMap.hpp +++ b/src/rideables/DaliUnorderedMap.hpp @@ -221,6 +221,7 @@ class DaliUnorderedMap : public RMap{ public: DaliUnorderedMap(GlobalTestConfig* gtc): task_num(gtc->task_num){ + Persistent::init(); buckets = (Bucket*)RP_malloc(sizeof(Bucket)*idxSize); new (buckets) Bucket [idxSize] (); flist = new FList(); @@ -259,6 +260,7 @@ class DaliUnorderedMap : public RMap{ ~DaliUnorderedMap(){ std::cout<<"current epoch: "<load()< get(K key, int tid); diff --git a/src/rideables/FriedmanQueue.hpp b/src/rideables/FriedmanQueue.hpp index 9039403d..fa994c41 100644 --- a/src/rideables/FriedmanQueue.hpp +++ b/src/rideables/FriedmanQueue.hpp @@ -50,6 +50,7 @@ class FriedmanQueue : public RQueue{ public: FriedmanQueue(int task_num): tracker(task_num, 100, 1000, true){ + Persistent::init(); head = (atomic_pptr*)RP_malloc(sizeof(atomic_pptr)); new (head) atomic_pptr(); tail = 
(atomic_pptr*)RP_malloc(sizeof(atomic_pptr)); @@ -69,7 +70,9 @@ class FriedmanQueue : public RQueue{ } } - ~FriedmanQueue(){}; + ~FriedmanQueue(){ + Persistent::finalize(); + }; void enqueue(std::string value, int tid); optional dequeue(int tid); diff --git a/src/rideables/HashTable.hpp b/src/rideables/HashTable.hpp index 2df89872..4d61e71c 100644 --- a/src/rideables/HashTable.hpp +++ b/src/rideables/HashTable.hpp @@ -206,9 +206,13 @@ class NVMHashTable : public RMap{ Bucket* buckets; NVMHashTable(GlobalTestConfig* gtc){ + Persistent::init(); buckets = (Bucket*)RP_malloc(sizeof(Bucket)*idxSize); new (buckets) Bucket [idxSize] (); }; + ~NVMHashTable(){ + Persistent::finalize(); + } optional get(std::string key, int tid){ diff --git a/src/rideables/NVMGraph.hpp b/src/rideables/NVMGraph.hpp index c042a654..c532ea93 100644 --- a/src/rideables/NVMGraph.hpp +++ b/src/rideables/NVMGraph.hpp @@ -106,6 +106,7 @@ class NVMGraph : public RGraph { }; NVMGraph(GlobalTestConfig* gtc) { + Persistent::init(); idxToVertex = new tVertex*[numVertices]; // Initialize... 
for (size_t i = 0; i < numVertices; i++) { @@ -113,6 +114,10 @@ class NVMGraph : public RGraph { } } + ~NVMGraph(){ + Persistent::finalize(); + } + tVertex** idxToVertex; // Transient set of transient vertices to index map // Thread-safe and does not leak edges diff --git a/src/rideables/NVMSOFTHashTable.hpp b/src/rideables/NVMSOFTHashTable.hpp index 3e09cd23..9e3f31ab 100644 --- a/src/rideables/NVMSOFTHashTable.hpp +++ b/src/rideables/NVMSOFTHashTable.hpp @@ -193,13 +193,18 @@ class NVMSOFTHashTable : public RMap NVMSOFTHashTable(GlobalTestConfig* gtc) : tracker(gtc->task_num, 100, 1000, true){ + Persistent::init(); for(size_t i=0;inext.store(new Node(ptail, false), std::memory_order_release); } - }; + } + + ~NVMSOFTHashTable(){ + Persistent::finalize(); + } private: std::hash hash_fn; diff --git a/src/rideables/PLockfreeHashTable.hpp b/src/rideables/PLockfreeHashTable.hpp index 9867ea07..0a8f989c 100644 --- a/src/rideables/PLockfreeHashTable.hpp +++ b/src/rideables/PLockfreeHashTable.hpp @@ -97,8 +97,12 @@ class PLockfreeHashTable : public RMap{ return mixPtrMark(mptr,true); } public: - PLockfreeHashTable(int task_num) : tracker(task_num, 100, 1000, true) {}; - ~PLockfreeHashTable(){}; + PLockfreeHashTable(int task_num) : tracker(task_num, 100, 1000, true) { + Persistent::init(); + }; + ~PLockfreeHashTable(){ + Persistent::finalize(); + }; optional get(std::string key, int tid); optional put(std::string key, std::string val, int tid); diff --git a/src/rideables/SOFTHashTable.hpp b/src/rideables/SOFTHashTable.hpp index 45cc9935..b251d8db 100644 --- a/src/rideables/SOFTHashTable.hpp +++ b/src/rideables/SOFTHashTable.hpp @@ -201,11 +201,16 @@ class SOFTHashTable : public RMap SOFTHashTable(GlobalTestConfig* gtc) : tracker(gtc->task_num, 100, 1000, true){ + Persistent::init(); for(size_t i=0;inext.store(new Node(std::string(1,(char)127), "", nullptr, false), std::memory_order_release); } - }; + } + + ~SOFTHashTable(){ + Persistent::finalize(); + } private: std::hash 
hash_fn; From bbbb4e04a7aca0c0fda21ff3c1a96253a0af0083 Mon Sep 17 00:00:00 2001 From: Haosen Wen Date: Mon, 16 Nov 2020 15:38:40 -0500 Subject: [PATCH 53/56] removed in all rideable hpp files && added copy constructors to payload classes --- src/persist/PString.hpp | 9 +++-- src/persist/TransactionTrackers.cpp | 4 +-- src/rideables/DLGraph.hpp | 1 - src/rideables/DaliUnorderedMap.hpp | 4 +-- src/rideables/HOHHashTable.hpp | 8 ++--- src/rideables/HashTable.hpp | 2 -- src/rideables/MSQueue.hpp | 2 -- src/rideables/MontageGraph.hpp | 8 ++--- src/rideables/MontageHashTable.hpp | 15 ++++---- src/rideables/MontageLfHashTable.hpp | 49 +++++++++++++------------- src/rideables/MontageMSQueue.hpp | 23 ++++++------ src/rideables/MontageNatarajanTree.hpp | 13 ++++--- src/rideables/MontageQueue.hpp | 8 ++--- src/rideables/NVMGraph.hpp | 6 ++-- src/rideables/PriorityQueue.hpp | 11 +++--- src/rideables/Queue.hpp | 2 -- src/rideables/TGraph.hpp | 2 -- src/rideables/Toy.hpp | 6 ++-- src/rideables/UnbalancedTree.hpp | 20 +++++------ 19 files changed, 88 insertions(+), 105 deletions(-) diff --git a/src/persist/PString.hpp b/src/persist/PString.hpp index 93e76752..ad19c812 100644 --- a/src/persist/PString.hpp +++ b/src/persist/PString.hpp @@ -5,7 +5,8 @@ #include #include "pptr.hpp" -using namespace pds; + +namespace pds{ // extern __thread int pds::_tid; @@ -157,14 +158,16 @@ class TrivialPString { } }; +} // namespace pds + namespace std { - template struct hash> { + template struct hash> { #ifndef MNEMOSYNE static hash str_hash; #else static hash str_hash; #endif - size_t operator()(const TrivialPString & x) const { + size_t operator()(const pds::TrivialPString & x) const { return str_hash(x.std_str()); } }; diff --git a/src/persist/TransactionTrackers.cpp b/src/persist/TransactionTrackers.cpp index e570dd3e..6604cf3b 100644 --- a/src/persist/TransactionTrackers.cpp +++ b/src/persist/TransactionTrackers.cpp @@ -1,7 +1,7 @@ #include "TransactionTrackers.hpp" #include "EpochSys.hpp" 
-namespace pds{ +using namespace pds; bool PerEpochTransactionTracker::consistent_set(uint64_t target, uint64_t c){ assert(EpochSys::tid != -1); @@ -159,5 +159,3 @@ void FenceEndTransactionTracker::set_unregister(paddedAtomic* indicators){ } FenceEndTransactionTracker::FenceEndTransactionTracker(atomic* ge, int task_num): NoFenceTransactionTracker(ge, task_num){} - -} \ No newline at end of file diff --git a/src/rideables/DLGraph.hpp b/src/rideables/DLGraph.hpp index c1f22154..6a3321ae 100644 --- a/src/rideables/DLGraph.hpp +++ b/src/rideables/DLGraph.hpp @@ -24,7 +24,6 @@ #define DLGRAPH_FLUSHOPT(addr) asm volatile ("clflushopt (%0)" :: "r"(addr)) #define DLGRAPH_SFENCE() asm volatile ("sfence" ::: "memory") -using namespace pds; template class DLGraph : public RGraph { diff --git a/src/rideables/DaliUnorderedMap.hpp b/src/rideables/DaliUnorderedMap.hpp index 852e5c8a..a8f087e2 100644 --- a/src/rideables/DaliUnorderedMap.hpp +++ b/src/rideables/DaliUnorderedMap.hpp @@ -591,8 +591,8 @@ optional DaliUnorderedMap::replace(K key, V val, int tid) { template <> struct DaliUnorderedMap::Node{ // TODO: This should be pptr rather than basic_string - TrivialPString key; - TrivialPString val; + pds::TrivialPString key; + pds::TrivialPString val; pptr next; Node(std::string k, optional v, Node* n):key(k),val(v.has_value()?v.value():""),next(n){ key.flush(); diff --git a/src/rideables/HOHHashTable.hpp b/src/rideables/HOHHashTable.hpp index d9ba6052..fc0151e6 100644 --- a/src/rideables/HOHHashTable.hpp +++ b/src/rideables/HOHHashTable.hpp @@ -8,19 +8,17 @@ #include "ConcurrentPrimitives.hpp" #include -using namespace pds; - template class HOHHashTable : public RMap, public Recoverable{ public: - class Payload : public PBlk{ + class Payload : public pds::PBlk{ GENERATE_FIELD(K, key, Payload); GENERATE_FIELD(V, val, Payload); public: Payload(){} Payload(K x, V y): m_key(x), m_val(y){} - // Payload(const Payload& oth): PBlk(oth), m_key(oth.m_key), m_val(oth.m_val){} + 
Payload(const Payload& oth): pds::PBlk(oth), m_key(oth.m_key), m_val(oth.m_val){} void persist(){} }; @@ -202,7 +200,7 @@ class HOHHashTableFactory : public RideableFactory{ #include #include "PString.hpp" template <> -class HOHHashTable::Payload : public PBlk{ +class HOHHashTable::Payload : public pds::PBlk{ GENERATE_FIELD(PString, key, Payload); GENERATE_FIELD(PString, val, Payload); diff --git a/src/rideables/HashTable.hpp b/src/rideables/HashTable.hpp index 2df89872..84fc0896 100644 --- a/src/rideables/HashTable.hpp +++ b/src/rideables/HashTable.hpp @@ -8,8 +8,6 @@ #include "ConcurrentPrimitives.hpp" #include -using namespace pds; - template class DRAMHashTable : public RMap{ public: diff --git a/src/rideables/MSQueue.hpp b/src/rideables/MSQueue.hpp index 81221700..4316ee77 100644 --- a/src/rideables/MSQueue.hpp +++ b/src/rideables/MSQueue.hpp @@ -10,8 +10,6 @@ #include "RCUTracker.hpp" #include "CustomTypes.hpp" -using namespace pds; - template class MSQueue : public RQueue{ private: diff --git a/src/rideables/MontageGraph.hpp b/src/rideables/MontageGraph.hpp index 8638b339..afa64827 100644 --- a/src/rideables/MontageGraph.hpp +++ b/src/rideables/MontageGraph.hpp @@ -21,8 +21,6 @@ #include #include -using namespace pds; - /** * SimpleGraph class. Labels are of templated type K. 
*/ @@ -32,12 +30,12 @@ class MontageGraph : public RGraph, public Recoverable{ public: class tVertex; - class BasePayload : public PBlk { + class BasePayload : public pds::PBlk { protected: GENERATE_FIELD(int, tag, BasePayload); public: BasePayload(){} - BasePayload(const BasePayload& oth): PBlk(oth){} + BasePayload(const BasePayload& oth): pds::PBlk(oth){} void persist(); }; @@ -311,7 +309,7 @@ class MontageGraph : public RGraph, public Recoverable{ int block_cnt = 0; auto begin = chrono::high_resolution_clock::now(); - std::unordered_map* recovered = recover_pblks(); + std::unordered_map* recovered = recover_pblks(); auto end = chrono::high_resolution_clock::now(); auto dur = end - begin; auto dur_ms = std::chrono::duration_cast(dur).count(); diff --git a/src/rideables/MontageHashTable.hpp b/src/rideables/MontageHashTable.hpp index 750d3649..2a7b6f91 100644 --- a/src/rideables/MontageHashTable.hpp +++ b/src/rideables/MontageHashTable.hpp @@ -9,19 +9,17 @@ #include #include -using namespace pds; - template class MontageHashTable : public RMap, public Recoverable{ public: - class Payload : public PBlk{ + class Payload : public pds::PBlk{ GENERATE_FIELD(K, key, Payload); GENERATE_FIELD(V, val, Payload); public: Payload(){} Payload(K x, V y): m_key(x), m_val(y){} - // Payload(const Payload& oth): PBlk(oth), m_key(oth.m_key), m_val(oth.m_val){} + Payload(const Payload& oth): pds::PBlk(oth), m_key(oth.m_key), m_val(oth.m_val){} void persist(){} }__attribute__((aligned(CACHELINE_SIZE))); @@ -216,7 +214,7 @@ class MontageHashTable : public RMap, public Recoverable{ rec_thd = stoi(gtc->getEnv("RecoverThread")); } auto begin = chrono::high_resolution_clock::now(); - std::unordered_map* recovered = recover_pblks(rec_thd); + std::unordered_map* recovered = recover_pblks(rec_thd); auto end = chrono::high_resolution_clock::now(); auto dur = end - begin; auto dur_ms = std::chrono::duration_cast(dur).count(); @@ -283,12 +281,13 @@ class MontageHashTableFactory : public 
RideableFactory{ #include #include "PString.hpp" template <> -class MontageHashTable::Payload : public PBlk{ - GENERATE_FIELD(PString, key, Payload); - GENERATE_FIELD(PString, val, Payload); +class MontageHashTable::Payload : public pds::PBlk{ + GENERATE_FIELD(pds::PString, key, Payload); + GENERATE_FIELD(pds::PString, val, Payload); public: Payload(const std::string& k, const std::string& v) : m_key(this, k), m_val(this, v){} + Payload(const Payload& oth) : pds::PBlk(oth), m_key(this, oth.m_key), m_val(this, oth.m_val){} void persist(){} }; diff --git a/src/rideables/MontageLfHashTable.hpp b/src/rideables/MontageLfHashTable.hpp index e43bc88b..d3468409 100644 --- a/src/rideables/MontageLfHashTable.hpp +++ b/src/rideables/MontageLfHashTable.hpp @@ -21,20 +21,20 @@ template class MontageLfHashTable : public RMap, Recoverable{ public: - class Payload : public PBlk{ + class Payload : public pds::PBlk{ GENERATE_FIELD(K, key, Payload); GENERATE_FIELD(V, val, Payload); public: Payload(){} Payload(K x, V y): m_key(x), m_val(y){} - // Payload(const Payload& oth): PBlk(oth), m_key(oth.m_key), m_val(oth.m_val){} + Payload(const Payload& oth): pds::PBlk(oth), m_key(oth.m_key), m_val(oth.m_val){} void persist(){} }; private: struct Node; struct MarkPtr{ - atomic_lin_var ptr; + pds::atomic_lin_var ptr; MarkPtr(Node* n):ptr(n){}; MarkPtr():ptr(nullptr){}; }; @@ -69,21 +69,21 @@ class MontageLfHashTable : public RMap, Recoverable{ std::hash hash_fn; const int idxSize=1000000;//number of buckets for hash table padded* buckets=new padded[idxSize]{}; - bool findNode(MarkPtr* &prev, lin_var &curr, lin_var &next, K key, int tid); + bool findNode(MarkPtr* &prev, pds::lin_var &curr, pds::lin_var &next, K key, int tid); RCUTracker tracker; const uint64_t MARK_MASK = ~0x1; - inline lin_var getPtr(const lin_var& d){ - return lin_var(d.val & MARK_MASK, d.cnt); + inline pds::lin_var getPtr(const pds::lin_var& d){ + return pds::lin_var(d.val & MARK_MASK, d.cnt); } - inline bool getMark(const 
lin_var& d){ + inline bool getMark(const pds::lin_var& d){ return (bool)(d.val & 1); } - inline lin_var mixPtrMark(const lin_var& d, bool mk){ - return lin_var(d.val | mk, d.cnt); + inline pds::lin_var mixPtrMark(const pds::lin_var& d, bool mk){ + return pds::lin_var(d.val | mk, d.cnt); } - inline Node* setMark(const lin_var& d){ + inline Node* setMark(const pds::lin_var& d){ return reinterpret_cast(d.val | 1); } public: @@ -120,8 +120,8 @@ template optional MontageLfHashTable::get(K key, int tid) { optional res={}; MarkPtr* prev=nullptr; - lin_var curr; - lin_var next; + pds::lin_var curr; + pds::lin_var next; tracker.start_op(tid); // hold epoch from advancing so that the node we find won't be deleted @@ -139,8 +139,8 @@ optional MontageLfHashTable::put(K key, V val, int tid) { optional res={}; Node* tmpNode = nullptr; MarkPtr* prev=nullptr; - lin_var curr; - lin_var next; + pds::lin_var curr; + pds::lin_var next; tmpNode = new Node(this, key, val, nullptr); tracker.start_op(tid); @@ -186,8 +186,8 @@ bool MontageLfHashTable::insert(K key, V val, int tid){ bool res=false; Node* tmpNode = nullptr; MarkPtr* prev=nullptr; - lin_var curr; - lin_var next; + pds::lin_var curr; + pds::lin_var next; tmpNode = new Node(this, key, val, nullptr); tracker.start_op(tid); @@ -218,8 +218,8 @@ template optional MontageLfHashTable::remove(K key, int tid) { optional res={}; MarkPtr* prev=nullptr; - lin_var curr; - lin_var next; + pds::lin_var curr; + pds::lin_var next; tracker.start_op(tid); while(true) { @@ -252,8 +252,8 @@ optional MontageLfHashTable::replace(K key, V val, int tid) { optional res={}; Node* tmpNode = nullptr; MarkPtr* prev=nullptr; - lin_var curr; - lin_var next; + pds::lin_var curr; + pds::lin_var next; tmpNode = new Node(this, key, val, nullptr); tracker.start_op(tid); @@ -287,7 +287,7 @@ optional MontageLfHashTable::replace(K key, V val, int tid) { } template -bool MontageLfHashTable::findNode(MarkPtr* &prev, lin_var &curr, lin_var &next, K key, int tid){ +bool 
MontageLfHashTable::findNode(MarkPtr* &prev, pds::lin_var &curr, pds::lin_var &next, K key, int tid){ while(true){ size_t idx=hash_fn(key)%idxSize; bool cmark=false; @@ -320,12 +320,13 @@ bool MontageLfHashTable::findNode(MarkPtr* &prev, lin_var &curr, lin_var &n #include #include "PString.hpp" template <> -class MontageLfHashTable::Payload : public PBlk{ - GENERATE_FIELD(PString, key, Payload); - GENERATE_FIELD(PString, val, Payload); +class MontageLfHashTable::Payload : public pds::PBlk{ + GENERATE_FIELD(pds::PString, key, Payload); + GENERATE_FIELD(pds::PString, val, Payload); public: Payload(std::string k, std::string v) : m_key(this, k), m_val(this, v){} + Payload(const Payload& oth) : pds::PBlk(oth), m_key(this, oth.m_key), m_val(this, oth.m_val){} void persist(){} }; diff --git a/src/rideables/MontageMSQueue.hpp b/src/rideables/MontageMSQueue.hpp index a5bcd6ba..987bca89 100644 --- a/src/rideables/MontageMSQueue.hpp +++ b/src/rideables/MontageMSQueue.hpp @@ -11,25 +11,23 @@ #include "CustomTypes.hpp" #include "Recoverable.hpp" -using namespace pds; - template class MontageMSQueue : public RQueue, Recoverable{ public: - class Payload : public PBlk{ + class Payload : public pds::PBlk{ GENERATE_FIELD(T, val, Payload); GENERATE_FIELD(uint64_t, sn, Payload); public: - Payload(): PBlk(){} - Payload(T v): PBlk(), m_val(v), m_sn(0){} - // Payload(const Payload& oth): PBlk(oth), m_sn(0), m_val(oth.m_val){} + Payload(): pds::PBlk(){} + Payload(T v): pds::PBlk(), m_val(v), m_sn(0){} + Payload(const Payload& oth): pds::PBlk(oth), m_sn(0), m_val(oth.m_val){} void persist(){} }; private: struct Node{ MontageMSQueue* ds; - atomic_lin_var next; + pds::atomic_lin_var next; Payload* payload; Node(): next(nullptr), payload(nullptr){}; @@ -53,7 +51,7 @@ class MontageMSQueue : public RQueue, Recoverable{ private: // dequeue pops node from head - atomic_lin_var head; + pds::atomic_lin_var head; // enqueue pushes node to tail std::atomic tail; RCUTracker tracker; @@ -92,7 +90,7 @@ 
void MontageMSQueue::enqueue(T v, int tid){ // Node* cur_head = head.load(); cur_tail = tail.load(); uint64_t s = global_sn.fetch_add(1); - lin_var next = cur_tail->next.load(); + pds::lin_var next = cur_tail->next.load(); if(cur_tail == tail.load()){ if(next.get_val() == nullptr) { // directly set m_sn and BEGIN_OP will flush it @@ -123,7 +121,7 @@ optional MontageMSQueue::dequeue(int tid){ optional res = {}; tracker.start_op(tid); while(true){ - lin_var cur_head = head.load(); + pds::lin_var cur_head = head.load(); Node* cur_tail = tail.load(); Node* next = cur_head.get_val()->next.load_val(); @@ -165,12 +163,13 @@ class MontageMSQueueFactory : public RideableFactory{ #include #include "PString.hpp" template <> -class MontageMSQueue::Payload : public PBlk{ - GENERATE_FIELD(PString, val, Payload); +class MontageMSQueue::Payload : public pds::PBlk{ + GENERATE_FIELD(pds::PString, val, Payload); GENERATE_FIELD(uint64_t, sn, Payload); public: Payload(std::string v) : m_val(this, v), m_sn(0){} + Payload(const Payload& oth) : pds::PBlk(oth), m_val(this, oth.m_val), m_sn(oth.m_sn){} void persist(){} }; diff --git a/src/rideables/MontageNatarajanTree.hpp b/src/rideables/MontageNatarajanTree.hpp index 8f112df7..2e63f9f9 100644 --- a/src/rideables/MontageNatarajanTree.hpp +++ b/src/rideables/MontageNatarajanTree.hpp @@ -11,18 +11,16 @@ #include "CustomTypes.hpp" #include "Recoverable.hpp" -using namespace pds; - template class MontageNatarajanTree : public RMap, public Recoverable{ public: - class Payload : public PBlk{ + class Payload : public pds::PBlk{ GENERATE_FIELD(K, key, Payload); GENERATE_FIELD(V, val, Payload); public: Payload(){} Payload(K x, V y): m_key(x), m_val(y){} - // Payload(const Payload& oth): PBlk(oth), m_key(oth.m_key), m_val(oth.m_val){} + Payload(const Payload& oth): PBlk(oth), m_key(oth.m_key), m_val(oth.m_val){} void persist(){} }; private: @@ -563,12 +561,13 @@ optional MontageNatarajanTree::replace(K key, V val, int tid){ #include #include 
"PString.hpp" template <> -class MontageNatarajanTree::Payload : public PBlk{ - GENERATE_FIELD(PString, key, Payload); - GENERATE_FIELD(PString, val, Payload); +class MontageNatarajanTree::Payload : public pds::PBlk{ + GENERATE_FIELD(pds::PString, key, Payload); + GENERATE_FIELD(pds::PString, val, Payload); public: Payload(std::string k, std::string v) : m_key(this, k), m_val(this, v){} + Payload(const Payload& oth) : pds::PBlk(oth), m_key(this, oth.m_key), m_val(this, oth.m_val){} void persist(){} }; diff --git a/src/rideables/MontageQueue.hpp b/src/rideables/MontageQueue.hpp index bda3f507..1dc86cb0 100644 --- a/src/rideables/MontageQueue.hpp +++ b/src/rideables/MontageQueue.hpp @@ -13,12 +13,11 @@ #include "Recoverable.hpp" #include -using namespace pds; template class MontageQueue : public RQueue, public Recoverable{ public: - class Payload : public PBlk{ + class Payload : public pds::PBlk{ GENERATE_FIELD(T, val, Payload); GENERATE_FIELD(uint64_t, sn, Payload); public: @@ -139,12 +138,13 @@ class MontageQueueFactory : public RideableFactory{ #include #include "PString.hpp" template <> -class MontageQueue::Payload : public PBlk{ - GENERATE_FIELD(PString, val, Payload); +class MontageQueue::Payload : public pds::PBlk{ + GENERATE_FIELD(pds::PString, val, Payload); GENERATE_FIELD(uint64_t, sn, Payload); public: Payload(std::string v, uint64_t n) : m_val(this, v), m_sn(n){} + Payload(const Payload& oth) : pds::PBlk(oth), m_val(this, oth.m_val), m_sn(oth.m_sn){} void persist(){} }; diff --git a/src/rideables/NVMGraph.hpp b/src/rideables/NVMGraph.hpp index c042a654..8757687d 100644 --- a/src/rideables/NVMGraph.hpp +++ b/src/rideables/NVMGraph.hpp @@ -19,8 +19,6 @@ #include "RCUTracker.hpp" #include "Recoverable.hpp" -using namespace pds; - /** * SimpleGraph class. Labels are of templated type K. 
*/ @@ -37,7 +35,7 @@ class NVMGraph : public RGraph { public: Vertex(){} Vertex(int id, int lbl): id(id), lbl(lbl){} - Vertex(const Vertex& oth): PBlk(oth), id(oth.id), lbl(oth.lbl) {} + Vertex(const Vertex& oth): id(oth.id), lbl(oth.lbl) {} bool operator==(const Vertex& oth) const { return id==oth.id;} void set_lbl(int lbl) { this->lbl = lbl; } int get_lbl() { return this->lbl; } @@ -54,7 +52,7 @@ class NVMGraph : public RGraph { Relation(){} Relation(Vertex* src, Vertex* dest, int weight): weight(weight), src(src->id), dest(dest->id){} Relation(tVertex *src, tVertex *dest, int weight): weight(weight), src(src->get_id()), dest(dest->get_id()){} - Relation(const Relation& oth): PBlk(oth), weight(oth.weight), src(oth.src), dest(oth.dest){} + Relation(const Relation& oth): weight(oth.weight), src(oth.src), dest(oth.dest){} void set_weight(int weight) { this->weight = weight; } int get_weight() { return this->weight; } int get_src() { return this->src; } diff --git a/src/rideables/PriorityQueue.hpp b/src/rideables/PriorityQueue.hpp index d5a03d74..ba1cd118 100644 --- a/src/rideables/PriorityQueue.hpp +++ b/src/rideables/PriorityQueue.hpp @@ -12,18 +12,18 @@ #include "Recoverable.hpp" #include "HeapQueue.hpp" -using namespace pds; //Wentao: TODO to fix later template class PriorityQueue : public HeapQueue, public Recoverable{ public: - class Payload : public PBlk{ + class Payload : public pds::PBlk{ GENERATE_FIELD(K, key, Payload); GENERATE_FIELD(V, val, Payload); GENERATE_FIELD(uint64_t, sn, Payload); public: Payload(){} Payload(K k, V v):m_key(k), m_val(v), m_sn(0){} + Payload(const Payload& oth): pds::PBlk(oth), m_key(oth.m_key), m_val(oth.m_val), m_sn(oth.m_sn){} void persist(){} }; @@ -123,13 +123,14 @@ class PriorityQueueFactory : public RideableFactory{ #include #include "PString.hpp" template<> -class PriorityQueue::Payload : public PBlk{ - GENERATE_FIELD(PString, key, Payload); - GENERATE_FIELD(PString, val, Payload); +class PriorityQueue::Payload : public 
pds::PBlk{ + GENERATE_FIELD(pds::PString, key, Payload); + GENERATE_FIELD(pds::PString, val, Payload); GENERATE_FIELD(uint64_t, sn, Payload); public: Payload(std::string k, std::string v):m_key(this, k), m_val(this, v), m_sn(0){} + Payload(const Payload& oth): pds::PBlk(oth), m_key(this, oth.m_key), m_val(this, oth.m_val), m_sn(oth.m_sn){} void persist(){} }; diff --git a/src/rideables/Queue.hpp b/src/rideables/Queue.hpp index 5054e412..a8bb4a46 100644 --- a/src/rideables/Queue.hpp +++ b/src/rideables/Queue.hpp @@ -8,8 +8,6 @@ #include "ConcurrentPrimitives.hpp" #include -using namespace pds; - template class DRAMQueue : public RQueue{ private: diff --git a/src/rideables/TGraph.hpp b/src/rideables/TGraph.hpp index 6eb0f304..257627f2 100644 --- a/src/rideables/TGraph.hpp +++ b/src/rideables/TGraph.hpp @@ -19,8 +19,6 @@ #include #include "RCUTracker.hpp" -using namespace pds; - /** * SimpleGraph class. Labels are of templated type K. */ diff --git a/src/rideables/Toy.hpp b/src/rideables/Toy.hpp index 2713bac1..c037915f 100644 --- a/src/rideables/Toy.hpp +++ b/src/rideables/Toy.hpp @@ -5,17 +5,15 @@ #include "TestConfig.hpp" -using namespace pds; - class Toy : public Rideable{ public: - class Payload : public PBlk{ + class Payload : public pds::PBlk{ GENERATE_FIELD(int, key, Payload); GENERATE_FIELD(int, val, Payload); public: Payload(){} Payload(int x, int y): m_key(x), m_val(y){} - Payload(const Payload& oth): PBlk(oth), m_key(oth.m_key), m_val(oth.m_val){} + Payload(const Payload& oth): pds::PBlk(oth), m_key(oth.m_key), m_val(oth.m_val){} void persist(){} }; diff --git a/src/rideables/UnbalancedTree.hpp b/src/rideables/UnbalancedTree.hpp index cb835b47..130f5724 100644 --- a/src/rideables/UnbalancedTree.hpp +++ b/src/rideables/UnbalancedTree.hpp @@ -8,20 +8,19 @@ #include #include -using namespace pds; template class UnbalancedTree : public RMap, public Recoverable{ const optional NONE = {}; // to prevent compiler warnings. TODO: switch to std::optional<>. 
public: - class Payload : public PBlk{ + class Payload : public pds::PBlk{ GENERATE_FIELD(K, key, Payload); GENERATE_FIELD(V, val, Payload); GENERATE_FIELD(int, deleted, Payload); public: Payload(){} Payload(K x, V y): m_key(x), m_val(y), m_deleted(false){} - // Payload(const Payload& oth): PBlk(oth), m_key(oth.m_key), m_val(oth.m_val), m_deleted(oth.m_deleted){} + Payload(const Payload& oth): pds::PBlk(oth), m_key(oth.m_key), m_val(oth.m_val), m_deleted(oth.m_deleted){} void persist(){} }; @@ -84,7 +83,7 @@ class UnbalancedTree : public RMap, public Recoverable{ try{ HOHLockHolder lock_holder; return do_get(&lock_holder, root, key); - } catch(OldSeeNewException& e){ + } catch(pds::OldSeeNewException& e){ continue; } } @@ -125,7 +124,7 @@ class UnbalancedTree : public RMap, public Recoverable{ try{ HOHLockHolder lock_holder; return do_put(&lock_holder, root, key, val); - } catch (OldSeeNewException& e){ + } catch (pds::OldSeeNewException& e){ continue; } } @@ -172,7 +171,7 @@ class UnbalancedTree : public RMap, public Recoverable{ try{ HOHLockHolder lock_holder; return do_insert(&lock_holder, root, key, val); - } catch (OldSeeNewException& e){ + } catch (pds::OldSeeNewException& e){ continue; } } @@ -221,7 +220,7 @@ class UnbalancedTree : public RMap, public Recoverable{ try{ HOHLockHolder lock_holder; return do_remove(&lock_holder, root, key); - } catch (OldSeeNewException& e){ + } catch (pds::OldSeeNewException& e){ continue; } } @@ -326,13 +325,14 @@ class UnbalancedTreeFactory : public RideableFactory{ #include #include "PString.hpp" template <> -class UnbalancedTree::Payload : public PBlk{ - GENERATE_FIELD(PString, key, Payload); - GENERATE_FIELD(PString, val, Payload); +class UnbalancedTree::Payload : public pds::PBlk{ + GENERATE_FIELD(pds::PString, key, Payload); + GENERATE_FIELD(pds::PString, val, Payload); GENERATE_FIELD(int, deleted, Payload); public: Payload(std::string k, std::string v) : m_key(this, k), m_val(this, v), m_deleted(false){} + 
Payload(const Payload& oth) : pds::PBlk(oth), m_key(this, oth.m_key), m_val(this, oth.m_val), m_deleted(oth.m_deleted){} void persist(){} }; #endif \ No newline at end of file From a27f8457f7bdee927b6970fc6a0bfdabddd39a15 Mon Sep 17 00:00:00 2001 From: Haosen Wen Date: Sun, 22 Nov 2020 00:45:09 -0500 Subject: [PATCH 54/56] debug --- src/persist/api/Recoverable.hpp | 4 +++- src/rideables/MontageMSQueue.hpp | 9 +++++---- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/src/persist/api/Recoverable.hpp b/src/persist/api/Recoverable.hpp index 0efa51ce..5b5639b6 100644 --- a/src/persist/api/Recoverable.hpp +++ b/src/persist/api/Recoverable.hpp @@ -314,11 +314,13 @@ class Recoverable{ } template void preclaim(T* b){ + bool not_in_operation = false; if (epochs[pds::EpochSys::tid].ui == NULL_EPOCH){ + not_in_operation = true; begin_op(); } _esys->reclaim_pblk(b, epochs[pds::EpochSys::tid].ui); - if (epochs[pds::EpochSys::tid].ui != NULL_EPOCH){ + if (not_in_operation){ end_op(); } } diff --git a/src/rideables/MontageMSQueue.hpp b/src/rideables/MontageMSQueue.hpp index 6f09b81d..74eaf66c 100644 --- a/src/rideables/MontageMSQueue.hpp +++ b/src/rideables/MontageMSQueue.hpp @@ -26,14 +26,15 @@ class MontageMSQueue : public RQueue, Recoverable{ private: struct Node{ - MontageMSQueue* ds; + MontageMSQueue* ds = nullptr; pds::atomic_lin_var next; Payload* payload; - Node(): next(nullptr), payload(nullptr){}; + Node(): next(nullptr), payload(nullptr){} + Node(MontageMSQueue* ds_): ds(ds_), next(nullptr), payload(nullptr){} Node(MontageMSQueue* ds_, T v): ds(ds_), next(nullptr), payload(ds_->pnew(v)){ // assert(ds->epochs[EpochSys::tid].ui == NULL_EPOCH); - }; + } void set_sn(uint64_t s){ assert(payload!=nullptr && "payload shouldn't be null"); @@ -61,7 +62,7 @@ class MontageMSQueue : public RQueue, Recoverable{ Recoverable(gtc), global_sn(0), head(nullptr), tail(nullptr), tracker(gtc->task_num, 100, 1000, true){ - Node* dummy = new Node(); + Node* dummy = new 
Node(this); head.store(dummy); tail.store(dummy); } From 7f4153858302c0544f7ab8355a527e23174a6e71 Mon Sep 17 00:00:00 2001 From: Haosen Wen Date: Sun, 22 Nov 2020 15:53:41 -0500 Subject: [PATCH 55/56] debug --- src/rideables/HOHHashTable.hpp | 8 ++++---- src/rideables/MontageGraph.hpp | 20 +++++++++++--------- src/rideables/MontageHashTable.hpp | 6 +++--- src/rideables/MontageLfHashTable.hpp | 2 +- src/rideables/MontageNatarajanTree.hpp | 2 +- src/rideables/MontageQueue.hpp | 4 ++-- src/rideables/UnbalancedTree.hpp | 8 ++++---- src/tests/GraphRecoveryTest.hpp | 2 +- 8 files changed, 27 insertions(+), 25 deletions(-) diff --git a/src/rideables/HOHHashTable.hpp b/src/rideables/HOHHashTable.hpp index fc0151e6..8d9d4ae5 100644 --- a/src/rideables/HOHHashTable.hpp +++ b/src/rideables/HOHHashTable.hpp @@ -65,7 +65,7 @@ class HOHHashTable : public RMap, public Recoverable{ optional get(K key, int tid){ size_t idx=hash_fn(key)%idxSize; while(true){ - MontageOpHolder(this); + MontageOpHolder _holder(this); try{ HOHLockHolder holder; holder.hold(&buckets[idx]->ui.lock); @@ -88,7 +88,7 @@ class HOHHashTable : public RMap, public Recoverable{ size_t idx=hash_fn(key)%idxSize; ListNode* new_node = new ListNode(this, key, val); while(true){ - MontageOpHolder(this); + MontageOpHolder _holder(this); try{ HOHLockHolder holder; holder.hold(&buckets[idx]->ui.lock); @@ -123,7 +123,7 @@ class HOHHashTable : public RMap, public Recoverable{ size_t idx=hash_fn(key)%idxSize; ListNode* new_node = new ListNode(this, key, val); while(true){ - MontageOpHolder(this); + MontageOpHolder _holder(this); try{ HOHLockHolder holder; holder.hold(&buckets[idx]->ui.lock); @@ -160,7 +160,7 @@ class HOHHashTable : public RMap, public Recoverable{ optional remove(K key, int tid){ size_t idx=hash_fn(key)%idxSize; while(true){ - MontageOpHolder(this); + MontageOpHolder _holder(this); try{ HOHLockHolder holder; holder.hold(&buckets[idx]->ui.lock); diff --git a/src/rideables/MontageGraph.hpp 
b/src/rideables/MontageGraph.hpp index afa64827..bce9eb2b 100644 --- a/src/rideables/MontageGraph.hpp +++ b/src/rideables/MontageGraph.hpp @@ -121,7 +121,7 @@ class MontageGraph : public RGraph, public Recoverable{ }; MontageGraph(GlobalTestConfig* gtc) : Recoverable(gtc) { - MontageOpHolder(this); + MontageOpHolder _holder(this); idxToVertex = new tVertex*[numVertices]; // Initialize... for (size_t i = 0; i < numVertices; i++) { @@ -129,6 +129,8 @@ class MontageGraph : public RGraph, public Recoverable{ } } + ~MontageGraph() {} + void init_thread(GlobalTestConfig* gtc, LocalTestConfig* ltc){ Recoverable::init_thread(gtc, ltc); } @@ -137,7 +139,7 @@ class MontageGraph : public RGraph, public Recoverable{ // Thread-safe and does not leak edges void clear() { - // MontageOpHolder(this); + // MontageOpHolder _holder(this); for (size_t i = 0; i < numVertices; i++) { idxToVertex[i]->lock(); } @@ -173,7 +175,7 @@ class MontageGraph : public RGraph, public Recoverable{ } { - MontageOpHolder(this); + MontageOpHolder _holder(this); Relation* r = pnew(v1, v2, weight); v1->adjacency_list.insert(r); v2->dest_list.insert(r); @@ -198,7 +200,7 @@ class MontageGraph : public RGraph, public Recoverable{ // We utilize `get_unsafe` API because the Relation destination and vertex id will not change at all. v->lock(); { - MontageOpHolder(this); + MontageOpHolder _holder(this); if (std::any_of(v->adjacency_list.begin(), v->adjacency_list.end(), [=] (Relation *r) { return r->get_unsafe_dest(this) == v2; })) { retval = true; @@ -228,7 +230,7 @@ class MontageGraph : public RGraph, public Recoverable{ } { - MontageOpHolder(this); + MontageOpHolder _holder(this); // Scan v1 for an edge containing v2 in its adjacency list... 
Relation *rdel = nullptr; for (Relation *r : v1->adjacency_list) { @@ -263,7 +265,7 @@ class MontageGraph : public RGraph, public Recoverable{ * @param l The new label for the node */ bool set_lbl(int id, int l) { - MontageOpHolder(this); + MontageOpHolder _holder(this); tVertex *v = idxToVertex[id]; v->lock(); v->set_lbl(l); @@ -279,7 +281,7 @@ class MontageGraph : public RGraph, public Recoverable{ * @param w the new weight value */ bool set_weight(int src, int dest, int w) { - MontageOpHolder(this); + MontageOpHolder _holder(this); bool retval = false; /* tVertex *v = idxToVertex[src]; @@ -319,7 +321,7 @@ class MontageGraph : public RGraph, public Recoverable{ std::vector relationVector; std::vector vertexVector; { - MontageOpHolder(this); + MontageOpHolder _holder(this); for (auto itr = recovered->begin(); itr != recovered->end(); ++itr) { // iterate through all recovered blocks. Sort the blocks into vectors containing the different // payloads to be iterated over later. @@ -527,7 +529,7 @@ class MontageGraph : public RGraph, public Recoverable{ v->adjacency_list.clear(); v->dest_list.clear(); { - MontageOpHolder(this); + MontageOpHolder _holder(this); for (Relation *r : garbageList) { pdelete(r); } diff --git a/src/rideables/MontageHashTable.hpp b/src/rideables/MontageHashTable.hpp index 2a7b6f91..46381671 100644 --- a/src/rideables/MontageHashTable.hpp +++ b/src/rideables/MontageHashTable.hpp @@ -93,7 +93,7 @@ class MontageHashTable : public RMap, public Recoverable{ ListNode* new_node = new ListNode(this, key, val); // while(true){ std::lock_guard lk(buckets[idx].lock); - MontageOpHolder(this); + MontageOpHolder _holder(this); // try{ ListNode* curr = buckets[idx].head.next; ListNode* prev = &buckets[idx].head; @@ -126,7 +126,7 @@ class MontageHashTable : public RMap, public Recoverable{ ListNode* new_node = new ListNode(this, key, val); // while(true){ std::lock_guard lk(buckets[idx].lock); - MontageOpHolder(this); + MontageOpHolder _holder(this); // try{ 
ListNode* curr = buckets[idx].head.next; ListNode* prev = &buckets[idx].head; @@ -161,7 +161,7 @@ class MontageHashTable : public RMap, public Recoverable{ size_t idx=hash_fn(key)%idxSize; // while(true){ std::lock_guard lk(buckets[idx].lock); - MontageOpHolder(this); + MontageOpHolder _holder(this); // try{ ListNode* curr = buckets[idx].head.next; ListNode* prev = &buckets[idx].head; diff --git a/src/rideables/MontageLfHashTable.hpp b/src/rideables/MontageLfHashTable.hpp index d3468409..59cebf89 100644 --- a/src/rideables/MontageLfHashTable.hpp +++ b/src/rideables/MontageLfHashTable.hpp @@ -126,7 +126,7 @@ optional MontageLfHashTable::get(K key, int tid) { tracker.start_op(tid); // hold epoch from advancing so that the node we find won't be deleted if(findNode(prev,curr,next,key,tid)) { - MontageOpHolder(this); + MontageOpHolder _holder(this); res=curr.get_val()->get_unsafe_val();//never old see new as we find node before BEGIN_OP } tracker.end_op(tid); diff --git a/src/rideables/MontageNatarajanTree.hpp b/src/rideables/MontageNatarajanTree.hpp index 2e63f9f9..9d6e42dc 100644 --- a/src/rideables/MontageNatarajanTree.hpp +++ b/src/rideables/MontageNatarajanTree.hpp @@ -296,7 +296,7 @@ optional MontageNatarajanTree::get(K key, int tid){ seek(key,tid); leaf=getPtr(seekRecord->leaf); if(nodeEqual(key,leaf)){ - MontageOpHolder(this); + MontageOpHolder _holder(this); res = leaf->get_unsafe_val();//never old see new as we find node before BEGIN_OP } diff --git a/src/rideables/MontageQueue.hpp b/src/rideables/MontageQueue.hpp index 1dc86cb0..a8c513c2 100644 --- a/src/rideables/MontageQueue.hpp +++ b/src/rideables/MontageQueue.hpp @@ -92,7 +92,7 @@ void MontageQueue::enqueue(T val, int tid){ // no read or write so impossible to have old see new exception new_node->set_sn(global_sn); global_sn++; - MontageOpHolder(this); + MontageOpHolder _holder(this); if(tail == nullptr) { head = tail = new_node; return; @@ -106,7 +106,7 @@ optional MontageQueue::dequeue(int tid){ 
optional res = {}; // while(true){ lock.lock(); - MontageOpHolder(this); + MontageOpHolder _holder(this); // try { if(head == nullptr) { lock.unlock(); diff --git a/src/rideables/UnbalancedTree.hpp b/src/rideables/UnbalancedTree.hpp index 130f5724..cd996007 100644 --- a/src/rideables/UnbalancedTree.hpp +++ b/src/rideables/UnbalancedTree.hpp @@ -76,7 +76,7 @@ class UnbalancedTree : public RMap, public Recoverable{ optional get(K key, int tid){ while(true){ - MontageOpHolder(this); + MontageOpHolder _holder(this); if (!root){ return NONE; } else { @@ -117,7 +117,7 @@ class UnbalancedTree : public RMap, public Recoverable{ optional put(K key, V val, int tid){ while(true){ - MontageOpHolder(this); + MontageOpHolder _holder(this); if (!root){ root = new TreeNode(key, val); } else { @@ -163,7 +163,7 @@ class UnbalancedTree : public RMap, public Recoverable{ bool insert(K key, V val, int tid){ while(true){ - MontageOpHolder(this); + MontageOpHolder _holder(this); if (!root){ root = new TreeNode(key, val); return true; @@ -213,7 +213,7 @@ class UnbalancedTree : public RMap, public Recoverable{ optional remove(K key, int tid){ while(true){ - MontageOpHolder(this); + MontageOpHolder _holder(this); if (!root){ return NONE; } else { diff --git a/src/tests/GraphRecoveryTest.hpp b/src/tests/GraphRecoveryTest.hpp index db1ef45b..38943e74 100644 --- a/src/tests/GraphRecoveryTest.hpp +++ b/src/tests/GraphRecoveryTest.hpp @@ -50,7 +50,7 @@ class GraphRecoveryTest : public Test { Rideable* ptr = gtc->allocRideable(); g = dynamic_cast(ptr); if(!g){ - errexit("GraphTest must be run on RGraph type object."); + errexit("GraphRecoveryTest must be run on RGraph type object."); } rec = dynamic_cast(ptr); if (!rec){ From 731d283b2c074415b4aae963268002f3cf9b7f91 Mon Sep 17 00:00:00 2001 From: Haosen Wen Date: Sun, 22 Nov 2020 16:02:00 -0500 Subject: [PATCH 56/56] added POPEN API to the global api header --- src/persist/api/montage_global_api.hpp | 13 ++++++++++++- 1 file changed, 12 
insertions(+), 1 deletion(-) diff --git a/src/persist/api/montage_global_api.hpp b/src/persist/api/montage_global_api.hpp index 47940a1c..3fd46b5f 100644 --- a/src/persist/api/montage_global_api.hpp +++ b/src/persist/api/montage_global_api.hpp @@ -31,7 +31,6 @@ namespace pds{ extern GlobalRecoverable* global_recoverable; - inline void init(GlobalTestConfig* gtc){ // here we assume that pds::init is called before pds::init_thread, hence the assertion. // if this assertion triggers, note that the order may be reversed. Evaluation needed. @@ -93,6 +92,18 @@ namespace pds{ #define PRECLAIM(b) ({\ global_recoverable->preclaim(b);}) + + #define POPEN_READ(b) ({\ + global_recoverable->openread_pblk(b);}) + + #define POPEN_UNSAFE_READ(b) ({\ + global_recoverable->openread_pblk_unsafe(b);}) + + #define POPEN_WRITE(b) ({\ + global_recoverable->openwrite_pblk(b);}) + + #define REGISTER_PUPDATE(b) ({\ + global_recoverable->register_update_pblk(b);}) // Hs: This is for "owned" PBlk's, currently not used in code base. // may be useful for "data" blocks like dynamically-sized