From a651ea0d491b20b2f12a5718552230d7369a36fd Mon Sep 17 00:00:00 2001 From: Anton Evangelatov Date: Mon, 2 Apr 2018 18:04:48 +0200 Subject: [PATCH 01/38] swarm: rename MemStore to InmemoryStore --- swarm/network/stream/common_test.go | 4 ++-- swarm/network/stream/intervals/store_test.go | 6 +++--- swarm/network/stream/intervals_test.go | 2 +- swarm/pss/client/client_test.go | 2 +- swarm/pss/pss_test.go | 2 +- swarm/state/{memstore.go => inmemorystore.go} | 18 +++++++++--------- swarm/storage/mock/mem/mem_test.go | 4 ++-- 7 files changed, 19 insertions(+), 19 deletions(-) rename swarm/state/{memstore.go => inmemorystore.go} (79%) diff --git a/swarm/network/stream/common_test.go b/swarm/network/stream/common_test.go index cf19e9bc2de9..ddde89ec96f8 100644 --- a/swarm/network/stream/common_test.go +++ b/swarm/network/stream/common_test.go @@ -78,7 +78,7 @@ func NewStreamerService(ctx *adapters.ServiceContext) (node.Service, error) { db := storage.NewDBAPI(store) delivery := NewDelivery(kad, db) deliveries[id] = delivery - r := NewRegistry(addr, delivery, db, state.NewMemStore(), &RegistryOptions{ + r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{ SkipCheck: defaultSkipCheck, }) RegisterSwarmSyncerServer(r, db) @@ -111,7 +111,7 @@ func newStreamerTester(t *testing.T) (*p2ptest.ProtocolTester, *Registry, *stora db := storage.NewDBAPI(localStore) delivery := NewDelivery(to, db) - streamer := NewRegistry(addr, delivery, db, state.NewMemStore(), &RegistryOptions{ + streamer := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{ SkipCheck: defaultSkipCheck, }) teardown := func() { diff --git a/swarm/network/stream/intervals/store_test.go b/swarm/network/stream/intervals/store_test.go index 5efb6ae8a6ce..0ab14c065c13 100644 --- a/swarm/network/stream/intervals/store_test.go +++ b/swarm/network/stream/intervals/store_test.go @@ -25,9 +25,9 @@ import ( var ErrNotFound = errors.New("not found") -// TestMemStore tests basic functionality of MemStore. -func TestMemStore(t *testing.T) { - testStore(t, state.NewMemStore()) +// TestInmemoryStore tests basic functionality of InmemoryStore. +func TestInmemoryStore(t *testing.T) { + testStore(t, state.NewInmemoryStore()) } // testStore is a helper function to test various Store implementations. 
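Aside on the helper these renamed tests call into: testStore itself is not part of this series, but a minimal round-trip check against a store exposing the Put/Get/Delete/Close signatures that InmemoryStore implements (swarm/state/inmemorystore.go, later in this patch) could look like the sketch below. The local interface, payload type, and test name are illustrative, not the actual helper.

package state_test

import (
	"testing"

	"github.com/ethereum/go-ethereum/swarm/state"
)

// store captures only the behaviour this sketch needs; the signatures
// match those shown for InmemoryStore later in this series.
type store interface {
	Put(key string, i interface{}) error
	Get(key string, i interface{}) error
	Delete(key string) error
	Close() error
}

// roundTrip is an illustrative stand-in for the testStore helper: put a
// value, read it back, delete it, and expect the follow-up Get to fail
// (the state package documents ErrNotFound for missing keys).
func roundTrip(t *testing.T, s store) {
	type payload struct{ V string } // hypothetical value type
	if err := s.Put("key1", &payload{V: "hello"}); err != nil {
		t.Fatal(err)
	}
	var got payload
	if err := s.Get("key1", &got); err != nil {
		t.Fatal(err)
	}
	if got.V != "hello" {
		t.Fatalf("got %q, want %q", got.V, "hello")
	}
	if err := s.Delete("key1"); err != nil {
		t.Fatal(err)
	}
	if err := s.Get("key1", &got); err == nil {
		t.Fatal("expected an error when getting a deleted key")
	}
}

func TestInmemoryStoreRoundTrip(t *testing.T) {
	s := state.NewInmemoryStore()
	defer s.Close()
	roundTrip(t, s)
}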
diff --git a/swarm/network/stream/intervals_test.go b/swarm/network/stream/intervals_test.go index d0de0399394f..e83413ffe8ff 100644 --- a/swarm/network/stream/intervals_test.go +++ b/swarm/network/stream/intervals_test.go @@ -50,7 +50,7 @@ func newIntervalsStreamerService(ctx *adapters.ServiceContext) (node.Service, er db := storage.NewDBAPI(store) delivery := NewDelivery(kad, db) deliveries[id] = delivery - r := NewRegistry(addr, delivery, db, state.NewMemStore(), &RegistryOptions{ + r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{ SkipCheck: defaultSkipCheck, }) diff --git a/swarm/pss/client/client_test.go b/swarm/pss/client/client_test.go index 10f017119756..faa71bc7db36 100644 --- a/swarm/pss/client/client_test.go +++ b/swarm/pss/client/client_test.go @@ -212,7 +212,7 @@ func setupNetwork(numnodes int) (clients []*rpc.Client, err error) { } func newServices() adapters.Services { - stateStore := state.NewMemStore() + stateStore := state.NewInmemoryStore() kademlias := make(map[discover.NodeID]*network.Kademlia) kademlia := func(id discover.NodeID) *network.Kademlia { if k, ok := kademlias[id]; ok { diff --git a/swarm/pss/pss_test.go b/swarm/pss/pss_test.go index bbfb074dd381..1f8ec594fad0 100644 --- a/swarm/pss/pss_test.go +++ b/swarm/pss/pss_test.go @@ -1127,7 +1127,7 @@ func setupNetwork(numnodes int) (clients []*rpc.Client, err error) { } func newServices() adapters.Services { - stateStore := state.NewMemStore() + stateStore := state.NewInmemoryStore() kademlias := make(map[discover.NodeID]*network.Kademlia) kademlia := func(id discover.NodeID) *network.Kademlia { if k, ok := kademlias[id]; ok { diff --git a/swarm/state/memstore.go b/swarm/state/inmemorystore.go similarity index 79% rename from swarm/state/memstore.go rename to swarm/state/inmemorystore.go index 140697bdd09d..1ca25404a19e 100644 --- a/swarm/state/memstore.go +++ b/swarm/state/inmemorystore.go @@ -22,23 +22,23 @@ import ( "sync" ) -// MemStore is the reference implementation of Store interface that is supposed +// InmemoryStore is the reference implementation of Store interface that is supposed // to be used in tests. -type MemStore struct { +type InmemoryStore struct { db map[string][]byte mu sync.RWMutex } -// NewMemStore returns a new instance of MemStore. -func NewMemStore() *MemStore { - return &MemStore{ +// NewInmemoryStore returns a new instance of InmemoryStore. +func NewInmemoryStore() *InmemoryStore { + return &InmemoryStore{ db: make(map[string][]byte), } } // Get retrieves a value stored for a specific key. If there is no value found, // ErrNotFound is returned. -func (s *MemStore) Get(key string, i interface{}) (err error) { +func (s *InmemoryStore) Get(key string, i interface{}) (err error) { s.mu.RLock() defer s.mu.RUnlock() @@ -56,7 +56,7 @@ func (s *MemStore) Get(key string, i interface{}) (err error) { } // Put stores a value for a specific key. -func (s *MemStore) Put(key string, i interface{}) (err error) { +func (s *InmemoryStore) Put(key string, i interface{}) (err error) { s.mu.Lock() defer s.mu.Unlock() bytes := []byte{} @@ -77,7 +77,7 @@ func (s *MemStore) Put(key string, i interface{}) (err error) { } // Delete removes value stored under a specific key. -func (s *MemStore) Delete(key string) (err error) { +func (s *InmemoryStore) Delete(key string) (err error) { s.mu.Lock() defer s.mu.Unlock() @@ -89,6 +89,6 @@ func (s *MemStore) Delete(key string) (err error) { } // Close does not do anything. 
-func (s *MemStore) Close() error { +func (s *InmemoryStore) Close() error { return nil } diff --git a/swarm/storage/mock/mem/mem_test.go b/swarm/storage/mock/mem/mem_test.go index b93471c446ba..adcefaabb41d 100644 --- a/swarm/storage/mock/mem/mem_test.go +++ b/swarm/storage/mock/mem/mem_test.go @@ -22,9 +22,9 @@ import ( "github.com/ethereum/go-ethereum/swarm/storage/mock/test" ) -// TestDBStore is running test for a GlobalStore +// TestGlobalStore is running test for a GlobalStore // using test.MockStore function. -func TestMemStore(t *testing.T) { +func TestGlobalStore(t *testing.T) { test.MockStore(t, NewGlobalStore(), 100) } From 3ec0007ce47969bd33fa695bc5645bb532bb7a8c Mon Sep 17 00:00:00 2001 From: Anton Evangelatov Date: Mon, 2 Apr 2018 20:39:27 +0200 Subject: [PATCH 02/38] swarm/storage: MemStore tests - reproduce panic --- swarm/storage/memstore2_test.go | 95 +++++++++++++++++++++++++++++++++ 1 file changed, 95 insertions(+) create mode 100644 swarm/storage/memstore2_test.go diff --git a/swarm/storage/memstore2_test.go b/swarm/storage/memstore2_test.go new file mode 100644 index 000000000000..43397772426b --- /dev/null +++ b/swarm/storage/memstore2_test.go @@ -0,0 +1,95 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package storage + +import ( + "fmt" + "io/ioutil" + "os" + "testing" + + "github.com/ethereum/go-ethereum/log" +) + +func newLDBStore(t *testing.T) (*LDBStore, func()) { + dir, err := ioutil.TempDir("", "bzz-storage-test") + if err != nil { + t.Fatal(err) + } + log.Trace("memstore.tempdir", "dir", dir) + fmt.Println(dir) + + db, err := NewLDBStore(dir, MakeHashFunc(SHA3Hash), defaultDbCapacity, testPoFunc) + if err != nil { + t.Fatal(err) + } + + cleanup := func() { + err := os.RemoveAll(dir) + if err != nil { + t.Fatal(err) + } + } + + return db, cleanup +} + +func newMemStore(t *testing.T) (*MemStore, func()) { + ldb, cleanup := newLDBStore(t) + ldb.setCapacity(singletonSwarmDbCapacity) + + memstore := NewMemStore(ldb, defaultCacheCapacity) + return memstore, cleanup +} + +func TestMemStore(t *testing.T) { + log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) + + memStore, cleanup := newMemStore(t) + defer cleanup() + + tests := []struct { + n int // number of chunks to push to memStore + chunkSize int64 // size of chunk (by default in Swarm - 4096) + }{ + { + n: 10001, + chunkSize: 4096, + }, + } + + for _, tt := range tests { + var chunks []*Chunk + + for i := 0; i < tt.n; i++ { + chunks = append(chunks, NewChunk(nil, make(chan bool))) + } + + FakeChunk(tt.chunkSize, tt.n, chunks) + + for i := 0; i < tt.n; i++ { + memStore.Put(chunks[i]) + } + + for i := 0; i < tt.n; i++ { + _, err := memStore.Get(chunks[i].Key) + if err != nil { + t.Fatal(err) + } + } + } +} From 4809d949999bd774eb5509ddbc62983edd96233a Mon Sep 17 00:00:00 2001 From: Anton Evangelatov Date: Fri, 30 Mar 2018 18:55:56 +0200 Subject: [PATCH 03/38] swarm/storage: use dummy memstore --- swarm/storage/localstore.go | 6 +- swarm/storage/memstore.go | 603 ++++++++++++++++++------------------ swarm/swarm.go | 2 +- 3 files changed, 305 insertions(+), 306 deletions(-) diff --git a/swarm/storage/localstore.go b/swarm/storage/localstore.go index 3e19b5046e0a..6b85b7d93383 100644 --- a/swarm/storage/localstore.go +++ b/swarm/storage/localstore.go @@ -78,9 +78,9 @@ func NewTestLocalStoreForAddr(path string, basekey []byte) (*LocalStore, error) return localStore, nil } -func (self *LocalStore) CacheCounter() uint64 { - return uint64(self.memStore.Counter()) -} +//func (self *LocalStore) CacheCounter() uint64 { +//return uint64(self.memStore.Counter()) +//} // LocalStore is itself a chunk store // unsafe, in that the data is not integrity checked diff --git a/swarm/storage/memstore.go b/swarm/storage/memstore.go index 3e5c025daaf6..edaa3d217f37 100644 --- a/swarm/storage/memstore.go +++ b/swarm/storage/memstore.go @@ -19,11 +19,10 @@ package storage import ( + "bytes" "fmt" "sync" - "time" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" ) @@ -40,14 +39,14 @@ const ( defaultCacheCapacity = 5000 ) -type MemStore struct { - memtree *memTree - entryCnt, capacity uint // stored entries - accessCnt uint64 // access counter; oldest is thrown away when full - dbAccessCnt uint64 - ldbStore *LDBStore - lock sync.Mutex -} +//type MemStore struct { +//memtree *memTree +//entryCnt, capacity uint // stored entries +//accessCnt uint64 // access counter; oldest is thrown away when full +//dbAccessCnt uint64 +//ldbStore *LDBStore +//lock sync.Mutex +//} /* a hash prefix subtree containing subtrees or one storage entry (but never both) @@ -62,312 +61,312 @@ a hash prefix subtree containing subtrees or one storage entry (but never both) (access[] is a 
binary tree inside the multi-bit leveled hash tree) */ -func NewMemStore(d *LDBStore, capacity uint) (m *MemStore) { - m = &MemStore{} - m.memtree = newMemTree(memTreeFLW, nil, 0) - m.ldbStore = d - m.setCapacity(capacity) - return -} - -type memTree struct { - subtree []*memTree - parent *memTree - parentIdx uint - - bits uint // log2(subtree count) - width uint // subtree count - - entry *Chunk // if subtrees are present, entry should be nil - lastDBaccess uint64 - access []uint64 -} - -func newMemTree(b uint, parent *memTree, pidx uint) (node *memTree) { - node = new(memTree) - node.bits = b - node.width = 1 << b - node.subtree = make([]*memTree, node.width) - node.access = make([]uint64, node.width-1) - node.parent = parent - node.parentIdx = pidx - if parent != nil { - parent.subtree[pidx] = node - } - - return node -} +//func NewMemStore(d *LDBStore, capacity uint) (m *MemStore) { +//m = &MemStore{} +//m.memtree = newMemTree(memTreeFLW, nil, 0) +//m.ldbStore = d +//m.setCapacity(capacity) +//return +//} + +//type memTree struct { +//subtree []*memTree +//parent *memTree +//parentIdx uint + +//bits uint // log2(subtree count) +//width uint // subtree count + +//entry *Chunk // if subtrees are present, entry should be nil +//lastDBaccess uint64 +//access []uint64 +//} + +//func newMemTree(b uint, parent *memTree, pidx uint) (node *memTree) { +//node = new(memTree) +//node.bits = b +//node.width = 1 << b +//node.subtree = make([]*memTree, node.width) +//node.access = make([]uint64, node.width-1) +//node.parent = parent +//node.parentIdx = pidx +//if parent != nil { +//parent.subtree[pidx] = node +//} + +//return node +//} + +//func (node *memTree) updateAccess(a uint64) { +//aidx := uint(0) +//var aa uint64 +//oa := node.access[0] +//for node.access[aidx] == oa { +//node.access[aidx] = a +//if aidx > 0 { +//aa = node.access[((aidx-1)^1)+1] +//aidx = (aidx - 1) >> 1 +//} else { +//pidx := node.parentIdx +//node = node.parent +//if node == nil { +//return +//} +//nn := node.subtree[pidx^1] +//if nn != nil { +//aa = nn.access[0] +//} else { +//aa = 0 +//} +//aidx = (node.width + pidx - 2) >> 1 +//} + +//if (aa != 0) && (aa < a) { +//a = aa +//} +//} +//} + +//func (s *MemStore) setCapacity(c uint) { +//s.lock.Lock() +//defer s.lock.Unlock() + +//for c < s.entryCnt { +//s.removeOldest() +//} +//s.capacity = c +//} + +//func (s *MemStore) Counter() uint { +//return s.entryCnt +//} + +//// entry (not its copy) is going to be in MemStore +//func (s *MemStore) Put(entry *Chunk) { +//log.Trace("memstore.put", "key", entry.Key) +//if s.capacity == 0 { +//return +//} + +//s.lock.Lock() +//defer s.lock.Unlock() + +//if s.entryCnt >= s.capacity { +//s.removeOldest() +//} + +//s.accessCnt++ + +//memstorePutCounter.Inc(1) + +//node := s.memtree +//bitpos := uint(0) +//for node.entry == nil { +//l := entry.Key.bits(bitpos, node.bits) +//st := node.subtree[l] +//if st == nil { +//st = newMemTree(memTreeLW, node, l) +//bitpos += node.bits +//node = st +//break +//} +//bitpos += node.bits +//node = st +//} + +//if node.entry != nil { + +//if node.entry.Key.isEqual(entry.Key) { +//node.updateAccess(s.accessCnt) +//if entry.SData == nil { +//entry.Size = node.entry.Size +//entry.SData = node.entry.SData +//} +//if entry.ReqC == nil { +//entry.ReqC = node.entry.ReqC +//} +//entry.C = node.entry.C +//node.entry = entry +//return +//} + +//for node.entry != nil { + +//l := node.entry.Key.bits(bitpos, node.bits) +//st := node.subtree[l] +//if st == nil { +//st = newMemTree(memTreeLW, node, l) +//} +//st.entry = 
node.entry +//node.entry = nil +//st.updateAccess(node.access[0]) + +//l = entry.Key.bits(bitpos, node.bits) +//st = node.subtree[l] +//if st == nil { +//st = newMemTree(memTreeLW, node, l) +//} +//bitpos += node.bits +//node = st + +//} +//} + +//node.entry = entry +//node.lastDBaccess = s.dbAccessCnt +//node.updateAccess(s.accessCnt) +//s.entryCnt++ +//} + +//func (s *MemStore) Get(hash Key) (chunk *Chunk, err error) { +//log.Trace("memstore.get", "key", hash) +//s.lock.Lock() +//defer s.lock.Unlock() + +//node := s.memtree +//bitpos := uint(0) +//for node.entry == nil { +//l := hash.bits(bitpos, node.bits) +//st := node.subtree[l] +//if st == nil { +//log.Trace("memstore.get ErrChunkNotFound", "key", hash) +//return nil, ErrChunkNotFound +//} +//bitpos += node.bits +//node = st +//} + +//if node.entry.Key.isEqual(hash) { +//s.accessCnt++ +//node.updateAccess(s.accessCnt) +//chunk = node.entry +//if s.dbAccessCnt-node.lastDBaccess > dbForceUpdateAccessCnt { +//s.dbAccessCnt++ +//node.lastDBaccess = s.dbAccessCnt +//if s.ldbStore != nil { +//s.ldbStore.updateAccessCnt(hash) +//} +//} +//} else { +//err = ErrChunkNotFound +//} + +//log.Trace("memstore.get return", "key", hash, "chunk", chunk, "err", err) +//return +//} + +//func (s *MemStore) removeOldest() { +//defer metrics.GetOrRegisterResettingTimer("memstore.purge", metrics.DefaultRegistry).UpdateSince(time.Now()) + +//node := s.memtree +//log.Warn("purge memstore") +//for node.entry == nil { + +//aidx := uint(0) +//av := node.access[aidx] + +//for aidx < node.width/2-1 { +//if av == node.access[aidx*2+1] { +//node.access[aidx] = node.access[aidx*2+2] +//aidx = aidx*2 + 1 +//} else if av == node.access[aidx*2+2] { +//node.access[aidx] = node.access[aidx*2+1] +//aidx = aidx*2 + 2 +//} else { +//panic(nil) +//} +//} +//pidx := aidx*2 + 2 - node.width +//if (node.subtree[pidx] != nil) && (av == node.subtree[pidx].access[0]) { +//if node.subtree[pidx+1] != nil { +//node.access[aidx] = node.subtree[pidx+1].access[0] +//} else { +//node.access[aidx] = 0 +//} +//} else if (node.subtree[pidx+1] != nil) && (av == node.subtree[pidx+1].access[0]) { +//if node.subtree[pidx] != nil { +//node.access[aidx] = node.subtree[pidx].access[0] +//} else { +//node.access[aidx] = 0 +//} +//pidx++ +//} else { +//panic(nil) +//} + +////fmt.Println(pidx) +//node = node.subtree[pidx] + +//} + +//if node.entry.ReqC == nil { +//log.Trace(fmt.Sprintf("Memstore Clean: Waiting for chunk %v to be saved", node.entry.Key.Log())) +//<-node.entry.dbStoredC +//log.Trace(fmt.Sprintf("Memstore Clean: Chunk %v saved to DBStore. 
Ready to clear from mem.", node.entry.Key.Log())) + +//memstoreRemoveCounter.Inc(1) +//node.entry = nil +//s.entryCnt-- +//} else { +//return +//} + +//node.access[0] = 0 + +////--- + +//aidx := uint(0) +//for { +//aa := node.access[aidx] +//if aidx > 0 { +//aidx = (aidx - 1) >> 1 +//} else { +//pidx := node.parentIdx +//node = node.parent +//if node == nil { +//return +//} +//aidx = (node.width + pidx - 2) >> 1 +//} +//if (aa != 0) && ((aa < node.access[aidx]) || (node.access[aidx] == 0)) { +//node.access[aidx] = aa +//} +//} +//} -func (node *memTree) updateAccess(a uint64) { - aidx := uint(0) - var aa uint64 - oa := node.access[0] - for node.access[aidx] == oa { - node.access[aidx] = a - if aidx > 0 { - aa = node.access[((aidx-1)^1)+1] - aidx = (aidx - 1) >> 1 - } else { - pidx := node.parentIdx - node = node.parent - if node == nil { - return - } - nn := node.subtree[pidx^1] - if nn != nil { - aa = nn.access[0] - } else { - aa = 0 - } - aidx = (node.width + pidx - 2) >> 1 - } - - if (aa != 0) && (aa < a) { - a = aa - } - } +type MemStore struct { + m map[string]*Chunk + mu sync.RWMutex } -func (s *MemStore) setCapacity(c uint) { - s.lock.Lock() - defer s.lock.Unlock() - - for c < s.entryCnt { - s.removeOldest() +func NewMemStore(d *LDBStore, capacity uint) (m *MemStore) { + return &MemStore{ + m: make(map[string]*Chunk), } - s.capacity = c -} - -func (s *MemStore) Counter() uint { - return s.entryCnt } -// entry (not its copy) is going to be in MemStore -func (s *MemStore) Put(entry *Chunk) { - log.Trace("memstore.put", "key", entry.Key) - if s.capacity == 0 { - return +func (m *MemStore) Get(key Key) (*Chunk, error) { + m.mu.RLock() + defer m.mu.RUnlock() + c, ok := m.m[string(key[:])] + if !ok { + return nil, ErrChunkNotFound } - - s.lock.Lock() - defer s.lock.Unlock() - - if s.entryCnt >= s.capacity { - s.removeOldest() + if !bytes.Equal(c.Key, key) { + panic(fmt.Errorf("MemStore.Get: chunk key %s != req key %s", c.Key.Hex(), key.Hex())) } - - s.accessCnt++ - - memstorePutCounter.Inc(1) - - node := s.memtree - bitpos := uint(0) - for node.entry == nil { - l := entry.Key.bits(bitpos, node.bits) - st := node.subtree[l] - if st == nil { - st = newMemTree(memTreeLW, node, l) - bitpos += node.bits - node = st - break - } - bitpos += node.bits - node = st - } - - if node.entry != nil { - - if node.entry.Key.isEqual(entry.Key) { - node.updateAccess(s.accessCnt) - if entry.SData == nil { - entry.Size = node.entry.Size - entry.SData = node.entry.SData - } - if entry.ReqC == nil { - entry.ReqC = node.entry.ReqC - } - entry.C = node.entry.C - node.entry = entry - return - } - - for node.entry != nil { - - l := node.entry.Key.bits(bitpos, node.bits) - st := node.subtree[l] - if st == nil { - st = newMemTree(memTreeLW, node, l) - } - st.entry = node.entry - node.entry = nil - st.updateAccess(node.access[0]) - - l = entry.Key.bits(bitpos, node.bits) - st = node.subtree[l] - if st == nil { - st = newMemTree(memTreeLW, node, l) - } - bitpos += node.bits - node = st - - } - } - - node.entry = entry - node.lastDBaccess = s.dbAccessCnt - node.updateAccess(s.accessCnt) - s.entryCnt++ + return c, nil } -func (s *MemStore) Get(hash Key) (chunk *Chunk, err error) { - log.Trace("memstore.get", "key", hash) - s.lock.Lock() - defer s.lock.Unlock() - - node := s.memtree - bitpos := uint(0) - for node.entry == nil { - l := hash.bits(bitpos, node.bits) - st := node.subtree[l] - if st == nil { - log.Trace("memstore.get ErrChunkNotFound", "key", hash) - return nil, ErrChunkNotFound - } - bitpos += node.bits - 
node = st - } - - if node.entry.Key.isEqual(hash) { - s.accessCnt++ - node.updateAccess(s.accessCnt) - chunk = node.entry - if s.dbAccessCnt-node.lastDBaccess > dbForceUpdateAccessCnt { - s.dbAccessCnt++ - node.lastDBaccess = s.dbAccessCnt - if s.ldbStore != nil { - s.ldbStore.updateAccessCnt(hash) - } - } - } else { - err = ErrChunkNotFound - } - - log.Trace("memstore.get return", "key", hash, "chunk", chunk, "err", err) - return +func (m *MemStore) Put(c *Chunk) { + m.mu.Lock() + defer m.mu.Unlock() + m.m[string(c.Key[:])] = c } -func (s *MemStore) removeOldest() { - defer metrics.GetOrRegisterResettingTimer("memstore.purge", metrics.DefaultRegistry).UpdateSince(time.Now()) - - node := s.memtree - log.Warn("purge memstore") - for node.entry == nil { - - aidx := uint(0) - av := node.access[aidx] - - for aidx < node.width/2-1 { - if av == node.access[aidx*2+1] { - node.access[aidx] = node.access[aidx*2+2] - aidx = aidx*2 + 1 - } else if av == node.access[aidx*2+2] { - node.access[aidx] = node.access[aidx*2+1] - aidx = aidx*2 + 2 - } else { - panic(nil) - } - } - pidx := aidx*2 + 2 - node.width - if (node.subtree[pidx] != nil) && (av == node.subtree[pidx].access[0]) { - if node.subtree[pidx+1] != nil { - node.access[aidx] = node.subtree[pidx+1].access[0] - } else { - node.access[aidx] = 0 - } - } else if (node.subtree[pidx+1] != nil) && (av == node.subtree[pidx+1].access[0]) { - if node.subtree[pidx] != nil { - node.access[aidx] = node.subtree[pidx].access[0] - } else { - node.access[aidx] = 0 - } - pidx++ - } else { - panic(nil) - } - - //fmt.Println(pidx) - node = node.subtree[pidx] - - } - - if node.entry.ReqC == nil { - log.Trace(fmt.Sprintf("Memstore Clean: Waiting for chunk %v to be saved", node.entry.Key.Log())) - <-node.entry.dbStoredC - log.Trace(fmt.Sprintf("Memstore Clean: Chunk %v saved to DBStore. 
Ready to clear from mem.", node.entry.Key.Log())) +func (m *MemStore) setCapacity(n int) { - memstoreRemoveCounter.Inc(1) - node.entry = nil - s.entryCnt-- - } else { - return - } - - node.access[0] = 0 - - //--- - - aidx := uint(0) - for { - aa := node.access[aidx] - if aidx > 0 { - aidx = (aidx - 1) >> 1 - } else { - pidx := node.parentIdx - node = node.parent - if node == nil { - return - } - aidx = (node.width + pidx - 2) >> 1 - } - if (aa != 0) && ((aa < node.access[aidx]) || (node.access[aidx] == 0)) { - node.access[aidx] = aa - } - } } -// type MemStore struct { -// m map[string]*Chunk -// mu sync.RWMutex -// } - -// func NewMemStore(d *DbStore, capacity uint) (m *MemStore) { -// return &MemStore{ -// m: make(map[string]*Chunk), -// } -// } - -// func (m *MemStore) Get(key Key) (*Chunk, error) { -// m.mu.RLock() -// defer m.mu.RUnlock() -// c, ok := m.m[string(key[:])] -// if !ok { -// return nil, ErrNotFound -// } -// if !bytes.Equal(c.Key, key) { -// panic(fmt.Errorf("MemStore.Get: chunk key %s != req key %s", c.Key.Hex(), key.Hex())) -// } -// return c, nil -// } - -// func (m *MemStore) Put(c *Chunk) { -// m.mu.Lock() -// defer m.mu.Unlock() -// m.m[string(c.Key[:])] = c -// } - -// func (m *MemStore) setCapacity(n int) { - -// } - // Close memstore func (s *MemStore) Close() {} diff --git a/swarm/swarm.go b/swarm/swarm.go index ab4b3650e5a7..b37f518790aa 100644 --- a/swarm/swarm.go +++ b/swarm/swarm.go @@ -394,7 +394,7 @@ func (self *Swarm) periodicallyUpdateGauges() { } func (self *Swarm) updateGauges() { - cacheSizeGauge.Update(int64(self.lstore.CacheCounter())) + //cacheSizeGauge.Update(int64(self.lstore.CacheCounter())) uptimeGauge.Update(time.Since(startTime).Nanoseconds()) } From 1d54933cd17a436cd284e7ac3e03487fdcb41c4f Mon Sep 17 00:00:00 2001 From: Anton Evangelatov Date: Tue, 3 Apr 2018 14:27:51 +0200 Subject: [PATCH 04/38] swarm/storage: update test for MemStore and LDBStore --- swarm/storage/memstore2_test.go | 45 +++++++++++++++++++++++---------- 1 file changed, 31 insertions(+), 14 deletions(-) diff --git a/swarm/storage/memstore2_test.go b/swarm/storage/memstore2_test.go index 43397772426b..d298bd644e72 100644 --- a/swarm/storage/memstore2_test.go +++ b/swarm/storage/memstore2_test.go @@ -17,7 +17,6 @@ package storage import ( - "fmt" "io/ioutil" "os" "testing" @@ -31,7 +30,6 @@ func newLDBStore(t *testing.T) (*LDBStore, func()) { t.Fatal(err) } log.Trace("memstore.tempdir", "dir", dir) - fmt.Println(dir) db, err := NewLDBStore(dir, MakeHashFunc(SHA3Hash), defaultDbCapacity, testPoFunc) if err != nil { @@ -39,6 +37,7 @@ func newLDBStore(t *testing.T) (*LDBStore, func()) { } cleanup := func() { + db.Close() err := os.RemoveAll(dir) if err != nil { t.Fatal(err) @@ -48,26 +47,31 @@ func newLDBStore(t *testing.T) (*LDBStore, func()) { return db, cleanup } -func newMemStore(t *testing.T) (*MemStore, func()) { +func TestMemStoreAndLDBStore(t *testing.T) { ldb, cleanup := newLDBStore(t) ldb.setCapacity(singletonSwarmDbCapacity) - - memstore := NewMemStore(ldb, defaultCacheCapacity) - return memstore, cleanup -} - -func TestMemStore(t *testing.T) { - log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) - - memStore, cleanup := newMemStore(t) defer cleanup() + memStore := NewMemStore(ldb, defaultCacheCapacity) + tests := []struct { n int // number of chunks to push to memStore chunkSize int64 // size of chunk (by default in Swarm - 4096) }{ { - n: 10001, + n: 1, + chunkSize: 4096, + }, + { + n: 201, + chunkSize: 
4096, + }, + { + n: 20001, + chunkSize: 4096, + }, + { + n: 50001, chunkSize: 4096, }, } @@ -82,14 +86,27 @@ func TestMemStore(t *testing.T) { FakeChunk(tt.chunkSize, tt.n, chunks) for i := 0; i < tt.n; i++ { + go ldb.Put(chunks[i]) memStore.Put(chunks[i]) } for i := 0; i < tt.n; i++ { _, err := memStore.Get(chunks[i].Key) if err != nil { - t.Fatal(err) + if err == ErrChunkNotFound { + _, err := ldb.Get(chunks[i].Key) + if err != nil { + t.Fatal(err) + } + } else { + t.Fatal(err) + } } } + + // wait for all chunks to be stored before ending the test are cleaning up + for i := 0; i < tt.n; i++ { + <-chunks[i].dbStoredC + } } } From cc0ea9dfe39e0dd938d2322bc5133b9bbe601115 Mon Sep 17 00:00:00 2001 From: Anton Evangelatov Date: Tue, 3 Apr 2018 14:33:18 +0200 Subject: [PATCH 05/38] swarm/storage: split 3 impl for memstore --- swarm/storage/memstore.go | 372 ----------------- swarm/storage/memstore_lrucache.go | 74 ++++ ...ore2_test.go => memstore_lrucache_test.go} | 0 swarm/storage/memstore_map.god | 77 ++++ swarm/storage/memstore_tree.god | 373 ++++++++++++++++++ 5 files changed, 524 insertions(+), 372 deletions(-) delete mode 100644 swarm/storage/memstore.go create mode 100644 swarm/storage/memstore_lrucache.go rename swarm/storage/{memstore2_test.go => memstore_lrucache_test.go} (100%) create mode 100644 swarm/storage/memstore_map.god create mode 100644 swarm/storage/memstore_tree.god diff --git a/swarm/storage/memstore.go b/swarm/storage/memstore.go deleted file mode 100644 index edaa3d217f37..000000000000 --- a/swarm/storage/memstore.go +++ /dev/null @@ -1,372 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -// memory storage layer for the package blockhash - -package storage - -import ( - "bytes" - "fmt" - "sync" - - "github.com/ethereum/go-ethereum/metrics" -) - -//metrics variables -var ( - memstorePutCounter = metrics.NewRegisteredCounter("storage.db.memstore.put.count", nil) - memstoreRemoveCounter = metrics.NewRegisteredCounter("storage.db.memstore.rm.count", nil) -) - -const ( - memTreeLW = 2 // log2(subtree count) of the subtrees - memTreeFLW = 14 // log2(subtree count) of the root layer - dbForceUpdateAccessCnt = 1000 - defaultCacheCapacity = 5000 -) - -//type MemStore struct { -//memtree *memTree -//entryCnt, capacity uint // stored entries -//accessCnt uint64 // access counter; oldest is thrown away when full -//dbAccessCnt uint64 -//ldbStore *LDBStore -//lock sync.Mutex -//} - -/* -a hash prefix subtree containing subtrees or one storage entry (but never both) - -- access[0] stores the smallest (oldest) access count value in this subtree -- if it contains more subtrees and its subtree count is at least 4, access[1:2] - stores the smallest access count in the first and second halves of subtrees - (so that access[0] = min(access[1], access[2]) -- likewise, if subtree count is at least 8, - access[1] = min(access[3], access[4]) - access[2] = min(access[5], access[6]) - (access[] is a binary tree inside the multi-bit leveled hash tree) -*/ - -//func NewMemStore(d *LDBStore, capacity uint) (m *MemStore) { -//m = &MemStore{} -//m.memtree = newMemTree(memTreeFLW, nil, 0) -//m.ldbStore = d -//m.setCapacity(capacity) -//return -//} - -//type memTree struct { -//subtree []*memTree -//parent *memTree -//parentIdx uint - -//bits uint // log2(subtree count) -//width uint // subtree count - -//entry *Chunk // if subtrees are present, entry should be nil -//lastDBaccess uint64 -//access []uint64 -//} - -//func newMemTree(b uint, parent *memTree, pidx uint) (node *memTree) { -//node = new(memTree) -//node.bits = b -//node.width = 1 << b -//node.subtree = make([]*memTree, node.width) -//node.access = make([]uint64, node.width-1) -//node.parent = parent -//node.parentIdx = pidx -//if parent != nil { -//parent.subtree[pidx] = node -//} - -//return node -//} - -//func (node *memTree) updateAccess(a uint64) { -//aidx := uint(0) -//var aa uint64 -//oa := node.access[0] -//for node.access[aidx] == oa { -//node.access[aidx] = a -//if aidx > 0 { -//aa = node.access[((aidx-1)^1)+1] -//aidx = (aidx - 1) >> 1 -//} else { -//pidx := node.parentIdx -//node = node.parent -//if node == nil { -//return -//} -//nn := node.subtree[pidx^1] -//if nn != nil { -//aa = nn.access[0] -//} else { -//aa = 0 -//} -//aidx = (node.width + pidx - 2) >> 1 -//} - -//if (aa != 0) && (aa < a) { -//a = aa -//} -//} -//} - -//func (s *MemStore) setCapacity(c uint) { -//s.lock.Lock() -//defer s.lock.Unlock() - -//for c < s.entryCnt { -//s.removeOldest() -//} -//s.capacity = c -//} - -//func (s *MemStore) Counter() uint { -//return s.entryCnt -//} - -//// entry (not its copy) is going to be in MemStore -//func (s *MemStore) Put(entry *Chunk) { -//log.Trace("memstore.put", "key", entry.Key) -//if s.capacity == 0 { -//return -//} - -//s.lock.Lock() -//defer s.lock.Unlock() - -//if s.entryCnt >= s.capacity { -//s.removeOldest() -//} - -//s.accessCnt++ - -//memstorePutCounter.Inc(1) - -//node := s.memtree -//bitpos := uint(0) -//for node.entry == nil { -//l := entry.Key.bits(bitpos, node.bits) -//st := node.subtree[l] -//if st == nil { -//st = newMemTree(memTreeLW, node, l) -//bitpos += node.bits -//node = st -//break -//} -//bitpos += 
node.bits -//node = st -//} - -//if node.entry != nil { - -//if node.entry.Key.isEqual(entry.Key) { -//node.updateAccess(s.accessCnt) -//if entry.SData == nil { -//entry.Size = node.entry.Size -//entry.SData = node.entry.SData -//} -//if entry.ReqC == nil { -//entry.ReqC = node.entry.ReqC -//} -//entry.C = node.entry.C -//node.entry = entry -//return -//} - -//for node.entry != nil { - -//l := node.entry.Key.bits(bitpos, node.bits) -//st := node.subtree[l] -//if st == nil { -//st = newMemTree(memTreeLW, node, l) -//} -//st.entry = node.entry -//node.entry = nil -//st.updateAccess(node.access[0]) - -//l = entry.Key.bits(bitpos, node.bits) -//st = node.subtree[l] -//if st == nil { -//st = newMemTree(memTreeLW, node, l) -//} -//bitpos += node.bits -//node = st - -//} -//} - -//node.entry = entry -//node.lastDBaccess = s.dbAccessCnt -//node.updateAccess(s.accessCnt) -//s.entryCnt++ -//} - -//func (s *MemStore) Get(hash Key) (chunk *Chunk, err error) { -//log.Trace("memstore.get", "key", hash) -//s.lock.Lock() -//defer s.lock.Unlock() - -//node := s.memtree -//bitpos := uint(0) -//for node.entry == nil { -//l := hash.bits(bitpos, node.bits) -//st := node.subtree[l] -//if st == nil { -//log.Trace("memstore.get ErrChunkNotFound", "key", hash) -//return nil, ErrChunkNotFound -//} -//bitpos += node.bits -//node = st -//} - -//if node.entry.Key.isEqual(hash) { -//s.accessCnt++ -//node.updateAccess(s.accessCnt) -//chunk = node.entry -//if s.dbAccessCnt-node.lastDBaccess > dbForceUpdateAccessCnt { -//s.dbAccessCnt++ -//node.lastDBaccess = s.dbAccessCnt -//if s.ldbStore != nil { -//s.ldbStore.updateAccessCnt(hash) -//} -//} -//} else { -//err = ErrChunkNotFound -//} - -//log.Trace("memstore.get return", "key", hash, "chunk", chunk, "err", err) -//return -//} - -//func (s *MemStore) removeOldest() { -//defer metrics.GetOrRegisterResettingTimer("memstore.purge", metrics.DefaultRegistry).UpdateSince(time.Now()) - -//node := s.memtree -//log.Warn("purge memstore") -//for node.entry == nil { - -//aidx := uint(0) -//av := node.access[aidx] - -//for aidx < node.width/2-1 { -//if av == node.access[aidx*2+1] { -//node.access[aidx] = node.access[aidx*2+2] -//aidx = aidx*2 + 1 -//} else if av == node.access[aidx*2+2] { -//node.access[aidx] = node.access[aidx*2+1] -//aidx = aidx*2 + 2 -//} else { -//panic(nil) -//} -//} -//pidx := aidx*2 + 2 - node.width -//if (node.subtree[pidx] != nil) && (av == node.subtree[pidx].access[0]) { -//if node.subtree[pidx+1] != nil { -//node.access[aidx] = node.subtree[pidx+1].access[0] -//} else { -//node.access[aidx] = 0 -//} -//} else if (node.subtree[pidx+1] != nil) && (av == node.subtree[pidx+1].access[0]) { -//if node.subtree[pidx] != nil { -//node.access[aidx] = node.subtree[pidx].access[0] -//} else { -//node.access[aidx] = 0 -//} -//pidx++ -//} else { -//panic(nil) -//} - -////fmt.Println(pidx) -//node = node.subtree[pidx] - -//} - -//if node.entry.ReqC == nil { -//log.Trace(fmt.Sprintf("Memstore Clean: Waiting for chunk %v to be saved", node.entry.Key.Log())) -//<-node.entry.dbStoredC -//log.Trace(fmt.Sprintf("Memstore Clean: Chunk %v saved to DBStore. 
Ready to clear from mem.", node.entry.Key.Log())) - -//memstoreRemoveCounter.Inc(1) -//node.entry = nil -//s.entryCnt-- -//} else { -//return -//} - -//node.access[0] = 0 - -////--- - -//aidx := uint(0) -//for { -//aa := node.access[aidx] -//if aidx > 0 { -//aidx = (aidx - 1) >> 1 -//} else { -//pidx := node.parentIdx -//node = node.parent -//if node == nil { -//return -//} -//aidx = (node.width + pidx - 2) >> 1 -//} -//if (aa != 0) && ((aa < node.access[aidx]) || (node.access[aidx] == 0)) { -//node.access[aidx] = aa -//} -//} -//} - -type MemStore struct { - m map[string]*Chunk - mu sync.RWMutex -} - -func NewMemStore(d *LDBStore, capacity uint) (m *MemStore) { - return &MemStore{ - m: make(map[string]*Chunk), - } -} - -func (m *MemStore) Get(key Key) (*Chunk, error) { - m.mu.RLock() - defer m.mu.RUnlock() - c, ok := m.m[string(key[:])] - if !ok { - return nil, ErrChunkNotFound - } - if !bytes.Equal(c.Key, key) { - panic(fmt.Errorf("MemStore.Get: chunk key %s != req key %s", c.Key.Hex(), key.Hex())) - } - return c, nil -} - -func (m *MemStore) Put(c *Chunk) { - m.mu.Lock() - defer m.mu.Unlock() - m.m[string(c.Key[:])] = c -} - -func (m *MemStore) setCapacity(n int) { - -} - -// Close memstore -func (s *MemStore) Close() {} diff --git a/swarm/storage/memstore_lrucache.go b/swarm/storage/memstore_lrucache.go new file mode 100644 index 000000000000..74b63765710a --- /dev/null +++ b/swarm/storage/memstore_lrucache.go @@ -0,0 +1,74 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +// memory storage layer for the package blockhash + +package storage + +import ( + "bytes" + "fmt" + + lru "github.com/hashicorp/golang-lru" +) + +const ( + defaultCacheCapacity = 5000 +) + +type MemStore struct { + //cache *lru.ARCCache + cache *lru.Cache +} + +func NewMemStore(_ *LDBStore, capacity uint) (m *MemStore) { + onEvicted := func(key interface{}, value interface{}) { + v := value.(*Chunk) + <-v.dbStoredC + } + c, err := lru.NewWithEvict(int(capacity), onEvicted) + if err != nil { + panic(err) + } + + return &MemStore{ + //cache: lru.NewARC(capacity), + cache: c, + } +} + +func (m *MemStore) Get(key Key) (*Chunk, error) { + c, ok := m.cache.Get(string(key)) + if !ok { + return nil, ErrChunkNotFound + } + chunk := c.(*Chunk) + if !bytes.Equal(chunk.Key, key) { + panic(fmt.Errorf("MemStore.Get: chunk key %s != req key %s", chunk.Key.Hex(), key.Hex())) + } + return chunk, nil +} + +func (m *MemStore) Put(c *Chunk) { + m.cache.Add(string(c.Key), c) +} + +func (m *MemStore) setCapacity(n int) { + //no-op +} + +// Close memstore +func (s *MemStore) Close() {} diff --git a/swarm/storage/memstore2_test.go b/swarm/storage/memstore_lrucache_test.go similarity index 100% rename from swarm/storage/memstore2_test.go rename to swarm/storage/memstore_lrucache_test.go diff --git a/swarm/storage/memstore_map.god b/swarm/storage/memstore_map.god new file mode 100644 index 000000000000..c2f2855268a8 --- /dev/null +++ b/swarm/storage/memstore_map.god @@ -0,0 +1,77 @@ +// Copyright 2016 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +// memory storage layer for the package blockhash + +package storage + +import ( + "bytes" + "fmt" + "sync" + + "github.com/ethereum/go-ethereum/metrics" +) + +//metrics variables +var ( + memstorePutCounter = metrics.NewRegisteredCounter("storage.db.memstore.put.count", nil) + memstoreRemoveCounter = metrics.NewRegisteredCounter("storage.db.memstore.rm.count", nil) +) + +const ( + memTreeLW = 2 // log2(subtree count) of the subtrees + memTreeFLW = 14 // log2(subtree count) of the root layer + dbForceUpdateAccessCnt = 1000 + defaultCacheCapacity = 5000 +) + +type MemStore struct { + m map[string]*Chunk + mu sync.RWMutex +} + +func NewMemStore(d *LDBStore, capacity uint) (m *MemStore) { + return &MemStore{ + m: make(map[string]*Chunk), + } +} + +func (m *MemStore) Get(key Key) (*Chunk, error) { + m.mu.RLock() + defer m.mu.RUnlock() + c, ok := m.m[string(key[:])] + if !ok { + return nil, ErrChunkNotFound + } + if !bytes.Equal(c.Key, key) { + panic(fmt.Errorf("MemStore.Get: chunk key %s != req key %s", c.Key.Hex(), key.Hex())) + } + return c, nil +} + +func (m *MemStore) Put(c *Chunk) { + m.mu.Lock() + defer m.mu.Unlock() + m.m[string(c.Key[:])] = c +} + +func (m *MemStore) setCapacity(n int) { + +} + +// Close memstore +func (s *MemStore) Close() {} diff --git a/swarm/storage/memstore_tree.god b/swarm/storage/memstore_tree.god new file mode 100644 index 000000000000..3e5c025daaf6 --- /dev/null +++ b/swarm/storage/memstore_tree.god @@ -0,0 +1,373 @@ +// Copyright 2016 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +// memory storage layer for the package blockhash + +package storage + +import ( + "fmt" + "sync" + "time" + + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" +) + +//metrics variables +var ( + memstorePutCounter = metrics.NewRegisteredCounter("storage.db.memstore.put.count", nil) + memstoreRemoveCounter = metrics.NewRegisteredCounter("storage.db.memstore.rm.count", nil) +) + +const ( + memTreeLW = 2 // log2(subtree count) of the subtrees + memTreeFLW = 14 // log2(subtree count) of the root layer + dbForceUpdateAccessCnt = 1000 + defaultCacheCapacity = 5000 +) + +type MemStore struct { + memtree *memTree + entryCnt, capacity uint // stored entries + accessCnt uint64 // access counter; oldest is thrown away when full + dbAccessCnt uint64 + ldbStore *LDBStore + lock sync.Mutex +} + +/* +a hash prefix subtree containing subtrees or one storage entry (but never both) + +- access[0] stores the smallest (oldest) access count value in this subtree +- if it contains more subtrees and its subtree count is at least 4, access[1:2] + stores the smallest access count in the first and second halves of subtrees + (so that access[0] = min(access[1], access[2]) +- likewise, if subtree count is at least 8, + access[1] = min(access[3], access[4]) + access[2] = min(access[5], access[6]) + (access[] is a binary tree inside the multi-bit leveled hash tree) +*/ + +func NewMemStore(d *LDBStore, capacity uint) (m *MemStore) { + m = &MemStore{} + m.memtree = newMemTree(memTreeFLW, nil, 0) + m.ldbStore = d + m.setCapacity(capacity) + return +} + +type memTree struct { + subtree []*memTree + parent *memTree + parentIdx uint + + bits uint // log2(subtree count) + width uint // subtree count + + entry *Chunk // if subtrees are present, entry should be nil + lastDBaccess uint64 + access []uint64 +} + +func newMemTree(b uint, parent *memTree, pidx uint) (node *memTree) { + node = new(memTree) + node.bits = b + node.width = 1 << b + node.subtree = make([]*memTree, node.width) + node.access = make([]uint64, node.width-1) + node.parent = parent + node.parentIdx = pidx + if parent != nil { + parent.subtree[pidx] = node + } + + return node +} + +func (node *memTree) updateAccess(a uint64) { + aidx := uint(0) + var aa uint64 + oa := node.access[0] + for node.access[aidx] == oa { + node.access[aidx] = a + if aidx > 0 { + aa = node.access[((aidx-1)^1)+1] + aidx = (aidx - 1) >> 1 + } else { + pidx := node.parentIdx + node = node.parent + if node == nil { + return + } + nn := node.subtree[pidx^1] + if nn != nil { + aa = nn.access[0] + } else { + aa = 0 + } + aidx = (node.width + pidx - 2) >> 1 + } + + if (aa != 0) && (aa < a) { + a = aa + } + } +} + +func (s *MemStore) setCapacity(c uint) { + s.lock.Lock() + defer s.lock.Unlock() + + for c < s.entryCnt { + s.removeOldest() + } + s.capacity = c +} + +func (s *MemStore) Counter() uint { + return s.entryCnt +} + +// entry (not its copy) is going to be in MemStore +func (s *MemStore) Put(entry *Chunk) { + log.Trace("memstore.put", "key", entry.Key) + if s.capacity == 0 { + return + } + + s.lock.Lock() + defer s.lock.Unlock() + + if s.entryCnt >= s.capacity { + s.removeOldest() + } + + s.accessCnt++ + + memstorePutCounter.Inc(1) + + node := s.memtree + bitpos := uint(0) + for node.entry == nil { + l := entry.Key.bits(bitpos, node.bits) + st := node.subtree[l] + if st == nil { + st = newMemTree(memTreeLW, node, l) + bitpos += node.bits + node = st + break + } + bitpos += node.bits + node = st + } + + if node.entry != nil { + + if 
node.entry.Key.isEqual(entry.Key) { + node.updateAccess(s.accessCnt) + if entry.SData == nil { + entry.Size = node.entry.Size + entry.SData = node.entry.SData + } + if entry.ReqC == nil { + entry.ReqC = node.entry.ReqC + } + entry.C = node.entry.C + node.entry = entry + return + } + + for node.entry != nil { + + l := node.entry.Key.bits(bitpos, node.bits) + st := node.subtree[l] + if st == nil { + st = newMemTree(memTreeLW, node, l) + } + st.entry = node.entry + node.entry = nil + st.updateAccess(node.access[0]) + + l = entry.Key.bits(bitpos, node.bits) + st = node.subtree[l] + if st == nil { + st = newMemTree(memTreeLW, node, l) + } + bitpos += node.bits + node = st + + } + } + + node.entry = entry + node.lastDBaccess = s.dbAccessCnt + node.updateAccess(s.accessCnt) + s.entryCnt++ +} + +func (s *MemStore) Get(hash Key) (chunk *Chunk, err error) { + log.Trace("memstore.get", "key", hash) + s.lock.Lock() + defer s.lock.Unlock() + + node := s.memtree + bitpos := uint(0) + for node.entry == nil { + l := hash.bits(bitpos, node.bits) + st := node.subtree[l] + if st == nil { + log.Trace("memstore.get ErrChunkNotFound", "key", hash) + return nil, ErrChunkNotFound + } + bitpos += node.bits + node = st + } + + if node.entry.Key.isEqual(hash) { + s.accessCnt++ + node.updateAccess(s.accessCnt) + chunk = node.entry + if s.dbAccessCnt-node.lastDBaccess > dbForceUpdateAccessCnt { + s.dbAccessCnt++ + node.lastDBaccess = s.dbAccessCnt + if s.ldbStore != nil { + s.ldbStore.updateAccessCnt(hash) + } + } + } else { + err = ErrChunkNotFound + } + + log.Trace("memstore.get return", "key", hash, "chunk", chunk, "err", err) + return +} + +func (s *MemStore) removeOldest() { + defer metrics.GetOrRegisterResettingTimer("memstore.purge", metrics.DefaultRegistry).UpdateSince(time.Now()) + + node := s.memtree + log.Warn("purge memstore") + for node.entry == nil { + + aidx := uint(0) + av := node.access[aidx] + + for aidx < node.width/2-1 { + if av == node.access[aidx*2+1] { + node.access[aidx] = node.access[aidx*2+2] + aidx = aidx*2 + 1 + } else if av == node.access[aidx*2+2] { + node.access[aidx] = node.access[aidx*2+1] + aidx = aidx*2 + 2 + } else { + panic(nil) + } + } + pidx := aidx*2 + 2 - node.width + if (node.subtree[pidx] != nil) && (av == node.subtree[pidx].access[0]) { + if node.subtree[pidx+1] != nil { + node.access[aidx] = node.subtree[pidx+1].access[0] + } else { + node.access[aidx] = 0 + } + } else if (node.subtree[pidx+1] != nil) && (av == node.subtree[pidx+1].access[0]) { + if node.subtree[pidx] != nil { + node.access[aidx] = node.subtree[pidx].access[0] + } else { + node.access[aidx] = 0 + } + pidx++ + } else { + panic(nil) + } + + //fmt.Println(pidx) + node = node.subtree[pidx] + + } + + if node.entry.ReqC == nil { + log.Trace(fmt.Sprintf("Memstore Clean: Waiting for chunk %v to be saved", node.entry.Key.Log())) + <-node.entry.dbStoredC + log.Trace(fmt.Sprintf("Memstore Clean: Chunk %v saved to DBStore. 
Ready to clear from mem.", node.entry.Key.Log())) + + memstoreRemoveCounter.Inc(1) + node.entry = nil + s.entryCnt-- + } else { + return + } + + node.access[0] = 0 + + //--- + + aidx := uint(0) + for { + aa := node.access[aidx] + if aidx > 0 { + aidx = (aidx - 1) >> 1 + } else { + pidx := node.parentIdx + node = node.parent + if node == nil { + return + } + aidx = (node.width + pidx - 2) >> 1 + } + if (aa != 0) && ((aa < node.access[aidx]) || (node.access[aidx] == 0)) { + node.access[aidx] = aa + } + } +} + +// type MemStore struct { +// m map[string]*Chunk +// mu sync.RWMutex +// } + +// func NewMemStore(d *DbStore, capacity uint) (m *MemStore) { +// return &MemStore{ +// m: make(map[string]*Chunk), +// } +// } + +// func (m *MemStore) Get(key Key) (*Chunk, error) { +// m.mu.RLock() +// defer m.mu.RUnlock() +// c, ok := m.m[string(key[:])] +// if !ok { +// return nil, ErrNotFound +// } +// if !bytes.Equal(c.Key, key) { +// panic(fmt.Errorf("MemStore.Get: chunk key %s != req key %s", c.Key.Hex(), key.Hex())) +// } +// return c, nil +// } + +// func (m *MemStore) Put(c *Chunk) { +// m.mu.Lock() +// defer m.mu.Unlock() +// m.m[string(c.Key[:])] = c +// } + +// func (m *MemStore) setCapacity(n int) { + +// } + +// Close memstore +func (s *MemStore) Close() {} From 9a40655aa68981a5788c745553ca17376a720e65 Mon Sep 17 00:00:00 2001 From: Anton Evangelatov Date: Tue, 3 Apr 2018 14:45:29 +0200 Subject: [PATCH 06/38] swarm/storage: comment out biggest memstore test --- swarm/storage/memstore_lrucache_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/swarm/storage/memstore_lrucache_test.go b/swarm/storage/memstore_lrucache_test.go index d298bd644e72..914f6675f708 100644 --- a/swarm/storage/memstore_lrucache_test.go +++ b/swarm/storage/memstore_lrucache_test.go @@ -70,10 +70,10 @@ func TestMemStoreAndLDBStore(t *testing.T) { n: 20001, chunkSize: 4096, }, - { - n: 50001, - chunkSize: 4096, - }, + //{ + //n: 50001, + //chunkSize: 4096, + //}, } for _, tt := range tests { From a43c8a9c367a5dbd0535e77568d6885b781306ba Mon Sep 17 00:00:00 2001 From: Anton Evangelatov Date: Tue, 3 Apr 2018 14:52:58 +0200 Subject: [PATCH 07/38] swarm/storage: fix setCapacity for 0 --- swarm/storage/memstore_lrucache.go | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/swarm/storage/memstore_lrucache.go b/swarm/storage/memstore_lrucache.go index 74b63765710a..0a6834a3f9d4 100644 --- a/swarm/storage/memstore_lrucache.go +++ b/swarm/storage/memstore_lrucache.go @@ -31,10 +31,17 @@ const ( type MemStore struct { //cache *lru.ARCCache - cache *lru.Cache + cache *lru.Cache + disabled bool } func NewMemStore(_ *LDBStore, capacity uint) (m *MemStore) { + if capacity == 0 { + return &MemStore{ + disabled: true, + } + } + onEvicted := func(key interface{}, value interface{}) { v := value.(*Chunk) <-v.dbStoredC @@ -51,6 +58,10 @@ func NewMemStore(_ *LDBStore, capacity uint) (m *MemStore) { } func (m *MemStore) Get(key Key) (*Chunk, error) { + if m.disabled { + return nil, ErrChunkNotFound + } + c, ok := m.cache.Get(string(key)) if !ok { return nil, ErrChunkNotFound @@ -63,11 +74,18 @@ func (m *MemStore) Get(key Key) (*Chunk, error) { } func (m *MemStore) Put(c *Chunk) { + if m.disabled { + return + } m.cache.Add(string(c.Key), c) } func (m *MemStore) setCapacity(n int) { - //no-op + if n <= 0 { + m.disabled = true + } else { + m = NewMemStore(nil, uint(n)) + } } // Close memstore From f42a58d39f547f69b5f72f72c2b45cb9215d9ce7 Mon Sep 17 00:00:00 2001 From: Anton 
Evangelatov Date: Tue, 3 Apr 2018 15:59:48 +0200 Subject: [PATCH 08/38] swarm/storage: add more tests cases --- swarm/storage/memstore_lrucache_test.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/swarm/storage/memstore_lrucache_test.go b/swarm/storage/memstore_lrucache_test.go index 914f6675f708..73f795117aaf 100644 --- a/swarm/storage/memstore_lrucache_test.go +++ b/swarm/storage/memstore_lrucache_test.go @@ -49,7 +49,7 @@ func newLDBStore(t *testing.T) (*LDBStore, func()) { func TestMemStoreAndLDBStore(t *testing.T) { ldb, cleanup := newLDBStore(t) - ldb.setCapacity(singletonSwarmDbCapacity) + ldb.setCapacity(50000) defer cleanup() memStore := NewMemStore(ldb, defaultCacheCapacity) @@ -67,13 +67,13 @@ func TestMemStoreAndLDBStore(t *testing.T) { chunkSize: 4096, }, { - n: 20001, + n: 9999, + chunkSize: 4096, + }, + { + n: 80001, chunkSize: 4096, }, - //{ - //n: 50001, - //chunkSize: 4096, - //}, } for _, tt := range tests { @@ -96,10 +96,10 @@ func TestMemStoreAndLDBStore(t *testing.T) { if err == ErrChunkNotFound { _, err := ldb.Get(chunks[i].Key) if err != nil { - t.Fatal(err) + t.Fatalf("couldn't get chunk %v from ldb, got error: %v", i, err) } } else { - t.Fatal(err) + t.Fatalf("got error from memstore: %v", err) } } } From 57fa8a867f4cd43083d50e56762b639cd00e00c5 Mon Sep 17 00:00:00 2001 From: Anton Evangelatov Date: Tue, 3 Apr 2018 16:00:06 +0200 Subject: [PATCH 09/38] swarm/storage: comment out collectGarbage --- swarm/storage/ldbstore.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/swarm/storage/ldbstore.go b/swarm/storage/ldbstore.go index d0a76b440a50..90db493a64a3 100644 --- a/swarm/storage/ldbstore.go +++ b/swarm/storage/ldbstore.go @@ -619,10 +619,10 @@ func (s *LDBStore) writeBatches() { log.Error(fmt.Sprintf("DbStore: spawn batch write (%d chunks): %v", b.Len(), err)) } close(c) - if e >= s.capacity { - log.Trace(fmt.Sprintf("DbStore: collecting garbage...(%d chunks)", e)) - s.collectGarbage(gcArrayFreeRatio) - } + //if e >= s.capacity { + //log.Trace(fmt.Sprintf("DbStore: collecting garbage...(%d chunks)", e)) + //s.collectGarbage(gcArrayFreeRatio) + //} } log.Trace(fmt.Sprintf("DbStore: quit batch write loop")) } @@ -762,9 +762,9 @@ func (s *LDBStore) setCapacity(c uint64) { if ratio > 1 { ratio = 1 } - for s.entryCnt > c { - s.collectGarbage(ratio) - } + //for s.entryCnt > c { + //s.collectGarbage(ratio) + //} } } From fa8acc90fb44afe37e098eb3b57dcf920c1bc831 Mon Sep 17 00:00:00 2001 From: Anton Evangelatov Date: Tue, 3 Apr 2018 16:44:34 +0200 Subject: [PATCH 10/38] swarm: cleanup --- swarm/storage/ldbstore.go | 7 --- swarm/storage/memstore_lrucache.go | 3 -- swarm/storage/memstore_map.god | 77 ------------------------------ swarm/swarm.go | 1 - 4 files changed, 88 deletions(-) delete mode 100644 swarm/storage/memstore_map.god diff --git a/swarm/storage/ldbstore.go b/swarm/storage/ldbstore.go index 90db493a64a3..a09834fdd7e4 100644 --- a/swarm/storage/ldbstore.go +++ b/swarm/storage/ldbstore.go @@ -619,10 +619,6 @@ func (s *LDBStore) writeBatches() { log.Error(fmt.Sprintf("DbStore: spawn batch write (%d chunks): %v", b.Len(), err)) } close(c) - //if e >= s.capacity { - //log.Trace(fmt.Sprintf("DbStore: collecting garbage...(%d chunks)", e)) - //s.collectGarbage(gcArrayFreeRatio) - //} } log.Trace(fmt.Sprintf("DbStore: quit batch write loop")) } @@ -762,9 +758,6 @@ func (s *LDBStore) setCapacity(c uint64) { if ratio > 1 { ratio = 1 } - //for s.entryCnt > c { - //s.collectGarbage(ratio) - //} } } 
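The cleanup in this patch leaves the LRU-backed MemStore from patch 05 as the only implementation. Its central idea is an eviction callback that blocks on the chunk's dbStoredC channel, so nothing leaves the memory cache before the disk layer has stored it. That pattern can be shown in isolation: the sketch below assumes github.com/hashicorp/golang-lru and uses a stand-in item type and channel rather than the real Chunk.

package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

// item plays the role of storage.Chunk: stored is closed once the value
// has been written to the persistent (LevelDB) layer.
type item struct {
	key    string
	stored chan struct{}
}

func main() {
	// The eviction callback blocks until the evicted item reports it is
	// safely on disk, mirroring onEvicted in memstore_lrucache.go
	// (which waits on <-v.dbStoredC).
	cache, err := lru.NewWithEvict(2, func(_ interface{}, value interface{}) {
		<-value.(*item).stored
	})
	if err != nil {
		panic(err)
	}

	for _, k := range []string{"a", "b", "c"} {
		it := &item{key: k, stored: make(chan struct{})}
		close(it.stored) // pretend the disk write has already finished
		// Once the cache is full, Add evicts the least recently used
		// entry and the callback above waits on its stored channel.
		cache.Add(k, it)
	}

	_, ok := cache.Get("a")
	fmt.Println("a still cached:", ok) // false: "a" was evicted when "c" was added
}

Because golang-lru runs the eviction callback synchronously inside Add, a Put into a full cache blocks until the evicted chunk has signalled that it was persisted.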
diff --git a/swarm/storage/memstore_lrucache.go b/swarm/storage/memstore_lrucache.go index 0a6834a3f9d4..130491a19bde 100644 --- a/swarm/storage/memstore_lrucache.go +++ b/swarm/storage/memstore_lrucache.go @@ -30,7 +30,6 @@ const ( ) type MemStore struct { - //cache *lru.ARCCache cache *lru.Cache disabled bool } @@ -52,7 +51,6 @@ func NewMemStore(_ *LDBStore, capacity uint) (m *MemStore) { } return &MemStore{ - //cache: lru.NewARC(capacity), cache: c, } } @@ -88,5 +86,4 @@ func (m *MemStore) setCapacity(n int) { } } -// Close memstore func (s *MemStore) Close() {} diff --git a/swarm/storage/memstore_map.god b/swarm/storage/memstore_map.god deleted file mode 100644 index c2f2855268a8..000000000000 --- a/swarm/storage/memstore_map.god +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -// memory storage layer for the package blockhash - -package storage - -import ( - "bytes" - "fmt" - "sync" - - "github.com/ethereum/go-ethereum/metrics" -) - -//metrics variables -var ( - memstorePutCounter = metrics.NewRegisteredCounter("storage.db.memstore.put.count", nil) - memstoreRemoveCounter = metrics.NewRegisteredCounter("storage.db.memstore.rm.count", nil) -) - -const ( - memTreeLW = 2 // log2(subtree count) of the subtrees - memTreeFLW = 14 // log2(subtree count) of the root layer - dbForceUpdateAccessCnt = 1000 - defaultCacheCapacity = 5000 -) - -type MemStore struct { - m map[string]*Chunk - mu sync.RWMutex -} - -func NewMemStore(d *LDBStore, capacity uint) (m *MemStore) { - return &MemStore{ - m: make(map[string]*Chunk), - } -} - -func (m *MemStore) Get(key Key) (*Chunk, error) { - m.mu.RLock() - defer m.mu.RUnlock() - c, ok := m.m[string(key[:])] - if !ok { - return nil, ErrChunkNotFound - } - if !bytes.Equal(c.Key, key) { - panic(fmt.Errorf("MemStore.Get: chunk key %s != req key %s", c.Key.Hex(), key.Hex())) - } - return c, nil -} - -func (m *MemStore) Put(c *Chunk) { - m.mu.Lock() - defer m.mu.Unlock() - m.m[string(c.Key[:])] = c -} - -func (m *MemStore) setCapacity(n int) { - -} - -// Close memstore -func (s *MemStore) Close() {} diff --git a/swarm/swarm.go b/swarm/swarm.go index b37f518790aa..f0e2b9bdb289 100644 --- a/swarm/swarm.go +++ b/swarm/swarm.go @@ -394,7 +394,6 @@ func (self *Swarm) periodicallyUpdateGauges() { } func (self *Swarm) updateGauges() { - //cacheSizeGauge.Update(int64(self.lstore.CacheCounter())) uptimeGauge.Update(time.Since(startTime).Nanoseconds()) } From 47cfe074f06cfcd920546f832b97217d173fb9e3 Mon Sep 17 00:00:00 2001 From: Anton Evangelatov Date: Tue, 3 Apr 2018 16:58:08 +0200 Subject: [PATCH 11/38] swarm/storage: NewRandomChunk method rather than FakeChunk --- swarm/storage/memstore_lrucache_test.go | 35 +++++++++++++++++++++---- 1 file changed, 30 insertions(+), 5 deletions(-) diff --git 
a/swarm/storage/memstore_lrucache_test.go b/swarm/storage/memstore_lrucache_test.go index 73f795117aaf..b391ef174eb0 100644 --- a/swarm/storage/memstore_lrucache_test.go +++ b/swarm/storage/memstore_lrucache_test.go @@ -17,8 +17,11 @@ package storage import ( + "crypto/rand" + "encoding/binary" "io/ioutil" "os" + "sync" "testing" "github.com/ethereum/go-ethereum/log" @@ -55,8 +58,8 @@ func TestMemStoreAndLDBStore(t *testing.T) { memStore := NewMemStore(ldb, defaultCacheCapacity) tests := []struct { - n int // number of chunks to push to memStore - chunkSize int64 // size of chunk (by default in Swarm - 4096) + n int // number of chunks to push to memStore + chunkSize uint64 // size of chunk (by default in Swarm - 4096) }{ { n: 1, @@ -70,6 +73,10 @@ func TestMemStoreAndLDBStore(t *testing.T) { n: 9999, chunkSize: 4096, }, + { + n: 20001, + chunkSize: 4096, + }, { n: 80001, chunkSize: 4096, @@ -80,11 +87,9 @@ func TestMemStoreAndLDBStore(t *testing.T) { var chunks []*Chunk for i := 0; i < tt.n; i++ { - chunks = append(chunks, NewChunk(nil, make(chan bool))) + chunks = append(chunks, NewRandomChunk(tt.chunkSize)) } - FakeChunk(tt.chunkSize, tt.n, chunks) - for i := 0; i < tt.n; i++ { go ldb.Put(chunks[i]) memStore.Put(chunks[i]) @@ -110,3 +115,23 @@ func TestMemStoreAndLDBStore(t *testing.T) { } } } + +func NewRandomChunk(chunkSize uint64) *Chunk { + c := &Chunk{ + Key: make([]byte, 32), + ReqC: nil, + SData: make([]byte, chunkSize), + dbStoredC: make(chan bool), + dbStoredMu: &sync.Mutex{}, + } + + rand.Read(c.SData) + + binary.LittleEndian.PutUint64(c.SData[:8], chunkSize) + + hasher := MakeHashFunc(SHA3Hash)() + hasher.Write(c.SData) + copy(c.Key, hasher.Sum(nil)) + + return c +} From 1c3597f26a0a602aaee123942ac26378e4450643 Mon Sep 17 00:00:00 2001 From: Anton Evangelatov Date: Tue, 3 Apr 2018 17:20:23 +0200 Subject: [PATCH 12/38] swarm/storage: test with request channel --- swarm/storage/common_test.go | 2 +- swarm/storage/memstore_lrucache_test.go | 32 ++++++++++++++++++------- 2 files changed, 25 insertions(+), 9 deletions(-) diff --git a/swarm/storage/common_test.go b/swarm/storage/common_test.go index 0ad114409562..41155e8d7f7a 100644 --- a/swarm/storage/common_test.go +++ b/swarm/storage/common_test.go @@ -34,7 +34,7 @@ import ( ) var ( - loglevel = flag.Int("loglevel", 2, "verbosity of logs") + loglevel = flag.Int("loglevel", 3, "verbosity of logs") ) func init() { diff --git a/swarm/storage/memstore_lrucache_test.go b/swarm/storage/memstore_lrucache_test.go index b391ef174eb0..01b368a5bb09 100644 --- a/swarm/storage/memstore_lrucache_test.go +++ b/swarm/storage/memstore_lrucache_test.go @@ -60,34 +60,43 @@ func TestMemStoreAndLDBStore(t *testing.T) { tests := []struct { n int // number of chunks to push to memStore chunkSize uint64 // size of chunk (by default in Swarm - 4096) + request bool // whether or not to set the ReqC channel on the random chunks }{ { n: 1, chunkSize: 4096, + request: false, }, { n: 201, chunkSize: 4096, + request: false, }, { - n: 9999, + n: 60001, chunkSize: 4096, + request: false, }, { - n: 20001, - chunkSize: 4096, - }, - { - n: 80001, + n: 60001, chunkSize: 4096, + request: true, }, } - for _, tt := range tests { + for i, tt := range tests { + log.Info("running test", "idx", i, "tt", tt) var chunks []*Chunk for i := 0; i < tt.n; i++ { - chunks = append(chunks, NewRandomChunk(tt.chunkSize)) + var c *Chunk + if tt.request { + c = NewRandomRequestChunk(tt.chunkSize) + } else { + c = NewRandomChunk(tt.chunkSize) + } + + chunks = append(chunks, c) } for i 
:= 0; i < tt.n; i++ { @@ -135,3 +144,10 @@ func NewRandomChunk(chunkSize uint64) *Chunk { return c } + +func NewRandomRequestChunk(chunkSize uint64) *Chunk { + c := NewRandomChunk(chunkSize) + c.ReqC = make(chan bool) + + return c +} From c55641099330ab5d3180923ac017ce5b247108c4 Mon Sep 17 00:00:00 2001 From: Anton Evangelatov Date: Tue, 3 Apr 2018 17:26:34 +0200 Subject: [PATCH 13/38] swarm/storage: remove commented out code --- swarm/storage/localstore.go | 4 ---- swarm/storage/memstore_tree.god | 1 - 2 files changed, 5 deletions(-) diff --git a/swarm/storage/localstore.go b/swarm/storage/localstore.go index 6b85b7d93383..e3d1cada0a84 100644 --- a/swarm/storage/localstore.go +++ b/swarm/storage/localstore.go @@ -78,10 +78,6 @@ func NewTestLocalStoreForAddr(path string, basekey []byte) (*LocalStore, error) return localStore, nil } -//func (self *LocalStore) CacheCounter() uint64 { -//return uint64(self.memStore.Counter()) -//} - // LocalStore is itself a chunk store // unsafe, in that the data is not integrity checked func (self *LocalStore) Put(chunk *Chunk) { diff --git a/swarm/storage/memstore_tree.god b/swarm/storage/memstore_tree.god index 3e5c025daaf6..9bb793151b9d 100644 --- a/swarm/storage/memstore_tree.god +++ b/swarm/storage/memstore_tree.god @@ -260,7 +260,6 @@ func (s *MemStore) removeOldest() { defer metrics.GetOrRegisterResettingTimer("memstore.purge", metrics.DefaultRegistry).UpdateSince(time.Now()) node := s.memtree - log.Warn("purge memstore") for node.entry == nil { aidx := uint(0) From 874113654f6ba5076b9075c16fd77eafc85284ac Mon Sep 17 00:00:00 2001 From: Anton Evangelatov Date: Wed, 4 Apr 2018 13:28:34 +0200 Subject: [PATCH 14/38] metrics: relax timer test --- metrics/timer_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/metrics/timer_test.go b/metrics/timer_test.go index c1f0ff9388fb..8638a2270bbb 100644 --- a/metrics/timer_test.go +++ b/metrics/timer_test.go @@ -47,8 +47,8 @@ func TestTimerStop(t *testing.T) { func TestTimerFunc(t *testing.T) { tm := NewTimer() tm.Time(func() { time.Sleep(50e6) }) - if max := tm.Max(); 35e6 > max || max > 95e6 { - t.Errorf("tm.Max(): 35e6 > %v || %v > 95e6\n", max, max) + if max := tm.Max(); 35e6 > max || max > 145e6 { + t.Errorf("tm.Max(): 35e6 > %v || %v > 145e6\n", max, max) } } From abbc732b0504ebb7b5256e60eec2b34af22d25f2 Mon Sep 17 00:00:00 2001 From: Anton Evangelatov Date: Tue, 10 Apr 2018 16:32:44 +0300 Subject: [PATCH 15/38] pss: disable tests as on upstream branch --- swarm/pss/pss_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/swarm/pss/pss_test.go b/swarm/pss/pss_test.go index 1f8ec594fad0..b724a5222ae2 100644 --- a/swarm/pss/pss_test.go +++ b/swarm/pss/pss_test.go @@ -635,6 +635,7 @@ func worker(id int, jobs <-chan Job, rpcs map[discover.NodeID]*rpc.Client, pubke // nodes/msgs/addrbytes/adaptertype // if adaptertype is exec uses execadapter, simadapter otherwise func TestNetwork(t *testing.T) { + t.Skip("tests disabled as they deadlock on travis") if runtime.GOOS == "darwin" { t.Skip("Travis macOS build seems to be very slow, and these tests are flaky on it. 
Skipping until we find a solution.") } From 03412e0ea6cf729465782589f4a77f951596f663 Mon Sep 17 00:00:00 2001 From: Anton Evangelatov Date: Wed, 11 Apr 2018 12:33:39 +0300 Subject: [PATCH 16/38] swarm/storage: use two LRU caches for chunks containing data and requests --- swarm/storage/dpa.go | 2 +- swarm/storage/dpa_test.go | 6 +-- swarm/storage/ldbstore.go | 7 ++++ swarm/storage/localstore.go | 4 +- swarm/storage/memstore_lrucache.go | 52 ++++++++++++++++++++++--- swarm/storage/memstore_lrucache_test.go | 25 +++++++++--- swarm/storage/memstore_test.go | 2 +- swarm/storage/resource.go | 2 +- 8 files changed, 82 insertions(+), 18 deletions(-) diff --git a/swarm/storage/dpa.go b/swarm/storage/dpa.go index bbdf71048346..3f66a73c9e8b 100644 --- a/swarm/storage/dpa.go +++ b/swarm/storage/dpa.go @@ -72,7 +72,7 @@ func NewLocalDPA(datadir string, basekey []byte) (*DPA, error) { } return NewDPA(&LocalStore{ - memStore: NewMemStore(dbStore, singletonSwarmCacheCapacity), + memStore: NewMemStore(dbStore, singletonSwarmCacheCapacity, singletonSwarmDbCapacity), DbStore: dbStore, }, NewDPAParams()), nil } diff --git a/swarm/storage/dpa_test.go b/swarm/storage/dpa_test.go index 1126f05a52e5..c1601807a30a 100644 --- a/swarm/storage/dpa_test.go +++ b/swarm/storage/dpa_test.go @@ -39,7 +39,7 @@ func testDpaRandom(toEncrypt bool, t *testing.T) { defer tdb.close() db := tdb.LDBStore db.setCapacity(50000) - memStore := NewMemStore(db, defaultCacheCapacity) + memStore := NewMemStore(db, defaultCacheCapacity, defaultCacheCapacity) localStore := &LocalStore{ memStore: memStore, DbStore: db, @@ -68,7 +68,7 @@ func testDpaRandom(toEncrypt bool, t *testing.T) { } ioutil.WriteFile("/tmp/slice.bzz.16M", slice, 0666) ioutil.WriteFile("/tmp/result.bzz.16M", resultSlice, 0666) - localStore.memStore = NewMemStore(db, defaultCacheCapacity) + localStore.memStore = NewMemStore(db, defaultCacheCapacity, defaultCacheCapacity) resultReader = dpa.Retrieve(key) for i := range resultSlice { resultSlice[i] = 0 @@ -97,7 +97,7 @@ func testDPA_capacity(toEncrypt bool, t *testing.T) { } defer tdb.close() db := tdb.LDBStore - memStore := NewMemStore(db, 0) + memStore := NewMemStore(db, 0, 0) localStore := &LocalStore{ memStore: memStore, DbStore: db, diff --git a/swarm/storage/ldbstore.go b/swarm/storage/ldbstore.go index a09834fdd7e4..6fdbf469d4fc 100644 --- a/swarm/storage/ldbstore.go +++ b/swarm/storage/ldbstore.go @@ -618,6 +618,10 @@ func (s *LDBStore) writeBatches() { if err != nil { log.Error(fmt.Sprintf("DbStore: spawn batch write (%d chunks): %v", b.Len(), err)) } + if e >= s.capacity { + log.Trace(fmt.Sprintf("DbStore: collecting garbage...(%d chunks)", e)) + s.collectGarbage(gcArrayFreeRatio) + } close(c) } log.Trace(fmt.Sprintf("DbStore: quit batch write loop")) @@ -758,6 +762,9 @@ func (s *LDBStore) setCapacity(c uint64) { if ratio > 1 { ratio = 1 } + for s.entryCnt > c { + s.collectGarbage(ratio) + } } } diff --git a/swarm/storage/localstore.go b/swarm/storage/localstore.go index e3d1cada0a84..908a10c704de 100644 --- a/swarm/storage/localstore.go +++ b/swarm/storage/localstore.go @@ -60,7 +60,7 @@ func NewLocalStore(hash SwarmHasher, params *StoreParams, basekey []byte, mockSt return nil, err } return &LocalStore{ - memStore: NewMemStore(dbStore, params.CacheCapacity), + memStore: NewMemStore(dbStore, params.CacheCapacity, singletonSwarmDbCapacity), DbStore: dbStore, }, nil } @@ -72,7 +72,7 @@ func NewTestLocalStoreForAddr(path string, basekey []byte) (*LocalStore, error) return nil, err } localStore := &LocalStore{ - 
memStore: NewMemStore(dbStore, singletonSwarmDbCapacity), + memStore: NewMemStore(dbStore, singletonSwarmDbCapacity, singletonSwarmDbCapacity), DbStore: dbStore, } return localStore, nil diff --git a/swarm/storage/memstore_lrucache.go b/swarm/storage/memstore_lrucache.go index 130491a19bde..cf560886e00a 100644 --- a/swarm/storage/memstore_lrucache.go +++ b/swarm/storage/memstore_lrucache.go @@ -21,6 +21,7 @@ package storage import ( "bytes" "fmt" + "sync" lru "github.com/hashicorp/golang-lru" ) @@ -31,11 +32,13 @@ const ( type MemStore struct { cache *lru.Cache + requests *lru.Cache + mu sync.Mutex disabled bool } -func NewMemStore(_ *LDBStore, capacity uint) (m *MemStore) { - if capacity == 0 { +func NewMemStore(_ *LDBStore, cacheCapacity uint, requestsCapacity uint) (m *MemStore) { + if cacheCapacity == 0 { return &MemStore{ disabled: true, } @@ -45,13 +48,19 @@ func NewMemStore(_ *LDBStore, capacity uint) (m *MemStore) { v := value.(*Chunk) <-v.dbStoredC } - c, err := lru.NewWithEvict(int(capacity), onEvicted) + c, err := lru.NewWithEvict(int(cacheCapacity), onEvicted) + if err != nil { + panic(err) + } + + r, err := lru.New(int(requestsCapacity)) if err != nil { panic(err) } return &MemStore{ - cache: c, + cache: c, + requests: r, } } @@ -60,6 +69,16 @@ func (m *MemStore) Get(key Key) (*Chunk, error) { return nil, ErrChunkNotFound } + m.mu.Lock() + defer m.mu.Unlock() + + r, ok := m.requests.Get(string(key)) + // it is a request + if ok { + return r.(*Chunk), nil + } + + // it is not a request c, ok := m.cache.Get(string(key)) if !ok { return nil, ErrChunkNotFound @@ -75,14 +94,37 @@ func (m *MemStore) Put(c *Chunk) { if m.disabled { return } + + m.mu.Lock() + defer m.mu.Unlock() + + // it is a request + if c.ReqC != nil { + select { + case <-c.ReqC: + ok := c.GetErrored() + if !ok { + m.requests.Remove(string(c.Key)) + return + } + m.cache.Add(string(c.Key), c) + m.requests.Remove(string(c.Key)) + default: + m.requests.Add(string(c.Key), c) + } + return + } + + // it is not a request m.cache.Add(string(c.Key), c) + m.requests.Remove(string(c.Key)) } func (m *MemStore) setCapacity(n int) { if n <= 0 { m.disabled = true } else { - m = NewMemStore(nil, uint(n)) + m = NewMemStore(nil, uint(n), singletonSwarmDbCapacity) } } diff --git a/swarm/storage/memstore_lrucache_test.go b/swarm/storage/memstore_lrucache_test.go index 01b368a5bb09..0771b51e6c3e 100644 --- a/swarm/storage/memstore_lrucache_test.go +++ b/swarm/storage/memstore_lrucache_test.go @@ -52,10 +52,12 @@ func newLDBStore(t *testing.T) (*LDBStore, func()) { func TestMemStoreAndLDBStore(t *testing.T) { ldb, cleanup := newLDBStore(t) - ldb.setCapacity(50000) + ldb.setCapacity(4000) defer cleanup() - memStore := NewMemStore(ldb, defaultCacheCapacity) + cacheCap := 200 + requestsCap := 10000 + memStore := NewMemStore(ldb, uint(cacheCap), uint(requestsCap)) tests := []struct { n int // number of chunks to push to memStore @@ -73,15 +75,20 @@ func TestMemStoreAndLDBStore(t *testing.T) { request: false, }, { - n: 60001, + n: 501, chunkSize: 4096, request: false, }, { - n: 60001, + n: 15001, chunkSize: 4096, - request: true, + request: false, }, + //{ + //n: 60001, + //chunkSize: 4096, + //request: true, + //}, } for i, tt := range tests { @@ -102,6 +109,14 @@ func TestMemStoreAndLDBStore(t *testing.T) { for i := 0; i < tt.n; i++ { go ldb.Put(chunks[i]) memStore.Put(chunks[i]) + + if got := memStore.cache.Len(); got > cacheCap { + t.Fatalf("expected to get cache capacity less than %v, but got %v", cacheCap, got) + } + + if got := 
memStore.requests.Len(); got > requestsCap { + t.Fatalf("expected to get requests capacity less than %v, but got %v", requestsCap, got) + } } for i := 0; i < tt.n; i++ { diff --git a/swarm/storage/memstore_test.go b/swarm/storage/memstore_test.go index 1da951a64d5a..74ce657c73be 100644 --- a/swarm/storage/memstore_test.go +++ b/swarm/storage/memstore_test.go @@ -19,7 +19,7 @@ package storage import "testing" func newTestMemStore() *MemStore { - return NewMemStore(nil, defaultCacheCapacity) + return NewMemStore(nil, defaultCacheCapacity, singletonSwarmDbCapacity) } func testMemStoreRandom(n int, processors int, chunksize int, t *testing.T) { diff --git a/swarm/storage/resource.go b/swarm/storage/resource.go index 6d0070268883..304232142f88 100644 --- a/swarm/storage/resource.go +++ b/swarm/storage/resource.go @@ -903,7 +903,7 @@ func NewTestResourceHandler(datadir string, ethClient headerGetter, validator Re return nil, err } localStore := &LocalStore{ - memStore: NewMemStore(dbStore, singletonSwarmDbCapacity), + memStore: NewMemStore(dbStore, singletonSwarmCacheCapacity, singletonSwarmDbCapacity), DbStore: dbStore, } resourceChunkStore := NewResourceChunkStore(localStore, nil) From bb741ccf158c3062427fd715f5a28c67d545a45f Mon Sep 17 00:00:00 2001 From: Anton Evangelatov Date: Wed, 11 Apr 2018 12:51:17 +0300 Subject: [PATCH 17/38] swarm/storage: move close to correct place --- swarm/storage/ldbstore.go | 2 +- swarm/storage/memstore_lrucache_test.go | 5 ----- 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/swarm/storage/ldbstore.go b/swarm/storage/ldbstore.go index 6fdbf469d4fc..d0a76b440a50 100644 --- a/swarm/storage/ldbstore.go +++ b/swarm/storage/ldbstore.go @@ -618,11 +618,11 @@ func (s *LDBStore) writeBatches() { if err != nil { log.Error(fmt.Sprintf("DbStore: spawn batch write (%d chunks): %v", b.Len(), err)) } + close(c) if e >= s.capacity { log.Trace(fmt.Sprintf("DbStore: collecting garbage...(%d chunks)", e)) s.collectGarbage(gcArrayFreeRatio) } - close(c) } log.Trace(fmt.Sprintf("DbStore: quit batch write loop")) } diff --git a/swarm/storage/memstore_lrucache_test.go b/swarm/storage/memstore_lrucache_test.go index 0771b51e6c3e..b32638696e62 100644 --- a/swarm/storage/memstore_lrucache_test.go +++ b/swarm/storage/memstore_lrucache_test.go @@ -79,11 +79,6 @@ func TestMemStoreAndLDBStore(t *testing.T) { chunkSize: 4096, request: false, }, - { - n: 15001, - chunkSize: 4096, - request: false, - }, //{ //n: 60001, //chunkSize: 4096, From 398e2e4fc400c1fe86dc13f56f44b7e34d4bd51a Mon Sep 17 00:00:00 2001 From: Anton Evangelatov Date: Wed, 11 Apr 2018 13:12:42 +0300 Subject: [PATCH 18/38] swarm/storage: amend tests --- swarm/storage/memstore_lrucache_test.go | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/swarm/storage/memstore_lrucache_test.go b/swarm/storage/memstore_lrucache_test.go index b32638696e62..eafaa7174cb9 100644 --- a/swarm/storage/memstore_lrucache_test.go +++ b/swarm/storage/memstore_lrucache_test.go @@ -56,7 +56,7 @@ func TestMemStoreAndLDBStore(t *testing.T) { defer cleanup() cacheCap := 200 - requestsCap := 10000 + requestsCap := 200 memStore := NewMemStore(ldb, uint(cacheCap), uint(requestsCap)) tests := []struct { @@ -79,11 +79,16 @@ func TestMemStoreAndLDBStore(t *testing.T) { chunkSize: 4096, request: false, }, - //{ - //n: 60001, - //chunkSize: 4096, - //request: true, - //}, + { + n: 60001, + chunkSize: 4096, + request: false, + }, + { + n: 100, + chunkSize: 4096, + request: true, + }, } for i, tt := range tests { 
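A minimal, self-contained sketch (assumptions noted, not part of this patch series) of the eviction-callback pattern the two-cache MemStore introduced in PATCH 16 relies on: the LRU data cache only lets go of a chunk once a signal channel confirms the chunk has been persisted, so an eviction cannot outrun a pending LevelDB write. The chunk type and storedC field below are simplified stand-ins for the swarm chunk and its dbStoredC channel; the example assumes the github.com/hashicorp/golang-lru dependency that the patch already imports.

    package main

    import (
        "fmt"

        lru "github.com/hashicorp/golang-lru"
    )

    type chunk struct {
        key     string
        storedC chan bool // closed once the chunk has been written to the persistent store
    }

    func main() {
        // The eviction hook blocks until storedC is closed, so a chunk can only be
        // dropped from memory after it is safely on disk.
        onEvicted := func(key interface{}, value interface{}) {
            c := value.(*chunk)
            <-c.storedC
            fmt.Println("evicted after persistence:", key)
        }

        cache, err := lru.NewWithEvict(1, onEvicted) // capacity of 1 forces an eviction below
        if err != nil {
            panic(err)
        }

        first := &chunk{key: "first", storedC: make(chan bool)}
        close(first.storedC) // pretend the LevelDB write already completed
        cache.Add(first.key, first)

        second := &chunk{key: "second", storedC: make(chan bool)}
        close(second.storedC)
        cache.Add(second.key, second) // evicts "first"; the hook returns immediately

        fmt.Println("cache length:", cache.Len())
    }

Because golang-lru runs the eviction hook synchronously inside Add, a Put that overflows the cache waits for the evicted chunk's write to finish, which appears to be the intent of waiting on dbStoredC in the patch.
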
From 1ce77753913b175be6b36f15d7cc8a43e231bea0 Mon Sep 17 00:00:00 2001 From: Anton Evangelatov Date: Wed, 11 Apr 2018 16:57:33 +0300 Subject: [PATCH 19/38] swarm/storage: fix collectGarbage panics --- swarm/storage/ldbstore.go | 44 ++++++++++++------------- swarm/storage/memstore_lrucache_test.go | 2 +- 2 files changed, 23 insertions(+), 23 deletions(-) diff --git a/swarm/storage/ldbstore.go b/swarm/storage/ldbstore.go index d0a76b440a50..d9f6b6234037 100644 --- a/swarm/storage/ldbstore.go +++ b/swarm/storage/ldbstore.go @@ -52,10 +52,6 @@ const ( gcArraySize = 10000 gcArrayFreeRatio = 0.1 - - // key prefixes for leveldb storage - kpIndex = 0 - kpData = 1 ) var ( @@ -79,11 +75,15 @@ type LDBStore struct { db *LDBDatabase // this should be stored in db, accessed transactionally - entryCnt, accessCnt, dataIdx, capacity uint64 - bucketCnt []uint64 + entryCnt uint64 // number of items in the LevelDB + accessCnt uint64 // ever-accumulating number increased every time we read/access an entry + dataIdx uint64 // similar to entryCnt, but we only increment it + capacity uint64 + bucketCnt []uint64 - gcPos, gcStartPos []byte - gcArray []*gcItem + gcPos []byte + gcStartPos []byte + gcArray []*gcItem hashfunc SwarmHasher po func(Key) uint8 @@ -126,10 +126,6 @@ func NewLDBStore(path string, hash SwarmHasher, capacity uint64, po func(Key) ui s.po = po s.setCapacity(capacity) - s.gcStartPos = make([]byte, 1) - s.gcStartPos[0] = kpIndex - s.gcArray = make([]*gcItem, gcArraySize) - s.bucketCnt = make([]uint64, 0x100) for i := 0; i < 0x100; i++ { k := make([]byte, 2) @@ -149,6 +145,9 @@ func NewLDBStore(path string, hash SwarmHasher, capacity uint64, po func(Key) ui s.dataIdx = BytesToU64(data) s.dataIdx++ + s.gcStartPos = make([]byte, 1) + s.gcStartPos[0] = keyIndex + s.gcArray = make([]*gcItem, gcArraySize) s.gcPos, _ = s.db.Get(keyGCPos) if s.gcPos == nil { s.gcPos = s.gcStartPos @@ -192,7 +191,6 @@ func BytesToU64(data []byte) uint64 { func U64ToBytes(val uint64) []byte { data := make([]byte, 8) - //binary.LittleEndian.PutUint64(data, val) binary.BigEndian.PutUint64(data, val) return data } @@ -294,8 +292,7 @@ func gcListSelect(list []*gcItem, left int, right int, n int) int { func (s *LDBStore) collectGarbage(ratio float32) { it := s.db.NewIterator() - it.Seek(s.gcPos) - if it.Valid() { + if it.Seek(s.gcPos) { s.gcPos = it.Key() } else { s.gcPos = nil @@ -304,16 +301,16 @@ func (s *LDBStore) collectGarbage(ratio float32) { for (gcnt < gcArraySize) && (uint64(gcnt) < s.entryCnt) { - if (s.gcPos == nil) || (s.gcPos[0] != kpIndex) { + if (s.gcPos == nil) || (s.gcPos[0] != keyIndex) { it.Seek(s.gcStartPos) if it.Valid() { s.gcPos = it.Key() } else { - s.gcPos = nil + s.gcPos = s.gcStartPos } } - if (s.gcPos == nil) || (s.gcPos[0] != kpIndex) { + if (s.gcPos == nil) || (s.gcPos[0] != keyIndex) { break } @@ -335,6 +332,9 @@ func (s *LDBStore) collectGarbage(ratio float32) { } it.Release() + if gcnt == 0 { + gcnt++ + } cutidx := gcListSelect(s.gcArray, 0, gcnt-1, int(float32(gcnt)*ratio)) cutval := s.gcArray[cutidx].value @@ -358,9 +358,9 @@ func (s *LDBStore) Export(out io.Writer) (int64, error) { it := s.db.NewIterator() defer it.Release() var count int64 - for ok := it.Seek([]byte{kpIndex}); ok; ok = it.Next() { + for ok := it.Seek([]byte{keyIndex}); ok; ok = it.Next() { key := it.Key() - if (key == nil) || (key[0] != kpIndex) { + if (key == nil) || (key[0] != keyIndex) { break } @@ -440,13 +440,13 @@ func (s *LDBStore) Import(in io.Reader) (int64, error) { func (s *LDBStore) Cleanup() { //Iterates over 
the database and checks that there are no faulty chunks it := s.db.NewIterator() - startPosition := []byte{kpIndex} + startPosition := []byte{keyIndex} it.Seek(startPosition) var key []byte var errorsFound, total int for it.Valid() { key = it.Key() - if (key == nil) || (key[0] != kpIndex) { + if (key == nil) || (key[0] != keyIndex) { break } total++ diff --git a/swarm/storage/memstore_lrucache_test.go b/swarm/storage/memstore_lrucache_test.go index eafaa7174cb9..285f797724f6 100644 --- a/swarm/storage/memstore_lrucache_test.go +++ b/swarm/storage/memstore_lrucache_test.go @@ -80,7 +80,7 @@ func TestMemStoreAndLDBStore(t *testing.T) { request: false, }, { - n: 60001, + n: 3100, chunkSize: 4096, request: false, }, From c4f493185f918d218a2283b6541465d4dc2defde Mon Sep 17 00:00:00 2001 From: Anton Evangelatov Date: Wed, 11 Apr 2018 17:37:41 +0300 Subject: [PATCH 20/38] swarm/storage: more tests for LDBStore --- swarm/storage/ldbstore_test.go | 110 +++++++++++++++++++++++++++++++++ 1 file changed, 110 insertions(+) diff --git a/swarm/storage/ldbstore_test.go b/swarm/storage/ldbstore_test.go index faa0472a3341..dc82281d3b97 100644 --- a/swarm/storage/ldbstore_test.go +++ b/swarm/storage/ldbstore_test.go @@ -23,10 +23,13 @@ import ( "os" "sync" "testing" + "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/swarm/storage/mock/mem" + + ldberrors "github.com/syndtr/goleveldb/leveldb/errors" ) type testDbStore struct { @@ -271,3 +274,110 @@ func BenchmarkMockDbStoreGet_1_5k(b *testing.B) { func BenchmarkMockDbStoreGet_8_5k(b *testing.B) { benchmarkDbStoreGet(5000, 8, 4096, true, b) } + +// TestLDBStoreWithoutCollectGarbage tests that we can put a number of random chunks in the LevelDB store, and +// retrieve them, provided we don't hit the garbage collection and we are at least 20-30% below the specified capacity +func TestLDBStoreWithoutCollectGarbage(t *testing.T) { + chunkSize := uint64(4096) + capacity := 50 + n := 10 + + ldb, cleanup := newLDBStore(t) + ldb.setCapacity(uint64(capacity)) + defer cleanup() + + chunks := []*Chunk{} + for i := 0; i < n; i++ { + c := NewRandomChunk(chunkSize) + chunks = append(chunks, c) + log.Info("generate random chunk", "idx", i, "chunk", c) + } + + for i := 0; i < n; i++ { + go ldb.Put(chunks[i]) + } + + // wait for all chunks to be stored before ending the test are cleaning up + for i := 0; i < n; i++ { + <-chunks[i].dbStoredC + } + + log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt) + + for i := 0; i < n; i++ { + ret, err := ldb.Get(chunks[i].Key) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(ret.SData, chunks[i].SData) { + t.Fatal("expected to get the same data back, but got smth else") + } + + log.Info("got back chunk", "chunk", ret) + } + + if ldb.entryCnt != uint64(n+1) { + t.Fatalf("expected entryCnt to be equal to %v, but got %v", n+1, ldb.entryCnt) + } + + if ldb.accessCnt != uint64(2*n+1) { + t.Fatalf("expected accessCnt to be equal to %v, but got %v", n+1, ldb.accessCnt) + } +} + +// TestLDBStoreCollectGarbage tests that we can put more chunks than LevelDB's capacity, and +// retrieve only some of them, because garbage collection must have cleared some of them +func TestLDBStoreCollectGarbage(t *testing.T) { + chunkSize := uint64(4096) + capacity := 5 + n := 2000 + + ldb, cleanup := newLDBStore(t) + ldb.setCapacity(uint64(capacity)) + defer cleanup() + + chunks := []*Chunk{} + for i := 0; i < n; i++ { + c := NewRandomChunk(chunkSize) + chunks = 
append(chunks, c) + log.Info("generate random chunk", "idx", i, "chunk", c) + } + + for i := 0; i < n; i++ { + go ldb.Put(chunks[i]) + } + + // wait for all chunks to be stored before ending the test are cleaning up + for i := 0; i < n; i++ { + <-chunks[i].dbStoredC + } + + log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt) + + time.Sleep(5 * time.Second) + + var missing int + for i := 0; i < n; i++ { + ret, err := ldb.Get(chunks[i].Key) + if err == ErrChunkNotFound || err == ldberrors.ErrNotFound { + missing++ + continue + } + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(ret.SData, chunks[i].SData) { + t.Fatal("expected to get the same data back, but got smth else") + } + + log.Info("got back chunk", "chunk", ret) + } + + if missing < n-capacity { + t.Fatalf("gc failure: expected to miss %v chunks, but only %v are actually missing", n-capacity, missing) + } + + log.Info("ldbstore", "total", n, "missing", missing) +} From d86b447af971a8dc97f1f155d8079a0161bbeaf5 Mon Sep 17 00:00:00 2001 From: Anton Evangelatov Date: Thu, 12 Apr 2018 12:37:38 +0300 Subject: [PATCH 21/38] swarm/storage: more tests --- swarm/storage/ldbstore_test.go | 83 +++++++++++++++++++++++++++++++++- 1 file changed, 81 insertions(+), 2 deletions(-) diff --git a/swarm/storage/ldbstore_test.go b/swarm/storage/ldbstore_test.go index dc82281d3b97..82f5b053be06 100644 --- a/swarm/storage/ldbstore_test.go +++ b/swarm/storage/ldbstore_test.go @@ -28,6 +28,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/swarm/storage/mock/mem" + colorable "github.com/mattn/go-colorable" ldberrors "github.com/syndtr/goleveldb/leveldb/errors" ) @@ -278,6 +279,9 @@ func BenchmarkMockDbStoreGet_8_5k(b *testing.B) { // TestLDBStoreWithoutCollectGarbage tests that we can put a number of random chunks in the LevelDB store, and // retrieve them, provided we don't hit the garbage collection and we are at least 20-30% below the specified capacity func TestLDBStoreWithoutCollectGarbage(t *testing.T) { + log.PrintOrigins(true) + log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true)))) + chunkSize := uint64(4096) capacity := 50 n := 10 @@ -329,8 +333,11 @@ func TestLDBStoreWithoutCollectGarbage(t *testing.T) { // TestLDBStoreCollectGarbage tests that we can put more chunks than LevelDB's capacity, and // retrieve only some of them, because garbage collection must have cleared some of them func TestLDBStoreCollectGarbage(t *testing.T) { + log.PrintOrigins(true) + log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true)))) + chunkSize := uint64(4096) - capacity := 5 + capacity := 500 n := 2000 ldb, cleanup := newLDBStore(t) @@ -345,7 +352,12 @@ func TestLDBStoreCollectGarbage(t *testing.T) { } for i := 0; i < n; i++ { - go ldb.Put(chunks[i]) + ldb.Put(chunks[i]) + + if i%100 == 0 { + log.Info("sleeping 1 sec...") + time.Sleep(1 * time.Second) + } } // wait for all chunks to be stored before ending the test are cleaning up @@ -376,8 +388,75 @@ func TestLDBStoreCollectGarbage(t *testing.T) { } if missing < n-capacity { + log.Info("ldbstore", "total", n, "missing", missing) + log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt) t.Fatalf("gc failure: expected to miss %v chunks, but only %v are actually missing", n-capacity, missing) } log.Info("ldbstore", "total", n, "missing", 
missing) + log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt) +} + +// TestLDBStoreAddRemove tests that we can put and then delete a given chunk +func TestLDBStoreAddRemove(t *testing.T) { + log.PrintOrigins(true) + log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true)))) + + ldb, cleanup := newLDBStore(t) + ldb.setCapacity(200) + defer cleanup() + + n := 100 + + chunks := []*Chunk{} + for i := 0; i < n; i++ { + c := NewRandomChunk(chunkSize) + chunks = append(chunks, c) + log.Info("generate random chunk", "idx", i, "chunk", c) + } + + for i := 0; i < n; i++ { + go ldb.Put(chunks[i]) + } + + // wait for all chunks to be stored before ending the test are cleaning up + for i := 0; i < n; i++ { + <-chunks[i].dbStoredC + } + + for i := 0; i < n; i++ { + // delete all even index chunks + if i%2 == 0 { + + key := chunks[i].Key + ikey := getIndexKey(key) + + var indx dpaDBIndex + ldb.tryAccessIdx(ikey, &indx) + + ldb.delete(indx.Idx, ikey, ldb.po(key)) + } + } + + log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt) + + for i := 0; i < n; i++ { + ret, err := ldb.Get(chunks[i].Key) + + if i%2 == 0 { + // expect even chunks to be missing + if err == nil || ret != nil { + t.Fatal("expected chunk to be missing, but got no error") + } + } else { + // expect odd chunks to be retrieved successfully + if err != nil { + t.Fatalf("expected no error, but got %s", err) + } + + if !bytes.Equal(ret.SData, chunks[i].SData) { + t.Fatal("expected to get the same data back, but got smth else") + } + } + } } From 2448dbee5ca30b8a56c838f6a1ca9fb532fd1831 Mon Sep 17 00:00:00 2001 From: Anton Evangelatov Date: Fri, 13 Apr 2018 11:51:46 +0300 Subject: [PATCH 22/38] swarm/storage: working garbage collection --- swarm/storage/ldbstore.go | 104 ++++++++++++++++----------------- swarm/storage/ldbstore_test.go | 86 ++++++++++++++++++++++++--- 2 files changed, 126 insertions(+), 64 deletions(-) diff --git a/swarm/storage/ldbstore.go b/swarm/storage/ldbstore.go index d9f6b6234037..ca9d2403e73a 100644 --- a/swarm/storage/ldbstore.go +++ b/swarm/storage/ldbstore.go @@ -30,6 +30,7 @@ import ( "fmt" "io" "io/ioutil" + "sort" "sync" "github.com/ethereum/go-ethereum/log" @@ -50,7 +51,6 @@ const ( defaultDbCapacity = 5000000 defaultRadius = 0 // not yet used - gcArraySize = 10000 gcArrayFreeRatio = 0.1 ) @@ -69,6 +69,7 @@ type gcItem struct { idx uint64 value uint64 idxKey []byte + po uint8 } type LDBStore struct { @@ -83,7 +84,6 @@ type LDBStore struct { gcPos []byte gcStartPos []byte - gcArray []*gcItem hashfunc SwarmHasher po func(Key) uint8 @@ -138,6 +138,7 @@ func NewLDBStore(path string, hash SwarmHasher, capacity uint64, po func(Key) ui data, _ := s.db.Get(keyEntryCnt) s.entryCnt = BytesToU64(data) s.entryCnt++ + log.Trace("NewLDBStore s.entryCnt++", "entryCnt", s.entryCnt) data, _ = s.db.Get(keyAccessCnt) s.accessCnt = BytesToU64(data) s.accessCnt++ @@ -147,7 +148,6 @@ func NewLDBStore(path string, hash SwarmHasher, capacity uint64, po func(Key) ui s.gcStartPos = make([]byte, 1) s.gcStartPos[0] = keyIndex - s.gcArray = make([]*gcItem, gcArraySize) s.gcPos, _ = s.db.Get(keyGCPos) if s.gcPos == nil { s.gcPos = s.gcStartPos @@ -195,10 +195,6 @@ func U64ToBytes(val uint64) []byte { return data } -func getIndexGCValue(index *dpaDBIndex) uint64 { - return index.Access -} - func (s *LDBStore) updateIndexAccess(index *dpaDBIndex) { index.Access = s.accessCnt } @@ -292,61 +288,55 @@ func gcListSelect(list 
[]*gcItem, left int, right int, n int) int { func (s *LDBStore) collectGarbage(ratio float32) { it := s.db.NewIterator() - if it.Seek(s.gcPos) { - s.gcPos = it.Key() - } else { - s.gcPos = nil - } - gcnt := 0 - - for (gcnt < gcArraySize) && (uint64(gcnt) < s.entryCnt) { + defer it.Release() - if (s.gcPos == nil) || (s.gcPos[0] != keyIndex) { - it.Seek(s.gcStartPos) - if it.Valid() { - s.gcPos = it.Key() - } else { - s.gcPos = s.gcStartPos - } - } + garbage := []*gcItem{} + gcnt := 0 - if (s.gcPos == nil) || (s.gcPos[0] != keyIndex) { + for ok := it.Seek([]byte{keyIndex}); ok && (gcnt < 5000) && (uint64(gcnt) < s.entryCnt); ok = it.Next() { + key := it.Key() + val := it.Value() + if (key == nil) || (key[0] != keyIndex) { break } - gci := new(gcItem) - gci.idxKey = s.gcPos + log.Trace("iterator", "key", fmt.Sprintf("%x", key), "value", fmt.Sprintf("%x", val)) + var index dpaDBIndex - decodeIndex(it.Value(), &index) - gci.idx = index.Idx - // the smaller, the more likely to be gc'd - gci.value = getIndexGCValue(&index) - s.gcArray[gcnt] = gci - gcnt++ - it.Next() - if it.Valid() { - s.gcPos = it.Key() - } else { - s.gcPos = nil + + hash := key[1:] + decodeIndex(val, &index) + po := s.po(hash) + + kkey := make([]byte, len(key)) + copy(kkey, key) + + gci := &gcItem{ + idxKey: kkey, + idx: index.Idx, + value: index.Access, + po: po, } - } - it.Release() - if gcnt == 0 { + log.Trace("gci.idxKey", "gcnt", gcnt, "idxKey", fmt.Sprintf("%x", gci.idxKey), "idx", gci.idx, "gci.value", gci.value) + + garbage = append(garbage, gci) gcnt++ } - cutidx := gcListSelect(s.gcArray, 0, gcnt-1, int(float32(gcnt)*ratio)) - cutval := s.gcArray[cutidx].value - - // actual gc - for i := 0; i < gcnt; i++ { - if s.gcArray[i].value <= cutval { - gcCounter.Inc(1) - s.delete(s.gcArray[i].idx, s.gcArray[i].idxKey, s.po(Key(s.gcPos[1:]))) - } + + sort.Slice(garbage[:gcnt], func(i, j int) bool { return garbage[i].value < garbage[j].value }) + + for k := 0; k < gcnt; k++ { + log.Trace("gcArray[]", "k", k, "idx", garbage[k].idx, "idxKey", fmt.Sprintf("%x", garbage[k].idxKey), "value", garbage[k].value) + } + + cutoff := int(float32(gcnt) * ratio) + log.Trace("cutoff", "cut", cutoff, "gcnt", gcnt) + for i := 0; i < cutoff; i++ { + s.delete(garbage[i].idx, garbage[i].idxKey, garbage[i].po) } - s.db.Put(keyGCPos, s.gcPos) + //s.db.Put(keyGCPos, s.gcPos) } // Export writes all chunks from the store to a tar archive, returning the @@ -520,11 +510,14 @@ func (s *LDBStore) ReIndex() { } func (s *LDBStore) delete(idx uint64, idxKey []byte, po uint8) { + log.Trace("LDBStore delete()", "idx", idx, "idxKey", fmt.Sprintf("%x", idxKey), "po", po) + batch := new(leveldb.Batch) batch.Delete(idxKey) batch.Delete(getDataKey(idx, po)) dbStoreDeleteCounter.Inc(1) s.entryCnt-- + log.Trace("delete s.entryCnt--", "entryCnt", s.entryCnt) s.bucketCnt[po]-- cntKey := make([]byte, 2) cntKey[0] = keyDistanceCnt @@ -594,6 +587,7 @@ func (s *LDBStore) doPut(chunk *Chunk, index *dpaDBIndex, po uint8) { index.Idx = s.dataIdx s.bucketCnt[po] = s.dataIdx s.entryCnt++ + log.Trace("doPut entryCnt++", "entryCnt", s.entryCnt) s.dataIdx++ cntKey := make([]byte, 2) @@ -612,17 +606,17 @@ func (s *LDBStore) writeBatches() { c := s.batchC s.batchC = make(chan bool) s.batch = new(leveldb.Batch) - s.lock.Unlock() err := s.writeBatch(b, e, d, a) // TODO: set this error on the batch, then tell the chunk if err != nil { - log.Error(fmt.Sprintf("DbStore: spawn batch write (%d chunks): %v", b.Len(), err)) + log.Error(fmt.Sprintf("spawn batch write (%d entries): %v", 
b.Len(), err)) } close(c) - if e >= s.capacity { - log.Trace(fmt.Sprintf("DbStore: collecting garbage...(%d chunks)", e)) + if e >= s.capacity && int(float32(e-1)*gcArrayFreeRatio) > 0 { + log.Trace(fmt.Sprintf("collecting garbage (%d chunks)", e)) s.collectGarbage(gcArrayFreeRatio) } + s.lock.Unlock() } log.Trace(fmt.Sprintf("DbStore: quit batch write loop")) } @@ -636,7 +630,7 @@ func (s *LDBStore) writeBatch(b *leveldb.Batch, entryCnt, dataIdx, accessCnt uin if err := s.db.Write(b); err != nil { return fmt.Errorf("unable to write batch: %v", err) } - log.Trace(fmt.Sprintf("DbStore: batch write (%d chunks) complete", l)) + log.Trace(fmt.Sprintf("batch write (%d entries)", l)) return nil } diff --git a/swarm/storage/ldbstore_test.go b/swarm/storage/ldbstore_test.go index 82f5b053be06..928d8dc94a47 100644 --- a/swarm/storage/ldbstore_test.go +++ b/swarm/storage/ldbstore_test.go @@ -23,7 +23,6 @@ import ( "os" "sync" "testing" - "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" @@ -353,11 +352,6 @@ func TestLDBStoreCollectGarbage(t *testing.T) { for i := 0; i < n; i++ { ldb.Put(chunks[i]) - - if i%100 == 0 { - log.Info("sleeping 1 sec...") - time.Sleep(1 * time.Second) - } } // wait for all chunks to be stored before ending the test are cleaning up @@ -367,8 +361,6 @@ func TestLDBStoreCollectGarbage(t *testing.T) { log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt) - time.Sleep(5 * time.Second) - var missing int for i := 0; i < n; i++ { ret, err := ldb.Get(chunks[i].Key) @@ -419,7 +411,7 @@ func TestLDBStoreAddRemove(t *testing.T) { go ldb.Put(chunks[i]) } - // wait for all chunks to be stored before ending the test are cleaning up + // wait for all chunks to be stored before continuing for i := 0; i < n; i++ { <-chunks[i].dbStoredC } @@ -460,3 +452,79 @@ func TestLDBStoreAddRemove(t *testing.T) { } } } + +// TestLDBStoreRemoveThenCollectGarbage tests that we can delete chunks and that we can trigger garbage collection +func TestLDBStoreRemoveThenCollectGarbage(t *testing.T) { + log.PrintOrigins(true) + log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(false)))) + + capacity := 10 + + ldb, cleanup := newLDBStore(t) + ldb.setCapacity(uint64(capacity)) + + n := 7 + + chunks := []*Chunk{} + for i := 0; i < capacity; i++ { + c := NewRandomChunk(chunkSize) + chunks = append(chunks, c) + log.Info("generate random chunk", "idx", i, "chunk", c) + } + + for i := 0; i < n; i++ { + ldb.Put(chunks[i]) + } + + // wait for all chunks to be stored before continuing + for i := 0; i < n; i++ { + <-chunks[i].dbStoredC + } + + // delete all chunks + for i := 0; i < n; i++ { + key := chunks[i].Key + ikey := getIndexKey(key) + + var indx dpaDBIndex + ldb.tryAccessIdx(ikey, &indx) + + ldb.delete(indx.Idx, ikey, ldb.po(key)) + } + + log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt) + + cleanup() + + ldb, cleanup = newLDBStore(t) + ldb.setCapacity(uint64(capacity)) + + n = 10 + + for i := 0; i < n; i++ { + ldb.Put(chunks[i]) + } + + // wait for all chunks to be stored before continuing + for i := 0; i < n; i++ { + <-chunks[i].dbStoredC + } + + // expect for first chunk to be missing + idx := 0 + ret, err := ldb.Get(chunks[idx].Key) + if err == nil || ret != nil { + t.Fatal("expected first chunk to be missing, but got no error") + } + + // expect for last chunk to be present + idx = 9 + ret, err = ldb.Get(chunks[idx].Key) + if err != nil { + t.Fatalf("expected no error, but got 
%s", err) + } + + if !bytes.Equal(ret.SData, chunks[idx].SData) { + t.Fatal("expected to get the same data back, but got smth else") + } +} From 4b630d8221421ef90256bc7155d9aa0962a12789 Mon Sep 17 00:00:00 2001 From: Anton Evangelatov Date: Fri, 13 Apr 2018 11:52:45 +0300 Subject: [PATCH 23/38] swarm/storage: fixup --- swarm/storage/ldbstore.go | 39 --------------------------------------- 1 file changed, 39 deletions(-) diff --git a/swarm/storage/ldbstore.go b/swarm/storage/ldbstore.go index ca9d2403e73a..6fe98aa1f859 100644 --- a/swarm/storage/ldbstore.go +++ b/swarm/storage/ldbstore.go @@ -185,7 +185,6 @@ func BytesToU64(data []byte) uint64 { if len(data) < 8 { return 0 } - //return binary.LittleEndian.Uint64(data) return binary.BigEndian.Uint64(data) } @@ -236,7 +235,6 @@ func encodeData(chunk *Chunk) []byte { func decodeIndex(data []byte, index *dpaDBIndex) error { dec := rlp.NewStream(bytes.NewReader(data), 0) return dec.Decode(index) - } func decodeData(data []byte, chunk *Chunk) { @@ -249,43 +247,6 @@ func decodeOldData(data []byte, chunk *Chunk) { chunk.Size = int64(binary.BigEndian.Uint64(data[0:8])) } -func gcListPartition(list []*gcItem, left int, right int, pivotIndex int) int { - pivotValue := list[pivotIndex].value - dd := list[pivotIndex] - list[pivotIndex] = list[right] - list[right] = dd - storeIndex := left - for i := left; i < right; i++ { - if list[i].value < pivotValue { - dd = list[storeIndex] - list[storeIndex] = list[i] - list[i] = dd - storeIndex++ - } - } - dd = list[storeIndex] - list[storeIndex] = list[right] - list[right] = dd - return storeIndex -} - -func gcListSelect(list []*gcItem, left int, right int, n int) int { - if left == right { - return left - } - pivotIndex := (left + right) / 2 - pivotIndex = gcListPartition(list, left, right, pivotIndex) - if n == pivotIndex { - return n - } else { - if n < pivotIndex { - return gcListSelect(list, left, pivotIndex-1, n) - } else { - return gcListSelect(list, pivotIndex+1, right, n) - } - } -} - func (s *LDBStore) collectGarbage(ratio float32) { it := s.db.NewIterator() defer it.Release() From 49214d72d298e431957773b102d9b654272c095a Mon Sep 17 00:00:00 2001 From: Anton Evangelatov Date: Fri, 13 Apr 2018 12:01:45 +0300 Subject: [PATCH 24/38] swarm/storage: better naming --- swarm/storage/dpa.go | 9 +++++---- swarm/storage/dpa_test.go | 4 ++-- swarm/storage/localstore.go | 6 +++--- swarm/storage/memstore_lrucache.go | 6 +----- swarm/storage/memstore_test.go | 2 +- swarm/storage/resource.go | 4 ++-- 6 files changed, 14 insertions(+), 17 deletions(-) diff --git a/swarm/storage/dpa.go b/swarm/storage/dpa.go index 3f66a73c9e8b..7f5a821f9fbc 100644 --- a/swarm/storage/dpa.go +++ b/swarm/storage/dpa.go @@ -35,8 +35,9 @@ implementation for storage or retrieval. */ const ( - singletonSwarmDbCapacity = 50000 - singletonSwarmCacheCapacity = 500 + defaultLDBCapacity = 50000 // capacity for LevelDB + defaultCacheCapacity = 500 // capacity for in-memory chunks' cache + defaultChunkRequestsCacheCapacity = 50000 // capacity for container holding outgoing requests for chunks. 
should be set to LevelDB capacity ) var ( @@ -66,13 +67,13 @@ func NewLocalDPA(datadir string, basekey []byte) (*DPA, error) { hash := MakeHashFunc("SHA3") - dbStore, err := NewLDBStore(datadir, hash, singletonSwarmDbCapacity, func(k Key) (ret uint8) { return uint8(Proximity(basekey[:], k[:])) }) + dbStore, err := NewLDBStore(datadir, hash, defaultLDBCapacity, func(k Key) (ret uint8) { return uint8(Proximity(basekey[:], k[:])) }) if err != nil { return nil, err } return NewDPA(&LocalStore{ - memStore: NewMemStore(dbStore, singletonSwarmCacheCapacity, singletonSwarmDbCapacity), + memStore: NewMemStore(dbStore, defaultCacheCapacity, defaultChunkRequestsCacheCapacity), DbStore: dbStore, }, NewDPAParams()), nil } diff --git a/swarm/storage/dpa_test.go b/swarm/storage/dpa_test.go index c1601807a30a..e5eebfe37dd5 100644 --- a/swarm/storage/dpa_test.go +++ b/swarm/storage/dpa_test.go @@ -39,7 +39,7 @@ func testDpaRandom(toEncrypt bool, t *testing.T) { defer tdb.close() db := tdb.LDBStore db.setCapacity(50000) - memStore := NewMemStore(db, defaultCacheCapacity, defaultCacheCapacity) + memStore := NewMemStore(db, defaultCacheCapacity, defaultChunkRequestsCacheCapacity) localStore := &LocalStore{ memStore: memStore, DbStore: db, @@ -68,7 +68,7 @@ func testDpaRandom(toEncrypt bool, t *testing.T) { } ioutil.WriteFile("/tmp/slice.bzz.16M", slice, 0666) ioutil.WriteFile("/tmp/result.bzz.16M", resultSlice, 0666) - localStore.memStore = NewMemStore(db, defaultCacheCapacity, defaultCacheCapacity) + localStore.memStore = NewMemStore(db, defaultCacheCapacity, defaultChunkRequestsCacheCapacity) resultReader = dpa.Retrieve(key) for i := range resultSlice { resultSlice[i] = 0 diff --git a/swarm/storage/localstore.go b/swarm/storage/localstore.go index 908a10c704de..b2d4ef76167e 100644 --- a/swarm/storage/localstore.go +++ b/swarm/storage/localstore.go @@ -60,19 +60,19 @@ func NewLocalStore(hash SwarmHasher, params *StoreParams, basekey []byte, mockSt return nil, err } return &LocalStore{ - memStore: NewMemStore(dbStore, params.CacheCapacity, singletonSwarmDbCapacity), + memStore: NewMemStore(dbStore, params.CacheCapacity, defaultChunkRequestsCacheCapacity), DbStore: dbStore, }, nil } func NewTestLocalStoreForAddr(path string, basekey []byte) (*LocalStore, error) { hasher := MakeHashFunc("SHA3") - dbStore, err := NewLDBStore(path, hasher, singletonSwarmDbCapacity, func(k Key) (ret uint8) { return uint8(Proximity(basekey[:], k[:])) }) + dbStore, err := NewLDBStore(path, hasher, defaultLDBCapacity, func(k Key) (ret uint8) { return uint8(Proximity(basekey[:], k[:])) }) if err != nil { return nil, err } localStore := &LocalStore{ - memStore: NewMemStore(dbStore, singletonSwarmDbCapacity, singletonSwarmDbCapacity), + memStore: NewMemStore(dbStore, defaultCacheCapacity, defaultChunkRequestsCacheCapacity), DbStore: dbStore, } return localStore, nil diff --git a/swarm/storage/memstore_lrucache.go b/swarm/storage/memstore_lrucache.go index cf560886e00a..0c851c0fe8a8 100644 --- a/swarm/storage/memstore_lrucache.go +++ b/swarm/storage/memstore_lrucache.go @@ -26,10 +26,6 @@ import ( lru "github.com/hashicorp/golang-lru" ) -const ( - defaultCacheCapacity = 5000 -) - type MemStore struct { cache *lru.Cache requests *lru.Cache @@ -124,7 +120,7 @@ func (m *MemStore) setCapacity(n int) { if n <= 0 { m.disabled = true } else { - m = NewMemStore(nil, uint(n), singletonSwarmDbCapacity) + m = NewMemStore(nil, uint(n), defaultChunkRequestsCacheCapacity) } } diff --git a/swarm/storage/memstore_test.go b/swarm/storage/memstore_test.go 
index 74ce657c73be..9146b298c353 100644 --- a/swarm/storage/memstore_test.go +++ b/swarm/storage/memstore_test.go @@ -19,7 +19,7 @@ package storage import "testing" func newTestMemStore() *MemStore { - return NewMemStore(nil, defaultCacheCapacity, singletonSwarmDbCapacity) + return NewMemStore(nil, defaultCacheCapacity, defaultChunkRequestsCacheCapacity) } func testMemStoreRandom(n int, processors int, chunksize int, t *testing.T) { diff --git a/swarm/storage/resource.go b/swarm/storage/resource.go index 304232142f88..1b06fba39e09 100644 --- a/swarm/storage/resource.go +++ b/swarm/storage/resource.go @@ -897,13 +897,13 @@ func NewTestResourceHandler(datadir string, ethClient headerGetter, validator Re path := filepath.Join(datadir, DbDirName) basekey := make([]byte, 32) hasher := MakeHashFunc(SHA3Hash) - dbStore, err := NewLDBStore(path, hasher, singletonSwarmDbCapacity, func(k Key) (ret uint8) { return uint8(Proximity(basekey[:], k[:])) }) + dbStore, err := NewLDBStore(path, hasher, defaultLDBCapacity, func(k Key) (ret uint8) { return uint8(Proximity(basekey[:], k[:])) }) dbStore.SetTrusted() if err != nil { return nil, err } localStore := &LocalStore{ - memStore: NewMemStore(dbStore, singletonSwarmCacheCapacity, singletonSwarmDbCapacity), + memStore: NewMemStore(dbStore, defaultCacheCapacity, defaultChunkRequestsCacheCapacity), DbStore: dbStore, } resourceChunkStore := NewResourceChunkStore(localStore, nil) From 69e136156607e538fc78f3fe4505bd8206cbc0f2 Mon Sep 17 00:00:00 2001 From: Anton Evangelatov Date: Fri, 13 Apr 2018 12:08:35 +0300 Subject: [PATCH 25/38] swarm/storage: names fixup --- swarm/storage/ldbstore.go | 44 +++++++------------------ swarm/storage/ldbstore_test.go | 4 +-- swarm/storage/localstore.go | 2 +- swarm/storage/memstore_lrucache_test.go | 2 +- 4 files changed, 16 insertions(+), 36 deletions(-) diff --git a/swarm/storage/ldbstore.go b/swarm/storage/ldbstore.go index 6fe98aa1f859..691cf3eb7b3c 100644 --- a/swarm/storage/ldbstore.go +++ b/swarm/storage/ldbstore.go @@ -48,10 +48,8 @@ var ( ) const ( - defaultDbCapacity = 5000000 - defaultRadius = 0 // not yet used - gcArrayFreeRatio = 0.1 + maxGCitems = 5000 // max number of items to be gc'd per call to collectGarbage() ) var ( @@ -82,9 +80,6 @@ type LDBStore struct { capacity uint64 bucketCnt []uint64 - gcPos []byte - gcStartPos []byte - hashfunc SwarmHasher po func(Key) uint8 @@ -138,7 +133,6 @@ func NewLDBStore(path string, hash SwarmHasher, capacity uint64, po func(Key) ui data, _ := s.db.Get(keyEntryCnt) s.entryCnt = BytesToU64(data) s.entryCnt++ - log.Trace("NewLDBStore s.entryCnt++", "entryCnt", s.entryCnt) data, _ = s.db.Get(keyAccessCnt) s.accessCnt = BytesToU64(data) s.accessCnt++ @@ -146,12 +140,6 @@ func NewLDBStore(path string, hash SwarmHasher, capacity uint64, po func(Key) ui s.dataIdx = BytesToU64(data) s.dataIdx++ - s.gcStartPos = make([]byte, 1) - s.gcStartPos[0] = keyIndex - s.gcPos, _ = s.db.Get(keyGCPos) - if s.gcPos == nil { - s.gcPos = s.gcStartPos - } return s, nil } @@ -254,14 +242,18 @@ func (s *LDBStore) collectGarbage(ratio float32) { garbage := []*gcItem{} gcnt := 0 - for ok := it.Seek([]byte{keyIndex}); ok && (gcnt < 5000) && (uint64(gcnt) < s.entryCnt); ok = it.Next() { - key := it.Key() - val := it.Value() - if (key == nil) || (key[0] != keyIndex) { + for ok := it.Seek([]byte{keyIndex}); ok && (gcnt < maxGCitems) && (uint64(gcnt) < s.entryCnt); ok = it.Next() { + itkey := it.Key() + + if (itkey == nil) || (itkey[0] != keyIndex) { break } - log.Trace("iterator", "key", fmt.Sprintf("%x", 
key), "value", fmt.Sprintf("%x", val)) + // it.Key() contents change on next call to it.Next(), so we must copy it + key := make([]byte, len(it.Key())) + copy(key, it.Key()) + + val := it.Value() var index dpaDBIndex @@ -269,35 +261,23 @@ func (s *LDBStore) collectGarbage(ratio float32) { decodeIndex(val, &index) po := s.po(hash) - kkey := make([]byte, len(key)) - copy(kkey, key) - gci := &gcItem{ - idxKey: kkey, + idxKey: key, idx: index.Idx, - value: index.Access, + value: index.Access, // the smaller, the more likely to be gc'd. see sort comparator below. po: po, } - log.Trace("gci.idxKey", "gcnt", gcnt, "idxKey", fmt.Sprintf("%x", gci.idxKey), "idx", gci.idx, "gci.value", gci.value) - garbage = append(garbage, gci) gcnt++ } sort.Slice(garbage[:gcnt], func(i, j int) bool { return garbage[i].value < garbage[j].value }) - for k := 0; k < gcnt; k++ { - log.Trace("gcArray[]", "k", k, "idx", garbage[k].idx, "idxKey", fmt.Sprintf("%x", garbage[k].idxKey), "value", garbage[k].value) - } - cutoff := int(float32(gcnt) * ratio) - log.Trace("cutoff", "cut", cutoff, "gcnt", gcnt) for i := 0; i < cutoff; i++ { s.delete(garbage[i].idx, garbage[i].idxKey, garbage[i].po) } - - //s.db.Put(keyGCPos, s.gcPos) } // Export writes all chunks from the store to a tar archive, returning the diff --git a/swarm/storage/ldbstore_test.go b/swarm/storage/ldbstore_test.go index 928d8dc94a47..c0ae54bd6895 100644 --- a/swarm/storage/ldbstore_test.go +++ b/swarm/storage/ldbstore_test.go @@ -49,9 +49,9 @@ func newTestDbStore(mock bool) (*testDbStore, error) { addr := common.HexToAddress("0x5aaeb6053f3e94c9b9a09f33669435e7ef1beaed") mockStore := globalStore.NewNodeStore(addr) - db, err = NewMockDbStore(dir, MakeHashFunc(SHA3Hash), defaultDbCapacity, testPoFunc, mockStore) + db, err = NewMockDbStore(dir, MakeHashFunc(SHA3Hash), defaultLDBCapacity, testPoFunc, mockStore) } else { - db, err = NewLDBStore(dir, MakeHashFunc(SHA3Hash), defaultDbCapacity, testPoFunc) + db, err = NewLDBStore(dir, MakeHashFunc(SHA3Hash), defaultLDBCapacity, testPoFunc) } return &testDbStore{db, dir}, err diff --git a/swarm/storage/localstore.go b/swarm/storage/localstore.go index b2d4ef76167e..53a6e9f90ba4 100644 --- a/swarm/storage/localstore.go +++ b/swarm/storage/localstore.go @@ -40,7 +40,7 @@ type StoreParams struct { //create params with default values func NewDefaultStoreParams() (self *StoreParams) { return &StoreParams{ - DbCapacity: defaultDbCapacity, + DbCapacity: defaultLDBCapacity, CacheCapacity: defaultCacheCapacity, } } diff --git a/swarm/storage/memstore_lrucache_test.go b/swarm/storage/memstore_lrucache_test.go index 285f797724f6..08f99205207d 100644 --- a/swarm/storage/memstore_lrucache_test.go +++ b/swarm/storage/memstore_lrucache_test.go @@ -34,7 +34,7 @@ func newLDBStore(t *testing.T) (*LDBStore, func()) { } log.Trace("memstore.tempdir", "dir", dir) - db, err := NewLDBStore(dir, MakeHashFunc(SHA3Hash), defaultDbCapacity, testPoFunc) + db, err := NewLDBStore(dir, MakeHashFunc(SHA3Hash), defaultLDBCapacity, testPoFunc) if err != nil { t.Fatal(err) } From 0c41ebcd534759742324104328ed5d4fc7985622 Mon Sep 17 00:00:00 2001 From: Anton Evangelatov Date: Fri, 13 Apr 2018 12:23:24 +0300 Subject: [PATCH 26/38] swarm/storage: fixup --- swarm/storage/ldbstore.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/swarm/storage/ldbstore.go b/swarm/storage/ldbstore.go index 691cf3eb7b3c..5ad2d71a3f0d 100644 --- a/swarm/storage/ldbstore.go +++ b/swarm/storage/ldbstore.go @@ -451,14 +451,11 @@ func (s *LDBStore) ReIndex() { 
} func (s *LDBStore) delete(idx uint64, idxKey []byte, po uint8) { - log.Trace("LDBStore delete()", "idx", idx, "idxKey", fmt.Sprintf("%x", idxKey), "po", po) - batch := new(leveldb.Batch) batch.Delete(idxKey) batch.Delete(getDataKey(idx, po)) dbStoreDeleteCounter.Inc(1) s.entryCnt-- - log.Trace("delete s.entryCnt--", "entryCnt", s.entryCnt) s.bucketCnt[po]-- cntKey := make([]byte, 2) cntKey[0] = keyDistanceCnt @@ -528,7 +525,6 @@ func (s *LDBStore) doPut(chunk *Chunk, index *dpaDBIndex, po uint8) { index.Idx = s.dataIdx s.bucketCnt[po] = s.dataIdx s.entryCnt++ - log.Trace("doPut entryCnt++", "entryCnt", s.entryCnt) s.dataIdx++ cntKey := make([]byte, 2) @@ -553,7 +549,7 @@ func (s *LDBStore) writeBatches() { log.Error(fmt.Sprintf("spawn batch write (%d entries): %v", b.Len(), err)) } close(c) - if e >= s.capacity && int(float32(e-1)*gcArrayFreeRatio) > 0 { + if e >= s.capacity { log.Trace(fmt.Sprintf("collecting garbage (%d chunks)", e)) s.collectGarbage(gcArrayFreeRatio) } From 4c955d330f73534d16ba2e6a2b9647801dff97c0 Mon Sep 17 00:00:00 2001 From: Anton Evangelatov Date: Fri, 13 Apr 2018 12:27:05 +0300 Subject: [PATCH 27/38] swarm/storage: tests fixup --- swarm/storage/ldbstore_test.go | 24 ++++-------------------- 1 file changed, 4 insertions(+), 20 deletions(-) diff --git a/swarm/storage/ldbstore_test.go b/swarm/storage/ldbstore_test.go index c0ae54bd6895..6601693c8fbc 100644 --- a/swarm/storage/ldbstore_test.go +++ b/swarm/storage/ldbstore_test.go @@ -27,7 +27,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/swarm/storage/mock/mem" - colorable "github.com/mattn/go-colorable" ldberrors "github.com/syndtr/goleveldb/leveldb/errors" ) @@ -276,11 +275,8 @@ func BenchmarkMockDbStoreGet_8_5k(b *testing.B) { } // TestLDBStoreWithoutCollectGarbage tests that we can put a number of random chunks in the LevelDB store, and -// retrieve them, provided we don't hit the garbage collection and we are at least 20-30% below the specified capacity +// retrieve them, provided we don't hit the garbage collection func TestLDBStoreWithoutCollectGarbage(t *testing.T) { - log.PrintOrigins(true) - log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true)))) - chunkSize := uint64(4096) capacity := 50 n := 10 @@ -332,9 +328,6 @@ func TestLDBStoreWithoutCollectGarbage(t *testing.T) { // TestLDBStoreCollectGarbage tests that we can put more chunks than LevelDB's capacity, and // retrieve only some of them, because garbage collection must have cleared some of them func TestLDBStoreCollectGarbage(t *testing.T) { - log.PrintOrigins(true) - log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true)))) - chunkSize := uint64(4096) capacity := 500 n := 2000 @@ -380,20 +373,14 @@ func TestLDBStoreCollectGarbage(t *testing.T) { } if missing < n-capacity { - log.Info("ldbstore", "total", n, "missing", missing) - log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt) t.Fatalf("gc failure: expected to miss %v chunks, but only %v are actually missing", n-capacity, missing) } - log.Info("ldbstore", "total", n, "missing", missing) - log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt) + log.Info("ldbstore", "total", n, "missing", missing, "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt) } // TestLDBStoreAddRemove tests that we can put and then delete a given 
chunk func TestLDBStoreAddRemove(t *testing.T) { - log.PrintOrigins(true) - log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true)))) - ldb, cleanup := newLDBStore(t) ldb.setCapacity(200) defer cleanup() @@ -455,9 +442,6 @@ func TestLDBStoreAddRemove(t *testing.T) { // TestLDBStoreRemoveThenCollectGarbage tests that we can delete chunks and that we can trigger garbage collection func TestLDBStoreRemoveThenCollectGarbage(t *testing.T) { - log.PrintOrigins(true) - log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(false)))) - capacity := 10 ldb, cleanup := newLDBStore(t) @@ -510,14 +494,14 @@ func TestLDBStoreRemoveThenCollectGarbage(t *testing.T) { <-chunks[i].dbStoredC } - // expect for first chunk to be missing + // expect for first chunk to be missing, because it has the smallest access value idx := 0 ret, err := ldb.Get(chunks[idx].Key) if err == nil || ret != nil { t.Fatal("expected first chunk to be missing, but got no error") } - // expect for last chunk to be present + // expect for last chunk to be present, as it has the largest access value idx = 9 ret, err = ldb.Get(chunks[idx].Key) if err != nil { From 9322249253d5c32098360fb8196f4b0d7cb78bc8 Mon Sep 17 00:00:00 2001 From: Anton Evangelatov Date: Fri, 13 Apr 2018 12:30:16 +0300 Subject: [PATCH 28/38] swarm/storage: fix NewRandomChunk to allocate 8 bytes for chunk length --- swarm/storage/memstore_lrucache_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/swarm/storage/memstore_lrucache_test.go b/swarm/storage/memstore_lrucache_test.go index 08f99205207d..0872f240f38f 100644 --- a/swarm/storage/memstore_lrucache_test.go +++ b/swarm/storage/memstore_lrucache_test.go @@ -144,7 +144,7 @@ func NewRandomChunk(chunkSize uint64) *Chunk { c := &Chunk{ Key: make([]byte, 32), ReqC: nil, - SData: make([]byte, chunkSize), + SData: make([]byte, chunkSize+8), // SData should be chunkSize + 8 bytes reserved for length dbStoredC: make(chan bool), dbStoredMu: &sync.Mutex{}, } From 04a0d11528859e04430bf9263f6a22418d2c06ca Mon Sep 17 00:00:00 2001 From: Anton Evangelatov Date: Fri, 13 Apr 2018 12:45:56 +0300 Subject: [PATCH 29/38] swarm/storage: better comment --- swarm/storage/ldbstore_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/swarm/storage/ldbstore_test.go b/swarm/storage/ldbstore_test.go index 6601693c8fbc..0d560d51870e 100644 --- a/swarm/storage/ldbstore_test.go +++ b/swarm/storage/ldbstore_test.go @@ -296,7 +296,7 @@ func TestLDBStoreWithoutCollectGarbage(t *testing.T) { go ldb.Put(chunks[i]) } - // wait for all chunks to be stored before ending the test are cleaning up + // wait for all chunks to be stored for i := 0; i < n; i++ { <-chunks[i].dbStoredC } @@ -347,7 +347,7 @@ func TestLDBStoreCollectGarbage(t *testing.T) { ldb.Put(chunks[i]) } - // wait for all chunks to be stored before ending the test are cleaning up + // wait for all chunks to be stored for i := 0; i < n; i++ { <-chunks[i].dbStoredC } From ce3d732c4ae0a421551b3a835e847bf7d1ed1484 Mon Sep 17 00:00:00 2001 From: Anton Evangelatov Date: Fri, 13 Apr 2018 14:20:49 +0300 Subject: [PATCH 30/38] swarm/storage: set default leveldb capacity to 20gb --- swarm/storage/dpa.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/swarm/storage/dpa.go b/swarm/storage/dpa.go index 3a10f0b860a4..9e7b6e946052 100644 --- a/swarm/storage/dpa.go +++ b/swarm/storage/dpa.go @@ -35,9 +35,9 @@ 
implementation for storage or retrieval. */ const ( - defaultLDBCapacity = 50000 // capacity for LevelDB - defaultCacheCapacity = 500 // capacity for in-memory chunks' cache - defaultChunkRequestsCacheCapacity = 50000 // capacity for container holding outgoing requests for chunks. should be set to LevelDB capacity + defaultLDBCapacity = 5000000 // capacity for LevelDB, by default 5*10^6*4096 bytes == 20GB + defaultCacheCapacity = 500 // capacity for in-memory chunks' cache + defaultChunkRequestsCacheCapacity = 5000000 // capacity for container holding outgoing requests for chunks. should be set to LevelDB capacity ) var ( From 51387aaea00b2cbd02b1ede6411086e7e9f6cc00 Mon Sep 17 00:00:00 2001 From: Anton Evangelatov Date: Fri, 13 Apr 2018 15:32:16 +0300 Subject: [PATCH 31/38] swarm/storage: add sleep to wait for gc --- swarm/storage/ldbstore_test.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/swarm/storage/ldbstore_test.go b/swarm/storage/ldbstore_test.go index 0d560d51870e..bf97b3ab6531 100644 --- a/swarm/storage/ldbstore_test.go +++ b/swarm/storage/ldbstore_test.go @@ -23,6 +23,7 @@ import ( "os" "sync" "testing" + "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" @@ -340,7 +341,7 @@ func TestLDBStoreCollectGarbage(t *testing.T) { for i := 0; i < n; i++ { c := NewRandomChunk(chunkSize) chunks = append(chunks, c) - log.Info("generate random chunk", "idx", i, "chunk", c) + log.Trace("generate random chunk", "idx", i, "chunk", c) } for i := 0; i < n; i++ { @@ -354,6 +355,9 @@ func TestLDBStoreCollectGarbage(t *testing.T) { log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt) + // wait for garbage collection to kick in on the responsible actor + time.Sleep(5 * time.Second) + var missing int for i := 0; i < n; i++ { ret, err := ldb.Get(chunks[i].Key) @@ -369,7 +373,7 @@ func TestLDBStoreCollectGarbage(t *testing.T) { t.Fatal("expected to get the same data back, but got smth else") } - log.Info("got back chunk", "chunk", ret) + log.Trace("got back chunk", "chunk", ret) } if missing < n-capacity { From cd54ac7c2c0cfa9ee447afd512084de56a8c0165 Mon Sep 17 00:00:00 2001 From: Anton Evangelatov Date: Fri, 13 Apr 2018 17:18:11 +0300 Subject: [PATCH 32/38] swarm/storage: demote Info to Trace --- swarm/storage/ldbstore_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/swarm/storage/ldbstore_test.go b/swarm/storage/ldbstore_test.go index bf97b3ab6531..5c6585af2c7f 100644 --- a/swarm/storage/ldbstore_test.go +++ b/swarm/storage/ldbstore_test.go @@ -290,7 +290,7 @@ func TestLDBStoreWithoutCollectGarbage(t *testing.T) { for i := 0; i < n; i++ { c := NewRandomChunk(chunkSize) chunks = append(chunks, c) - log.Info("generate random chunk", "idx", i, "chunk", c) + log.Trace("generate random chunk", "idx", i, "chunk", c) } for i := 0; i < n; i++ { @@ -395,7 +395,7 @@ func TestLDBStoreAddRemove(t *testing.T) { for i := 0; i < n; i++ { c := NewRandomChunk(chunkSize) chunks = append(chunks, c) - log.Info("generate random chunk", "idx", i, "chunk", c) + log.Trace("generate random chunk", "idx", i, "chunk", c) } for i := 0; i < n; i++ { @@ -457,7 +457,7 @@ func TestLDBStoreRemoveThenCollectGarbage(t *testing.T) { for i := 0; i < capacity; i++ { c := NewRandomChunk(chunkSize) chunks = append(chunks, c) - log.Info("generate random chunk", "idx", i, "chunk", c) + log.Trace("generate random chunk", "idx", i, "chunk", c) } for i := 0; i < n; i++ { From 0eeda9fb35b7e63af772f71939ce106045fbf2c1 Mon Sep 17 
00:00:00 2001 From: Anton Evangelatov Date: Mon, 16 Apr 2018 13:15:30 +0300 Subject: [PATCH 33/38] swarm/storage: remove bytes check and panic --- swarm/storage/memstore_lrucache.go | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/swarm/storage/memstore_lrucache.go b/swarm/storage/memstore_lrucache.go index 3bbf923d79fe..7e93a5a8e782 100644 --- a/swarm/storage/memstore_lrucache.go +++ b/swarm/storage/memstore_lrucache.go @@ -19,8 +19,6 @@ package storage import ( - "bytes" - "fmt" "sync" lru "github.com/hashicorp/golang-lru" @@ -79,11 +77,7 @@ func (m *MemStore) Get(key Key) (*Chunk, error) { if !ok { return nil, ErrChunkNotFound } - chunk := c.(*Chunk) - if !bytes.Equal(chunk.Key, key) { - panic(fmt.Errorf("MemStore.Get: chunk key %s != req key %s", chunk.Key.Hex(), key.Hex())) - } - return chunk, nil + return c.(*Chunk), nil } func (m *MemStore) Put(c *Chunk) { From cc6a4ab20e3e2a691a5f8f6313e469abdb5606ef Mon Sep 17 00:00:00 2001 From: Anton Evangelatov Date: Mon, 16 Apr 2018 13:20:51 +0300 Subject: [PATCH 34/38] swarm/storage: debug failed tests" --- swarm/storage/ldbstore.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/swarm/storage/ldbstore.go b/swarm/storage/ldbstore.go index 551943c9c0f4..37945b11399b 100644 --- a/swarm/storage/ldbstore.go +++ b/swarm/storage/ldbstore.go @@ -246,6 +246,7 @@ func decodeOldData(data []byte, chunk *Chunk) { } func (s *LDBStore) collectGarbage(ratio float32) { + log.Info("collect garbage", "ratio", ratio) it := s.db.NewIterator() defer it.Release() @@ -285,6 +286,7 @@ func (s *LDBStore) collectGarbage(ratio float32) { sort.Slice(garbage[:gcnt], func(i, j int) bool { return garbage[i].value < garbage[j].value }) cutoff := int(float32(gcnt) * ratio) + log.Info("cutoff", "cutoff", cutoff, "gcnt", gcnt) for i := 0; i < cutoff; i++ { s.delete(garbage[i].idx, garbage[i].idxKey, garbage[i].po) } From ccb52414fa539116e8f61a2fc7c4ff1e1dccb63c Mon Sep 17 00:00:00 2001 From: Anton Evangelatov Date: Mon, 16 Apr 2018 13:46:30 +0300 Subject: [PATCH 35/38] swarm/storage: more debug lines for failed test --- swarm/storage/ldbstore.go | 3 +- swarm/storage/memstore_tree.god | 374 -------------------------------- 2 files changed, 2 insertions(+), 375 deletions(-) delete mode 100644 swarm/storage/memstore_tree.god diff --git a/swarm/storage/ldbstore.go b/swarm/storage/ldbstore.go index 37945b11399b..ae5caf700d7d 100644 --- a/swarm/storage/ldbstore.go +++ b/swarm/storage/ldbstore.go @@ -563,7 +563,7 @@ func (s *LDBStore) writeBatches() { } close(c) if e >= s.capacity { - log.Trace(fmt.Sprintf("collecting garbage (%d chunks)", e)) + log.Info("collecting garbage", "entryCnt", e, "capacity", s.capacity) s.collectGarbage(gcArrayFreeRatio) } s.lock.Unlock() @@ -694,6 +694,7 @@ func (s *LDBStore) setCapacity(c uint64) { ratio = 1 } for s.entryCnt > c { + log.Info("collecting garbage (set.capacity)", "entryCnt", s.entryCnt, "capacity", c) s.collectGarbage(ratio) } } diff --git a/swarm/storage/memstore_tree.god b/swarm/storage/memstore_tree.god deleted file mode 100644 index 529b9dfa247e..000000000000 --- a/swarm/storage/memstore_tree.god +++ /dev/null @@ -1,374 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
-// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -// memory storage layer for the package blockhash - -package storage - -import ( - "fmt" - "sync" - "time" - - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/metrics" -) - -//metrics variables -var ( - memstorePutCounter = metrics.NewRegisteredCounter("storage.db.memstore.put.count", nil) - memstoreRemoveCounter = metrics.NewRegisteredCounter("storage.db.memstore.rm.count", nil) -) - -const ( - memTreeLW = 2 // log2(subtree count) of the subtrees - memTreeFLW = 14 // log2(subtree count) of the root layer - dbForceUpdateAccessCnt = 1000 - defaultCacheCapacity = 5000 -) - -type MemStore struct { - memtree *memTree - entryCnt, capacity uint // stored entries - accessCnt uint64 // access counter; oldest is thrown away when full - dbAccessCnt uint64 - ldbStore *LDBStore - lock sync.Mutex -} - -/* -a hash prefix subtree containing subtrees or one storage entry (but never both) - -- access[0] stores the smallest (oldest) access count value in this subtree -- if it contains more subtrees and its subtree count is at least 4, access[1:2] - stores the smallest access count in the first and second halves of subtrees - (so that access[0] = min(access[1], access[2]) -- likewise, if subtree count is at least 8, - access[1] = min(access[3], access[4]) - access[2] = min(access[5], access[6]) - (access[] is a binary tree inside the multi-bit leveled hash tree) -*/ - -func NewMemStore(params *StoreParams, d *LDBStore) (m *MemStore) { - - capacity := params.CacheCapacity - m = &MemStore{} - m.memtree = newMemTree(memTreeFLW, nil, 0) - m.ldbStore = d - m.setCapacity(capacity) - return -} - -type memTree struct { - subtree []*memTree - parent *memTree - parentIdx uint - - bits uint // log2(subtree count) - width uint // subtree count - - entry *Chunk // if subtrees are present, entry should be nil - lastDBaccess uint64 - access []uint64 -} - -func newMemTree(b uint, parent *memTree, pidx uint) (node *memTree) { - node = new(memTree) - node.bits = b - node.width = 1 << b - node.subtree = make([]*memTree, node.width) - node.access = make([]uint64, node.width-1) - node.parent = parent - node.parentIdx = pidx - if parent != nil { - parent.subtree[pidx] = node - } - - return node -} - -func (node *memTree) updateAccess(a uint64) { - aidx := uint(0) - var aa uint64 - oa := node.access[0] - for node.access[aidx] == oa { - node.access[aidx] = a - if aidx > 0 { - aa = node.access[((aidx-1)^1)+1] - aidx = (aidx - 1) >> 1 - } else { - pidx := node.parentIdx - node = node.parent - if node == nil { - return - } - nn := node.subtree[pidx^1] - if nn != nil { - aa = nn.access[0] - } else { - aa = 0 - } - aidx = (node.width + pidx - 2) >> 1 - } - - if (aa != 0) && (aa < a) { - a = aa - } - } -} - -func (s *MemStore) setCapacity(c uint) { - s.lock.Lock() - defer s.lock.Unlock() - - for c < s.entryCnt { - s.removeOldest() - } - s.capacity = c -} - -func (s *MemStore) Counter() uint { - return s.entryCnt -} - -// entry (not its copy) is going to be in MemStore -func (s *MemStore) Put(entry *Chunk) { - log.Trace("memstore.put", "key", entry.Key) - if s.capacity == 0 { - return - } - - s.lock.Lock() - 
defer s.lock.Unlock() - - if s.entryCnt >= s.capacity { - s.removeOldest() - } - - s.accessCnt++ - - memstorePutCounter.Inc(1) - - node := s.memtree - bitpos := uint(0) - for node.entry == nil { - l := entry.Key.bits(bitpos, node.bits) - st := node.subtree[l] - if st == nil { - st = newMemTree(memTreeLW, node, l) - bitpos += node.bits - node = st - break - } - bitpos += node.bits - node = st - } - - if node.entry != nil { - - if node.entry.Key.isEqual(entry.Key) { - node.updateAccess(s.accessCnt) - if entry.SData == nil { - entry.Size = node.entry.Size - entry.SData = node.entry.SData - } - if entry.ReqC == nil { - entry.ReqC = node.entry.ReqC - } - entry.C = node.entry.C - node.entry = entry - return - } - - for node.entry != nil { - - l := node.entry.Key.bits(bitpos, node.bits) - st := node.subtree[l] - if st == nil { - st = newMemTree(memTreeLW, node, l) - } - st.entry = node.entry - node.entry = nil - st.updateAccess(node.access[0]) - - l = entry.Key.bits(bitpos, node.bits) - st = node.subtree[l] - if st == nil { - st = newMemTree(memTreeLW, node, l) - } - bitpos += node.bits - node = st - - } - } - - node.entry = entry - node.lastDBaccess = s.dbAccessCnt - node.updateAccess(s.accessCnt) - s.entryCnt++ -} - -func (s *MemStore) Get(hash Key) (chunk *Chunk, err error) { - log.Trace("memstore.get", "key", hash) - s.lock.Lock() - defer s.lock.Unlock() - - node := s.memtree - bitpos := uint(0) - for node.entry == nil { - l := hash.bits(bitpos, node.bits) - st := node.subtree[l] - if st == nil { - log.Trace("memstore.get ErrChunkNotFound", "key", hash) - return nil, ErrChunkNotFound - } - bitpos += node.bits - node = st - } - - if node.entry.Key.isEqual(hash) { - s.accessCnt++ - node.updateAccess(s.accessCnt) - chunk = node.entry - if s.dbAccessCnt-node.lastDBaccess > dbForceUpdateAccessCnt { - s.dbAccessCnt++ - node.lastDBaccess = s.dbAccessCnt - if s.ldbStore != nil { - s.ldbStore.updateAccessCnt(hash) - } - } - } else { - err = ErrChunkNotFound - } - - log.Trace("memstore.get return", "key", hash, "chunk", chunk, "err", err) - return -} - -func (s *MemStore) removeOldest() { - defer metrics.GetOrRegisterResettingTimer("memstore.purge", metrics.DefaultRegistry).UpdateSince(time.Now()) - - node := s.memtree - for node.entry == nil { - - aidx := uint(0) - av := node.access[aidx] - - for aidx < node.width/2-1 { - if av == node.access[aidx*2+1] { - node.access[aidx] = node.access[aidx*2+2] - aidx = aidx*2 + 1 - } else if av == node.access[aidx*2+2] { - node.access[aidx] = node.access[aidx*2+1] - aidx = aidx*2 + 2 - } else { - panic(nil) - } - } - pidx := aidx*2 + 2 - node.width - if (node.subtree[pidx] != nil) && (av == node.subtree[pidx].access[0]) { - if node.subtree[pidx+1] != nil { - node.access[aidx] = node.subtree[pidx+1].access[0] - } else { - node.access[aidx] = 0 - } - } else if (node.subtree[pidx+1] != nil) && (av == node.subtree[pidx+1].access[0]) { - if node.subtree[pidx] != nil { - node.access[aidx] = node.subtree[pidx].access[0] - } else { - node.access[aidx] = 0 - } - pidx++ - } else { - panic(nil) - } - - //fmt.Println(pidx) - node = node.subtree[pidx] - - } - - if node.entry.ReqC == nil { - log.Trace(fmt.Sprintf("Memstore Clean: Waiting for chunk %v to be saved", node.entry.Key.Log())) - <-node.entry.dbStoredC - log.Trace(fmt.Sprintf("Memstore Clean: Chunk %v saved to DBStore. 
Ready to clear from mem.", node.entry.Key.Log())) - - memstoreRemoveCounter.Inc(1) - node.entry = nil - s.entryCnt-- - } else { - return - } - - node.access[0] = 0 - - //--- - - aidx := uint(0) - for { - aa := node.access[aidx] - if aidx > 0 { - aidx = (aidx - 1) >> 1 - } else { - pidx := node.parentIdx - node = node.parent - if node == nil { - return - } - aidx = (node.width + pidx - 2) >> 1 - } - if (aa != 0) && ((aa < node.access[aidx]) || (node.access[aidx] == 0)) { - node.access[aidx] = aa - } - } -} - -// type MemStore struct { -// m map[string]*Chunk -// mu sync.RWMutex -// } - -// func NewMemStore(d *DbStore, capacity uint) (m *MemStore) { -// return &MemStore{ -// m: make(map[string]*Chunk), -// } -// } - -// func (m *MemStore) Get(key Key) (*Chunk, error) { -// m.mu.RLock() -// defer m.mu.RUnlock() -// c, ok := m.m[string(key[:])] -// if !ok { -// return nil, ErrNotFound -// } -// if !bytes.Equal(c.Key, key) { -// panic(fmt.Errorf("MemStore.Get: chunk key %s != req key %s", c.Key.Hex(), key.Hex())) -// } -// return c, nil -// } - -// func (m *MemStore) Put(c *Chunk) { -// m.mu.Lock() -// defer m.mu.Unlock() -// m.m[string(c.Key[:])] = c -// } - -// func (m *MemStore) setCapacity(n int) { - -// } - -// Close memstore -func (s *MemStore) Close() {} From 25cd805bea861af631392edfe92961bc1d17fc93 Mon Sep 17 00:00:00 2001 From: Anton Evangelatov Date: Mon, 16 Apr 2018 14:43:50 +0300 Subject: [PATCH 36/38] swarm/storage: fix garbage collection to run until we reach capacity --- swarm/storage/ldbstore.go | 3 ++- swarm/storage/memstore_lrucache.go | 6 ++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/swarm/storage/ldbstore.go b/swarm/storage/ldbstore.go index ae5caf700d7d..8c18e88a1209 100644 --- a/swarm/storage/ldbstore.go +++ b/swarm/storage/ldbstore.go @@ -562,9 +562,10 @@ func (s *LDBStore) writeBatches() { log.Error(fmt.Sprintf("spawn batch write (%d entries): %v", b.Len(), err)) } close(c) - if e >= s.capacity { + for e > s.capacity { log.Info("collecting garbage", "entryCnt", e, "capacity", s.capacity) s.collectGarbage(gcArrayFreeRatio) + e = s.entryCnt } s.lock.Unlock() } diff --git a/swarm/storage/memstore_lrucache.go b/swarm/storage/memstore_lrucache.go index 7e93a5a8e782..a66dcd0c221a 100644 --- a/swarm/storage/memstore_lrucache.go +++ b/swarm/storage/memstore_lrucache.go @@ -31,6 +31,12 @@ type MemStore struct { disabled bool } +//NewMemStore is instantiating a MemStore cache. We are keeping a record of all outgoing requests for chunks, that +//should later be delivered by peer nodes, in the `requests` LRU cache. We are also keeping all frequently requested +//chunks in the `cache` LRU cache. +// +//`requests` LRU cache capacity should ideally never be reached, this is why for the time being it should be initialised +//with the same value as the LDBStore capacity. 
func NewMemStore(params *StoreParams, _ *LDBStore) (m *MemStore) { if params.CacheCapacity == 0 { return &MemStore{ From 158b776bfc9675f2f0b4d8fb09ee4001d480e4c7 Mon Sep 17 00:00:00 2001 From: Anton Evangelatov Date: Mon, 16 Apr 2018 15:29:38 +0300 Subject: [PATCH 37/38] swarm/storage: log if we evict an outgoing request --- swarm/storage/ldbstore.go | 4 ---- swarm/storage/memstore_lrucache.go | 6 +++++- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/swarm/storage/ldbstore.go b/swarm/storage/ldbstore.go index 8c18e88a1209..42fa731aafcd 100644 --- a/swarm/storage/ldbstore.go +++ b/swarm/storage/ldbstore.go @@ -246,7 +246,6 @@ func decodeOldData(data []byte, chunk *Chunk) { } func (s *LDBStore) collectGarbage(ratio float32) { - log.Info("collect garbage", "ratio", ratio) it := s.db.NewIterator() defer it.Release() @@ -286,7 +285,6 @@ func (s *LDBStore) collectGarbage(ratio float32) { sort.Slice(garbage[:gcnt], func(i, j int) bool { return garbage[i].value < garbage[j].value }) cutoff := int(float32(gcnt) * ratio) - log.Info("cutoff", "cutoff", cutoff, "gcnt", gcnt) for i := 0; i < cutoff; i++ { s.delete(garbage[i].idx, garbage[i].idxKey, garbage[i].po) } @@ -563,7 +561,6 @@ func (s *LDBStore) writeBatches() { } close(c) for e > s.capacity { - log.Info("collecting garbage", "entryCnt", e, "capacity", s.capacity) s.collectGarbage(gcArrayFreeRatio) e = s.entryCnt } @@ -695,7 +692,6 @@ func (s *LDBStore) setCapacity(c uint64) { ratio = 1 } for s.entryCnt > c { - log.Info("collecting garbage (set.capacity)", "entryCnt", s.entryCnt, "capacity", c) s.collectGarbage(ratio) } } diff --git a/swarm/storage/memstore_lrucache.go b/swarm/storage/memstore_lrucache.go index a66dcd0c221a..70bd7097288e 100644 --- a/swarm/storage/memstore_lrucache.go +++ b/swarm/storage/memstore_lrucache.go @@ -21,6 +21,7 @@ package storage import ( "sync" + "github.com/ethereum/go-ethereum/log" lru "github.com/hashicorp/golang-lru" ) @@ -53,7 +54,10 @@ func NewMemStore(params *StoreParams, _ *LDBStore) (m *MemStore) { panic(err) } - r, err := lru.New(int(params.ChunkRequestsCacheCapacity)) + requestEvicted := func(key interface{}, value interface{}) { + log.Error("evict called on outgoing request") + } + r, err := lru.NewWithEvict(int(params.ChunkRequestsCacheCapacity), requestEvicted) if err != nil { panic(err) } From 76cc7a211965931d708c2e650c02b7cdbfdc6eed Mon Sep 17 00:00:00 2001 From: Anton Evangelatov Date: Mon, 16 Apr 2018 16:54:04 +0300 Subject: [PATCH 38/38] swarm/storage: rename memstore_lrucache to memstore --- .../{memstore_lrucache.go => memstore.go} | 6 +- swarm/storage/memstore_lrucache_test.go | 169 ------------------ swarm/storage/memstore_test.go | 152 +++++++++++++++- 3 files changed, 154 insertions(+), 173 deletions(-) rename swarm/storage/{memstore_lrucache.go => memstore.go} (98%) delete mode 100644 swarm/storage/memstore_lrucache_test.go diff --git a/swarm/storage/memstore_lrucache.go b/swarm/storage/memstore.go similarity index 98% rename from swarm/storage/memstore_lrucache.go rename to swarm/storage/memstore.go index 70bd7097288e..7266bc92bed7 100644 --- a/swarm/storage/memstore_lrucache.go +++ b/swarm/storage/memstore.go @@ -28,7 +28,7 @@ import ( type MemStore struct { cache *lru.Cache requests *lru.Cache - mu sync.Mutex + mu sync.RWMutex disabled bool } @@ -73,8 +73,8 @@ func (m *MemStore) Get(key Key) (*Chunk, error) { return nil, ErrChunkNotFound } - m.mu.Lock() - defer m.mu.Unlock() + m.mu.RLock() + defer m.mu.RUnlock() r, ok := m.requests.Get(string(key)) // it is a request diff 
--git a/swarm/storage/memstore_lrucache_test.go b/swarm/storage/memstore_lrucache_test.go deleted file mode 100644 index 8765bed40e2d..000000000000 --- a/swarm/storage/memstore_lrucache_test.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package storage - -import ( - "crypto/rand" - "encoding/binary" - "io/ioutil" - "os" - "sync" - "testing" - - "github.com/ethereum/go-ethereum/log" -) - -func newLDBStore(t *testing.T) (*LDBStore, func()) { - dir, err := ioutil.TempDir("", "bzz-storage-test") - if err != nil { - t.Fatal(err) - } - log.Trace("memstore.tempdir", "dir", dir) - - ldbparams := NewLDBStoreParams(NewDefaultStoreParams(), dir) - db, err := NewLDBStore(ldbparams) - if err != nil { - t.Fatal(err) - } - - cleanup := func() { - db.Close() - err := os.RemoveAll(dir) - if err != nil { - t.Fatal(err) - } - } - - return db, cleanup -} - -func TestMemStoreAndLDBStore(t *testing.T) { - ldb, cleanup := newLDBStore(t) - ldb.setCapacity(4000) - defer cleanup() - - cacheCap := 200 - requestsCap := 200 - memStore := NewMemStore(NewStoreParams(4000, 200, 200, nil, nil), nil) - - tests := []struct { - n int // number of chunks to push to memStore - chunkSize uint64 // size of chunk (by default in Swarm - 4096) - request bool // whether or not to set the ReqC channel on the random chunks - }{ - { - n: 1, - chunkSize: 4096, - request: false, - }, - { - n: 201, - chunkSize: 4096, - request: false, - }, - { - n: 501, - chunkSize: 4096, - request: false, - }, - { - n: 3100, - chunkSize: 4096, - request: false, - }, - { - n: 100, - chunkSize: 4096, - request: true, - }, - } - - for i, tt := range tests { - log.Info("running test", "idx", i, "tt", tt) - var chunks []*Chunk - - for i := 0; i < tt.n; i++ { - var c *Chunk - if tt.request { - c = NewRandomRequestChunk(tt.chunkSize) - } else { - c = NewRandomChunk(tt.chunkSize) - } - - chunks = append(chunks, c) - } - - for i := 0; i < tt.n; i++ { - go ldb.Put(chunks[i]) - memStore.Put(chunks[i]) - - if got := memStore.cache.Len(); got > cacheCap { - t.Fatalf("expected to get cache capacity less than %v, but got %v", cacheCap, got) - } - - if got := memStore.requests.Len(); got > requestsCap { - t.Fatalf("expected to get requests capacity less than %v, but got %v", requestsCap, got) - } - } - - for i := 0; i < tt.n; i++ { - _, err := memStore.Get(chunks[i].Key) - if err != nil { - if err == ErrChunkNotFound { - _, err := ldb.Get(chunks[i].Key) - if err != nil { - t.Fatalf("couldn't get chunk %v from ldb, got error: %v", i, err) - } - } else { - t.Fatalf("got error from memstore: %v", err) - } - } - } - - // wait for all chunks to be stored before ending the test are cleaning up - for i := 0; i < tt.n; i++ { - <-chunks[i].dbStoredC - } - } -} - -func NewRandomChunk(chunkSize uint64) *Chunk { - c := &Chunk{ 
- Key: make([]byte, 32), - ReqC: nil, - SData: make([]byte, chunkSize+8), // SData should be chunkSize + 8 bytes reserved for length - dbStoredC: make(chan bool), - dbStoredMu: &sync.Mutex{}, - } - - rand.Read(c.SData) - - binary.LittleEndian.PutUint64(c.SData[:8], chunkSize) - - hasher := MakeHashFunc(SHA3Hash)() - hasher.Write(c.SData) - copy(c.Key, hasher.Sum(nil)) - - return c -} - -func NewRandomRequestChunk(chunkSize uint64) *Chunk { - c := NewRandomChunk(chunkSize) - c.ReqC = make(chan bool) - - return c -} diff --git a/swarm/storage/memstore_test.go b/swarm/storage/memstore_test.go index f2e2f81a3cf6..5dc3ae5f8602 100644 --- a/swarm/storage/memstore_test.go +++ b/swarm/storage/memstore_test.go @@ -16,7 +16,16 @@ package storage -import "testing" +import ( + "crypto/rand" + "encoding/binary" + "io/ioutil" + "os" + "sync" + "testing" + + "github.com/ethereum/go-ethereum/log" +) func newTestMemStore() *MemStore { storeparams := NewDefaultStoreParams() @@ -96,3 +105,144 @@ func BenchmarkMemStoreGet_1_5k(b *testing.B) { func BenchmarkMemStoreGet_8_5k(b *testing.B) { benchmarkMemStoreGet(5000, 8, 4096, b) } + +func newLDBStore(t *testing.T) (*LDBStore, func()) { + dir, err := ioutil.TempDir("", "bzz-storage-test") + if err != nil { + t.Fatal(err) + } + log.Trace("memstore.tempdir", "dir", dir) + + ldbparams := NewLDBStoreParams(NewDefaultStoreParams(), dir) + db, err := NewLDBStore(ldbparams) + if err != nil { + t.Fatal(err) + } + + cleanup := func() { + db.Close() + err := os.RemoveAll(dir) + if err != nil { + t.Fatal(err) + } + } + + return db, cleanup +} + +func TestMemStoreAndLDBStore(t *testing.T) { + ldb, cleanup := newLDBStore(t) + ldb.setCapacity(4000) + defer cleanup() + + cacheCap := 200 + requestsCap := 200 + memStore := NewMemStore(NewStoreParams(4000, 200, 200, nil, nil), nil) + + tests := []struct { + n int // number of chunks to push to memStore + chunkSize uint64 // size of chunk (by default in Swarm - 4096) + request bool // whether or not to set the ReqC channel on the random chunks + }{ + { + n: 1, + chunkSize: 4096, + request: false, + }, + { + n: 201, + chunkSize: 4096, + request: false, + }, + { + n: 501, + chunkSize: 4096, + request: false, + }, + { + n: 3100, + chunkSize: 4096, + request: false, + }, + { + n: 100, + chunkSize: 4096, + request: true, + }, + } + + for i, tt := range tests { + log.Info("running test", "idx", i, "tt", tt) + var chunks []*Chunk + + for i := 0; i < tt.n; i++ { + var c *Chunk + if tt.request { + c = NewRandomRequestChunk(tt.chunkSize) + } else { + c = NewRandomChunk(tt.chunkSize) + } + + chunks = append(chunks, c) + } + + for i := 0; i < tt.n; i++ { + go ldb.Put(chunks[i]) + memStore.Put(chunks[i]) + + if got := memStore.cache.Len(); got > cacheCap { + t.Fatalf("expected to get cache capacity less than %v, but got %v", cacheCap, got) + } + + if got := memStore.requests.Len(); got > requestsCap { + t.Fatalf("expected to get requests capacity less than %v, but got %v", requestsCap, got) + } + } + + for i := 0; i < tt.n; i++ { + _, err := memStore.Get(chunks[i].Key) + if err != nil { + if err == ErrChunkNotFound { + _, err := ldb.Get(chunks[i].Key) + if err != nil { + t.Fatalf("couldn't get chunk %v from ldb, got error: %v", i, err) + } + } else { + t.Fatalf("got error from memstore: %v", err) + } + } + } + + // wait for all chunks to be stored before ending the test are cleaning up + for i := 0; i < tt.n; i++ { + <-chunks[i].dbStoredC + } + } +} + +func NewRandomChunk(chunkSize uint64) *Chunk { + c := &Chunk{ + Key: make([]byte, 32), + 
ReqC: nil, + SData: make([]byte, chunkSize+8), // SData should be chunkSize + 8 bytes reserved for length + dbStoredC: make(chan bool), + dbStoredMu: &sync.Mutex{}, + } + + rand.Read(c.SData) + + binary.LittleEndian.PutUint64(c.SData[:8], chunkSize) + + hasher := MakeHashFunc(SHA3Hash)() + hasher.Write(c.SData) + copy(c.Key, hasher.Sum(nil)) + + return c +} + +func NewRandomRequestChunk(chunkSize uint64) *Chunk { + c := NewRandomChunk(chunkSize) + c.ReqC = make(chan bool) + + return c +}
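
For reference, below is a minimal standalone sketch (not part of the patch series, standard library only) of the SData layout that PATCH 28 makes NewRandomChunk respect: the buffer is allocated as chunkSize + 8 bytes, with the first 8 bytes reserved for the content length as a little-endian uint64, mirroring the binary.LittleEndian.PutUint64 call in the test helper above. File name, variable names and the printed message are illustrative only.

// chunklayout_sketch.go - illustrative sketch, not part of the go-ethereum tree.
package main

import (
	"crypto/rand"
	"encoding/binary"
	"fmt"
)

func main() {
	const chunkSize = 4096

	// Allocate chunkSize + 8 bytes: the extra 8 bytes are reserved for the length prefix.
	sdata := make([]byte, chunkSize+8)

	// Fill the whole buffer with random bytes, as NewRandomChunk does ...
	if _, err := rand.Read(sdata); err != nil {
		panic(err)
	}

	// ... then overwrite the first 8 bytes with the content length, little-endian.
	binary.LittleEndian.PutUint64(sdata[:8], chunkSize)

	// A reader recovers the stored length from the same prefix.
	got := binary.LittleEndian.Uint64(sdata[:8])
	fmt.Printf("length prefix says %d bytes of content in a %d byte SData buffer\n", got, len(sdata))
}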