Skip to content

Commit

Permalink
added 32-byte address alignment
Browse files Browse the repository at this point in the history
  • Loading branch information
adranwit committed Apr 7, 2022
1 parent ba653d8 commit 8744ba2
Show file tree
Hide file tree
Showing 7 changed files with 59 additions and 33 deletions.
3 changes: 3 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,3 +1,6 @@
## April 7 2022 v0.4.0
* Added 32-byte memory alignment to increase addressable space to 256 GB

## July 25 2021 v0.3.0
* Modify secondary segment fallback branch to return primary segment buffer

Expand Down
11 changes: 9 additions & 2 deletions cache.go
Original file line number Diff line number Diff line change
@@ -1,18 +1,21 @@
package scache

import (
"fmt"
"github.com/pkg/errors"
"sync"
"sync/atomic"
"time"
)

const segmentsSize = 2
const (
segmentsSize = 2
maxSupportedSize = 256 * 1024
)

//Cache represents cache service
type Cache struct {
config *Config
data []byte
segments [segmentsSize]segment
index uint32
mutex sync.Mutex
Expand Down Expand Up @@ -107,6 +110,10 @@ func (s *Cache) Close() (err error) {
//New creates a Cache
func New(config *Config) (*Cache, error) {
config.Init()
if config.SizeMb > maxSupportedSize {
//given 32 bytes data alignment max addressable space is 256GB
return nil, fmt.Errorf("exceeded max supported cache size: 256GB")
}
var cache = &Cache{
config: config,
}
Expand Down
5 changes: 4 additions & 1 deletion cache_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -179,7 +179,10 @@ func initCache(entries, entrySize int, location string) *Cache {
if cfg.SizeMb < freeMB-1024 && freeMB-1024 > 0 {
cfg.SizeMb = freeMB - 1024
}
cache, _ := New(cfg)
cache, err := New(cfg)
if err != nil {
panic(err)
}
return cache
}

Expand Down
22 changes: 15 additions & 7 deletions config.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,12 +8,14 @@ const (
//DefaultShardMapSize default map shard allocation size.
DefaultShardMapSize = 32 * 1024
mb = 1024 * 1024
alignmentSize = 32
)

//Config represents cache config
type Config struct {
MaxEntries int //optional upper entries limit in the cache
EntrySize int //optional entry size to estimate SizeMb (MaxEntries * EntrySize) when specified
MaxEntries int //optional upper entries limit in the cache
EntrySize int //optional entry size to estimate SizeMb (MaxEntries * EntrySize) when specified
KeySize int
SizeMb int //optional max cache size, default 1
Shards uint64 //optional segment shards size, default MAX(32, MaxEntries / 1024*1024)
Location string //optional path to mapped memory file
Expand All @@ -29,13 +31,15 @@ func (c *Config) SegmentDataSize() int {
func (c *Config) Init() {
if c.SizeMb == 0 {
c.SizeMb = DefaultCacheSizeMb
if c.MaxEntries > 0 && c.EntrySize > 0 {
c.SizeMb += 2 * c.MaxEntries * c.EntrySize / mb
}
}
if c.SizeMb > 4096*mb { //currently max supported memory
c.SizeMb = 4096 * mb

if c.MaxEntries > 0 && c.EntrySize > 0 {
estSizeMb := DefaultCacheSizeMb + (2*c.MaxEntries*alignSize(headerSize+c.EntrySize))/mb
if c.SizeMb < estSizeMb {
c.SizeMb = estSizeMb
}
}

if c.Shards < MinShards {
c.Shards = MinShards
if candidate := c.MaxEntries / mb; candidate > int(c.Shards) {
Expand All @@ -49,3 +53,7 @@ func (c *Config) Init() {
}

}

// alignSize rounds size up to the next 32-byte boundary.
// Note: a size that is already a multiple of 32 is still bumped by a full
// 32-byte step (e.g. 32 -> 64); this mirrors the inline alignment formula
// used when entries are written, so capacity estimates and writes agree.
func alignSize(size int) int {
	const alignment = 32
	return (size/alignment + 1) * alignment
}
32 changes: 16 additions & 16 deletions segment.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,14 +15,13 @@ const (
controlByte = 0x9A
)


type segment struct {
*shardedMap
config *Config
index uint32
data []byte
dataSize uint32
tail uint32
dataSize uint64
tail uint64
keys uint32
mmap *mmap
}
Expand All @@ -40,7 +39,7 @@ func (s *segment) reset(aMap *shardedMap) {
s.shardedMap.maps[i] = aMap.maps[i]
s.shardedMap.lock[i].Unlock()
}
atomic.StoreUint32(&s.tail, 1)
atomic.StoreUint64(&s.tail, 32)
atomic.StoreUint32(&s.keys, 0)
}

Expand All @@ -54,12 +53,12 @@ func (s *segment) get(key string) ([]byte, bool) {
if headerAddressEnd > s.dataSize {
return nil, false
}
entrySize := binary.LittleEndian.Uint32(s.data[headerAddress+1:headerAddressEnd])
if headerAddressEnd > atomic.LoadUint32(&s.tail) {
entrySize := binary.LittleEndian.Uint32(s.data[headerAddress+1 : headerAddressEnd])
if headerAddressEnd > atomic.LoadUint64(&s.tail) {
return nil, false
}
dataAddress := headerAddress + headerSize
dataAddressEnd := dataAddress + entrySize
dataAddressEnd := dataAddress + uint64(entrySize)
if dataAddressEnd > s.dataSize {
return nil, false
}
Expand All @@ -70,7 +69,6 @@ func (s *segment) get(key string) ([]byte, bool) {
return result, true
}


func (s *segment) delete(key string) {
shardedMap := s.getShardedMap()
if shardedMap.delete(key) {
Expand All @@ -89,35 +87,37 @@ func (s *segment) getShardedMap() *shardedMap {
}

func (s *segment) set(key string, value []byte) ([]byte, bool) {
if maxEntries := s.config.MaxEntries; maxEntries > 0 && int(atomic.LoadUint32(&s.keys)) > maxEntries {
if maxEntries := s.config.MaxEntries; maxEntries > 0 && 1+int(atomic.LoadUint32(&s.keys)) > maxEntries {
return nil, false
}
shardedMap := s.getShardedMap()
blobSize := len(value) + headerSize
nextAddress := int(atomic.AddUint32(&s.tail, uint32(blobSize)))
alignBlobSize := ((blobSize >> 5) + 1) << 5
nextAddress := int(atomic.AddUint64(&s.tail, uint64(alignBlobSize)))

if nextAddress >= len(s.data) { //out of memory,
atomic.SwapUint32(&s.tail, s.dataSize-1)
atomic.SwapUint64(&s.tail, s.dataSize-1)
return nil, false
}
headerAddress := nextAddress - blobSize
headerAddress := nextAddress - alignBlobSize
s.data[headerAddress] = controlByte
binary.LittleEndian.PutUint32(s.data[headerAddress+1:headerAddress+headerSize], uint32(len(value)))
entryAddress := headerAddress + headerSize
entryAddressOffset := entryAddress + len(value)
copy(s.data[entryAddress:entryAddressOffset], value)
if hadKey := shardedMap.put(key, uint32(headerAddress)); !hadKey {
if hadKey := shardedMap.put(key, uint32(headerAddress>>5)); !hadKey {
atomic.AddUint32(&s.keys, 1)
}
return s.data[entryAddress:entryAddressOffset], true
}

func (s *segment) allocate(idx int) error {
s.index = uint32(idx)
s.tail = 1
s.tail = 32
segmentDataSize := s.config.SegmentDataSize()
if s.config.Location == "" {
s.data = make([]byte, segmentDataSize)
s.dataSize = uint32(len(s.data))
s.dataSize = uint64(segmentDataSize)
return nil
}
s.mmap = newMmap(s.config.Location, s.config.SizeMb*mb)
Expand All @@ -126,7 +126,7 @@ func (s *segment) allocate(idx int) error {
s.mmap.size = segmentDataSize
offset := int64(idx * segmentDataSize)
err = s.mmap.assign(offset, &s.data)
s.dataSize = uint32(len(s.data))
s.dataSize = uint64(len(s.data))
}
return err
}
15 changes: 10 additions & 5 deletions segment_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -48,23 +48,28 @@ func TestSegment_get(t *testing.T) {
}

for _, useCase := range useCases {
config := &Config{SizeMb: useCase.sizeMb}
config := &Config{SizeMb: useCase.sizeMb, EntrySize: useCase.entrySize, MaxEntries: useCase.keys}
config.Init()
segment := &segment{
config: config,
shardedMap: newShardedMap(config),
}

err := segment.allocate(0)

assert.Nil(t, err)
if !assert.Nil(t, err) {
t.Skipf("%v", err)
}
for i := 0; i < useCase.keys; i++ {
key := fmt.Sprintf("key%v", i)
_, has := segment.get(key)
assert.False(t, has, useCase.description)
if !assert.False(t, has, useCase.description) {
}
data := strings.Repeat(useCase.pattern, useCase.entrySize/2)
_, added := segment.set(key, []byte(data))
assert.True(t, added, useCase.description)
if !assert.True(t, added, useCase.description) {
panic(1)

}

actual, has := segment.get(key)
assert.True(t, has, useCase.description)
Expand Down
4 changes: 2 additions & 2 deletions sharded_map.go
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ type shardedMap struct {
shardsHash uint64
}

func (m *shardedMap) getAddress(key string) uint32 {
func (m *shardedMap) getAddress(key string) uint64 {
hashedKey := m.hasher.Sum64(key)
index := hashedKey & m.shardsHash
m.lock[index].RLock()
Expand All @@ -23,7 +23,7 @@ func (m *shardedMap) getAddress(key string) uint32 {
}
value := m.maps[index][hashedKey]
m.lock[index].RUnlock()
return value
return uint64(value) << 5
}

func (m *shardedMap) put(key string, value uint32) bool {
Expand Down

0 comments on commit 8744ba2

Please sign in to comment.