From b08cf5e8953dbb99fece36f38e0de1a9b2bb4691 Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Mon, 11 Mar 2024 16:38:02 +0100 Subject: [PATCH 1/3] Replace bigcache with lru.Cache --- das/bigcache_storage_service.go | 65 ++++++++++------------------ das/bigcache_storage_service_test.go | 21 +++------ das/factory.go | 5 +-- 3 files changed, 28 insertions(+), 63 deletions(-) diff --git a/das/bigcache_storage_service.go b/das/bigcache_storage_service.go index b8eb624515..21b70f86a9 100644 --- a/das/bigcache_storage_service.go +++ b/das/bigcache_storage_service.go @@ -6,80 +6,62 @@ package das import ( "context" "fmt" - "time" - "github.com/allegro/bigcache" "github.com/offchainlabs/nitro/arbstate" "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/pretty" flag "github.com/spf13/pflag" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/lru" "github.com/ethereum/go-ethereum/log" ) type BigCacheConfig struct { - // TODO add other config information like HardMaxCacheSize - Enable bool `koanf:"enable"` - Expiration time.Duration `koanf:"expiration"` - MaxSizeMB int `koanf:"max-size-mb"` + Enable bool `koanf:"enable"` + Capacity int `koanf:"capacity"` } var DefaultBigCacheConfig = BigCacheConfig{ - Expiration: time.Hour, - MaxSizeMB: 1024, + Capacity: 20_000, } var TestBigCacheConfig = BigCacheConfig{ - Enable: true, - Expiration: time.Hour, - MaxSizeMB: 128, + Capacity: 1_000, } func BigCacheConfigAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".enable", DefaultBigCacheConfig.Enable, "Enable local in-memory caching of sequencer batch data") - f.Duration(prefix+".expiration", DefaultBigCacheConfig.Expiration, "Expiration time for in-memory cached sequencer batches") - f.Int(prefix+".max-size-mb", DefaultBigCacheConfig.MaxSizeMB, "Maximum size of the cache, in MB") + f.Int(prefix+".capacity", DefaultBigCacheConfig.Capacity, "Maximum number of entries (up to 64KB each) to store in the cache.") } type BigCacheStorageService struct { baseStorageService StorageService - bigCacheConfig BigCacheConfig - bigCache *bigcache.BigCache + cache *lru.Cache[common.Hash, []byte] } -func NewBigCacheStorageService(bigCacheConfig BigCacheConfig, baseStorageService StorageService) (StorageService, error) { - conf := bigcache.DefaultConfig(bigCacheConfig.Expiration) - conf.HardMaxCacheSize = bigCacheConfig.MaxSizeMB - bigCache, err := bigcache.NewBigCache(conf) - if err != nil { - return nil, err - } +func NewBigCacheStorageService(bigCacheConfig BigCacheConfig, baseStorageService StorageService) *BigCacheStorageService { return &BigCacheStorageService{ baseStorageService: baseStorageService, - bigCacheConfig: bigCacheConfig, - bigCache: bigCache, - }, nil + cache: lru.NewCache[common.Hash, []byte](bigCacheConfig.Capacity), + } } func (bcs *BigCacheStorageService) GetByHash(ctx context.Context, key common.Hash) ([]byte, error) { log.Trace("das.BigCacheStorageService.GetByHash", "key", pretty.PrettyHash(key), "this", bcs) - ret, err := bcs.bigCache.Get(string(key.Bytes())) + if val, wasCached := bcs.cache.Get(key); wasCached { + return val, nil + } + + val, err := bcs.baseStorageService.GetByHash(ctx, key) if err != nil { - ret, err = bcs.baseStorageService.GetByHash(ctx, key) - if err != nil { - return nil, err - } - - err = bcs.bigCache.Set(string(key.Bytes()), ret) - if err != nil { - return nil, err - } - return ret, err + return nil, err } - return ret, err + bcs.cache.Add(key, val) + + return val, nil } func (bcs *BigCacheStorageService) 
Put(ctx context.Context, value []byte, timeout uint64) error { @@ -88,7 +70,8 @@ func (bcs *BigCacheStorageService) Put(ctx context.Context, value []byte, timeou if err != nil { return err } - return bcs.bigCache.Set(string(dastree.HashBytes(value)), value) + bcs.cache.Add(common.Hash(dastree.Hash(value)), value) + return nil } func (bcs *BigCacheStorageService) Sync(ctx context.Context) error { @@ -96,10 +79,6 @@ func (bcs *BigCacheStorageService) Sync(ctx context.Context) error { } func (bcs *BigCacheStorageService) Close(ctx context.Context) error { - err := bcs.bigCache.Close() - if err != nil { - return err - } return bcs.baseStorageService.Close(ctx) } @@ -108,7 +87,7 @@ func (bcs *BigCacheStorageService) ExpirationPolicy(ctx context.Context) (arbsta } func (bcs *BigCacheStorageService) String() string { - return fmt.Sprintf("BigCacheStorageService(%+v)", bcs.bigCacheConfig) + return fmt.Sprintf("BigCacheStorageService(size:%+v)", len(bcs.cache.Keys())) } func (bcs *BigCacheStorageService) HealthCheck(ctx context.Context) error { diff --git a/das/bigcache_storage_service_test.go b/das/bigcache_storage_service_test.go index 5fd0cf68d2..6fe8cbc709 100644 --- a/das/bigcache_storage_service_test.go +++ b/das/bigcache_storage_service_test.go @@ -8,35 +8,25 @@ import ( "context" "errors" "testing" - "time" - "github.com/allegro/bigcache" "github.com/offchainlabs/nitro/das/dastree" ) func TestBigCacheStorageService(t *testing.T) { ctx := context.Background() - timeout := uint64(time.Now().Add(time.Hour).Unix()) baseStorageService := NewMemoryBackedStorageService(ctx) - bigCache, err := bigcache.NewBigCache(bigcache.DefaultConfig(TestBigCacheConfig.Expiration)) - Require(t, err) - bigCacheService := &BigCacheStorageService{ - baseStorageService: baseStorageService, - bigCacheConfig: TestBigCacheConfig, - bigCache: bigCache, - } - Require(t, err) + bigCacheService := NewBigCacheStorageService(TestBigCacheConfig, baseStorageService) val1 := []byte("The first value") val1CorrectKey := dastree.Hash(val1) val1IncorrectKey := dastree.Hash(append(val1, 0)) - _, err = bigCacheService.GetByHash(ctx, val1CorrectKey) + _, err := bigCacheService.GetByHash(ctx, val1CorrectKey) if !errors.Is(err, ErrNotFound) { t.Fatal(err) } - err = bigCacheService.Put(ctx, val1, timeout) + err = bigCacheService.Put(ctx, val1, 1) Require(t, err) _, err = bigCacheService.GetByHash(ctx, val1IncorrectKey) @@ -54,7 +44,7 @@ func TestBigCacheStorageService(t *testing.T) { val2CorrectKey := dastree.Hash(val2) val2IncorrectKey := dastree.Hash(append(val2, 0)) - err = baseStorageService.Put(ctx, val2, timeout) + err = baseStorageService.Put(ctx, val2, 1) Require(t, err) _, err = bigCacheService.GetByHash(ctx, val2IncorrectKey) @@ -71,8 +61,7 @@ func TestBigCacheStorageService(t *testing.T) { emptyBaseStorageService := NewMemoryBackedStorageService(ctx) bigCacheServiceWithEmptyBaseStorage := &BigCacheStorageService{ baseStorageService: emptyBaseStorageService, - bigCacheConfig: TestBigCacheConfig, - bigCache: bigCache, + cache: bigCacheService.cache, } val, err = bigCacheServiceWithEmptyBaseStorage.GetByHash(ctx, val1CorrectKey) Require(t, err) diff --git a/das/factory.go b/das/factory.go index 0e6b292005..c4223b8be2 100644 --- a/das/factory.go +++ b/das/factory.go @@ -130,11 +130,8 @@ func WrapStorageWithCache( } } if config.LocalCache.Enable { - storageService, err = NewBigCacheStorageService(config.LocalCache, storageService) + storageService = NewBigCacheStorageService(config.LocalCache, storageService) 
lifecycleManager.Register(storageService) - if err != nil { - return nil, err - } } return storageService, nil } From e8a7dc52d6b86e1f7039401a58e9e7e9c14146e5 Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Mon, 11 Mar 2024 16:38:30 +0100 Subject: [PATCH 2/3] Remove bigcache dep --- go.mod | 1 - 1 file changed, 1 deletion(-) diff --git a/go.mod b/go.mod index 3b40e9a1ce..cf9e61f9b9 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,6 @@ require ( github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible github.com/Shopify/toxiproxy v2.1.4+incompatible github.com/alicebob/miniredis/v2 v2.21.0 - github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 github.com/andybalholm/brotli v1.0.4 github.com/aws/aws-sdk-go-v2 v1.16.4 github.com/aws/aws-sdk-go-v2/config v1.15.5 From d023b009e9b99b087c4694cfffd481096e0668ff Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Mon, 11 Mar 2024 16:52:00 +0100 Subject: [PATCH 3/3] Rename bigcache to cache --- das/bigcache_storage_service.go | 95 ------------------- das/cache_storage_service.go | 95 +++++++++++++++++++ ..._test.go => cache_storage_service_test.go} | 24 ++--- das/das.go | 6 +- das/factory.go | 4 +- system_tests/das_test.go | 2 +- 6 files changed, 113 insertions(+), 113 deletions(-) delete mode 100644 das/bigcache_storage_service.go create mode 100644 das/cache_storage_service.go rename das/{bigcache_storage_service_test.go => cache_storage_service_test.go} (68%) diff --git a/das/bigcache_storage_service.go b/das/bigcache_storage_service.go deleted file mode 100644 index 21b70f86a9..0000000000 --- a/das/bigcache_storage_service.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2022, Offchain Labs, Inc. -// For license information, see https://github.com/nitro/blob/master/LICENSE - -package das - -import ( - "context" - "fmt" - - "github.com/offchainlabs/nitro/arbstate" - "github.com/offchainlabs/nitro/das/dastree" - "github.com/offchainlabs/nitro/util/pretty" - flag "github.com/spf13/pflag" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/lru" - "github.com/ethereum/go-ethereum/log" -) - -type BigCacheConfig struct { - Enable bool `koanf:"enable"` - Capacity int `koanf:"capacity"` -} - -var DefaultBigCacheConfig = BigCacheConfig{ - Capacity: 20_000, -} - -var TestBigCacheConfig = BigCacheConfig{ - Capacity: 1_000, -} - -func BigCacheConfigAddOptions(prefix string, f *flag.FlagSet) { - f.Bool(prefix+".enable", DefaultBigCacheConfig.Enable, "Enable local in-memory caching of sequencer batch data") - f.Int(prefix+".capacity", DefaultBigCacheConfig.Capacity, "Maximum number of entries (up to 64KB each) to store in the cache.") -} - -type BigCacheStorageService struct { - baseStorageService StorageService - cache *lru.Cache[common.Hash, []byte] -} - -func NewBigCacheStorageService(bigCacheConfig BigCacheConfig, baseStorageService StorageService) *BigCacheStorageService { - return &BigCacheStorageService{ - baseStorageService: baseStorageService, - cache: lru.NewCache[common.Hash, []byte](bigCacheConfig.Capacity), - } -} - -func (bcs *BigCacheStorageService) GetByHash(ctx context.Context, key common.Hash) ([]byte, error) { - log.Trace("das.BigCacheStorageService.GetByHash", "key", pretty.PrettyHash(key), "this", bcs) - - if val, wasCached := bcs.cache.Get(key); wasCached { - return val, nil - } - - val, err := bcs.baseStorageService.GetByHash(ctx, key) - if err != nil { - return nil, err - } - - bcs.cache.Add(key, val) - - return val, nil -} - -func (bcs *BigCacheStorageService) Put(ctx 
context.Context, value []byte, timeout uint64) error { - logPut("das.BigCacheStorageService.Put", value, timeout, bcs) - err := bcs.baseStorageService.Put(ctx, value, timeout) - if err != nil { - return err - } - bcs.cache.Add(common.Hash(dastree.Hash(value)), value) - return nil -} - -func (bcs *BigCacheStorageService) Sync(ctx context.Context) error { - return bcs.baseStorageService.Sync(ctx) -} - -func (bcs *BigCacheStorageService) Close(ctx context.Context) error { - return bcs.baseStorageService.Close(ctx) -} - -func (bcs *BigCacheStorageService) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { - return bcs.baseStorageService.ExpirationPolicy(ctx) -} - -func (bcs *BigCacheStorageService) String() string { - return fmt.Sprintf("BigCacheStorageService(size:%+v)", len(bcs.cache.Keys())) -} - -func (bcs *BigCacheStorageService) HealthCheck(ctx context.Context) error { - return bcs.baseStorageService.HealthCheck(ctx) -} diff --git a/das/cache_storage_service.go b/das/cache_storage_service.go new file mode 100644 index 0000000000..13bdb189d3 --- /dev/null +++ b/das/cache_storage_service.go @@ -0,0 +1,95 @@ +// Copyright 2022, Offchain Labs, Inc. +// For license information, see https://github.com/nitro/blob/master/LICENSE + +package das + +import ( + "context" + "fmt" + + "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/das/dastree" + "github.com/offchainlabs/nitro/util/pretty" + flag "github.com/spf13/pflag" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/lru" + "github.com/ethereum/go-ethereum/log" +) + +type CacheConfig struct { + Enable bool `koanf:"enable"` + Capacity int `koanf:"capacity"` +} + +var DefaultCacheConfig = CacheConfig{ + Capacity: 20_000, +} + +var TestCacheConfig = CacheConfig{ + Capacity: 1_000, +} + +func CacheConfigAddOptions(prefix string, f *flag.FlagSet) { + f.Bool(prefix+".enable", DefaultCacheConfig.Enable, "Enable local in-memory caching of sequencer batch data") + f.Int(prefix+".capacity", DefaultCacheConfig.Capacity, "Maximum number of entries (up to 64KB each) to store in the cache.") +} + +type CacheStorageService struct { + baseStorageService StorageService + cache *lru.Cache[common.Hash, []byte] +} + +func NewCacheStorageService(cacheConfig CacheConfig, baseStorageService StorageService) *CacheStorageService { + return &CacheStorageService{ + baseStorageService: baseStorageService, + cache: lru.NewCache[common.Hash, []byte](cacheConfig.Capacity), + } +} + +func (c *CacheStorageService) GetByHash(ctx context.Context, key common.Hash) ([]byte, error) { + log.Trace("das.CacheStorageService.GetByHash", "key", pretty.PrettyHash(key), "this", c) + + if val, wasCached := c.cache.Get(key); wasCached { + return val, nil + } + + val, err := c.baseStorageService.GetByHash(ctx, key) + if err != nil { + return nil, err + } + + c.cache.Add(key, val) + + return val, nil +} + +func (c *CacheStorageService) Put(ctx context.Context, value []byte, timeout uint64) error { + logPut("das.CacheStorageService.Put", value, timeout, c) + err := c.baseStorageService.Put(ctx, value, timeout) + if err != nil { + return err + } + c.cache.Add(common.Hash(dastree.Hash(value)), value) + return nil +} + +func (c *CacheStorageService) Sync(ctx context.Context) error { + return c.baseStorageService.Sync(ctx) +} + +func (c *CacheStorageService) Close(ctx context.Context) error { + return c.baseStorageService.Close(ctx) +} + +func (c *CacheStorageService) ExpirationPolicy(ctx context.Context) 
(arbstate.ExpirationPolicy, error) { + return c.baseStorageService.ExpirationPolicy(ctx) +} + +func (c *CacheStorageService) String() string { + return fmt.Sprintf("CacheStorageService(size:%+v)", len(c.cache.Keys())) +} + +func (c *CacheStorageService) HealthCheck(ctx context.Context) error { + return c.baseStorageService.HealthCheck(ctx) +} diff --git a/das/bigcache_storage_service_test.go b/das/cache_storage_service_test.go similarity index 68% rename from das/bigcache_storage_service_test.go rename to das/cache_storage_service_test.go index 6fe8cbc709..8b4203dab5 100644 --- a/das/bigcache_storage_service_test.go +++ b/das/cache_storage_service_test.go @@ -12,28 +12,28 @@ import ( "github.com/offchainlabs/nitro/das/dastree" ) -func TestBigCacheStorageService(t *testing.T) { +func TestCacheStorageService(t *testing.T) { ctx := context.Background() baseStorageService := NewMemoryBackedStorageService(ctx) - bigCacheService := NewBigCacheStorageService(TestBigCacheConfig, baseStorageService) + cacheService := NewCacheStorageService(TestCacheConfig, baseStorageService) val1 := []byte("The first value") val1CorrectKey := dastree.Hash(val1) val1IncorrectKey := dastree.Hash(append(val1, 0)) - _, err := bigCacheService.GetByHash(ctx, val1CorrectKey) + _, err := cacheService.GetByHash(ctx, val1CorrectKey) if !errors.Is(err, ErrNotFound) { t.Fatal(err) } - err = bigCacheService.Put(ctx, val1, 1) + err = cacheService.Put(ctx, val1, 1) Require(t, err) - _, err = bigCacheService.GetByHash(ctx, val1IncorrectKey) + _, err = cacheService.GetByHash(ctx, val1IncorrectKey) if !errors.Is(err, ErrNotFound) { t.Fatal(err) } - val, err := bigCacheService.GetByHash(ctx, val1CorrectKey) + val, err := cacheService.GetByHash(ctx, val1CorrectKey) Require(t, err) if !bytes.Equal(val, val1) { t.Fatal(val, val1) @@ -47,11 +47,11 @@ func TestBigCacheStorageService(t *testing.T) { err = baseStorageService.Put(ctx, val2, 1) Require(t, err) - _, err = bigCacheService.GetByHash(ctx, val2IncorrectKey) + _, err = cacheService.GetByHash(ctx, val2IncorrectKey) if !errors.Is(err, ErrNotFound) { t.Fatal(err) } - val, err = bigCacheService.GetByHash(ctx, val2CorrectKey) + val, err = cacheService.GetByHash(ctx, val2CorrectKey) Require(t, err) if !bytes.Equal(val, val2) { t.Fatal(val, val2) @@ -59,18 +59,18 @@ func TestBigCacheStorageService(t *testing.T) { // For Case where the value is present in the cache storage but not present in the base. emptyBaseStorageService := NewMemoryBackedStorageService(ctx) - bigCacheServiceWithEmptyBaseStorage := &BigCacheStorageService{ + cacheServiceWithEmptyBaseStorage := &CacheStorageService{ baseStorageService: emptyBaseStorageService, - cache: bigCacheService.cache, + cache: cacheService.cache, } - val, err = bigCacheServiceWithEmptyBaseStorage.GetByHash(ctx, val1CorrectKey) + val, err = cacheServiceWithEmptyBaseStorage.GetByHash(ctx, val1CorrectKey) Require(t, err) if !bytes.Equal(val, val1) { t.Fatal(val, val1) } // Closes the base storage properly. 
- err = bigCacheService.Close(ctx) + err = cacheService.Close(ctx) Require(t, err) _, err = baseStorageService.GetByHash(ctx, val1CorrectKey) if !errors.Is(err, ErrClosed) { diff --git a/das/das.go b/das/das.go index 910e511083..dd8e43a34d 100644 --- a/das/das.go +++ b/das/das.go @@ -40,8 +40,8 @@ type DataAvailabilityConfig struct { RequestTimeout time.Duration `koanf:"request-timeout"` - LocalCache BigCacheConfig `koanf:"local-cache"` - RedisCache RedisConfig `koanf:"redis-cache"` + LocalCache CacheConfig `koanf:"local-cache"` + RedisCache RedisConfig `koanf:"redis-cache"` LocalDBStorage LocalDBStorageConfig `koanf:"local-db-storage"` LocalFileStorage LocalFileStorageConfig `koanf:"local-file-storage"` @@ -109,7 +109,7 @@ func dataAvailabilityConfigAddOptions(prefix string, f *flag.FlagSet, r role) { f.Bool(prefix+".disable-signature-checking", DefaultDataAvailabilityConfig.DisableSignatureChecking, "disables signature checking on Data Availability Store requests (DANGEROUS, FOR TESTING ONLY)") // Cache options - BigCacheConfigAddOptions(prefix+".local-cache", f) + CacheConfigAddOptions(prefix+".local-cache", f) RedisConfigAddOptions(prefix+".redis-cache", f) // Storage options diff --git a/das/factory.go b/das/factory.go index c4223b8be2..d5f103e548 100644 --- a/das/factory.go +++ b/das/factory.go @@ -112,7 +112,7 @@ func WrapStorageWithCache( return nil, nil } - // Enable caches, Redis and (local) BigCache. Local is the outermost, so it will be tried first. + // Enable caches, Redis and (local) Cache. Local is the outermost, so it will be tried first. var err error if config.RedisCache.Enable { storageService, err = NewRedisStorageService(config.RedisCache, storageService) @@ -130,7 +130,7 @@ func WrapStorageWithCache( } } if config.LocalCache.Enable { - storageService = NewBigCacheStorageService(config.LocalCache, storageService) + storageService = NewCacheStorageService(config.LocalCache, storageService) lifecycleManager.Register(storageService) } return storageService, nil diff --git a/system_tests/das_test.go b/system_tests/das_test.go index 8edd91e1ec..e0d0ad6a11 100644 --- a/system_tests/das_test.go +++ b/system_tests/das_test.go @@ -256,7 +256,7 @@ func TestDASComplexConfigAndRestMirror(t *testing.T) { serverConfig := das.DataAvailabilityConfig{ Enable: true, - LocalCache: das.TestBigCacheConfig, + LocalCache: das.TestCacheConfig, LocalFileStorage: das.LocalFileStorageConfig{ Enable: true,
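
The read-through behavior introduced by this series is small enough to show in isolation. Below is a minimal, self-contained Go sketch of the same pattern the new CacheStorageService uses — check the LRU first, fall back to the slower store, and repopulate the LRU on a miss — built on go-ethereum's generic lru.Cache. The base map stands in for the wrapped StorageService and crypto.Keccak256Hash stands in for dastree.Hash; both are illustrative assumptions, not part of the patch.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/lru"
	"github.com/ethereum/go-ethereum/crypto"
)

// readThroughCache mirrors the shape of CacheStorageService: a bounded LRU
// in front of a slower backing store, keyed by content hash.
type readThroughCache struct {
	cache *lru.Cache[common.Hash, []byte]
	base  map[common.Hash][]byte // stand-in for the wrapped StorageService
}

func newReadThroughCache(capacity int) *readThroughCache {
	return &readThroughCache{
		cache: lru.NewCache[common.Hash, []byte](capacity),
		base:  make(map[common.Hash][]byte),
	}
}

// Put writes through: the value lands in the backing store and the LRU.
func (c *readThroughCache) Put(value []byte) common.Hash {
	key := crypto.Keccak256Hash(value) // dastree.Hash in the real code
	c.base[key] = value
	c.cache.Add(key, value)
	return key
}

// Get reads through: serve from the LRU when possible, otherwise fetch from
// the backing store and cache the result for the next caller.
func (c *readThroughCache) Get(key common.Hash) ([]byte, bool) {
	if val, cached := c.cache.Get(key); cached {
		return val, true
	}
	val, ok := c.base[key]
	if !ok {
		return nil, false
	}
	c.cache.Add(key, val)
	return val, true
}

func main() {
	c := newReadThroughCache(2) // tiny capacity so eviction is visible
	k1 := c.Put([]byte("first batch"))
	k2 := c.Put([]byte("second batch"))
	c.Put([]byte("third batch")) // evicts k1 from the LRU, not from base

	_, stillCached := c.cache.Get(k1)
	fmt.Println("k1 still in LRU:", stillCached) // false after eviction

	val, ok := c.Get(k1) // read-through repopulates the LRU from base
	fmt.Println(string(val), ok, "entries cached:", c.cache.Len())
	_ = k2
}

Unlike the removed bigcache wrapper, eviction here is driven purely by entry count and recency rather than by byte size or wall-clock expiration, which is why the config shrinks to a single capacity knob.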
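
On the configuration side, the former --*.expiration and --*.max-size-mb options collapse into a single --*.capacity entry count. The sketch below shows how the new options register and parse via pflag; the "data-availability.local-cache" prefix is an assumption for illustration only, since the real prefix is supplied by the caller of CacheConfigAddOptions (see the das.go hunk above).

package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	f := flag.NewFlagSet("example", flag.ContinueOnError)

	// Same registration shape as CacheConfigAddOptions; the flag prefix is
	// illustrative, not the exact one Nitro composes at runtime.
	f.Bool("data-availability.local-cache.enable", false,
		"Enable local in-memory caching of sequencer batch data")
	f.Int("data-availability.local-cache.capacity", 20_000,
		"Maximum number of entries (up to 64KB each) to store in the cache.")

	err := f.Parse([]string{
		"--data-availability.local-cache.enable",
		"--data-availability.local-cache.capacity=5000",
	})
	if err != nil {
		panic(err)
	}

	enable, _ := f.GetBool("data-availability.local-cache.enable")
	capacity, _ := f.GetInt("data-availability.local-cache.capacity")
	fmt.Println("enable:", enable, "capacity:", capacity)
}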