From 18aaec3adc5b0ec0b2588996c0a7b3f7c12417fe Mon Sep 17 00:00:00 2001 From: Adam Tucker Date: Mon, 12 Feb 2024 16:06:46 -0700 Subject: [PATCH 01/15] twap key refactor --- Makefile | 1 - app/upgrades/v17/upgrades_test.go | 16 --- proto/osmosis/twap/v1beta1/twap_record.proto | 13 +- x/twap/keeper.go | 4 +- x/twap/keeper_test.go | 6 - x/twap/listeners.go | 13 +- x/twap/listeners_test.go | 2 +- x/twap/store.go | 97 ++++++-------- x/twap/store_test.go | 110 ++++++++++++---- x/twap/types/expected_interfaces.go | 1 + x/twap/types/keys.go | 28 +---- x/twap/types/keys_test.go | 2 - x/twap/types/twap_record.pb.go | 126 ++++++++++++------- x/twap/types/twapmock/amminterface.go | 4 + 14 files changed, 228 insertions(+), 195 deletions(-) diff --git a/Makefile b/Makefile index 2ef54d93719..d0f918ea3fc 100644 --- a/Makefile +++ b/Makefile @@ -168,7 +168,6 @@ go-mock-update: mockgen -source=x/poolmanager/types/pool.go -destination=tests/mocks/pool.go -package=mocks mockgen -source=x/gamm/types/pool.go -destination=tests/mocks/cfmm_pool.go -package=mocks mockgen -source=x/concentrated-liquidity/types/cl_pool_extensionI.go -destination=tests/mocks/cl_pool.go -package=mocks - mockgen -source=ingest/sqs/domain/pools.go -destination=tests/mocks/sqs_pool.go -package=mocks -mock_names=PoolI=MockSQSPoolI ############################################################################### ### Release ### diff --git a/app/upgrades/v17/upgrades_test.go b/app/upgrades/v17/upgrades_test.go index a8667425619..db290c3bef6 100644 --- a/app/upgrades/v17/upgrades_test.go +++ b/app/upgrades/v17/upgrades_test.go @@ -218,9 +218,6 @@ func (s *UpgradeTestSuite) TestUpgrade() { clPool2TwapRecordHistoricalPoolIndexPreUpgrade, err := keepers.TwapKeeper.GetAllHistoricalPoolIndexedTWAPsForPoolId(s.Ctx, lastPoolIdMinusOne) s.Require().NoError(err) - clPoolsTwapRecordHistoricalTimeIndexPreUpgrade, err := keepers.TwapKeeper.GetAllHistoricalTimeIndexedTWAPs(s.Ctx) - s.Require().NoError(err) - // Run upgrade handler. 
dummyUpgrade(s) s.Require().NotPanics(func() { @@ -239,15 +236,11 @@ func (s *UpgradeTestSuite) TestUpgrade() { clPool2TwapRecordHistoricalPoolIndexPostUpgrade, err := keepers.TwapKeeper.GetAllHistoricalPoolIndexedTWAPsForPoolId(s.Ctx, lastPoolIdMinusOne) s.Require().NoError(err) - clPoolsTwapRecordHistoricalTimeIndexPostUpgrade, err := keepers.TwapKeeper.GetAllHistoricalTimeIndexedTWAPs(s.Ctx) - s.Require().NoError(err) - // check that all TWAP records aren't empty s.Require().NotEmpty(clPool1TwapRecordPostUpgrade) s.Require().NotEmpty(clPool1TwapRecordHistoricalPoolIndexPostUpgrade) s.Require().NotEmpty(clPool2TwapRecordPostUpgrade) s.Require().NotEmpty(clPool2TwapRecordHistoricalPoolIndexPostUpgrade) - s.Require().NotEmpty(clPoolsTwapRecordHistoricalTimeIndexPostUpgrade) for _, data := range []struct { pre, post []types.TwapRecord @@ -262,15 +255,6 @@ func (s *UpgradeTestSuite) TestUpgrade() { } } - for i := range clPoolsTwapRecordHistoricalTimeIndexPostUpgrade { - record := clPoolsTwapRecordHistoricalTimeIndexPostUpgrade[i] - if record.PoolId == lastPoolIdMinusOne || record.PoolId == lastPoolIdMinusTwo { - assertTwapFlipped(s, clPoolsTwapRecordHistoricalTimeIndexPreUpgrade[i], record) - } else if record.PoolId == lastPoolID { - assertEqual(s, clPoolsTwapRecordHistoricalTimeIndexPreUpgrade[i], record) - } - } - // Retrieve the community pool balance (and the feePool balance) after the upgrade communityPoolBalancePost := s.App.BankKeeper.GetAllBalances(s.Ctx, communityPoolAddress) feePoolCommunityPoolPost := s.App.DistrKeeper.GetFeePool(s.Ctx).CommunityPool diff --git a/proto/osmosis/twap/v1beta1/twap_record.proto b/proto/osmosis/twap/v1beta1/twap_record.proto index 89d2f527ca9..0f8a65c10fd 100644 --- a/proto/osmosis/twap/v1beta1/twap_record.proto +++ b/proto/osmosis/twap/v1beta1/twap_record.proto @@ -2,9 +2,6 @@ syntax = "proto3"; package osmosis.twap.v1beta1; import "gogoproto/gogo.proto"; -import "google/protobuf/any.proto"; -import "cosmos_proto/cosmos.proto"; -import "cosmos/base/v1beta1/coin.proto"; import "google/protobuf/timestamp.proto"; option go_package = "github.com/osmosis-labs/osmosis/v23/x/twap/types"; @@ -90,7 +87,11 @@ message PruningState { (gogoproto.stdtime) = true, (gogoproto.moretags) = "yaml:\"last_kept_time\"" ]; - // last_key_seen is the last key of the TWAP records that were pruned - // before reaching the block's prune limit - bytes last_key_seen = 3; + // Deprecated: This field is deprecated. + bytes last_key_seen = 3 [ deprecated = true ]; + // last_seen_pool_id is the pool_id that we will begin pruning in the next + // block. This value starts at the highest pool_id at time of epoch, and + // decreases until it reaches 1. When it reaches 1, the pruning + // process is complete. + uint64 last_seen_pool_id = 4; } diff --git a/x/twap/keeper.go b/x/twap/keeper.go index 5f7f29f8b3e..e0ed12672d4 100644 --- a/x/twap/keeper.go +++ b/x/twap/keeper.go @@ -79,9 +79,7 @@ func (k Keeper) InitGenesis(ctx sdk.Context, genState *types.GenesisState) { // ExportGenesis returns the twap module's exported genesis. func (k Keeper) ExportGenesis(ctx sdk.Context) *types.GenesisState { - // These are ordered in increasing order, guaranteed by the iterator - // that is prefixed by time. 
- twapRecords, err := k.GetAllHistoricalTimeIndexedTWAPs(ctx) + twapRecords, err := k.getAllHistoricalPoolIndexedTWAPs(ctx) if err != nil { panic(err) } diff --git a/x/twap/keeper_test.go b/x/twap/keeper_test.go index ca17b258367..de432b5f5e9 100644 --- a/x/twap/keeper_test.go +++ b/x/twap/keeper_test.go @@ -341,12 +341,6 @@ func (s *TestSuite) getAllHistoricalRecordsForPool(poolId uint64) []types.TwapRe func (s *TestSuite) validateExpectedRecords(expectedRecords []types.TwapRecord) { twapKeeper := s.twapkeeper - // validate that the time indexed TWAPs are cleared. - timeIndexedTwaps, err := twapKeeper.GetAllHistoricalTimeIndexedTWAPs(s.Ctx) - s.Require().NoError(err) - s.Require().Len(timeIndexedTwaps, len(expectedRecords)) - s.Require().Equal(timeIndexedTwaps, expectedRecords) - // validate that the pool indexed TWAPs are cleared. poolIndexedTwaps, err := twapKeeper.GetAllHistoricalPoolIndexedTWAPs(s.Ctx) s.Require().NoError(err) diff --git a/x/twap/listeners.go b/x/twap/listeners.go index 81efcc7ddf2..390723473d1 100644 --- a/x/twap/listeners.go +++ b/x/twap/listeners.go @@ -26,11 +26,14 @@ func (k Keeper) EpochHooks() epochtypes.EpochHooks { func (hook *epochhook) AfterEpochEnd(ctx sdk.Context, epochIdentifier string, epochNumber int64) error { if epochIdentifier == hook.k.PruneEpochIdentifier(ctx) { lastKeptTime := ctx.BlockTime().Add(-hook.k.RecordHistoryKeepPeriod(ctx)) - hook.k.SetPruningState(ctx, types.PruningState{ - IsPruning: true, - LastKeptTime: lastKeptTime, - LastKeySeen: types.FormatHistoricalTimeIndexTWAPKey(lastKeptTime, 0, "", ""), - }) + poolIdToStartFrom := hook.k.poolmanagerKeeper.GetNextPoolId(ctx) - 1 + if poolIdToStartFrom > 0 { + hook.k.SetPruningState(ctx, types.PruningState{ + IsPruning: true, + LastKeptTime: lastKeptTime, + LastSeenPoolId: poolIdToStartFrom, + }) + } } return nil } diff --git a/x/twap/listeners_test.go b/x/twap/listeners_test.go index 3d5c470bb09..aafc105f591 100644 --- a/x/twap/listeners_test.go +++ b/x/twap/listeners_test.go @@ -257,7 +257,7 @@ func (s *TestSuite) TestAfterEpochEnd() { s.twapkeeper.StoreNewRecord(s.Ctx, newestRecord) - twapsBeforeEpoch, err := s.twapkeeper.GetAllHistoricalTimeIndexedTWAPs(s.Ctx) + twapsBeforeEpoch, err := s.twapkeeper.GetAllHistoricalPoolIndexedTWAPs(s.Ctx) s.Require().NoError(err) s.Require().Equal(2, len(twapsBeforeEpoch)) diff --git a/x/twap/store.go b/x/twap/store.go index ee7f1165ee5..d62e898c127 100644 --- a/x/twap/store.go +++ b/x/twap/store.go @@ -64,10 +64,8 @@ func (k Keeper) getChangedPools(ctx sdk.Context) []uint64 { // storeHistoricalTWAP writes a twap to the store, in all needed indexing. 
func (k Keeper) StoreHistoricalTWAP(ctx sdk.Context, twap types.TwapRecord) { store := ctx.KVStore(k.storeKey) - key1 := types.FormatHistoricalTimeIndexTWAPKey(twap.Time, twap.PoolId, twap.Asset0Denom, twap.Asset1Denom) - key2 := types.FormatHistoricalPoolIndexTWAPKey(twap.PoolId, twap.Asset0Denom, twap.Asset1Denom, twap.Time) - osmoutils.MustSet(store, key1, &twap) - osmoutils.MustSet(store, key2, &twap) + key := types.FormatHistoricalPoolIndexTWAPKey(twap.PoolId, twap.Asset0Denom, twap.Asset1Denom, twap.Time) + osmoutils.MustSet(store, key, &twap) } // pruneRecordsBeforeTimeButNewest prunes all records for each pool before the given time but the newest @@ -93,52 +91,45 @@ func (k Keeper) StoreHistoricalTWAP(ctx sdk.Context, twap types.TwapRecord) { func (k Keeper) pruneRecordsBeforeTimeButNewest(ctx sdk.Context, state types.PruningState) error { store := ctx.KVStore(k.storeKey) - // Reverse iterator guarantees that we iterate through the newest per pool first. - // Due to how it is indexed, we will only iterate times starting from - // lastKeptTime exclusively down to the oldest record. - iter := store.ReverseIterator( - []byte(types.HistoricalTWAPTimeIndexPrefix), - state.LastKeySeen) - defer iter.Close() - - // We mark what (pool id, asset 0, asset 1) triplets we've seen. - // We prune all records for a triplet that we haven't already seen. - type uniqueTriplet struct { - poolId uint64 - asset0 string - asset1 string - } - seenPoolAssetTriplets := map[uniqueTriplet]struct{}{} - var numPruned uint16 + var lastPoolIdCompleted uint64 - for ; iter.Valid(); iter.Next() { - timeIndexKey := iter.Key() - timeS, poolId, asset0, asset1, err := types.ParseFieldsFromHistoricalTimeKey(timeIndexKey) + for poolId := state.LastSeenPoolId; poolId > 0; poolId-- { + denoms, err := k.poolmanagerKeeper.RouteGetPoolDenoms(ctx, poolId) if err != nil { return err } - poolKey := uniqueTriplet{ - poolId, - asset0, - asset1, + // Notice, even if ranging over denomPairs takes us over the prune per block limit, + // we still continue to iterate over all denom pairs of the pool. + // This simplifies logic so that we can consider a pool "done" once we start it. + // It also prevents choosing between keeping more records for a pool than we need to, + // and having to store more state in the pruning state. + denomPairs := types.GetAllUniqueDenomPairs(denoms) + for _, denomPair := range denomPairs { + // Reverse iterator guarantees that we iterate through the newest per pool first. + // Due to how it is indexed, we will only iterate times starting from + // lastKeptTime exclusively down to the oldest record. + iter := store.ReverseIterator( + types.FormatHistoricalPoolIndexDenomPairTWAPKey(poolId, denomPair.Denom0, denomPair.Denom1), + types.FormatHistoricalPoolIndexTWAPKey(poolId, denomPair.Denom0, denomPair.Denom1, state.LastKeptTime)) + defer iter.Close() + + firstIteration := true + for ; iter.Valid(); iter.Next() { + if !firstIteration { + // We have stored the newest record, so we can prune the rest. + timeIndexKey := iter.Key() + store.Delete(timeIndexKey) + numPruned += 1 + } else { + // If this is the first iteration after we have gotten through the records after lastKeptTime, we + // still keep the record in order to allow interpolation (see function description for more details). 
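+				// For example (hypothetical values), with records at t=9s and t=13s and
+				// lastKeptTime=10s, the t=9s record is the newest record before lastKeptTime;
+				// keeping it lets a TWAP query starting at t=10s interpolate from it.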
+ firstIteration = false + } + } } - _, hasSeenPoolRecord := seenPoolAssetTriplets[poolKey] - if !hasSeenPoolRecord { - seenPoolAssetTriplets[poolKey] = struct{}{} - continue - } - - // Now we need to delete the historical record, formatted by both historical time and pool index. - // We already are iterating over the historical time index, so we delete that key. Then we - // reformat the key to delete the historical pool index key. - store.Delete(timeIndexKey) - poolIndexKey := types.FormatHistoricalPoolIndexTWAPKeyFromStrTime(poolId, asset0, asset1, timeS) - store.Delete(poolIndexKey) - - // Increment the number of records pruned by 2, since we delete two records per iteration. - numPruned += 2 + lastPoolIdCompleted = poolId if numPruned >= NumRecordsToPrunePerBlock { // We have hit the limit, so we stop pruning. @@ -146,13 +137,13 @@ func (k Keeper) pruneRecordsBeforeTimeButNewest(ctx sdk.Context, state types.Pru } } - if !iter.Valid() { - // The iterator is exhausted, so we have pruned all records. + if lastPoolIdCompleted == 1 { + // We have pruned all records. state.IsPruning = false k.SetPruningState(ctx, state) } else { - // We have not pruned all records, so we update the last key seen. - state.LastKeySeen = iter.Key() + // We have not pruned all records, so we update the last seen pool id as the pool ID after the last completed pool. + state.LastSeenPoolId = lastPoolIdCompleted - 1 k.SetPruningState(ctx, state) } return nil @@ -160,10 +151,8 @@ func (k Keeper) pruneRecordsBeforeTimeButNewest(ctx sdk.Context, state types.Pru func (k Keeper) DeleteHistoricalRecord(ctx sdk.Context, twap types.TwapRecord) { store := ctx.KVStore(k.storeKey) - key1 := types.FormatHistoricalTimeIndexTWAPKey(twap.Time, twap.PoolId, twap.Asset0Denom, twap.Asset1Denom) - key2 := types.FormatHistoricalPoolIndexTWAPKey(twap.PoolId, twap.Asset0Denom, twap.Asset1Denom, twap.Time) - store.Delete(key1) - store.Delete(key2) + key := types.FormatHistoricalPoolIndexTWAPKey(twap.PoolId, twap.Asset0Denom, twap.Asset1Denom, twap.Time) + store.Delete(key) } // getMostRecentRecordStoreRepresentation returns the most recent twap record in the store @@ -211,13 +200,7 @@ func (k Keeper) GetAllMostRecentRecordsForPoolWithDenoms(ctx sdk.Context, poolId return []types.TwapRecord{record}, err } -// getAllHistoricalTimeIndexedTWAPs returns all historical TWAPs indexed by time. -func (k Keeper) GetAllHistoricalTimeIndexedTWAPs(ctx sdk.Context) ([]types.TwapRecord, error) { - return osmoutils.GatherValuesFromStorePrefix(ctx.KVStore(k.storeKey), []byte(types.HistoricalTWAPTimeIndexPrefix), types.ParseTwapFromBz) -} - // getAllHistoricalPoolIndexedTWAPs returns all historical TWAPs indexed by pool id. 
-// nolint: unused
 func (k Keeper) getAllHistoricalPoolIndexedTWAPs(ctx sdk.Context) ([]types.TwapRecord, error) {
 	return osmoutils.GatherValuesFromStorePrefix(ctx.KVStore(k.storeKey), []byte(types.HistoricalTWAPPoolIndexPrefix), types.ParseTwapFromBz)
 }
diff --git a/x/twap/store_test.go b/x/twap/store_test.go
index 51450b8f2be..5ddc20861d3 100644
--- a/x/twap/store_test.go
+++ b/x/twap/store_test.go
@@ -5,13 +5,21 @@ import (
 	"math"
 	"time"
+	sdk "github.com/cosmos/cosmos-sdk/types"
+
 	"github.com/osmosis-labs/osmosis/osmomath"
 	"github.com/osmosis-labs/osmosis/v23/x/twap"
 	gammtypes "github.com/osmosis-labs/osmosis/v23/x/gamm/types"
+	poolmanagertypes "github.com/osmosis-labs/osmosis/v23/x/poolmanager/types"
 	"github.com/osmosis-labs/osmosis/v23/x/twap/types"
 )
+var (
+	twoAssetPoolCoins = sdk.NewCoins(sdk.NewInt64Coin(denom0, 1000000000), sdk.NewInt64Coin(denom1, 1000000000))
+	muliAssetPoolCoins = sdk.NewCoins(sdk.NewInt64Coin(denom0, 1000000000), sdk.NewInt64Coin(denom1, 1000000000), sdk.NewInt64Coin(denom2, 1000000000))
+)
+
 // TestTrackChangedPool takes a list of poolIds as test cases, and runs one list per block.
 // Every simulated block, checks that there are no changed pools.
 // Then runs k.trackChangedPool on every item in the test case list.
@@ -509,27 +517,48 @@ func (s *TestSuite) TestPruneRecordsBeforeTimeButNewest() {
 expectedKeptRecords: []types.TwapRecord{},
 },
- "base time; across pool 3; 4 records; 3 before lastKeptTime; only 1 deleted due to limit set to 1": {
+ "base time; across pool 3 and pool 5; pool 3: 4 total records; 3 before lastKeptTime; all 4 kept due to pool with larger ID hitting limit. pool 5: 24 total records; 12 before lastKeptTime; 12 deleted and 12 kept": {
 recordsToPreSet: []types.TwapRecord{
+ pool3BaseSecMin3Ms, // base time - 3ms; in queue for deletion
+ pool3BaseSecMin2Ms, // base time - 2ms; in queue for deletion
 pool3BaseSecMin1Ms, // base time - 1ms; kept since newest before lastKeptTime
 pool3BaseSecBaseMs, // base time; kept since at lastKeptTime
- pool3BaseSecMin3Ms, // base time - 3ms; in queue for deletion
- pool3BaseSecMin2Ms, // base time - 2ms; deleted
+
+ pool5Min2SBaseMsAB, pool5Min2SBaseMsAC, pool5Min2SBaseMsBC, // base time - 2s; deleted
+ pool5Min1SBaseMsAB, pool5Min1SBaseMsAC, pool5Min1SBaseMsBC, // base time - 1s; deleted
+ pool5BaseSecBaseMsAB, pool5BaseSecBaseMsAC, pool5BaseSecBaseMsBC, // base time; kept since at lastKeptTime
+ pool5Plus1SBaseMsAB, pool5Plus1SBaseMsAC, pool5Plus1SBaseMsBC, // base time + 1s; kept since newer than lastKeptTime
+
+ pool5Min2SMin1MsAB, pool5Min2SMin1MsAC, pool5Min2SMin1MsBC, // base time - 2s - 1ms; deleted
+ pool5Min1SMin1MsAB, pool5Min1SMin1MsAC, pool5Min1SMin1MsBC, // base time - 1s - 1ms; deleted
+ pool5BaseSecMin1MsAB, pool5BaseSecMin1MsAC, pool5BaseSecMin1MsBC, // base time - 1ms; kept since newest before lastKeptTime
+ pool5Plus1SMin1MsAB, pool5Plus1SMin1MsAC, pool5Plus1SMin1MsBC, // base time + 1s - 1ms; kept since newer than lastKeptTime
 },
 lastKeptTime: baseTime,
- expectedKeptRecords: []types.TwapRecord{pool3BaseSecMin3Ms, pool3BaseSecMin1Ms, pool3BaseSecBaseMs},
+ expectedKeptRecords: []types.TwapRecord{
+ pool3BaseSecMin3Ms,
+ pool3BaseSecMin2Ms,
+ pool3BaseSecMin1Ms,
+ pool5BaseSecMin1MsAB, pool5BaseSecMin1MsAC, pool5BaseSecMin1MsBC,
+ pool3BaseSecBaseMs,
+ pool5BaseSecBaseMsAB, pool5BaseSecBaseMsAC, pool5BaseSecBaseMsBC,
+ pool5Plus1SMin1MsAB, pool5Plus1SMin1MsAC, pool5Plus1SMin1MsBC,
+ pool5Plus1SBaseMsAB, pool5Plus1SBaseMsAC, pool5Plus1SBaseMsBC,
+ },
- overwriteLimit: 1,
+
overwriteLimit: 5, // notice that despite this being set to 5, we delete all 12 entries for pool 5. We always complete a pool's pruning once we start it for sake of simplicity. }, } for name, tc := range tests { s.Run(name, func() { s.SetupTest() + poolCoins := []sdk.Coins{twoAssetPoolCoins, muliAssetPoolCoins, twoAssetPoolCoins, twoAssetPoolCoins, muliAssetPoolCoins} + s.prepPoolsAndRemoveRecords(poolCoins) + s.preSetRecords(tc.recordsToPreSet) - ctx := s.Ctx twapKeeper := s.twapkeeper if tc.overwriteLimit != 0 { @@ -541,12 +570,12 @@ func (s *TestSuite) TestPruneRecordsBeforeTimeButNewest() { } state := types.PruningState{ - IsPruning: true, - LastKeptTime: tc.lastKeptTime, - LastKeySeen: types.FormatHistoricalTimeIndexTWAPKey(tc.lastKeptTime, 0, "", ""), + IsPruning: true, + LastKeptTime: tc.lastKeptTime, + LastSeenPoolId: s.App.PoolManagerKeeper.GetNextPoolId(s.Ctx) - 1, } - err := twapKeeper.PruneRecordsBeforeTimeButNewest(ctx, state) + err := twapKeeper.PruneRecordsBeforeTimeButNewest(s.Ctx, state) s.Require().NoError(err) s.validateExpectedRecords(tc.expectedKeptRecords) @@ -556,6 +585,11 @@ func (s *TestSuite) TestPruneRecordsBeforeTimeButNewest() { // TestPruneRecordsBeforeTimeButNewestPerBlock tests TWAP record pruning logic over multiple blocks. func (s *TestSuite) TestPruneRecordsBeforeTimeButNewestPerBlock() { + s.SetupTest() + + poolCoins := []sdk.Coins{twoAssetPoolCoins, muliAssetPoolCoins, twoAssetPoolCoins, twoAssetPoolCoins, muliAssetPoolCoins} + s.prepPoolsAndRemoveRecords(poolCoins) + // N.B.: the records follow the following naming convention: // // These are manually created to be able to refer to them by name @@ -585,8 +619,7 @@ func (s *TestSuite) TestPruneRecordsBeforeTimeButNewestPerBlock() { pool5BaseSecMin1MsAB, pool5BaseSecMin1MsAC, pool5BaseSecMin1MsBC, pool5Plus1SMin1MsAB, pool5Plus1SMin1MsAC, pool5Plus1SMin1MsBC := s.CreateTestRecordsFromTimeInPool(baseTime.Add(-time.Millisecond), 5) - s.SetupTest() - + // 48 records recordsToPreSet := []types.TwapRecord{ pool3BaseSecMin3Ms, // base time - 3ms; kept since older pool3BaseSecMin2Ms, // base time - 2ms; kept since older @@ -620,13 +653,13 @@ func (s *TestSuite) TestPruneRecordsBeforeTimeButNewestPerBlock() { } s.preSetRecords(recordsToPreSet) - twap.NumRecordsToPrunePerBlock = 6 // 3 records max will be pruned per block + twap.NumRecordsToPrunePerBlock = 3 // 3 records max will be pruned per block lastKeptTime := baseTime.Add(-time.Second).Add(2 * -time.Millisecond) state := types.PruningState{ - IsPruning: true, - LastKeptTime: lastKeptTime, - LastKeySeen: types.FormatHistoricalTimeIndexTWAPKey(lastKeptTime, 0, "", ""), + IsPruning: true, + LastKeptTime: lastKeptTime, + LastSeenPoolId: s.App.PoolManagerKeeper.GetNextPoolId(s.Ctx) - 1, } // Block 1 @@ -637,15 +670,8 @@ func (s *TestSuite) TestPruneRecordsBeforeTimeButNewestPerBlock() { newPruningState := s.twapkeeper.GetPruningState(s.Ctx) s.Require().Equal(true, newPruningState.IsPruning) s.Require().Equal(lastKeptTime, newPruningState.LastKeptTime) - timeS, poolId, asset0, asset1, err := types.ParseFieldsFromHistoricalTimeKey(newPruningState.LastKeySeen) - s.Require().NoError(err) - - // The last key seen is the third record we delete, since we prune 3 records per block. 
- s.Require().Equal(pool5Min2SMin1MsAB.Time.Format("2006-01-02T15:04:05.000000000"), timeS)
- s.Require().Equal(pool5Min2SMin1MsAB.PoolId, poolId)
- s.Require().Equal(pool5Min2SMin1MsAB.Asset0Denom, asset0)
- s.Require().Equal(pool5Min2SMin1MsAB.Asset1Denom, asset1)
+ // 46 records
 expectedKeptRecords := []types.TwapRecord{
 pool1Min2SMin3Ms, // base time - 2s - 3ms; in queue to be deleted
 pool1Min2SMin2Ms, // base time - 2s - 2ms; in queue to be deleted
@@ -677,12 +703,12 @@ func (s *TestSuite) TestPruneRecordsBeforeTimeButNewestPerBlock() {
 err = s.twapkeeper.PruneRecordsBeforeTimeButNewest(s.Ctx, newPruningState)
 s.Require().NoError(err)
- // Pruning state should show now show pruning is false
+ // Pruning state should still be true because pool 3 brought us to the pruning limit, despite no more records needing to be pruned.
 newPruningState = s.twapkeeper.GetPruningState(s.Ctx)
- s.Require().Equal(false, newPruningState.IsPruning)
+ s.Require().Equal(true, newPruningState.IsPruning)
+ // 42 records
 expectedKeptRecords = []types.TwapRecord{
- pool1Min2SMin1Ms, // mistakenly kept, is fine though
 pool1Min2SBaseMs, // base time - 2s; kept since newest before lastKeptTime
 pool5Min2SBaseMsAB, pool5Min2SBaseMsAC, pool5Min2SBaseMsBC, // base time - 2s; kept since newest before lastKeptTime
 pool2Min1SMin3MsAB, pool2Min1SMin3MsAC, pool2Min1SMin3MsBC, // base time - 1s - 3ms; kept since newest before lastKeptTime
@@ -706,6 +732,17 @@ }
 s.validateExpectedRecords(expectedKeptRecords)
+
+ // Block 3
+ err = s.twapkeeper.PruneRecordsBeforeTimeButNewest(s.Ctx, newPruningState)
+ s.Require().NoError(err)
+
+ // Pruning state should now be false since we've iterated through all the records.
+ newPruningState = s.twapkeeper.GetPruningState(s.Ctx)
+ s.Require().Equal(false, newPruningState.IsPruning)
+
+ // Records don't change from last block since there were no more records to prune.
+ s.validateExpectedRecords(expectedKeptRecords)
 }
 func (s *TestSuite) TestGetAllHistoricalTimeIndexedTWAPs() {
@@ -870,3 +907,22 @@ func (s *TestSuite) TestGetAllHistoricalPoolIndexedTWAPsForPooId() {
 })
 }
}
+
+// prepPoolsAndRemoveRecords creates pools and then removes the records that get created
+// at time of pool creation. This method is used to simplify tests. Pruning logic
+// now requires we pull the underlying denoms from pools as well as the last pool ID.
+// This method lets us create these state entries while keeping the existing test structure.
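+// Note: pool IDs are assigned sequentially at creation, so creating len(poolCoins)
+// pools here is what determines the GetNextPoolId value the pruning tests read when
+// initializing PruningState.LastSeenPoolId.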
+func (s *TestSuite) prepPoolsAndRemoveRecords(poolCoins []sdk.Coins) { + for _, coins := range poolCoins { + s.CreatePoolFromTypeWithCoins(poolmanagertypes.Balancer, coins) + } + + twapStoreKey := s.App.AppKeepers.GetKey(types.StoreKey) + store := s.Ctx.KVStore(twapStoreKey) + iter := sdk.KVStoreReversePrefixIterator(store, []byte(types.HistoricalTWAPPoolIndexPrefix)) + defer iter.Close() + for iter.Valid() { + store.Delete(iter.Key()) + iter.Next() + } +} diff --git a/x/twap/types/expected_interfaces.go b/x/twap/types/expected_interfaces.go index a767b8ee94b..f723b82b5e0 100644 --- a/x/twap/types/expected_interfaces.go +++ b/x/twap/types/expected_interfaces.go @@ -19,4 +19,5 @@ type PoolManagerInterface interface { quoteAssetDenom string, baseAssetDenom string, ) (price osmomath.BigDec, err error) + GetNextPoolId(ctx sdk.Context) uint64 } diff --git a/x/twap/types/keys.go b/x/twap/types/keys.go index 5a3d00af499..66f302ae50f 100644 --- a/x/twap/types/keys.go +++ b/x/twap/types/keys.go @@ -4,7 +4,6 @@ import ( "bytes" "errors" "fmt" - "strconv" time "time" sdk "github.com/cosmos/cosmos-sdk/types" @@ -29,7 +28,6 @@ const ( var ( PruningStateKey = []byte{0x01} mostRecentTWAPsNoSeparator = "recent_twap" - historicalTWAPTimeIndexNoSeparator = "historical_time_index" historicalTWAPPoolIndexNoSeparator = "historical_pool_index" // We do key management to let us easily meet the goals of (AKA minimal iteration): @@ -40,9 +38,6 @@ var ( // format is just pool id | denom1 | denom2 // made for getting most recent key mostRecentTWAPsPrefix = mostRecentTWAPsNoSeparator + KeySeparator - // format is time | pool id | denom1 | denom2 - // made for efficiently deleting records by time in pruning - HistoricalTWAPTimeIndexPrefix = historicalTWAPTimeIndexNoSeparator + KeySeparator // format is pool id | denom1 | denom2 | time // made for efficiently getting records given (pool id, denom1, denom2) and time bounds HistoricalTWAPPoolIndexPrefix = historicalTWAPPoolIndexNoSeparator + KeySeparator @@ -59,11 +54,9 @@ func FormatMostRecentTWAPKey(poolId uint64, denom1, denom2 string) []byte { return []byte(fmt.Sprintf("%s%s%s%s%s%s", mostRecentTWAPsPrefix, poolIdS, KeySeparator, denom1, KeySeparator, denom2)) } -// TODO: Replace historical management with ORM, we currently accept 2x write amplification right now. 
-func FormatHistoricalTimeIndexTWAPKey(accumulatorWriteTime time.Time, poolId uint64, denom1, denom2 string) []byte { +func FormatHistoricalPoolIndexDenomPairTWAPKey(poolId uint64, denom1, denom2 string) []byte { var buffer bytes.Buffer - timeS := osmoutils.FormatTimeString(accumulatorWriteTime) - fmt.Fprintf(&buffer, "%s%s%s%d%s%s%s%s", HistoricalTWAPTimeIndexPrefix, timeS, KeySeparator, poolId, KeySeparator, denom1, KeySeparator, denom2) + fmt.Fprintf(&buffer, "%s%d%s%s%s%s", HistoricalTWAPPoolIndexPrefix, poolId, KeySeparator, denom1, KeySeparator, denom2) return buffer.Bytes() } @@ -78,23 +71,6 @@ func FormatHistoricalPoolIndexTWAPKeyFromStrTime(poolId uint64, denom1, denom2 s return buffer.Bytes() } -// returns timeString, poolIdString, denom1, denom2, error -// nolint: revive -func ParseFieldsFromHistoricalTimeKey(bz []byte) (string, uint64, string, string, error) { - split := bytes.Split(bz, []byte(KeySeparator)) - if len(split) != 5 { - return "", 0, "", "", errors.New("invalid key") - } - timeS := string(split[1]) - poolId, err := strconv.Atoi(string(split[2])) - if err != nil { - return "", 0, "", "", err - } - denom1 := string(split[3]) - denom2 := string(split[4]) - return timeS, uint64(poolId), denom1, denom2, err -} - func FormatHistoricalPoolIndexTimePrefix(poolId uint64, denom1, denom2 string) []byte { return []byte(fmt.Sprintf("%s%d%s%s%s%s%s", HistoricalTWAPPoolIndexPrefix, poolId, KeySeparator, denom1, KeySeparator, denom2, KeySeparator)) } diff --git a/x/twap/types/keys_test.go b/x/twap/types/keys_test.go index 105946e6dd8..3e2b92af541 100644 --- a/x/twap/types/keys_test.go +++ b/x/twap/types/keys_test.go @@ -46,9 +46,7 @@ func TestFormatHistoricalTwapKeys(t *testing.T) { } for name, tt := range tests { t.Run(name, func(t *testing.T) { - gotTimeKey := FormatHistoricalTimeIndexTWAPKey(tt.time, tt.poolId, tt.denom1, tt.denom2) gotPoolKey := FormatHistoricalPoolIndexTWAPKey(tt.poolId, tt.denom1, tt.denom2, tt.time) - require.Equal(t, tt.wantTimeIndex, string(gotTimeKey)) require.Equal(t, tt.wantPoolIndex, string(gotPoolKey)) poolIndexPrefix := FormatHistoricalPoolIndexTimePrefix(tt.poolId, tt.denom1, tt.denom2) diff --git a/x/twap/types/twap_record.pb.go b/x/twap/types/twap_record.pb.go index b3f4b9499d5..bea64eab758 100644 --- a/x/twap/types/twap_record.pb.go +++ b/x/twap/types/twap_record.pb.go @@ -6,9 +6,6 @@ package types import ( cosmossdk_io_math "cosmossdk.io/math" fmt "fmt" - _ "github.com/cosmos/cosmos-proto" - _ "github.com/cosmos/cosmos-sdk/codec/types" - _ "github.com/cosmos/cosmos-sdk/types" _ "github.com/cosmos/gogoproto/gogoproto" proto "github.com/cosmos/gogoproto/proto" github_com_cosmos_gogoproto_types "github.com/cosmos/gogoproto/types" @@ -148,9 +145,13 @@ type PruningState struct { // This is used to determine all TWAP records that are older than // last_kept_time and should be pruned. LastKeptTime time.Time `protobuf:"bytes,2,opt,name=last_kept_time,json=lastKeptTime,proto3,stdtime" json:"last_kept_time" yaml:"last_kept_time"` - // last_key_seen is the last key of the TWAP records that were pruned - // before reaching the block's prune limit - LastKeySeen []byte `protobuf:"bytes,3,opt,name=last_key_seen,json=lastKeySeen,proto3" json:"last_key_seen,omitempty"` + // Deprecated: This field is deprecated. + LastKeySeen []byte `protobuf:"bytes,3,opt,name=last_key_seen,json=lastKeySeen,proto3" json:"last_key_seen,omitempty"` // Deprecated: Do not use. + // last_seen_pool_id is the pool_id that we will begin pruning in the next + // block. 
This value starts at the highest pool_id at time of epoch, and + // decreases until it reaches 1. When it reaches 1, the pruning + // process is complete. + LastSeenPoolId uint64 `protobuf:"varint,4,opt,name=last_seen_pool_id,json=lastSeenPoolId,proto3" json:"last_seen_pool_id,omitempty"` } func (m *PruningState) Reset() { *m = PruningState{} } @@ -200,6 +201,7 @@ func (m *PruningState) GetLastKeptTime() time.Time { return time.Time{} } +// Deprecated: Do not use. func (m *PruningState) GetLastKeySeen() []byte { if m != nil { return m.LastKeySeen @@ -207,6 +209,13 @@ func (m *PruningState) GetLastKeySeen() []byte { return nil } +func (m *PruningState) GetLastSeenPoolId() uint64 { + if m != nil { + return m.LastSeenPoolId + } + return 0 +} + func init() { proto.RegisterType((*TwapRecord)(nil), "osmosis.twap.v1beta1.TwapRecord") proto.RegisterType((*PruningState)(nil), "osmosis.twap.v1beta1.PruningState") @@ -217,46 +226,46 @@ func init() { } var fileDescriptor_dbf5c78678e601aa = []byte{ - // 624 bytes of a gzipped FileDescriptorProto + // 621 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x4d, 0x4f, 0xdb, 0x40, - 0x10, 0x8d, 0x81, 0x06, 0xd8, 0x84, 0x22, 0x59, 0xb4, 0x75, 0x83, 0x6a, 0x07, 0x57, 0xaa, 0xd2, - 0x43, 0xfd, 0x01, 0xb7, 0xde, 0x88, 0xe8, 0xa1, 0x2d, 0xaa, 0x90, 0xe1, 0xd4, 0x8b, 0xb5, 0x76, - 0x16, 0x7b, 0x85, 0xed, 0x5d, 0x79, 0x37, 0x50, 0xff, 0x0b, 0x7e, 0x4d, 0xef, 0xbd, 0x71, 0xe4, - 0x58, 0xf5, 0x90, 0x56, 0xe4, 0xd6, 0x23, 0xbf, 0xa0, 0xda, 0x5d, 0x27, 0x25, 0xe9, 0x07, 0x70, - 0xf3, 0xcc, 0xbc, 0x79, 0xef, 0x8d, 0x77, 0x76, 0xc1, 0x0b, 0xc2, 0x72, 0xc2, 0x30, 0x73, 0xf9, - 0x19, 0xa4, 0xee, 0xa9, 0x1f, 0x21, 0x0e, 0x7d, 0x19, 0x84, 0x25, 0x8a, 0x49, 0x39, 0x70, 0x68, - 0x49, 0x38, 0xd1, 0x37, 0x6a, 0x9c, 0x23, 0x4a, 0x4e, 0x8d, 0xeb, 0x6c, 0x24, 0x24, 0x21, 0x12, - 0xe0, 0x8a, 0x2f, 0x85, 0xed, 0x3c, 0x4d, 0x08, 0x49, 0x32, 0xe4, 0xca, 0x28, 0x1a, 0x1e, 0xbb, - 0xb0, 0xa8, 0x26, 0xa5, 0x58, 0xf2, 0x84, 0xaa, 0x47, 0x05, 0x75, 0xc9, 0x54, 0x91, 0x1b, 0x41, - 0x86, 0xa6, 0x46, 0x62, 0x82, 0x8b, 0xba, 0x6e, 0xcd, 0xb3, 0x72, 0x9c, 0x23, 0xc6, 0x61, 0x4e, - 0x15, 0xc0, 0xfe, 0xd2, 0x04, 0xe0, 0xe8, 0x0c, 0xd2, 0x40, 0xfa, 0xd6, 0x9f, 0x80, 0x65, 0x4a, - 0x48, 0x16, 0xe2, 0x81, 0xa1, 0x75, 0xb5, 0xde, 0x52, 0xd0, 0x14, 0xe1, 0xdb, 0x81, 0xbe, 0x05, - 0xda, 0x90, 0x31, 0xc4, 0xbd, 0x70, 0x80, 0x0a, 0x92, 0x1b, 0x0b, 0x5d, 0xad, 0xb7, 0x1a, 0xb4, - 0x54, 0x6e, 0x4f, 0xa4, 0xa6, 0x10, 0xbf, 0x86, 0x2c, 0xde, 0x80, 0xf8, 0x0a, 0xb2, 0x0b, 0x9a, - 0x29, 0xc2, 0x49, 0xca, 0x8d, 0xa5, 0xae, 0xd6, 0x5b, 0xec, 0xbf, 0xfc, 0x39, 0xb2, 0xd6, 0xd4, - 0x2f, 0x0b, 0x55, 0xe1, 0x7a, 0x64, 0x6d, 0x54, 0x30, 0xcf, 0x5e, 0xdb, 0x33, 0x69, 0x3b, 0xa8, - 0x1b, 0xf5, 0x0f, 0x60, 0x49, 0xcc, 0x60, 0x3c, 0xe8, 0x6a, 0xbd, 0xd6, 0x76, 0xc7, 0x51, 0x03, - 0x3a, 0x93, 0x01, 0x9d, 0xa3, 0xc9, 0x80, 0x7d, 0xf3, 0x62, 0x64, 0x35, 0xae, 0x47, 0x96, 0x3e, - 0xc3, 0x27, 0x9a, 0xed, 0xf3, 0xef, 0x96, 0x16, 0x48, 0x1e, 0xfd, 0x00, 0xe8, 0xd4, 0x0b, 0x33, - 0xc8, 0x78, 0xc8, 0x28, 0xe1, 0x21, 0x2d, 0x71, 0x8c, 0x8c, 0xa6, 0xf0, 0xde, 0x7f, 0x2e, 0x18, - 0xbe, 0x8d, 0xac, 0x4d, 0xf5, 0x97, 0xd9, 0xe0, 0xc4, 0xc1, 0xc4, 0xcd, 0x21, 0x4f, 0x9d, 0x7d, - 0x94, 0xc0, 0xb8, 0xda, 0x43, 0x71, 0xb0, 0x4e, 0xbd, 0x7d, 0xc8, 0xf8, 0x21, 0x25, 0xfc, 0x40, - 0xf4, 0x4a, 0x46, 0xff, 0x0f, 0xc6, 0xe5, 0xfb, 0x30, 0xfa, 0xb3, 0x8c, 0x29, 0x30, 0xa9, 0x17, - 0xc2, 0x12, 0xf3, 0x34, 0x47, 0x1c, 0xc7, 0xa1, 0x5c, 0x35, 0x18, 0xc7, 0xc3, 0x7c, 0x98, 0x41, - 0x4e, 
0x4a, 0x63, 0xe5, 0xee, 0xec, 0x9b, 0xd4, 0xdb, 0x9d, 0x32, 0x89, 0xa3, 0xdf, 0xfd, 0xcd, - 0x23, 0x95, 0xfc, 0xff, 0x2a, 0xad, 0xde, 0x47, 0xc9, 0xff, 0xb7, 0x12, 0x04, 0x9d, 0x04, 0x91, - 0x1c, 0xf1, 0xf2, 0x6f, 0x2a, 0xe0, 0xee, 0x2a, 0xc6, 0x94, 0x66, 0x5e, 0xe2, 0x18, 0xac, 0xcb, - 0x53, 0x40, 0x65, 0x49, 0x4a, 0x79, 0xf0, 0x46, 0xeb, 0xd6, 0xad, 0xb1, 0xeb, 0xad, 0x79, 0xac, - 0xb6, 0x66, 0x8e, 0x40, 0x6d, 0xce, 0x9a, 0xc8, 0xbe, 0x11, 0x49, 0xd1, 0x67, 0x7f, 0xd6, 0x40, - 0xfb, 0xa0, 0x1c, 0x16, 0xb8, 0x48, 0x0e, 0x39, 0xe4, 0x48, 0x7f, 0x06, 0x00, 0x16, 0xd7, 0x55, - 0xa6, 0xe4, 0x45, 0x5a, 0x09, 0x56, 0x31, 0xab, 0x31, 0x7a, 0x0c, 0x1e, 0x4a, 0xda, 0x13, 0x44, - 0xb9, 0xb2, 0xb5, 0x70, 0xab, 0xad, 0xad, 0xda, 0xd6, 0xa3, 0x1b, 0xb6, 0xa6, 0xfd, 0xca, 0x55, - 0x5b, 0x24, 0xdf, 0x23, 0xca, 0x45, 0x97, 0x6e, 0x83, 0xb5, 0x1a, 0x54, 0x85, 0x0c, 0xa1, 0x42, - 0x5e, 0xc7, 0x76, 0xd0, 0x52, 0xa0, 0xea, 0x10, 0xa1, 0xa2, 0xff, 0xee, 0xe2, 0xca, 0xd4, 0x2e, - 0xaf, 0x4c, 0xed, 0xc7, 0x95, 0xa9, 0x9d, 0x8f, 0xcd, 0xc6, 0xe5, 0xd8, 0x6c, 0x7c, 0x1d, 0x9b, - 0x8d, 0x8f, 0x5e, 0x82, 0x79, 0x3a, 0x8c, 0x9c, 0x98, 0xe4, 0x6e, 0xfd, 0x88, 0xbd, 0xca, 0x60, - 0xc4, 0x26, 0x81, 0x7b, 0xba, 0xbd, 0xe3, 0x7e, 0x52, 0xef, 0x1f, 0xaf, 0x28, 0x62, 0x51, 0x53, - 0x9a, 0xde, 0xf9, 0x15, 0x00, 0x00, 0xff, 0xff, 0x59, 0x03, 0x4c, 0x93, 0x1c, 0x05, 0x00, 0x00, + 0x10, 0x8d, 0x21, 0x0d, 0xb0, 0x09, 0x45, 0xb5, 0x68, 0x6b, 0x05, 0xd5, 0x0e, 0xa9, 0x84, 0xc2, + 0xa1, 0x76, 0x0c, 0xb7, 0xde, 0x88, 0xe8, 0xa1, 0x2d, 0xaa, 0x22, 0xc3, 0xa9, 0x97, 0xd5, 0xc6, + 0x19, 0x9c, 0x15, 0xb1, 0x77, 0xe5, 0xdd, 0x40, 0xf3, 0x2f, 0xf8, 0x4b, 0xbd, 0x71, 0xe4, 0x58, + 0xf5, 0x90, 0x56, 0xa0, 0x5e, 0x7a, 0xe4, 0x17, 0x54, 0xbb, 0xeb, 0xa4, 0x40, 0x3f, 0x80, 0x9b, + 0x67, 0xf6, 0xcd, 0x7b, 0x6f, 0xbc, 0x33, 0x8b, 0x36, 0x98, 0x48, 0x99, 0xa0, 0x22, 0x90, 0x27, + 0x84, 0x07, 0xc7, 0x61, 0x0f, 0x24, 0x09, 0x75, 0x80, 0x73, 0x88, 0x59, 0xde, 0xf7, 0x79, 0xce, + 0x24, 0xb3, 0x57, 0x0b, 0x9c, 0xaf, 0x8e, 0xfc, 0x02, 0x57, 0x5f, 0x4d, 0x58, 0xc2, 0x34, 0x20, + 0x50, 0x5f, 0x06, 0x5b, 0xf7, 0x12, 0xc6, 0x92, 0x21, 0x04, 0x3a, 0xea, 0x8d, 0x0e, 0x03, 0x49, + 0x53, 0x10, 0x92, 0xa4, 0xdc, 0x00, 0x9a, 0x9f, 0x2b, 0x08, 0x1d, 0x9c, 0x10, 0x1e, 0x69, 0x05, + 0xfb, 0x39, 0x5a, 0xe0, 0x8c, 0x0d, 0x31, 0xed, 0x3b, 0x56, 0xc3, 0x6a, 0x95, 0xa3, 0x8a, 0x0a, + 0xdf, 0xf6, 0xed, 0x75, 0x54, 0x23, 0x42, 0x80, 0x6c, 0xe3, 0x3e, 0x64, 0x2c, 0x75, 0xe6, 0x1a, + 0x56, 0x6b, 0x29, 0xaa, 0x9a, 0xdc, 0xae, 0x4a, 0xcd, 0x20, 0x61, 0x01, 0x99, 0xbf, 0x06, 0x09, + 0x0d, 0x64, 0x07, 0x55, 0x06, 0x40, 0x93, 0x81, 0x74, 0xca, 0x0d, 0xab, 0x35, 0xdf, 0xd9, 0xfc, + 0x39, 0xf1, 0x96, 0x4d, 0x73, 0xd8, 0x1c, 0x5c, 0x4d, 0xbc, 0xd5, 0x31, 0x49, 0x87, 0xaf, 0x9b, + 0x37, 0xd2, 0xcd, 0xa8, 0x28, 0xb4, 0x3f, 0xa0, 0xb2, 0xea, 0xc1, 0x79, 0xd4, 0xb0, 0x5a, 0xd5, + 0xad, 0xba, 0x6f, 0x1a, 0xf4, 0xa7, 0x0d, 0xfa, 0x07, 0xd3, 0x06, 0x3b, 0xee, 0xd9, 0xc4, 0x2b, + 0x5d, 0x4d, 0x3c, 0xfb, 0x06, 0x9f, 0x2a, 0x6e, 0x9e, 0x7e, 0xf3, 0xac, 0x48, 0xf3, 0xd8, 0x5d, + 0x64, 0xf3, 0x36, 0x1e, 0x12, 0x21, 0xb1, 0xe0, 0x4c, 0x62, 0x9e, 0xd3, 0x18, 0x9c, 0x8a, 0xf2, + 0xde, 0x79, 0xa9, 0x18, 0xbe, 0x4e, 0xbc, 0xb5, 0x58, 0xff, 0x72, 0xd1, 0x3f, 0xf2, 0x29, 0x0b, + 0x52, 0x22, 0x07, 0xfe, 0x1e, 0x24, 0x24, 0x1e, 0xef, 0x42, 0x1c, 0xad, 0xf0, 0xf6, 0x1e, 0x11, + 0x72, 0x9f, 0x33, 0xd9, 0x55, 0xb5, 0x9a, 0x31, 0xfc, 0x83, 0x71, 0xe1, 0x21, 0x8c, 0xe1, 0x4d, + 0xc6, 0x01, 0x72, 0x79, 0x1b, 0x93, 0x9c, 0xca, 0x41, 0x0a, 0x92, 0xc6, 0x58, 0x0f, 0x05, 0x89, + 0xe3, 0x51, 0x3a, 0x1a, 0x12, 
0xc9, 0x72, 0x67, 0xf1, 0xfe, 0xec, 0x6b, 0xbc, 0xbd, 0x33, 0x63, + 0x52, 0x57, 0xbf, 0xf3, 0x9b, 0x47, 0x2b, 0x85, 0xff, 0x55, 0x5a, 0x7a, 0x88, 0x52, 0xf8, 0x6f, + 0x25, 0x82, 0xea, 0x09, 0xb0, 0x14, 0x64, 0xfe, 0x37, 0x15, 0x74, 0x7f, 0x15, 0x67, 0x46, 0x73, + 0x5b, 0xe2, 0x10, 0xad, 0xe8, 0x5b, 0x80, 0x3c, 0x67, 0xb9, 0xbe, 0x78, 0xa7, 0x7a, 0xe7, 0xd4, + 0x34, 0x8b, 0xa9, 0x79, 0x66, 0xa6, 0xe6, 0x16, 0x81, 0x99, 0x9c, 0x65, 0x95, 0x7d, 0xa3, 0x92, + 0xaa, 0xae, 0xf9, 0xc3, 0x42, 0xb5, 0x6e, 0x3e, 0xca, 0x68, 0x96, 0xec, 0x4b, 0x22, 0xc1, 0x7e, + 0x81, 0x10, 0x15, 0x98, 0x9b, 0x94, 0x5e, 0xa4, 0xc5, 0x68, 0x89, 0x8a, 0x02, 0x63, 0xc7, 0xe8, + 0xb1, 0xa6, 0x3d, 0x02, 0x2e, 0x8d, 0xad, 0xb9, 0x3b, 0x6d, 0xad, 0x17, 0xb6, 0x9e, 0x5e, 0xb3, + 0x35, 0xab, 0x37, 0xae, 0x6a, 0x2a, 0xf9, 0x1e, 0xb8, 0x54, 0x55, 0xf6, 0x06, 0x5a, 0x2e, 0x40, + 0x63, 0x2c, 0x00, 0x32, 0xbd, 0x8e, 0xb5, 0xce, 0x9c, 0x63, 0x45, 0x55, 0x03, 0x1c, 0xef, 0x03, + 0x64, 0xf6, 0x26, 0x7a, 0x62, 0x46, 0x15, 0x20, 0xc3, 0xd3, 0xdd, 0x2f, 0xeb, 0xdd, 0xd7, 0x2e, + 0x15, 0xa8, 0xab, 0xdf, 0x80, 0xce, 0xbb, 0xb3, 0x0b, 0xd7, 0x3a, 0xbf, 0x70, 0xad, 0xef, 0x17, + 0xae, 0x75, 0x7a, 0xe9, 0x96, 0xce, 0x2f, 0xdd, 0xd2, 0x97, 0x4b, 0xb7, 0xf4, 0xb1, 0x9d, 0x50, + 0x39, 0x18, 0xf5, 0xfc, 0x98, 0xa5, 0x41, 0xf1, 0x3a, 0xbd, 0x1a, 0x92, 0x9e, 0x98, 0x06, 0xc1, + 0xf1, 0xd6, 0x76, 0xf0, 0xc9, 0x3c, 0x6c, 0x72, 0xcc, 0x41, 0xf4, 0x2a, 0xba, 0xc7, 0xed, 0x5f, + 0x01, 0x00, 0x00, 0xff, 0xff, 0x8a, 0xd6, 0x52, 0xd1, 0xf5, 0x04, 0x00, 0x00, } func (m *TwapRecord) Marshal() (dAtA []byte, err error) { @@ -392,6 +401,11 @@ func (m *PruningState) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.LastSeenPoolId != 0 { + i = encodeVarintTwapRecord(dAtA, i, uint64(m.LastSeenPoolId)) + i-- + dAtA[i] = 0x20 + } if len(m.LastKeySeen) > 0 { i -= len(m.LastKeySeen) copy(dAtA[i:], m.LastKeySeen) @@ -483,6 +497,9 @@ func (m *PruningState) Size() (n int) { if l > 0 { n += 1 + l + sovTwapRecord(uint64(l)) } + if m.LastSeenPoolId != 0 { + n += 1 + sovTwapRecord(uint64(m.LastSeenPoolId)) + } return n } @@ -996,6 +1013,25 @@ func (m *PruningState) Unmarshal(dAtA []byte) error { m.LastKeySeen = []byte{} } iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LastSeenPoolId", wireType) + } + m.LastSeenPoolId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTwapRecord + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LastSeenPoolId |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipTwapRecord(dAtA[iNdEx:]) diff --git a/x/twap/types/twapmock/amminterface.go b/x/twap/types/twapmock/amminterface.go index ce252361a12..d2904b69892 100644 --- a/x/twap/types/twapmock/amminterface.go +++ b/x/twap/types/twapmock/amminterface.go @@ -85,3 +85,7 @@ func (p *ProgrammedPoolManagerInterface) RouteCalculateSpotPrice(ctx sdk.Context } return p.underlyingKeeper.RouteCalculateSpotPrice(ctx, poolId, quoteDenom, baseDenom) } + +func (p *ProgrammedPoolManagerInterface) GetNextPoolId(ctx sdk.Context) uint64 { + return p.underlyingKeeper.GetNextPoolId(ctx) +} From cd2da82388125db27b49640a4d1cd8dd983ec05e Mon Sep 17 00:00:00 2001 From: czarcas7ic <40078083+czarcas7ic@users.noreply.github.com> Date: Sun, 11 Feb 2024 05:28:14 +0000 Subject: [PATCH 02/15] [create-pull-request] automated change --- .vscode/launch.json | 2 +- Makefile | 2 +- app/app.go | 3 ++- 
app/upgrades/v24/constants.go | 19 +++++++++++++++++++ app/upgrades/v24/upgrades.go | 28 ++++++++++++++++++++++++++++ tests/e2e/containers/config.go | 4 ++-- 6 files changed, 53 insertions(+), 5 deletions(-) create mode 100644 app/upgrades/v24/constants.go create mode 100644 app/upgrades/v24/upgrades.go diff --git a/.vscode/launch.json b/.vscode/launch.json index 73df2f7d71f..2f9252dcae7 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -48,7 +48,7 @@ "OSMOSIS_E2E_SKIP_UPGRADE": "false", "OSMOSIS_E2E_SKIP_CLEANUP": "true", "OSMOSIS_E2E_SKIP_STATE_SYNC": "true", - "OSMOSIS_E2E_UPGRADE_VERSION": "v23", + "OSMOSIS_E2E_UPGRADE_VERSION": "v24", "OSMOSIS_E2E_DEBUG_LOG": "false", }, "preLaunchTask": "e2e-setup" diff --git a/Makefile b/Makefile index d0f918ea3fc..f45b9c0433f 100644 --- a/Makefile +++ b/Makefile @@ -44,7 +44,7 @@ LEDGER_ENABLED ?= true SDK_PACK := $(shell go list -m github.com/cosmos/cosmos-sdk | sed 's/ /\@/g') BUILDDIR ?= $(CURDIR)/build DOCKER := $(shell which docker) -E2E_UPGRADE_VERSION := "v23" +E2E_UPGRADE_VERSION := "v24" #SHELL := /bin/bash # Go version to be used in docker images diff --git a/app/app.go b/app/app.go index d6b7f5763ef..0831d46c7cd 100644 --- a/app/app.go +++ b/app/app.go @@ -96,6 +96,7 @@ import ( v21 "github.com/osmosis-labs/osmosis/v23/app/upgrades/v21" v22 "github.com/osmosis-labs/osmosis/v23/app/upgrades/v22" v23 "github.com/osmosis-labs/osmosis/v23/app/upgrades/v23" + v24 "github.com/osmosis-labs/osmosis/v23/app/upgrades/v24" v3 "github.com/osmosis-labs/osmosis/v23/app/upgrades/v3" v4 "github.com/osmosis-labs/osmosis/v23/app/upgrades/v4" v5 "github.com/osmosis-labs/osmosis/v23/app/upgrades/v5" @@ -143,7 +144,7 @@ var ( _ runtime.AppI = (*OsmosisApp)(nil) - Upgrades = []upgrades.Upgrade{v4.Upgrade, v5.Upgrade, v7.Upgrade, v9.Upgrade, v11.Upgrade, v12.Upgrade, v13.Upgrade, v14.Upgrade, v15.Upgrade, v16.Upgrade, v17.Upgrade, v18.Upgrade, v19.Upgrade, v20.Upgrade, v21.Upgrade, v22.Upgrade, v23.Upgrade} + Upgrades = []upgrades.Upgrade{v4.Upgrade, v5.Upgrade, v7.Upgrade, v9.Upgrade, v11.Upgrade, v12.Upgrade, v13.Upgrade, v14.Upgrade, v15.Upgrade, v16.Upgrade, v17.Upgrade, v18.Upgrade, v19.Upgrade, v20.Upgrade, v21.Upgrade, v22.Upgrade, v23.Upgrade, v24.Upgrade} Forks = []upgrades.Fork{v3.Fork, v6.Fork, v8.Fork, v10.Fork} ) diff --git a/app/upgrades/v24/constants.go b/app/upgrades/v24/constants.go new file mode 100644 index 00000000000..05afd03dc96 --- /dev/null +++ b/app/upgrades/v24/constants.go @@ -0,0 +1,19 @@ +package v24 + +import ( + "github.com/osmosis-labs/osmosis/v23/app/upgrades" + + store "github.com/cosmos/cosmos-sdk/store/types" +) + +// UpgradeName defines the on-chain upgrade name for the Osmosis v24 upgrade. 
+const UpgradeName = "v24" + +var Upgrade = upgrades.Upgrade{ + UpgradeName: UpgradeName, + CreateUpgradeHandler: CreateUpgradeHandler, + StoreUpgrades: store.StoreUpgrades{ + Added: []string{}, + Deleted: []string{}, + }, +} diff --git a/app/upgrades/v24/upgrades.go b/app/upgrades/v24/upgrades.go new file mode 100644 index 00000000000..b784cac77b0 --- /dev/null +++ b/app/upgrades/v24/upgrades.go @@ -0,0 +1,28 @@ +package v24 + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/module" + upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types" + + "github.com/osmosis-labs/osmosis/v23/app/keepers" + "github.com/osmosis-labs/osmosis/v23/app/upgrades" +) + +func CreateUpgradeHandler( + mm *module.Manager, + configurator module.Configurator, + bpm upgrades.BaseAppParamManager, + keepers *keepers.AppKeepers, +) upgradetypes.UpgradeHandler { + return func(ctx sdk.Context, plan upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) { + // Run migrations before applying any other state changes. + // NOTE: DO NOT PUT ANY STATE CHANGES BEFORE RunMigrations(). + migrations, err := mm.RunMigrations(ctx, configurator, fromVM) + if err != nil { + return nil, err + } + + return migrations, nil + } +} diff --git a/tests/e2e/containers/config.go b/tests/e2e/containers/config.go index 018bc04c4a5..7b90fb02bc6 100644 --- a/tests/e2e/containers/config.go +++ b/tests/e2e/containers/config.go @@ -24,10 +24,10 @@ const ( // It should be uploaded to Docker Hub. OSMOSIS_E2E_SKIP_UPGRADE should be unset // for this functionality to be used. previousVersionOsmoRepository = "osmolabs/osmosis" - previousVersionOsmoTag = "22.0.0-alpine" + previousVersionOsmoTag = "v23.0.0" // Pre-upgrade repo/tag for osmosis initialization (this should be one version below upgradeVersion) previousVersionInitRepository = "osmolabs/osmosis-e2e-init-chain" - previousVersionInitTag = "22.0.0" + previousVersionInitTag = "v23.0.0" // Hermes repo/version for relayer relayerRepository = "informalsystems/hermes" relayerTag = "1.5.1" From 212843764b541bdd19c58ea103d718e15b0d8f07 Mon Sep 17 00:00:00 2001 From: Adam Tucker Date: Mon, 12 Feb 2024 16:55:51 -0600 Subject: [PATCH 03/15] use rc1 of v23 --- tests/e2e/containers/config.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/e2e/containers/config.go b/tests/e2e/containers/config.go index 7b90fb02bc6..89afc3d2341 100644 --- a/tests/e2e/containers/config.go +++ b/tests/e2e/containers/config.go @@ -24,10 +24,10 @@ const ( // It should be uploaded to Docker Hub. OSMOSIS_E2E_SKIP_UPGRADE should be unset // for this functionality to be used. 
previousVersionOsmoRepository = "osmolabs/osmosis" - previousVersionOsmoTag = "v23.0.0" + previousVersionOsmoTag = "23.0.0-rc1" // Pre-upgrade repo/tag for osmosis initialization (this should be one version below upgradeVersion) previousVersionInitRepository = "osmolabs/osmosis-e2e-init-chain" - previousVersionInitTag = "v23.0.0" + previousVersionInitTag = "23.0.0-rc1" // Hermes repo/version for relayer relayerRepository = "informalsystems/hermes" relayerTag = "1.5.1" From be2c96f54b493cda64a47a1440b27a2dfad2d131 Mon Sep 17 00:00:00 2001 From: Adam Tucker Date: Mon, 12 Feb 2024 16:58:55 -0600 Subject: [PATCH 04/15] use alpine --- tests/e2e/containers/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/e2e/containers/config.go b/tests/e2e/containers/config.go index 89afc3d2341..60564999117 100644 --- a/tests/e2e/containers/config.go +++ b/tests/e2e/containers/config.go @@ -24,7 +24,7 @@ const ( // It should be uploaded to Docker Hub. OSMOSIS_E2E_SKIP_UPGRADE should be unset // for this functionality to be used. previousVersionOsmoRepository = "osmolabs/osmosis" - previousVersionOsmoTag = "23.0.0-rc1" + previousVersionOsmoTag = "23.0.0-rc1-alpine" // Pre-upgrade repo/tag for osmosis initialization (this should be one version below upgradeVersion) previousVersionInitRepository = "osmolabs/osmosis-e2e-init-chain" previousVersionInitTag = "23.0.0-rc1" From 12348a953aacd20fc41d1b573ea960bbceb854ba Mon Sep 17 00:00:00 2001 From: Adam Tucker Date: Mon, 12 Feb 2024 18:10:49 -0600 Subject: [PATCH 05/15] Update config.go --- tests/e2e/containers/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/e2e/containers/config.go b/tests/e2e/containers/config.go index 60564999117..39734d5cbe8 100644 --- a/tests/e2e/containers/config.go +++ b/tests/e2e/containers/config.go @@ -27,7 +27,7 @@ const ( previousVersionOsmoTag = "23.0.0-rc1-alpine" // Pre-upgrade repo/tag for osmosis initialization (this should be one version below upgradeVersion) previousVersionInitRepository = "osmolabs/osmosis-e2e-init-chain" - previousVersionInitTag = "23.0.0-rc1" + previousVersionInitTag = "23.0.0-rc1-temp" // Hermes repo/version for relayer relayerRepository = "informalsystems/hermes" relayerTag = "1.5.1" From 0aa2e974b9d88672c9f1172bd0df610c808a71fc Mon Sep 17 00:00:00 2001 From: Adam Tucker Date: Mon, 12 Feb 2024 20:21:39 -0700 Subject: [PATCH 06/15] delete all time indexed keys in twap store --- app/upgrades/v24/upgrades.go | 4 ++++ x/twap/store.go | 13 +++++++++++++ 2 files changed, 17 insertions(+) diff --git a/app/upgrades/v24/upgrades.go b/app/upgrades/v24/upgrades.go index b784cac77b0..2771fa577c5 100644 --- a/app/upgrades/v24/upgrades.go +++ b/app/upgrades/v24/upgrades.go @@ -23,6 +23,10 @@ func CreateUpgradeHandler( return nil, err } + // Now that the TWAP keys are refactored, we can delete all time indexed TWAPs + // since we only need the pool indexed TWAPs. + keepers.TwapKeeper.DeleteAllHistoricalTimeIndexedTWAPs(ctx) + return migrations, nil } } diff --git a/x/twap/store.go b/x/twap/store.go index d62e898c127..e4ff1aecc8c 100644 --- a/x/twap/store.go +++ b/x/twap/store.go @@ -267,3 +267,16 @@ func (k Keeper) getRecordAtOrBeforeTime(ctx sdk.Context, poolId uint64, t time.T return twap, nil } + +// DeleteAllHistoricalTimeIndexedTWAPs deletes every historical twap record indexed by time. +// This is to be used in the upgrade handler, to clear out the now-obsolete historical twap records +// that were indexed by time. 
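+// Note: the "historical_time_index" prefix is written out inline below because the
+// corresponding key-format constant and helpers were removed from the types package
+// as part of this refactor.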
+func (k Keeper) DeleteAllHistoricalTimeIndexedTWAPs(ctx sdk.Context) { + store := ctx.KVStore(k.storeKey) + iter := sdk.KVStorePrefixIterator(store, []byte("historical_time_index")) + defer iter.Close() + for iter.Valid() { + store.Delete(iter.Key()) + iter.Next() + } +} From 91515832892552dac65f89063f15ad489e3df3a3 Mon Sep 17 00:00:00 2001 From: Adam Tucker Date: Mon, 12 Feb 2024 20:37:28 -0700 Subject: [PATCH 07/15] add changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b589528d1d1..b52254ab080 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -45,6 +45,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### State Breaking * [#7250](https://github.com/osmosis-labs/osmosis/pull/7250) Further filter spam gauges from epoch distribution. +* [#7472](https://github.com/osmosis-labs/osmosis/pull/7472) Refactor TWAP keys to only require a single key format. ## v23.0.0 From 1172f2680b9fe2e181625a0de969028e553b25c9 Mon Sep 17 00:00:00 2001 From: Adam Tucker Date: Tue, 13 Feb 2024 13:56:11 -0700 Subject: [PATCH 08/15] respect prune limit --- x/twap/store.go | 25 +++++++++++-------------- x/twap/store_test.go | 20 +++++++++++++------- 2 files changed, 24 insertions(+), 21 deletions(-) diff --git a/x/twap/store.go b/x/twap/store.go index e4ff1aecc8c..c17d2f993fe 100644 --- a/x/twap/store.go +++ b/x/twap/store.go @@ -100,11 +100,8 @@ func (k Keeper) pruneRecordsBeforeTimeButNewest(ctx sdk.Context, state types.Pru return err } - // Notice, even if ranging over denomPairs takes us over the prune per block limit, - // we still continue to iterate over all denom pairs of the pool. - // This simplifies logic so that we can consider a pool "done" once we start it. - // It also prevents choosing between keeping more records for a pool than we need to, - // and having to store more state in the pruning state. + // Notice, if we hit the prune limit in the middle of a pool, we will re-iterate over the completed pruned pool records. + // This is acceptable overhead for the simplification this provides. denomPairs := types.GetAllUniqueDenomPairs(denoms) for _, denomPair := range denomPairs { // Reverse iterator guarantees that we iterate through the newest per pool first. @@ -122,6 +119,15 @@ func (k Keeper) pruneRecordsBeforeTimeButNewest(ctx sdk.Context, state types.Pru timeIndexKey := iter.Key() store.Delete(timeIndexKey) numPruned += 1 + + if numPruned >= NumRecordsToPrunePerBlock { + // We have hit the limit in the middle of a pool. + // We store this pool as the last seen pool in the pruning state. + // We accept re-iterating over denomPairs as acceptable overhead. + state.LastSeenPoolId = poolId + k.SetPruningState(ctx, state) + return nil + } } else { // If this is the first iteration after we have gotten through the records after lastKeptTime, we // still keep the record in order to allow interpolation (see function description for more details). @@ -130,21 +136,12 @@ func (k Keeper) pruneRecordsBeforeTimeButNewest(ctx sdk.Context, state types.Pru } } lastPoolIdCompleted = poolId - - if numPruned >= NumRecordsToPrunePerBlock { - // We have hit the limit, so we stop pruning. - break - } } if lastPoolIdCompleted == 1 { // We have pruned all records. state.IsPruning = false k.SetPruningState(ctx, state) - } else { - // We have not pruned all records, so we update the last seen pool id as the pool ID after the last completed pool. 
- state.LastSeenPoolId = lastPoolIdCompleted - 1
- k.SetPruningState(ctx, state)
 }
 return nil
}
diff --git a/x/twap/store_test.go b/x/twap/store_test.go
index 5ddc20861d3..2bae4d2f44f 100644
--- a/x/twap/store_test.go
+++ b/x/twap/store_test.go
@@ -517,20 +517,23 @@ func (s *TestSuite) TestPruneRecordsBeforeTimeButNewest() {
 expectedKeptRecords: []types.TwapRecord{},
 },
- "base time; across pool 3 and pool 5; pool 3: 4 total records; 3 before lastKeptTime; all 4 kept due to pool with larger ID hitting limit. pool 5: 24 total records; 12 before lastKeptTime; 12 deleted and 12 kept": {
+ "base time; across pool 3 and pool 5; pool 3: 4 total records; 3 before lastKeptTime; 2 in queue due to pool with larger ID hitting limit. pool 5: 24 total records; 12 before lastKeptTime; 9 deleted and 15 kept, 3 in queue due to prune limit": {
 recordsToPreSet: []types.TwapRecord{
 pool3BaseSecMin3Ms, // base time - 3ms; in queue for deletion
 pool3BaseSecMin2Ms, // base time - 2ms; in queue for deletion
 pool3BaseSecMin1Ms, // base time - 1ms; kept since newest before lastKeptTime
 pool3BaseSecBaseMs, // base time; kept since at lastKeptTime
- pool5Min2SBaseMsAB, pool5Min2SBaseMsAC, pool5Min2SBaseMsBC, // base time - 2s; deleted
+ pool5Min2SBaseMsAB, pool5Min2SBaseMsAC, // base time - 2s; deleted
+ pool5Min2SBaseMsBC, // base time - 2s; in queue for deletion
 pool5Min1SBaseMsAB, pool5Min1SBaseMsAC, pool5Min1SBaseMsBC, // base time - 1s; deleted
 pool5BaseSecBaseMsAB, pool5BaseSecBaseMsAC, pool5BaseSecBaseMsBC, // base time; kept since at lastKeptTime
 pool5Plus1SBaseMsAB, pool5Plus1SBaseMsAC, pool5Plus1SBaseMsBC, // base time + 1s; kept since newer than lastKeptTime
- pool5Min2SMin1MsAB, pool5Min2SMin1MsAC, pool5Min2SMin1MsBC, // base time - 2s - 1ms; deleted
- pool5Min1SMin1MsAB, pool5Min1SMin1MsAC, pool5Min1SMin1MsBC, // base time - 1s - 1ms; deleted
+ pool5Min2SMin1MsAB, pool5Min2SMin1MsAC, // base time - 2s - 1ms; deleted
+ pool5Min2SMin1MsBC, // base time - 2s - 1ms; in queue for deletion
+ pool5Min1SMin1MsAB, pool5Min1SMin1MsAC, // base time - 1s - 1ms; deleted
+ pool5Min1SMin1MsBC, // base time - 1s - 1ms; in queue for deletion
 pool5BaseSecMin1MsAB, pool5BaseSecMin1MsAC, pool5BaseSecMin1MsBC, // base time - 1ms; kept since newest before lastKeptTime
 pool5Plus1SMin1MsAB, pool5Plus1SMin1MsAC, pool5Plus1SMin1MsBC, // base time + 1s - 1ms; kept since newer than lastKeptTime
 },
@@ -538,17 +541,20 @@ func (s *TestSuite) TestPruneRecordsBeforeTimeButNewest() {
 lastKeptTime: baseTime,
 expectedKeptRecords: []types.TwapRecord{
- pool3BaseSecMin3Ms,
- pool3BaseSecMin2Ms,
+ pool3BaseSecMin3Ms, // in queue for deletion
+ pool3BaseSecMin2Ms, // in queue for deletion
 pool3BaseSecMin1Ms,
 pool5BaseSecMin1MsAB, pool5BaseSecMin1MsAC, pool5BaseSecMin1MsBC,
 pool3BaseSecBaseMs,
 pool5BaseSecBaseMsAB, pool5BaseSecBaseMsAC, pool5BaseSecBaseMsBC,
 pool5Plus1SMin1MsAB, pool5Plus1SMin1MsAC, pool5Plus1SMin1MsBC,
 pool5Plus1SBaseMsAB, pool5Plus1SBaseMsAC, pool5Plus1SBaseMsBC,
+ pool5Min2SMin1MsBC, // in queue for deletion
+ pool5Min2SBaseMsBC, // in queue for deletion
+ pool5Min1SMin1MsBC, // in queue for deletion
 },
- overwriteLimit: 5, // notice that despite this being set to 5, we delete all 12 entries for pool 5. We always complete a pool's pruning once we start it for sake of simplicity.
+
+ overwriteLimit: 9, // 5 total records in queue to be deleted due to limit }, } for name, tc := range tests { From ccb2a5e433af1b09a42518f4fdd47969274eab7f Mon Sep 17 00:00:00 2001 From: Adam Tucker Date: Tue, 13 Feb 2024 14:23:40 -0700 Subject: [PATCH 09/15] add upgrade test for pruning old time indexed keys --- app/upgrades/v24/upgrades_test.go | 104 ++++++++++++++++++++++++++++++ 1 file changed, 104 insertions(+) create mode 100644 app/upgrades/v24/upgrades_test.go diff --git a/app/upgrades/v24/upgrades_test.go b/app/upgrades/v24/upgrades_test.go new file mode 100644 index 00000000000..0e220c8941d --- /dev/null +++ b/app/upgrades/v24/upgrades_test.go @@ -0,0 +1,104 @@ +package v24_test + +import ( + "bytes" + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/suite" + + upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types" + + abci "github.com/cometbft/cometbft/abci/types" + + "github.com/osmosis-labs/osmosis/osmomath" + "github.com/osmosis-labs/osmosis/osmoutils" + "github.com/osmosis-labs/osmosis/v23/app/apptesting" + + "github.com/osmosis-labs/osmosis/v23/x/twap/types" + twaptypes "github.com/osmosis-labs/osmosis/v23/x/twap/types" +) + +const ( + v24UpgradeHeight = int64(10) + HistoricalTWAPTimeIndexPrefix = "historical_time_index" + KeySeparator = "|" +) + +type UpgradeTestSuite struct { + apptesting.KeeperTestHelper +} + +func TestUpgradeTestSuite(t *testing.T) { + suite.Run(t, new(UpgradeTestSuite)) +} + +func (s *UpgradeTestSuite) TestUpgrade() { + s.Setup() + + // Manually set up TWAP records indexed by both pool ID and time. + twapStoreKey := s.App.GetKey(twaptypes.ModuleName) + store := s.Ctx.KVStore(twapStoreKey) + twap := twaptypes.TwapRecord{ + PoolId: 1, + Asset0Denom: "foo", + Asset1Denom: "bar", + Height: 1, + Time: time.Date(2023, 0o2, 1, 0, 0, 0, 0, time.UTC), + P0LastSpotPrice: osmomath.OneDec(), + P1LastSpotPrice: osmomath.OneDec(), + P0ArithmeticTwapAccumulator: osmomath.ZeroDec(), + P1ArithmeticTwapAccumulator: osmomath.ZeroDec(), + GeometricTwapAccumulator: osmomath.ZeroDec(), + LastErrorTime: time.Time{}, // no previous error + } + poolIndexKey := types.FormatHistoricalPoolIndexTWAPKey(twap.PoolId, twap.Asset0Denom, twap.Asset1Denom, twap.Time) + osmoutils.MustSet(store, poolIndexKey, &twap) + + // The time index key is a bit manual since we removed the old code that did this programmatically. + var buffer bytes.Buffer + timeS := osmoutils.FormatTimeString(twap.Time) + fmt.Fprintf(&buffer, "%s%d%s%s%s%s%s%s", HistoricalTWAPTimeIndexPrefix, twap.PoolId, KeySeparator, twap.Asset0Denom, KeySeparator, twap.Asset1Denom, KeySeparator, timeS) + timeIndexKey := buffer.Bytes() + osmoutils.MustSet(store, timeIndexKey, &twap) + + // TWAP records indexed by time should exist + twapRecords, err := osmoutils.GatherValuesFromStorePrefix(store, []byte(HistoricalTWAPTimeIndexPrefix), types.ParseTwapFromBz) + s.Require().NoError(err) + s.Require().Len(twapRecords, 1) + s.Require().Equal(twap, twapRecords[0]) + + // TWAP records indexed by pool ID should exist. + twapRecords, err = s.App.TwapKeeper.GetAllHistoricalPoolIndexedTWAPsForPoolId(s.Ctx, twap.PoolId) + s.Require().NoError(err) + s.Require().Len(twapRecords, 1) + s.Require().Equal(twap, twapRecords[0]) + + dummyUpgrade(s) + s.Require().NotPanics(func() { + s.App.BeginBlocker(s.Ctx, abci.RequestBeginBlock{}) + }) + + // TWAP records indexed by time should be pruned. 
From 45e7485ba82018a20ab6281c1f680365a41a49b2 Mon Sep 17 00:00:00 2001
From: Adam Tucker
Date: Tue, 13 Feb 2024 14:32:01 -0700
Subject: [PATCH 10/15] comment clean up

---
 app/upgrades/v24/upgrades_test.go | 4 ++--
 x/twap/store.go                   | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/app/upgrades/v24/upgrades_test.go b/app/upgrades/v24/upgrades_test.go
index 0e220c8941d..c6186cd785c 100644
--- a/app/upgrades/v24/upgrades_test.go
+++ b/app/upgrades/v24/upgrades_test.go
@@ -80,12 +80,12 @@ func (s *UpgradeTestSuite) TestUpgrade() {
 		s.App.BeginBlocker(s.Ctx, abci.RequestBeginBlock{})
 	})

-	// TWAP records indexed by time should be pruned.
+	// TWAP records indexed by time should be completely removed.
 	twapRecords, err = osmoutils.GatherValuesFromStorePrefix(store, []byte(HistoricalTWAPTimeIndexPrefix), types.ParseTwapFromBz)
 	s.Require().NoError(err)
 	s.Require().Len(twapRecords, 0)

-	// TWAP records indexed by pool ID should not be pruned.
+	// TWAP records indexed by pool ID should be untouched.
 	twapRecords, err = s.App.TwapKeeper.GetAllHistoricalPoolIndexedTWAPsForPoolId(s.Ctx, twap.PoolId)
 	s.Require().NoError(err)
 	s.Require().Len(twapRecords, 1)
 	s.Require().Equal(twap, twapRecords[0])
diff --git a/x/twap/store.go b/x/twap/store.go
index c17d2f993fe..8218d941757 100644
--- a/x/twap/store.go
+++ b/x/twap/store.go
@@ -61,7 +61,7 @@ func (k Keeper) getChangedPools(ctx sdk.Context) []uint64 {
 	return alteredPoolIds
 }

-// storeHistoricalTWAP writes a twap to the store, in all needed indexing.
+// StoreHistoricalTWAP writes a twap to the store, indexed by pool id.
 func (k Keeper) StoreHistoricalTWAP(ctx sdk.Context, twap types.TwapRecord) {
 	store := ctx.KVStore(k.storeKey)
 	key := types.FormatHistoricalPoolIndexTWAPKey(twap.PoolId, twap.Asset0Denom, twap.Asset1Denom, twap.Time)
From da70f02ba955d54b1bf1291df40bec2df6acacc7 Mon Sep 17 00:00:00 2001
From: Adam Tucker
Date: Fri, 16 Feb 2024 11:02:52 -0600
Subject: [PATCH 11/15] Update CHANGELOG.md

Co-authored-by: Dev Ojha
---
 CHANGELOG.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index b52254ab080..f7dce0a0d98 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -45,7 +45,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 ### State Breaking

 * [#7250](https://github.com/osmosis-labs/osmosis/pull/7250) Further filter spam gauges from epoch distribution.
-* [#7472](https://github.com/osmosis-labs/osmosis/pull/7472) Refactor TWAP keys to only require a single key format.
+* [#7472](https://github.com/osmosis-labs/osmosis/pull/7472) Refactor TWAP keys to only require a single key format. Significantly lowers TWAP-caused writes.
## v23.0.0

From b189f5200263b60e3b8d8779b8beb31885b75b3e Mon Sep 17 00:00:00 2001
From: Adam Tucker
Date: Fri, 16 Feb 2024 10:03:30 -0700
Subject: [PATCH 12/15] remove outdated comment

---
 x/twap/store.go | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/x/twap/store.go b/x/twap/store.go
index 8218d941757..c835a31c207 100644
--- a/x/twap/store.go
+++ b/x/twap/store.go
@@ -85,9 +85,6 @@ func (k Keeper) StoreHistoricalTWAP(ctx sdk.Context, twap types.TwapRecord) {
 // If we reach the per block pruning limit, we store the last key seen in the pruning state.
 // This is so that we can continue pruning from where we left off in the next block.
 // If we have pruned all records, we set the pruning state to not pruning.
-// There is a small bug here where we store more seenPoolAssetTriplets than we need to.
-// Issue added here: https://github.com/osmosis-labs/osmosis/issues/7435
-// The bloat is minimal though, and is not at risk of getting out of hand.
 func (k Keeper) pruneRecordsBeforeTimeButNewest(ctx sdk.Context, state types.PruningState) error {
 	store := ctx.KVStore(k.storeKey)

From b18ecddd6b7bc2f9899dde3a22dd4deafb33b90a Mon Sep 17 00:00:00 2001
From: Adam Tucker
Date: Fri, 16 Feb 2024 10:12:57 -0700
Subject: [PATCH 13/15] update readme

---
 x/twap/README.md | 39 ++++++++++++++++++---------------------
 1 file changed, 18 insertions(+), 21 deletions(-)

diff --git a/x/twap/README.md b/x/twap/README.md
index 2fb9a557ffc..5e0d0914322 100644
--- a/x/twap/README.md
+++ b/x/twap/README.md
@@ -9,7 +9,7 @@ A time weighted average price is a function that takes a sequence of `(time, pri
 Using the arithmetic mean, the TWAP of a sequence `(t_i, p_i)`, from `t_0` to `t_n`, indexed by time in ascending order, is: $$\frac{1}{t_n - t_0}\sum_{i=0}^{n-1} p_i (t_{i+1} - t_i)$$
 Notice that the latest price `p_n` isn't used, as it has lasted for a time interval of `0` seconds in this range!

-To illustrate with an example, given the sequence: `(0s, $1), (4s, $6), (5s, $1)`, the arithmetic mean TWAP is: 
+To illustrate with an example, given the sequence: `(0s, $1), (4s, $6), (5s, $1)`, the arithmetic mean TWAP is:
 $$\frac{\$1 * (4s - 0s) + \$6 * (5s - 4s)}{5s - 0s} = \frac{\$10}{5} = \$2$$

 ## Geometric mean TWAP
@@ -48,7 +48,7 @@ We also maintain within each accumulator record in state, the latest spot price
 This allows us to interpolate accumulation records between times.
 Namely, if I want the twap from `t=10s` to `t=15s`, but the time records are at `9s, 13s, 17s`, this is fine.
 Using the latest spot price in each record, we create the accumulator value for `t=10` by computing
-`a_10 = a_9 + a_9_latest_spot_price * (10s - 9s)`, and `a_15 = a_13 + a_13_latest_spot_price * (15s - 13s)`. 
+`a_10 = a_9 + a_9_latest_spot_price * (10s - 9s)`, and `a_15 = a_13 + a_13_latest_spot_price * (15s - 13s)`.
 Given these interpolated accumulation values, we can compute the TWAP as before.

 ## Module API
@@ -79,7 +79,7 @@ and have a similar cosmwasm binding.
 // * endTime in the future
 // * startTime older than 48 hours OR pool creation
 // * pool with id poolId does not exist, or does not contain quoteAssetDenom, baseAssetDenom
-// * there were some computational errors during computing arithmetic twap within the time range of 
+// * there were some computational errors during computing arithmetic twap within the time range of
 // startRecord, endRecord - including the exact record times, which indicates that the result returned could be faulty
 // N.B. If there is a notable use case, the state machine could maintain more historical records, e.g. at one per hour.
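Reviewer note (not part of the patch): for context while reading the error list above, here is a hypothetical caller of the arithmetic TWAP query. The method name `GetArithmeticTwap` and its exact signature are assumptions on my part, not taken from this diff; the parameter names mirror the doc comment.

```go
package example

import (
	"time"

	sdk "github.com/cosmos/cosmos-sdk/types"

	"github.com/osmosis-labs/osmosis/osmomath"
	"github.com/osmosis-labs/osmosis/v23/x/twap"
)

// twapOverLastDay shows a plausible call shape for the query whose error
// conditions are documented above. All names here are illustrative.
func twapOverLastDay(ctx sdk.Context, k twap.Keeper, poolId uint64, baseAssetDenom, quoteAssetDenom string) (osmomath.Dec, error) {
	endTime := ctx.BlockTime()
	startTime := endTime.Add(-24 * time.Hour) // must lie within the 48h keep period
	return k.GetArithmeticTwap(ctx, poolId, baseAssetDenom, quoteAssetDenom, startTime, endTime)
}
```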
@@ -106,32 +106,29 @@ computation of the TWAP, which is done via the geometric mean.
 - types/* - Implement TwapRecord, GenesisState. Define AMM interface, and methods to format keys.
 - twapmodule/module.go - SDK AppModule interface implementation.
 - api.go - Public API that other users / modules can/should depend on
-- listeners.go - Defines hooks & calls to logic.go, for triggering actions on 
+- listeners.go - Defines hooks & calls to logic.go, for triggering actions on
 - keeper.go - generic SDK boilerplate (defining a wrapper for store keys + params)
 - logic.go - Implements all TWAP module 'logic'. (Arithmetic, defining what to get/set where, etc.)
 - store.go - Managing logic for getting and setting things to underlying stores

 ## Store layout

-We maintain TWAP accumulation records for every AMM pool on Osmosis. 
+We maintain TWAP accumulation records for every AMM pool on Osmosis.

 Because Osmosis supports multi-asset pools, a complicating factor is that we have to store a record for every asset pair in the pool.
 For every pool, at a given point in time, we make one twap record entry per unique pair of denoms in the pool. If a pool has `k` denoms, the number of unique pairs is `k * (k - 1) / 2`.
 All public API's for the module will sort the input denoms to the canonical representation, so the caller does not need to worry about this. (The canonical representation is the denoms in lexicographical order)

 Example of historical TWAP pool indexed records for a pool containing 3 assets.

 * Number of records per time: `3 * (3 - 1) / 2 = 3`
 * Records are in a format:
-  HistoricalTWAPTimeIndexPrefix | time | pool id | denom1 | denom2
+  HistoricalTWAPPoolIndexPrefix | pool id | denom1 | denom2 | time |
 For our pool with Id = 1 and 3 assets: denomA, denomB and denomC:
-    historical_time_index|2009-11-10T23:00:00.000000000|1|denomA|denomB
-    historical_time_index|2009-11-10T23:00:00.000000000|1|denomA|denomC
-    historical_time_index|2009-11-10T23:00:00.000000000|1|denomB|denomC
-
-
-
+    historical_pool_index|1|denomA|denomB|2009-11-10T23:00:00.000000000|
+    historical_pool_index|1|denomA|denomC|2009-11-10T23:00:00.000000000|
+    historical_pool_index|1|denomB|denomC|2009-11-10T23:00:00.000000000|

 Each twap record stores [(source)](../../proto/osmosis/twap/v1beta1/twap_record.proto):

 * Last spot price of base asset A in terms of quote asset B
 * Last spot price of base asset B in terms of quote asset A
 * Accumulation value of base asset A in terms of quote asset B
 * Accumulation value of base asset B in terms of quote asset A

-important for calculation of arthmetic twap. 
+important for calculation of arithmetic twap.

-Besides those values, TWAP records currently hold: poolId, Asset0Denom, Asset1Denom, Height (for debugging purposes), Time and 
-Last error time - time in which the last spot price error occurred. This will allert the caller if they are getting a potentially erroneous TWAP.
+Besides those values, TWAP records currently hold: poolId, Asset0Denom, Asset1Denom, Height (for debugging purposes), Time and
+Last error time - the time at which the last spot price error occurred. This will alert the caller if they are getting a potentially erroneous TWAP.

-All TWAP records are indexed in state by the time of write.
+All TWAP records are indexed in state by pool id, denom pair, and then time of write.
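Reviewer note (not part of the patch): a minimal sketch, not from the codebase, of the canonical ordering rule described above. Public APIs sort the two denoms lexicographically, so a `k`-denom pool yields `k * (k - 1) / 2` distinct pair keys:

```go
package main

import "fmt"

// canonicalPair returns the denom pair in lexicographical order, mirroring
// the canonical representation the module's public APIs normalize to.
func canonicalPair(a, b string) (string, string) {
	if a < b {
		return a, b
	}
	return b, a
}

func main() {
	d1, d2 := canonicalPair("denomB", "denomA")
	fmt.Println(d1, d2) // denomA denomB

	k := 3
	fmt.Println(k * (k - 1) / 2) // 3 records per pool at each point in time
}
```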
@@ -161,8 +158,8 @@ During `EndBlock`, new records are created, with:
 In the event that a pool is created, and has a swap in the same block, the record entries are overwritten with the end block price.

-Error handling during records creation/updating: 
-* If there are issues with creating a record after pool creation, the creation of a pool will be aborted. 
+Error handling during records creation/updating:
+* If there are issues with creating a record after pool creation, the creation of a pool will be aborted.
 * Whereas, if there is an issue with updating records for a pool with potentially price changing events, existing errors will be ignored and the records will not be updated.

 ### Tracking spot-price changing events in a block

 and then clears on the block committing. This is done to save on gas (and I/O).

 To avoid infinite growth of the state with the TWAP records, we attempt to delete some old records after every epoch.
 Essentially, records older than a configurable parameter `RecordHistoryKeepPeriod` are pruned away. Currently, this parameter is set to 48 hours.
-Therefore, at the end of an epoch, records older than 48 hours before the current block time are pruned away. 
+Therefore, at the end of an epoch, records older than 48 hours before the current block time are pruned away.
 This could potentially leave the store with only one record, or no records at all, within the "keep" period, so the pruning mechanism keeps the newest record that is older than the pruning time. This record is necessary to enable us to interpolate from, and get TWAPs within, the "keep" period. Such a record is preserved for each pool.

 ## Changes for Concentrated Liquidity

 Post-TWAP launch, new pool types were introduced, one such example being the concentrated liquidity pool.
 In the context of `x/twap`, there are subtle differences in terms of when the spot price updates for a concentrated liquidity pool. As a result, their twap state updates are delivered by distinct listeners that implement a
-`concentratedliquiditytypes.ConcentratedLiquidityListener` interface. 
+`concentratedliquiditytypes.ConcentratedLiquidityListener` interface.

 See `x/concentrated-liquidity/README.md` for the details about these differences.

 The pre-release testing methodology planned for the twap module is:

 - The osmosis simulator simulates building up complex state machine states, in random ways not seen before. We plan on, in a property check, maintaining expected TWAPs for short time ranges, and seeing that the keeper query will return the same value as what we get off of the raw price history for short history intervals.
 - Not currently deemed release blocking, but planned: Integration for gas tracking, to ensure gas of reads/writes does not grow with time.
 - [ ] Mutation testing usage
-  - integration of the TWAP module into [go mutation testing](https://github.com/osmosis-labs/go-mutesting): 
+  - integration of the TWAP module into [go mutation testing](https://github.com/osmosis-labs/go-mutesting):
   - We've seen with the `tokenfactory` module that it succeeds at surfacing behavior for untested logic, e.g. if you delete a line, or change the direction of a conditional, mutation tests show if regular Go tests catch it.
   - We expect to get this to a state where, after mutation testing is run, the only items it mutates that are not caught in a test are: deleting `return err` or `panic` lines, in the situation where that error return or panic isn't reachable.
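Reviewer note (not part of the patch): a small self-contained sketch of the keep rule described in the pruning section above. A record survives if it is within the keep period, or if it is the newest record per pool older than the cutoff, which is what lets us interpolate TWAPs at the edge of the keep period. Names are illustrative.

```go
package main

import (
	"fmt"
	"time"
)

// isKept mirrors the pruning rule: keep anything at or after lastKeptTime,
// plus the single newest record per pool that falls before it.
func isKept(recordTime, lastKeptTime time.Time, isNewestBeforeCutoff bool) bool {
	return !recordTime.Before(lastKeptTime) || isNewestBeforeCutoff
}

func main() {
	cutoff := time.Now().Add(-48 * time.Hour) // RecordHistoryKeepPeriod
	old := cutoff.Add(-time.Minute)
	fmt.Println(isKept(old, cutoff, false)) // false: pruned
	fmt.Println(isKept(old, cutoff, true))  // true: kept for interpolation
}
```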
From c73b96cc4e09b90b2377d15b9ce82c81bc5a4989 Mon Sep 17 00:00:00 2001 From: Adam Tucker Date: Fri, 16 Feb 2024 10:17:17 -0700 Subject: [PATCH 14/15] readme --- x/twap/README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/x/twap/README.md b/x/twap/README.md index 5e0d0914322..7d14b0bbe89 100644 --- a/x/twap/README.md +++ b/x/twap/README.md @@ -122,13 +122,13 @@ All public API's for the module will sort the input denoms to the canonical repr Example of historical TWAP pool indexed records for a pool containing 3 assets. * Number of records per time: `3 * (3 - 1) / 2 = 3` * Records are in a format: - HistoricalTWAPPoolIndexPrefix | pool id | denom1 | denom2 | time | + HistoricalTWAPPoolIndexPrefix | pool id | denom1 | denom2 | time For our pool with Id = 1 and 3 assets: denomA, denomB and denomC: - historical_pool_index|1|denomA|denomB|2009-11-10T23:00:00.000000000| - historical_pool_index|1|denomA|denomC|2009-11-10T23:00:00.000000000| - historical_pool_index|1|denomB|denomC|2009-11-10T23:00:00.000000000| + historical_pool_index|1|denomA|denomB|2009-11-10T23:00:00.000000000 + historical_pool_index|1|denomA|denomC|2009-11-10T23:00:00.000000000 + historical_pool_index|1|denomB|denomC|2009-11-10T23:00:00.000000000 Each twap record stores [(source)](../../proto/osmosis/twap/v1beta1/twap_record.proto): From e2fd17e0e8465bbd00458e530b8d0e2961793ae1 Mon Sep 17 00:00:00 2001 From: Adam Tucker Date: Fri, 16 Feb 2024 10:18:24 -0700 Subject: [PATCH 15/15] add key sep to end --- x/twap/types/keys.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x/twap/types/keys.go b/x/twap/types/keys.go index 66f302ae50f..fca0e774759 100644 --- a/x/twap/types/keys.go +++ b/x/twap/types/keys.go @@ -56,7 +56,7 @@ func FormatMostRecentTWAPKey(poolId uint64, denom1, denom2 string) []byte { func FormatHistoricalPoolIndexDenomPairTWAPKey(poolId uint64, denom1, denom2 string) []byte { var buffer bytes.Buffer - fmt.Fprintf(&buffer, "%s%d%s%s%s%s", HistoricalTWAPPoolIndexPrefix, poolId, KeySeparator, denom1, KeySeparator, denom2) + fmt.Fprintf(&buffer, "%s%d%s%s%s%s%s", HistoricalTWAPPoolIndexPrefix, poolId, KeySeparator, denom1, KeySeparator, denom2, KeySeparator) return buffer.Bytes() }
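Reviewer note (not part of the patch): the patch itself does not state the motivation for the trailing separator in PATCH 15, but presumably it prevents prefix collisions during iteration, since a denom that is a byte-wise prefix of another denom would otherwise match. A standalone illustration:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// A key for the pair (denomA, denomBC) under the pool index.
	key := "historical_pool_index|1|denomA|denomBC|2009-11-10T23:00:00.000000000"

	// Without the trailing KeySeparator, iterating the (denomA, denomB)
	// prefix would wrongly pick up (denomA, denomBC) records as well.
	fmt.Println(strings.HasPrefix(key, "historical_pool_index|1|denomA|denomB"))  // true: collision
	fmt.Println(strings.HasPrefix(key, "historical_pool_index|1|denomA|denomB|")) // false: fixed by the trailing separator
}
```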