diff --git a/cmd/node/config/config.toml b/cmd/node/config/config.toml index 2c8122c2a22..b8c5e38c94a 100644 --- a/cmd/node/config/config.toml +++ b/cmd/node/config/config.toml @@ -303,8 +303,8 @@ [MultisigHasher] Type = "blake2b" -# The main marshalizer, used in internod communication -# Type idenftifies the marshalizer +# The main marshalizer, used in internodes communication +# Type identifies the marshalizer # SizeCheckDelta the maximum allow drift between the input data buffer and # the reencoded version (in percents). # 0 disables the feature. @@ -312,11 +312,11 @@ Type = "gogo protobuf" SizeCheckDelta = 0 -# The marshalizer used for smartcontracts data exchage +# The marshalizer used for smartcontracts data exchange [VmMarshalizer] Type = "json" -# The marshalizer used in transction signing +# The marshalizer used in transaction signing [TxSignMarshalizer] Type = "json" diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index ff1411d98da..624a5d4df8c 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -36,20 +36,17 @@ import ( "github.com/ElrondNetwork/elrond-go/data/typeConverters" "github.com/ElrondNetwork/elrond-go/data/typeConverters/uint64ByteSlice" "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool" - "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool/headersCache" + dataRetrieverFactory "github.com/ElrondNetwork/elrond-go/dataRetriever/factory" "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/containers" "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/resolverscontainer" - txpoolFactory "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/txpool" "github.com/ElrondNetwork/elrond-go/dataRetriever/requestHandlers" - "github.com/ElrondNetwork/elrond-go/dataRetriever/shardedData" - "github.com/ElrondNetwork/elrond-go/dataRetriever/txpool" "github.com/ElrondNetwork/elrond-go/epochStart" "github.com/ElrondNetwork/elrond-go/epochStart/genesis" metachainEpochStart "github.com/ElrondNetwork/elrond-go/epochStart/metachain" "github.com/ElrondNetwork/elrond-go/epochStart/shardchain" "github.com/ElrondNetwork/elrond-go/hashing" "github.com/ElrondNetwork/elrond-go/hashing/blake2b" + factoryHasher "github.com/ElrondNetwork/elrond-go/hashing/factory" "github.com/ElrondNetwork/elrond-go/hashing/sha256" "github.com/ElrondNetwork/elrond-go/marshal" factoryMarshalizer "github.com/ElrondNetwork/elrond-go/marshal/factory" @@ -130,6 +127,7 @@ type Core struct { VmMarshalizer marshal.Marshalizer TxSignMarshalizer marshal.Marshalizer TriesContainer state.TriesHolder + TrieStorageManagers map[string]data.StorageManager Uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter StatusHandler core.AppStatusHandler ChainID []byte @@ -198,7 +196,7 @@ func NewCoreComponentsFactoryArgs(config *config.Config, pathManager storage.Pat // CoreComponentsFactory creates the core components func CoreComponentsFactory(args *coreComponentsFactoryArgs) (*Core, error) { - hasher, err := getHasherFromConfig(args.config) + hasher, err := factoryHasher.NewHasher(args.config.Hasher.Type) if err != nil { return nil, errors.New("could not create hasher: " + err.Error()) } @@ -220,7 +218,7 @@ func CoreComponentsFactory(args *coreComponentsFactoryArgs) (*Core, error) { uint64ByteSliceConverter := uint64ByteSlice.NewBigEndianConverter() - trieContainer, err := createTries(args, internalMarshalizer, hasher) + trieStorageManagers, trieContainer, err := createTries(args, 
internalMarshalizer, hasher) if err != nil { return nil, err @@ -232,6 +230,7 @@ func CoreComponentsFactory(args *coreComponentsFactoryArgs) (*Core, error) { VmMarshalizer: vmMarshalizer, TxSignMarshalizer: txSignMarshalizer, TriesContainer: trieContainer, + TrieStorageManagers: trieStorageManagers, Uint64ByteSliceConverter: uint64ByteSliceConverter, StatusHandler: statusHandler.NewNilStatusHandler(), ChainID: args.chainID, @@ -242,10 +241,9 @@ func createTries( args *coreComponentsFactoryArgs, marshalizer marshal.Marshalizer, hasher hashing.Hasher, -) (state.TriesHolder, error) { +) (map[string]data.StorageManager, state.TriesHolder, error) { trieContainer := state.NewDataTriesHolder() - trieFactoryArgs := factory.TrieFactoryArgs{ EvictionWaitingListCfg: args.config.EvictionWaitingList, SnapshotDbCfg: args.config.TrieSnapshotDB, @@ -256,24 +254,25 @@ func createTries( } trieFactory, err := factory.NewTrieFactory(trieFactoryArgs) if err != nil { - return nil, err + return nil, nil, err } - merkleTrie, err := trieFactory.Create(args.config.AccountsTrieStorage, args.config.StateTriesConfig.AccountsStatePruningEnabled) + trieStorageManagers := make(map[string]data.StorageManager) + userStorageManager, userAccountTrie, err := trieFactory.Create(args.config.AccountsTrieStorage, args.config.StateTriesConfig.AccountsStatePruningEnabled) if err != nil { - return nil, err + return nil, nil, err } + trieContainer.Put([]byte(factory.UserAccountTrie), userAccountTrie) + trieStorageManagers[factory.UserAccountTrie] = userStorageManager - trieContainer.Put([]byte(factory.UserAccountTrie), merkleTrie) - - peerAccountsTrie, err := trieFactory.Create(args.config.PeerAccountsTrieStorage, args.config.StateTriesConfig.PeerStatePruningEnabled) + peerStorageManager, peerAccountsTrie, err := trieFactory.Create(args.config.PeerAccountsTrieStorage, args.config.StateTriesConfig.PeerStatePruningEnabled) if err != nil { - return nil, err + return nil, nil, err } - trieContainer.Put([]byte(factory.PeerAccountTrie), peerAccountsTrie) + trieStorageManagers[factory.PeerAccountTrie] = peerStorageManager - return trieContainer, nil + return trieStorageManagers, trieContainer, nil } type stateComponentsFactoryArgs struct { @@ -397,7 +396,12 @@ func DataComponentsFactory(args *dataComponentsFactoryArgs) (*Data, error) { return nil, errors.New("could not create local data store: " + err.Error()) } - datapool, err = createDataPoolFromConfig(args) + dataPoolArgs := dataRetrieverFactory.ArgsDataPool{ + Config: args.config, + EconomicsData: args.economicsData, + ShardCoordinator: args.shardCoordinator, + } + datapool, err = dataRetrieverFactory.NewDataPoolFromConfig(dataPoolArgs) if err != nil { return nil, errors.New("could not create data pools: ") } @@ -533,29 +537,29 @@ func NetworkComponentsFactory( } type processComponentsFactoryArgs struct { - coreComponents *coreComponentsFactoryArgs - genesisConfig *sharding.Genesis - economicsData *economics.EconomicsData - nodesConfig *sharding.NodesSetup - gasSchedule map[string]map[string]uint64 - syncer ntp.SyncTimer - shardCoordinator sharding.Coordinator - nodesCoordinator sharding.NodesCoordinator - data *Data - coreData *Core - crypto *Crypto - state *State - network *Network - coreServiceContainer serviceContainer.Core - requestedItemsHandler dataRetriever.RequestedItemsHandler - whiteListHandler process.InterceptedDataWhiteList - epochStartNotifier EpochStartNotifier - epochStart *config.EpochStartConfig - rater sharding.PeerAccountListAndRatingHandler - startEpochNum uint32 - 
sizeCheckDelta uint32 - stateCheckpointModulus uint - maxComputableRounds uint64 + coreComponents *coreComponentsFactoryArgs + genesisConfig *sharding.Genesis + economicsData *economics.EconomicsData + nodesConfig *sharding.NodesSetup + gasSchedule map[string]map[string]uint64 + syncer ntp.SyncTimer + shardCoordinator sharding.Coordinator + nodesCoordinator sharding.NodesCoordinator + data *Data + coreData *Core + crypto *Crypto + state *State + network *Network + coreServiceContainer serviceContainer.Core + requestedItemsHandler dataRetriever.RequestedItemsHandler + whiteListHandler process.InterceptedDataWhiteList + epochStartNotifier EpochStartNotifier + epochStart *config.EpochStartConfig + rater sharding.PeerAccountListAndRatingHandler + startEpochNum uint32 + sizeCheckDelta uint32 + stateCheckpointModulus uint + maxComputableRounds uint64 numConcurrentResolverJobs int32 minSizeInBytes uint32 maxSizeInBytes uint32 @@ -591,29 +595,29 @@ func NewProcessComponentsFactoryArgs( maxSizeInBytes uint32, ) *processComponentsFactoryArgs { return &processComponentsFactoryArgs{ - coreComponents: coreComponents, - genesisConfig: genesisConfig, - economicsData: economicsData, - nodesConfig: nodesConfig, - gasSchedule: gasSchedule, - syncer: syncer, - shardCoordinator: shardCoordinator, - nodesCoordinator: nodesCoordinator, - data: data, - coreData: coreData, - crypto: crypto, - state: state, - network: network, - coreServiceContainer: coreServiceContainer, - requestedItemsHandler: requestedItemsHandler, - whiteListHandler: whiteListHandler, - epochStartNotifier: epochStartNotifier, - epochStart: epochStart, - startEpochNum: startEpochNum, - rater: rater, - sizeCheckDelta: sizeCheckDelta, - stateCheckpointModulus: stateCheckpointModulus, - maxComputableRounds: maxComputableRounds, + coreComponents: coreComponents, + genesisConfig: genesisConfig, + economicsData: economicsData, + nodesConfig: nodesConfig, + gasSchedule: gasSchedule, + syncer: syncer, + shardCoordinator: shardCoordinator, + nodesCoordinator: nodesCoordinator, + data: data, + coreData: coreData, + crypto: crypto, + state: state, + network: network, + coreServiceContainer: coreServiceContainer, + requestedItemsHandler: requestedItemsHandler, + whiteListHandler: whiteListHandler, + epochStartNotifier: epochStartNotifier, + epochStart: epochStart, + startEpochNum: startEpochNum, + rater: rater, + sizeCheckDelta: sizeCheckDelta, + stateCheckpointModulus: stateCheckpointModulus, + maxComputableRounds: maxComputableRounds, numConcurrentResolverJobs: numConcurrentResolverJobs, minSizeInBytes: minSizeInBytes, maxSizeInBytes: maxSizeInBytes, @@ -672,6 +676,7 @@ func ProcessComponentsFactory(args *processComponentsFactoryArgs) (*Process, err args.whiteListHandler, MaxTxsToRequest, args.shardCoordinator.SelfId(), + time.Second, ) if err != nil { return nil, err @@ -939,17 +944,6 @@ func CreateSoftwareVersionChecker(statusHandler core.AppStatusHandler) (*softwar return softwareVersionChecker, nil } -func getHasherFromConfig(cfg *config.Config) (hashing.Hasher, error) { - switch cfg.Hasher.Type { - case "sha256": - return sha256.Sha256{}, nil - case "blake2b": - return &blake2b.Blake2b{}, nil - } - - return nil, errors.New("no hasher provided in config file") -} - func createBlockChainFromConfig(coordinator sharding.Coordinator, ash core.AppStatusHandler) (data.ChainHandler, error) { if coordinator == nil { @@ -1005,78 +999,6 @@ func createDataStoreFromConfig( return nil, errors.New("can not create data store") } -func createDataPoolFromConfig(args 
*dataComponentsFactoryArgs) (dataRetriever.PoolsHolder, error) { - log.Debug("creatingDataPool from config") - - mainConfig := args.config - - txPool, err := txpoolFactory.CreateTxPool(txpool.ArgShardedTxPool{ - Config: storageFactory.GetCacherFromConfig(mainConfig.TxDataPool), - MinGasPrice: args.economicsData.MinGasPrice(), - NumberOfShards: args.shardCoordinator.NumberOfShards(), - SelfShardID: args.shardCoordinator.SelfId(), - }) - if err != nil { - log.Error("error creating txpool") - return nil, err - } - - uTxPool, err := shardedData.NewShardedData(storageFactory.GetCacherFromConfig(mainConfig.UnsignedTransactionDataPool)) - if err != nil { - log.Error("error creating smart contract result pool") - return nil, err - } - - rewardTxPool, err := shardedData.NewShardedData(storageFactory.GetCacherFromConfig(mainConfig.RewardTransactionDataPool)) - if err != nil { - log.Error("error creating reward transaction pool") - return nil, err - } - - hdrPool, err := headersCache.NewHeadersPool(mainConfig.HeadersPoolConfig) - if err != nil { - log.Error("error creating headers pool") - return nil, err - } - - cacherCfg := storageFactory.GetCacherFromConfig(mainConfig.TxBlockBodyDataPool) - txBlockBody, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - if err != nil { - log.Error("error creating txBlockBody") - return nil, err - } - - cacherCfg = storageFactory.GetCacherFromConfig(mainConfig.PeerBlockBodyDataPool) - peerChangeBlockBody, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - if err != nil { - log.Error("error creating peerChangeBlockBody") - return nil, err - } - - cacherCfg = storageFactory.GetCacherFromConfig(mainConfig.TrieNodesDataPool) - trieNodes, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - if err != nil { - log.Info("error creating trieNodes") - return nil, err - } - - currBlockTxs, err := dataPool.NewCurrentBlockPool() - if err != nil { - return nil, err - } - - return dataPool.NewDataPool( - txPool, - uTxPool, - rewardTxPool, - hdrPool, - txBlockBody, - peerChangeBlockBody, - trieNodes, - currBlockTxs, - ) -} - func createSingleSigner(config *config.Config) (crypto.SingleSigner, error) { switch config.Consensus.Type { case BlsConsensusType: @@ -1488,15 +1410,10 @@ func generateGenesisHeadersAndApplyInitialBalances(args *processComponentsFactor return nil, err } - newStore, newBlkc, errPoolCreation := createInMemoryStoreBlkc(newShardCoordinator) - if errPoolCreation != nil { - return nil, errPoolCreation - } - + newBlockChain := blockchain.NewMetaChain() argsMetaGenesis.ShardCoordinator = newShardCoordinator argsMetaGenesis.Accounts = newAccounts - argsMetaGenesis.Store = newStore - argsMetaGenesis.Blkc = newBlkc + argsMetaGenesis.Blkc = newBlockChain } genesisBlock, err := genesis.CreateMetaGenesisBlock( @@ -1516,28 +1433,6 @@ func generateGenesisHeadersAndApplyInitialBalances(args *processComponentsFactor return genesisBlocks, nil } -func createInMemoryStoreBlkc( - shardCoordinator sharding.Coordinator, -) (dataRetriever.StorageService, data.ChainHandler, error) { - blkc := blockchain.NewMetaChain() - - store := dataRetriever.NewChainStorer() - store.AddStorer(dataRetriever.MetaBlockUnit, createMemUnit()) - store.AddStorer(dataRetriever.BlockHeaderUnit, createMemUnit()) - store.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, createMemUnit()) - store.AddStorer(dataRetriever.TransactionUnit, createMemUnit()) - store.AddStorer(dataRetriever.RewardTransactionUnit, createMemUnit()) - 
store.AddStorer(dataRetriever.UnsignedTransactionUnit, createMemUnit()) - store.AddStorer(dataRetriever.MiniBlockUnit, createMemUnit()) - for i := uint32(0); i < shardCoordinator.NumberOfShards(); i++ { - hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(i) - store.AddStorer(hdrNonceHashDataUnit, createMemUnit()) - } - store.AddStorer(dataRetriever.HeartbeatUnit, createMemUnit()) - - return store, blkc, nil -} - func createGenesisBlockAndApplyInitialBalances( accounts state.AccountsAdapter, shardCoordinator sharding.Coordinator, diff --git a/cmd/node/main.go b/cmd/node/main.go index 2fed977e982..5f98e109a9f 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -34,6 +34,7 @@ import ( "github.com/ElrondNetwork/elrond-go/data/typeConverters" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/epochStart" + "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap" "github.com/ElrondNetwork/elrond-go/epochStart/notifier" "github.com/ElrondNetwork/elrond-go/facade" "github.com/ElrondNetwork/elrond-go/hashing" @@ -53,7 +54,6 @@ import ( "github.com/ElrondNetwork/elrond-go/process/transaction" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/storage" - storageFactory "github.com/ElrondNetwork/elrond-go/storage/factory" "github.com/ElrondNetwork/elrond-go/storage/lrucache" "github.com/ElrondNetwork/elrond-go/storage/pathmanager" "github.com/ElrondNetwork/elrond-go/storage/storageUnit" @@ -64,13 +64,14 @@ import ( ) const ( - defaultStatsPath = "stats" - defaultLogsPath = "logs" - defaultDBPath = "db" - defaultEpochString = "Epoch" - defaultStaticDbString = "Static" - defaultShardString = "Shard" - metachainShardName = "metachain" + defaultStatsPath = "stats" + defaultLogsPath = "logs" + defaultDBPath = "db" + defaultEpochString = "Epoch" + defaultStaticDbString = "Static" + defaultShardString = "Shard" + metachainShardName = "metachain" + secondsToWaitForP2PBootstrap = 20 ) var ( @@ -491,7 +492,7 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { } log.Debug("config", "file", ctx.GlobalString(genesisFile.Name)) - nodesConfig, err := sharding.NewNodesSetup(ctx.GlobalString(nodesFile.Name)) + genesisNodesConfig, err := sharding.NewNodesSetup(ctx.GlobalString(nodesFile.Name)) if err != nil { return err } @@ -503,13 +504,13 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { log.Debug("NTP average clock offset", "value", syncer.ClockOffset()) //TODO: The next 5 lines should be deleted when we are done testing from a precalculated (not hard coded) timestamp - if nodesConfig.StartTime == 0 { + if genesisNodesConfig.StartTime == 0 { time.Sleep(1000 * time.Millisecond) ntpTime := syncer.CurrentTime() - nodesConfig.StartTime = (ntpTime.Unix()/60 + 1) * 60 + genesisNodesConfig.StartTime = (ntpTime.Unix()/60 + 1) * 60 } - startTime := time.Unix(nodesConfig.StartTime, 0) + startTime := time.Unix(genesisNodesConfig.StartTime, 0) log.Info("start time", "formatted", startTime.Format("Mon Jan 2 15:04:05 MST 2006"), @@ -541,18 +542,15 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { preferencesConfig.Preferences.NodeDisplayName = ctx.GlobalString(nodeDisplayName.Name) } - shardCoordinator, nodeType, err := createShardCoordinator(nodesConfig, pubKey, preferencesConfig.Preferences, log) + err = cleanupStorageIfNecessary(workingDir, ctx, log) if err != nil { return err } - var shardId = 
core.GetShardIdString(shardCoordinator.SelfId()) - logger.SetCorrelationShard(shardId) - pathTemplateForPruningStorer := filepath.Join( workingDir, defaultDBPath, - nodesConfig.ChainID, + genesisNodesConfig.ChainID, fmt.Sprintf("%s_%s", defaultEpochString, core.PathEpochPlaceholder), fmt.Sprintf("%s_%s", defaultShardString, core.PathShardPlaceholder), core.PathIdentifierPlaceholder) @@ -560,7 +558,7 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { pathTemplateForStaticStorer := filepath.Join( workingDir, defaultDBPath, - nodesConfig.ChainID, + genesisNodesConfig.ChainID, defaultStaticDbString, fmt.Sprintf("%s_%s", defaultShardString, core.PathShardPlaceholder), core.PathIdentifierPlaceholder) @@ -571,53 +569,45 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { return err } - var currentEpoch uint32 - var lastRound int64 - var lastShardId uint32 - var errNotCritical error - currentEpoch, lastShardId, lastRound, errNotCritical = storageFactory.FindLatestDataFromStorage( // TODO: use last round and shard ID - *generalConfig, - &marshal.GogoProtoMarshalizer{}, // TODO: remove hardcoded marshalizer when start in epoch is merged. - workingDir, - nodesConfig.ChainID, - defaultDBPath, - defaultEpochString, - defaultShardString, - ) - if errNotCritical != nil { - currentEpoch = 0 - log.Debug("no epoch db found in storage", "error", errNotCritical.Error()) - } else { - log.Debug("got last data from storage", - "epoch", currentEpoch, - "last round", lastRound, - "last shard ID", lastShardId) - } - if !generalConfig.StoragePruning.Enabled { - // TODO: refactor this as when the pruning storer is disabled, the default directory path is Epoch_0 - // and it should be Epoch_ALL or something similar - currentEpoch = 0 + genesisShardCoordinator, nodeType, err := createShardCoordinator(genesisNodesConfig, pubKey, preferencesConfig.Preferences, log) + if err != nil { + return err } + var shardId = core.GetShardIdString(genesisShardCoordinator.SelfId()) - storageCleanupFlagValue := ctx.GlobalBool(storageCleanup.Name) - if storageCleanupFlagValue { - dbPath := filepath.Join( - workingDir, - defaultDBPath) - log.Trace("cleaning storage", "path", dbPath) - err = os.RemoveAll(dbPath) - if err != nil { - return err - } + log.Trace("creating crypto components") + cryptoArgs := factory.NewCryptoComponentsFactoryArgs( + ctx, + generalConfig, + genesisNodesConfig, + genesisShardCoordinator, + keyGen, + privKey, + log, + ) + cryptoComponents, err := factory.CryptoComponentsFactory(cryptoArgs) + if err != nil { + return err } log.Trace("creating core components") - coreArgs := factory.NewCoreComponentsFactoryArgs(generalConfig, pathManager, shardId, []byte(nodesConfig.ChainID)) + coreArgs := factory.NewCoreComponentsFactoryArgs(generalConfig, pathManager, shardId, []byte(genesisNodesConfig.ChainID)) coreComponents, err := factory.CoreComponentsFactory(coreArgs) if err != nil { return err } + log.Trace("creating network components") + networkComponents, err := factory.NetworkComponentsFactory(*p2pConfig, *generalConfig, coreComponents.StatusHandler) + if err != nil { + return err + } + err = networkComponents.NetMessenger.Bootstrap() + if err != nil { + return err + } + time.Sleep(secondsToWaitForP2PBootstrap * time.Second) + log.Trace("creating economics data components") economicsData, err := economics.NewEconomicsData(economicsConfig) if err != nil { @@ -629,6 +619,56 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { return err } + 
epochStartBootstrapArgs := bootstrap.ArgsEpochStartBootstrap{ + PublicKey: pubKey, + Marshalizer: coreComponents.InternalMarshalizer, + TxSignMarshalizer: coreComponents.TxSignMarshalizer, + Hasher: coreComponents.Hasher, + Messenger: networkComponents.NetMessenger, + GeneralConfig: *generalConfig, + EconomicsData: economicsData, + SingleSigner: cryptoComponents.TxSingleSigner, + BlockSingleSigner: cryptoComponents.SingleSigner, + KeyGen: cryptoComponents.TxSignKeyGen, + BlockKeyGen: cryptoComponents.BlockSignKeyGen, + GenesisNodesConfig: genesisNodesConfig, + GenesisShardCoordinator: genesisShardCoordinator, + PathManager: pathManager, + WorkingDir: workingDir, + DefaultDBPath: defaultDBPath, + DefaultEpochString: defaultEpochString, + DefaultShardString: defaultShardString, + Rater: rater, + DestinationShardAsObserver: ctx.GlobalString(destinationShardAsObserver.Name), + TrieContainer: coreComponents.TriesContainer, + TrieStorageManagers: coreComponents.TrieStorageManagers, + } + bootstrapper, err := bootstrap.NewEpochStartBootstrap(epochStartBootstrapArgs) + if err != nil { + log.Error("could not create bootsrapper", "err", err) + return err + } + bootstrapParameters, err := bootstrapper.Bootstrap() + if err != nil { + log.Error("boostrap return error", "error", err) + return err + } + + currentEpoch := bootstrapParameters.Epoch + if !generalConfig.StoragePruning.Enabled { + // TODO: refactor this as when the pruning storer is disabled, the default directory path is Epoch_0 + // and it should be Epoch_ALL or something similar + currentEpoch = 0 + } + + shardCoordinator, err := sharding.NewMultiShardCoordinator(bootstrapParameters.NumOfShards, bootstrapParameters.SelfShardId) + if err != nil { + return err + } + + var shardIdString = core.GetShardIdString(shardCoordinator.SelfId()) + logger.SetCorrelationShard(shardIdString) + log.Trace("initializing stats file") err = initStatsFileMonitor(generalConfig, pubKey, log, workingDir, pathManager, shardId) if err != nil { @@ -652,7 +692,7 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { } log.Trace("initializing metrics") - metrics.InitMetrics(coreComponents.StatusHandler, pubKey, nodeType, shardCoordinator, nodesConfig, version, economicsConfig) + metrics.InitMetrics(coreComponents.StatusHandler, pubKey, nodeType, shardCoordinator, genesisNodesConfig, version, economicsConfig) err = statusHandlersInfo.UpdateStorerAndMetricsForPersistentHandler(dataComponents.Store.GetStorer(dataRetriever.StatusMetricsUnit)) if err != nil { @@ -671,7 +711,7 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { } nodesCoordinator, err := createNodesCoordinator( - nodesConfig, + genesisNodesConfig, preferencesConfig.Preferences, epochStartNotifier, pubKey, @@ -701,23 +741,8 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { return err } - log.Trace("creating crypto components") - cryptoArgs := factory.NewCryptoComponentsFactoryArgs( - ctx, - generalConfig, - nodesConfig, - shardCoordinator, - keyGen, - privKey, - log, - ) - cryptoComponents, err := factory.CryptoComponentsFactory(cryptoArgs) - if err != nil { - return err - } - metrics.SaveStringMetric(coreComponents.StatusHandler, core.MetricNodeDisplayName, preferencesConfig.Preferences.NodeDisplayName) - metrics.SaveStringMetric(coreComponents.StatusHandler, core.MetricChainId, nodesConfig.ChainID) + metrics.SaveStringMetric(coreComponents.StatusHandler, core.MetricChainId, genesisNodesConfig.ChainID) 
metrics.SaveUint64Metric(coreComponents.StatusHandler, core.MetricMinGasPrice, economicsData.MinGasPrice()) sessionInfoFileOutput := fmt.Sprintf("%s:%s\n%s:%s\n%s:%v\n%s:%s\n%s:%v\n", @@ -753,14 +778,8 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { err = ioutil.WriteFile(statsFile, []byte(sessionInfoFileOutput), os.ModePerm) log.LogIfError(err) - log.Trace("creating network components") - networkComponents, err := factory.NetworkComponentsFactory(*p2pConfig, *generalConfig, coreComponents.StatusHandler) - if err != nil { - return err - } - log.Trace("creating tps benchmark components") - tpsBenchmark, err := statistics.NewTPSBenchmark(shardCoordinator.NumberOfShards(), nodesConfig.RoundDuration/1000) + tpsBenchmark, err := statistics.NewTPSBenchmark(shardCoordinator.NumberOfShards(), genesisNodesConfig.RoundDuration/1000) if err != nil { return err } @@ -792,7 +811,7 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { } log.Trace("creating time cache for requested items components") - requestedItemsHandler := timecache.NewTimeCache(time.Duration(uint64(time.Millisecond) * nodesConfig.RoundDuration)) + requestedItemsHandler := timecache.NewTimeCache(time.Duration(uint64(time.Millisecond) * genesisNodesConfig.RoundDuration)) whiteListCache, err := storageUnit.NewCache( storageUnit.CacheType(generalConfig.WhiteListPool.Type), @@ -812,7 +831,7 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { coreArgs, genesisConfig, economicsData, - nodesConfig, + genesisNodesConfig, gasSchedule, syncer, shardCoordinator, @@ -827,7 +846,7 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { whiteListHandler, epochStartNotifier, &generalConfig.EpochStartConfig, - 0, + currentEpoch, rater, generalConfig.Marshalizer.SizeCheckDelta, generalConfig.StateTriesConfig.CheckpointRoundsModulus, @@ -852,7 +871,7 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { currentNode, err := createNode( generalConfig, preferencesConfig, - nodesConfig, + genesisNodesConfig, economicsData, syncer, keyGen, @@ -949,7 +968,7 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { log.Trace("starting background services") ef.StartBackgroundServices() - log.Debug("bootstrapping node...") + log.Debug("starting node...") err = ef.StartNode() if err != nil { log.Error("starting node failed", "epoch", currentEpoch, "error", err.Error()) @@ -977,6 +996,9 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { log.LogIfError(err) } + err = networkComponents.NetMessenger.Close() + log.LogIfError(err) + log.Info("closing network connections...") err = networkComponents.NetMessenger.Close() log.LogIfError(err) @@ -984,6 +1006,21 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { return nil } +func cleanupStorageIfNecessary(workingDir string, ctx *cli.Context, log logger.Logger) error { + storageCleanupFlagValue := ctx.GlobalBool(storageCleanup.Name) + if storageCleanupFlagValue { + dbPath := filepath.Join( + workingDir, + defaultDBPath) + log.Trace("cleaning storage", "path", dbPath) + err := os.RemoveAll(dbPath) + if err != nil { + return err + } + } + return nil +} + func copyConfigToStatsFolder(statsFolder string, configs []string) { for _, configFile := range configs { copySingleFile(statsFolder, configFile) @@ -1159,8 +1196,7 @@ func createShardCoordinator( prefsConfig config.PreferencesConfig, log logger.Logger, ) 
(sharding.Coordinator, core.NodeType, error) { - // TODO: after start in epoch is merged, this needs to be refactored as the shardID cannot always be taken - // from initial configuration but needs to be determined by nodes coordinator + selfShardId, err := getShardIdFromNodePubKey(pubKey, nodesConfig) nodeType := core.NodeTypeValidator if err == sharding.ErrPublicKeyNotFoundInGenesis { @@ -1208,12 +1244,12 @@ func createNodesCoordinator( metaConsensusGroupSize := int(nodesConfig.MetaChainConsensusGroupSize) eligibleNodesInfo, waitingNodesInfo := nodesConfig.InitialNodesInfo() - eligibleValidators, errEligibleValidators := nodesInfoToValidators(eligibleNodesInfo) + eligibleValidators, errEligibleValidators := sharding.NodesInfoToValidators(eligibleNodesInfo) if errEligibleValidators != nil { return nil, errEligibleValidators } - waitingValidators, errWaitingValidators := nodesInfoToValidators(waitingNodesInfo) + waitingValidators, errWaitingValidators := sharding.NodesInfoToValidators(waitingNodesInfo) if errWaitingValidators != nil { return nil, errWaitingValidators } @@ -1238,7 +1274,6 @@ func createNodesCoordinator( argumentsNodesCoordinator := sharding.ArgNodesCoordinator{ ShardConsensusGroupSize: shardConsensusGroupSize, MetaConsensusGroupSize: metaConsensusGroupSize, - ListIndexUpdater: ratingAndListIndexHandler, Hasher: hasher, Shuffler: nodeShuffler, EpochStartNotifier: epochStartNotifier, @@ -1264,25 +1299,6 @@ func createNodesCoordinator( return nodesCoordinator, nil } -func nodesInfoToValidators(nodesInfo map[uint32][]*sharding.NodeInfo) (map[uint32][]sharding.Validator, error) { - validatorsMap := make(map[uint32][]sharding.Validator) - - for shId, nodeInfoList := range nodesInfo { - validators := make([]sharding.Validator, 0) - for _, nodeInfo := range nodeInfoList { - validator, err := sharding.NewValidator(nodeInfo.PubKey(), nodeInfo.Address()) - if err != nil { - return nil, err - } - - validators = append(validators, validator) - } - validatorsMap[shId] = validators - } - - return validatorsMap, nil -} - func processDestinationShardAsObserver(prefsConfig config.PreferencesConfig) (uint32, error) { destShard := strings.ToLower(prefsConfig.DestinationShardAsObserver) if len(destShard) == 0 { diff --git a/consensus/mock/nodesCoordinatorMock.go b/consensus/mock/nodesCoordinatorMock.go index be2fe1f28f8..451c9f1ee7c 100644 --- a/consensus/mock/nodesCoordinatorMock.go +++ b/consensus/mock/nodesCoordinatorMock.go @@ -11,14 +11,14 @@ type NodesCoordinatorMock struct { GetValidatorsRewardsAddressesCalled func(randomness []byte, round uint64, shardId uint32, epoch uint32) ([]string, error) } -// GetWaitingPublicKeysPerShard - -func (ncm *NodesCoordinatorMock) GetWaitingPublicKeysPerShard(epoch uint32) (map[uint32][][]byte, error) { - return nil, nil +// SetConfig - +func (ncm *NodesCoordinatorMock) SetConfig(_ *sharding.NodesCoordinatorRegistry) error { + return nil } -// UpdatePeersListAndIndex - -func (ncm *NodesCoordinatorMock) UpdatePeersListAndIndex() error { - return nil +// GetWaitingPublicKeysPerShard - +func (ncm *NodesCoordinatorMock) GetWaitingPublicKeysPerShard(_ uint32) (map[uint32][][]byte, error) { + return nil, nil } // ComputeConsensusGroup - @@ -93,30 +93,6 @@ func (ncm *NodesCoordinatorMock) GetConsensusValidatorsPublicKeys(randomness []b return pubKeys, nil } -// GetConsensusValidatorsRewardsAddresses - -func (ncm *NodesCoordinatorMock) GetConsensusValidatorsRewardsAddresses( - randomness []byte, - round uint64, - shardId uint32, - epoch uint32, -) ([]string, 
error) { - if ncm.GetValidatorsPublicKeysCalled != nil { - return ncm.GetValidatorsRewardsAddressesCalled(randomness, round, shardId, epoch) - } - - validators, err := ncm.ComputeConsensusGroup(randomness, round, shardId, epoch) - if err != nil { - return nil, err - } - - addresses := make([]string, 0) - for _, v := range validators { - addresses = append(addresses, string(v.Address())) - } - - return addresses, nil -} - // LoadState - func (ncm *NodesCoordinatorMock) LoadState(_ []byte) error { return nil @@ -141,7 +117,7 @@ func (ncm *NodesCoordinatorMock) GetConsensusWhitelistedNodes( } // SetNodesPerShards - -func (ncm *NodesCoordinatorMock) SetNodesPerShards(_ map[uint32][]sharding.Validator, _ map[uint32][]sharding.Validator, _ uint32, _ bool) error { +func (ncm *NodesCoordinatorMock) SetNodesPerShards(_ map[uint32][]sharding.Validator, _ map[uint32][]sharding.Validator, _ uint32) error { return nil } diff --git a/consensus/spos/bls/subroundStartRound.go b/consensus/spos/bls/subroundStartRound.go index a1092149d3c..a603d77bdd8 100644 --- a/consensus/spos/bls/subroundStartRound.go +++ b/consensus/spos/bls/subroundStartRound.go @@ -224,7 +224,7 @@ func (sr *subroundStartRound) generateNextConsensusGroup(roundIndex int64) error shardId := sr.ShardCoordinator().SelfId() - nextConsensusGroup, _, err := sr.GetNextConsensusGroup( + nextConsensusGroup, err := sr.GetNextConsensusGroup( randomSeed, uint64(sr.RoundIndex), shardId, diff --git a/consensus/spos/consensusState.go b/consensus/spos/consensusState.go index 11e4b5d87a1..480b49c8a2c 100644 --- a/consensus/spos/consensusState.go +++ b/consensus/spos/consensusState.go @@ -129,7 +129,7 @@ func (cns *ConsensusState) GetNextConsensusGroup( shardId uint32, nodesCoordinator sharding.NodesCoordinator, epoch uint32, -) ([]string, []string, error) { +) ([]string, error) { validatorsGroup, err := nodesCoordinator.ComputeConsensusGroup(randomSource, round, shardId, epoch) if err != nil { log.Debug( @@ -140,19 +140,17 @@ func (cns *ConsensusState) GetNextConsensusGroup( "shardId", shardId, "epoch", epoch, ) - return nil, nil, err + return nil, err } consensusSize := len(validatorsGroup) newConsensusGroup := make([]string, consensusSize) - consensusRewardAddresses := make([]string, consensusSize) for i := 0; i < consensusSize; i++ { newConsensusGroup[i] = string(validatorsGroup[i].PubKey()) - consensusRewardAddresses[i] = string(validatorsGroup[i].Address()) } - return newConsensusGroup, consensusRewardAddresses, nil + return newConsensusGroup, nil } // IsConsensusDataSet method returns true if the consensus data for the current round is set and false otherwise diff --git a/consensus/spos/consensusState_test.go b/consensus/spos/consensusState_test.go index c40d3f8f7e4..bdd161b5f90 100644 --- a/consensus/spos/consensusState_test.go +++ b/consensus/spos/consensusState_test.go @@ -151,7 +151,7 @@ func TestConsensusState_GetNextConsensusGroupShouldFailWhenComputeValidatorsGrou return nil, err } - _, _, err2 := cns.GetNextConsensusGroup([]byte(""), 0, 0, nodesCoordinator, 0) + _, err2 := cns.GetNextConsensusGroup([]byte(""), 0, 0, nodesCoordinator, 0) assert.Equal(t, err, err2) } @@ -162,10 +162,9 @@ func TestConsensusState_GetNextConsensusGroupShouldWork(t *testing.T) { nodesCoordinator := &mock.NodesCoordinatorMock{} - nextConsensusGroup, rewardAddresses, err := cns.GetNextConsensusGroup(nil, 0, 0, nodesCoordinator, 0) + nextConsensusGroup, err := cns.GetNextConsensusGroup(nil, 0, 0, nodesCoordinator, 0) assert.Nil(t, err) assert.NotNil(t, 
nextConsensusGroup) - assert.NotNil(t, rewardAddresses) } func TestConsensusState_IsConsensusDataSetShouldReturnTrue(t *testing.T) { diff --git a/core/common.go b/core/common.go new file mode 100644 index 00000000000..ca33765a674 --- /dev/null +++ b/core/common.go @@ -0,0 +1,14 @@ +package core + +// EmptyChannel empties the given channel +func EmptyChannel(ch chan bool) int { + readsCnt := 0 + for { + select { + case <-ch: + readsCnt++ + default: + return readsCnt + } + } +} diff --git a/core/common_test.go b/core/common_test.go new file mode 100644 index 00000000000..572f81480bb --- /dev/null +++ b/core/common_test.go @@ -0,0 +1,66 @@ +package core + +import ( + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestEmptyChannelShouldWorkOnBufferedChannel(t *testing.T) { + ch := make(chan bool, 10) + + assert.Equal(t, 0, len(ch)) + readsCnt := EmptyChannel(ch) + assert.Equal(t, 0, len(ch)) + assert.Equal(t, 0, readsCnt) + + ch <- true + ch <- true + ch <- true + + assert.Equal(t, 3, len(ch)) + readsCnt = EmptyChannel(ch) + assert.Equal(t, 0, len(ch)) + assert.Equal(t, 3, readsCnt) +} + +func TestEmptyChannelShouldWorkOnNotBufferedChannel(t *testing.T) { + ch := make(chan bool) + + assert.Equal(t, 0, len(ch)) + readsCnt := int32(EmptyChannel(ch)) + assert.Equal(t, 0, len(ch)) + assert.Equal(t, int32(0), readsCnt) + + wg := sync.WaitGroup{} + wgChanWasWritten := sync.WaitGroup{} + numConcurrentWrites := 50 + wg.Add(numConcurrentWrites) + wgChanWasWritten.Add(numConcurrentWrites) + for i := 0; i < numConcurrentWrites; i++ { + go func() { + wg.Done() + time.Sleep(time.Millisecond) + ch <- true + wgChanWasWritten.Done() + }() + } + + // wait for go routines to start + wg.Wait() + + go func() { + for readsCnt < int32(numConcurrentWrites) { + atomic.AddInt32(&readsCnt, int32(EmptyChannel(ch))) + } + }() + + // wait for go routines to finish + wgChanWasWritten.Wait() + + assert.Equal(t, 0, len(ch)) + assert.Equal(t, int32(numConcurrentWrites), atomic.LoadInt32(&readsCnt)) +} diff --git a/core/constants.go b/core/constants.go index e8b4df1a5d8..443d2283600 100644 --- a/core/constants.go +++ b/core/constants.go @@ -296,3 +296,15 @@ const MetricP2PUnknownPeers = "erd_p2p_unknown_shard_peers" // MetricP2PNumConnectedPeersClassification is the metric for monitoring the number of connected peers split on the connection type const MetricP2PNumConnectedPeersClassification = "erd_p2p_num_connected_peers_classification" + +// HighestRoundFromBootStorage is the key for the highest round that is saved in storage +const HighestRoundFromBootStorage = "highestRoundFromBootStorage" + +// TriggerRegistryKeyPrefix is the key prefix to save epoch start registry to storage +const TriggerRegistryKeyPrefix = "epochStartTrigger_" + +// TriggerRegistryInitialKeyPrefix is the key prefix to save initial data to storage +const TriggerRegistryInitialKeyPrefix = "initial_value_epoch_" + +// NodesCoordinatorRegistryKeyPrefix is the key prefix to save epoch start registry to storage +const NodesCoordinatorRegistryKeyPrefix = "indexHashed_" diff --git a/core/mock/chainStorerMock.go b/core/mock/chainStorerMock.go index 8c8b2407124..22048d7cf5c 100644 --- a/core/mock/chainStorerMock.go +++ b/core/mock/chainStorerMock.go @@ -5,7 +5,7 @@ import ( "github.com/ElrondNetwork/elrond-go/storage" ) -// ChainStorerMock is a mock implementation of the ChianStorer interface +// ChainStorerMock is a mock implementation of the ChainStorer interface type ChainStorerMock struct { AddStorerCalled 
func(key dataRetriever.UnitType, s storage.Storer) GetStorerCalled func(unitType dataRetriever.UnitType) storage.Storer diff --git a/data/interface.go b/data/interface.go index 04c0080dff9..631080b6e98 100644 --- a/data/interface.go +++ b/data/interface.go @@ -1,6 +1,7 @@ package data import ( + "context" "math/big" "github.com/ElrondNetwork/elrond-go/config" @@ -152,7 +153,7 @@ type DBRemoveCacher interface { // TrieSyncer synchronizes the trie, asking on the network for the missing nodes type TrieSyncer interface { - StartSyncing(rootHash []byte) error + StartSyncing(rootHash []byte, ctx context.Context) error Trie() Trie IsInterfaceNil() bool } @@ -174,7 +175,7 @@ type StorageManager interface { // TrieFactory creates new tries type TrieFactory interface { - Create(config.StorageConfig, bool) (Trie, error) + Create(config.StorageConfig, bool) (StorageManager, Trie, error) IsInterfaceNil() bool } diff --git a/data/mock/nodesCoordinatorMock.go b/data/mock/nodesCoordinatorMock.go index 56b10c7af69..690686fe486 100644 --- a/data/mock/nodesCoordinatorMock.go +++ b/data/mock/nodesCoordinatorMock.go @@ -90,30 +90,6 @@ func (ncm *NodesCoordinatorMock) GetConsensusValidatorsPublicKeys( return valGrStr, nil } -// GetConsensusValidatorsRewardsAddresses - -func (ncm *NodesCoordinatorMock) GetConsensusValidatorsRewardsAddresses( - randomness []byte, - round uint64, - shardId uint32, - epoch uint32, -) ([]string, error) { - if ncm.GetValidatorsPublicKeysCalled != nil { - return ncm.GetValidatorsRewardsAddressesCalled(randomness, round, shardId, epoch) - } - - validators, err := ncm.ComputeConsensusGroup(randomness, round, shardId, epoch) - if err != nil { - return nil, err - } - - addresses := make([]string, 0) - for _, v := range validators { - addresses = append(addresses, string(v.Address())) - } - - return addresses, nil -} - // SetNodesPerShards - func (ncm *NodesCoordinatorMock) SetNodesPerShards( eligible map[uint32][]sharding.Validator, diff --git a/data/mock/requestHandlerStub.go b/data/mock/requestHandlerStub.go index 18676dd70cd..3565a7a2fc7 100644 --- a/data/mock/requestHandlerStub.go +++ b/data/mock/requestHandlerStub.go @@ -1,5 +1,7 @@ package mock +import "time" + // RequestHandlerStub - type RequestHandlerStub struct { RequestShardHeaderCalled func(shardID uint32, hash []byte) @@ -15,6 +17,11 @@ type RequestHandlerStub struct { RequestStartOfEpochMetaBlockCalled func(epoch uint32) } +// RequestInterval - +func (rhs *RequestHandlerStub) RequestInterval() time.Duration { + return time.Second +} + // RequestStartOfEpochMetaBlock - func (rhs *RequestHandlerStub) RequestStartOfEpochMetaBlock(epoch uint32) { if rhs.RequestStartOfEpochMetaBlockCalled == nil { diff --git a/data/state/dataTriesHolder.go b/data/state/dataTriesHolder.go index 6b10a01221b..766a5ab248c 100644 --- a/data/state/dataTriesHolder.go +++ b/data/state/dataTriesHolder.go @@ -25,6 +25,11 @@ func (dth *dataTriesHolder) Put(key []byte, tr data.Trie) { dth.mutex.Unlock() } +// Replace changes a trie pointer to the tries map +func (dth *dataTriesHolder) Replace(key []byte, tr data.Trie) { + dth.Put(key, tr) +} + // Get returns the trie pointer that is stored in the map at the given key func (dth *dataTriesHolder) Get(key []byte) data.Trie { dth.mutex.Lock() diff --git a/data/state/interface.go b/data/state/interface.go index c718f79a534..6a9455fc730 100644 --- a/data/state/interface.go +++ b/data/state/interface.go @@ -71,6 +71,8 @@ type PeerAccountHandler interface { AddToAccumulatedFees(*big.Int) GetJailTime() TimePeriod 
SetJailTime(TimePeriod) + GetList() string + GetIndex() uint32 GetCurrentShardId() uint32 SetCurrentShardId(uint32) GetNextShardId() uint32 @@ -87,7 +89,7 @@ type PeerAccountHandler interface { IncreaseNumSelectedInSuccessBlocks() GetLeaderSuccessRate() SignRate GetValidatorSuccessRate() SignRate - SetListAndIndex(shardID uint32, list string, index int32) + SetListAndIndex(shardID uint32, list string, index uint32) GetRating() uint32 SetRating(uint32) GetTempRating() uint32 @@ -164,6 +166,7 @@ type JournalEntry interface { // TriesHolder is used to store multiple tries type TriesHolder interface { Put([]byte, data.Trie) + Replace(key []byte, tr data.Trie) Get([]byte) data.Trie GetAll() []data.Trie Reset() diff --git a/data/state/peerAccount.go b/data/state/peerAccount.go index 36fd6e54e82..daa10a19444 100644 --- a/data/state/peerAccount.go +++ b/data/state/peerAccount.go @@ -146,12 +146,22 @@ func (pa *peerAccount) SetTempRating(rating uint32) { } // SetListAndIndex will update the peer's list (eligible, waiting) and the index inside it with journal -func (pa *peerAccount) SetListAndIndex(shardID uint32, list string, index int32) { +func (pa *peerAccount) SetListAndIndex(shardID uint32, list string, index uint32) { pa.CurrentShardId = shardID pa.List = list pa.IndexInList = index } +// GetList returns the list the peer is in +func (pa *peerAccount) GetList() string { + return pa.List +} + +// GetIndex returns the index in list +func (pa *peerAccount) GetIndex() uint32 { + return pa.IndexInList +} + // IsInterfaceNil return if there is no value under the interface func (pa *peerAccount) IsInterfaceNil() bool { return pa == nil diff --git a/data/state/peerAccountData.pb.go b/data/state/peerAccountData.pb.go index c6271545adf..4b0f436aea5 100644 --- a/data/state/peerAccountData.pb.go +++ b/data/state/peerAccountData.pb.go @@ -7,16 +7,15 @@ import ( bytes "bytes" encoding_binary "encoding/binary" fmt "fmt" + github_com_ElrondNetwork_elrond_go_data "github.com/ElrondNetwork/elrond-go/data" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" io "io" math "math" math_big "math/big" math_bits "math/bits" reflect "reflect" strings "strings" - - github_com_ElrondNetwork_elrond_go_data "github.com/ElrondNetwork/elrond-go/data" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" ) // Reference imports to suppress errors if they are not otherwise used. 
@@ -272,8 +271,8 @@ type PeerAccountData struct { TempRating uint32 `protobuf:"varint,15,opt,name=TempRating,proto3" json:"TempRating,omitempty"` AccumulatedFees *math_big.Int `protobuf:"bytes,16,opt,name=AccumulatedFees,proto3,casttypewith=math/big.Int;github.com/ElrondNetwork/elrond-go/data.BigIntCaster" json:"AccumulatedFees,omitempty"` NumSelectedInSuccessBlocks uint32 `protobuf:"varint,17,opt,name=NumSelectedInSuccessBlocks,proto3" json:"NumSelectedInSuccessBlocks,omitempty"` - IndexInList int32 `protobuf:"varint,20,opt,name=IndexInList,proto3" json:"IndexInList,omitempty"` - List string `protobuf:"bytes,21,opt,name=List,proto3" json:"List,omitempty"` + IndexInList uint32 `protobuf:"varint,18,opt,name=IndexInList,proto3" json:"IndexInList,omitempty"` + List string `protobuf:"bytes,19,opt,name=List,proto3" json:"List,omitempty"` } func (m *PeerAccountData) Reset() { *m = PeerAccountData{} } @@ -423,6 +422,20 @@ func (m *PeerAccountData) GetNumSelectedInSuccessBlocks() uint32 { return 0 } +func (m *PeerAccountData) GetIndexInList() uint32 { + if m != nil { + return m.IndexInList + } + return 0 +} + +func (m *PeerAccountData) GetList() string { + if m != nil { + return m.List + } + return "" +} + func init() { proto.RegisterType((*TimeStamp)(nil), "proto.TimeStamp") proto.RegisterType((*TimePeriod)(nil), "proto.TimePeriod") @@ -434,106 +447,58 @@ func init() { func init() { proto.RegisterFile("peerAccountData.proto", fileDescriptor_26bd0314afcce126) } var fileDescriptor_26bd0314afcce126 = []byte{ - // 802 bytes of a gzipped FileDescriptorProto + // 806 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0x41, 0x6f, 0xe3, 0x44, - 0x18, 0x8d, 0xb3, 0x4d, 0x9a, 0x7e, 0x4d, 0x9b, 0x76, 0x76, 0x59, 0x59, 0x15, 0x72, 0xa2, 0x08, - 0xa1, 0x0a, 0xb1, 0x09, 0x62, 0x91, 0x38, 0xa0, 0x05, 0xc5, 0xa5, 0x15, 0x81, 0xca, 0xaa, 0xc6, - 0x0b, 0x48, 0x70, 0x9a, 0x78, 0x06, 0xc7, 0xaa, 0x33, 0x13, 0x8d, 0xc7, 0x5a, 0xb8, 0xf1, 0x13, - 0xf8, 0x19, 0x88, 0xff, 0xc0, 0x7d, 0x8f, 0x3d, 0xf6, 0x14, 0xa8, 0x7b, 0x41, 0x39, 0xf5, 0xc8, - 0x11, 0x79, 0x6c, 0xa7, 0x71, 0x12, 0xe5, 0xc4, 0x29, 0xf9, 0xde, 0xf7, 0xde, 0x9b, 0x99, 0xcf, - 0xf3, 0x06, 0xde, 0x99, 0x32, 0x26, 0x07, 0x9e, 0x27, 0x62, 0xae, 0xbe, 0x24, 0x8a, 0xf4, 0xa6, - 0x52, 0x28, 0x81, 0x6a, 0xfa, 0xe7, 0xe4, 0x85, 0x1f, 0xa8, 0x71, 0x3c, 0xea, 0x79, 0x62, 0xd2, - 0xf7, 0x85, 0x2f, 0xfa, 0x1a, 0x1e, 0xc5, 0x3f, 0xe9, 0x4a, 0x17, 0xfa, 0x5f, 0xa6, 0xea, 0x7e, - 0x0a, 0x7b, 0xaf, 0x83, 0x09, 0x73, 0x15, 0x99, 0x4c, 0xd1, 0x33, 0xa8, 0x9d, 0x4f, 0x85, 0x37, - 0x36, 0x8d, 0x8e, 0x71, 0xba, 0x83, 0xb3, 0x22, 0x45, 0xb1, 0x88, 0x39, 0x35, 0xab, 0x19, 0xaa, - 0x8b, 0xae, 0x02, 0x48, 0x85, 0x57, 0x4c, 0x06, 0x82, 0xa2, 0x4f, 0x60, 0xcf, 0x55, 0x44, 0xaa, - 0x14, 0xd2, 0xea, 0xfd, 0x8f, 0x8f, 0xb2, 0x15, 0x7a, 0x0b, 0x7b, 0x7b, 0xe7, 0xed, 0xac, 0x5d, - 0xc1, 0x8f, 0x44, 0xf4, 0x11, 0xec, 0x9e, 0x73, 0xaa, 0x35, 0xd5, 0xad, 0x9a, 0x82, 0xd6, 0xbd, - 0x80, 0x86, 0x1b, 0xf8, 0x1c, 0x13, 0xc5, 0xd0, 0xbb, 0xb0, 0xe7, 0x48, 0x37, 0xf6, 0x3c, 0x16, - 0x45, 0x7a, 0xcd, 0x03, 0xfc, 0x08, 0x64, 0xdd, 0x0b, 0x12, 0x84, 0xb1, 0xcc, 0xdc, 0x75, 0x37, - 0x07, 0xba, 0xff, 0x56, 0xe1, 0xd9, 0x77, 0x24, 0x0c, 0x28, 0x51, 0x42, 0x0e, 0xa6, 0x01, 0x66, - 0xd1, 0x54, 0xf0, 0x88, 0xa1, 0x57, 0xd0, 0x72, 0xe4, 0x25, 0x23, 0x94, 0x95, 0xad, 0xed, 0xa7, - 0xf3, 0x59, 0xbb, 0xc5, 0xcb, 0x2d, 0xbc, 0xca, 0x5d, 0x96, 0x97, 0xd6, 0x2e, 0xcb, 0xf3, 0x16, - 0x5e, 0xe5, 0xa2, 0x0b, 0x40, 0x8e, 0x5c, 0xec, 0xab, 0xd8, 0xc0, 0x13, 
0xed, 0xf0, 0x7c, 0x3e, - 0x6b, 0x23, 0xbe, 0xd6, 0xc5, 0x1b, 0x14, 0x2b, 0x3e, 0xc5, 0x4e, 0x76, 0x36, 0xfa, 0x14, 0x9b, - 0xd9, 0xa0, 0x40, 0x5d, 0xa8, 0x63, 0xa2, 0x02, 0xee, 0x9b, 0xb5, 0x8e, 0x71, 0x5a, 0xb5, 0x61, - 0x3e, 0x6b, 0xd7, 0xa5, 0x46, 0x70, 0xde, 0x41, 0x3d, 0x80, 0xd7, 0x6c, 0x32, 0xcd, 0x79, 0x75, - 0xcd, 0x3b, 0x9c, 0xcf, 0xda, 0xa0, 0x16, 0x28, 0x5e, 0x62, 0x74, 0xff, 0xdc, 0x85, 0xd6, 0x55, - 0xf9, 0x06, 0xa3, 0x2e, 0x34, 0xed, 0x4b, 0xf7, 0x2a, 0x1e, 0x85, 0x81, 0xf7, 0x0d, 0xfb, 0x45, - 0x8f, 0xbc, 0x89, 0x4b, 0x18, 0xfa, 0x00, 0x8e, 0x5c, 0x6f, 0xcc, 0x85, 0x94, 0x8f, 0xbc, 0xaa, - 0xe6, 0xad, 0xe1, 0xe8, 0x3d, 0x38, 0xc0, 0xec, 0x0d, 0x91, 0x74, 0x40, 0xa9, 0x2c, 0x46, 0xd8, - 0xc4, 0x65, 0x10, 0xfd, 0x08, 0x35, 0x57, 0x91, 0xeb, 0x6c, 0x30, 0x4d, 0xfb, 0xfc, 0x8f, 0xbf, - 0xda, 0x83, 0x09, 0x51, 0xe3, 0xfe, 0x28, 0xf0, 0x7b, 0x43, 0xae, 0x3e, 0x5b, 0x8a, 0xd2, 0x79, - 0x28, 0x05, 0xa7, 0x0e, 0x53, 0x6f, 0x84, 0xbc, 0xee, 0x33, 0x5d, 0xbd, 0xf0, 0x45, 0x9f, 0xa6, - 0x01, 0xb4, 0x03, 0x7f, 0xc8, 0xd5, 0x19, 0x89, 0x14, 0x93, 0x38, 0xf3, 0x44, 0x02, 0x5a, 0x03, - 0xcf, 0x8b, 0x27, 0x71, 0x48, 0x14, 0xa3, 0x17, 0x8c, 0x45, 0x26, 0xfa, 0x3f, 0x97, 0x59, 0x75, - 0x47, 0x2f, 0xa1, 0xf1, 0x35, 0x09, 0x42, 0x9d, 0xa6, 0x9a, 0x4e, 0xd3, 0xf1, 0x52, 0x9a, 0xb2, - 0x9c, 0xe6, 0x71, 0x5a, 0x10, 0xd1, 0x2b, 0x38, 0xb8, 0x22, 0x91, 0x2a, 0xea, 0xc8, 0xac, 0x77, - 0x9e, 0x6c, 0x53, 0x96, 0xd9, 0xe8, 0x7d, 0x38, 0x3c, 0x8b, 0xa5, 0x64, 0x5c, 0xb9, 0x63, 0x22, - 0xe9, 0x90, 0x9a, 0xbb, 0x3a, 0x69, 0x2b, 0x28, 0xea, 0xc0, 0xbe, 0xc3, 0x7e, 0x5e, 0x90, 0x1a, - 0x9a, 0xb4, 0x0c, 0xa1, 0x0f, 0xe1, 0xd8, 0x11, 0x94, 0x0d, 0xf9, 0xf7, 0x24, 0x48, 0xaf, 0xc9, - 0x65, 0x10, 0x29, 0x73, 0xaf, 0x63, 0x9c, 0x36, 0xf0, 0x7a, 0x23, 0xfd, 0xbe, 0xdf, 0x72, 0x3d, - 0x67, 0xea, 0x08, 0xee, 0x31, 0x13, 0xf4, 0xd3, 0x54, 0x06, 0xd1, 0x70, 0x29, 0xe3, 0x45, 0x5a, - 0x88, 0x62, 0xe6, 0xbe, 0x9e, 0x4e, 0x2b, 0x3f, 0x63, 0xf1, 0x9e, 0xe4, 0x27, 0xdc, 0x28, 0x41, - 0x67, 0x70, 0x5c, 0x4e, 0x7e, 0xea, 0xd3, 0xdc, 0xe6, 0xb3, 0xce, 0x47, 0x9f, 0xc3, 0x89, 0x13, - 0x4f, 0x5c, 0x16, 0x32, 0x4f, 0x31, 0x3a, 0xe4, 0x79, 0xcf, 0x0e, 0x85, 0x77, 0x1d, 0x99, 0x4f, - 0xf5, 0x50, 0xb6, 0x30, 0xd0, 0x09, 0x34, 0xce, 0x04, 0x65, 0x5f, 0x91, 0x68, 0x6c, 0x1e, 0xe8, - 0x0b, 0xbd, 0xa8, 0xd1, 0xf3, 0x45, 0x52, 0x0f, 0xb5, 0x4f, 0x91, 0x4e, 0xab, 0x94, 0xce, 0x63, - 0xdd, 0x5b, 0x42, 0x52, 0x4f, 0x2c, 0x84, 0xd2, 0x9e, 0xad, 0xcc, 0xb3, 0xa8, 0xd3, 0x87, 0x3f, - 0x9b, 0xee, 0x51, 0xf6, 0xf0, 0xeb, 0xc2, 0xfe, 0xe2, 0xe6, 0xce, 0xaa, 0xdc, 0xde, 0x59, 0x95, - 0x87, 0x3b, 0xcb, 0xf8, 0x35, 0xb1, 0x8c, 0xdf, 0x13, 0xcb, 0x78, 0x9b, 0x58, 0xc6, 0x4d, 0x62, - 0x19, 0xb7, 0x89, 0x65, 0xfc, 0x9d, 0x58, 0xc6, 0x3f, 0x89, 0x55, 0x79, 0x48, 0x2c, 0xe3, 0xb7, - 0x7b, 0xab, 0x72, 0x73, 0x6f, 0x55, 0x6e, 0xef, 0xad, 0xca, 0x0f, 0xb5, 0x48, 0x11, 0xc5, 0x46, - 0x75, 0x3d, 0xaf, 0x97, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x10, 0xf5, 0x41, 0xb2, 0xc8, 0x06, - 0x00, 0x00, - // 744 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0x41, 0x6f, 0xda, 0x48, - 0x18, 0xc5, 0x24, 0x24, 0x30, 0x81, 0x10, 0x66, 0xb3, 0x91, 0x15, 0xad, 0x0c, 0x42, 0xab, 0x55, - 0xb4, 0xda, 0xc0, 0xaa, 0xa9, 0xd4, 0x43, 0x95, 0x56, 0x90, 0x82, 0x44, 0x1b, 0x59, 0xc8, 0x4e, - 0x5b, 0xa9, 0x3d, 0x0d, 0x9e, 0xa9, 0xb1, 0x62, 0x66, 0xd0, 0x78, 0xac, 0xb4, 0xb7, 0x5e, 0x7a, - 0xef, 0x8f, 0xe8, 0xa1, 0xea, 0x2f, 0xc9, 0x31, 0xc7, 0x9c, 0xd2, 0xc6, 0xb9, 0x54, 0x39, 0xe5, - 0x27, 0x54, 0x1e, 0xdb, 0x04, 0x03, 
0xe2, 0xd4, 0x13, 0x7c, 0xef, 0x7b, 0xef, 0xcd, 0xf7, 0x8d, - 0xde, 0x18, 0xfc, 0x39, 0x26, 0x84, 0xb7, 0x2c, 0x8b, 0xf9, 0x54, 0x3c, 0x43, 0x02, 0x35, 0xc6, - 0x9c, 0x09, 0x06, 0x73, 0xf2, 0x67, 0x77, 0xdf, 0x76, 0xc4, 0xd0, 0x1f, 0x34, 0x2c, 0x36, 0x6a, - 0xda, 0xcc, 0x66, 0x4d, 0x09, 0x0f, 0xfc, 0x77, 0xb2, 0x92, 0x85, 0xfc, 0x17, 0xa9, 0xea, 0x8f, - 0x40, 0xe1, 0xc4, 0x19, 0x11, 0x53, 0xa0, 0xd1, 0x18, 0x6e, 0x83, 0x5c, 0x67, 0xcc, 0xac, 0xa1, - 0xaa, 0xd4, 0x94, 0xbd, 0x55, 0x23, 0x2a, 0x42, 0xd4, 0x60, 0x3e, 0xc5, 0x6a, 0x36, 0x42, 0x65, - 0x51, 0x17, 0x00, 0x84, 0xc2, 0x3e, 0xe1, 0x0e, 0xc3, 0xf0, 0x21, 0x28, 0x98, 0x02, 0x71, 0x11, - 0x42, 0x52, 0xbd, 0xf1, 0x60, 0x2b, 0x3a, 0xa1, 0x31, 0xb1, 0x6f, 0xaf, 0x9e, 0x5f, 0x55, 0x33, - 0xc6, 0x3d, 0x11, 0xfe, 0x0f, 0xd6, 0x3b, 0x14, 0x4b, 0x4d, 0x76, 0xa9, 0x26, 0xa1, 0xd5, 0xbb, - 0x20, 0x6f, 0x3a, 0x36, 0x35, 0x90, 0x20, 0xf0, 0x2f, 0x50, 0xd0, 0xb9, 0xe9, 0x5b, 0x16, 0xf1, - 0x3c, 0x79, 0x66, 0xc9, 0xb8, 0x07, 0xa2, 0x6e, 0x17, 0x39, 0xae, 0xcf, 0x23, 0x77, 0xd9, 0x8d, - 0x81, 0xfa, 0x97, 0x2c, 0xd8, 0x7e, 0x85, 0x5c, 0x07, 0x23, 0xc1, 0x78, 0x6b, 0xec, 0x18, 0xc4, - 0x1b, 0x33, 0xea, 0x11, 0x78, 0x08, 0xca, 0x3a, 0x3f, 0x26, 0x08, 0x93, 0xb4, 0x75, 0xfb, 0x8f, - 0xdb, 0xab, 0x6a, 0x99, 0xa6, 0x5b, 0xc6, 0x2c, 0x77, 0x5a, 0x9e, 0x3a, 0x3b, 0x2d, 0x8f, 0x5b, - 0xc6, 0x2c, 0x17, 0x76, 0x01, 0xd4, 0xf9, 0x64, 0xae, 0x64, 0x80, 0x15, 0xe9, 0xb0, 0x73, 0x7b, - 0x55, 0x85, 0x74, 0xae, 0x6b, 0x2c, 0x50, 0xcc, 0xf8, 0x24, 0x93, 0xac, 0x2e, 0xf4, 0x49, 0x86, - 0x59, 0xa0, 0xa8, 0x7f, 0x5a, 0x07, 0xe5, 0x7e, 0x3a, 0x6d, 0xb0, 0x0e, 0x8a, 0xed, 0x63, 0xb3, - 0xef, 0x0f, 0x5c, 0xc7, 0x7a, 0x41, 0x3e, 0xc8, 0xeb, 0x29, 0x1a, 0x29, 0x0c, 0xfe, 0x0b, 0xb6, - 0x4c, 0x6b, 0x48, 0x19, 0xe7, 0xf7, 0xbc, 0xac, 0xe4, 0xcd, 0xe1, 0xf0, 0x6f, 0x50, 0x32, 0xc8, - 0x19, 0xe2, 0xb8, 0x85, 0x31, 0x4f, 0xd6, 0x2d, 0x1a, 0x69, 0x10, 0xbe, 0x05, 0x39, 0x53, 0xa0, - 0xd3, 0x68, 0x89, 0x62, 0xbb, 0xf3, 0xed, 0x7b, 0xb5, 0x35, 0x42, 0x62, 0xd8, 0x1c, 0x38, 0x76, - 0xa3, 0x47, 0xc5, 0xe3, 0xa9, 0xd8, 0x77, 0x5c, 0xce, 0x28, 0xd6, 0x89, 0x38, 0x63, 0xfc, 0xb4, - 0x49, 0x64, 0xb5, 0x6f, 0xb3, 0x26, 0x0e, 0x1f, 0x4b, 0xdb, 0xb1, 0x7b, 0x54, 0x1c, 0x21, 0x4f, - 0x10, 0x6e, 0x44, 0x9e, 0xf0, 0x00, 0xe4, 0x9f, 0x23, 0xc7, 0x95, 0x41, 0xcc, 0xc9, 0x20, 0x56, - 0xa6, 0x82, 0x18, 0x45, 0x3c, 0x4e, 0xe2, 0x84, 0x08, 0x0f, 0x41, 0xa9, 0x8f, 0x3c, 0x91, 0xd4, - 0x9e, 0xba, 0x56, 0x5b, 0x59, 0xa6, 0x4c, 0xb3, 0xe1, 0x3f, 0x60, 0xf3, 0xc8, 0xe7, 0x9c, 0x50, - 0x61, 0x0e, 0x11, 0xc7, 0x3d, 0xac, 0xae, 0xcb, 0x90, 0xce, 0xa0, 0xb0, 0x06, 0x36, 0x74, 0xf2, - 0x7e, 0x42, 0xca, 0x4b, 0xd2, 0x34, 0x04, 0xff, 0x03, 0x15, 0x9d, 0x61, 0xd2, 0xa3, 0xaf, 0x91, - 0x23, 0x1c, 0x6a, 0x1f, 0x3b, 0x9e, 0x50, 0x0b, 0x35, 0x65, 0x2f, 0x6f, 0xcc, 0x37, 0xc2, 0xeb, - 0x7e, 0x49, 0xe5, 0xda, 0x58, 0x67, 0xd4, 0x22, 0x2a, 0x90, 0xaf, 0x3a, 0x0d, 0xc2, 0xde, 0xd4, - 0xf3, 0x48, 0x82, 0x86, 0x04, 0x51, 0x37, 0xe4, 0xed, 0x94, 0xe3, 0x1d, 0x93, 0xa7, 0x18, 0x6f, - 0xb8, 0x50, 0x02, 0x8f, 0x40, 0x25, 0xfd, 0x68, 0x42, 0x9f, 0xe2, 0x32, 0x9f, 0x79, 0x3e, 0xdc, - 0x01, 0x6b, 0x06, 0x0a, 0x77, 0x50, 0x4b, 0xf2, 0x02, 0xe2, 0x2a, 0xfc, 0x36, 0x45, 0x5b, 0x6c, - 0x46, 0xdf, 0xa6, 0x68, 0x7a, 0x0d, 0x80, 0x13, 0x32, 0x1a, 0xc7, 0x8a, 0xb2, 0x54, 0x4c, 0x21, - 0x90, 0x81, 0x72, 0xcb, 0xb2, 0xfc, 0x91, 0xef, 0x22, 0x41, 0x70, 0x97, 0x10, 0x4f, 0xdd, 0xfa, - 0x9d, 0xb1, 0x9a, 0x75, 0x87, 0x4f, 0xc0, 0xae, 0xee, 0x8f, 0x4c, 0xe2, 0x12, 0x4b, 0x10, 0xdc, - 0xa3, 0xf1, 0x6a, 0x6d, 0x97, 0x59, 0xa7, 0x9e, 0x5a, 0x91, 
0x03, 0x2e, 0x61, 0xb4, 0x9f, 0x5e, - 0x5c, 0x6b, 0x99, 0xcb, 0x6b, 0x2d, 0x73, 0x77, 0xad, 0x29, 0x1f, 0x03, 0x4d, 0xf9, 0x1a, 0x68, - 0xca, 0x79, 0xa0, 0x29, 0x17, 0x81, 0xa6, 0x5c, 0x06, 0x9a, 0xf2, 0x23, 0xd0, 0x94, 0x9f, 0x81, - 0x96, 0xb9, 0x0b, 0x34, 0xe5, 0xf3, 0x8d, 0x96, 0xb9, 0xb8, 0xd1, 0x32, 0x97, 0x37, 0x5a, 0xe6, - 0x4d, 0xce, 0x13, 0x48, 0x90, 0xc1, 0x9a, 0xbc, 0xe8, 0x83, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, - 0x0c, 0xdc, 0x43, 0xdb, 0x3c, 0x06, 0x00, 0x00, + 0x14, 0x8e, 0xb3, 0x4d, 0xda, 0xbc, 0xa6, 0x4d, 0x3b, 0x5b, 0x56, 0xd6, 0x0a, 0x39, 0x51, 0x84, + 0x50, 0x85, 0xd8, 0x04, 0xb1, 0x48, 0x1c, 0xd0, 0x82, 0xe2, 0xd2, 0x4a, 0x86, 0xca, 0xaa, 0xc6, + 0x0b, 0x48, 0x70, 0x9a, 0x78, 0x06, 0xc7, 0xaa, 0x33, 0x13, 0x8d, 0xc7, 0xda, 0xe5, 0xc6, 0x4f, + 0xe0, 0x67, 0x20, 0xfe, 0x02, 0x7f, 0x60, 0x8f, 0x3d, 0xf6, 0x14, 0xa8, 0x7b, 0x41, 0x3d, 0xf5, + 0xc8, 0x11, 0x79, 0x6c, 0xa7, 0x76, 0x5a, 0xf5, 0xc4, 0x29, 0x79, 0xdf, 0xfb, 0xbe, 0x37, 0x6f, + 0xde, 0x7c, 0xcf, 0xf0, 0xde, 0x82, 0x31, 0x39, 0xf1, 0x7d, 0x91, 0x70, 0xf5, 0x35, 0x51, 0x64, + 0xb4, 0x90, 0x42, 0x09, 0xd4, 0xd2, 0x3f, 0xcf, 0x5f, 0x04, 0xa1, 0x9a, 0x25, 0xd3, 0x91, 0x2f, + 0xe6, 0xe3, 0x40, 0x04, 0x62, 0xac, 0xe1, 0x69, 0xf2, 0xb3, 0x8e, 0x74, 0xa0, 0xff, 0xe5, 0xaa, + 0xe1, 0xe7, 0xd0, 0x79, 0x1d, 0xce, 0x99, 0xa7, 0xc8, 0x7c, 0x81, 0x0e, 0xa0, 0x75, 0xbc, 0x10, + 0xfe, 0xcc, 0x34, 0x06, 0xc6, 0xe1, 0x06, 0xce, 0x83, 0x0c, 0xc5, 0x22, 0xe1, 0xd4, 0x6c, 0xe6, + 0xa8, 0x0e, 0x86, 0x0a, 0x20, 0x13, 0x9e, 0x31, 0x19, 0x0a, 0x8a, 0x3e, 0x83, 0x8e, 0xa7, 0x88, + 0x54, 0x19, 0xa4, 0xd5, 0xdb, 0x9f, 0xee, 0xe5, 0x27, 0x8c, 0x56, 0xe5, 0xed, 0x8d, 0x77, 0xcb, + 0x7e, 0x03, 0xdf, 0x11, 0xd1, 0x27, 0xb0, 0x79, 0xcc, 0xa9, 0xd6, 0x34, 0x1f, 0xd5, 0x94, 0xb4, + 0xe1, 0x09, 0x6c, 0x79, 0x61, 0xc0, 0x31, 0x51, 0x0c, 0xbd, 0x0f, 0x1d, 0x57, 0x7a, 0x89, 0xef, + 0xb3, 0x38, 0xd6, 0x67, 0xee, 0xe0, 0x3b, 0x20, 0xcf, 0x9e, 0x90, 0x30, 0x4a, 0x64, 0x5e, 0x5d, + 0x67, 0x0b, 0x60, 0xf8, 0x6f, 0x13, 0x0e, 0xbe, 0x27, 0x51, 0x48, 0x89, 0x12, 0x72, 0xb2, 0x08, + 0x31, 0x8b, 0x17, 0x82, 0xc7, 0x0c, 0xbd, 0x82, 0x9e, 0x2b, 0x4f, 0x19, 0xa1, 0xac, 0x5e, 0xda, + 0x7e, 0x7a, 0xb3, 0xec, 0xf7, 0x78, 0x3d, 0x85, 0xd7, 0xb9, 0x55, 0x79, 0xed, 0xec, 0xba, 0xbc, + 0x48, 0xe1, 0x75, 0x2e, 0x3a, 0x01, 0xe4, 0xca, 0x55, 0x5f, 0x65, 0x03, 0x4f, 0x74, 0x85, 0x67, + 0x37, 0xcb, 0x3e, 0xe2, 0xf7, 0xb2, 0xf8, 0x01, 0xc5, 0x5a, 0x9d, 0xb2, 0x93, 0x8d, 0x07, 0xeb, + 0x94, 0xcd, 0x3c, 0xa0, 0x40, 0x43, 0x68, 0x63, 0xa2, 0x42, 0x1e, 0x98, 0xad, 0x81, 0x71, 0xd8, + 0xb4, 0xe1, 0x66, 0xd9, 0x6f, 0x4b, 0x8d, 0xe0, 0x22, 0x83, 0x46, 0x00, 0xaf, 0xd9, 0x7c, 0x51, + 0xf0, 0xda, 0x9a, 0xb7, 0x7b, 0xb3, 0xec, 0x83, 0x5a, 0xa1, 0xb8, 0xc2, 0x18, 0xfe, 0xb9, 0x09, + 0xbd, 0xb3, 0xba, 0x83, 0xd1, 0x10, 0xba, 0xf6, 0xa9, 0x77, 0x96, 0x4c, 0xa3, 0xd0, 0xff, 0x96, + 0xfd, 0xa2, 0x47, 0xde, 0xc5, 0x35, 0x0c, 0x7d, 0x04, 0x7b, 0x9e, 0x3f, 0xe3, 0x42, 0xca, 0x3b, + 0x5e, 0x53, 0xf3, 0xee, 0xe1, 0xe8, 0x03, 0xd8, 0xc1, 0xec, 0x0d, 0x91, 0x74, 0x42, 0xa9, 0x2c, + 0x47, 0xd8, 0xc5, 0x75, 0x10, 0xfd, 0x04, 0x2d, 0x4f, 0x91, 0xf3, 0x7c, 0x30, 0x5d, 0xfb, 0xf8, + 0x8f, 0xbf, 0xfa, 0x93, 0x39, 0x51, 0xb3, 0xf1, 0x34, 0x0c, 0x46, 0x0e, 0x57, 0x5f, 0x54, 0x56, + 0xe9, 0x38, 0x92, 0x82, 0x53, 0x97, 0xa9, 0x37, 0x42, 0x9e, 0x8f, 0x99, 0x8e, 0x5e, 0x04, 0x62, + 0x4c, 0xb3, 0x05, 0xb4, 0xc3, 0xc0, 0xe1, 0xea, 0x88, 0xc4, 0x8a, 0x49, 0x9c, 0xd7, 0x44, 0x2f, + 0x61, 0xeb, 0x1b, 0x12, 0x46, 0xda, 0xdc, 0x2d, 0x6d, 0xee, 0xfd, 0x8a, 0xb9, 0xf3, 0xb5, 0x29, + 0xdc, 0xbd, 0x22, 0xa2, 0x57, 0xb0, 
0x73, 0x46, 0x62, 0x55, 0xc6, 0xb1, 0xd9, 0x1e, 0x3c, 0x79, + 0x4c, 0x59, 0x67, 0xa3, 0x0f, 0x61, 0xf7, 0x28, 0x91, 0x92, 0x71, 0xe5, 0xcd, 0x88, 0xa4, 0x0e, + 0x35, 0x37, 0xb5, 0xf1, 0xd7, 0x50, 0x34, 0x80, 0x6d, 0x97, 0xbd, 0x5d, 0x91, 0xb6, 0x34, 0xa9, + 0x0a, 0xa1, 0x8f, 0x61, 0xdf, 0x15, 0x94, 0x39, 0xfc, 0x07, 0x12, 0x66, 0xaf, 0x76, 0x1a, 0xc6, + 0xca, 0xec, 0x0c, 0x8c, 0xc3, 0x2d, 0x7c, 0x3f, 0x91, 0x8d, 0xfb, 0x3b, 0xae, 0xaf, 0x4d, 0x5d, + 0xc1, 0x7d, 0x66, 0x82, 0xfe, 0x52, 0xd4, 0x41, 0xe4, 0x54, 0x56, 0xae, 0x34, 0x2f, 0x51, 0xcc, + 0xdc, 0xd6, 0xd3, 0xe9, 0x15, 0x77, 0x2c, 0xd7, 0xbb, 0xb8, 0xe1, 0x83, 0x12, 0x74, 0x04, 0xfb, + 0xf5, 0x45, 0xcc, 0xea, 0x74, 0x1f, 0xab, 0x73, 0x9f, 0x8f, 0x9e, 0xad, 0xcc, 0xbd, 0xa3, 0x07, + 0x50, 0x1a, 0xfa, 0x00, 0x5a, 0xf9, 0x2d, 0x76, 0xf3, 0xef, 0x5d, 0xde, 0xbd, 0x55, 0xb3, 0x79, + 0x4f, 0x2b, 0x2a, 0x08, 0x12, 0xd0, 0x9b, 0xf8, 0x7e, 0x32, 0x4f, 0x22, 0xa2, 0x18, 0x3d, 0x61, + 0x2c, 0x36, 0xf7, 0xfe, 0x4f, 0x5b, 0xad, 0x57, 0x47, 0x5f, 0xc2, 0x73, 0x37, 0x99, 0x7b, 0x2c, + 0x62, 0xbe, 0x62, 0xd4, 0xe1, 0xc5, 0xd5, 0xec, 0x48, 0xf8, 0xe7, 0xb1, 0xb9, 0xaf, 0x1b, 0x7c, + 0x84, 0x91, 0x99, 0xc0, 0xe1, 0x94, 0xbd, 0x75, 0xb8, 0x7e, 0x5c, 0x94, 0x9b, 0xa0, 0x02, 0x21, + 0x04, 0x1b, 0x3a, 0xf5, 0x74, 0x60, 0x1c, 0x76, 0xb0, 0xfe, 0x6f, 0x7f, 0x75, 0x71, 0x65, 0x35, + 0x2e, 0xaf, 0xac, 0xc6, 0xed, 0x95, 0x65, 0xfc, 0x9a, 0x5a, 0xc6, 0xef, 0xa9, 0x65, 0xbc, 0x4b, + 0x2d, 0xe3, 0x22, 0xb5, 0x8c, 0xcb, 0xd4, 0x32, 0xfe, 0x4e, 0x2d, 0xe3, 0x9f, 0xd4, 0x6a, 0xdc, + 0xa6, 0x96, 0xf1, 0xdb, 0xb5, 0xd5, 0xb8, 0xb8, 0xb6, 0x1a, 0x97, 0xd7, 0x56, 0xe3, 0xc7, 0x56, + 0xac, 0x88, 0x62, 0xd3, 0xb6, 0x7e, 0x9e, 0x97, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x0d, 0x3b, + 0x3a, 0xdc, 0xc6, 0x06, 0x00, 0x00, } func (this *TimeStamp) Equal(that interface{}) bool { @@ -737,6 +702,12 @@ func (this *PeerAccountData) Equal(that interface{}) bool { if this.NumSelectedInSuccessBlocks != that1.NumSelectedInSuccessBlocks { return false } + if this.IndexInList != that1.IndexInList { + return false + } + if this.List != that1.List { + return false + } return true } func (this *TimeStamp) GoString() string { @@ -791,7 +762,7 @@ func (this *PeerAccountData) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 21) + s := make([]string, 0, 23) s = append(s, "&state.PeerAccountData{") s = append(s, "BLSPublicKey: "+fmt.Sprintf("%#v", this.BLSPublicKey)+",\n") s = append(s, "SchnorrPublicKey: "+fmt.Sprintf("%#v", this.SchnorrPublicKey)+",\n") @@ -816,6 +787,8 @@ func (this *PeerAccountData) GoString() string { s = append(s, "TempRating: "+fmt.Sprintf("%#v", this.TempRating)+",\n") s = append(s, "AccumulatedFees: "+fmt.Sprintf("%#v", this.AccumulatedFees)+",\n") s = append(s, "NumSelectedInSuccessBlocks: "+fmt.Sprintf("%#v", this.NumSelectedInSuccessBlocks)+",\n") + s = append(s, "IndexInList: "+fmt.Sprintf("%#v", this.IndexInList)+",\n") + s = append(s, "List: "+fmt.Sprintf("%#v", this.List)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -1018,14 +991,14 @@ func (m *PeerAccountData) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x1 i-- - dAtA[i] = 0xaa + dAtA[i] = 0x9a } if m.IndexInList != 0 { i = encodeVarintPeerAccountData(dAtA, i, uint64(m.IndexInList)) i-- dAtA[i] = 0x1 i-- - dAtA[i] = 0xa0 + dAtA[i] = 0x90 } if m.NumSelectedInSuccessBlocks != 0 { i = encodeVarintPeerAccountData(dAtA, i, uint64(m.NumSelectedInSuccessBlocks)) @@ -2361,7 +2334,7 @@ func (m *PeerAccountData) Unmarshal(dAtA []byte) error { break } } - case 20: + 
case 18: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field IndexInList", wireType) } @@ -2375,12 +2348,12 @@ func (m *PeerAccountData) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.IndexInList |= int32(b&0x7F) << shift + m.IndexInList |= uint32(b&0x7F) << shift if b < 0x80 { break } } - case 21: + case 19: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field List", wireType) } diff --git a/data/state/proto/peerAccountData.proto b/data/state/proto/peerAccountData.proto index adc3facefb5..144a10608e6 100644 --- a/data/state/proto/peerAccountData.proto +++ b/data/state/proto/peerAccountData.proto @@ -58,6 +58,6 @@ message PeerAccountData { uint32 TempRating = 15; bytes AccumulatedFees = 16 [(gogoproto.casttypewith) = "math/big.Int;github.com/ElrondNetwork/elrond-go/data.BigIntCaster"]; uint32 NumSelectedInSuccessBlocks = 17; - int32 IndexInList = 18; + uint32 IndexInList = 18; string List = 19; } diff --git a/data/syncer/baseAccountsSyncer.go b/data/syncer/baseAccountsSyncer.go index a758f1a37d7..9549eb25430 100644 --- a/data/syncer/baseAccountsSyncer.go +++ b/data/syncer/baseAccountsSyncer.go @@ -1,6 +1,7 @@ package syncer import ( + "context" "fmt" "sync" "time" @@ -63,7 +64,7 @@ func checkArgs(args ArgsNewBaseAccountsSyncer) error { return nil } -func (b *baseAccountsSyncer) syncMainTrie(rootHash []byte, trieTopic string) error { +func (b *baseAccountsSyncer) syncMainTrie(rootHash []byte, trieTopic string, ctx context.Context) error { b.rootHash = rootHash dataTrie, err := trie.NewTrie(b.trieStorageManager, b.marshalizer, b.hasher) @@ -72,13 +73,13 @@ func (b *baseAccountsSyncer) syncMainTrie(rootHash []byte, trieTopic string) err } b.dataTries[string(rootHash)] = dataTrie - trieSyncer, err := trie.NewTrieSyncer(b.requestHandler, b.cacher, dataTrie, b.waitTime, b.shardId, trieTopic) + trieSyncer, err := trie.NewTrieSyncer(b.requestHandler, b.cacher, dataTrie, b.shardId, trieTopic) if err != nil { return err } b.trieSyncers[string(rootHash)] = trieSyncer - err = trieSyncer.StartSyncing(rootHash) + err = trieSyncer.StartSyncing(rootHash, ctx) if err != nil { return err } diff --git a/data/syncer/userAccountsSyncer.go b/data/syncer/userAccountsSyncer.go index 8a3cc99fe49..a5bc11f5241 100644 --- a/data/syncer/userAccountsSyncer.go +++ b/data/syncer/userAccountsSyncer.go @@ -1,6 +1,8 @@ package syncer import ( + "context" + logger "github.com/ElrondNetwork/elrond-go-logger" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/state" @@ -47,12 +49,15 @@ func NewUserAccountsSyncer(args ArgsNewUserAccountsSyncer) (*userAccountsSyncer, return u, nil } -// SyncAccounts will launch the syncing method to gather all the data needed for userAccounts +// SyncAccounts will launch the syncing method to gather all the data needed for userAccounts - it is a blocking method func (u *userAccountsSyncer) SyncAccounts(rootHash []byte) error { u.mutex.Lock() defer u.mutex.Unlock() - err := u.syncMainTrie(rootHash, factory.AccountTrieNodesTopic) + ctx, cancel := context.WithTimeout(context.Background(), u.waitTime) + defer cancel() + + err := u.syncMainTrie(rootHash, factory.AccountTrieNodesTopic, ctx) if err != nil { return nil } @@ -63,7 +68,7 @@ func (u *userAccountsSyncer) SyncAccounts(rootHash []byte) error { return err } - err = u.syncAccountDataTries(rootHashes) + err = u.syncAccountDataTries(rootHashes, ctx) if err != nil { return err } @@ -71,7 +76,7 @@ func (u *userAccountsSyncer) SyncAccounts(rootHash []byte) 
error { return nil } -func (u *userAccountsSyncer) syncAccountDataTries(rootHashes [][]byte) error { +func (u *userAccountsSyncer) syncAccountDataTries(rootHashes [][]byte, ctx context.Context) error { for _, rootHash := range rootHashes { dataTrie, err := trie.NewTrie(u.trieStorageManager, u.marshalizer, u.hasher) if err != nil { @@ -79,13 +84,13 @@ func (u *userAccountsSyncer) syncAccountDataTries(rootHashes [][]byte) error { } u.dataTries[string(rootHash)] = dataTrie - trieSyncer, err := trie.NewTrieSyncer(u.requestHandler, u.cacher, dataTrie, u.waitTime, u.shardId, factory.AccountTrieNodesTopic) + trieSyncer, err := trie.NewTrieSyncer(u.requestHandler, u.cacher, dataTrie, u.shardId, factory.AccountTrieNodesTopic) if err != nil { return err } u.trieSyncers[string(rootHash)] = trieSyncer - err = trieSyncer.StartSyncing(rootHash) + err = trieSyncer.StartSyncing(rootHash, ctx) if err != nil { return err } diff --git a/data/syncer/validatorAccountsSyncer.go b/data/syncer/validatorAccountsSyncer.go index e300ec5d5e2..fc3be17116c 100644 --- a/data/syncer/validatorAccountsSyncer.go +++ b/data/syncer/validatorAccountsSyncer.go @@ -1,6 +1,8 @@ package syncer import ( + "context" + "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/process/factory" @@ -42,10 +44,13 @@ func NewValidatorAccountsSyncer(args ArgsNewValidatorAccountsSyncer) (*validator return u, nil } -// SyncAccounts will launch the syncing method to gather all the data needed for validatorAccounts +// SyncAccounts will launch the syncing method to gather all the data needed for validatorAccounts - it is a blocking method func (v *validatorAccountsSyncer) SyncAccounts(rootHash []byte) error { v.mutex.Lock() defer v.mutex.Unlock() - return v.syncMainTrie(rootHash, factory.ValidatorTrieNodesTopic) + ctx, cancel := context.WithTimeout(context.Background(), v.waitTime) + defer cancel() + + return v.syncMainTrie(rootHash, factory.ValidatorTrieNodesTopic, ctx) } diff --git a/data/trie/branchNode.go b/data/trie/branchNode.go index 5b33232a59a..5e77cfcb4fe 100644 --- a/data/trie/branchNode.go +++ b/data/trie/branchNode.go @@ -664,29 +664,29 @@ func (bn *branchNode) setDirty(dirty bool) { bn.dirty = dirty } -func (bn *branchNode) loadChildren(syncer *trieSyncer) error { +func (bn *branchNode) loadChildren(getNode func([]byte) (node, error)) ([][]byte, error) { err := bn.isEmptyOrNil() if err != nil { - return err + return nil, err } + missingChildren := make([][]byte, 0) for i := range bn.EncodedChildren { if len(bn.EncodedChildren[i]) == 0 { continue } var child node - child, err = syncer.getNode(bn.EncodedChildren[i]) + child, err = getNode(bn.EncodedChildren[i]) if err != nil { - return err + missingChildren = append(missingChildren, bn.EncodedChildren[i]) + continue } bn.children[i] = child } - syncer.interceptedNodes.Remove(bn.hash) - - return nil + return missingChildren, nil } func (bn *branchNode) getAllLeaves(leaves map[string][]byte, key []byte, db data.DBWriteCacher, marshalizer marshal.Marshalizer) error { diff --git a/data/trie/branchNode_test.go b/data/trie/branchNode_test.go index 8ec825d1cef..5fa3d6ee95e 100644 --- a/data/trie/branchNode_test.go +++ b/data/trie/branchNode_test.go @@ -6,7 +6,6 @@ import ( "io/ioutil" "reflect" "testing" - "time" "github.com/ElrondNetwork/elrond-go/config" "github.com/ElrondNetwork/elrond-go/data" @@ -1047,31 +1046,27 @@ func TestBranchNode_loadChildren(t *testing.T) { _ = tr.root.setRootHash() nodes, _ := 
getEncodedTrieNodesAndHashes(tr) nodesCacher, _ := lrucache.NewCache(100) - - resolver := &mock.RequestHandlerStub{ - RequestTrieNodesCalled: func(shardId uint32, hash []byte, topic string) { - for i := range nodes { - node, _ := NewInterceptedTrieNode(nodes[i], marsh, hasher) - nodesCacher.Put(node.hash, node) - } - }, + for i := range nodes { + node, _ := NewInterceptedTrieNode(nodes[i], marsh, hasher) + nodesCacher.Put(node.hash, node) } - syncer, _ := NewTrieSyncer(resolver, nodesCacher, tr, time.Second, 0, "trie") - syncer.interceptedNodes.RegisterHandler(func(key []byte) { - syncer.chRcvTrieNodes <- true - }) firstChildIndex := 5 secondChildIndex := 7 bn := getCollapsedBn(t, tr.root) - err := bn.loadChildren(syncer) + getNode := func(hash []byte) (node, error) { + cacheData, _ := nodesCacher.Get(hash) + return trieNode(cacheData) + } + + missing, err := bn.loadChildren(getNode) assert.Nil(t, err) assert.NotNil(t, bn.children[firstChildIndex]) assert.NotNil(t, bn.children[secondChildIndex]) - - assert.Equal(t, 5, nodesCacher.Len()) + assert.Equal(t, 0, len(missing)) + assert.Equal(t, 6, nodesCacher.Len()) } func getCollapsedBn(t *testing.T, n node) *branchNode { diff --git a/data/trie/errors.go b/data/trie/errors.go index 6215bb1b4d8..59a835bba78 100644 --- a/data/trie/errors.go +++ b/data/trie/errors.go @@ -66,3 +66,6 @@ var ErrNilPathManager = errors.New("nil path manager") // ErrInvalidTrieTopic signals that invalid trie topic has been provided var ErrInvalidTrieTopic = errors.New("invalid trie topic") + +// ErrNilContext signals that nil context has been provided +var ErrNilContext = errors.New("nil context") diff --git a/data/trie/extensionNode.go b/data/trie/extensionNode.go index a8f1bdb6310..038775fce03 100644 --- a/data/trie/extensionNode.go +++ b/data/trie/extensionNode.go @@ -567,25 +567,23 @@ func (en *extensionNode) setDirty(dirty bool) { en.dirty = dirty } -func (en *extensionNode) loadChildren(syncer *trieSyncer) error { +func (en *extensionNode) loadChildren(getNode func([]byte) (node, error)) ([][]byte, error) { err := en.isEmptyOrNil() if err != nil { - return err + return nil, err } if en.EncodedChild == nil { - return ErrNilNode + return nil, ErrNilNode } - child, err := syncer.getNode(en.EncodedChild) + child, err := getNode(en.EncodedChild) if err != nil { - return err + return [][]byte{en.EncodedChild}, nil } en.child = child - syncer.interceptedNodes.Remove(en.hash) - - return nil + return nil, nil } func (en *extensionNode) getAllLeaves(leaves map[string][]byte, key []byte, db data.DBWriteCacher, marshalizer marshal.Marshalizer) error { diff --git a/data/trie/extensionNode_test.go b/data/trie/extensionNode_test.go index f3771f374e6..5a3645ab7e8 100644 --- a/data/trie/extensionNode_test.go +++ b/data/trie/extensionNode_test.go @@ -5,7 +5,6 @@ import ( "fmt" "reflect" "testing" - "time" "github.com/ElrondNetwork/elrond-go/data/mock" "github.com/ElrondNetwork/elrond-go/storage/lrucache" @@ -835,26 +834,22 @@ func TestExtensionNode_loadChildren(t *testing.T) { _ = tr.root.setRootHash() nodes, _ := getEncodedTrieNodesAndHashes(tr) nodesCacher, _ := lrucache.NewCache(100) - resolver := &mock.RequestHandlerStub{ - RequestTrieNodesCalled: func(shardId uint32, hash []byte, topic string) { - for i := range nodes { - node, _ := NewInterceptedTrieNode(nodes[i], marsh, hasher) - nodesCacher.Put(node.hash, node) - } - }, + for i := range nodes { + node, _ := NewInterceptedTrieNode(nodes[i], marsh, hasher) + nodesCacher.Put(node.hash, node) } - syncer, _ := 
NewTrieSyncer(resolver, nodesCacher, tr, time.Second, 0, "trie") - syncer.interceptedNodes.RegisterHandler(func(key []byte) { - syncer.chRcvTrieNodes <- true - }) en := getCollapsedEn(t, tr.root) - err := en.loadChildren(syncer) + getNode := func(hash []byte) (node, error) { + cacheData, _ := nodesCacher.Get(hash) + return trieNode(cacheData) + } + _, err := en.loadChildren(getNode) assert.Nil(t, err) assert.NotNil(t, en.child) - assert.Equal(t, 3, nodesCacher.Len()) + assert.Equal(t, 4, nodesCacher.Len()) } func getCollapsedEn(t *testing.T, n node) *extensionNode { diff --git a/data/trie/factory/trieCreator.go b/data/trie/factory/trieCreator.go index 4d44705eb54..252621fab99 100644 --- a/data/trie/factory/trieCreator.go +++ b/data/trie/factory/trieCreator.go @@ -53,7 +53,7 @@ func NewTrieFactory( } // Create creates a new trie -func (tc *trieCreator) Create(trieStorageCfg config.StorageConfig, pruningEnabled bool) (data.Trie, error) { +func (tc *trieCreator) Create(trieStorageCfg config.StorageConfig, pruningEnabled bool) (data.StorageManager, data.Trie, error) { trieStoragePath, mainDb := path.Split(tc.pathManager.PathForStatic(tc.shardId, trieStorageCfg.DB.FilePath)) dbConfig := factory.GetDBFromConfig(trieStorageCfg.DB) @@ -64,17 +64,22 @@ func (tc *trieCreator) Create(trieStorageCfg config.StorageConfig, pruningEnable factory.GetBloomFromConfig(trieStorageCfg.Bloom), ) if err != nil { - return nil, err + return nil, nil, err } log.Trace("trie pruning status", "enabled", pruningEnabled) if !pruningEnabled { trieStorage, errNewTrie := trie.NewTrieStorageManagerWithoutPruning(accountsTrieStorage) if errNewTrie != nil { - return nil, errNewTrie + return nil, nil, errNewTrie } - return trie.NewTrie(trieStorage, tc.marshalizer, tc.hasher) + newTrie, err := trie.NewTrie(trieStorage, tc.marshalizer, tc.hasher) + if err != nil { + return nil, nil, err + } + + return trieStorage, newTrie, nil } arg := storageUnit.ArgDB{ @@ -86,12 +91,12 @@ func (tc *trieCreator) Create(trieStorageCfg config.StorageConfig, pruningEnable } evictionDb, err := storageUnit.NewDB(arg) if err != nil { - return nil, err + return nil, nil, err } ewl, err := evictionWaitingList.NewEvictionWaitingList(tc.evictionWaitingListCfg.Size, evictionDb, tc.marshalizer) if err != nil { - return nil, err + return nil, nil, err } snapshotDbCfg := config.DBConfig{ @@ -104,10 +109,15 @@ func (tc *trieCreator) Create(trieStorageCfg config.StorageConfig, pruningEnable trieStorage, err := trie.NewTrieStorageManager(accountsTrieStorage, tc.marshalizer, tc.hasher, snapshotDbCfg, ewl) if err != nil { - return nil, err + return nil, nil, err + } + + newTrie, err := trie.NewTrie(trieStorage, tc.marshalizer, tc.hasher) + if err != nil { + return nil, nil, err } - return trie.NewTrie(trieStorage, tc.marshalizer, tc.hasher) + return trieStorage, newTrie, nil } // IsInterfaceNil returns true if there is no value under the interface diff --git a/data/trie/factory/trieCreator_test.go b/data/trie/factory/trieCreator_test.go index b8e25e3e865..ac34d0cb24c 100644 --- a/data/trie/factory/trieCreator_test.go +++ b/data/trie/factory/trieCreator_test.go @@ -81,7 +81,7 @@ func TestTrieFactory_CreateNotSupportedCacheType(t *testing.T) { tf, _ := NewTrieFactory(args) trieStorageCfg := config.StorageConfig{} - tr, err := tf.Create(trieStorageCfg, false) + _, tr, err := tf.Create(trieStorageCfg, false) require.Nil(t, tr) require.Equal(t, storage.ErrNotSupportedCacheType, err) } @@ -93,7 +93,7 @@ func TestTrieFactory_CreateWithoutPrunningWork(t *testing.T) { tf, 
_ := NewTrieFactory(args) trieStorageCfg := createTrieStorageCfg() - tr, err := tf.Create(trieStorageCfg, false) + _, tr, err := tf.Create(trieStorageCfg, false) require.NotNil(t, tr) require.Nil(t, err) } @@ -105,7 +105,7 @@ func TestTrieFactory_CreateWithPrunningWrongDbType(t *testing.T) { tf, _ := NewTrieFactory(args) trieStorageCfg := createTrieStorageCfg() - tr, err := tf.Create(trieStorageCfg, true) + _, tr, err := tf.Create(trieStorageCfg, true) require.Nil(t, tr) require.Equal(t, storage.ErrNotSupportedDBType, err) } @@ -120,7 +120,7 @@ func TestTrieFactory_CreateInvalidCacheSize(t *testing.T) { tf, _ := NewTrieFactory(args) trieStorageCfg := createTrieStorageCfg() - tr, err := tf.Create(trieStorageCfg, true) + _, tr, err := tf.Create(trieStorageCfg, true) require.Nil(t, tr) require.Equal(t, data.ErrInvalidCacheSize, err) } @@ -136,7 +136,7 @@ func TestTrieFactory_CreateWithPRunningShouldWork(t *testing.T) { tf, _ := NewTrieFactory(args) trieStorageCfg := createTrieStorageCfg() - tr, err := tf.Create(trieStorageCfg, true) + _, tr, err := tf.Create(trieStorageCfg, true) require.NotNil(t, tr) require.Nil(t, err) } diff --git a/data/trie/interface.go b/data/trie/interface.go index 4f7101b1531..e37260de8c5 100644 --- a/data/trie/interface.go +++ b/data/trie/interface.go @@ -3,6 +3,7 @@ package trie import ( "io" "sync" + "time" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/hashing" @@ -36,7 +37,7 @@ type node interface { getChildren(db data.DBWriteCacher) ([]node, error) isValid() bool setDirty(bool) - loadChildren(*trieSyncer) error + loadChildren(func([]byte) (node, error)) ([][]byte, error) getAllLeaves(map[string][]byte, []byte, data.DBWriteCacher, marshal.Marshalizer) error getMarshalizer() marshal.Marshalizer @@ -59,5 +60,6 @@ type snapshotNode interface { // RequestHandler defines the methods through which request to data can be made type RequestHandler interface { RequestTrieNodes(destShardID uint32, hash []byte, topic string) + RequestInterval() time.Duration IsInterfaceNil() bool } diff --git a/data/trie/leafNode.go b/data/trie/leafNode.go index fa0da3ff636..ed5dbb40c85 100644 --- a/data/trie/leafNode.go +++ b/data/trie/leafNode.go @@ -335,9 +335,8 @@ func (ln *leafNode) setDirty(dirty bool) { ln.dirty = dirty } -func (ln *leafNode) loadChildren(syncer *trieSyncer) error { - syncer.interceptedNodes.Remove(ln.hash) - return nil +func (ln *leafNode) loadChildren(_ func([]byte) (node, error)) ([][]byte, error) { + return nil, nil } func (ln *leafNode) getAllLeaves(leaves map[string][]byte, key []byte, _ data.DBWriteCacher, _ marshal.Marshalizer) error { diff --git a/data/trie/leafNode_test.go b/data/trie/leafNode_test.go index 1a4d6abd50c..fe6b6fe9053 100644 --- a/data/trie/leafNode_test.go +++ b/data/trie/leafNode_test.go @@ -6,7 +6,6 @@ import ( "fmt" "reflect" "testing" - "time" "github.com/ElrondNetwork/elrond-go/data/mock" "github.com/ElrondNetwork/elrond-go/hashing" @@ -534,22 +533,17 @@ func TestLeafNode_loadChildren(t *testing.T) { tr := initTrie() nodes, hashes := getEncodedTrieNodesAndHashes(tr) nodesCacher, _ := lrucache.NewCache(100) - - resolver := &mock.RequestHandlerStub{} for i := range nodes { node, _ := NewInterceptedTrieNode(nodes[i], marsh, hasher) nodesCacher.Put(node.hash, node) } - syncer, _ := NewTrieSyncer(resolver, nodesCacher, tr, time.Second, 0, "trie") - syncer.interceptedNodes.RegisterHandler(func(key []byte) { - syncer.chRcvTrieNodes <- true - }) lnPosition := 5 ln := &leafNode{baseNode: &baseNode{hash: 
hashes[lnPosition]}} - err := ln.loadChildren(syncer) + missing, err := ln.loadChildren(nil) assert.Nil(t, err) - assert.Equal(t, 5, nodesCacher.Len()) + assert.Equal(t, 6, nodesCacher.Len()) + assert.Equal(t, 0, len(missing)) } //------- deepClone diff --git a/data/trie/sync.go b/data/trie/sync.go index f2f0246d33c..a168b6fb242 100644 --- a/data/trie/sync.go +++ b/data/trie/sync.go @@ -2,25 +2,34 @@ package trie import ( "bytes" + "context" "sync" "time" + "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/storage" ) type trieSyncer struct { - trie *patriciaMerkleTrie - requestHandler RequestHandler - interceptedNodes storage.Cacher - chRcvTrieNodes chan bool - waitTime time.Duration - shardId uint32 - topic string - - requestedHashes [][]byte - requestedHashesMutex sync.Mutex + trie *patriciaMerkleTrie + rootFound bool + rootHash []byte + + requestHandler RequestHandler + interceptedNodes storage.Cacher + shardId uint32 + topic string + waitTimeBetweenRequests time.Duration + + nodeHashes map[string]struct{} + nodeHashesMutex sync.Mutex + + receivedNodes map[string]node + receivedNodesMutex sync.Mutex + + chanReceivedNew chan bool } // NewTrieSyncer creates a new instance of trieSyncer @@ -28,7 +37,6 @@ func NewTrieSyncer( requestHandler RequestHandler, interceptedNodes storage.Cacher, trie data.Trie, - waitTime time.Duration, shardId uint32, topic string, ) (*trieSyncer, error) { @@ -50,72 +58,148 @@ func NewTrieSyncer( return nil, ErrWrongTypeAssertion } - return &trieSyncer{ - requestHandler: requestHandler, - interceptedNodes: interceptedNodes, - trie: pmt, - chRcvTrieNodes: make(chan bool), - requestedHashes: make([][]byte, 0), - waitTime: waitTime, - topic: topic, - shardId: shardId, - }, nil + ts := &trieSyncer{ + requestHandler: requestHandler, + interceptedNodes: interceptedNodes, + trie: pmt, + nodeHashes: make(map[string]struct{}), + receivedNodes: make(map[string]node), + topic: topic, + shardId: shardId, + waitTimeBetweenRequests: requestHandler.RequestInterval(), + chanReceivedNew: make(chan bool), + } + ts.interceptedNodes.RegisterHandler(ts.trieNodeIntercepted) + + return ts, nil } // StartSyncing completes the trie, asking for missing trie nodes on the network -func (ts *trieSyncer) StartSyncing(rootHash []byte) error { - // TODO: add implementation to try to request for trie nodes for several times before returning with error - +func (ts *trieSyncer) StartSyncing(rootHash []byte, ctx context.Context) error { if len(rootHash) == 0 { return ErrInvalidHash } - ts.interceptedNodes.RegisterHandler(ts.trieNodeIntercepted) - - currentNode, err := ts.getNode(rootHash) - if err != nil { - return err + if ctx == nil { + return ErrNilContext } - ts.trie.root = currentNode - err = ts.trie.root.loadChildren(ts) - if err != nil { - return err - } + ts.nodeHashesMutex.Lock() + ts.nodeHashes = make(map[string]struct{}) + ts.nodeHashes[string(rootHash)] = struct{}{} + ts.nodeHashesMutex.Unlock() - nextNodes, err := ts.trie.root.getChildren(ts.trie.Database()) - if err != nil { - return err - } + ts.rootFound = false + ts.rootHash = rootHash - for len(nextNodes) != 0 { - currentNode, err = ts.getNode(nextNodes[0].getHash()) + for { + err := ts.getNextNodes() if err != nil { return err } - nextNodes = nextNodes[1:] + _ = core.EmptyChannel(ts.chanReceivedNew) - err = currentNode.loadChildren(ts) - if err != nil { - return err + numRequested := ts.requestNodes() + if 
numRequested == 0 { + err := ts.trie.Commit() + if err != nil { + return err + } + + return nil } - var children []node - children, err = currentNode.getChildren(ts.trie.Database()) - if err != nil { - return err + select { + case <-ts.chanReceivedNew: + continue + case <-time.After(ts.waitTimeBetweenRequests): + continue + case <-ctx.Done(): + return ErrTimeIsOut } - nextNodes = append(nextNodes, children...) } +} - err = ts.trie.Commit() - if err != nil { - return err +func (ts *trieSyncer) getNextNodes() error { + var currentNode node + var err error + nextNodes := make([]node, 0) + missingNodes := make([][]byte, 0) + currentMissingNodes := make([][]byte, 0) + + newElement := true + + for newElement { + newElement = false + + ts.nodeHashesMutex.Lock() + for nodeHash := range ts.nodeHashes { + currentMissingNodes = currentMissingNodes[:0] + + currentNode, err = ts.getNode([]byte(nodeHash)) + if err != nil { + continue + } + + if !ts.rootFound && bytes.Equal([]byte(nodeHash), ts.rootHash) { + ts.trie.root = currentNode + } + + currentMissingNodes, err = currentNode.loadChildren(ts.getNode) + if err != nil { + ts.nodeHashesMutex.Unlock() + return err + } + + if len(currentMissingNodes) > 0 { + missingNodes = append(missingNodes, currentMissingNodes...) + continue + } + + delete(ts.nodeHashes, nodeHash) + ts.deleteFromReceived(nodeHash) + + nextNodes, err = currentNode.getChildren(ts.trie.Database()) + if err != nil { + ts.nodeHashesMutex.Lock() + return err + } + + tmpNewElement := ts.addNew(nextNodes) + newElement = newElement || tmpNewElement + } + ts.nodeHashesMutex.Unlock() + } + + ts.nodeHashesMutex.Lock() + for _, missingNode := range missingNodes { + ts.nodeHashes[string(missingNode)] = struct{}{} } + ts.nodeHashesMutex.Unlock() return nil } +func (ts *trieSyncer) deleteFromReceived(nodeHash string) { + ts.receivedNodesMutex.Lock() + delete(ts.receivedNodes, nodeHash) + ts.receivedNodesMutex.Unlock() +} + +// adds new elements to needed hash map, lock ts.nodeHashesMutex before calling +func (ts *trieSyncer) addNew(nextNodes []node) bool { + newElement := false + for _, nextNode := range nextNodes { + nextHash := string(nextNode.getHash()) + if _, ok := ts.nodeHashes[nextHash]; !ok { + ts.nodeHashes[nextHash] = struct{}{} + newElement = true + } + } + + return newElement +} + // Trie returns the synced trie func (ts *trieSyncer) Trie() data.Trie { return ts.trie @@ -127,13 +211,15 @@ func (ts *trieSyncer) getNode(hash []byte) (node, error) { return trieNode(n) } - err := ts.requestNode(hash) - if err != nil { - return nil, err + ts.receivedNodesMutex.Lock() + node, ok := ts.receivedNodes[string(hash)] + ts.receivedNodesMutex.Unlock() + + if ok { + return node, nil } - n, _ = ts.interceptedNodes.Get(hash) - return trieNode(n) + return nil, ErrNodeNotFound } func trieNode(data interface{}) (node, error) { @@ -145,51 +231,40 @@ func trieNode(data interface{}) (node, error) { return n.node, nil } -func (ts *trieSyncer) requestNode(hash []byte) error { - receivedRequestedHashTrigger := append(hash, hash...) 
- ts.requestedHashesMutex.Lock() - ts.requestedHashes = append(ts.requestedHashes, receivedRequestedHashTrigger) - ts.requestedHashesMutex.Unlock() - - ts.requestHandler.RequestTrieNodes(ts.shardId, hash, ts.topic) - - return ts.waitForTrieNode() -} - -func (ts *trieSyncer) waitForTrieNode() error { - select { - case <-ts.chRcvTrieNodes: - return nil - case <-time.After(ts.waitTime): - return ErrTimeIsOut +func (ts *trieSyncer) requestNodes() uint32 { + ts.nodeHashesMutex.Lock() + numRequested := uint32(len(ts.nodeHashes)) + for hash := range ts.nodeHashes { + ts.requestHandler.RequestTrieNodes(ts.shardId, []byte(hash), ts.topic) } + ts.nodeHashesMutex.Unlock() + + return numRequested } func (ts *trieSyncer) trieNodeIntercepted(hash []byte) { - ts.requestedHashesMutex.Lock() - - if hashInSlice(hash, ts.requestedHashes) { - ts.chRcvTrieNodes <- true - ts.removeRequestedHash(hash) + ts.nodeHashesMutex.Lock() + _, ok := ts.nodeHashes[string(hash)] + ts.nodeHashesMutex.Unlock() + if !ok { + return } - ts.requestedHashesMutex.Unlock() -} -func (ts *trieSyncer) removeRequestedHash(hash []byte) { - for i := range ts.requestedHashes { - if bytes.Equal(ts.requestedHashes[i], hash) { - ts.requestedHashes = append(ts.requestedHashes[:i], ts.requestedHashes[i+1:]...) - } + interceptedData, ok := ts.interceptedNodes.Get(hash) + if !ok { + return } -} -func hashInSlice(hash []byte, hashes [][]byte) bool { - for _, h := range hashes { - if bytes.Equal(h, hash) { - return true - } + node, err := trieNode(interceptedData) + if err != nil { + return } - return false + + ts.receivedNodesMutex.Lock() + ts.receivedNodes[string(hash)] = node + ts.receivedNodesMutex.Unlock() + + ts.chanReceivedNew <- true } // IsInterfaceNil returns true if there is no value under the interface diff --git a/data/trie/sync_test.go b/data/trie/sync_test.go index c69297f97ac..72a659254ba 100644 --- a/data/trie/sync_test.go +++ b/data/trie/sync_test.go @@ -1,6 +1,7 @@ package trie_test import ( + "context" "io/ioutil" "math/rand" "strconv" @@ -73,10 +74,14 @@ func TestTrieSyncer_StartSyncing(t *testing.T) { } rootHash, _ := syncTrie.Root() - sync, _ := trie.NewTrieSyncer(resolver, interceptedNodesCacher, tr, 10*time.Second, 0, "trie") + sync, _ := trie.NewTrieSyncer(resolver, interceptedNodesCacher, tr, 0, "trie") + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - _ = sync.StartSyncing(rootHash) + err := sync.StartSyncing(rootHash, ctx) + + cancel() newTrieRootHash, _ := tr.Root() + assert.Nil(t, err) assert.Equal(t, rootHash, newTrieRootHash) assert.Equal(t, expectedRequests, nrRequests) } diff --git a/dataRetriever/errors.go b/dataRetriever/errors.go index d96b8ae8452..eccecf52042 100644 --- a/dataRetriever/errors.go +++ b/dataRetriever/errors.go @@ -178,3 +178,6 @@ var ErrInvalidValue = errors.New("invalid value") // ErrNilWhiteListHandler signals that white list handler is nil var ErrNilWhiteListHandler = errors.New("nil white list handler") + +// ErrRequestIntervalTooSmall signals that request interval is too small +var ErrRequestIntervalTooSmall = errors.New("request interval is too small") diff --git a/dataRetriever/factory/dataPoolFactory.go b/dataRetriever/factory/dataPoolFactory.go new file mode 100644 index 00000000000..ac888fd2c63 --- /dev/null +++ b/dataRetriever/factory/dataPoolFactory.go @@ -0,0 +1,99 @@ +package factory + +import ( + logger "github.com/ElrondNetwork/elrond-go-logger" + "github.com/ElrondNetwork/elrond-go/config" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + 
"github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool" + "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool/headersCache" + txPoolFactory "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/txpool" + "github.com/ElrondNetwork/elrond-go/dataRetriever/shardedData" + "github.com/ElrondNetwork/elrond-go/dataRetriever/txpool" + "github.com/ElrondNetwork/elrond-go/process/economics" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/storage/factory" + "github.com/ElrondNetwork/elrond-go/storage/storageUnit" +) + +var log = logger.GetOrCreate("dataRetriever/factory") + +// ArgsDataPool holds the arguments needed for NewDataPoolFromConfig function +type ArgsDataPool struct { + Config *config.Config + EconomicsData *economics.EconomicsData + ShardCoordinator sharding.Coordinator +} + +// TODO: unit tests +// NewDataPoolFromConfig will return a new instance of a PoolsHolder +func NewDataPoolFromConfig(args ArgsDataPool) (dataRetriever.PoolsHolder, error) { + log.Debug("creatingDataPool from config") + + mainConfig := args.Config + + txPool, err := txPoolFactory.CreateTxPool(txpool.ArgShardedTxPool{ + Config: factory.GetCacherFromConfig(mainConfig.TxDataPool), + MinGasPrice: args.EconomicsData.MinGasPrice(), + NumberOfShards: args.ShardCoordinator.NumberOfShards(), + SelfShardID: args.ShardCoordinator.SelfId(), + }) + if err != nil { + log.Error("error creating txpool") + return nil, err + } + + uTxPool, err := shardedData.NewShardedData(factory.GetCacherFromConfig(mainConfig.UnsignedTransactionDataPool)) + if err != nil { + log.Error("error creating smart contract result pool") + return nil, err + } + + rewardTxPool, err := shardedData.NewShardedData(factory.GetCacherFromConfig(mainConfig.RewardTransactionDataPool)) + if err != nil { + log.Error("error creating reward transaction pool") + return nil, err + } + + hdrPool, err := headersCache.NewHeadersPool(mainConfig.HeadersPoolConfig) + if err != nil { + log.Error("error creating headers pool") + return nil, err + } + + cacherCfg := factory.GetCacherFromConfig(mainConfig.TxBlockBodyDataPool) + txBlockBody, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + if err != nil { + log.Error("error creating txBlockBody") + return nil, err + } + + cacherCfg = factory.GetCacherFromConfig(mainConfig.PeerBlockBodyDataPool) + peerChangeBlockBody, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + if err != nil { + log.Error("error creating peerChangeBlockBody") + return nil, err + } + + cacherCfg = factory.GetCacherFromConfig(mainConfig.TrieNodesDataPool) + trieNodes, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + if err != nil { + log.Info("error creating trieNodes") + return nil, err + } + + currBlockTxs, err := dataPool.NewCurrentBlockPool() + if err != nil { + return nil, err + } + + return dataPool.NewDataPool( + txPool, + uTxPool, + rewardTxPool, + hdrPool, + txBlockBody, + peerChangeBlockBody, + trieNodes, + currBlockTxs, + ) +} diff --git a/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory.go b/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory.go index 1ea8cfb203a..e3c1ff1d309 100644 --- a/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory.go +++ b/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory.go @@ -138,7 +138,11 @@ func (mrcf *metaResolversContainerFactory) generateShardHeaderResolvers() error return 
mrcf.container.AddMultiple(keys, resolversSlice) } -func (mrcf *metaResolversContainerFactory) createShardHeaderResolver(topic string, excludedTopic string, shardID uint32) (dataRetriever.Resolver, error) { +func (mrcf *metaResolversContainerFactory) createShardHeaderResolver( + topic string, + excludedTopic string, + shardID uint32, +) (dataRetriever.Resolver, error) { hdrStorer := mrcf.store.GetStorer(dataRetriever.BlockHeaderUnit) resolverSender, err := mrcf.createOneResolverSender(topic, excludedTopic, shardID) @@ -185,7 +189,10 @@ func (mrcf *metaResolversContainerFactory) generateMetaChainHeaderResolvers() er return mrcf.container.Add(identifierHeader, resolver) } -func (mrcf *metaResolversContainerFactory) createMetaChainHeaderResolver(identifier string, shardId uint32) (dataRetriever.Resolver, error) { +func (mrcf *metaResolversContainerFactory) createMetaChainHeaderResolver( + identifier string, + shardId uint32, +) (dataRetriever.Resolver, error) { hdrStorer := mrcf.store.GetStorer(dataRetriever.MetaBlockUnit) resolverSender, err := mrcf.createOneResolverSender(identifier, emptyExcludePeersOnTopic, shardId) diff --git a/dataRetriever/mock/chainStorerMock.go b/dataRetriever/mock/chainStorerMock.go index 2948d09b6ed..83e932e1016 100644 --- a/dataRetriever/mock/chainStorerMock.go +++ b/dataRetriever/mock/chainStorerMock.go @@ -6,7 +6,7 @@ import ( "github.com/pkg/errors" ) -// ChainStorerMock is a mock implementation of the ChianStorer interface +// ChainStorerMock is a mock implementation of the ChainStorer interface type ChainStorerMock struct { AddStorerCalled func(key dataRetriever.UnitType, s storage.Storer) GetStorerCalled func(unitType dataRetriever.UnitType) storage.Storer diff --git a/dataRetriever/requestHandlers/requestHandler.go b/dataRetriever/requestHandlers/requestHandler.go index 34f12268dd0..e4b0c4db3cc 100644 --- a/dataRetriever/requestHandlers/requestHandler.go +++ b/dataRetriever/requestHandlers/requestHandler.go @@ -21,6 +21,7 @@ type resolverRequestHandler struct { shardID uint32 maxTxsToRequest int sweepTime time.Time + requestInterval time.Duration mutSweepTime sync.Mutex } @@ -33,6 +34,7 @@ func NewResolverRequestHandler( whiteList dataRetriever.WhiteListHandler, maxTxsToRequest int, shardID uint32, + requestInterval time.Duration, ) (*resolverRequestHandler, error) { if check.IfNil(finder) { @@ -47,6 +49,9 @@ func NewResolverRequestHandler( if check.IfNil(whiteList) { return nil, dataRetriever.ErrNilWhiteListHandler } + if requestInterval < time.Millisecond { + return nil, fmt.Errorf("%w:request interval is smaller than a millisecond", dataRetriever.ErrRequestIntervalTooSmall) + } rrh := &resolverRequestHandler{ resolversFinder: finder, @@ -55,6 +60,7 @@ func NewResolverRequestHandler( shardID: shardID, maxTxsToRequest: maxTxsToRequest, whiteList: whiteList, + requestInterval: requestInterval, } rrh.sweepTime = time.Now() @@ -503,6 +509,11 @@ func (rrh *resolverRequestHandler) RequestStartOfEpochMetaBlock(epoch uint32) { rrh.addRequestedItem([]byte(epochStartIdentifier)) } +// RequestInterval returns the request interval between sending the same request +func (rrh *resolverRequestHandler) RequestInterval() time.Duration { + return rrh.requestInterval +} + // IsInterfaceNil returns true if there is no value under the interface func (rrh *resolverRequestHandler) IsInterfaceNil() bool { return rrh == nil @@ -526,7 +537,7 @@ func (rrh *resolverRequestHandler) sweepIfNeeded() { rrh.mutSweepTime.Lock() defer rrh.mutSweepTime.Unlock() - if 
time.Since(rrh.sweepTime) <= time.Second { + if time.Since(rrh.sweepTime) <= rrh.requestInterval { return } diff --git a/dataRetriever/requestHandlers/requestHandler_test.go b/dataRetriever/requestHandlers/requestHandler_test.go index 58c6c7b73ec..2a3ee7090d4 100644 --- a/dataRetriever/requestHandlers/requestHandler_test.go +++ b/dataRetriever/requestHandlers/requestHandler_test.go @@ -40,6 +40,7 @@ func TestNewResolverRequestHandlerNilFinder(t *testing.T) { &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) assert.Nil(t, rrh) @@ -55,6 +56,7 @@ func TestNewResolverRequestHandlerNilRequestedItemsHandler(t *testing.T) { &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) assert.Nil(t, rrh) @@ -70,6 +72,7 @@ func TestNewResolverRequestHandlerMaxTxRequestTooSmall(t *testing.T) { &mock.WhiteListHandlerStub{}, 0, 0, + time.Second, ) assert.Nil(t, rrh) @@ -85,6 +88,7 @@ func TestNewResolverRequestHandler(t *testing.T) { &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) assert.Nil(t, err) @@ -114,6 +118,7 @@ func TestResolverRequestHandler_RequestTransactionErrorWhenGettingCrossShardReso &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestTransaction(0, make([][]byte, 0)) @@ -141,6 +146,7 @@ func TestResolverRequestHandler_RequestTransactionWrongResolverShouldNotPanic(t &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestTransaction(0, make([][]byte, 0)) @@ -167,6 +173,7 @@ func TestResolverRequestHandler_RequestTransactionShouldRequestTransactions(t *t &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestTransaction(0, [][]byte{[]byte("txHash")}) @@ -209,6 +216,7 @@ func TestResolverRequestHandler_RequestTransactionErrorsOnRequestShouldNotPanic( &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestTransaction(0, [][]byte{[]byte("txHash")}) @@ -245,6 +253,7 @@ func TestResolverRequestHandler_RequestMiniBlockErrorWhenGettingCrossShardResolv &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestMiniBlock(0, make([]byte, 0)) @@ -277,6 +286,7 @@ func TestResolverRequestHandler_RequestMiniBlockErrorsOnRequestShouldNotPanic(t &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestMiniBlock(0, []byte("mbHash")) @@ -303,6 +313,7 @@ func TestResolverRequestHandler_RequestMiniBlockShouldCallRequestOnResolver(t *t &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestMiniBlock(0, []byte("mbHash")) @@ -331,6 +342,7 @@ func TestResolverRequestHandler_RequestMiniBlockShouldCallWithTheCorrectEpoch(t &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.SetEpoch(expectedEpoch) @@ -353,6 +365,7 @@ func TestResolverRequestHandler_RequestShardHeaderHashAlreadyRequestedShouldNotR &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestShardHeader(0, make([]byte, 0)) @@ -367,6 +380,7 @@ func TestResolverRequestHandler_RequestShardHeaderHashBadRequest(t *testing.T) { &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestShardHeader(1, make([]byte, 0)) @@ -393,6 +407,7 @@ func TestResolverRequestHandler_RequestShardHeaderShouldCallRequestOnResolver(t &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestShardHeader(0, []byte("hdrHash")) @@ -415,6 +430,7 @@ func TestResolverRequestHandler_RequestMetadHeaderHashAlreadyRequestedShouldNotR &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestMetaHeader(make([]byte, 0)) @@ -441,6 +457,7 @@ func TestResolverRequestHandler_RequestMetadHeaderHashNotHeaderResolverShouldNot &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) 
rrh.RequestMetaHeader([]byte("hdrHash")) @@ -469,6 +486,7 @@ func TestResolverRequestHandler_RequestMetaHeaderShouldCallRequestOnResolver(t * &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestMetaHeader([]byte("hdrHash")) @@ -493,6 +511,7 @@ func TestResolverRequestHandler_RequestShardHeaderByNonceAlreadyRequestedShouldN &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestShardHeaderByNonce(0, 0) @@ -515,6 +534,7 @@ func TestResolverRequestHandler_RequestShardHeaderByNonceBadRequest(t *testing.T &mock.WhiteListHandlerStub{}, 1, core.MetachainShardId, + time.Second, ) rrh.RequestShardHeaderByNonce(1, 0) @@ -543,6 +563,7 @@ func TestResolverRequestHandler_RequestShardHeaderByNonceFinderReturnsErrorShoul &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestShardHeaderByNonce(0, 0) @@ -575,6 +596,7 @@ func TestResolverRequestHandler_RequestShardHeaderByNonceFinderReturnsAWrongReso &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestShardHeaderByNonce(0, 0) @@ -607,6 +629,7 @@ func TestResolverRequestHandler_RequestShardHeaderByNonceResolverFailsShouldNotP &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestShardHeaderByNonce(0, 0) @@ -633,6 +656,7 @@ func TestResolverRequestHandler_RequestShardHeaderByNonceShouldRequest(t *testin &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestShardHeaderByNonce(0, 0) @@ -655,6 +679,7 @@ func TestResolverRequestHandler_RequestMetaHeaderHashAlreadyRequestedShouldNotRe &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestMetaHeaderByNonce(0) @@ -681,6 +706,7 @@ func TestResolverRequestHandler_RequestMetaHeaderByNonceShouldRequest(t *testing &mock.WhiteListHandlerStub{}, 100, 0, + time.Second, ) rrh.RequestMetaHeaderByNonce(0) @@ -711,6 +737,7 @@ func TestResolverRequestHandler_RequestScrErrorWhenGettingCrossShardResolverShou &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestUnsignedTransactions(0, make([][]byte, 0)) @@ -738,6 +765,7 @@ func TestResolverRequestHandler_RequestScrWrongResolverShouldNotPanic(t *testing &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestUnsignedTransactions(0, make([][]byte, 0)) @@ -764,6 +792,7 @@ func TestResolverRequestHandler_RequestScrShouldRequestScr(t *testing.T) { &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestUnsignedTransactions(0, [][]byte{[]byte("txHash")}) @@ -806,6 +835,7 @@ func TestResolverRequestHandler_RequestScrErrorsOnRequestShouldNotPanic(t *testi &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestUnsignedTransactions(0, [][]byte{[]byte("txHash")}) @@ -842,6 +872,7 @@ func TestResolverRequestHandler_RequestRewardShouldRequestReward(t *testing.T) { &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestRewardTransactions(0, [][]byte{[]byte("txHash")}) @@ -876,6 +907,7 @@ func TestRequestTrieNodes_ShouldWork(t *testing.T) { &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestTrieNodes(0, []byte("hash"), "topic") @@ -898,6 +930,7 @@ func TestRequestTrieNodes_NilResolver(t *testing.T) { &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestTrieNodes(core.MetachainShardId, []byte("hash"), "topic") @@ -926,6 +959,7 @@ func TestRequestTrieNodes_RequestByHashError(t *testing.T) { &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestTrieNodes(0, []byte("hash"), "topic") @@ -948,6 +982,7 @@ func TestRequestStartOfEpochMetaBlock_MissingResolver(t *testing.T) { &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) 
rrh.RequestStartOfEpochMetaBlock(0) @@ -971,6 +1006,7 @@ func TestRequestStartOfEpochMetaBlock_WrongResolver(t *testing.T) { &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestStartOfEpochMetaBlock(0) @@ -999,6 +1035,7 @@ func TestRequestStartOfEpochMetaBlock_RequestDataFromEpochError(t *testing.T) { &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestStartOfEpochMetaBlock(0) @@ -1031,6 +1068,7 @@ func TestRequestStartOfEpochMetaBlock_AddError(t *testing.T) { &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestStartOfEpochMetaBlock(0) diff --git a/epochStart/bootstrap/baseStorageHandler.go b/epochStart/bootstrap/baseStorageHandler.go new file mode 100644 index 00000000000..e9496a53ff7 --- /dev/null +++ b/epochStart/bootstrap/baseStorageHandler.go @@ -0,0 +1,78 @@ +package bootstrap + +import ( + "encoding/json" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process/block/bootstrapStorage" + "github.com/ElrondNetwork/elrond-go/sharding" +) + +// baseStorageHandler handles the storage functions for saving bootstrap data +type baseStorageHandler struct { + storageService dataRetriever.StorageService + shardCoordinator sharding.Coordinator + marshalizer marshal.Marshalizer + hasher hashing.Hasher + currentEpoch uint32 +} + +func (bsh *baseStorageHandler) groupMiniBlocksByShard(miniBlocks map[string]*block.MiniBlock) ([]bootstrapStorage.PendingMiniBlocksInfo, error) { + pendingMBsMap := make(map[uint32][][]byte) + for hash, miniBlock := range miniBlocks { + senderShId := miniBlock.SenderShardID + pendingMBsMap[senderShId] = append(pendingMBsMap[senderShId], []byte(hash)) + } + + sliceToRet := make([]bootstrapStorage.PendingMiniBlocksInfo, 0) + for shardID, hashes := range pendingMBsMap { + sliceToRet = append(sliceToRet, bootstrapStorage.PendingMiniBlocksInfo{ + ShardID: shardID, + MiniBlocksHashes: hashes, + }) + } + + return sliceToRet, nil +} + +func (bsh *baseStorageHandler) saveNodesCoordinatorRegistry( + metaBlock *block.MetaBlock, + nodesConfig *sharding.NodesCoordinatorRegistry, +) ([]byte, error) { + key := append([]byte(core.NodesCoordinatorRegistryKeyPrefix), metaBlock.RandSeed...) + + // TODO: replace hardcoded json - although it is hardcoded in nodesCoordinator as well. 
+ registryBytes, err := json.Marshal(nodesConfig) + if err != nil { + return nil, err + } + + err = bsh.storageService.GetStorer(dataRetriever.BootstrapUnit).Put(key, registryBytes) + if err != nil { + return nil, err + } + + return key, nil +} + +func (bsh *baseStorageHandler) commitTries(components *ComponentsNeededForBootstrap) error { + for _, trie := range components.UserAccountTries { + err := trie.Commit() + if err != nil { + return err + } + } + + for _, trie := range components.PeerAccountTries { + err := trie.Commit() + if err != nil { + return err + } + } + + return nil +} diff --git a/epochStart/bootstrap/disabled/disabledAccountsAdapter.go b/epochStart/bootstrap/disabled/disabledAccountsAdapter.go new file mode 100644 index 00000000000..c3a73eb9b25 --- /dev/null +++ b/epochStart/bootstrap/disabled/disabledAccountsAdapter.go @@ -0,0 +1,103 @@ +package disabled + +import ( + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/state" +) + +type accountsAdapter struct { +} + +// NewAccountsAdapter returns a nil implementation of accountsAdapter +func NewAccountsAdapter() *accountsAdapter { + return &accountsAdapter{} +} + +// LoadAccount - +func (a *accountsAdapter) LoadAccount(_ state.AddressContainer) (state.AccountHandler, error) { + return nil, nil +} + +// SaveAccount - +func (a *accountsAdapter) SaveAccount(_ state.AccountHandler) error { + return nil +} + +// PruneTrie - +func (a *accountsAdapter) PruneTrie(_ []byte, _ data.TriePruningIdentifier) { +} + +// GetExistingAccount - +func (a *accountsAdapter) GetExistingAccount(_ state.AddressContainer) (state.AccountHandler, error) { + return nil, nil +} + +// RemoveAccount - +func (a *accountsAdapter) RemoveAccount(_ state.AddressContainer) error { + return nil +} + +// Commit - +func (a *accountsAdapter) Commit() ([]byte, error) { + return nil, nil +} + +// JournalLen - +func (a *accountsAdapter) JournalLen() int { + return 0 +} + +// RevertToSnapshot - +func (a *accountsAdapter) RevertToSnapshot(_ int) error { + return nil +} + +// RootHash - +func (a *accountsAdapter) RootHash() ([]byte, error) { + return nil, nil +} + +// RecreateTrie - +func (a *accountsAdapter) RecreateTrie(_ []byte) error { + return nil +} + +// CancelPrune - +func (a *accountsAdapter) CancelPrune(_ []byte, _ data.TriePruningIdentifier) { + return +} + +// SnapshotState - +func (a *accountsAdapter) SnapshotState(_ []byte) { + return +} + +// SetStateCheckpoint - +func (a *accountsAdapter) SetStateCheckpoint(_ []byte) { + return +} + +// IsPruningEnabled - +func (a *accountsAdapter) IsPruningEnabled() bool { + return false +} + +// ClosePersister - +func (a *accountsAdapter) ClosePersister() error { + return nil +} + +// GetAllLeaves - +func (a *accountsAdapter) GetAllLeaves(_ []byte) (map[string][]byte, error) { + return nil, nil +} + +// RecreateAllTries - +func (a *accountsAdapter) RecreateAllTries(_ []byte) (map[string]data.Trie, error) { + return nil, nil +} + +// IsInterfaceNil - +func (a *accountsAdapter) IsInterfaceNil() bool { + return a == nil +} diff --git a/epochStart/bootstrap/disabled/disabledAntiFloodHandler.go b/epochStart/bootstrap/disabled/disabledAntiFloodHandler.go new file mode 100644 index 00000000000..6ca7b1183ed --- /dev/null +++ b/epochStart/bootstrap/disabled/disabledAntiFloodHandler.go @@ -0,0 +1,28 @@ +package disabled + +import ( + "github.com/ElrondNetwork/elrond-go/p2p" +) + +type antiFloodHandler struct { +} + +// NewAntiFloodHandler returns a new instance of antiFloodHandler +func 
NewAntiFloodHandler() *antiFloodHandler { + return &antiFloodHandler{} +} + +// CanProcessMessage return nil regardless of the input +func (a *antiFloodHandler) CanProcessMessage(_ p2p.MessageP2P, _ p2p.PeerID) error { + return nil +} + +// CanProcessMessageOnTopic return nil regardless of the input +func (a *antiFloodHandler) CanProcessMessageOnTopic(_ p2p.PeerID, _ string) error { + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (a *antiFloodHandler) IsInterfaceNil() bool { + return a == nil +} diff --git a/epochStart/bootstrap/disabled/disabledChainStorer.go b/epochStart/bootstrap/disabled/disabledChainStorer.go new file mode 100644 index 00000000000..51ae235551d --- /dev/null +++ b/epochStart/bootstrap/disabled/disabledChainStorer.go @@ -0,0 +1,121 @@ +package disabled + +import ( + "sync" + + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/storage" +) + +// ChainStorer is a mock implementation of the ChainStorer interface +type chainStorer struct { + mapStorages map[dataRetriever.UnitType]storage.Storer + mutex sync.Mutex +} + +// NewChainStorer - +func NewChainStorer() *chainStorer { + return &chainStorer{ + mapStorages: make(map[dataRetriever.UnitType]storage.Storer), + } +} + +// CloseAll - +func (c *chainStorer) CloseAll() error { + c.mutex.Lock() + defer c.mutex.Unlock() + + for _, store := range c.mapStorages { + err := store.Close() + if err != nil { + return err + } + } + + return nil +} + +// AddStorer will add a new storer to the chain map +func (c *chainStorer) AddStorer(key dataRetriever.UnitType, s storage.Storer) { + c.mutex.Lock() + defer c.mutex.Unlock() + + c.mapStorages[key] = s +} + +// GetStorer returns the storer from the chain map or nil if the storer was not found +func (c *chainStorer) GetStorer(unitType dataRetriever.UnitType) storage.Storer { + c.mutex.Lock() + defer c.mutex.Unlock() + + _, ok := c.mapStorages[unitType] + if !ok { + c.mapStorages[unitType] = CreateMemUnit() + } + + store := c.mapStorages[unitType] + return store +} + +// Has returns true if the key is found in the selected Unit or false otherwise +// It can return an error if the provided unit type is not supported or if the +// underlying implementation of the storage unit reports an error. +func (c *chainStorer) Has(unitType dataRetriever.UnitType, key []byte) error { + store := c.GetStorer(unitType) + return store.Has(key) +} + +// Get returns the value for the given key if found in the selected storage unit, +// nil otherwise. It can return an error if the provided unit type is not supported +// or if the storage unit underlying implementation reports an error +func (c *chainStorer) Get(unitType dataRetriever.UnitType, key []byte) ([]byte, error) { + store := c.GetStorer(unitType) + return store.Get(key) +} + +// Put stores the key, value pair in the selected storage unit +// It can return an error if the provided unit type is not supported +// or if the storage unit underlying implementation reports an error +func (c *chainStorer) Put(unitType dataRetriever.UnitType, key []byte, value []byte) error { + store := c.GetStorer(unitType) + return store.Put(key, value) +} + +// GetAll gets all the elements with keys in the keys array, from the selected storage unit +// It can report an error if the provided unit type is not supported, if there is a missing +// key in the unit, or if the underlying implementation of the storage unit reports an error. 
+func (c *chainStorer) GetAll(unitType dataRetriever.UnitType, keys [][]byte) (map[string][]byte, error) { + store := c.GetStorer(unitType) + allValues := make(map[string][]byte, len(keys)) + + for _, key := range keys { + value, err := store.Get(key) + if err != nil { + return nil, err + } + + allValues[string(key)] = value + } + + return allValues, nil +} + +// Destroy removes the underlying files/resources used by the storage service +func (c *chainStorer) Destroy() error { + c.mutex.Lock() + defer c.mutex.Unlock() + + for _, store := range c.mapStorages { + err := store.DestroyUnit() + if err != nil { + return err + } + } + + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (c *chainStorer) IsInterfaceNil() bool { + return c == nil +} diff --git a/epochStart/bootstrap/disabled/disabledEpochStartNotifier.go b/epochStart/bootstrap/disabled/disabledEpochStartNotifier.go new file mode 100644 index 00000000000..7abbb4950b4 --- /dev/null +++ b/epochStart/bootstrap/disabled/disabledEpochStartNotifier.go @@ -0,0 +1,31 @@ +package disabled + +import ( + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/epochStart" +) + +// EpochStartNotifier - +type EpochStartNotifier struct { +} + +// RegisterHandler - +func (desn *EpochStartNotifier) RegisterHandler(handler epochStart.ActionHandler) { +} + +// UnregisterHandler - +func (desn *EpochStartNotifier) UnregisterHandler(handler epochStart.ActionHandler) { +} + +// NotifyAllPrepare - +func (desn *EpochStartNotifier) NotifyAllPrepare(metaHeader data.HeaderHandler) { +} + +// NotifyAll - +func (desn *EpochStartNotifier) NotifyAll(hdr data.HeaderHandler) { +} + +// IsInterfaceNil - +func (desn *EpochStartNotifier) IsInterfaceNil() bool { + return desn == nil +} diff --git a/epochStart/bootstrap/disabled/disabledEpochStartTrigger.go b/epochStart/bootstrap/disabled/disabledEpochStartTrigger.go new file mode 100644 index 00000000000..bc1f941b2be --- /dev/null +++ b/epochStart/bootstrap/disabled/disabledEpochStartTrigger.go @@ -0,0 +1,78 @@ +package disabled + +import ( + "github.com/ElrondNetwork/elrond-go/data" +) + +type epochStartTrigger struct { +} + +// NewEpochStartTrigger returns a new instance of epochStartTrigger +func NewEpochStartTrigger() *epochStartTrigger { + return &epochStartTrigger{} +} + +// Update - +func (e *epochStartTrigger) Update(_ uint64) { +} + +// ReceivedHeader - +func (e *epochStartTrigger) ReceivedHeader(_ data.HeaderHandler) { +} + +// IsEpochStart - +func (e *epochStartTrigger) IsEpochStart() bool { + return false +} + +// Epoch - +func (e *epochStartTrigger) Epoch() uint32 { + return 0 +} + +// EpochStartRound - +func (e *epochStartTrigger) EpochStartRound() uint64 { + return 0 +} + +// SetProcessed - +func (e *epochStartTrigger) SetProcessed(_ data.HeaderHandler) { +} + +// RevertStateToBlock - +func (e *epochStartTrigger) RevertStateToBlock(_ data.HeaderHandler) error { + return nil +} + +// EpochStartMetaHdrHash - +func (e *epochStartTrigger) EpochStartMetaHdrHash() []byte { + return nil +} + +// GetSavedStateKey - +func (e *epochStartTrigger) GetSavedStateKey() []byte { + return nil +} + +// LoadState - +func (e *epochStartTrigger) LoadState(_ []byte) error { + return nil +} + +// SetFinalityAttestingRound - +func (e *epochStartTrigger) SetFinalityAttestingRound(_ uint64) { +} + +// EpochFinalityAttestingRound - +func (e *epochStartTrigger) EpochFinalityAttestingRound() uint64 { + return 0 +} + +// RequestEpochStartIfNeeded - +func (e 
*epochStartTrigger) RequestEpochStartIfNeeded(_ data.HeaderHandler) { +} + +// IsInterfaceNil - +func (e *epochStartTrigger) IsInterfaceNil() bool { + return e == nil +} diff --git a/epochStart/bootstrap/disabled/disabledHeaderSigVerifier.go b/epochStart/bootstrap/disabled/disabledHeaderSigVerifier.go new file mode 100644 index 00000000000..317c3552efc --- /dev/null +++ b/epochStart/bootstrap/disabled/disabledHeaderSigVerifier.go @@ -0,0 +1,28 @@ +package disabled + +import ( + "github.com/ElrondNetwork/elrond-go/data" +) + +type headerSigVerifier struct { +} + +// NewHeaderSigVerifier returns a new instance of headerSigVerifier +func NewHeaderSigVerifier() *headerSigVerifier { + return &headerSigVerifier{} +} + +// VerifyRandSeedAndLeaderSignature - +func (h *headerSigVerifier) VerifyRandSeedAndLeaderSignature(_ data.HeaderHandler) error { + return nil +} + +// VerifySignature - +func (h *headerSigVerifier) VerifySignature(_ data.HeaderHandler) error { + return nil +} + +// IsInterfaceNil - +func (h *headerSigVerifier) IsInterfaceNil() bool { + return h == nil +} diff --git a/epochStart/bootstrap/disabled/disabledMultiSigner.go b/epochStart/bootstrap/disabled/disabledMultiSigner.go new file mode 100644 index 00000000000..36e63c70b69 --- /dev/null +++ b/epochStart/bootstrap/disabled/disabledMultiSigner.go @@ -0,0 +1,63 @@ +package disabled + +import ( + "github.com/ElrondNetwork/elrond-go/crypto" +) + +type multiSigner struct { +} + +// NewMultiSigner returns a new instance of multiSigner +func NewMultiSigner() *multiSigner { + return &multiSigner{} +} + +// Create - +func (m *multiSigner) Create(_ []string, _ uint16) (crypto.MultiSigner, error) { + return nil, nil +} + +// SetAggregatedSig - +func (m *multiSigner) SetAggregatedSig([]byte) error { + return nil +} + +// Verify - +func (m *multiSigner) Verify(_ []byte, _ []byte) error { + return nil +} + +// Reset - +func (m *multiSigner) Reset(_ []string, _ uint16) error { + return nil +} + +// CreateSignatureShare - +func (m *multiSigner) CreateSignatureShare(_ []byte, _ []byte) ([]byte, error) { + return nil, nil +} + +// StoreSignatureShare - +func (m *multiSigner) StoreSignatureShare(_ uint16, _ []byte) error { + return nil +} + +// SignatureShare - +func (m *multiSigner) SignatureShare(_ uint16) ([]byte, error) { + return nil, nil +} + +// VerifySignatureShare - +func (m *multiSigner) VerifySignatureShare(_ uint16, _ []byte, _ []byte, _ []byte) error { + return nil +} + +// AggregateSigs - +func (m *multiSigner) AggregateSigs(_ []byte) ([]byte, error) { + return nil, nil +} + +// IsInterfaceNil - +func (m *multiSigner) IsInterfaceNil() bool { + return m == nil +} diff --git a/epochStart/bootstrap/disabled/disabledNodesCoordinator.go b/epochStart/bootstrap/disabled/disabledNodesCoordinator.go new file mode 100644 index 00000000000..098d64210f5 --- /dev/null +++ b/epochStart/bootstrap/disabled/disabledNodesCoordinator.go @@ -0,0 +1,103 @@ +package disabled + +import ( + "github.com/ElrondNetwork/elrond-go/sharding" +) + +// nodesCoordinator - +type nodesCoordinator struct { +} + +// NewNodesCoordinator returns a new instance of nodesCoordinator +func NewNodesCoordinator() *nodesCoordinator { + return &nodesCoordinator{} +} + +// SetNodesPerShards - +func (n *nodesCoordinator) SetNodesPerShards( + _ map[uint32][]sharding.Validator, + _ map[uint32][]sharding.Validator, + _ uint32, +) error { + return nil +} + +// SetConfig - +func (n *nodesCoordinator) SetConfig(_ *sharding.NodesCoordinatorRegistry) error { + return nil +} + +// 
ComputeLeaving - +func (n *nodesCoordinator) ComputeLeaving(_ []sharding.Validator) []sharding.Validator { + return nil +} + +// GetValidatorsIndexes - +func (n *nodesCoordinator) GetValidatorsIndexes(_ []string, _ uint32) ([]uint64, error) { + return nil, nil +} + +// GetAllEligibleValidatorsPublicKeys - +func (n *nodesCoordinator) GetAllEligibleValidatorsPublicKeys(_ uint32) (map[uint32][][]byte, error) { + return nil, nil +} + +// GetAllWaitingValidatorsPublicKeys - +func (n *nodesCoordinator) GetAllWaitingValidatorsPublicKeys(_ uint32) (map[uint32][][]byte, error) { + return nil, nil +} + +// GetConsensusValidatorsPublicKeys - +func (n *nodesCoordinator) GetConsensusValidatorsPublicKeys(_ []byte, _ uint64, _ uint32, _ uint32) ([]string, error) { + return nil, nil +} + +// GetOwnPublicKey - +func (n *nodesCoordinator) GetOwnPublicKey() []byte { + return nil +} + +// ComputeConsensusGroup - +func (n *nodesCoordinator) ComputeConsensusGroup(_ []byte, _ uint64, _ uint32, _ uint32) (validatorsGroup []sharding.Validator, err error) { + return nil, nil +} + +// GetValidatorWithPublicKey - +func (n *nodesCoordinator) GetValidatorWithPublicKey(_ []byte, _ uint32) (validator sharding.Validator, shardId uint32, err error) { + return nil, 0, nil +} + +// LoadState - +func (n *nodesCoordinator) LoadState(_ []byte) error { + return nil +} + +// GetSavedStateKey - +func (n *nodesCoordinator) GetSavedStateKey() []byte { + return nil +} + +// ShardIdForEpoch - +func (n *nodesCoordinator) ShardIdForEpoch(_ uint32) (uint32, error) { + return 0, nil +} + +// GetConsensusWhitelistedNodes - +func (n *nodesCoordinator) GetConsensusWhitelistedNodes(_ uint32) (map[string]struct{}, error) { + return nil, nil +} + +// ConsensusGroupSize - +func (n *nodesCoordinator) ConsensusGroupSize(uint32) int { + return 0 +} + +// GetNumTotalEligible - +func (n *nodesCoordinator) GetNumTotalEligible() uint64 { + return 0 +} + +// IsInterfaceNil - +func (n *nodesCoordinator) IsInterfaceNil() bool { + return n == nil +} diff --git a/epochStart/bootstrap/disabled/disabledStorer.go b/epochStart/bootstrap/disabled/disabledStorer.go new file mode 100644 index 00000000000..6cc75e4c653 --- /dev/null +++ b/epochStart/bootstrap/disabled/disabledStorer.go @@ -0,0 +1,25 @@ +package disabled + +import ( + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/ElrondNetwork/elrond-go/storage/memorydb" + "github.com/ElrondNetwork/elrond-go/storage/storageUnit" +) + +const defaultMemDBSize = 1000 +const defaultNumShards = 1 + +// CreateMemUnit creates an in-memory storer unit using maps +func CreateMemUnit() storage.Storer { + cache, err := storageUnit.NewCache(storageUnit.LRUCache, defaultMemDBSize, defaultNumShards) + if err != nil { + return nil + } + + unit, err := storageUnit.NewStorageUnit(cache, memorydb.New()) + if err != nil { + return nil + } + + return unit +} diff --git a/epochStart/bootstrap/disabled/disabledValidityAttester.go b/epochStart/bootstrap/disabled/disabledValidityAttester.go new file mode 100644 index 00000000000..ca315b29fa5 --- /dev/null +++ b/epochStart/bootstrap/disabled/disabledValidityAttester.go @@ -0,0 +1,28 @@ +package disabled + +import ( + "github.com/ElrondNetwork/elrond-go/data" +) + +type validityAttester struct { +} + +// NewValidityAttester returns a new instance of validityAttester +func NewValidityAttester() *validityAttester { + return &validityAttester{} +} + +// CheckBlockAgainstFinal - +func (v *validityAttester) CheckBlockAgainstFinal(_ data.HeaderHandler) error { + return nil +} + +// 
CheckBlockAgainstRounder - +func (v *validityAttester) CheckBlockAgainstRounder(_ data.HeaderHandler) error { + return nil +} + +// IsInterfaceNil - +func (v *validityAttester) IsInterfaceNil() bool { + return v == nil +} diff --git a/epochStart/bootstrap/export_test.go b/epochStart/bootstrap/export_test.go new file mode 100644 index 00000000000..7920331caef --- /dev/null +++ b/epochStart/bootstrap/export_test.go @@ -0,0 +1,20 @@ +package bootstrap + +import ( + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/p2p" +) + +func (s *simpleEpochStartMetaBlockInterceptor) GetReceivedMetablocks() map[string]*block.MetaBlock { + s.mutReceivedMetaBlocks.RLock() + defer s.mutReceivedMetaBlocks.RUnlock() + + return s.mapReceivedMetaBlocks +} + +func (s *simpleEpochStartMetaBlockInterceptor) GetPeersSliceForMetablocks() map[string][]p2p.PeerID { + s.mutReceivedMetaBlocks.RLock() + defer s.mutReceivedMetaBlocks.RUnlock() + + return s.mapMetaBlocksFromPeers +} diff --git a/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go b/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go new file mode 100644 index 00000000000..de414da95c9 --- /dev/null +++ b/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go @@ -0,0 +1,108 @@ +package factory + +import ( + "time" + + "github.com/ElrondNetwork/elrond-go/config" + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/crypto" + "github.com/ElrondNetwork/elrond-go/data/state/addressConverters" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/disabled" + "github.com/ElrondNetwork/elrond-go/epochStart/genesis" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/factory/interceptorscontainer" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/storage/timecache" + "github.com/ElrondNetwork/elrond-go/update" +) + +const timeSpanForBadHeaders = time.Minute + +// ArgsEpochStartInterceptorContainer holds the arguments needed for creating a new epoch start interceptors +// container factory +type ArgsEpochStartInterceptorContainer struct { + Config config.Config + ShardCoordinator sharding.Coordinator + TxSignMarshalizer marshal.Marshalizer + ProtoMarshalizer marshal.Marshalizer + Hasher hashing.Hasher + Messenger process.TopicHandler + DataPool dataRetriever.PoolsHolder + SingleSigner crypto.SingleSigner + BlockSingleSigner crypto.SingleSigner + KeyGen crypto.KeyGenerator + BlockKeyGen crypto.KeyGenerator + WhiteListHandler update.WhiteListHandler + ChainID []byte +} + +// NewEpochStartInterceptorsContainer will return a real interceptors container factory, but will many disabled +// components +func NewEpochStartInterceptorsContainer(args ArgsEpochStartInterceptorContainer) (process.InterceptorsContainer, error) { + nodesCoordinator := disabled.NewNodesCoordinator() + storer := disabled.NewChainStorer() + antiFloodHandler := disabled.NewAntiFloodHandler() + multiSigner := disabled.NewMultiSigner() + accountsAdapter := disabled.NewAccountsAdapter() + addressConverter, err := addressConverters.NewPlainAddressConverter( + args.Config.Address.Length, + args.Config.Address.Prefix, + ) + if err != nil { + return nil, err + } + blackListHandler := timecache.NewTimeCache(timeSpanForBadHeaders) + feeHandler := 
genesis.NewGenesisFeeHandler() + headerSigVerifier := disabled.NewHeaderSigVerifier() + sizeCheckDelta := 0 + validityAttester := disabled.NewValidityAttester() + epochStartTrigger := disabled.NewEpochStartTrigger() + + containerFactoryArgs := interceptorscontainer.MetaInterceptorsContainerFactoryArgs{ + ShardCoordinator: args.ShardCoordinator, + NodesCoordinator: nodesCoordinator, + Messenger: args.Messenger, + Store: storer, + ProtoMarshalizer: args.ProtoMarshalizer, + TxSignMarshalizer: args.TxSignMarshalizer, + Hasher: args.Hasher, + MultiSigner: multiSigner, + DataPool: args.DataPool, + Accounts: accountsAdapter, + AddrConverter: addressConverter, + SingleSigner: args.SingleSigner, + BlockSingleSigner: args.BlockSingleSigner, + KeyGen: args.KeyGen, + BlockKeyGen: args.BlockKeyGen, + MaxTxNonceDeltaAllowed: core.MaxTxNonceDeltaAllowed, + TxFeeHandler: feeHandler, + BlackList: blackListHandler, + HeaderSigVerifier: headerSigVerifier, + ChainID: args.ChainID, + SizeCheckDelta: uint32(sizeCheckDelta), + ValidityAttester: validityAttester, + EpochStartTrigger: epochStartTrigger, + WhiteListHandler: args.WhiteListHandler, + AntifloodHandler: antiFloodHandler, + } + + interceptorsContainerFactory, err := interceptorscontainer.NewMetaInterceptorsContainerFactory(containerFactoryArgs) + if err != nil { + return nil, err + } + + container, err := interceptorsContainerFactory.Create() + if err != nil { + return nil, err + } + + err = interceptorscontainer.SetWhiteListHandlerToInterceptors(container, args.WhiteListHandler) + if err != nil { + return nil, err + } + + return container, nil +} diff --git a/epochStart/bootstrap/fromLocalStorage.go b/epochStart/bootstrap/fromLocalStorage.go new file mode 100644 index 00000000000..a89e35ab825 --- /dev/null +++ b/epochStart/bootstrap/fromLocalStorage.go @@ -0,0 +1,205 @@ +package bootstrap + +import ( + "bytes" + "encoding/json" + "fmt" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/epochStart" + "github.com/ElrondNetwork/elrond-go/process/block/bootstrapStorage" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/storage" + storageFactory "github.com/ElrondNetwork/elrond-go/storage/factory" +) + +func (e *epochStartBootstrap) initializeFromLocalStorage() { + var errNotCritical error + e.baseData.lastEpoch, e.baseData.shardId, e.baseData.lastRound, errNotCritical = storageFactory.FindLatestDataFromStorage( + e.generalConfig, + e.marshalizer, + e.workingDir, + e.genesisNodesConfig.ChainID, + e.defaultDBPath, + e.defaultEpochString, + e.defaultShardString, + ) + if errNotCritical != nil { + e.baseData.storageExists = false + log.Debug("no epoch db found in storage", "error", errNotCritical.Error()) + } else { + e.baseData.storageExists = true + log.Debug("got last data from storage", + "epoch", e.baseData.lastEpoch, + "last round", e.baseData.lastRound, + "last shard ID", e.baseData.shardId) + } +} + +func (e *epochStartBootstrap) prepareEpochFromStorage() (Parameters, error) { + args := storageFactory.ArgsNewOpenStorageUnits{ + GeneralConfig: e.generalConfig, + Marshalizer: e.marshalizer, + WorkingDir: e.workingDir, + ChainID: e.genesisNodesConfig.ChainID, + DefaultDBPath: e.defaultDBPath, + DefaultEpochString: e.defaultEpochString, + DefaultShardString: e.defaultShardString, + } + openStorageHandler, err := storageFactory.NewStorageUnitOpenHandler(args) + if err != nil { + return Parameters{}, err + } + + unitsToOpen := 
[]string{e.generalConfig.BootstrapStorage.DB.FilePath, e.generalConfig.MetaBlockStorage.DB.FilePath} + + storageUnits, err := openStorageHandler.OpenStorageUnits(unitsToOpen) + defer func() { + for _, storer := range storageUnits { + errClose := storer.Close() + log.LogIfError(errClose) + } + }() + + if err != nil || len(storageUnits) != len(unitsToOpen) { + return Parameters{}, err + } + + _, e.nodesConfig, err = e.getLastBootstrapData(storageUnits[0]) + if err != nil { + return Parameters{}, err + } + + pubKey, err := e.publicKey.ToByteArray() + if err != nil { + return Parameters{}, err + } + + e.epochStartMeta, err = e.getEpochStartMetaFromStorage(storageUnits[1]) + if err != nil { + return Parameters{}, err + } + e.baseData.numberOfShards = uint32(len(e.epochStartMeta.EpochStart.LastFinalizedHeaders)) + + if !e.checkIfShuffledOut(pubKey, e.nodesConfig) { + parameters := Parameters{ + Epoch: e.baseData.lastEpoch, + SelfShardId: e.baseData.shardId, + NumOfShards: e.baseData.numberOfShards, + } + return parameters, nil + } + + err = e.createSyncers() + if err != nil { + return Parameters{}, err + } + + e.syncedHeaders, err = e.syncHeadersFrom(e.epochStartMeta) + if err != nil { + return Parameters{}, err + } + + prevEpochStartMetaHash := e.epochStartMeta.EpochStart.Economics.PrevEpochStartHash + prevEpochStartMeta, ok := e.syncedHeaders[string(prevEpochStartMetaHash)].(*block.MetaBlock) + if !ok { + return Parameters{}, epochStart.ErrWrongTypeAssertion + } + e.prevEpochStartMeta = prevEpochStartMeta + + e.shardCoordinator, err = sharding.NewMultiShardCoordinator(e.baseData.numberOfShards, e.baseData.shardId) + if err != nil { + return Parameters{}, err + } + + if e.shardCoordinator.SelfId() != e.genesisShardCoordinator.SelfId() { + err = e.createTriesForNewShardId(e.shardCoordinator.SelfId()) + if err != nil { + return Parameters{}, err + } + } + + if e.shardCoordinator.SelfId() == core.MetachainShardId { + err = e.requestAndProcessForMeta() + if err != nil { + return Parameters{}, err + } + } else { + err = e.requestAndProcessForShard() + if err != nil { + return Parameters{}, err + } + } + + parameters := Parameters{ + Epoch: e.baseData.lastEpoch, + SelfShardId: e.shardCoordinator.SelfId(), + NumOfShards: e.shardCoordinator.NumberOfShards(), + } + return parameters, nil +} + +func (e *epochStartBootstrap) checkIfShuffledOut( + pubKey []byte, + nodesConfig *sharding.NodesCoordinatorRegistry, +) bool { + epochConfig := nodesConfig.EpochsConfig[fmt.Sprint(e.baseData.lastEpoch)] + shardIdForConfig := fmt.Sprint(e.baseData.shardId) + + for _, validator := range epochConfig.WaitingValidators[shardIdForConfig] { + if bytes.Equal(pubKey, validator.PubKey) { + return false + } + } + + for _, validator := range epochConfig.EligibleValidators[shardIdForConfig] { + if bytes.Equal(pubKey, validator.PubKey) { + return false + } + } + + return true +} + +func (e *epochStartBootstrap) getLastBootstrapData(storer storage.Storer) (*bootstrapStorage.BootstrapData, *sharding.NodesCoordinatorRegistry, error) { + bootStorer, err := bootstrapStorage.NewBootstrapStorer(e.marshalizer, storer) + if err != nil { + return nil, nil, err + } + + highestRound := bootStorer.GetHighestRound() + bootstrapData, err := bootStorer.Get(highestRound) + if err != nil { + return nil, nil, err + } + + data, err := storer.Get(bootstrapData.NodesCoordinatorConfigKey) + if err != nil { + return nil, nil, err + } + + config := &sharding.NodesCoordinatorRegistry{} + err = json.Unmarshal(data, config) + if err != nil { + return nil, 
nil, err + } + + return &bootstrapData, config, nil +} + +func (e *epochStartBootstrap) getEpochStartMetaFromStorage(storer storage.Storer) (*block.MetaBlock, error) { + epochIdentifier := core.EpochStartIdentifier(e.baseData.lastEpoch) + data, err := storer.Get([]byte(epochIdentifier)) + if err != nil { + return nil, err + } + + metaBlock := &block.MetaBlock{} + err = e.marshalizer.Unmarshal(metaBlock, data) + if err != nil { + return nil, err + } + + return metaBlock, nil +} diff --git a/epochStart/bootstrap/interface.go b/epochStart/bootstrap/interface.go new file mode 100644 index 00000000000..c890cb6c364 --- /dev/null +++ b/epochStart/bootstrap/interface.go @@ -0,0 +1,35 @@ +package bootstrap + +import ( + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" +) + +// StartOfEpochNodesConfigHandler defines the methods to process nodesConfig from epoch start metablocks +type StartOfEpochNodesConfigHandler interface { + NodesConfigFromMetaBlock( + currMetaBlock *block.MetaBlock, + prevMetaBlock *block.MetaBlock, + publicKey []byte, + ) (*sharding.NodesCoordinatorRegistry, uint32, error) + IsInterfaceNil() bool +} + +// EpochStartInterceptor - +type EpochStartInterceptor interface { + process.Interceptor + GetEpochStartMetaBlock(target int, epoch uint32) (*block.MetaBlock, error) +} + +// EpochStartNodesCoordinator - +type EpochStartNodesCoordinator interface { + ComputeNodesConfigFor( + metaBlock *block.MetaBlock, + validatorInfos []*state.ValidatorInfo, + ) (*sharding.EpochValidators, error) + ComputeNodesConfigForGenesis(genesis *sharding.NodesSetup) (*sharding.EpochValidators, error) + ComputeShardForSelfPublicKey(epoch uint32, pubKey []byte) uint32 + IsInterfaceNil() bool +} diff --git a/epochStart/bootstrap/metaStorageHandler.go b/epochStart/bootstrap/metaStorageHandler.go new file mode 100644 index 00000000000..d1001ec5d17 --- /dev/null +++ b/epochStart/bootstrap/metaStorageHandler.go @@ -0,0 +1,212 @@ +package bootstrap + +import ( + "encoding/json" + "fmt" + "strconv" + + "github.com/ElrondNetwork/elrond-go/config" + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/disabled" + "github.com/ElrondNetwork/elrond-go/epochStart/metachain" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process/block/bootstrapStorage" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/ElrondNetwork/elrond-go/storage/factory" +) + +type metaStorageHandler struct { + *baseStorageHandler +} + +// NewMetaStorageHandler will return a new instance of metaStorageHandler +func NewMetaStorageHandler( + generalConfig config.Config, + shardCoordinator sharding.Coordinator, + pathManagerHandler storage.PathManagerHandler, + marshalizer marshal.Marshalizer, + hasher hashing.Hasher, + currentEpoch uint32, +) (*metaStorageHandler, error) { + epochStartNotifier := &disabled.EpochStartNotifier{} + storageFactory, err := factory.NewStorageServiceFactory( + &generalConfig, + shardCoordinator, + pathManagerHandler, + epochStartNotifier, + currentEpoch, + ) + if err != nil { + return nil, err + } + + storageService, err := storageFactory.CreateForMeta() + if err != nil { + return nil, err + } + 
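+ // the meta storage service is wrapped in the same baseStorageHandler used by the shard storage handler, bundling the storage service with the marshalizer, hasher and current epoch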
+ base := &baseStorageHandler{ + storageService: storageService, + shardCoordinator: shardCoordinator, + marshalizer: marshalizer, + hasher: hasher, + currentEpoch: currentEpoch, + } + + return &metaStorageHandler{baseStorageHandler: base}, nil +} + +// SaveDataToStorage will save the fetched data to storage so it will be used by the storage bootstrap component +func (msh *metaStorageHandler) SaveDataToStorage(components *ComponentsNeededForBootstrap) error { + defer func() { + err := msh.storageService.CloseAll() + if err != nil { + log.Debug("error while closing storers", "error", err) + } + }() + + bootStorer := msh.storageService.GetStorer(dataRetriever.BootstrapUnit) + + lastHeader, err := msh.saveLastHeader(components.EpochStartMetaBlock) + if err != nil { + return err + } + + miniBlocks, err := msh.groupMiniBlocksByShard(components.PendingMiniBlocks) + if err != nil { + return err + } + + triggerConfigKey, err := msh.saveTriggerRegistry(components) + if err != nil { + return err + } + + nodesCoordinatorConfigKey, err := msh.saveNodesCoordinatorRegistry(components.EpochStartMetaBlock, components.NodesConfig) + if err != nil { + return err + } + + lastCrossNotarizedHeader := msh.getLastCrossNotarizedHeaders(components.EpochStartMetaBlock) + + bootStrapData := bootstrapStorage.BootstrapData{ + LastHeader: lastHeader, + LastCrossNotarizedHeaders: lastCrossNotarizedHeader, + LastSelfNotarizedHeaders: []bootstrapStorage.BootstrapHeaderInfo{lastHeader}, + ProcessedMiniBlocks: nil, + PendingMiniBlocks: miniBlocks, + NodesCoordinatorConfigKey: nodesCoordinatorConfigKey, + EpochStartTriggerConfigKey: triggerConfigKey, + HighestFinalBlockNonce: lastHeader.Nonce, + LastRound: int64(components.EpochStartMetaBlock.Round), + } + bootStrapDataBytes, err := msh.marshalizer.Marshal(&bootStrapData) + if err != nil { + return err + } + + round := int64(components.EpochStartMetaBlock.Round) + roundNum := bootstrapStorage.RoundNum{Num: round} + roundNumBytes, err := msh.marshalizer.Marshal(&roundNum) + if err != nil { + return err + } + + err = bootStorer.Put([]byte(core.HighestRoundFromBootStorage), roundNumBytes) + if err != nil { + return err + } + key := []byte(strconv.FormatInt(round, 10)) + err = bootStorer.Put(key, bootStrapDataBytes) + if err != nil { + return err + } + + err = msh.commitTries(components) + if err != nil { + return err + } + + log.Info("saved bootstrap data to storage") + return nil +} + +func (msh *metaStorageHandler) getLastCrossNotarizedHeaders(meta *block.MetaBlock) []bootstrapStorage.BootstrapHeaderInfo { + crossNotarizedHdrs := make([]bootstrapStorage.BootstrapHeaderInfo, 0) + for _, epochStartShardData := range meta.EpochStart.LastFinalizedHeaders { + crossNotarizedHdrs = append(crossNotarizedHdrs, bootstrapStorage.BootstrapHeaderInfo{ + ShardId: epochStartShardData.ShardID, + Nonce: epochStartShardData.Nonce, + Hash: epochStartShardData.HeaderHash, + }) + } + + return crossNotarizedHdrs +} + +func (msh *metaStorageHandler) saveLastHeader(metaBlock *block.MetaBlock) (bootstrapStorage.BootstrapHeaderInfo, error) { + lastHeaderHash, err := core.CalculateHash(msh.marshalizer, msh.hasher, metaBlock) + if err != nil { + return bootstrapStorage.BootstrapHeaderInfo{}, err + } + + lastHeaderBytes, err := msh.marshalizer.Marshal(metaBlock) + if err != nil { + return bootstrapStorage.BootstrapHeaderInfo{}, err + } + + err = msh.storageService.GetStorer(dataRetriever.MetaBlockUnit).Put(lastHeaderHash, lastHeaderBytes) + if err != nil { + return bootstrapStorage.BootstrapHeaderInfo{}, 
err + } + + bootstrapHdrInfo := bootstrapStorage.BootstrapHeaderInfo{ + ShardId: core.MetachainShardId, + Epoch: metaBlock.Epoch, + Nonce: metaBlock.Nonce, + Hash: lastHeaderHash, + } + + return bootstrapHdrInfo, nil +} + +func (msh *metaStorageHandler) saveTriggerRegistry(components *ComponentsNeededForBootstrap) ([]byte, error) { + metaBlock := components.EpochStartMetaBlock + hash, err := core.CalculateHash(msh.marshalizer, msh.hasher, metaBlock) + if err != nil { + return nil, err + } + + triggerReg := metachain.TriggerRegistry{ + Epoch: metaBlock.Epoch, + CurrentRound: metaBlock.Round, + EpochFinalityAttestingRound: metaBlock.Round, + CurrEpochStartRound: metaBlock.Round, + PrevEpochStartRound: components.PreviousEpochStartRound, + EpochStartMetaHash: hash, + EpochStartMeta: metaBlock, + } + + trigInternalKey := append([]byte(core.TriggerRegistryKeyPrefix), []byte(fmt.Sprint(metaBlock.Round))...) + + triggerRegBytes, err := json.Marshal(&triggerReg) + if err != nil { + return nil, err + } + + errPut := msh.storageService.GetStorer(dataRetriever.BootstrapUnit).Put(trigInternalKey, triggerRegBytes) + if errPut != nil { + return nil, errPut + } + + return []byte(core.TriggerRegistryKeyPrefix), nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (msh *metaStorageHandler) IsInterfaceNil() bool { + return msh == nil +} diff --git a/epochStart/bootstrap/nodesCoordinator.go b/epochStart/bootstrap/nodesCoordinator.go new file mode 100644 index 00000000000..e1d6d73a21d --- /dev/null +++ b/epochStart/bootstrap/nodesCoordinator.go @@ -0,0 +1,298 @@ +package bootstrap + +import ( + "bytes" + "fmt" + "sort" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/epochStart" + "github.com/ElrondNetwork/elrond-go/sharding" +) + +type nodesCoordinator struct { + shuffler sharding.NodesShuffler + chance sharding.ChanceComputer + numShards map[uint32]uint32 + shardConsensusGroupSize uint32 + metaConsensusGroupSize uint32 + + nodesConfig map[uint32]*epochNodesConfig +} + +type epochNodesConfig struct { + nbShards uint32 + shardID uint32 + eligibleMap map[uint32][]sharding.Validator + waitingMap map[uint32][]sharding.Validator + expandedEligibleMap map[uint32][]sharding.Validator +} + +// ArgsNewStartInEpochNodesCoordinator - +type ArgsNewStartInEpochNodesCoordinator struct { + Shuffler sharding.NodesShuffler + Chance sharding.ChanceComputer + ShardConsensusGroupSize uint32 + MetaConsensusGroupSize uint32 +} + +// NewStartInEpochNodesCoordinator creates an epoch start nodes coordinator +func NewStartInEpochNodesCoordinator(args ArgsNewStartInEpochNodesCoordinator) (*nodesCoordinator, error) { + n := &nodesCoordinator{ + shuffler: args.Shuffler, + chance: args.Chance, + shardConsensusGroupSize: args.ShardConsensusGroupSize, + metaConsensusGroupSize: args.MetaConsensusGroupSize, + nodesConfig: make(map[uint32]*epochNodesConfig), + numShards: make(map[uint32]uint32), + } + + return n, nil +} + +// ComputeNodesConfigForGenesis creates the actual node config for genesis +func (n *nodesCoordinator) ComputeNodesConfigForGenesis(nodesConfig *sharding.NodesSetup) (*sharding.EpochValidators, error) { + eligibleNodesInfo, waitingNodesInfo := nodesConfig.InitialNodesInfo() + + eligibleValidators, err := sharding.NodesInfoToValidators(eligibleNodesInfo) + if err != nil { + return nil, err + } + + 
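// the genesis nodes setup provides eligible and waiting lists per shard; both are converted to validator objects and registered below as the epoch 0 configuration +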
waitingValidators, err := sharding.NodesInfoToValidators(waitingNodesInfo) + if err != nil { + return nil, err + } + + err = n.setNodesPerShards(eligibleValidators, waitingValidators, 0) + epochValidators := epochNodesConfigToEpochValidators(n.nodesConfig[0]) + + return epochValidators, nil +} + +// ComputeNodesConfigFor computes the actual nodes config for the set epoch from the validator info +func (n *nodesCoordinator) ComputeNodesConfigFor( + metaBlock *block.MetaBlock, + validatorInfos []*state.ValidatorInfo, +) (*sharding.EpochValidators, error) { + if check.IfNil(metaBlock) { + return nil, epochStart.ErrNilHeaderHandler + } + if len(validatorInfos) == 0 { + return nil, epochStart.ErrNilValidatorInfo + } + + randomness := metaBlock.GetPrevRandSeed() + newEpoch := metaBlock.GetEpoch() + n.numShards[newEpoch] = uint32(len(metaBlock.EpochStart.LastFinalizedHeaders)) + + sort.Slice(validatorInfos, func(i, j int) bool { + return bytes.Compare(validatorInfos[i].PublicKey, validatorInfos[j].PublicKey) < 0 + }) + + leaving, err := n.computeLeaving(validatorInfos) + if err != nil { + return nil, err + } + + eligibleMap := make(map[uint32][]sharding.Validator) + waitingMap := make(map[uint32][]sharding.Validator) + newNodesMap := make([]sharding.Validator, 0) + for i := uint32(0); i < n.numShards[newEpoch]; i++ { + eligibleMap[i] = make([]sharding.Validator, 0) + waitingMap[i] = make([]sharding.Validator, 0) + } + eligibleMap[core.MetachainShardId] = make([]sharding.Validator, 0) + waitingMap[core.MetachainShardId] = make([]sharding.Validator, 0) + + mapValidatorInfo := make(map[string]*state.ValidatorInfo, len(validatorInfos)) + for _, validatorInfo := range validatorInfos { + validator, err := sharding.NewValidator(validatorInfo.PublicKey, validatorInfo.RewardAddress) + if err != nil { + return nil, err + } + mapValidatorInfo[string(validatorInfo.PublicKey)] = validatorInfo + + switch validatorInfo.List { + case string(core.WaitingList): + waitingMap[validatorInfo.ShardId] = append(waitingMap[validatorInfo.ShardId], validator) + case string(core.EligibleList): + eligibleMap[validatorInfo.ShardId] = append(eligibleMap[validatorInfo.ShardId], validator) + case string(core.NewList): + newNodesMap = append(newNodesMap, validator) + } + } + + shufflerArgs := sharding.ArgsUpdateNodes{ + Eligible: eligibleMap, + Waiting: waitingMap, + NewNodes: newNodesMap, + Leaving: leaving, + Rand: randomness, + NbShards: n.numShards[newEpoch], + } + + newEligibleMap, newWaitingMap, _ := n.shuffler.UpdateNodeLists(shufflerArgs) + + err = n.setNodesPerShards(newEligibleMap, newWaitingMap, newEpoch) + if err != nil { + log.Error("set nodes per shard failed", "error", err) + return nil, err + } + + err = n.expandSavedNodes(mapValidatorInfo, newEpoch) + if err != nil { + return nil, err + } + + epochValidators := epochNodesConfigToEpochValidators(n.nodesConfig[newEpoch]) + + return epochValidators, nil +} + +func (n *nodesCoordinator) computeLeaving(allValidators []*state.ValidatorInfo) ([]sharding.Validator, error) { + leavingValidators := make([]sharding.Validator, 0) + minChances := n.chance.GetChance(0) + for _, validator := range allValidators { + + chances := n.chance.GetChance(validator.TempRating) + if chances < minChances { + val, err := sharding.NewValidator(validator.PublicKey, validator.RewardAddress) + if err != nil { + return nil, err + } + leavingValidators = append(leavingValidators, val) + } + } + + return leavingValidators, nil +} + +func (n *nodesCoordinator) setNodesPerShards( + eligible 
map[uint32][]sharding.Validator, + waiting map[uint32][]sharding.Validator, + epoch uint32, +) error { + nodesConfig, ok := n.nodesConfig[epoch] + if !ok { + nodesConfig = &epochNodesConfig{} + } + + nodesList, ok := eligible[core.MetachainShardId] + if !ok || uint32(len(nodesList)) < n.metaConsensusGroupSize { + return epochStart.ErrSmallMetachainEligibleListSize + } + + for shardId := uint32(0); shardId < uint32(len(eligible)-1); shardId++ { + nbNodesShard := uint32(len(eligible[shardId])) + if nbNodesShard < n.shardConsensusGroupSize { + return epochStart.ErrSmallShardEligibleListSize + } + } + + // nbShards holds number of shards without meta + nodesConfig.nbShards = uint32(len(eligible) - 1) + nodesConfig.eligibleMap = eligible + nodesConfig.waitingMap = waiting + + n.nodesConfig[epoch] = nodesConfig + return nil +} + +// ComputeShardForSelfPublicKey - +func (n *nodesCoordinator) ComputeShardForSelfPublicKey(epoch uint32, pubKey []byte) uint32 { + for shard, validators := range n.nodesConfig[epoch].eligibleMap { + for _, v := range validators { + if bytes.Equal(v.PubKey(), pubKey) { + return shard + } + } + } + + for shard, validators := range n.nodesConfig[epoch].waitingMap { + for _, v := range validators { + if bytes.Equal(v.PubKey(), pubKey) { + return shard + } + } + } + + return core.AllShardId +} + +func (n *nodesCoordinator) expandSavedNodes( + mapValidatorInfo map[string]*state.ValidatorInfo, + epoch uint32, +) error { + nodesConfig := n.nodesConfig[epoch] + nodesConfig.expandedEligibleMap = make(map[uint32][]sharding.Validator) + + nrShards := len(nodesConfig.eligibleMap) + var err error + nodesConfig.expandedEligibleMap[core.MetachainShardId], err = n.expandEligibleList(nodesConfig.eligibleMap[core.MetachainShardId], mapValidatorInfo) + if err != nil { + return err + } + + for shardId := uint32(0); shardId < uint32(nrShards-1); shardId++ { + nodesConfig.expandedEligibleMap[shardId], err = n.expandEligibleList(nodesConfig.eligibleMap[shardId], mapValidatorInfo) + if err != nil { + return err + } + } + + return nil +} + +func (n *nodesCoordinator) expandEligibleList( + validators []sharding.Validator, + mapValidatorInfo map[string]*state.ValidatorInfo, +) ([]sharding.Validator, error) { + minChance := n.chance.GetChance(0) + minSize := len(validators) * int(minChance) + validatorList := make([]sharding.Validator, 0, minSize) + + for _, validatorInShard := range validators { + pk := validatorInShard.PubKey() + validatorInfo, ok := mapValidatorInfo[string(pk)] + if !ok { + return nil, epochStart.ErrNilValidatorInfo + } + + chances := n.chance.GetChance(validatorInfo.TempRating) + if chances < minChance { + chances = minChance + } + + for i := uint32(0); i < chances; i++ { + validatorList = append(validatorList, validatorInShard) + } + } + + return validatorList, nil +} + +func epochNodesConfigToEpochValidators(config *epochNodesConfig) *sharding.EpochValidators { + result := &sharding.EpochValidators{ + EligibleValidators: make(map[string][]*sharding.SerializableValidator, len(config.eligibleMap)), + WaitingValidators: make(map[string][]*sharding.SerializableValidator, len(config.waitingMap)), + } + + for k, v := range config.eligibleMap { + result.EligibleValidators[fmt.Sprint(k)] = sharding.ValidatorArrayToSerializableValidatorArray(v) + } + + for k, v := range config.waitingMap { + result.WaitingValidators[fmt.Sprint(k)] = sharding.ValidatorArrayToSerializableValidatorArray(v) + } + + return result +} + +// IsInterfaceNil returns true if underlying object is nil +func (n 
*nodesCoordinator) IsInterfaceNil() bool { + return n == nil +} diff --git a/epochStart/bootstrap/process.go b/epochStart/bootstrap/process.go new file mode 100644 index 00000000000..fc22dde96c3 --- /dev/null +++ b/epochStart/bootstrap/process.go @@ -0,0 +1,757 @@ +package bootstrap + +import ( + "context" + "strconv" + "time" + + logger "github.com/ElrondNetwork/elrond-go-logger" + "github.com/ElrondNetwork/elrond-go/config" + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/partitioning" + "github.com/ElrondNetwork/elrond-go/crypto" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/data/syncer" + "github.com/ElrondNetwork/elrond-go/data/trie/factory" + "github.com/ElrondNetwork/elrond-go/data/typeConverters/uint64ByteSlice" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + factoryDataPool "github.com/ElrondNetwork/elrond-go/dataRetriever/factory" + "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/containers" + "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/resolverscontainer" + "github.com/ElrondNetwork/elrond-go/dataRetriever/requestHandlers" + "github.com/ElrondNetwork/elrond-go/epochStart" + "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/disabled" + factoryInterceptors "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/factory" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/economics" + "github.com/ElrondNetwork/elrond-go/process/interceptors" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/ElrondNetwork/elrond-go/storage/storageUnit" + "github.com/ElrondNetwork/elrond-go/storage/timecache" + "github.com/ElrondNetwork/elrond-go/update" + "github.com/ElrondNetwork/elrond-go/update/sync" +) + +var log = logger.GetOrCreate("epochStart/bootstrap") + +const timeToWait = 10 * time.Second +const timeBetweenRequests = 100 * time.Millisecond +const maxToRequest = 100 + +// Parameters defines the DTO for the result produced by the bootstrap component +type Parameters struct { + Epoch uint32 + SelfShardId uint32 + NumOfShards uint32 +} + +// ComponentsNeededForBootstrap holds the components which need to be initialized from network +type ComponentsNeededForBootstrap struct { + EpochStartMetaBlock *block.MetaBlock + PreviousEpochStartRound uint64 + ShardHeader *block.Header + NodesConfig *sharding.NodesCoordinatorRegistry + Headers map[string]data.HeaderHandler + ShardCoordinator sharding.Coordinator + UserAccountTries map[string]data.Trie + PeerAccountTries map[string]data.Trie + PendingMiniBlocks map[string]*block.MiniBlock +} + +// epochStartBootstrap will handle requesting the needed data to start when joining late the network +type epochStartBootstrap struct { + // should come via arguments + publicKey crypto.PublicKey + marshalizer marshal.Marshalizer + txSignMarshalizer marshal.Marshalizer + hasher hashing.Hasher + messenger p2p.Messenger + generalConfig config.Config + economicsData *economics.EconomicsData + singleSigner crypto.SingleSigner + blockSingleSigner crypto.SingleSigner + keyGen crypto.KeyGenerator + blockKeyGen crypto.KeyGenerator + shardCoordinator sharding.Coordinator + genesisNodesConfig *sharding.NodesSetup + 
genesisShardCoordinator sharding.Coordinator + pathManager storage.PathManagerHandler + workingDir string + defaultDBPath string + defaultEpochString string + defaultShardString string + destinationShardAsObserver string + rater sharding.ChanceComputer + trieContainer state.TriesHolder + trieStorageManagers map[string]data.StorageManager + + // created components + requestHandler process.RequestHandler + interceptorContainer process.InterceptorsContainer + dataPool dataRetriever.PoolsHolder + miniBlocksSyncer epochStart.PendingMiniBlocksSyncHandler + headersSyncer epochStart.HeadersByHashSyncer + epochStartMetaBlockSyncer epochStart.StartOfEpochMetaSyncer + nodesConfigHandler StartOfEpochNodesConfigHandler + whiteListHandler update.WhiteListHandler + + // gathered data + epochStartMeta *block.MetaBlock + prevEpochStartMeta *block.MetaBlock + syncedHeaders map[string]data.HeaderHandler + nodesConfig *sharding.NodesCoordinatorRegistry + userAccountTries map[string]data.Trie + peerAccountTries map[string]data.Trie + baseData baseDataInStorage + computedEpoch uint32 +} + +type baseDataInStorage struct { + shardId uint32 + numberOfShards uint32 + lastRound int64 + lastEpoch uint32 + storageExists bool +} + +// ArgsEpochStartBootstrap holds the arguments needed for creating an epoch start data provider component +type ArgsEpochStartBootstrap struct { + PublicKey crypto.PublicKey + Marshalizer marshal.Marshalizer + TxSignMarshalizer marshal.Marshalizer + Hasher hashing.Hasher + Messenger p2p.Messenger + GeneralConfig config.Config + EconomicsData *economics.EconomicsData + SingleSigner crypto.SingleSigner + BlockSingleSigner crypto.SingleSigner + KeyGen crypto.KeyGenerator + BlockKeyGen crypto.KeyGenerator + GenesisNodesConfig *sharding.NodesSetup + GenesisShardCoordinator sharding.Coordinator + PathManager storage.PathManagerHandler + WorkingDir string + DefaultDBPath string + DefaultEpochString string + DefaultShardString string + Rater sharding.ChanceComputer + DestinationShardAsObserver string + TrieContainer state.TriesHolder + TrieStorageManagers map[string]data.StorageManager +} + +// NewEpochStartBootstrap will return a new instance of epochStartBootstrap +func NewEpochStartBootstrap(args ArgsEpochStartBootstrap) (*epochStartBootstrap, error) { + epochStartProvider := &epochStartBootstrap{ + publicKey: args.PublicKey, + marshalizer: args.Marshalizer, + txSignMarshalizer: args.TxSignMarshalizer, + hasher: args.Hasher, + messenger: args.Messenger, + generalConfig: args.GeneralConfig, + economicsData: args.EconomicsData, + genesisNodesConfig: args.GenesisNodesConfig, + genesisShardCoordinator: args.GenesisShardCoordinator, + workingDir: args.WorkingDir, + pathManager: args.PathManager, + defaultEpochString: args.DefaultEpochString, + defaultDBPath: args.DefaultDBPath, + defaultShardString: args.DefaultShardString, + keyGen: args.KeyGen, + blockKeyGen: args.BlockKeyGen, + singleSigner: args.SingleSigner, + blockSingleSigner: args.BlockSingleSigner, + rater: args.Rater, + destinationShardAsObserver: args.DestinationShardAsObserver, + trieContainer: args.TrieContainer, + trieStorageManagers: args.TrieStorageManagers, + } + + return epochStartProvider, nil +} + +func (e *epochStartBootstrap) computedDurationOfEpoch() time.Duration { + return time.Duration(e.genesisNodesConfig.RoundDuration* + uint64(e.generalConfig.EpochStartConfig.RoundsPerEpoch)) * time.Millisecond +} + +func (e *epochStartBootstrap) isStartInEpochZero() bool { + startTime := time.Unix(e.genesisNodesConfig.StartTime, 0) + 
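// the node is still in epoch 0 while the current time is before genesis or before the end of the first configured epoch (genesis time plus rounds per epoch times round duration) +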
isCurrentTimeBeforeGenesis := time.Now().Sub(startTime) < 0 + if isCurrentTimeBeforeGenesis { + return true + } + + configuredDurationOfEpoch := startTime.Add(e.computedDurationOfEpoch()) + isEpochZero := time.Now().Sub(configuredDurationOfEpoch) < 0 + + return isEpochZero +} + +func (e *epochStartBootstrap) prepareEpochZero() (Parameters, error) { + parameters := Parameters{ + Epoch: 0, + SelfShardId: e.genesisShardCoordinator.SelfId(), + NumOfShards: e.genesisShardCoordinator.NumberOfShards(), + } + return parameters, nil +} + +func (e *epochStartBootstrap) computeMostProbableEpoch() { + startTime := time.Unix(e.genesisNodesConfig.StartTime, 0) + elapsedTime := time.Since(startTime) + + timeForOneEpoch := e.computedDurationOfEpoch() + + elaspedTimeInSeconds := uint64(elapsedTime.Seconds()) + timeForOneEpochInSeconds := uint64(timeForOneEpoch.Seconds()) + + e.computedEpoch = uint32(elaspedTimeInSeconds / timeForOneEpochInSeconds) +} + +func (e *epochStartBootstrap) Bootstrap() (Parameters, error) { + var err error + e.shardCoordinator, err = sharding.NewMultiShardCoordinator(e.genesisShardCoordinator.NumberOfShards(), core.MetachainShardId) + if err != nil { + return Parameters{}, err + } + + if e.isStartInEpochZero() { + return e.prepareEpochZero() + } + + e.computeMostProbableEpoch() + e.initializeFromLocalStorage() + + // TODO: make a better decision according to lastRound, lastEpoch + isCurrentEpochSaved := (e.baseData.lastEpoch+1 >= e.computedEpoch) && e.baseData.storageExists + if isCurrentEpochSaved { + parameters, err := e.prepareEpochFromStorage() + if err == nil { + return parameters, nil + } + } + + err = e.prepareComponentsToSyncFromNetwork() + if err != nil { + return Parameters{}, err + } + + e.epochStartMeta, err = e.epochStartMetaBlockSyncer.SyncEpochStartMeta(timeToWait) + if err != nil { + return Parameters{}, err + } + + err = e.createSyncers() + if err != nil { + return Parameters{}, err + } + + return e.requestAndProcessing() +} + +func (e *epochStartBootstrap) prepareComponentsToSyncFromNetwork() error { + whiteListCache, err := storageUnit.NewCache( + storageUnit.CacheType(e.generalConfig.WhiteListPool.Type), + e.generalConfig.WhiteListPool.Size, + e.generalConfig.WhiteListPool.Shards, + ) + if err != nil { + return err + } + + e.whiteListHandler, err = interceptors.NewWhiteListDataVerifier(whiteListCache) + if err != nil { + return err + } + + e.dataPool, err = factoryDataPool.NewDataPoolFromConfig( + factoryDataPool.ArgsDataPool{ + Config: &e.generalConfig, + EconomicsData: e.economicsData, + ShardCoordinator: e.shardCoordinator, + }, + ) + if err != nil { + return err + } + + err = e.createRequestHandler() + if err != nil { + return err + } + + argsEpochStartSyncer := ArgsNewEpochStartMetaSyncer{ + RequestHandler: e.requestHandler, + Messenger: e.messenger, + Marshalizer: e.marshalizer, + Hasher: e.hasher, + } + e.epochStartMetaBlockSyncer, err = NewEpochStartMetaSyncer(argsEpochStartSyncer) + if err != nil { + return err + } + + return nil +} + +func (e *epochStartBootstrap) createSyncers() error { + var err error + + args := factoryInterceptors.ArgsEpochStartInterceptorContainer{ + Config: e.generalConfig, + ShardCoordinator: e.shardCoordinator, + ProtoMarshalizer: e.marshalizer, + TxSignMarshalizer: e.txSignMarshalizer, + Hasher: e.hasher, + Messenger: e.messenger, + DataPool: e.dataPool, + SingleSigner: e.singleSigner, + BlockSingleSigner: e.blockSingleSigner, + KeyGen: e.keyGen, + BlockKeyGen: e.blockKeyGen, + WhiteListHandler: e.whiteListHandler, + ChainID: 
[]byte(e.genesisNodesConfig.ChainID), + } + + e.interceptorContainer, err = factoryInterceptors.NewEpochStartInterceptorsContainer(args) + if err != nil { + return err + } + + syncMiniBlocksArgs := sync.ArgsNewPendingMiniBlocksSyncer{ + Storage: disabled.CreateMemUnit(), + Cache: e.dataPool.MiniBlocks(), + Marshalizer: e.marshalizer, + RequestHandler: e.requestHandler, + } + e.miniBlocksSyncer, err = sync.NewPendingMiniBlocksSyncer(syncMiniBlocksArgs) + if err != nil { + return err + } + + syncMissingHeadersArgs := sync.ArgsNewMissingHeadersByHashSyncer{ + Storage: disabled.CreateMemUnit(), + Cache: e.dataPool.Headers(), + Marshalizer: e.marshalizer, + RequestHandler: e.requestHandler, + } + e.headersSyncer, err = sync.NewMissingheadersByHashSyncer(syncMissingHeadersArgs) + if err != nil { + return err + } + + return nil +} + +func (e *epochStartBootstrap) syncHeadersFrom(meta *block.MetaBlock) (map[string]data.HeaderHandler, error) { + hashesToRequest := make([][]byte, 0, len(meta.EpochStart.LastFinalizedHeaders)+1) + shardIds := make([]uint32, 0, len(meta.EpochStart.LastFinalizedHeaders)+1) + + for _, epochStartData := range meta.EpochStart.LastFinalizedHeaders { + hashesToRequest = append(hashesToRequest, epochStartData.HeaderHash) + shardIds = append(shardIds, epochStartData.ShardID) + } + + if meta.Epoch > 1 { // no need to request genesis block + hashesToRequest = append(hashesToRequest, meta.EpochStart.Economics.PrevEpochStartHash) + shardIds = append(shardIds, core.MetachainShardId) + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + err := e.headersSyncer.SyncMissingHeadersByHash(shardIds, hashesToRequest, ctx) + cancel() + if err != nil { + return nil, err + } + + syncedHeaders, err := e.headersSyncer.GetHeaders() + if err != nil { + return nil, err + } + + if meta.Epoch == 1 { + syncedHeaders[string(meta.EpochStart.Economics.PrevEpochStartHash)] = &block.MetaBlock{} + } + + return syncedHeaders, nil +} + +// requestAndProcessing will handle requesting and receiving the needed information the node will bootstrap from +func (e *epochStartBootstrap) requestAndProcessing() (Parameters, error) { + var err error + e.baseData.numberOfShards = uint32(len(e.epochStartMeta.EpochStart.LastFinalizedHeaders)) + e.baseData.lastEpoch = e.epochStartMeta.Epoch + + e.syncedHeaders, err = e.syncHeadersFrom(e.epochStartMeta) + if err != nil { + return Parameters{}, err + } + log.Debug("start in epoch bootstrap: got shard headers and previous epoch start meta block") + + prevEpochStartMetaHash := e.epochStartMeta.EpochStart.Economics.PrevEpochStartHash + prevEpochStartMeta, ok := e.syncedHeaders[string(prevEpochStartMetaHash)].(*block.MetaBlock) + if !ok { + return Parameters{}, epochStart.ErrWrongTypeAssertion + } + e.prevEpochStartMeta = prevEpochStartMeta + + pubKeyBytes, err := e.publicKey.ToByteArray() + if err != nil { + return Parameters{}, err + } + + log.Debug("start in epoch bootstrap: createTrieStorageManagers") + + log.Debug("start in epoch bootstrap: started syncPeerAccountsState") + err = e.syncPeerAccountsState(e.epochStartMeta.ValidatorStatsRootHash) + if err != nil { + return Parameters{}, err + } + log.Debug("start in epoch bootstrap: syncPeerAccountsState", "peer account tries map length", len(e.peerAccountTries)) + + err = e.processNodesConfig(pubKeyBytes) + if err != nil { + return Parameters{}, err + } + log.Debug("start in epoch bootstrap: processNodesConfig") + + if e.baseData.shardId == core.AllShardId { + destShardID := core.MetachainShardId + if e.destinationShardAsObserver != "metachain" { + var destShardIDUint64 uint64 + 
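// destinationShardAsObserver comes from the config as a string; any value other than "metachain" is expected to be a numeric shard ID +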
destShardIDUint64, err = strconv.ParseUint(e.destinationShardAsObserver, 10, 64) + if err != nil { + return Parameters{}, err + } + destShardID = uint32(destShardIDUint64) + } + + e.baseData.shardId = destShardID + } + e.shardCoordinator, err = sharding.NewMultiShardCoordinator(e.baseData.numberOfShards, e.baseData.shardId) + if err != nil { + return Parameters{}, err + } + log.Debug("start in epoch bootstrap: shardCoordinator") + + if e.shardCoordinator.SelfId() != e.genesisShardCoordinator.SelfId() { + err = e.createTriesForNewShardId(e.shardCoordinator.SelfId()) + if err != nil { + return Parameters{}, err + } + } + + if e.shardCoordinator.SelfId() == core.MetachainShardId { + err = e.requestAndProcessForMeta() + if err != nil { + return Parameters{}, err + } + } else { + err = e.requestAndProcessForShard() + if err != nil { + return Parameters{}, err + } + } + + parameters := Parameters{ + Epoch: e.baseData.lastEpoch, + SelfShardId: e.shardCoordinator.SelfId(), + NumOfShards: e.shardCoordinator.NumberOfShards(), + } + return parameters, nil +} + +func (e *epochStartBootstrap) processNodesConfig(pubKey []byte) error { + var err error + argsNewValidatorStatusSyncers := ArgsNewSyncValidatorStatus{ + DataPool: e.dataPool, + Marshalizer: e.marshalizer, + RequestHandler: e.requestHandler, + Rater: e.rater, + GenesisNodesConfig: e.genesisNodesConfig, + } + e.nodesConfigHandler, err = NewSyncValidatorStatus(argsNewValidatorStatusSyncers) + if err != nil { + return err + } + + e.nodesConfig, e.baseData.shardId, err = e.nodesConfigHandler.NodesConfigFromMetaBlock(e.epochStartMeta, e.prevEpochStartMeta, pubKey) + return err +} + +func (e *epochStartBootstrap) requestAndProcessForMeta() error { + var err error + err = e.syncUserAccountsState(e.epochStartMeta.RootHash) + if err != nil { + return err + } + + components := &ComponentsNeededForBootstrap{ + EpochStartMetaBlock: e.epochStartMeta, + PreviousEpochStartRound: e.prevEpochStartMeta.Round, + NodesConfig: e.nodesConfig, + Headers: e.syncedHeaders, + ShardCoordinator: e.shardCoordinator, + UserAccountTries: e.userAccountTries, + PeerAccountTries: e.peerAccountTries, + } + + storageHandlerComponent, err := NewMetaStorageHandler( + e.generalConfig, + e.shardCoordinator, + e.pathManager, + e.marshalizer, + e.hasher, + e.epochStartMeta.Epoch, + ) + if err != nil { + return err + } + + errSavingToStorage := storageHandlerComponent.SaveDataToStorage(components) + if errSavingToStorage != nil { + return errSavingToStorage + } + + return nil +} + +func (e *epochStartBootstrap) findSelfShardEpochStartData() (block.EpochStartShardData, error) { + var epochStartData block.EpochStartShardData + for _, shardData := range e.epochStartMeta.EpochStart.LastFinalizedHeaders { + if shardData.ShardID == e.shardCoordinator.SelfId() { + return shardData, nil + } + } + return epochStartData, epochStart.ErrEpochStartDataForShardNotFound +} + +func (e *epochStartBootstrap) requestAndProcessForShard() error { + epochStartData, err := e.findSelfShardEpochStartData() + if err != nil { + return err + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + err = e.miniBlocksSyncer.SyncPendingMiniBlocks(epochStartData.PendingMiniBlockHeaders, ctx) + cancel() + if err != nil { + return err + } + + pendingMiniBlocks, err := e.miniBlocksSyncer.GetMiniBlocks() + if err != nil { + return err + } + log.Debug("start in epoch bootstrap: GetMiniBlocks") + + shardIds := []uint32{ + core.MetachainShardId, + core.MetachainShardId, + } + hashesToRequest := [][]byte{ + 
epochStartData.LastFinishedMetaBlock, + epochStartData.FirstPendingMetaBlock, + } + + e.headersSyncer.ClearFields() + ctx, cancel = context.WithTimeout(context.Background(), time.Minute) + err = e.headersSyncer.SyncMissingHeadersByHash(shardIds, hashesToRequest, ctx) + cancel() + if err != nil { + return err + } + + neededHeaders, err := e.headersSyncer.GetHeaders() + if err != nil { + return err + } + log.Debug("start in epoch bootstrap: SyncMissingHeadersByHash") + + for hash, hdr := range neededHeaders { + e.syncedHeaders[hash] = hdr + } + + ownShardHdr, ok := e.syncedHeaders[string(epochStartData.HeaderHash)].(*block.Header) + if !ok { + return epochStart.ErrWrongTypeAssertion + } + + log.Debug("start in epoch bootstrap: started syncUserAccountsState") + err = e.syncUserAccountsState(ownShardHdr.RootHash) + if err != nil { + return err + } + log.Debug("start in epoch bootstrap: syncUserAccountsState") + + components := &ComponentsNeededForBootstrap{ + EpochStartMetaBlock: e.epochStartMeta, + PreviousEpochStartRound: e.prevEpochStartMeta.Round, + ShardHeader: ownShardHdr, + NodesConfig: e.nodesConfig, + Headers: e.syncedHeaders, + ShardCoordinator: e.shardCoordinator, + UserAccountTries: e.userAccountTries, + PeerAccountTries: e.peerAccountTries, + PendingMiniBlocks: pendingMiniBlocks, + } + + log.Debug("reached maximum tested point from integration test") + storageHandlerComponent, err := NewShardStorageHandler( + e.generalConfig, + e.shardCoordinator, + e.pathManager, + e.marshalizer, + e.hasher, + e.baseData.lastEpoch, + ) + if err != nil { + return err + } + + errSavingToStorage := storageHandlerComponent.SaveDataToStorage(components) + if errSavingToStorage != nil { + return errSavingToStorage + } + + return nil +} + +func (e *epochStartBootstrap) syncUserAccountsState(rootHash []byte) error { + argsUserAccountsSyncer := syncer.ArgsNewUserAccountsSyncer{ + ArgsNewBaseAccountsSyncer: syncer.ArgsNewBaseAccountsSyncer{ + Hasher: e.hasher, + Marshalizer: e.marshalizer, + TrieStorageManager: e.trieStorageManagers[factory.UserAccountTrie], + RequestHandler: e.requestHandler, + WaitTime: time.Minute, + Cacher: e.dataPool.TrieNodes(), + }, + ShardId: e.shardCoordinator.SelfId(), + } + accountsDBSyncer, err := syncer.NewUserAccountsSyncer(argsUserAccountsSyncer) + if err != nil { + return err + } + + err = accountsDBSyncer.SyncAccounts(rootHash) + if err != nil { + return err + } + + e.userAccountTries = accountsDBSyncer.GetSyncedTries() + return nil +} + +func (e *epochStartBootstrap) createTriesForNewShardId(shardId uint32) error { + trieFactoryArgs := factory.TrieFactoryArgs{ + EvictionWaitingListCfg: e.generalConfig.EvictionWaitingList, + SnapshotDbCfg: e.generalConfig.TrieSnapshotDB, + Marshalizer: e.marshalizer, + Hasher: e.hasher, + PathManager: e.pathManager, + ShardId: core.GetShardIdString(shardId), + } + trieFactory, err := factory.NewTrieFactory(trieFactoryArgs) + if err != nil { + return err + } + + userStorageManager, userAccountTrie, err := trieFactory.Create(e.generalConfig.AccountsTrieStorage, e.generalConfig.StateTriesConfig.AccountsStatePruningEnabled) + if err != nil { + return err + } + + e.trieContainer.Replace([]byte(factory.UserAccountTrie), userAccountTrie) + e.trieStorageManagers[factory.UserAccountTrie] = userStorageManager + + peerStorageManager, peerAccountsTrie, err := trieFactory.Create(e.generalConfig.PeerAccountsTrieStorage, e.generalConfig.StateTriesConfig.PeerStatePruningEnabled) + if err != nil { + return err + } + + 
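// the peer accounts trie and its storage manager are swapped in the same way as the user accounts trie above +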
e.trieContainer.Replace([]byte(factory.PeerAccountTrie), peerAccountsTrie) + e.trieStorageManagers[factory.PeerAccountTrie] = peerStorageManager + + return nil +} + +func (e *epochStartBootstrap) syncPeerAccountsState(rootHash []byte) error { + argsValidatorAccountsSyncer := syncer.ArgsNewValidatorAccountsSyncer{ + ArgsNewBaseAccountsSyncer: syncer.ArgsNewBaseAccountsSyncer{ + Hasher: e.hasher, + Marshalizer: e.marshalizer, + TrieStorageManager: e.trieStorageManagers[factory.PeerAccountTrie], + RequestHandler: e.requestHandler, + WaitTime: time.Minute, + Cacher: e.dataPool.TrieNodes(), + }, + } + accountsDBSyncer, err := syncer.NewValidatorAccountsSyncer(argsValidatorAccountsSyncer) + if err != nil { + return err + } + + err = accountsDBSyncer.SyncAccounts(rootHash) + if err != nil { + return err + } + + e.peerAccountTries = accountsDBSyncer.GetSyncedTries() + return nil +} + +func (e *epochStartBootstrap) createRequestHandler() error { + dataPacker, err := partitioning.NewSimpleDataPacker(e.marshalizer) + if err != nil { + return err + } + + storageService := disabled.NewChainStorer() + + resolversContainerArgs := resolverscontainer.FactoryArgs{ + ShardCoordinator: e.shardCoordinator, + Messenger: e.messenger, + Store: storageService, + Marshalizer: e.marshalizer, + DataPools: e.dataPool, + Uint64ByteSliceConverter: uint64ByteSlice.NewBigEndianConverter(), + NumConcurrentResolvingJobs: 10, + DataPacker: dataPacker, + TriesContainer: e.trieContainer, + SizeCheckDelta: 0, + InputAntifloodHandler: disabled.NewAntiFloodHandler(), + OutputAntifloodHandler: disabled.NewAntiFloodHandler(), + } + resolverFactory, err := resolverscontainer.NewMetaResolversContainerFactory(resolversContainerArgs) + if err != nil { + return err + } + + container, err := resolverFactory.Create() + if err != nil { + return err + } + + finder, err := containers.NewResolversFinder(container, e.shardCoordinator) + if err != nil { + return err + } + + requestedItemsHandler := timecache.NewTimeCache(timeBetweenRequests) + e.requestHandler, err = requestHandlers.NewResolverRequestHandler( + finder, + requestedItemsHandler, + e.whiteListHandler, + maxToRequest, + core.MetachainShardId, + timeBetweenRequests, + ) + return err +} + +// IsInterfaceNil returns true if there is no value under the interface +func (e *epochStartBootstrap) IsInterfaceNil() bool { + return e == nil +} diff --git a/epochStart/bootstrap/shardStorageHandler.go b/epochStart/bootstrap/shardStorageHandler.go new file mode 100644 index 00000000000..31e292458e7 --- /dev/null +++ b/epochStart/bootstrap/shardStorageHandler.go @@ -0,0 +1,325 @@ +package bootstrap + +import ( + "encoding/json" + "fmt" + "strconv" + + "github.com/ElrondNetwork/elrond-go/config" + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/epochStart" + "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/disabled" + "github.com/ElrondNetwork/elrond-go/epochStart/shardchain" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process/block/bootstrapStorage" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/ElrondNetwork/elrond-go/storage/factory" +) + +type shardStorageHandler struct { + *baseStorageHandler +} + +// 
NewShardStorageHandler will return a new instance of shardStorageHandler +func NewShardStorageHandler( + generalConfig config.Config, + shardCoordinator sharding.Coordinator, + pathManagerHandler storage.PathManagerHandler, + marshalizer marshal.Marshalizer, + hasher hashing.Hasher, + currentEpoch uint32, +) (*shardStorageHandler, error) { + epochStartNotifier := &disabled.EpochStartNotifier{} + storageFactory, err := factory.NewStorageServiceFactory( + &generalConfig, + shardCoordinator, + pathManagerHandler, + epochStartNotifier, + currentEpoch, + ) + if err != nil { + return nil, err + } + + storageService, err := storageFactory.CreateForShard() + if err != nil { + return nil, err + } + + base := &baseStorageHandler{ + storageService: storageService, + shardCoordinator: shardCoordinator, + marshalizer: marshalizer, + hasher: hasher, + currentEpoch: currentEpoch, + } + + return &shardStorageHandler{baseStorageHandler: base}, nil +} + +// SaveDataToStorage will save the fetched data to storage so it will be used by the storage bootstrap component +func (ssh *shardStorageHandler) SaveDataToStorage(components *ComponentsNeededForBootstrap) error { + defer func() { + err := ssh.storageService.CloseAll() + if err != nil { + log.Warn("error while closing storers", "error", err) + } + }() + + bootStorer := ssh.storageService.GetStorer(dataRetriever.BootstrapUnit) + + lastHeader, err := ssh.saveLastHeader(components.ShardHeader) + if err != nil { + return err + } + + processedMiniBlocks, err := ssh.getProcessedMiniBlocks(components.PendingMiniBlocks, components.EpochStartMetaBlock, components.Headers) + if err != nil { + return err + } + + pendingMiniBlocks, err := ssh.groupMiniBlocksByShard(components.PendingMiniBlocks) + if err != nil { + return err + } + + triggerConfigKey, err := ssh.saveTriggerRegistry(components) + if err != nil { + return err + } + + nodesCoordinatorConfigKey, err := ssh.saveNodesCoordinatorRegistry(components.EpochStartMetaBlock, components.NodesConfig) + if err != nil { + return err + } + + lastCrossNotarizedHdrs, err := ssh.getLastCrossNotarizedHeaders(components.EpochStartMetaBlock, components.Headers) + if err != nil { + return err + } + + bootStrapData := bootstrapStorage.BootstrapData{ + LastHeader: lastHeader, + LastCrossNotarizedHeaders: lastCrossNotarizedHdrs, + LastSelfNotarizedHeaders: []bootstrapStorage.BootstrapHeaderInfo{lastHeader}, + ProcessedMiniBlocks: processedMiniBlocks, + PendingMiniBlocks: pendingMiniBlocks, + NodesCoordinatorConfigKey: nodesCoordinatorConfigKey, + EpochStartTriggerConfigKey: triggerConfigKey, + HighestFinalBlockNonce: lastHeader.Nonce, + LastRound: int64(components.ShardHeader.Round), + } + bootStrapDataBytes, err := ssh.marshalizer.Marshal(&bootStrapData) + if err != nil { + return err + } + + roundToUseAsKey := int64(components.ShardHeader.Round + 2) + // TODO: change this. 
added 2 in order to skip + // equality check between round and LastRound from bootstrap from storage component + roundNum := bootstrapStorage.RoundNum{Num: roundToUseAsKey} + roundNumBytes, err := ssh.marshalizer.Marshal(&roundNum) + if err != nil { + return err + } + + err = bootStorer.Put([]byte(core.HighestRoundFromBootStorage), roundNumBytes) + if err != nil { + return err + } + + log.Info("saved bootstrap data to storage") + key := []byte(strconv.FormatInt(roundToUseAsKey, 10)) + err = bootStorer.Put(key, bootStrapDataBytes) + if err != nil { + return err + } + + err = ssh.commitTries(components) + if err != nil { + return err + } + + return nil +} + +func getEpochStartShardData(metaBlock *block.MetaBlock, shardId uint32) (block.EpochStartShardData, error) { + for _, epochStartShardData := range metaBlock.EpochStart.LastFinalizedHeaders { + if epochStartShardData.ShardID == shardId { + return epochStartShardData, nil + } + } + + return block.EpochStartShardData{}, epochStart.ErrEpochStartDataForShardNotFound +} + +func (ssh *shardStorageHandler) getProcessedMiniBlocks( + pendingMiniBlocks map[string]*block.MiniBlock, + meta *block.MetaBlock, + headers map[string]data.HeaderHandler, +) ([]bootstrapStorage.MiniBlocksInMeta, error) { + shardData, err := getEpochStartShardData(meta, ssh.shardCoordinator.SelfId()) + if err != nil { + return nil, err + } + + neededMeta, ok := headers[string(shardData.FirstPendingMetaBlock)].(*block.MetaBlock) + if !ok { + return nil, epochStart.ErrMissingHeader + } + + if check.IfNil(neededMeta) { + return nil, epochStart.ErrMissingHeader + } + + processedMbHashes := make([][]byte, 0) + miniBlocksDstMe := getAllMiniBlocksWithDst(neededMeta, ssh.shardCoordinator.SelfId()) + for hash, mb := range miniBlocksDstMe { + if _, ok := pendingMiniBlocks[hash]; ok { + continue + } + + processedMbHashes = append(processedMbHashes, mb.Hash) + } + + processedMiniBlocks := make([]bootstrapStorage.MiniBlocksInMeta, 0) + processedMiniBlocks = append(processedMiniBlocks, bootstrapStorage.MiniBlocksInMeta{ + MetaHash: shardData.FirstPendingMetaBlock, + MiniBlocksHashes: processedMbHashes, + }) + + return processedMiniBlocks, nil +} + +func (ssh *shardStorageHandler) getLastCrossNotarizedHeaders(meta *block.MetaBlock, headers map[string]data.HeaderHandler) ([]bootstrapStorage.BootstrapHeaderInfo, error) { + shardData, err := getEpochStartShardData(meta, ssh.shardCoordinator.SelfId()) + if err != nil { + return nil, err + } + + neededMeta, ok := headers[string(shardData.LastFinishedMetaBlock)] + if !ok { + return nil, epochStart.ErrMissingHeader + } + + crossNotarizedHdrs := make([]bootstrapStorage.BootstrapHeaderInfo, 0) + crossNotarizedHdrs = append(crossNotarizedHdrs, bootstrapStorage.BootstrapHeaderInfo{ + ShardId: core.MetachainShardId, + Nonce: neededMeta.GetNonce(), + Hash: shardData.LastFinishedMetaBlock, + }) + + neededMeta, ok = headers[string(shardData.FirstPendingMetaBlock)] + if !ok { + return nil, epochStart.ErrMissingHeader + } + + crossNotarizedHdrs = append(crossNotarizedHdrs, bootstrapStorage.BootstrapHeaderInfo{ + ShardId: core.MetachainShardId, + Nonce: neededMeta.GetNonce(), + Hash: shardData.FirstPendingMetaBlock, + }) + + return crossNotarizedHdrs, nil +} + +func (ssh *shardStorageHandler) saveLastHeader(shardHeader *block.Header) (bootstrapStorage.BootstrapHeaderInfo, error) { + lastHeaderHash, err := core.CalculateHash(ssh.marshalizer, ssh.hasher, shardHeader) + if err != nil { + return bootstrapStorage.BootstrapHeaderInfo{}, err + } + + 
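// marshal the shard header and store it in the block headers unit under its computed hash + 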
lastHeaderBytes, err := ssh.marshalizer.Marshal(shardHeader) + if err != nil { + return bootstrapStorage.BootstrapHeaderInfo{}, err + } + + err = ssh.storageService.GetStorer(dataRetriever.BlockHeaderUnit).Put(lastHeaderHash, lastHeaderBytes) + if err != nil { + return bootstrapStorage.BootstrapHeaderInfo{}, err + } + + bootstrapHdrInfo := bootstrapStorage.BootstrapHeaderInfo{ + ShardId: core.MetachainShardId, + Epoch: shardHeader.Epoch, + Nonce: shardHeader.Nonce, + Hash: lastHeaderHash, + } + + return bootstrapHdrInfo, nil +} + +func (ssh *shardStorageHandler) saveTriggerRegistry(components *ComponentsNeededForBootstrap) ([]byte, error) { + shardHeader := components.ShardHeader + + metaBlock := components.EpochStartMetaBlock + metaBlockHash, err := core.CalculateHash(ssh.marshalizer, ssh.hasher, metaBlock) + if err != nil { + return nil, err + } + + triggerReg := shardchain.TriggerRegistry{ + Epoch: shardHeader.Epoch, + CurrentRoundIndex: int64(shardHeader.Round), + EpochStartRound: shardHeader.Round, + EpochMetaBlockHash: metaBlockHash, + IsEpochStart: false, + NewEpochHeaderReceived: false, + EpochFinalityAttestingRound: 0, + } + + trigInternalKey := append([]byte(core.TriggerRegistryKeyPrefix), []byte(fmt.Sprint(shardHeader.Round))...) + + triggerRegBytes, err := json.Marshal(&triggerReg) + if err != nil { + return nil, err + } + + errPut := ssh.storageService.GetStorer(dataRetriever.BootstrapUnit).Put(trigInternalKey, triggerRegBytes) + if errPut != nil { + return nil, errPut + } + + return trigInternalKey, nil +} + +func getAllMiniBlocksWithDst(metaBlock *block.MetaBlock, destId uint32) map[string]block.ShardMiniBlockHeader { + hashDst := make(map[string]block.ShardMiniBlockHeader) + for i := 0; i < len(metaBlock.ShardInfo); i++ { + if metaBlock.ShardInfo[i].ShardID == destId { + continue + } + + for _, val := range metaBlock.ShardInfo[i].ShardMiniBlockHeaders { + if val.ReceiverShardID == destId && val.SenderShardID != destId { + hashDst[string(val.Hash)] = val + } + } + } + + for _, val := range metaBlock.MiniBlockHeaders { + isCrossShardDestMe := val.ReceiverShardID == destId && val.SenderShardID != destId + if isCrossShardDestMe { + shardMiniBlockHdr := block.ShardMiniBlockHeader{ + Hash: val.Hash, + ReceiverShardID: val.ReceiverShardID, + SenderShardID: val.SenderShardID, + TxCount: val.TxCount, + } + hashDst[string(val.Hash)] = shardMiniBlockHdr + } + } + + return hashDst +} + +// IsInterfaceNil returns true if there is no value under the interface +func (ssh *shardStorageHandler) IsInterfaceNil() bool { + return ssh == nil +} diff --git a/epochStart/bootstrap/simpleEpochStartMetaBlockInterceptor.go b/epochStart/bootstrap/simpleEpochStartMetaBlockInterceptor.go new file mode 100644 index 00000000000..ea7212a4622 --- /dev/null +++ b/epochStart/bootstrap/simpleEpochStartMetaBlockInterceptor.go @@ -0,0 +1,137 @@ +package bootstrap + +import ( + "math" + "sync" + "time" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/epochStart" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/process" +) + +const timeToWaitBeforeCheckingReceivedHeaders = 1 * time.Second +const numTriesUntilExit = 5 + +type simpleEpochStartMetaBlockInterceptor struct { + marshalizer marshal.Marshalizer + hasher hashing.Hasher + mutReceivedMetaBlocks 
sync.RWMutex + mapReceivedMetaBlocks map[string]*block.MetaBlock + mapMetaBlocksFromPeers map[string][]p2p.PeerID +} + +// NewSimpleEpochStartMetaBlockInterceptor will return a new instance of simpleEpochStartMetaBlockInterceptor +func NewSimpleEpochStartMetaBlockInterceptor(marshalizer marshal.Marshalizer, hasher hashing.Hasher) (*simpleEpochStartMetaBlockInterceptor, error) { + if check.IfNil(marshalizer) { + return nil, epochStart.ErrNilMarshalizer + } + if check.IfNil(hasher) { + return nil, epochStart.ErrNilHasher + } + + return &simpleEpochStartMetaBlockInterceptor{ + marshalizer: marshalizer, + hasher: hasher, + mutReceivedMetaBlocks: sync.RWMutex{}, + mapReceivedMetaBlocks: make(map[string]*block.MetaBlock), + mapMetaBlocksFromPeers: make(map[string][]p2p.PeerID), + }, nil +} + +// SetIsDataForCurrentShardVerifier - +func (s *simpleEpochStartMetaBlockInterceptor) SetIsDataForCurrentShardVerifier(_ process.InterceptedDataVerifier) error { + return nil +} + +// ProcessReceivedMessage will receive the metablocks and will add them to the maps +func (s *simpleEpochStartMetaBlockInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, _ p2p.PeerID) error { + metaBlock := &block.MetaBlock{} + err := s.marshalizer.Unmarshal(metaBlock, message.Data()) + if err != nil { + return err + } + + if !metaBlock.IsStartOfEpochBlock() { + return epochStart.ErrNotEpochStartBlock + } + + mbHash, err := core.CalculateHash(s.marshalizer, s.hasher, metaBlock) + if err != nil { + return err + } + + s.mutReceivedMetaBlocks.Lock() + s.mapReceivedMetaBlocks[string(mbHash)] = metaBlock + s.addToPeerList(string(mbHash), message.Peer()) + s.mutReceivedMetaBlocks.Unlock() + + return nil +} + +// this func should be called under mutex protection +func (s *simpleEpochStartMetaBlockInterceptor) addToPeerList(hash string, peer p2p.PeerID) { + peersListForHash := s.mapMetaBlocksFromPeers[hash] + for _, pid := range peersListForHash { + if pid == peer { + return + } + } + s.mapMetaBlocksFromPeers[hash] = append(s.mapMetaBlocksFromPeers[hash], peer) +} + +// GetEpochStartMetaBlock will return the metablock after it is confirmed or an error if the number of tries was exceeded +func (s *simpleEpochStartMetaBlockInterceptor) GetEpochStartMetaBlock(target int, epoch uint32) (*block.MetaBlock, error) { + // TODO : replace this with a channel which will be written in when data is ready + for count := 0; count < numTriesUntilExit; count++ { + time.Sleep(timeToWaitBeforeCheckingReceivedHeaders) + s.mutReceivedMetaBlocks.RLock() + for hash, peersList := range s.mapMetaBlocksFromPeers { + log.Debug("metablock from peers", "num peers", len(peersList), "target", target, "hash", []byte(hash)) + isOk := s.isMapEntryOk(peersList, hash, target, epoch) + if isOk { + metaBlockToReturn := s.mapReceivedMetaBlocks[hash] + s.mutReceivedMetaBlocks.RUnlock() + s.clearFields() + return metaBlockToReturn, nil + } + } + s.mutReceivedMetaBlocks.RUnlock() + } + + return nil, epochStart.ErrNumTriesExceeded +} + +func (s *simpleEpochStartMetaBlockInterceptor) isMapEntryOk( + peersList []p2p.PeerID, + hash string, + target int, + epoch uint32, +) bool { + mb := s.mapReceivedMetaBlocks[hash] + epochCheckNotRequired := epoch == math.MaxUint32 + isEpochOk := epochCheckNotRequired || mb.Epoch == epoch + if len(peersList) >= target && isEpochOk { + log.Info("got consensus for epoch start metablock", "len", len(peersList)) + return true + } + + return false +} + +func (s *simpleEpochStartMetaBlockInterceptor) clearFields() { + 
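// drop all previously received metablocks and their peer lists, under mutex protection + 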
s.mutReceivedMetaBlocks.Lock() + s.mapReceivedMetaBlocks = make(map[string]*block.MetaBlock) + s.mapMetaBlocksFromPeers = make(map[string][]p2p.PeerID) + s.mutReceivedMetaBlocks.Unlock() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (s *simpleEpochStartMetaBlockInterceptor) IsInterfaceNil() bool { + return s == nil +} diff --git a/epochStart/bootstrap/syncEpochStartMeta.go b/epochStart/bootstrap/syncEpochStartMeta.go new file mode 100644 index 00000000000..260b17c5e4d --- /dev/null +++ b/epochStart/bootstrap/syncEpochStartMeta.go @@ -0,0 +1,120 @@ +package bootstrap + +import ( + "math" + "time" + + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/epochStart" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/process/factory" +) + +type epochStartMetaSyncer struct { + requestHandler epochStart.RequestHandler + messenger p2p.Messenger + epochStartMetaBlockInterceptor EpochStartInterceptor + marshalizer marshal.Marshalizer + hasher hashing.Hasher +} + +// ArgsNewEpochStartMetaSyncer - +type ArgsNewEpochStartMetaSyncer struct { + RequestHandler epochStart.RequestHandler + Messenger p2p.Messenger + Marshalizer marshal.Marshalizer + Hasher hashing.Hasher +} + +const delayBetweenRequests = 1 * time.Second +const thresholdForConsideringMetaBlockCorrect = 0.2 +const numRequestsToSendOnce = 4 +const maxNumTimesToRetry = 100 + +func NewEpochStartMetaSyncer(args ArgsNewEpochStartMetaSyncer) (*epochStartMetaSyncer, error) { + e := &epochStartMetaSyncer{ + requestHandler: args.RequestHandler, + messenger: args.Messenger, + marshalizer: args.Marshalizer, + hasher: args.Hasher, + } + + var err error + e.epochStartMetaBlockInterceptor, err = NewSimpleEpochStartMetaBlockInterceptor(e.marshalizer, e.hasher) + if err != nil { + return nil, err + } + + return e, nil +} + +// SyncEpochStartMeta syncs the latest epoch start metablock +func (e *epochStartMetaSyncer) SyncEpochStartMeta(_ time.Duration) (*block.MetaBlock, error) { + err := e.initTopicForEpochStartMetaBlockInterceptor() + if err != nil { + return nil, err + } + defer func() { + e.resetTopicsAndInterceptors() + }() + + e.requestEpochStartMetaBlock() + + unknownEpoch := uint32(math.MaxUint32) + count := 0 + for { + if count > maxNumTimesToRetry { + return nil, epochStart.ErrNumTriesExceeded + } + + count++ + numConnectedPeers := len(e.messenger.ConnectedPeers()) + threshold := int(thresholdForConsideringMetaBlockCorrect * float64(numConnectedPeers)) + + mb, errConsensusNotReached := e.epochStartMetaBlockInterceptor.GetEpochStartMetaBlock(threshold, unknownEpoch) + if errConsensusNotReached == nil { + return mb, nil + } + + log.Info("consensus not reached for meta block. 
re-requesting and trying again...") + e.requestEpochStartMetaBlock() + } +} + +func (e *epochStartMetaSyncer) requestEpochStartMetaBlock() { + // send more requests + unknownEpoch := uint32(math.MaxUint32) + for i := 0; i < numRequestsToSendOnce; i++ { + time.Sleep(delayBetweenRequests) + e.requestHandler.RequestStartOfEpochMetaBlock(unknownEpoch) + } +} + +func (e *epochStartMetaSyncer) resetTopicsAndInterceptors() { + err := e.messenger.UnregisterMessageProcessor(factory.MetachainBlocksTopic) + if err != nil { + log.Info("error unregistering message processors", "error", err) + } +} + +func (e *epochStartMetaSyncer) initTopicForEpochStartMetaBlockInterceptor() error { + err := e.messenger.CreateTopic(factory.MetachainBlocksTopic, true) + if err != nil { + log.Info("error registering message processor", "error", err) + return err + } + + err = e.messenger.RegisterMessageProcessor(factory.MetachainBlocksTopic, e.epochStartMetaBlockInterceptor) + if err != nil { + return err + } + + return nil +} + +// IsInterfaceNil returns true if underlying object is nil +func (e *epochStartMetaSyncer) IsInterfaceNil() bool { + return e == nil +} diff --git a/epochStart/bootstrap/syncValidatorStatus.go b/epochStart/bootstrap/syncValidatorStatus.go new file mode 100644 index 00000000000..09e667e0539 --- /dev/null +++ b/epochStart/bootstrap/syncValidatorStatus.go @@ -0,0 +1,182 @@ +package bootstrap + +import ( + "context" + "fmt" + "time" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/epochStart" + "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/disabled" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/update/sync" +) + +type syncValidatorStatus struct { + miniBlocksSyncer epochStart.PendingMiniBlocksSyncHandler + dataPool dataRetriever.PoolsHolder + marshalizer marshal.Marshalizer + requestHandler process.RequestHandler + nodeCoordinator EpochStartNodesCoordinator + genesisNodesConfig *sharding.NodesSetup +} + +// ArgsNewSyncValidatorStatus +type ArgsNewSyncValidatorStatus struct { + DataPool dataRetriever.PoolsHolder + Marshalizer marshal.Marshalizer + RequestHandler process.RequestHandler + Rater sharding.ChanceComputer + GenesisNodesConfig *sharding.NodesSetup +} + +// NewSyncValidatorStatus creates a new validator status process component +func NewSyncValidatorStatus(args ArgsNewSyncValidatorStatus) (*syncValidatorStatus, error) { + s := &syncValidatorStatus{ + dataPool: args.DataPool, + marshalizer: args.Marshalizer, + requestHandler: args.RequestHandler, + genesisNodesConfig: args.GenesisNodesConfig, + } + syncMiniBlocksArgs := sync.ArgsNewPendingMiniBlocksSyncer{ + Storage: disabled.CreateMemUnit(), + Cache: s.dataPool.MiniBlocks(), + Marshalizer: s.marshalizer, + RequestHandler: s.requestHandler, + } + var err error + s.miniBlocksSyncer, err = sync.NewPendingMiniBlocksSyncer(syncMiniBlocksArgs) + if err != nil { + return nil, err + } + + nodeShuffler := sharding.NewXorValidatorsShuffler( + args.GenesisNodesConfig.MinNodesPerShard, + args.GenesisNodesConfig.MetaChainMinNodes, + args.GenesisNodesConfig.Hysteresis, + args.GenesisNodesConfig.Adaptivity, + ) + + argsNodesCoordinator := ArgsNewStartInEpochNodesCoordinator{ + Shuffler: nodeShuffler, + Chance: args.Rater, + 
ShardConsensusGroupSize: args.GenesisNodesConfig.ConsensusGroupSize, + MetaConsensusGroupSize: args.GenesisNodesConfig.MetaChainConsensusGroupSize, + } + s.nodeCoordinator, err = NewStartInEpochNodesCoordinator(argsNodesCoordinator) + if err != nil { + return nil, err + } + + return s, nil +} + +// NodesConfigFromMetaBlock syncs and creates the registry from the epoch start metablock +func (s *syncValidatorStatus) NodesConfigFromMetaBlock( + currMetaBlock *block.MetaBlock, + prevMetaBlock *block.MetaBlock, + publicKey []byte, +) (*sharding.NodesCoordinatorRegistry, uint32, error) { + if !currMetaBlock.IsStartOfEpochBlock() { + return nil, 0, epochStart.ErrNotEpochStartBlock + } + if !prevMetaBlock.IsStartOfEpochBlock() { + return nil, 0, epochStart.ErrNotEpochStartBlock + } + + prevEpochsValidators, err := s.computeNodesConfigFor(prevMetaBlock) + if err != nil { + return nil, 0, err + } + + currEpochsValidators, err := s.computeNodesConfigFor(currMetaBlock) + if err != nil { + return nil, 0, err + } + + selfShardId := s.nodeCoordinator.ComputeShardForSelfPublicKey(currMetaBlock.Epoch, publicKey) + + nodesConfig := &sharding.NodesCoordinatorRegistry{ + EpochsConfig: make(map[string]*sharding.EpochValidators, 2), + CurrentEpoch: currMetaBlock.Epoch, + } + + epochConfigId := fmt.Sprint(prevMetaBlock.Epoch) + nodesConfig.EpochsConfig[epochConfigId] = prevEpochsValidators + epochConfigId = fmt.Sprint(currMetaBlock.Epoch) + nodesConfig.EpochsConfig[epochConfigId] = currEpochsValidators + + return nodesConfig, selfShardId, nil +} + +func (s *syncValidatorStatus) computeNodesConfigFor(metaBlock *block.MetaBlock) (*sharding.EpochValidators, error) { + if metaBlock.Epoch == 0 { + return s.nodeCoordinator.ComputeNodesConfigForGenesis(s.genesisNodesConfig) + } + + epochValidatorsInfo, err := s.processNodesConfigFor(metaBlock) + if err != nil { + return nil, err + } + + return s.nodeCoordinator.ComputeNodesConfigFor(metaBlock, epochValidatorsInfo) +} + +func findPeerMiniBlockHeaders(metaBlock *block.MetaBlock) []block.ShardMiniBlockHeader { + shardMBHeaders := make([]block.ShardMiniBlockHeader, 0) + for _, mbHeader := range metaBlock.MiniBlockHeaders { + if mbHeader.Type != block.PeerBlock { + continue + } + + shardMBHdr := block.ShardMiniBlockHeader{ + Hash: mbHeader.Hash, + ReceiverShardID: mbHeader.ReceiverShardID, + SenderShardID: core.MetachainShardId, + TxCount: mbHeader.TxCount, + } + shardMBHeaders = append(shardMBHeaders, shardMBHdr) + } + return shardMBHeaders +} + +func (s *syncValidatorStatus) processNodesConfigFor( + metaBlock *block.MetaBlock, +) ([]*state.ValidatorInfo, error) { + shardMBHeaders := findPeerMiniBlockHeaders(metaBlock) + + s.miniBlocksSyncer.ClearFields() + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + err := s.miniBlocksSyncer.SyncPendingMiniBlocks(shardMBHeaders, ctx) + cancel() + if err != nil { + return nil, err + } + + peerMiniBlocks, err := s.miniBlocksSyncer.GetMiniBlocks() + if err != nil { + return nil, err + } + + validatorInfos := make([]*state.ValidatorInfo, 0) + for _, mb := range peerMiniBlocks { + for _, txHash := range mb.TxHashes { + vid := &state.ValidatorInfo{} + err := s.marshalizer.Unmarshal(vid, txHash) + if err != nil { + return nil, err + } + + validatorInfos = append(validatorInfos, vid) + } + } + + return validatorInfos, nil +} + +// IsInterfaceNil returns true if underlying object is nil +func (s *syncValidatorStatus) IsInterfaceNil() bool { + return s == nil +} diff --git a/epochStart/errors.go b/epochStart/errors.go index 47382d2f37a..c909ad86533 100644 --- 
a/epochStart/errors.go +++ b/epochStart/errors.go @@ -41,9 +41,6 @@ var ErrNilStorage = errors.New("nil storage") // ErrNilHeaderHandler signals that a nil header handler has been provided var ErrNilHeaderHandler = errors.New("nil header handler") -// ErrNilArgsPendingMiniblocks signals that nil argument was passed -var ErrNilArgsPendingMiniblocks = errors.New("nil arguments for pending miniblock object") - // ErrNilMiniblocks signals that nil argument was passed var ErrNilMiniblocks = errors.New("nil arguments for miniblocks object") @@ -86,15 +83,9 @@ var ErrNilTriggerStorage = errors.New("nil trigger storage") // ErrNilMetaNonceHashStorage signals that nil meta header nonce hash storage has been provided var ErrNilMetaNonceHashStorage = errors.New("nil meta nonce hash storage") -// ErrNilMiniblocksStorage signals that nil miniblocks storage has been provided -var ErrNilMiniblocksStorage = errors.New("nil miniblocks storage") - // ErrValidatorMiniBlockHashDoesNotMatch signals that created and received validatorInfo miniblock hash does not match var ErrValidatorMiniBlockHashDoesNotMatch = errors.New("validatorInfo miniblock hash does not match") -// ErrTxHashDoesNotMatch signals that created and received tx hash does not match -var ErrTxHashDoesNotMatch = errors.New("validatorInfo miniblock tx hash does not match") - // ErrRewardMiniBlockHashDoesNotMatch signals that created and received rewards miniblock hash does not match var ErrRewardMiniBlockHashDoesNotMatch = errors.New("reward miniblock hash does not match") @@ -133,3 +124,18 @@ var ErrValidatorInfoMiniBlocksNumDoesNotMatch = errors.New("number of created an // ErrNilValidatorInfo signals that a nil value for the validatorInfo has been provided var ErrNilValidatorInfo = errors.New("validator info is nil") + +// ErrEpochStartDataForShardNotFound signals that epoch start shard data was not found for current shard id +var ErrEpochStartDataForShardNotFound = errors.New("epoch start data for current shard not found") + +// ErrNumTriesExceeded signals that number of tries has exceeded +var ErrNumTriesExceeded = errors.New("number of tries exceeded") + +// ErrMissingHeader signals that searched header is missing +var ErrMissingHeader = errors.New("missing header") + +// ErrSmallShardEligibleListSize signals that the eligible validators list's size is less than the consensus size +var ErrSmallShardEligibleListSize = errors.New("small shard eligible list size") + +// ErrSmallMetachainEligibleListSize signals that the eligible validators list's size is less than the consensus size +var ErrSmallMetachainEligibleListSize = errors.New("small metachain eligible list size") diff --git a/epochStart/interface.go b/epochStart/interface.go index 0a8cdfd5689..f8508273e1f 100644 --- a/epochStart/interface.go +++ b/epochStart/interface.go @@ -1,9 +1,11 @@ package epochStart import ( + "context" "time" "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" ) // TriggerHandler defines the functionalities for an start of epoch trigger @@ -48,6 +50,7 @@ type RequestHandler interface { RequestShardHeaderByNonce(shardId uint32, nonce uint64) RequestStartOfEpochMetaBlock(epoch uint32) RequestMiniBlocks(destShardID uint32, miniblocksHashes [][]byte) + RequestInterval() time.Duration IsInterfaceNil() bool } @@ -77,3 +80,32 @@ type ValidatorStatisticsProcessorHandler interface { Process(info data.ValidatorInfoHandler) error IsInterfaceNil() bool } + +// HeadersByHashSyncer defines the methods to sync all missing headers by 
hash +type HeadersByHashSyncer interface { + SyncMissingHeadersByHash(shardIDs []uint32, headersHashes [][]byte, ctx context.Context) error + GetHeaders() (map[string]data.HeaderHandler, error) + ClearFields() + IsInterfaceNil() bool +} + +// PendingMiniBlocksSyncHandler defines the methods to sync all pending miniblocks +type PendingMiniBlocksSyncHandler interface { + SyncPendingMiniBlocks(miniBlockHeaders []block.ShardMiniBlockHeader, ctx context.Context) error + GetMiniBlocks() (map[string]*block.MiniBlock, error) + ClearFields() + IsInterfaceNil() bool +} + +// AccountsDBSyncer defines the methods for the accounts db syncer +type AccountsDBSyncer interface { + GetSyncedTries() map[string]data.Trie + SyncAccounts(rootHash []byte) error + IsInterfaceNil() bool +} + +// StartOfEpochMetaSyncer defines the methods to synchronize epoch start meta block from the network when nothing is known +type StartOfEpochMetaSyncer interface { + SyncEpochStartMeta(waitTime time.Duration) (*block.MetaBlock, error) + IsInterfaceNil() bool +} diff --git a/epochStart/metachain/trigger.go b/epochStart/metachain/trigger.go index 4bd11ec84c6..9a6a1173896 100644 --- a/epochStart/metachain/trigger.go +++ b/epochStart/metachain/trigger.go @@ -95,10 +95,10 @@ func NewEpochStartTrigger(args *ArgsNewMetaEpochStartTrigger) (*trigger, error) return nil, epochStart.ErrNilMetaBlockStorage } - trigStateKey := fmt.Sprintf("initial_value_epoch%d", args.Epoch) + triggerStateKey := core.TriggerRegistryInitialKeyPrefix + fmt.Sprintf("%d", args.Epoch) trigger := &trigger{ - triggerStateKey: []byte(trigStateKey), + triggerStateKey: []byte(triggerStateKey), roundsPerEpoch: uint64(args.Settings.RoundsPerEpoch), epochStartTime: args.GenesisTime, currEpochStartRound: args.EpochStartRound, diff --git a/epochStart/metachain/triggerRegistry.go b/epochStart/metachain/triggerRegistry.go index 5f64d384be1..a0b84c05c61 100644 --- a/epochStart/metachain/triggerRegistry.go +++ b/epochStart/metachain/triggerRegistry.go @@ -3,11 +3,10 @@ package metachain import ( "encoding/json" + "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/data/block" ) -const keyPrefix = "epochStartTrigger_" - // TriggerRegistry holds the data required to correctly initialize the trigger when booting from saved state type TriggerRegistry struct { Epoch uint32 @@ -21,7 +20,7 @@ type TriggerRegistry struct { // LoadState loads into trigger the saved state func (t *trigger) LoadState(key []byte) error { - trigInternalKey := append([]byte(keyPrefix), key...) + trigInternalKey := append([]byte(core.TriggerRegistryKeyPrefix), key...) log.Debug("getting start of epoch trigger state", "key", trigInternalKey) data, err := t.triggerStorage.Get(trigInternalKey) @@ -64,7 +63,7 @@ func (t *trigger) saveState(key []byte) error { return err } - trigInternalKey := append([]byte(keyPrefix), key...) + trigInternalKey := append([]byte(core.TriggerRegistryKeyPrefix), key...) 
log.Debug("saving start of epoch trigger state", "key", trigInternalKey) return t.triggerStorage.Put(trigInternalKey, data) diff --git a/epochStart/mock/chainStorerStub.go b/epochStart/mock/chainStorerStub.go index 02f9a3e5635..8be4fa3a835 100644 --- a/epochStart/mock/chainStorerStub.go +++ b/epochStart/mock/chainStorerStub.go @@ -6,7 +6,7 @@ import ( "github.com/pkg/errors" ) -// ChainStorerStub is a mock implementation of the ChianStorer interface +// ChainStorerStub is a mock implementation of the ChainStorer interface type ChainStorerStub struct { AddStorerCalled func(key dataRetriever.UnitType, s storage.Storer) GetStorerCalled func(unitType dataRetriever.UnitType) storage.Storer diff --git a/epochStart/mock/nodesCoordinatorStub.go b/epochStart/mock/nodesCoordinatorStub.go index d34d99f443e..304e57da481 100644 --- a/epochStart/mock/nodesCoordinatorStub.go +++ b/epochStart/mock/nodesCoordinatorStub.go @@ -13,6 +13,11 @@ type NodesCoordinatorStub struct { GetAllValidatorsPublicKeysCalled func() (map[uint32][][]byte, error) } +// SetConfig - +func (ncm *NodesCoordinatorStub) SetConfig(_ *sharding.NodesCoordinatorRegistry) error { + return nil +} + // ComputeLeaving - func (ncm *NodesCoordinatorStub) ComputeLeaving(_ []sharding.Validator) []sharding.Validator { return nil @@ -28,11 +33,6 @@ func (ncm *NodesCoordinatorStub) GetAllWaitingValidatorsPublicKeys(_ uint32) (ma return nil, nil } -// UpdatePeersListAndIndex - -func (ncm *NodesCoordinatorStub) UpdatePeersListAndIndex() error { - return nil -} - // GetNumTotalEligible - func (ncm *NodesCoordinatorStub) GetNumTotalEligible() uint64 { return 1 @@ -88,22 +88,8 @@ func (ncm *NodesCoordinatorStub) GetConsensusValidatorsPublicKeys( return nil, nil } -// GetConsensusValidatorsRewardsAddresses - -func (ncm *NodesCoordinatorStub) GetConsensusValidatorsRewardsAddresses( - randomness []byte, - round uint64, - shardId uint32, - epoch uint32, -) ([]string, error) { - if ncm.GetValidatorsPublicKeysCalled != nil { - return ncm.GetValidatorsRewardsAddressesCalled(randomness, round, shardId, epoch) - } - - return nil, nil -} - // SetNodesPerShards - -func (ncm *NodesCoordinatorStub) SetNodesPerShards(_ map[uint32][]sharding.Validator, _ map[uint32][]sharding.Validator, _ uint32, _ bool) error { +func (ncm *NodesCoordinatorStub) SetNodesPerShards(_ map[uint32][]sharding.Validator, _ map[uint32][]sharding.Validator, _ uint32) error { return nil } diff --git a/epochStart/mock/requestHandlerStub.go b/epochStart/mock/requestHandlerStub.go index 9044d157c54..c99460de944 100644 --- a/epochStart/mock/requestHandlerStub.go +++ b/epochStart/mock/requestHandlerStub.go @@ -1,5 +1,7 @@ package mock +import "time" + // RequestHandlerStub - type RequestHandlerStub struct { RequestShardHeaderCalled func(shardId uint32, hash []byte) @@ -13,6 +15,11 @@ type RequestHandlerStub struct { RequestStartOfEpochMetaBlockCalled func(epoch uint32) } +// RequestInterval - +func (rhs *RequestHandlerStub) RequestInterval() time.Duration { + return time.Second +} + // RequestStartOfEpochMetaBlock - func (rhs *RequestHandlerStub) RequestStartOfEpochMetaBlock(epoch uint32) { if rhs.RequestStartOfEpochMetaBlockCalled == nil { diff --git a/epochStart/shardchain/trigger.go b/epochStart/shardchain/trigger.go index 66a4f42fd8c..c8bf54246f5 100644 --- a/epochStart/shardchain/trigger.go +++ b/epochStart/shardchain/trigger.go @@ -144,10 +144,10 @@ func NewEpochStartTrigger(args *ArgsShardEpochStartTrigger) (*trigger, error) { return nil, epochStart.ErrNilShardHeaderStorage } - trigStateKey 
:= fmt.Sprintf("initial_value_epoch%d", args.Epoch) + triggerStateKey := core.TriggerRegistryInitialKeyPrefix + fmt.Sprintf("%d", args.Epoch) newTrigger := &trigger{ - triggerStateKey: []byte(trigStateKey), + triggerStateKey: []byte(triggerStateKey), epoch: args.Epoch, currentRoundIndex: 0, epochStartRound: 0, diff --git a/epochStart/shardchain/triggerRegistry.go b/epochStart/shardchain/triggerRegistry.go index 2bc0a6f3b3f..0ff3479a0f0 100644 --- a/epochStart/shardchain/triggerRegistry.go +++ b/epochStart/shardchain/triggerRegistry.go @@ -3,11 +3,10 @@ package shardchain import ( "encoding/json" + "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/data/block" ) -const keyPrefix = "epochStartTrigger_" - // TriggerRegistry holds the data required to correctly initialize the trigger when booting from saved state type TriggerRegistry struct { Epoch uint32 @@ -22,7 +21,7 @@ type TriggerRegistry struct { // LoadState loads into trigger the saved state func (t *trigger) LoadState(key []byte) error { - trigInternalKey := append([]byte(keyPrefix), key...) + trigInternalKey := append([]byte(core.TriggerRegistryKeyPrefix), key...) log.Debug("getting start of epoch trigger state", "key", trigInternalKey) data, err := t.triggerStorage.Get(trigInternalKey) @@ -69,7 +68,7 @@ func (t *trigger) saveState(key []byte) error { return err } - trigInternalKey := append([]byte(keyPrefix), key...) + trigInternalKey := append([]byte(core.TriggerRegistryKeyPrefix), key...) log.Debug("saving start of epoch trigger state", "key", trigInternalKey) return t.triggerStorage.Put(trigInternalKey, data) diff --git a/facade/elrondNodeFacade.go b/facade/elrondNodeFacade.go index cc193382b4c..b129ed8a1fa 100644 --- a/facade/elrondNodeFacade.go +++ b/facade/elrondNodeFacade.go @@ -100,13 +100,8 @@ func (ef *ElrondNodeFacade) SetConfig(facadeConfig *config.FacadeConfig) { // StartNode starts the underlying node func (ef *ElrondNodeFacade) StartNode() error { - err := ef.node.Start() - if err != nil { - return err - } - - err = ef.node.StartConsensus() - return err + ef.node.Start() + return ef.node.StartConsensus() } // StartBackgroundServices starts all background services needed for the correct functionality of the node diff --git a/facade/elrondNodeFacade_test.go b/facade/elrondNodeFacade_test.go index 08e924cba73..cdecb739685 100644 --- a/facade/elrondNodeFacade_test.go +++ b/facade/elrondNodeFacade_test.go @@ -134,12 +134,8 @@ func TestNewElrondFacade_WithInvalidSameSourceRequestsShouldErr(t *testing.T) { func TestElrondFacade_StartNodeWithNodeNotNullShouldNotReturnError(t *testing.T) { started := false node := &mock.NodeMock{ - StartHandler: func() error { + StartHandler: func() { started = true - return nil - }, - P2PBootstrapHandler: func() error { - return nil }, IsRunningHandler: func() bool { return started }, } ef := createElrondNodeFacadeWithMockResolver(node) err := ef.StartNode() assert.Nil(t, err) isRunning := ef.IsNodeRunning() assert.True(t, isRunning) } -func TestElrondFacade_StartNodeWithErrorOnStartNodeShouldReturnError(t *testing.T) { - started := false - node := &mock.NodeMock{ - StartHandler: func() error { - return fmt.Errorf("error on start node") - }, - IsRunningHandler: func() bool { - return started - }, - } - - ef := createElrondNodeFacadeWithMockResolver(node) - - err := ef.StartNode() - assert.NotNil(t, err) - - isRunning := ef.IsNodeRunning() - assert.False(t, isRunning) -} - func TestElrondFacade_StartNodeWithErrorOnStartConsensusShouldReturnError(t *testing.T) { started := false node := 
&mock.NodeMock{ - StartHandler: func() error { + StartHandler: func() { started = true - return nil - }, - P2PBootstrapHandler: func() error { - return nil }, IsRunningHandler: func() bool { return started diff --git a/facade/interface.go b/facade/interface.go index 74e2e5e9820..ed53936904c 100644 --- a/facade/interface.go +++ b/facade/interface.go @@ -13,11 +13,8 @@ import ( //NodeWrapper contains all functions that a node should contain. type NodeWrapper interface { - // Start will create a new messenger and and set up the Node state as running - Start() error - - // P2PBootstrap starts the peer discovery process and peer connection filtering - P2PBootstrap() error + // Start will set up the Node state as running + Start() //IsRunning returns if the underlying node is running IsRunning() bool diff --git a/facade/mock/nodeMock.go b/facade/mock/nodeMock.go index 38acf7138c9..a35de4b432a 100644 --- a/facade/mock/nodeMock.go +++ b/facade/mock/nodeMock.go @@ -11,9 +11,8 @@ import ( // NodeMock - type NodeMock struct { AddressHandler func() (string, error) - StartHandler func() error + StartHandler func() StopHandler func() error - P2PBootstrapHandler func() error IsRunningHandler func() bool ConnectToAddressesHandler func([]string) error StartConsensusHandler func() error @@ -38,13 +37,8 @@ func (nm *NodeMock) Address() (string, error) { } // Start - -func (nm *NodeMock) Start() error { - return nm.StartHandler() -} - -// P2PBootstrap - -func (nm *NodeMock) P2PBootstrap() error { - return nm.P2PBootstrapHandler() +func (nm *NodeMock) Start() { + nm.StartHandler() } // IsRunning - diff --git a/go.mod b/go.mod index e3543141a03..88f961e7190 100644 --- a/go.mod +++ b/go.mod @@ -44,5 +44,6 @@ require ( github.com/whyrusleeping/go-logging v0.0.1 // indirect github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 + golang.org/x/net v0.0.0-20190620200207-3b0461eec859 gopkg.in/go-playground/validator.v8 v8.18.2 ) diff --git a/hashing/factory/errors.go b/hashing/factory/errors.go new file mode 100644 index 00000000000..f990e5fbff1 --- /dev/null +++ b/hashing/factory/errors.go @@ -0,0 +1,6 @@ +package factory + +import "errors" + +// ErrNoHasherInConfig signals that no hasher was provided in the config file +var ErrNoHasherInConfig = errors.New("no hasher provided in config file") diff --git a/hashing/factory/hasherFactory.go b/hashing/factory/hasherFactory.go new file mode 100644 index 00000000000..266c3d9be16 --- /dev/null +++ b/hashing/factory/hasherFactory.go @@ -0,0 +1,19 @@ +package factory + +import ( + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/hashing/blake2b" + "github.com/ElrondNetwork/elrond-go/hashing/sha256" +) + +// NewHasher will return a new instance of hasher based on the value stored in config +func NewHasher(name string) (hashing.Hasher, error) { + switch name { + case "sha256": + return sha256.Sha256{}, nil + case "blake2b": + return &blake2b.Blake2b{}, nil + } + + return nil, ErrNoHasherInConfig +} diff --git a/hashing/factory/hasherFactory_test.go b/hashing/factory/hasherFactory_test.go new file mode 100644 index 00000000000..08661e35ffd --- /dev/null +++ b/hashing/factory/hasherFactory_test.go @@ -0,0 +1,42 @@ +package factory + +import ( + "testing" + + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/hashing/blake2b" + "github.com/ElrondNetwork/elrond-go/hashing/sha256" + "github.com/stretchr/testify/assert" +) + +func TestNewHasher(t 
*testing.T) { + t.Parallel() + + type res struct { + hasher hashing.Hasher + err error + } + testData := make(map[string]res) + testData["sha256"] = res{ + hasher: sha256.Sha256{}, + err: nil, + } + testData["blake2b"] = res{ + hasher: &blake2b.Blake2b{}, + err: nil, + } + testData[""] = res{ + hasher: nil, + err: ErrNoHasherInConfig, + } + testData["invalid hasher name"] = res{ + hasher: nil, + err: ErrNoHasherInConfig, + } + + for key, value := range testData { + hasher, err := NewHasher(key) + assert.Equal(t, value.err, err) + assert.Equal(t, value.hasher, hasher) + } +} diff --git a/integrationTests/consensus/testInitializer.go b/integrationTests/consensus/testInitializer.go index 1eddc31000f..1b906baf1ea 100644 --- a/integrationTests/consensus/testInitializer.go +++ b/integrationTests/consensus/testInitializer.go @@ -126,8 +126,8 @@ func displayAndStartNodes(nodes []*testNode) { hex.EncodeToString(skBuff), hex.EncodeToString(pkBuff), ) - _ = n.node.Start() - _ = n.node.P2PBootstrap() + n.node.Start() + _ = n.mesenger.Bootstrap() } } @@ -495,7 +495,6 @@ func createNodes( WaitingNodes: waitingMap, SelfPublicKey: []byte(strconv.Itoa(i)), ConsensusGroupCache: consensusCache, - ListIndexUpdater: &mock.ListIndexUpdaterStub{}, } nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) diff --git a/integrationTests/mock/chainStorerMock.go b/integrationTests/mock/chainStorerMock.go index 2948d09b6ed..83e932e1016 100644 --- a/integrationTests/mock/chainStorerMock.go +++ b/integrationTests/mock/chainStorerMock.go @@ -6,7 +6,7 @@ import ( "github.com/pkg/errors" ) -// ChainStorerMock is a mock implementation of the ChianStorer interface +// ChainStorerMock is a mock implementation of the ChainStorer interface type ChainStorerMock struct { AddStorerCalled func(key dataRetriever.UnitType, s storage.Storer) GetStorerCalled func(unitType dataRetriever.UnitType) storage.Storer diff --git a/integrationTests/mock/listIndexUpdaterStub.go b/integrationTests/mock/listIndexUpdaterStub.go index 0471a84ac23..31c5ae19b76 100644 --- a/integrationTests/mock/listIndexUpdaterStub.go +++ b/integrationTests/mock/listIndexUpdaterStub.go @@ -2,13 +2,13 @@ package mock // ListIndexUpdaterStub - type ListIndexUpdaterStub struct { - UpdateListAndIndexCalled func(pubKey string, list string, index int32) error + UpdateListAndIndexCalled func(pubKey string, shardID uint32, list string, index uint32) error } // UpdateListAndIndex - -func (lius *ListIndexUpdaterStub) UpdateListAndIndex(pubKey string, shardID uint32, list string, index int32) error { +func (lius *ListIndexUpdaterStub) UpdateListAndIndex(pubKey string, shardID uint32, list string, index uint32) error { if lius.UpdateListAndIndexCalled != nil { - return lius.UpdateListAndIndexCalled(pubKey, list, index) + return lius.UpdateListAndIndexCalled(pubKey, shardID, list, index) } return nil diff --git a/integrationTests/mock/nodesCoordinatorMock.go b/integrationTests/mock/nodesCoordinatorMock.go index 08fe526217b..90f1e53451f 100644 --- a/integrationTests/mock/nodesCoordinatorMock.go +++ b/integrationTests/mock/nodesCoordinatorMock.go @@ -14,6 +14,11 @@ type NodesCoordinatorMock struct { GetAllWaitingValidatorsPublicKeysCalled func() (map[uint32][][]byte, error) } +// SetConfig - +func (ncm *NodesCoordinatorMock) SetConfig(_ *sharding.NodesCoordinatorRegistry) error { + return nil +} + // GetNumTotalEligible - func (ncm *NodesCoordinatorMock) GetNumTotalEligible() uint64 { return 1 @@ -28,11 +33,6 @@ func (ncm *NodesCoordinatorMock) 
GetAllEligibleValidatorsPublicKeys(_ uint32) (m return nil, nil } -// UpdatePeersListAndIndex - -func (ncm *NodesCoordinatorMock) UpdatePeersListAndIndex() error { - return nil -} - // GetAllWaitingValidatorsPublicKeys - func (ncm *NodesCoordinatorMock) GetAllWaitingValidatorsPublicKeys(_ uint32) (map[uint32][][]byte, error) { if ncm.GetAllWaitingValidatorsPublicKeysCalled != nil { @@ -93,32 +93,8 @@ func (ncm *NodesCoordinatorMock) GetConsensusValidatorsPublicKeys( return pubKeys, nil } -// GetConsensusValidatorsRewardsAddresses - -func (ncm *NodesCoordinatorMock) GetConsensusValidatorsRewardsAddresses( - randomness []byte, - round uint64, - shardId uint32, - epoch uint32, -) ([]string, error) { - if ncm.GetValidatorsPublicKeysCalled != nil { - return ncm.GetValidatorsRewardsAddressesCalled(randomness, round, shardId, epoch) - } - - validators, err := ncm.ComputeConsensusGroup(randomness, round, shardId, epoch) - if err != nil { - return nil, err - } - - addresses := make([]string, 0) - for _, v := range validators { - addresses = append(addresses, string(v.Address())) - } - - return addresses, nil -} - // SetNodesPerShards - -func (ncm *NodesCoordinatorMock) SetNodesPerShards(_ map[uint32][]sharding.Validator, _ map[uint32][]sharding.Validator, _ uint32, _ bool) error { +func (ncm *NodesCoordinatorMock) SetNodesPerShards(_ map[uint32][]sharding.Validator, _ map[uint32][]sharding.Validator, _ uint32) error { return nil } @@ -169,7 +145,7 @@ func (ncm *NodesCoordinatorMock) GetOwnPublicKey() []byte { } // GetNodesPerShard - -func (ncm *NodesCoordinatorMock) GetNodesPerShard(epoch uint32) (map[uint32][]sharding.Validator, error) { +func (ncm *NodesCoordinatorMock) GetNodesPerShard(_ uint32) (map[uint32][]sharding.Validator, error) { return nil, nil } diff --git a/integrationTests/mock/pathManagerStub.go b/integrationTests/mock/pathManagerStub.go new file mode 100644 index 00000000000..78aa45b6b67 --- /dev/null +++ b/integrationTests/mock/pathManagerStub.go @@ -0,0 +1,32 @@ +package mock + +import "fmt" + +// PathManagerStub - +type PathManagerStub struct { + PathForEpochCalled func(shardId string, epoch uint32, identifier string) string + PathForStaticCalled func(shardId string, identifier string) string +} + +// PathForEpoch - +func (p *PathManagerStub) PathForEpoch(shardId string, epoch uint32, identifier string) string { + if p.PathForEpochCalled != nil { + return p.PathForEpochCalled(shardId, epoch, identifier) + } + + return fmt.Sprintf("Epoch_%d/Shard_%s/%s", epoch, shardId, identifier) +} + +// PathForStatic - +func (p *PathManagerStub) PathForStatic(shardId string, identifier string) string { + if p.PathForStaticCalled != nil { + return p.PathForStaticCalled(shardId, identifier) + } + + return fmt.Sprintf("Static/Shard_%s/%s", shardId, identifier) +} + +// IsInterfaceNil - +func (p *PathManagerStub) IsInterfaceNil() bool { + return p == nil +} diff --git a/integrationTests/mock/raterMock.go b/integrationTests/mock/raterMock.go index 2048f631eb8..7c09cb2eae9 100644 --- a/integrationTests/mock/raterMock.go +++ b/integrationTests/mock/raterMock.go @@ -60,7 +60,11 @@ func (rm *RaterMock) ComputeDecreaseValidator(rating uint32) uint32 { // GetChance - func (rm *RaterMock) GetChance(rating uint32) uint32 { - return rm.GetChanceCalled(rating) + if rm.GetChanceCalled != nil { + return rm.GetChanceCalled(rating) + } + + return 80 } // SetRatingReader - diff --git a/integrationTests/mock/requestHandlerStub.go b/integrationTests/mock/requestHandlerStub.go index 18676dd70cd..3565a7a2fc7 100644 
--- a/integrationTests/mock/requestHandlerStub.go +++ b/integrationTests/mock/requestHandlerStub.go @@ -1,5 +1,7 @@ package mock +import "time" + // RequestHandlerStub - type RequestHandlerStub struct { RequestShardHeaderCalled func(shardID uint32, hash []byte) @@ -15,6 +17,11 @@ type RequestHandlerStub struct { RequestStartOfEpochMetaBlockCalled func(epoch uint32) } +// RequestInterval - +func (rhs *RequestHandlerStub) RequestInterval() time.Duration { + return time.Second +} + // RequestStartOfEpochMetaBlock - func (rhs *RequestHandlerStub) RequestStartOfEpochMetaBlock(epoch uint32) { if rhs.RequestStartOfEpochMetaBlockCalled == nil { diff --git a/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go b/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go new file mode 100644 index 00000000000..f755f47a90f --- /dev/null +++ b/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go @@ -0,0 +1,512 @@ +package startInEpoch + +import ( + "context" + "encoding/hex" + "math/big" + "os" + "strconv" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/config" + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/state" + triesFactory "github.com/ElrondNetwork/elrond-go/data/trie/factory" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/integrationTests" + "github.com/ElrondNetwork/elrond-go/integrationTests/mock" + "github.com/ElrondNetwork/elrond-go/integrationTests/multiShard/endOfEpoch" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process/block/bootstrapStorage" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/ElrondNetwork/elrond-go/storage/factory" + "github.com/ElrondNetwork/elrond-go/storage/storageUnit" + "github.com/stretchr/testify/assert" +) + +func TestStartInEpochForAShardNodeInMultiShardedEnvironment(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + numOfShards := 2 + totalNodesPerShard := 4 + numNodesPerShardOnline := totalNodesPerShard - 1 + shardCnsSize := 2 + metaCnsSize := 3 + numMetachainNodes := 3 + + advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") + _ = advertiser.Bootstrap() + + nodesMap := integrationTests.CreateNodesWithNodesCoordinator( + numNodesPerShardOnline, + numMetachainNodes, + numOfShards, + shardCnsSize, + metaCnsSize, + integrationTests.GetConnectableAddress(advertiser), + ) + + nodes := convertToSlice(nodesMap) + + // TODO: refactor test - node to join late should be created late. + nodeToJoinLate := nodes[numNodesPerShardOnline] // the last node in shard 0, which was not used in consensus + _ = nodeToJoinLate.Messenger.Close() // take the node offline + // TODO: call nodeToJoinLate.Messenger.Bootstrap() later in the test, followed by a time.Sleep, to allow it to bootstrap its peers. + + nodes = append(nodes[:numNodesPerShardOnline], nodes[numNodesPerShardOnline+1:]...) + nodes = append(nodes[:2*numNodesPerShardOnline], nodes[2*numNodesPerShardOnline+1:]...) 
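+ // drop the nodes that will not take part in the block proposing rounds from the working slice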
+ + roundsPerEpoch := uint64(10) + for _, node := range nodes { + node.EpochStartTrigger.SetRoundsPerEpoch(roundsPerEpoch) + } + + idxProposers := make([]int, numOfShards+1) + for i := 0; i < numOfShards; i++ { + idxProposers[i] = i * numNodesPerShardOnline + } + idxProposers[numOfShards] = numOfShards * numNodesPerShardOnline + + integrationTests.DisplayAndStartNodes(nodes) + + defer func() { + _ = advertiser.Close() + for _, n := range nodes { + _ = n.Node.Stop() + } + }() + + initialVal := big.NewInt(10000000) + sendValue := big.NewInt(5) + integrationTests.MintAllNodes(nodes, initialVal) + receiverAddress := []byte("12345678901234567890123456789012") + + round := uint64(0) + nonce := uint64(0) + round = integrationTests.IncrementAndPrintRound(round) + nonce++ + + time.Sleep(time.Second) + + /////////----- wait for epoch end period + epoch := uint32(2) + nrRoundsToPropagateMultiShard := uint64(5) + for i := uint64(0); i <= (uint64(epoch)*roundsPerEpoch)+nrRoundsToPropagateMultiShard; i++ { + integrationTests.UpdateRound(nodes, round) + integrationTests.ProposeBlock(nodes, idxProposers, round, nonce) + integrationTests.SyncBlock(t, nodes, idxProposers, round) + round = integrationTests.IncrementAndPrintRound(round) + nonce++ + + for _, node := range nodes { + integrationTests.CreateAndSendTransaction(node, sendValue, receiverAddress, "") + } + + time.Sleep(time.Second) + } + + time.Sleep(time.Second) + + endOfEpoch.VerifyThatNodesHaveCorrectEpoch(t, epoch, nodes) + endOfEpoch.VerifyIfAddedShardHeadersAreWithNewEpoch(t, nodes) + + epochHandler := &mock.EpochStartTriggerStub{ + EpochCalled: func() uint32 { + return epoch + }, + } + for _, node := range nodes { + _ = dataRetriever.SetEpochHandlerToHdrResolver(node.ResolversContainer, epochHandler) + } + + // TODO: refactor this test in another PR + + generalConfig := getGeneralConfig() + roundDurationMillis := 4000 + epochDurationMillis := generalConfig.EpochStartConfig.RoundsPerEpoch * int64(roundDurationMillis) + nodesConfig := sharding.NodesSetup{ + StartTime: time.Now().Add(-time.Duration(epochDurationMillis) * time.Millisecond).Unix(), + RoundDuration: 4000, + InitialNodes: getInitialNodes(nodesMap), + ChainID: string(integrationTests.ChainID), + } + nodesConfig.SetNumberOfShards(uint32(numOfShards)) + + defer func() { + errRemoveDir := os.RemoveAll("Epoch_0") + assert.NoError(t, errRemoveDir) + }() + + genesisShardCoordinator, _ := sharding.NewMultiShardCoordinator(nodesConfig.NumberOfShards(), 0) + messenger := integrationTests.CreateMessengerWithKadDht(context.Background(), integrationTests.GetConnectableAddress(advertiser)) + _ = messenger.Bootstrap() + time.Sleep(integrationTests.P2pBootstrapDelay) + + trieStorageManager, triesHolder, _ := createTries(getGeneralConfig(), integrationTests.TestMarshalizer, integrationTests.TestHasher, 0, &mock.PathManagerStub{}) + argsBootstrapHandler := bootstrap.ArgsEpochStartBootstrap{ + PublicKey: nodeToJoinLate.NodeKeys.Pk, + Marshalizer: integrationTests.TestMarshalizer, + TxSignMarshalizer: integrationTests.TestTxSignMarshalizer, + Hasher: integrationTests.TestHasher, + Messenger: messenger, + GeneralConfig: getGeneralConfig(), + GenesisShardCoordinator: genesisShardCoordinator, + EconomicsData: integrationTests.CreateEconomicsData(), + SingleSigner: &mock.SignerMock{}, + BlockSingleSigner: &mock.SignerMock{}, + KeyGen: &mock.KeyGenMock{}, + BlockKeyGen: &mock.KeyGenMock{}, + GenesisNodesConfig: &nodesConfig, + PathManager: &mock.PathManagerStub{}, + WorkingDir: "test_directory", + 
DefaultDBPath: "test_db", + DefaultEpochString: "test_epoch", + DefaultShardString: "test_shard", + Rater: &mock.RaterMock{}, + DestinationShardAsObserver: "0", + TrieContainer: triesHolder, + TrieStorageManagers: trieStorageManager, + } + epochStartBootstrap, err := bootstrap.NewEpochStartBootstrap(argsBootstrapHandler) + assert.Nil(t, err) + + _, err = epochStartBootstrap.Bootstrap() + assert.NoError(t, err) + //assert.Equal(t, epoch, params.Epoch) + //assert.Equal(t, uint32(0), params.SelfShardId) + //assert.Equal(t, uint32(2), params.NumOfShards) + + shardC, _ := sharding.NewMultiShardCoordinator(2, 0) + + storageFactory, err := factory.NewStorageServiceFactory( + &generalConfig, + shardC, + &mock.PathManagerStub{}, + &mock.EpochStartNotifierStub{}, + epoch) + assert.NoError(t, err) + storageServiceShard, err := storageFactory.CreateForShard() + assert.NoError(t, err) + assert.NotNil(t, storageServiceShard) + + bootstrapUnit := storageServiceShard.GetStorer(dataRetriever.BootstrapUnit) + assert.NotNil(t, bootstrapUnit) + + highestRound, err := bootstrapUnit.Get([]byte(core.HighestRoundFromBootStorage)) + assert.NoError(t, err) + var roundFromStorage bootstrapStorage.RoundNum + err = integrationTests.TestMarshalizer.Unmarshal(&roundFromStorage, highestRound) + assert.NoError(t, err) + + roundInt64 := roundFromStorage.Num + assert.Equal(t, int64(21), roundInt64) + + key := []byte(strconv.FormatInt(roundInt64, 10)) + bootstrapDataBytes, err := bootstrapUnit.Get(key) + assert.NoError(t, err) + + var bd bootstrapStorage.BootstrapData + err = integrationTests.TestMarshalizer.Unmarshal(&bd, bootstrapDataBytes) + assert.NoError(t, err) + assert.Equal(t, epoch-1, bd.LastHeader.Epoch) +} + +func createTries( + config config.Config, + marshalizer marshal.Marshalizer, + hasher hashing.Hasher, + shardId uint32, + pathManager storage.PathManagerHandler, +) (map[string]data.StorageManager, state.TriesHolder, error) { + + trieContainer := state.NewDataTriesHolder() + trieFactoryArgs := triesFactory.TrieFactoryArgs{ + EvictionWaitingListCfg: config.EvictionWaitingList, + SnapshotDbCfg: config.TrieSnapshotDB, + Marshalizer: marshalizer, + Hasher: hasher, + PathManager: pathManager, + ShardId: core.GetShardIdString(shardId), + } + trieFactory, err := triesFactory.NewTrieFactory(trieFactoryArgs) + if err != nil { + return nil, nil, err + } + + trieStorageManagers := make(map[string]data.StorageManager) + userStorageManager, userAccountTrie, err := trieFactory.Create(config.AccountsTrieStorage, config.StateTriesConfig.AccountsStatePruningEnabled) + if err != nil { + return nil, nil, err + } + trieContainer.Put([]byte(triesFactory.UserAccountTrie), userAccountTrie) + trieStorageManagers[triesFactory.UserAccountTrie] = userStorageManager + + peerStorageManager, peerAccountsTrie, err := trieFactory.Create(config.PeerAccountsTrieStorage, config.StateTriesConfig.PeerStatePruningEnabled) + if err != nil { + return nil, nil, err + } + trieContainer.Put([]byte(triesFactory.PeerAccountTrie), peerAccountsTrie) + trieStorageManagers[triesFactory.PeerAccountTrie] = peerStorageManager + + return trieStorageManagers, trieContainer, nil +} + +func convertToSlice(originalMap map[uint32][]*integrationTests.TestProcessorNode) []*integrationTests.TestProcessorNode { + sliceToRet := make([]*integrationTests.TestProcessorNode, 0) + for _, nodesPerShard := range originalMap { + for _, node := range nodesPerShard { + sliceToRet = append(sliceToRet, node) + } + } + + return sliceToRet +} + +func getInitialNodes(nodesMap 
map[uint32][]*integrationTests.TestProcessorNode) []*sharding.InitialNode { + sliceToRet := make([]*sharding.InitialNode, 0) + for _, nodesPerShard := range nodesMap { + for _, node := range nodesPerShard { + pubKeyBytes, _ := node.NodeKeys.Pk.ToByteArray() + addressBytes := node.OwnAccount.Address.Bytes() + entry := &sharding.InitialNode{ + PubKey: hex.EncodeToString(pubKeyBytes), + Address: hex.EncodeToString(addressBytes), + NodeInfo: sharding.NodeInfo{}, + } + sliceToRet = append(sliceToRet, entry) + } + } + + return sliceToRet +} + +func getGeneralConfig() config.Config { + return config.Config{ + EpochStartConfig: config.EpochStartConfig{ + MinRoundsBetweenEpochs: 5, + RoundsPerEpoch: 10, + }, + WhiteListPool: config.CacheConfig{ + Size: 10000, + Type: "LRU", + Shards: 1, + }, + StoragePruning: config.StoragePruningConfig{ + Enabled: false, + FullArchive: true, + NumEpochsToKeep: 3, + NumActivePersisters: 3, + }, + AccountsTrieStorage: config.StorageConfig{ + Cache: config.CacheConfig{ + Size: 10000, Type: "LRU", Shards: 1, + }, + DB: config.DBConfig{ + FilePath: "AccountsTrie/MainDB", + Type: "MemoryDB", + BatchDelaySeconds: 30, + MaxBatchSize: 6, + MaxOpenFiles: 10, + }, + }, + PeerAccountsTrieStorage: config.StorageConfig{ + Cache: config.CacheConfig{ + Size: 10000, Type: "LRU", Shards: 1, + }, + DB: config.DBConfig{ + FilePath: "PeerAccountsTrie/MainDB", + Type: "MemoryDB", + BatchDelaySeconds: 30, + MaxBatchSize: 6, + MaxOpenFiles: 10, + }, + }, + TxDataPool: config.CacheConfig{ + Size: 10000, Type: "LRU", Shards: 1, + }, + UnsignedTransactionDataPool: config.CacheConfig{ + Size: 10000, Type: "LRU", Shards: 1, + }, + RewardTransactionDataPool: config.CacheConfig{ + Size: 10000, Type: "LRU", Shards: 1, + }, + HeadersPoolConfig: config.HeadersPoolConfig{ + MaxHeadersPerShard: 100, + NumElementsToRemoveOnEviction: 1, + }, + TxBlockBodyDataPool: config.CacheConfig{ + Size: 10000, Type: "LRU", Shards: 1, + }, + PeerBlockBodyDataPool: config.CacheConfig{ + Size: 10000, Type: "LRU", Shards: 1, + }, + TrieNodesDataPool: config.CacheConfig{ + Size: 10000, Type: "LRU", Shards: 1, + }, + TxStorage: config.StorageConfig{ + Cache: config.CacheConfig{ + Size: 10000, Type: "LRU", Shards: 1, + }, + DB: config.DBConfig{ + FilePath: "Transactions", + Type: "MemoryDB", + BatchDelaySeconds: 30, + MaxBatchSize: 6, + MaxOpenFiles: 10, + }, + }, + MiniBlocksStorage: config.StorageConfig{ + Cache: config.CacheConfig{ + Size: 10000, Type: "LRU", Shards: 1, + }, + DB: config.DBConfig{ + FilePath: "MiniBlocks", + Type: "MemoryDB", + BatchDelaySeconds: 30, + MaxBatchSize: 6, + MaxOpenFiles: 10, + }, + }, + MiniBlockHeadersStorage: config.StorageConfig{ + Cache: config.CacheConfig{ + Size: 10000, Type: "LRU", Shards: 1, + }, + DB: config.DBConfig{ + FilePath: "MiniBlocks", + Type: "MemoryDB", + BatchDelaySeconds: 30, + MaxBatchSize: 6, + MaxOpenFiles: 10, + }, + }, + ShardHdrNonceHashStorage: config.StorageConfig{ + Cache: config.CacheConfig{ + Size: 10000, Type: "LRU", Shards: 1, + }, + DB: config.DBConfig{ + FilePath: "ShardHdrHashNonce", + Type: "MemoryDB", + BatchDelaySeconds: 30, + MaxBatchSize: 6, + MaxOpenFiles: 10, + }, + }, + MetaBlockStorage: config.StorageConfig{ + Cache: config.CacheConfig{ + Size: 10000, Type: "LRU", Shards: 1, + }, + DB: config.DBConfig{ + FilePath: "MetaBlock", + Type: "MemoryDB", + BatchDelaySeconds: 30, + MaxBatchSize: 6, + MaxOpenFiles: 10, + }, + }, + MetaHdrNonceHashStorage: config.StorageConfig{ + Cache: config.CacheConfig{ + Size: 10000, Type: "LRU", Shards: 1, + }, + 
DB: config.DBConfig{ + FilePath: "MetaHdrHashNonce", + Type: "MemoryDB", + BatchDelaySeconds: 30, + MaxBatchSize: 6, + MaxOpenFiles: 10, + }, + }, + UnsignedTransactionStorage: config.StorageConfig{ + Cache: config.CacheConfig{ + Size: 10000, Type: "LRU", Shards: 1, + }, + DB: config.DBConfig{ + FilePath: "UnsignedTransactions", + Type: "MemoryDB", + BatchDelaySeconds: 30, + MaxBatchSize: 6, + MaxOpenFiles: 10, + }, + }, + RewardTxStorage: config.StorageConfig{ + Cache: config.CacheConfig{ + Size: 10000, Type: "LRU", Shards: 1, + }, + DB: config.DBConfig{ + FilePath: "RewardTransactions", + Type: "MemoryDB", + BatchDelaySeconds: 30, + MaxBatchSize: 6, + MaxOpenFiles: 10, + }, + }, + BlockHeaderStorage: config.StorageConfig{ + Cache: config.CacheConfig{ + Size: 10000, Type: "LRU", Shards: 1, + }, + DB: config.DBConfig{ + FilePath: "BlockHeaders", + Type: "MemoryDB", + BatchDelaySeconds: 30, + MaxBatchSize: 6, + MaxOpenFiles: 10, + }, + }, + Heartbeat: config.HeartbeatConfig{ + HeartbeatStorage: config.StorageConfig{ + Cache: config.CacheConfig{ + Size: 10000, Type: "LRU", Shards: 1, + }, + DB: config.DBConfig{ + FilePath: "HeartbeatStorage", + Type: "MemoryDB", + BatchDelaySeconds: 30, + MaxBatchSize: 6, + MaxOpenFiles: 10, + }, + }, + }, + StatusMetricsStorage: config.StorageConfig{ + Cache: config.CacheConfig{ + Size: 10000, Type: "LRU", Shards: 1, + }, + DB: config.DBConfig{ + FilePath: "StatusMetricsStorageDB", + Type: "MemoryDB", + BatchDelaySeconds: 30, + MaxBatchSize: 6, + MaxOpenFiles: 10, + }, + }, + PeerBlockBodyStorage: config.StorageConfig{ + Cache: config.CacheConfig{ + Size: 10000, Type: "LRU", Shards: 1, + }, + DB: config.DBConfig{ + FilePath: "PeerBlocks", + Type: "MemoryDB", + BatchDelaySeconds: 30, + MaxBatchSize: 6, + MaxOpenFiles: 10, + }, + }, + BootstrapStorage: config.StorageConfig{ + Cache: config.CacheConfig{ + Size: 10000, Type: "LRU", Shards: 1, + }, + DB: config.DBConfig{ + FilePath: "BootstrapData", + Type: string(storageUnit.LvlDBSerial), + BatchDelaySeconds: 30, + MaxBatchSize: 6, + MaxOpenFiles: 10, + }, + }, + } +} diff --git a/integrationTests/nodesCoordinatorFactory.go b/integrationTests/nodesCoordinatorFactory.go index 89a90c65b92..28a4fb25c4a 100644 --- a/integrationTests/nodesCoordinatorFactory.go +++ b/integrationTests/nodesCoordinatorFactory.go @@ -51,7 +51,6 @@ func (tpn *IndexHashedNodesCoordinatorFactory) CreateNodesCoordinator(arg ArgInd SelfPublicKey: pubKeyBytes, ConsensusGroupCache: arg.consensusGroupCache, BootStorer: arg.bootStorer, - ListIndexUpdater: arg.listIndexUpdater, } nodesCoordinator, err := sharding.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) if err != nil { @@ -88,7 +87,6 @@ func (ihncrf *IndexHashedNodesCoordinatorWithRaterFactory) CreateNodesCoordinato SelfPublicKey: pubKeyBytes, ConsensusGroupCache: arg.consensusGroupCache, BootStorer: arg.bootStorer, - ListIndexUpdater: arg.listIndexUpdater, } baseCoordinator, err := sharding.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) diff --git a/integrationTests/p2p/networkSharding/networkSharding_test.go b/integrationTests/p2p/networkSharding/networkSharding_test.go index 1b9162a5e5e..2d6fe98b0de 100644 --- a/integrationTests/p2p/networkSharding/networkSharding_test.go +++ b/integrationTests/p2p/networkSharding/networkSharding_test.go @@ -124,7 +124,7 @@ func stopNodes(advertiser p2p.Messenger, nodesMap map[uint32][]*integrationTests func startNodes(nodesMap map[uint32][]*integrationTests.TestP2PNode) { for _, nodes := range nodesMap { for _, n := range nodes { - 
_ = n.Node.Start() + _ = n.Messenger.Bootstrap() } } } diff --git a/integrationTests/resolvers/testInitializer.go b/integrationTests/resolvers/testInitializer.go index 101d2363560..92f2c1d525f 100644 --- a/integrationTests/resolvers/testInitializer.go +++ b/integrationTests/resolvers/testInitializer.go @@ -28,8 +28,8 @@ func CreateResolverRequester( nResolver := integrationTests.NewTestProcessorNode(numShards, resolverShardID, txSignShardId, advertiserAddress) nRequester := integrationTests.NewTestProcessorNode(numShards, requesterShardID, txSignShardId, advertiserAddress) - _ = nRequester.Node.Start() - _ = nResolver.Node.Start() + nRequester.Node.Start() + nResolver.Node.Start() time.Sleep(time.Second) err := nRequester.Messenger.ConnectToPeer(integrationTests.GetConnectableAddress(nResolver.Messenger)) diff --git a/integrationTests/singleShard/block/interceptedRequestHdr/interceptedRequestHdr_test.go b/integrationTests/singleShard/block/interceptedRequestHdr/interceptedRequestHdr_test.go index 89929c2a5b2..980fa2d87b1 100644 --- a/integrationTests/singleShard/block/interceptedRequestHdr/interceptedRequestHdr_test.go +++ b/integrationTests/singleShard/block/interceptedRequestHdr/interceptedRequestHdr_test.go @@ -39,8 +39,8 @@ func TestNode_GenerateSendInterceptHeaderByNonceWithNetMessenger(t *testing.T) { fmt.Println("Resolver:") nResolver := integrationTests.NewTestProcessorNode(nrOfShards, shardID, txSignPrivKeyShardId, resolverNodeAddr) - _ = nRequester.Node.Start() - _ = nResolver.Node.Start() + nRequester.Node.Start() + nResolver.Node.Start() defer func() { _ = nRequester.Node.Stop() _ = nResolver.Node.Stop() @@ -98,8 +98,8 @@ func TestNode_InterceptedHeaderWithWrongChainIDShouldBeDiscarded(t *testing.T) { fmt.Println("Resolver:") nResolver := integrationTests.NewTestProcessorNode(nrOfShards, shardID, txSignPrivKeyShardId, resolverNodeAddr) - _ = nRequester.Node.Start() - _ = nResolver.Node.Start() + nRequester.Node.Start() + nResolver.Node.Start() defer func() { _ = nRequester.Node.Stop() _ = nResolver.Node.Stop() diff --git a/integrationTests/singleShard/block/interceptedRequestTxBlockBody/interceptedRequestTxBlockBody_test.go b/integrationTests/singleShard/block/interceptedRequestTxBlockBody/interceptedRequestTxBlockBody_test.go index 6b298f65c40..5cc938826c9 100644 --- a/integrationTests/singleShard/block/interceptedRequestTxBlockBody/interceptedRequestTxBlockBody_test.go +++ b/integrationTests/singleShard/block/interceptedRequestTxBlockBody/interceptedRequestTxBlockBody_test.go @@ -32,8 +32,8 @@ func TestNode_GenerateSendInterceptTxBlockBodyWithNetMessenger(t *testing.T) { fmt.Println("Resolver:") nResolver := integrationTests.NewTestProcessorNode(nrOfShards, shardID, txSignPrivKeyShardId, resolverNodeAddr) - _ = nRequester.Node.Start() - _ = nResolver.Node.Start() + nRequester.Node.Start() + nResolver.Node.Start() defer func() { _ = nRequester.Node.Stop() diff --git a/integrationTests/singleShard/transaction/interceptedBulkTx/interceptedBulkTx_test.go b/integrationTests/singleShard/transaction/interceptedBulkTx/interceptedBulkTx_test.go index 3977344f3e9..022b5155727 100644 --- a/integrationTests/singleShard/transaction/interceptedBulkTx/interceptedBulkTx_test.go +++ b/integrationTests/singleShard/transaction/interceptedBulkTx/interceptedBulkTx_test.go @@ -30,13 +30,13 @@ func TestNode_GenerateSendInterceptBulkTransactionsWithMessenger(t *testing.T) { nodeAddr := "0" n := integrationTests.NewTestProcessorNode(nrOfShards, shardID, txSignPrivKeyShardId, nodeAddr) - _ = 
n.Node.Start() + n.Node.Start() defer func() { _ = n.Node.Stop() }() - _ = n.Node.P2PBootstrap() + _ = n.Messenger.Bootstrap() time.Sleep(stepDelay) diff --git a/integrationTests/singleShard/transaction/interceptedBulkUnsignedTx/interceptedBulkUnsignedTx_test.go b/integrationTests/singleShard/transaction/interceptedBulkUnsignedTx/interceptedBulkUnsignedTx_test.go index dc72984ce25..48634a1dc1d 100644 --- a/integrationTests/singleShard/transaction/interceptedBulkUnsignedTx/interceptedBulkUnsignedTx_test.go +++ b/integrationTests/singleShard/transaction/interceptedBulkUnsignedTx/interceptedBulkUnsignedTx_test.go @@ -36,13 +36,13 @@ func TestNode_GenerateSendInterceptBulkUnsignedTransactionsWithMessenger(t *test nodeAddr := "0" n := integrationTests.NewTestProcessorNode(nrOfShards, shardID, txSignPrivKeyShardId, nodeAddr) - _ = n.Node.Start() + n.Node.Start() defer func() { _ = n.Node.Stop() }() - _ = n.Node.P2PBootstrap() + _ = n.Messenger.Bootstrap() time.Sleep(time.Second) diff --git a/integrationTests/singleShard/transaction/interceptedResolvedTx/interceptedResolvedTx_test.go b/integrationTests/singleShard/transaction/interceptedResolvedTx/interceptedResolvedTx_test.go index 9c10256ebc4..3791ae9bea2 100644 --- a/integrationTests/singleShard/transaction/interceptedResolvedTx/interceptedResolvedTx_test.go +++ b/integrationTests/singleShard/transaction/interceptedResolvedTx/interceptedResolvedTx_test.go @@ -34,8 +34,8 @@ func TestNode_RequestInterceptTransactionWithMessenger(t *testing.T) { fmt.Println("Resolver:") nResolver := integrationTests.NewTestProcessorNode(nrOfShards, shardID, txSignPrivKeyShardId, resolverNodeAddr) - _ = nRequester.Node.Start() - _ = nResolver.Node.Start() + nRequester.Node.Start() + nResolver.Node.Start() defer func() { _ = nRequester.Node.Stop() _ = nResolver.Node.Stop() @@ -126,8 +126,8 @@ func TestNode_RequestInterceptRewardTransactionWithMessenger(t *testing.T) { fmt.Println("Resolver:") nResolver := integrationTests.NewTestProcessorNode(nrOfShards, shardID, txSignPrivKeyShardId, resolverNodeAddr) - _ = nRequester.Node.Start() - _ = nResolver.Node.Start() + nRequester.Node.Start() + nResolver.Node.Start() defer func() { _ = nRequester.Node.Stop() _ = nResolver.Node.Stop() diff --git a/integrationTests/singleShard/transaction/interceptedResolvedUnsignedTx/interceptedResolvedUnsignedTx_test.go b/integrationTests/singleShard/transaction/interceptedResolvedUnsignedTx/interceptedResolvedUnsignedTx_test.go index 088af24307f..ce0994b3385 100644 --- a/integrationTests/singleShard/transaction/interceptedResolvedUnsignedTx/interceptedResolvedUnsignedTx_test.go +++ b/integrationTests/singleShard/transaction/interceptedResolvedUnsignedTx/interceptedResolvedUnsignedTx_test.go @@ -30,8 +30,8 @@ func TestNode_RequestInterceptUnsignedTransactionWithMessenger(t *testing.T) { fmt.Println("Resolver:") nResolver := integrationTests.NewTestProcessorNode(nrOfShards, shardID, txSignPrivKeyShardId, resolverNodeAddr) - _ = nRequester.Node.Start() - _ = nResolver.Node.Start() + nRequester.Node.Start() + nResolver.Node.Start() defer func() { _ = nRequester.Node.Stop() _ = nResolver.Node.Stop() diff --git a/integrationTests/state/stateTrieSync/stateTrieSync_test.go b/integrationTests/state/stateTrieSync/stateTrieSync_test.go index d362e541148..48bddca9ca1 100644 --- a/integrationTests/state/stateTrieSync/stateTrieSync_test.go +++ b/integrationTests/state/stateTrieSync/stateTrieSync_test.go @@ -1,6 +1,7 @@ package stateTrieSync import ( + "context" "fmt" "testing" "time" @@ -32,8 +33,8 @@ 
func TestNode_RequestInterceptTrieNodesWithMessenger(t *testing.T) { fmt.Println("Resolver:") nResolver := integrationTests.NewTestProcessorNode(nrOfShards, shardID, txSignPrivKeyShardId, resolverNodeAddr) - _ = nRequester.Node.Start() - _ = nResolver.Node.Start() + nRequester.Node.Start() + nResolver.Node.Start() defer func() { _ = nRequester.Node.Stop() _ = nResolver.Node.Stop() @@ -63,11 +64,15 @@ func TestNode_RequestInterceptTrieNodesWithMessenger(t *testing.T) { whiteListHandler, 10000, nRequester.ShardCoordinator.SelfId(), + time.Second, ) - waitTime := 5 * time.Second - trieSyncer, _ := trie.NewTrieSyncer(requestHandler, nRequester.DataPool.TrieNodes(), requesterTrie, waitTime, core.MetachainShardId, factory.AccountTrieNodesTopic) - err = trieSyncer.StartSyncing(rootHash) + waitTime := 10 * time.Second + trieSyncer, _ := trie.NewTrieSyncer(requestHandler, nRequester.DataPool.TrieNodes(), requesterTrie, core.MetachainShardId, factory.AccountTrieNodesTopic) + ctx, cancel := context.WithTimeout(context.Background(), waitTime) + defer cancel() + + err = trieSyncer.StartSyncing(rootHash, ctx) assert.Nil(t, err) newRootHash, _ := requesterTrie.Root() diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index 8429859df01..1d0b7474db3 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -1006,8 +1006,8 @@ func DisplayAndStartNodes(nodes []*TestProcessorNode) { hex.EncodeToString(skBuff), hex.EncodeToString(pkBuff), ) - _ = n.Node.Start() - _ = n.Node.P2PBootstrap() + n.Node.Start() + _ = n.Messenger.Bootstrap() } fmt.Println("Delaying for node bootstrap and topic announcement...") diff --git a/integrationTests/testP2PNode.go b/integrationTests/testP2PNode.go index f8f5b329aa4..27174c5a6be 100644 --- a/integrationTests/testP2PNode.go +++ b/integrationTests/testP2PNode.go @@ -260,7 +260,6 @@ func CreateNodesWithTestP2PNodes( WaitingNodes: make(map[uint32][]sharding.Validator), Epoch: 0, EpochStartNotifier: &mock.EpochStartNotifierStub{}, - ListIndexUpdater: &mock.ListIndexUpdaterStub{}, } nodesCoordinator, err := sharding.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) log.LogIfError(err) @@ -300,7 +299,6 @@ func CreateNodesWithTestP2PNodes( WaitingNodes: make(map[uint32][]sharding.Validator), Epoch: 0, EpochStartNotifier: &mock.EpochStartNotifierStub{}, - ListIndexUpdater: &mock.ListIndexUpdaterStub{}, } nodesCoordinator, err := sharding.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) log.LogIfError(err) diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index d55206e062a..26a0f1f4b11 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -666,6 +666,7 @@ func (tpn *TestProcessorNode) initResolvers() { tpn.WhiteListHandler, 100, tpn.ShardCoordinator.SelfId(), + time.Second, ) } else { resolversContainerFactory, _ := resolverscontainer.NewShardResolversContainerFactory(resolverContainerFactory) @@ -680,6 +681,7 @@ func (tpn *TestProcessorNode) initResolvers() { tpn.WhiteListHandler, 100, tpn.ShardCoordinator.SelfId(), + time.Second, ) } } diff --git a/integrationTests/testProcessorNodeWithMultisigner.go b/integrationTests/testProcessorNodeWithMultisigner.go index 891e45d425c..ac330f7f196 100644 --- a/integrationTests/testProcessorNodeWithMultisigner.go +++ b/integrationTests/testProcessorNodeWithMultisigner.go @@ -299,7 +299,6 @@ func CreateNodesWithNodesCoordinatorAndHeaderSigVerifier( WaitingNodes: 
make(map[uint32][]sharding.Validator), SelfPublicKey: []byte(strconv.Itoa(int(shardId))), ConsensusGroupCache: consensusCache, - ListIndexUpdater: &mock.ListIndexUpdaterStub{}, } nodesCoordinator, err := sharding.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) @@ -378,7 +377,6 @@ func CreateNodesWithNodesCoordinatorKeygenAndSingleSigner( WaitingNodes: waitingMap, SelfPublicKey: []byte(strconv.Itoa(int(shardId))), ConsensusGroupCache: cache, - ListIndexUpdater: &mock.ListIndexUpdaterStub{}, } nodesCoordinator, err := sharding.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) diff --git a/node/mock/chainStorerMock.go b/node/mock/chainStorerMock.go index 51d5de42132..0d5b0600cd7 100644 --- a/node/mock/chainStorerMock.go +++ b/node/mock/chainStorerMock.go @@ -6,7 +6,7 @@ import ( "github.com/pkg/errors" ) -// ChainStorerMock is a mock implementation of the ChianStorer interface +// ChainStorerMock is a mock implementation of the ChainStorer interface type ChainStorerMock struct { AddStorerCalled func(key dataRetriever.UnitType, s storage.Storer) GetStorerCalled func(unitType dataRetriever.UnitType) storage.Storer diff --git a/node/mock/nodesCoordinatorMock.go b/node/mock/nodesCoordinatorMock.go index 169c290fe14..f5fec4392c2 100644 --- a/node/mock/nodesCoordinatorMock.go +++ b/node/mock/nodesCoordinatorMock.go @@ -12,11 +12,6 @@ type NodesCoordinatorMock struct { GetAllEligibleValidatorsPublicKeysCalled func() (map[uint32][][]byte, error) } -// UpdatePeersListAndIndex - -func (ncm *NodesCoordinatorMock) UpdatePeersListAndIndex() error { - return nil -} - // GetAllEligibleValidatorsPublicKeys - func (ncm *NodesCoordinatorMock) GetAllEligibleValidatorsPublicKeys(_ uint32) (map[uint32][][]byte, error) { if ncm.GetAllEligibleValidatorsPublicKeysCalled != nil { @@ -92,36 +87,11 @@ func (ncm *NodesCoordinatorMock) GetConsensusValidatorsPublicKeys( return pubKeys, nil } -// GetConsensusValidatorsRewardsAddresses - -func (ncm *NodesCoordinatorMock) GetConsensusValidatorsRewardsAddresses( - randomness []byte, - round uint64, - shardId uint32, - epoch uint32, -) ([]string, error) { - if ncm.GetValidatorsPublicKeysCalled != nil { - return ncm.GetValidatorsRewardsAddressesCalled(randomness, round, shardId, epoch) - } - - validators, err := ncm.ComputeConsensusGroup(randomness, round, shardId, epoch) - if err != nil { - return nil, err - } - - addresses := make([]string, 0) - for _, v := range validators { - addresses = append(addresses, string(v.Address())) - } - - return addresses, nil -} - // SetNodesPerShards - func (ncm *NodesCoordinatorMock) SetNodesPerShards( _ map[uint32][]sharding.Validator, _ map[uint32][]sharding.Validator, _ uint32, - _ bool, ) error { return nil } @@ -180,7 +150,7 @@ func (ncm *NodesCoordinatorMock) GetOwnPublicKey() []byte { } // GetNodesPerShard - -func (ncm *NodesCoordinatorMock) GetNodesPerShard(epoch uint32) (map[uint32][]sharding.Validator, error) { +func (ncm *NodesCoordinatorMock) GetNodesPerShard(_ uint32) (map[uint32][]sharding.Validator, error) { return nil, nil } diff --git a/node/mock/requestHandlerStub.go b/node/mock/requestHandlerStub.go index 19278e33634..3565a7a2fc7 100644 --- a/node/mock/requestHandlerStub.go +++ b/node/mock/requestHandlerStub.go @@ -1,5 +1,7 @@ package mock +import "time" + // RequestHandlerStub - type RequestHandlerStub struct { RequestShardHeaderCalled func(shardID uint32, hash []byte) @@ -15,6 +17,11 @@ type RequestHandlerStub struct { RequestStartOfEpochMetaBlockCalled func(epoch uint32) } +// RequestInterval - +func 
(rhs *RequestHandlerStub) RequestInterval() time.Duration { + return time.Second +} + // RequestStartOfEpochMetaBlock - func (rhs *RequestHandlerStub) RequestStartOfEpochMetaBlock(epoch uint32) { if rhs.RequestStartOfEpochMetaBlockCalled == nil { @@ -24,7 +31,7 @@ func (rhs *RequestHandlerStub) RequestStartOfEpochMetaBlock(epoch uint32) { } // SetEpoch - -func (rhs *RequestHandlerStub) SetEpoch(epoch uint32) { +func (rhs *RequestHandlerStub) SetEpoch(_ uint32) { } // RequestShardHeader - diff --git a/node/node.go b/node/node.go index 8f2666358a9..de4609523c1 100644 --- a/node/node.go +++ b/node/node.go @@ -165,13 +165,11 @@ func (n *Node) IsRunning() bool { return n.isRunning } -// Start will create a new messenger and and set up the Node state as running -func (n *Node) Start() error { - err := n.P2PBootstrap() - if err == nil { - n.isRunning = true - } - return err +// TODO: delete useless IsRunning, Start and Stop - too many usages in tests for this PR. + +// Start will set up the Node state as running +func (n *Node) Start() { + n.isRunning = true } // Stop closes the messenger and undos everything done in Start @@ -179,23 +177,10 @@ func (n *Node) Stop() error { if !n.IsRunning() { return nil } - err := n.messenger.Close() - if err != nil { - return err - } return nil } -// P2PBootstrap will try to connect to many peers as possible -func (n *Node) P2PBootstrap() error { - if n.messenger == nil { - return ErrNilMessenger - } - - return n.messenger.Bootstrap() -} - // CreateShardedStores instantiate sharded cachers for Transactions and Headers func (n *Node) CreateShardedStores() error { if n.shardCoordinator == nil { diff --git a/node/node_test.go b/node/node_test.go index 0b7b6a75e27..7d567ec0ba5 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -120,13 +120,6 @@ func TestNewNode_ApplyNilOptionShouldError(t *testing.T) { assert.NotNil(t, err) } -func TestStart_NoMessenger(t *testing.T) { - n, _ := node.NewNode() - err := n.Start() - defer func() { _ = n.Stop() }() - assert.NotNil(t, err) -} - func TestStart_CorrectParams(t *testing.T) { messenger := getMessenger() n, _ := node.NewNode( @@ -137,9 +130,8 @@ func TestStart_CorrectParams(t *testing.T) { node.WithAddressConverter(&mock.AddressConverterStub{}), node.WithAccountsAdapter(&mock.AccountsStub{}), ) - err := n.Start() + n.Start() defer func() { _ = n.Stop() }() - assert.Nil(t, err) assert.True(t, n.IsRunning()) } @@ -153,11 +145,10 @@ func TestStart_CannotApplyOptions(t *testing.T) { node.WithAddressConverter(&mock.AddressConverterStub{}), node.WithAccountsAdapter(&mock.AccountsStub{}), ) - err := n.Start() - require.Nil(t, err) + n.Start() defer func() { _ = n.Stop() }() - err = n.ApplyOptions(node.WithDataPool(&mock.PoolsHolderStub{})) + err := n.ApplyOptions(node.WithDataPool(&mock.PoolsHolderStub{})) require.Error(t, err) } @@ -176,9 +167,8 @@ func TestStart_CorrectParamsApplyingOptions(t *testing.T) { logError(err) - err = n.Start() + n.Start() defer func() { _ = n.Stop() }() - assert.Nil(t, err) assert.True(t, n.IsRunning()) } @@ -191,9 +181,8 @@ func TestApplyOptions_NodeStarted(t *testing.T) { node.WithVmMarshalizer(getMarshalizer()), node.WithHasher(getHasher()), ) - err := n.Start() + n.Start() defer func() { _ = n.Stop() }() - logError(err) assert.True(t, n.IsRunning()) } @@ -211,39 +200,16 @@ func TestStop_NotStartedYet(t *testing.T) { assert.False(t, n.IsRunning()) } -func TestStop_MessengerCloseErrors(t *testing.T) { - errorString := "messenger close error" - messenger := getMessenger() - 
messenger.CloseCalled = func() error { - return errors.New(errorString) - } - n, _ := node.NewNode( - node.WithMessenger(messenger), - node.WithInternalMarshalizer(getMarshalizer(), testSizeCheckDelta), - node.WithVmMarshalizer(getMarshalizer()), - node.WithHasher(getHasher()), - ) - - _ = n.Start() - - err := n.Stop() - assert.NotNil(t, err) - assert.Contains(t, err.Error(), errorString) -} - func TestStop(t *testing.T) { - n, _ := node.NewNode( node.WithInternalMarshalizer(getMarshalizer(), testSizeCheckDelta), node.WithVmMarshalizer(getMarshalizer()), node.WithHasher(getHasher()), ) - err := n.Start() - logError(err) + n.Start() - err = n.Stop() + err := n.Stop() assert.Nil(t, err) - assert.False(t, n.IsRunning()) } func TestGetBalance_NoAddrConverterShouldError(t *testing.T) { @@ -773,10 +739,9 @@ func TestCreateShardedStores_NilShardCoordinatorShouldError(t *testing.T) { node.WithAddressConverter(&mock.AddressConverterStub{}), node.WithAccountsAdapter(&mock.AccountsStub{}), ) - err := n.Start() - logError(err) + n.Start() defer func() { _ = n.Stop() }() - err = n.CreateShardedStores() + err := n.CreateShardedStores() assert.NotNil(t, err) assert.Contains(t, err.Error(), "nil shard coordinator") } @@ -794,10 +759,9 @@ func TestCreateShardedStores_NilDataPoolShouldError(t *testing.T) { node.WithAddressConverter(&mock.AddressConverterStub{}), node.WithAccountsAdapter(&mock.AccountsStub{}), ) - err := n.Start() - logError(err) + n.Start() defer func() { _ = n.Stop() }() - err = n.CreateShardedStores() + err := n.CreateShardedStores() assert.NotNil(t, err) assert.Contains(t, err.Error(), "nil data pool") } @@ -823,10 +787,9 @@ func TestCreateShardedStores_NilTransactionDataPoolShouldError(t *testing.T) { node.WithAddressConverter(&mock.AddressConverterStub{}), node.WithAccountsAdapter(&mock.AccountsStub{}), ) - err := n.Start() - logError(err) + n.Start() defer func() { _ = n.Stop() }() - err = n.CreateShardedStores() + err := n.CreateShardedStores() assert.NotNil(t, err) assert.Contains(t, err.Error(), "nil transaction sharded data store") } @@ -853,10 +816,9 @@ func TestCreateShardedStores_NilHeaderDataPoolShouldError(t *testing.T) { node.WithAddressConverter(&mock.AddressConverterStub{}), node.WithAccountsAdapter(&mock.AccountsStub{}), ) - err := n.Start() - logError(err) + n.Start() defer func() { _ = n.Stop() }() - err = n.CreateShardedStores() + err := n.CreateShardedStores() assert.NotNil(t, err) assert.Contains(t, err.Error(), "nil header sharded data store") } @@ -890,10 +852,9 @@ func TestCreateShardedStores_ReturnsSuccessfully(t *testing.T) { node.WithAddressConverter(&mock.AddressConverterStub{}), node.WithAccountsAdapter(&mock.AccountsStub{}), ) - err := n.Start() - logError(err) + n.Start() defer func() { _ = n.Stop() }() - err = n.CreateShardedStores() + err := n.CreateShardedStores() assert.Nil(t, err) assert.True(t, containString(process.ShardCacherIdentifier(0, 0), txShardedStores)) diff --git a/p2p/libp2p/netMessenger.go b/p2p/libp2p/netMessenger.go index fd41c3df79b..ac2630b96a1 100644 --- a/p2p/libp2p/netMessenger.go +++ b/p2p/libp2p/netMessenger.go @@ -472,7 +472,7 @@ func (netMes *networkMessenger) CreateTopic(name string, createChannelForTopic b _, found := netMes.topics[name] if found { netMes.mutTopics.Unlock() - return p2p.ErrTopicAlreadyExists + return nil } //TODO investigate if calling Subscribe on the pubsub impl does exactly the same thing as Topic.Subscribe @@ -603,7 +603,28 @@ func (netMes *networkMessenger) RegisterMessageProcessor(topic string, handler p 
return nil } -// UnregisterMessageProcessor registers a message processes on a topic +// UnregisterAllMessageProcessors will unregister all message processors for all topics +func (netMes *networkMessenger) UnregisterAllMessageProcessors() error { + netMes.mutTopics.Lock() + defer netMes.mutTopics.Unlock() + + for topic, validator := range netMes.topics { + if validator == nil { + return p2p.ErrTopicValidatorOperationNotSupported + } + + err := netMes.pb.UnregisterTopicValidator(topic) + if err != nil { + return err + } + + netMes.topics[topic] = nil + } + + return nil +} + +// UnregisterMessageProcessor unregisters a message processor on a topic func (netMes *networkMessenger) UnregisterMessageProcessor(topic string) error { netMes.mutTopics.Lock() defer netMes.mutTopics.Unlock() diff --git a/p2p/libp2p/netMessenger_test.go b/p2p/libp2p/netMessenger_test.go index 0b98d07b287..3ef7b654c42 100644 --- a/p2p/libp2p/netMessenger_test.go +++ b/p2p/libp2p/netMessenger_test.go @@ -314,12 +314,12 @@ func TestLibp2pMessenger_CreateTopicOkValsShouldWork(t *testing.T) { _ = mes.Close() } -func TestLibp2pMessenger_CreateTopicTwiceShouldErr(t *testing.T) { +func TestLibp2pMessenger_CreateTopicTwiceShouldNotErr(t *testing.T) { mes := createMockMessenger() _ = mes.CreateTopic("test", false) err := mes.CreateTopic("test", false) - assert.Equal(t, p2p.ErrTopicAlreadyExists, err) + assert.Nil(t, err) _ = mes.Close() } diff --git a/p2p/p2p.go b/p2p/p2p.go index a5594ce4a5a..9817cfe3879 100644 --- a/p2p/p2p.go +++ b/p2p/p2p.go @@ -137,6 +137,11 @@ type Messenger interface { // specified topic. RegisterMessageProcessor(topic string, handler MessageProcessor) error + // UnregisterAllMessageProcessors removes all the MessageProcessors set by the + // Messenger from the list of registered handlers for the messages on + // all topics. + UnregisterAllMessageProcessors() error + // UnregisterMessageProcessor removes the MessageProcessor set by the // Messenger from the list of registered handlers for the messages on the // given topic. 
diff --git a/process/block/bootstrapStorage/bootstrapStorer.go b/process/block/bootstrapStorage/bootstrapStorer.go index 6e87d2fb1cf..c4baa663748 100644 --- a/process/block/bootstrapStorage/bootstrapStorer.go +++ b/process/block/bootstrapStorage/bootstrapStorer.go @@ -6,14 +6,12 @@ import ( "strconv" "sync/atomic" + "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/storage" ) -// HighestRoundFromBootStorage is the key for the highest round that is saved in storage -const highestRoundFromBootStorage = "highestRoundFromBootStorage" - // ErrNilMarshalizer signals that an operation has been attempted to or with a nil Marshalizer implementation var ErrNilMarshalizer = errors.New("nil Marshalizer") @@ -69,7 +67,7 @@ func (bs *bootstrapStorer) Put(round int64, bootData BootstrapData) error { return err } - err = bs.store.Put([]byte(highestRoundFromBootStorage), roundBytes) + err = bs.store.Put([]byte(core.HighestRoundFromBootStorage), roundBytes) if err != nil { return err } @@ -98,7 +96,7 @@ func (bs *bootstrapStorer) Get(round int64) (BootstrapData, error) { // GetHighestRound will return highest round saved in storage func (bs *bootstrapStorer) GetHighestRound() int64 { - roundBytes, err := bs.store.Get([]byte(highestRoundFromBootStorage)) + roundBytes, err := bs.store.Get([]byte(core.HighestRoundFromBootStorage)) if err != nil { return 0 } @@ -122,7 +120,7 @@ func (bs *bootstrapStorer) SaveLastRound(round int64) error { return err } - err = bs.store.Put([]byte(highestRoundFromBootStorage), roundBytes) + err = bs.store.Put([]byte(core.HighestRoundFromBootStorage), roundBytes) if err != nil { return err } diff --git a/process/block/metablock.go b/process/block/metablock.go index dd007962c0c..3b46cdcfc64 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -1547,7 +1547,7 @@ func (mp *metaProcessor) requestMissingFinalityAttestingShardHeaders() uint32 { } func (mp *metaProcessor) requestShardHeaders(metaBlock *block.MetaBlock) (uint32, uint32) { - _ = process.EmptyChannel(mp.chRcvAllHdrs) + _ = core.EmptyChannel(mp.chRcvAllHdrs) if len(metaBlock.ShardInfo) == 0 { return 0, 0 diff --git a/process/block/preprocess/rewardTxPreProcessor.go b/process/block/preprocess/rewardTxPreProcessor.go index 5b03f44311c..815ed119dcb 100644 --- a/process/block/preprocess/rewardTxPreProcessor.go +++ b/process/block/preprocess/rewardTxPreProcessor.go @@ -260,7 +260,7 @@ func (rtp *rewardTxPreprocessor) receivedRewardTransaction(txHash []byte) { // CreateBlockStarted cleans the local cache map for processed/created reward transactions at this round func (rtp *rewardTxPreprocessor) CreateBlockStarted() { - _ = process.EmptyChannel(rtp.chReceivedAllRewardTxs) + _ = core.EmptyChannel(rtp.chReceivedAllRewardTxs) rtp.rewardTxsForBlock.mutTxsForBlock.Lock() rtp.rewardTxsForBlock.missingTxs = 0 diff --git a/process/block/preprocess/smartContractResults.go b/process/block/preprocess/smartContractResults.go index a6218f81054..b4e0b322293 100644 --- a/process/block/preprocess/smartContractResults.go +++ b/process/block/preprocess/smartContractResults.go @@ -281,7 +281,7 @@ func (scr *smartContractResults) receivedSmartContractResult(txHash []byte) { // CreateBlockStarted cleans the local cache map for processed/created smartContractResults at this round func (scr *smartContractResults) CreateBlockStarted() { - _ = process.EmptyChannel(scr.chRcvAllScrs) + _ = 
core.EmptyChannel(scr.chRcvAllScrs) scr.scrForBlock.mutTxsForBlock.Lock() scr.scrForBlock.missingTxs = 0 diff --git a/process/block/preprocess/transactions.go b/process/block/preprocess/transactions.go index c5cc252b849..991183ac73d 100644 --- a/process/block/preprocess/transactions.go +++ b/process/block/preprocess/transactions.go @@ -516,7 +516,7 @@ func (txs *transactions) receivedTransaction(txHash []byte) { // CreateBlockStarted cleans the local cache map for processed/created transactions at this round func (txs *transactions) CreateBlockStarted() { - _ = process.EmptyChannel(txs.chRcvAllTxs) + _ = core.EmptyChannel(txs.chRcvAllTxs) txs.txsForCurrBlock.mutTxsForBlock.Lock() txs.txsForCurrBlock.missingTxs = 0 diff --git a/process/block/shardblock.go b/process/block/shardblock.go index df684d98240..2eba4176108 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -1403,7 +1403,7 @@ func (sp *shardProcessor) receivedMetaBlock(headerHandler data.HeaderHandler, me } func (sp *shardProcessor) requestMetaHeaders(shardHeader *block.Header) (uint32, uint32) { - _ = process.EmptyChannel(sp.chRcvAllMetaHdrs) + _ = core.EmptyChannel(sp.chRcvAllMetaHdrs) if len(shardHeader.MetaBlockHashes) == 0 { return 0, 0 diff --git a/process/common.go b/process/common.go index aa235a6a3d9..f654f29d72d 100644 --- a/process/common.go +++ b/process/common.go @@ -20,19 +20,6 @@ import ( var log = logger.GetOrCreate("process") -// EmptyChannel empties the given channel -func EmptyChannel(ch chan bool) int { - readsCnt := 0 - for { - select { - case <-ch: - readsCnt++ - default: - return readsCnt - } - } -} - // GetShardHeader gets the header, which is associated with the given hash, from pool or storage func GetShardHeader( hash []byte, diff --git a/process/common_test.go b/process/common_test.go index e3216a7180c..4e0c32c74ee 100644 --- a/process/common_test.go +++ b/process/common_test.go @@ -4,10 +4,7 @@ import ( "bytes" "errors" "math/big" - "sync" - "sync/atomic" "testing" - "time" "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/data" @@ -20,62 +17,6 @@ import ( "github.com/stretchr/testify/assert" ) -func TestEmptyChannelShouldWorkOnBufferedChannel(t *testing.T) { - ch := make(chan bool, 10) - - assert.Equal(t, 0, len(ch)) - readsCnt := process.EmptyChannel(ch) - assert.Equal(t, 0, len(ch)) - assert.Equal(t, 0, readsCnt) - - ch <- true - ch <- true - ch <- true - - assert.Equal(t, 3, len(ch)) - readsCnt = process.EmptyChannel(ch) - assert.Equal(t, 0, len(ch)) - assert.Equal(t, 3, readsCnt) -} - -func TestEmptyChannelShouldWorkOnNotBufferedChannel(t *testing.T) { - ch := make(chan bool) - - assert.Equal(t, 0, len(ch)) - readsCnt := int32(process.EmptyChannel(ch)) - assert.Equal(t, 0, len(ch)) - assert.Equal(t, int32(0), readsCnt) - - wg := sync.WaitGroup{} - wgChanWasWritten := sync.WaitGroup{} - numConcurrentWrites := 50 - wg.Add(numConcurrentWrites) - wgChanWasWritten.Add(numConcurrentWrites) - for i := 0; i < numConcurrentWrites; i++ { - go func() { - wg.Done() - time.Sleep(time.Millisecond) - ch <- true - wgChanWasWritten.Done() - }() - } - - // wait for go routines to start - wg.Wait() - - go func() { - for readsCnt < int32(numConcurrentWrites) { - atomic.AddInt32(&readsCnt, int32(process.EmptyChannel(ch))) - } - }() - - // wait for go routines to finish - wgChanWasWritten.Wait() - - assert.Equal(t, 0, len(ch)) - assert.Equal(t, int32(numConcurrentWrites), atomic.LoadInt32(&readsCnt)) -} - func TestGetShardHeaderShouldErrNilCacher(t *testing.T) { hash 
:= []byte("X") diff --git a/process/factory/interceptorscontainer/baseInterceptorsContainerFactory.go b/process/factory/interceptorscontainer/baseInterceptorsContainerFactory.go index 16d6733f927..69cfaeccdd7 100644 --- a/process/factory/interceptorscontainer/baseInterceptorsContainerFactory.go +++ b/process/factory/interceptorscontainer/baseInterceptorsContainerFactory.go @@ -462,3 +462,19 @@ func (bicf *baseInterceptorsContainerFactory) generateUnsignedTxsInterceptors() return bicf.container.AddMultiple(keys, interceptorsSlice) } + +// SetWhiteListHandlerToInterceptors will set the white list handler to all given interceptors +func SetWhiteListHandlerToInterceptors(container process.InterceptorsContainer, handler process.WhiteListHandler) error { + var err error + + container.Iterate(func(key string, interceptor process.Interceptor) bool { + errFound := interceptor.SetIsDataForCurrentShardVerifier(handler) + if errFound != nil { + err = errFound + return false + } + return true + }) + + return err +} diff --git a/process/interface.go b/process/interface.go index e3c8bd587f7..256be730d9d 100644 --- a/process/interface.go +++ b/process/interface.go @@ -434,6 +434,7 @@ type RequestHandler interface { RequestMiniBlocks(destShardID uint32, miniblocksHashes [][]byte) RequestTrieNodes(destShardID uint32, hash []byte, topic string) RequestStartOfEpochMetaBlock(epoch uint32) + RequestInterval() time.Duration IsInterfaceNil() bool } @@ -765,3 +766,11 @@ type InterceptedDataWhiteList interface { Add(keys [][]byte) IsInterfaceNil() bool } + +// WhiteListHandler is the interface needed to add whitelisted data +type WhiteListHandler interface { + Remove(keys [][]byte) + Add(keys [][]byte) + IsForCurrentShard(interceptedData InterceptedData) bool + IsInterfaceNil() bool +} diff --git a/process/mock/chainStorerMock.go b/process/mock/chainStorerMock.go index 2a1c0f1c5c2..891326e299c 100644 --- a/process/mock/chainStorerMock.go +++ b/process/mock/chainStorerMock.go @@ -6,7 +6,7 @@ import ( "github.com/pkg/errors" ) -// ChainStorerMock is a mock implementation of the ChianStorer interface +// ChainStorerMock is a mock implementation of the ChainStorer interface type ChainStorerMock struct { AddStorerCalled func(key dataRetriever.UnitType, s storage.Storer) GetStorerCalled func(unitType dataRetriever.UnitType) storage.Storer diff --git a/process/mock/nodesCoordinatorMock.go b/process/mock/nodesCoordinatorMock.go index e9b1a2e38a4..d3a2fac35b2 100644 --- a/process/mock/nodesCoordinatorMock.go +++ b/process/mock/nodesCoordinatorMock.go @@ -63,6 +63,11 @@ func NewNodesCoordinatorMock() *NodesCoordinatorMock { } } +// SetConfig - +func (ncm *NodesCoordinatorMock) SetConfig(_ *sharding.NodesCoordinatorRegistry) error { + return nil +} + // GetNumTotalEligible - func (ncm *NodesCoordinatorMock) GetNumTotalEligible() uint64 { return 1 @@ -133,36 +138,11 @@ func (ncm *NodesCoordinatorMock) GetConsensusValidatorsPublicKeys( return valGrStr, nil } -// GetConsensusValidatorsRewardsAddresses - -func (ncm *NodesCoordinatorMock) GetConsensusValidatorsRewardsAddresses( - randomness []byte, - round uint64, - shardId uint32, - epoch uint32, -) ([]string, error) { - if ncm.GetValidatorsPublicKeysCalled != nil { - return ncm.GetValidatorsRewardsAddressesCalled(randomness, round, shardId, epoch) - } - - validators, err := ncm.ComputeConsensusGroup(randomness, round, shardId, epoch) - if err != nil { - return nil, err - } - - addresses := make([]string, 0) - for _, v := range validators { - addresses = append(addresses, 
string(v.Address())) - } - - return addresses, nil -} - // SetNodesPerShards - func (ncm *NodesCoordinatorMock) SetNodesPerShards( eligible map[uint32][]sharding.Validator, _ map[uint32][]sharding.Validator, epoch uint32, - _ bool, ) error { if ncm.SetNodesPerShardsCalled != nil { return ncm.SetNodesPerShardsCalled(eligible, epoch) @@ -272,7 +252,7 @@ func (ncm *NodesCoordinatorMock) GetConsensusWhitelistedNodes( } // GetNodesPerShard - -func (ncm *NodesCoordinatorMock) GetNodesPerShard(epoch uint32) (map[uint32][]sharding.Validator, error) { +func (ncm *NodesCoordinatorMock) GetNodesPerShard(_ uint32) (map[uint32][]sharding.Validator, error) { return nil, nil } diff --git a/process/mock/peerAccountHandlerMock.go b/process/mock/peerAccountHandlerMock.go index 0b64c53220f..f8ecdfb9177 100644 --- a/process/mock/peerAccountHandlerMock.go +++ b/process/mock/peerAccountHandlerMock.go @@ -17,7 +17,17 @@ type PeerAccountHandlerMock struct { GetTempRatingCalled func() uint32 SetAccumulatedFeesCalled func(*big.Int) GetAccumulatedFeesCalled func() *big.Int - SetListAndIndexCalled func(shardID uint32, list string, index int32) + SetListAndIndexCalled func(shardID uint32, list string, index uint32) +} + +// GetList - +func (p *PeerAccountHandlerMock) GetList() string { + return "" +} + +// GetIndex - +func (p *PeerAccountHandlerMock) GetIndex() uint32 { + return 0 } // GetBLSPublicKey - @@ -263,7 +273,7 @@ func (p *PeerAccountHandlerMock) DataTrieTracker() state.DataTrieTracker { } // SetListAndIndex - -func (pahm *PeerAccountHandlerMock) SetListAndIndex(shardID uint32, list string, index int32) { +func (pahm *PeerAccountHandlerMock) SetListAndIndex(shardID uint32, list string, index uint32) { if pahm.SetListAndIndexCalled != nil { pahm.SetListAndIndexCalled(shardID, list, index) } diff --git a/process/mock/raterMock.go b/process/mock/raterMock.go index 186dfd48d42..85826dd06b9 100644 --- a/process/mock/raterMock.go +++ b/process/mock/raterMock.go @@ -21,7 +21,7 @@ type RaterMock struct { ComputeIncreaseValidatorCalled func(val uint32) uint32 ComputeDecreaseValidatorCalled func(val uint32) uint32 GetChancesCalled func(val uint32) uint32 - UpdateListAndIndexCalled func(pubKey string, list string, index int32) error + UpdateListAndIndexCalled func(pubKey string, shardID uint32, list string, index uint32) error RatingReader sharding.RatingReader } @@ -116,13 +116,13 @@ func (rm *RaterMock) GetChance(rating uint32) uint32 { } // SetListIndexUpdater - -func (rm *RaterMock) SetListIndexUpdater(updater sharding.ListIndexUpdaterHandler) { +func (rm *RaterMock) SetListIndexUpdater(_ sharding.ListIndexUpdaterHandler) { } // UpdateListAndIndex - -func (rm *RaterMock) UpdateListAndIndex(pubKey string, shardID uint32, list string, index int32) error { +func (rm *RaterMock) UpdateListAndIndex(pubKey string, shardID uint32, list string, index uint32) error { if rm.UpdateListAndIndexCalled != nil { - return rm.UpdateListAndIndexCalled(pubKey, list, index) + return rm.UpdateListAndIndexCalled(pubKey, shardID, list, index) } return nil diff --git a/process/mock/requestHandlerStub.go b/process/mock/requestHandlerStub.go index 18676dd70cd..3565a7a2fc7 100644 --- a/process/mock/requestHandlerStub.go +++ b/process/mock/requestHandlerStub.go @@ -1,5 +1,7 @@ package mock +import "time" + // RequestHandlerStub - type RequestHandlerStub struct { RequestShardHeaderCalled func(shardID uint32, hash []byte) @@ -15,6 +17,11 @@ type RequestHandlerStub struct { RequestStartOfEpochMetaBlockCalled func(epoch uint32) } +// 
RequestInterval - +func (rhs *RequestHandlerStub) RequestInterval() time.Duration { + return time.Second +} + // RequestStartOfEpochMetaBlock - func (rhs *RequestHandlerStub) RequestStartOfEpochMetaBlock(epoch uint32) { if rhs.RequestStartOfEpochMetaBlockCalled == nil { diff --git a/process/peer/listIndexUpdater.go b/process/peer/listIndexUpdater.go index cec9c6d0a1d..a379d3bda07 100644 --- a/process/peer/listIndexUpdater.go +++ b/process/peer/listIndexUpdater.go @@ -2,11 +2,11 @@ package peer // ListIndexUpdater will handle the updating of list type and the index for a peer type ListIndexUpdater struct { - updateListAndIndex func(pubKey string, shardID uint32, list string, index int32) error + updateListAndIndex func(pubKey string, shardID uint32, list string, index uint32) error } // UpdateListAndIndex will update the list and the index for a given peer -func (liu *ListIndexUpdater) UpdateListAndIndex(pubKey string, shardID uint32, list string, index int32) error { +func (liu *ListIndexUpdater) UpdateListAndIndex(pubKey string, shardID uint32, list string, index uint32) error { return liu.updateListAndIndex(pubKey, shardID, list, index) } diff --git a/process/peer/process.go b/process/peer/process.go index d19ddb35c5d..b89f744bb0e 100644 --- a/process/peer/process.go +++ b/process/peer/process.go @@ -129,24 +129,7 @@ func NewValidatorStatisticsProcessor(arguments ArgValidatorStatisticsProcessor) ratingReaderSetter.SetRatingReader(rr) - listIndexUpdaterSetter, ok := rater.(sharding.ListIndexUpdaterSetter) - if !ok { - return nil, process.ErrNilListIndexUpdaterSetter - } - log.Debug("setting list index updater") - - liu := &ListIndexUpdater{ - updateListAndIndex: vs.updateListAndIndex, - } - - listIndexUpdaterSetter.SetListIndexUpdater(liu) - - err := vs.nodesCoordinator.UpdatePeersListAndIndex() - if err != nil { - return nil, err - } - - err = vs.saveInitialState(arguments.StakeValue, rater.GetStartRating(), arguments.StartEpoch) + err := vs.saveInitialState(arguments.StakeValue, rater.GetStartRating(), arguments.StartEpoch) if err != nil { return nil, err } @@ -341,8 +324,8 @@ func (vs *validatorStatistics) peerAccountToValidatorInfo(peerAccount state.Peer return &state.ValidatorInfo{ PublicKey: peerAccount.GetBLSPublicKey(), ShardId: peerAccount.GetCurrentShardId(), - List: "list", - Index: 0, + List: peerAccount.GetList(), + Index: peerAccount.GetIndex(), TempRating: peerAccount.GetTempRating(), Rating: peerAccount.GetRating(), RewardAddress: peerAccount.GetRewardAddress(), @@ -828,7 +811,7 @@ func (vs *validatorStatistics) updateRatingFromTempRating(pks []string) error { } // updateListAndIndex updates the list and the index for a given public key -func (vs *validatorStatistics) updateListAndIndex(pubKey string, shardID uint32, list string, index int32) error { +func (vs *validatorStatistics) updateListAndIndex(pubKey string, shardID uint32, list string, index uint32) error { peer, err := vs.GetPeerAccount([]byte(pubKey)) if err != nil { log.Debug("error getting peer account", "error", err, "key", pubKey) diff --git a/process/rating/blockSigningRater.go b/process/rating/blockSigningRater.go index cb743cf766c..3167fc54eba 100644 --- a/process/rating/blockSigningRater.go +++ b/process/rating/blockSigningRater.go @@ -146,7 +146,7 @@ func (bsr *BlockSigningRaterAndListIndexer) ComputeDecreaseValidator(val uint32) } // UpdateListAndIndex will update the list and the index for a peer -func (bsr *BlockSigningRaterAndListIndexer) UpdateListAndIndex(pubKey string, shardID uint32, list string, 
index int32) error { +func (bsr *BlockSigningRaterAndListIndexer) UpdateListAndIndex(pubKey string, shardID uint32, list string, index uint32) error { return bsr.ListIndexUpdaterHandler.UpdateListAndIndex(pubKey, shardID, list, index) } diff --git a/process/rating/disabledListIndexUpdater.go b/process/rating/disabledListIndexUpdater.go index 5cfb4988944..bb17fd07d93 100644 --- a/process/rating/disabledListIndexUpdater.go +++ b/process/rating/disabledListIndexUpdater.go @@ -5,7 +5,7 @@ type DisabledListIndexUpdater struct { } // UpdateListAndIndex will return nil -func (n *DisabledListIndexUpdater) UpdateListAndIndex(pubKey string, shardID uint32, list string, index int32) error { +func (n *DisabledListIndexUpdater) UpdateListAndIndex(_ string, _ uint32, _ string, _ uint32) error { return nil } diff --git a/process/sync/baseSync.go b/process/sync/baseSync.go index 9a949950c1d..26ee2c8ccfd 100644 --- a/process/sync/baseSync.go +++ b/process/sync/baseSync.go @@ -865,7 +865,7 @@ func (boot *baseBootstrap) requestMiniBlocksByHashes(hashes [][]byte) { func (boot *baseBootstrap) getMiniBlocksRequestingIfMissing(hashes [][]byte) (block.MiniBlockSlice, error) { miniBlocks, missingMiniBlocksHashes := boot.miniBlocksResolver.GetMiniBlocksFromPool(hashes) if len(missingMiniBlocksHashes) > 0 { - _ = process.EmptyChannel(boot.chRcvMiniBlocks) + _ = core.EmptyChannel(boot.chRcvMiniBlocks) boot.requestMiniBlocksByHashes(missingMiniBlocksHashes) err := boot.waitForMiniBlocks() if err != nil { diff --git a/process/sync/metablock.go b/process/sync/metablock.go index 1d83c8c3971..ec805055dcc 100644 --- a/process/sync/metablock.go +++ b/process/sync/metablock.go @@ -171,7 +171,7 @@ func (boot *MetaBootstrap) getHeaderWithNonceRequestingIfMissing(nonce uint64) ( nonce, boot.headers) if err != nil { - _ = process.EmptyChannel(boot.chRcvHdrNonce) + _ = core.EmptyChannel(boot.chRcvHdrNonce) boot.requestHeaderWithNonce(nonce) err = boot.waitForHeaderNonce() if err != nil { @@ -194,7 +194,7 @@ func (boot *MetaBootstrap) getHeaderWithNonceRequestingIfMissing(nonce uint64) ( func (boot *MetaBootstrap) getHeaderWithHashRequestingIfMissing(hash []byte) (data.HeaderHandler, error) { hdr, err := process.GetMetaHeader(hash, boot.headers, boot.marshalizer, boot.store) if err != nil { - _ = process.EmptyChannel(boot.chRcvHdrHash) + _ = core.EmptyChannel(boot.chRcvHdrHash) boot.requestHeaderWithHash(hash) err = boot.waitForHeaderHash() if err != nil { diff --git a/process/sync/shardblock.go b/process/sync/shardblock.go index 93746649b4b..b7a47c2d99b 100644 --- a/process/sync/shardblock.go +++ b/process/sync/shardblock.go @@ -148,7 +148,7 @@ func (boot *ShardBootstrap) getHeaderWithNonceRequestingIfMissing(nonce uint64) boot.shardCoordinator.SelfId(), boot.headers) if err != nil { - _ = process.EmptyChannel(boot.chRcvHdrNonce) + _ = core.EmptyChannel(boot.chRcvHdrNonce) boot.requestHeaderWithNonce(nonce) err = boot.waitForHeaderNonce() if err != nil { @@ -172,7 +172,7 @@ func (boot *ShardBootstrap) getHeaderWithNonceRequestingIfMissing(nonce uint64) func (boot *ShardBootstrap) getHeaderWithHashRequestingIfMissing(hash []byte) (data.HeaderHandler, error) { hdr, err := process.GetShardHeader(hash, boot.headers, boot.marshalizer, boot.store) if err != nil { - _ = process.EmptyChannel(boot.chRcvHdrHash) + _ = core.EmptyChannel(boot.chRcvHdrHash) boot.requestHeaderWithHash(hash) err = boot.waitForHeaderHash() if err != nil { diff --git a/process/sync/storageBootstrap/metaStorageBootstrapper.go 
b/process/sync/storageBootstrap/metaStorageBootstrapper.go index a566c25f3fd..1335ac6456a 100644 --- a/process/sync/storageBootstrap/metaStorageBootstrapper.go +++ b/process/sync/storageBootstrap/metaStorageBootstrapper.go @@ -14,7 +14,7 @@ type metaStorageBootstrapper struct { pendingMiniBlocksHandler process.PendingMiniBlocksHandler } -// NewMetaStorageBootstrapper is method used to create a nes storage bootstrapper +// NewMetaStorageBootstrapper is a method used to create a new storage bootstrapper func NewMetaStorageBootstrapper(arguments ArgsMetaStorageBootstrapper) (*metaStorageBootstrapper, error) { err := checkMetaStorageBootstrapperArgs(arguments) if err != nil { @@ -111,7 +111,7 @@ func (msb *metaStorageBootstrapper) cleanupNotarizedStorage(metaBlockHash []byte } log.Debug("removing shard header from ShardHdrNonceHashDataUnit storage", - "shradId", shardHeader.GetShardID(), + "shardId", shardHeader.GetShardID(), "nonce", shardHeader.GetNonce(), "hash", shardHeaderHash) @@ -121,7 +121,7 @@ func (msb *metaStorageBootstrapper) cleanupNotarizedStorage(metaBlockHash []byte err = storer.Remove(nonceToByteSlice) if err != nil { log.Debug("shard header was not removed from ShardHdrNonceHashDataUnit storage", - "shradId", shardHeader.GetShardID(), + "shardId", shardHeader.GetShardID(), "nonce", shardHeader.GetNonce(), "hash", shardHeaderHash, "error", err.Error()) diff --git a/process/sync/storageBootstrap/shardStorageBootstrapper.go b/process/sync/storageBootstrap/shardStorageBootstrapper.go index 63c3a15eedb..d65348a68f0 100644 --- a/process/sync/storageBootstrap/shardStorageBootstrapper.go +++ b/process/sync/storageBootstrap/shardStorageBootstrapper.go @@ -15,7 +15,7 @@ type shardStorageBootstrapper struct { miniBlocksResolver dataRetriever.MiniBlocksResolver } -// NewShardStorageBootstrapper is method used to create a nes storage bootstrapper +// NewShardStorageBootstrapper is a method used to create a new storage bootstrapper func NewShardStorageBootstrapper(arguments ArgsShardStorageBootstrapper) (*shardStorageBootstrapper, error) { err := checkShardStorageBootstrapperArgs(arguments) if err != nil { @@ -137,7 +137,7 @@ func (ssb *shardStorageBootstrapper) cleanupNotarizedStorage(shardHeaderHash []b } log.Debug("removing meta block from storage", - "shradId", metaBlock.GetShardID(), + "shardId", metaBlock.GetShardID(), "nonce", metaBlock.GetNonce(), "hash", metaBlockHash) @@ -145,7 +145,7 @@ func (ssb *shardStorageBootstrapper) cleanupNotarizedStorage(shardHeaderHash []b err = ssb.store.GetStorer(dataRetriever.MetaHdrNonceHashDataUnit).Remove(nonceToByteSlice) if err != nil { log.Debug("meta block was not removed from MetaHdrNonceHashDataUnit storage", - "shradId", metaBlock.GetShardID(), + "shardId", metaBlock.GetShardID(), "nonce", metaBlock.GetNonce(), "hash", metaBlockHash, "error", err.Error()) @@ -154,7 +154,7 @@ func (ssb *shardStorageBootstrapper) cleanupNotarizedStorage(shardHeaderHash []b err = ssb.store.GetStorer(dataRetriever.MetaBlockUnit).Remove(metaBlockHash) if err != nil { log.Debug("meta block was not removed from MetaBlockUnit storage", - "shradId", metaBlock.GetShardID(), + "shardId", metaBlock.GetShardID(), "nonce", metaBlock.GetNonce(), "hash", metaBlockHash, "error", err.Error()) diff --git a/sharding/indexHashedNodesCoordinator.go b/sharding/indexHashedNodesCoordinator.go index 8031609a75a..535764aa2c7 100644 --- a/sharding/indexHashedNodesCoordinator.go +++ b/sharding/indexHashedNodesCoordinator.go @@ -48,7 +48,6 @@ type indexHashedNodesCoordinator struct { 
hasher hashing.Hasher shuffler NodesShuffler epochStartRegistrationHandler EpochStartEventNotifier - listIndexUpdater ListIndexUpdaterHandler bootStorer storage.Storer selfPubKey []byte nodesConfig map[uint32]*epochNodesConfig @@ -61,7 +60,7 @@ type indexHashedNodesCoordinator struct { metaConsensusGroupSize int nodesPerShardSetter NodesPerShardSetter consensusGroupCacher Cacher - shardIDAsObserver uint32 + shardIDAsObserver uint32 } // NewIndexHashedNodesCoordinator creates a new index hashed group selector @@ -88,7 +87,6 @@ func NewIndexHashedNodesCoordinator(arguments ArgNodesCoordinator) (*indexHashed shuffler: arguments.Shuffler, epochStartRegistrationHandler: arguments.EpochStartNotifier, bootStorer: arguments.BootStorer, - listIndexUpdater: arguments.ListIndexUpdater, selfPubKey: arguments.SelfPublicKey, nodesConfig: nodesConfig, currentEpoch: arguments.Epoch, @@ -96,13 +94,11 @@ func NewIndexHashedNodesCoordinator(arguments ArgNodesCoordinator) (*indexHashed shardConsensusGroupSize: arguments.ShardConsensusGroupSize, metaConsensusGroupSize: arguments.MetaConsensusGroupSize, consensusGroupCacher: arguments.ConsensusGroupCache, - shardIDAsObserver: arguments.ShardIDAsObserver, + shardIDAsObserver: arguments.ShardIDAsObserver, } ihgs.nodesPerShardSetter = ihgs - err = ihgs.nodesPerShardSetter.SetNodesPerShards(arguments.EligibleNodes, arguments.WaitingNodes, arguments.Epoch, false) - - err = ihgs.SetNodesPerShards(arguments.EligibleNodes, arguments.WaitingNodes, arguments.Epoch, false) + err = ihgs.nodesPerShardSetter.SetNodesPerShards(arguments.EligibleNodes, arguments.WaitingNodes, arguments.Epoch) if err != nil { return nil, err } @@ -137,9 +133,6 @@ func checkArguments(arguments ArgNodesCoordinator) error { if check.IfNil(arguments.Shuffler) { return ErrNilShuffler } - if check.IfNil(arguments.ListIndexUpdater) { - return ErrNilListIndexUpdater - } if check.IfNil(arguments.BootStorer) { return ErrNilBootStorer } @@ -155,7 +148,6 @@ func (ihgs *indexHashedNodesCoordinator) SetNodesPerShards( eligible map[uint32][]Validator, waiting map[uint32][]Validator, epoch uint32, - updatePeersListAndIndex bool, ) error { ihgs.mutNodesConfig.Lock() defer ihgs.mutNodesConfig.Unlock() @@ -213,13 +205,6 @@ func (ihgs *indexHashedNodesCoordinator) SetNodesPerShards( ihgs.nodesConfig[epoch] = nodesConfig ihgs.numTotalEligible = numTotalEligible - if updatePeersListAndIndex { - err := ihgs.updatePeersListAndIndex(nodesConfig) - if err != nil { - return err - } - } - return nil } @@ -356,27 +341,6 @@ func (ihgs *indexHashedNodesCoordinator) GetConsensusValidatorsPublicKeys( return pubKeys, nil } -// GetConsensusValidatorsRewardsAddresses calculates the validator consensus group for a specific shard, randomness and round -// number, returning their staking/rewards addresses -func (ihgs *indexHashedNodesCoordinator) GetConsensusValidatorsRewardsAddresses( - randomness []byte, - round uint64, - shardID uint32, - epoch uint32, -) ([]string, error) { - consensusNodes, err := ihgs.ComputeConsensusGroup(randomness, round, shardID, epoch) - if err != nil { - return nil, err - } - - addresses := make([]string, len(consensusNodes)) - for i, v := range consensusNodes { - addresses[i] = string(v.Address()) - } - - return addresses, nil -} - // GetAllEligibleValidatorsPublicKeys will return all validators public keys for all shards func (ihgs *indexHashedNodesCoordinator) GetAllEligibleValidatorsPublicKeys(epoch uint32) (map[uint32][][]byte, error) { validatorsPubKeys := make(map[uint32][][]byte) @@ -538,7 +502,7 
@@ func (ihgs *indexHashedNodesCoordinator) EpochStartPrepare(metaHeader data.Heade eligibleMap, waitingMap, stillRemaining := ihgs.shuffler.UpdateNodeLists(shufflerArgs) - err := ihgs.nodesPerShardSetter.SetNodesPerShards(eligibleMap, waitingMap, newEpoch, true) + err := ihgs.nodesPerShardSetter.SetNodesPerShards(eligibleMap, waitingMap, newEpoch) if err != nil { log.Error("set nodes per shard failed", "error", err.Error()) } @@ -654,60 +618,6 @@ func (ihgs *indexHashedNodesCoordinator) GetConsensusWhitelistedNodes( return shardEligible, nil } -// UpdatePeersListAndIndex will update the list and the index for all peers -func (ihgs *indexHashedNodesCoordinator) UpdatePeersListAndIndex() error { - ihgs.mutNodesConfig.RLock() - nodesConfig, ok := ihgs.nodesConfig[ihgs.currentEpoch] - ihgs.mutNodesConfig.RUnlock() - - if !ok { - return ErrEpochNodesConfigDoesNotExist - } - - nodesConfig.mutNodesMaps.RLock() - defer nodesConfig.mutNodesMaps.RUnlock() - - return ihgs.updatePeersListAndIndex(nodesConfig) -} - -// updatePeersListAndIndex will update the list and the index for all peers -// should be called with mutex locked -func (ihgs *indexHashedNodesCoordinator) updatePeersListAndIndex(nodesConfig *epochNodesConfig) error { - err := ihgs.updatePeerAccountsForGivenMap(nodesConfig.eligibleMap, core.EligibleList) - if err != nil { - return err - } - - err = ihgs.updatePeerAccountsForGivenMap(nodesConfig.waitingMap, core.WaitingList) - if err != nil { - return err - } - - return nil -} - -func (ihgs *indexHashedNodesCoordinator) updatePeerAccountsForGivenMap( - peers map[uint32][]Validator, - list core.PeerType, -) error { - for shardId, accountsPerShard := range peers { - for index, account := range accountsPerShard { - err := ihgs.listIndexUpdater.UpdateListAndIndex( - string(account.PubKey()), - shardId, - string(list), - int32(index)) - if err != nil { - log.Warn("error while updating list and index for peer", - "error", err, - "public key", account.PubKey()) - } - } - } - - return nil -} - func (ihgs *indexHashedNodesCoordinator) computeShardForSelfPublicKey(nodesConfig *epochNodesConfig) uint32 { pubKey := ihgs.selfPubKey selfShard := ihgs.shardIDAsObserver diff --git a/sharding/indexHashedNodesCoordinatorRegistry.go b/sharding/indexHashedNodesCoordinatorRegistry.go index d6386bf8de1..80d838b766f 100644 --- a/sharding/indexHashedNodesCoordinatorRegistry.go +++ b/sharding/indexHashedNodesCoordinatorRegistry.go @@ -4,6 +4,8 @@ import ( "encoding/json" "fmt" "strconv" + + "github.com/ElrondNetwork/elrond-go/core" ) const keyPrefix = "indexHashed_" @@ -26,6 +28,7 @@ type NodesCoordinatorRegistry struct { CurrentEpoch uint32 `json:"currentEpoch"` } +// TODO: add proto marshalizer for these package - replace all json marshalizers // LoadState loads the nodes coordinator state from the used boot storage func (ihgs *indexHashedNodesCoordinator) LoadState(key []byte) error { return ihgs.baseLoadState(key) @@ -99,7 +102,7 @@ func (ihgs *indexHashedNodesCoordinator) saveState(key []byte) error { return err } - ncInternalkey := append([]byte(keyPrefix), key...) + ncInternalkey := append([]byte(core.NodesCoordinatorRegistryKeyPrefix), key...) 
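For reference, a minimal sketch of the simplified call shape introduced here: SetNodesPerShards now takes only the eligible map, the waiting map and the epoch, and the coordinator no longer updates peer lists and indexes as a side effect. The Validator interface and the logging setter below are local stand-ins for illustration, not the real sharding package types.

    package main

    import "fmt"

    // Local stand-ins used only to illustrate the trimmed-down signature;
    // the real types live in the sharding package.
    type Validator interface {
        PubKey() []byte
    }

    type NodesPerShardSetter interface {
        SetNodesPerShards(eligible map[uint32][]Validator, waiting map[uint32][]Validator, epoch uint32) error
    }

    type loggingSetter struct{}

    func (ls *loggingSetter) SetNodesPerShards(eligible map[uint32][]Validator, waiting map[uint32][]Validator, epoch uint32) error {
        fmt.Printf("epoch %d: %d eligible shards, %d waiting shards\n", epoch, len(eligible), len(waiting))
        return nil
    }

    func main() {
        var setter NodesPerShardSetter = &loggingSetter{}
        // Callers that previously passed the updatePeersListAndIndex flag now simply drop it.
        _ = setter.SetNodesPerShards(map[uint32][]Validator{}, map[uint32][]Validator{}, 0)
    }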
log.Debug("saving nodes coordinator config", "key", ncInternalkey) @@ -136,7 +139,6 @@ func (ihgs *indexHashedNodesCoordinator) registryToNodesCoordinator( } var nodesConfig *epochNodesConfig - nodesConfig, err = epochValidatorsToEpochNodesConfig(epochValidators) if err != nil { return nil, err @@ -147,6 +149,8 @@ func (ihgs *indexHashedNodesCoordinator) registryToNodesCoordinator( return nil, ErrInvalidNumberOfShards } + nodesConfig.expandedEligibleMap = nodesConfig.eligibleMap + // shards without metachain shard nodesConfig.nbShards = nbShards - 1 nodesConfig.shardID = ihgs.computeShardForSelfPublicKey(nodesConfig) @@ -164,11 +168,11 @@ func epochNodesConfigToEpochValidators(config *epochNodesConfig) *EpochValidator } for k, v := range config.eligibleMap { - result.EligibleValidators[fmt.Sprint(k)] = validatorArrayToSerializableValidatorArray(v) + result.EligibleValidators[fmt.Sprint(k)] = ValidatorArrayToSerializableValidatorArray(v) } for k, v := range config.waitingMap { - result.WaitingValidators[fmt.Sprint(k)] = validatorArrayToSerializableValidatorArray(v) + result.WaitingValidators[fmt.Sprint(k)] = ValidatorArrayToSerializableValidatorArray(v) } return result @@ -214,7 +218,8 @@ func serializableValidatorsMapToValidatorsMap( return result, nil } -func validatorArrayToSerializableValidatorArray(validators []Validator) []*SerializableValidator { +// ValidatorArrayToSerializableValidatorArray - +func ValidatorArrayToSerializableValidatorArray(validators []Validator) []*SerializableValidator { result := make([]*SerializableValidator, len(validators)) for i, v := range validators { @@ -240,3 +245,23 @@ func serializableValidatorArrayToValidatorArray(sValidators []*SerializableValid return result, nil } + +// NodesInfoToValidators maps nodeInfo to validator interface +func NodesInfoToValidators(nodesInfo map[uint32][]*NodeInfo) (map[uint32][]Validator, error) { + validatorsMap := make(map[uint32][]Validator) + + for shId, nodeInfoList := range nodesInfo { + validators := make([]Validator, 0, len(nodeInfoList)) + for _, nodeInfo := range nodeInfoList { + validator, err := NewValidator(nodeInfo.PubKey(), nodeInfo.Address()) + if err != nil { + return nil, err + } + + validators = append(validators, validator) + } + validatorsMap[shId] = validators + } + + return validatorsMap, nil +} diff --git a/sharding/indexHashedNodesCoordinatorRegistry_test.go b/sharding/indexHashedNodesCoordinatorRegistry_test.go index 3f2e4d06f83..0a4d5f5cc74 100644 --- a/sharding/indexHashedNodesCoordinatorRegistry_test.go +++ b/sharding/indexHashedNodesCoordinatorRegistry_test.go @@ -154,7 +154,7 @@ func TestIndexHashedNodesCoordinator_validatorArrayToSerializableValidatorArray( validatorsMap := createDummyNodesMap(5, 2, "dummy") for _, validatorsArray := range validatorsMap { - sValidators := validatorArrayToSerializableValidatorArray(validatorsArray) + sValidators := ValidatorArrayToSerializableValidatorArray(validatorsArray) assert.True(t, validatorsEqualSerializableValidators(validatorsArray, sValidators)) } } @@ -164,7 +164,7 @@ func TestIndexHashedNodesCoordinator_serializableValidatorsMapToValidatorsMap(t sValidatorsMap := make(map[string][]*SerializableValidator) for k, validatorsArray := range validatorsMap { - sValidators := validatorArrayToSerializableValidatorArray(validatorsArray) + sValidators := ValidatorArrayToSerializableValidatorArray(validatorsArray) sValidatorsMap[fmt.Sprint(k)] = sValidators } @@ -175,7 +175,7 @@ func TestIndexHashedNodesCoordinator_serializableValidatorArrayToValidatorArray( 
validatorsMap := createDummyNodesMap(5, 2, "dummy") for _, validatorsArray := range validatorsMap { - sValidators := validatorArrayToSerializableValidatorArray(validatorsArray) + sValidators := ValidatorArrayToSerializableValidatorArray(validatorsArray) valArray, err := serializableValidatorArrayToValidatorArray(sValidators) assert.Nil(t, err) assert.True(t, sameValidators(validatorsArray, valArray)) diff --git a/sharding/indexHashedNodesCoordinatorWithRater.go b/sharding/indexHashedNodesCoordinatorWithRater.go index d4a216fe1f6..49e6338bc27 100644 --- a/sharding/indexHashedNodesCoordinatorWithRater.go +++ b/sharding/indexHashedNodesCoordinatorWithRater.go @@ -46,9 +46,8 @@ func (ihgs *indexHashedNodesCoordinatorWithRater) SetNodesPerShards( eligible map[uint32][]Validator, waiting map[uint32][]Validator, epoch uint32, - updateList bool, ) error { - err := ihgs.indexHashedNodesCoordinator.SetNodesPerShards(eligible, waiting, epoch, updateList) + err := ihgs.indexHashedNodesCoordinator.SetNodesPerShards(eligible, waiting, epoch) if err != nil { return err } diff --git a/sharding/indexHashedNodesCoordinatorWithRater_test.go b/sharding/indexHashedNodesCoordinatorWithRater_test.go index a213c4f7b88..a18dcfadaf2 100644 --- a/sharding/indexHashedNodesCoordinatorWithRater_test.go +++ b/sharding/indexHashedNodesCoordinatorWithRater_test.go @@ -47,7 +47,7 @@ func TestIndexHashedGroupSelectorWithRater_SetNilEligibleMapShouldErr(t *testing waiting := createDummyNodesMap(2, 1, "waiting") nc, _ := NewIndexHashedNodesCoordinator(createArguments()) ihgs, _ := NewIndexHashedNodesCoordinatorWithRater(nc, &mock.RaterMock{}) - assert.Equal(t, ErrNilInputNodesMap, ihgs.SetNodesPerShards(nil, waiting, 0, true)) + assert.Equal(t, ErrNilInputNodesMap, ihgs.SetNodesPerShards(nil, waiting, 0)) } func TestIndexHashedGroupSelectorWithRater_OkValShouldWork(t *testing.T) { @@ -71,7 +71,6 @@ func TestIndexHashedGroupSelectorWithRater_OkValShouldWork(t *testing.T) { WaitingNodes: waitingMap, SelfPublicKey: []byte("test"), ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ListIndexUpdater: &mock.ListIndexUpdaterStub{}, } nc, err := NewIndexHashedNodesCoordinator(arguments) assert.Nil(t, err) @@ -151,7 +150,6 @@ func TestIndexHashedGroupSelectorWithRater_ComputeExpandedList(t *testing.T) { WaitingNodes: waitingMap, SelfPublicKey: []byte("key"), ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ListIndexUpdater: &mock.ListIndexUpdaterStub{}, } ratingPk0 := uint32(5) @@ -217,7 +215,6 @@ func BenchmarkIndexHashedGroupSelectorWithRater_ComputeValidatorsGroup63of400(b WaitingNodes: waitingMap, SelfPublicKey: []byte("key"), ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ListIndexUpdater: &mock.ListIndexUpdaterStub{}, } ihgs, _ := NewIndexHashedNodesCoordinator(arguments) ihgsRater, _ := NewIndexHashedNodesCoordinatorWithRater(ihgs, &mock.RaterMock{}) @@ -258,7 +255,6 @@ func TestIndexHashedGroupSelectorWithRater_GetValidatorWithPublicKeyShouldReturn WaitingNodes: waitingMap, SelfPublicKey: []byte("key"), ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ListIndexUpdater: &mock.ListIndexUpdaterStub{}, } nc, _ := NewIndexHashedNodesCoordinator(arguments) ihgs, _ := NewIndexHashedNodesCoordinatorWithRater(nc, &mock.RaterMock{}) @@ -294,7 +290,6 @@ func TestIndexHashedGroupSelectorWithRater_GetValidatorWithPublicKeyShouldReturn WaitingNodes: waitingMap, SelfPublicKey: []byte("key"), ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ListIndexUpdater: &mock.ListIndexUpdaterStub{}, } nc, _ := 
NewIndexHashedNodesCoordinator(arguments) ihgs, _ := NewIndexHashedNodesCoordinatorWithRater(nc, &mock.RaterMock{}) @@ -344,7 +339,6 @@ func TestIndexHashedGroupSelectorWithRater_GetValidatorWithPublicKeyShouldWork(t WaitingNodes: waitingMap, SelfPublicKey: []byte("key"), ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ListIndexUpdater: &mock.ListIndexUpdaterStub{}, } nc, _ := NewIndexHashedNodesCoordinator(arguments) ihgs, _ := NewIndexHashedNodesCoordinatorWithRater(nc, &mock.RaterMock{}) @@ -415,7 +409,6 @@ func TestIndexHashedGroupSelectorWithRater_GetAllEligibleValidatorsPublicKeys(t WaitingNodes: waitingMap, SelfPublicKey: []byte("key"), ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ListIndexUpdater: &mock.ListIndexUpdaterStub{}, } nc, _ := NewIndexHashedNodesCoordinator(arguments) diff --git a/sharding/indexHashedNodesCoordinator_test.go b/sharding/indexHashedNodesCoordinator_test.go index e12bfdb990f..81bffcccb22 100644 --- a/sharding/indexHashedNodesCoordinator_test.go +++ b/sharding/indexHashedNodesCoordinator_test.go @@ -84,7 +84,6 @@ func createArguments() ArgNodesCoordinator { WaitingNodes: waitingMap, SelfPublicKey: []byte("test"), ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ListIndexUpdater: &mock.ListIndexUpdaterStub{}, } return arguments } @@ -187,7 +186,7 @@ func TestIndexHashedNodesCoordinator_SetNilEligibleMapShouldErr(t *testing.T) { arguments := createArguments() ihgs, _ := NewIndexHashedNodesCoordinator(arguments) - require.Equal(t, ErrNilInputNodesMap, ihgs.SetNodesPerShards(nil, waitingMap, 0, false)) + require.Equal(t, ErrNilInputNodesMap, ihgs.SetNodesPerShards(nil, waitingMap, 0)) } func TestIndexHashedNodesCoordinator_SetNilWaitingMapShouldErr(t *testing.T) { @@ -197,7 +196,7 @@ func TestIndexHashedNodesCoordinator_SetNilWaitingMapShouldErr(t *testing.T) { arguments := createArguments() ihgs, _ := NewIndexHashedNodesCoordinator(arguments) - require.Equal(t, ErrNilInputNodesMap, ihgs.SetNodesPerShards(eligibleMap, nil, 0, false)) + require.Equal(t, ErrNilInputNodesMap, ihgs.SetNodesPerShards(eligibleMap, nil, 0)) } func TestIndexHashedNodesCoordinator_OkValShouldWork(t *testing.T) { @@ -221,7 +220,6 @@ func TestIndexHashedNodesCoordinator_OkValShouldWork(t *testing.T) { WaitingNodes: waitingMap, SelfPublicKey: []byte("key"), ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ListIndexUpdater: &mock.ListIndexUpdaterStub{}, } ihgs, err := NewIndexHashedNodesCoordinator(arguments) @@ -265,7 +263,6 @@ func TestIndexHashedNodesCoordinator_NewCoordinatorTooFewNodesShouldErr(t *testi WaitingNodes: waitingMap, SelfPublicKey: []byte("key"), ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ListIndexUpdater: &mock.ListIndexUpdaterStub{}, } ihgs, err := NewIndexHashedNodesCoordinator(arguments) @@ -323,7 +320,6 @@ func TestIndexHashedNodesCoordinator_ComputeValidatorsGroup1ValidatorShouldRetur WaitingNodes: make(map[uint32][]Validator), SelfPublicKey: []byte("key"), ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ListIndexUpdater: &mock.ListIndexUpdaterStub{}, } ihgs, _ := NewIndexHashedNodesCoordinator(arguments) list2, err := ihgs.ComputeConsensusGroup([]byte("randomness"), 0, 0, 0) @@ -372,7 +368,6 @@ func TestIndexHashedNodesCoordinator_ComputeValidatorsGroupTest2Validators(t *te WaitingNodes: waitingMap, SelfPublicKey: []byte("key"), ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ListIndexUpdater: &mock.ListIndexUpdaterStub{}, } ihgs, _ := NewIndexHashedNodesCoordinator(arguments) @@ -434,7 +429,6 
@@ func TestIndexHashedNodesCoordinator_ComputeValidatorsGroupTest2ValidatorsRevert WaitingNodes: waitingMap, SelfPublicKey: []byte("key"), ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ListIndexUpdater: &mock.ListIndexUpdaterStub{}, } ihgs, _ := NewIndexHashedNodesCoordinator(arguments) @@ -485,7 +479,6 @@ func TestIndexHashedNodesCoordinator_ComputeValidatorsGroupTest2ValidatorsSameIn WaitingNodes: waitingMap, SelfPublicKey: []byte("key"), ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ListIndexUpdater: &mock.ListIndexUpdaterStub{}, } ihgs, _ := NewIndexHashedNodesCoordinator(arguments) @@ -586,7 +579,6 @@ func TestIndexHashedNodesCoordinator_ComputeValidatorsGroupTest6From10Validators WaitingNodes: make(map[uint32][]Validator), SelfPublicKey: selfPubKey, ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ListIndexUpdater: &mock.ListIndexUpdaterStub{}, } ihgs, err := NewIndexHashedNodesCoordinator(arguments) require.Nil(t, err) @@ -639,7 +631,6 @@ func TestIndexHashedNodesCoordinator_ComputeValidatorsGroup400of400For10locksNoM WaitingNodes: waitingMap, SelfPublicKey: []byte("key"), ConsensusGroupCache: cache, - ListIndexUpdater: &mock.ListIndexUpdaterStub{}, } ihgs, err := NewIndexHashedNodesCoordinator(arguments) @@ -711,7 +702,6 @@ func TestIndexHashedNodesCoordinator_ComputeValidatorsGroup400of400For10BlocksMe WaitingNodes: waitingMap, SelfPublicKey: []byte("key"), ConsensusGroupCache: cache, - ListIndexUpdater: &mock.ListIndexUpdaterStub{}, } ihgs, err := NewIndexHashedNodesCoordinator(arguments) @@ -814,7 +804,6 @@ func computeMemoryRequirements(consensusGroupCache Cacher, consensusGroupSize in WaitingNodes: waitingMap, SelfPublicKey: []byte("key"), ConsensusGroupCache: consensusGroupCache, - ListIndexUpdater: &mock.ListIndexUpdaterStub{}, } ihgs, _ := NewIndexHashedNodesCoordinator(arguments) @@ -938,7 +927,6 @@ func TestIndexHashedNodesCoordinator_GetValidatorWithPublicKeyShouldWork(t *test WaitingNodes: make(map[uint32][]Validator), SelfPublicKey: []byte("key"), ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ListIndexUpdater: &mock.ListIndexUpdaterStub{}, } ihgs, _ := NewIndexHashedNodesCoordinator(arguments) @@ -1016,7 +1004,6 @@ func TestIndexHashedGroupSelector_GetAllEligibleValidatorsPublicKeys(t *testing. 
WaitingNodes: make(map[uint32][]Validator), SelfPublicKey: []byte("key"), ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ListIndexUpdater: &mock.ListIndexUpdaterStub{}, } ihgs, _ := NewIndexHashedNodesCoordinator(arguments) @@ -1078,7 +1065,6 @@ func TestIndexHashedGroupSelector_GetAllWaitingValidatorsPublicKeys(t *testing.T WaitingNodes: waitingMap, SelfPublicKey: []byte("key"), ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ListIndexUpdater: &mock.ListIndexUpdaterStub{}, } ihgs, _ := NewIndexHashedNodesCoordinator(arguments) @@ -1151,33 +1137,6 @@ func TestIndexHashedNodesCoordinator_GetConsensusValidatorsPublicKeysExistingEpo require.True(t, isStringSubgroup(pKeys, shard0PubKeys)) } -func TestIndexHashedNodesCoordinator_GetConsensusValidatorsRewardsAddressesInvalidRandomness(t *testing.T) { - t.Parallel() - - args := createArguments() - ihgs, err := NewIndexHashedNodesCoordinator(args) - require.Nil(t, err) - - var addresses []string - addresses, err = ihgs.GetConsensusValidatorsRewardsAddresses(nil, 0, 0, 0) - require.Equal(t, ErrNilRandomness, err) - require.Nil(t, addresses) -} - -func TestIndexHashedNodesCoordinator_GetConsensusValidatorsRewardsAddressesOK(t *testing.T) { - t.Parallel() - - args := createArguments() - ihgs, err := NewIndexHashedNodesCoordinator(args) - require.Nil(t, err) - - var addresses []string - randomness := []byte("randomness") - addresses, err = ihgs.GetConsensusValidatorsRewardsAddresses(randomness, 0, 0, 0) - require.Nil(t, err) - require.True(t, len(addresses) > 0) -} - func TestIndexHashedNodesCoordinator_GetValidatorsIndexes(t *testing.T) { t.Parallel() diff --git a/sharding/interface.go b/sharding/interface.go index f21d3b34ad2..47f5580d449 100644 --- a/sharding/interface.go +++ b/sharding/interface.go @@ -28,7 +28,6 @@ type NodesCoordinator interface { PublicKeysSelector ComputeConsensusGroup(randomness []byte, round uint64, shardId uint32, epoch uint32) (validatorsGroup []Validator, err error) GetValidatorWithPublicKey(publicKey []byte, epoch uint32) (validator Validator, shardId uint32, err error) - UpdatePeersListAndIndex() error LoadState(key []byte) error GetSavedStateKey() []byte ShardIdForEpoch(epoch uint32) (uint32, error) @@ -44,7 +43,6 @@ type PublicKeysSelector interface { GetAllEligibleValidatorsPublicKeys(epoch uint32) (map[uint32][][]byte, error) GetAllWaitingValidatorsPublicKeys(epoch uint32) (map[uint32][][]byte, error) GetConsensusValidatorsPublicKeys(randomness []byte, round uint64, shardId uint32, epoch uint32) ([]string, error) - GetConsensusValidatorsRewardsAddresses(randomness []byte, round uint64, shardId uint32, epoch uint32) ([]string, error) GetOwnPublicKey() []byte } @@ -77,7 +75,6 @@ type NodesPerShardSetter interface { eligible map[uint32][]Validator, waiting map[uint32][]Validator, epoch uint32, - updateList bool, ) error ComputeLeaving(allValidators []Validator) []Validator } @@ -88,7 +85,7 @@ type PeerAccountListAndRatingHandler interface { //GetChance returns the chances for the the rating GetChance(uint32) uint32 // UpdateListAndIndex updated the list and the index for a peer - UpdateListAndIndex(pubKey string, shardID uint32, list string, index int32) error + UpdateListAndIndex(pubKey string, shardID uint32, list string, index uint32) error //GetStartRating gets the start rating values GetStartRating() uint32 //ComputeIncreaseProposer computes the new rating for the increaseLeader @@ -104,7 +101,7 @@ type PeerAccountListAndRatingHandler interface { // ListIndexUpdaterHandler defines what a 
component which can update the list and index for a peer should do type ListIndexUpdaterHandler interface { // UpdateListAndIndex updated the list and the index for a peer - UpdateListAndIndex(pubKey string, shardID uint32, list string, index int32) error + UpdateListAndIndex(pubKey string, shardID uint32, list string, index uint32) error //IsInterfaceNil verifies if the interface is nil IsInterfaceNil() bool } diff --git a/sharding/mock/listIndexUpdaterStub.go b/sharding/mock/listIndexUpdaterStub.go index 0471a84ac23..31c5ae19b76 100644 --- a/sharding/mock/listIndexUpdaterStub.go +++ b/sharding/mock/listIndexUpdaterStub.go @@ -2,13 +2,13 @@ package mock // ListIndexUpdaterStub - type ListIndexUpdaterStub struct { - UpdateListAndIndexCalled func(pubKey string, list string, index int32) error + UpdateListAndIndexCalled func(pubKey string, shardID uint32, list string, index uint32) error } // UpdateListAndIndex - -func (lius *ListIndexUpdaterStub) UpdateListAndIndex(pubKey string, shardID uint32, list string, index int32) error { +func (lius *ListIndexUpdaterStub) UpdateListAndIndex(pubKey string, shardID uint32, list string, index uint32) error { if lius.UpdateListAndIndexCalled != nil { - return lius.UpdateListAndIndexCalled(pubKey, list, index) + return lius.UpdateListAndIndexCalled(pubKey, shardID, list, index) } return nil diff --git a/sharding/networksharding/mock_test.go b/sharding/networksharding/mock_test.go index 47ab51d2de0..2412e7da446 100644 --- a/sharding/networksharding/mock_test.go +++ b/sharding/networksharding/mock_test.go @@ -8,11 +8,6 @@ type nodesCoordinatorStub struct { GetValidatorWithPublicKeyCalled func(publicKey []byte, epoch uint32) (validator sharding.Validator, shardId uint32, err error) } -// UpdatePeersListAndIndex - -func (ncs *nodesCoordinatorStub) UpdatePeersListAndIndex() error { - panic("implement me") -} - // ComputeLeaving - func (ncs *nodesCoordinatorStub) ComputeLeaving(_ []sharding.Validator) []sharding.Validator { panic("implement me") @@ -54,7 +49,7 @@ func (ncs *nodesCoordinatorStub) GetOwnPublicKey() []byte { } // SetNodesPerShards - -func (ncs *nodesCoordinatorStub) SetNodesPerShards(_ map[uint32][]sharding.Validator, _ map[uint32][]sharding.Validator, _ uint32, _ bool) error { +func (ncs *nodesCoordinatorStub) SetNodesPerShards(_ map[uint32][]sharding.Validator, _ map[uint32][]sharding.Validator, _ uint32) error { panic("implement me") } diff --git a/sharding/nodesSetup.go b/sharding/nodesSetup.go index 53e7ca13fef..d56e3e43d7c 100644 --- a/sharding/nodesSetup.go +++ b/sharding/nodesSetup.go @@ -253,6 +253,11 @@ func (ns *NodesSetup) InitialNodesInfoForShard(shardId uint32) ([]*NodeInfo, []* return ns.eligible[shardId], ns.waiting[shardId], nil } +// SetNumberOfShards will update the number of shards. 
Should be used only when testing +func (ns *NodesSetup) SetNumberOfShards(numShards uint32) { + ns.nrOfShards = numShards +} + // NumberOfShards returns the calculated number of shards func (ns *NodesSetup) NumberOfShards() uint32 { return ns.nrOfShards diff --git a/sharding/shardingArgs.go b/sharding/shardingArgs.go index a45a32a3761..fcf4680f650 100644 --- a/sharding/shardingArgs.go +++ b/sharding/shardingArgs.go @@ -12,7 +12,6 @@ type ArgNodesCoordinator struct { Hasher hashing.Hasher Shuffler NodesShuffler EpochStartNotifier EpochStartEventNotifier - ListIndexUpdater ListIndexUpdaterHandler BootStorer storage.Storer ShardIDAsObserver uint32 NbShards uint32 diff --git a/storage/factory/common.go b/storage/factory/common.go index 9a07822592b..520dbef27d0 100644 --- a/storage/factory/common.go +++ b/storage/factory/common.go @@ -1,20 +1,10 @@ package factory import ( - "fmt" "math" - "os" - "path/filepath" - "regexp" - "sort" "strconv" - "strings" "github.com/ElrondNetwork/elrond-go/config" - "github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/process/block/bootstrapStorage" - "github.com/ElrondNetwork/elrond-go/storage" - "github.com/ElrondNetwork/elrond-go/storage/lrucache" "github.com/ElrondNetwork/elrond-go/storage/storageUnit" ) @@ -58,196 +48,6 @@ func GetBloomFromConfig(cfg config.BloomFilterConfig) storageUnit.BloomConfig { } } -// FindLatestDataFromStorage finds the last data (such as last epoch, shard ID or round) by searching over the -// storage folders and opening older databases -func FindLatestDataFromStorage( - generalConfig config.Config, - marshalizer marshal.Marshalizer, - workingDir string, - chainID string, - defaultDBPath string, - defaultEpochString string, - defaultShardString string, -) (uint32, uint32, int64, error) { - parentDir := filepath.Join( - workingDir, - defaultDBPath, - chainID) - - f, err := os.Open(parentDir) - if err != nil { - return 0, 0, 0, err - } - - files, err := f.Readdir(allFiles) - _ = f.Close() - - if err != nil { - return 0, 0, 0, err - } - - epochDirs := make([]string, 0, len(files)) - for _, file := range files { - if !file.IsDir() { - continue - } - - isEpochDir := strings.HasPrefix(file.Name(), defaultEpochString) - if !isEpochDir { - continue - } - - epochDirs = append(epochDirs, file.Name()) - } - - lastEpoch, err := getLastEpochFromDirNames(epochDirs) - if err != nil { - return 0, 0, 0, err - } - - return getLastEpochAndRoundFromStorage(generalConfig, marshalizer, parentDir, defaultEpochString, defaultShardString, lastEpoch) -} - -func getLastEpochFromDirNames(epochDirs []string) (uint32, error) { - if len(epochDirs) == 0 { - return 0, nil - } - - re := regexp.MustCompile("[0-9]+") - epochsInDirName := make([]uint32, 0, len(epochDirs)) - - for _, dirname := range epochDirs { - epochStr := re.FindString(dirname) - epoch, err := strconv.ParseInt(epochStr, 10, 64) - if err != nil { - return 0, err - } - - epochsInDirName = append(epochsInDirName, uint32(epoch)) - } - - sort.Slice(epochsInDirName, func(i, j int) bool { - return epochsInDirName[i] > epochsInDirName[j] - }) - - return epochsInDirName[0], nil -} - -func getLastEpochAndRoundFromStorage( - config config.Config, - marshalizer marshal.Marshalizer, - parentDir string, - defaultEpochString string, - defaultShardString string, - epoch uint32, -) (uint32, uint32, int64, error) { - persisterFactory := NewPersisterFactory(config.BootstrapStorage.DB) - pathWithoutShard := filepath.Join( - parentDir, - fmt.Sprintf("%s_%d", defaultEpochString, epoch), - ) 
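On the interface change a few hunks above: UpdateListAndIndex now takes the peer index as an uint32 instead of an int32, so every implementation and stub has to adopt the new signature. A toy implementation written under that assumption:

    package main

    import "fmt"

    // listIndexUpdater is a toy implementation of the reworked
    // ListIndexUpdaterHandler contract, where the index is now an uint32.
    type listIndexUpdater struct{}

    // UpdateListAndIndex records the new list and position of a peer.
    func (l *listIndexUpdater) UpdateListAndIndex(pubKey string, shardID uint32, list string, index uint32) error {
        fmt.Printf("peer %s: shard %d, list %s, index %d\n", pubKey, shardID, list, index)
        return nil
    }

    // IsInterfaceNil verifies if the underlying object is nil.
    func (l *listIndexUpdater) IsInterfaceNil() bool {
        return l == nil
    }

    func main() {
        updater := &listIndexUpdater{}
        _ = updater.UpdateListAndIndex("pubkey-0", 0, "eligible", 7)
    }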
- shardIdsStr, err := getShardsFromDirectory(pathWithoutShard, defaultShardString) - if err != nil { - return 0, 0, 0, err - } - - var mostRecentBootstrapData *bootstrapStorage.BootstrapData - var mostRecentShard string - highestRoundInStoredShards := int64(0) - - for _, shardIdStr := range shardIdsStr { - persisterPath := filepath.Join( - pathWithoutShard, - fmt.Sprintf("%s_%s", defaultShardString, shardIdStr), - config.BootstrapStorage.DB.FilePath, - ) - - bootstrapData, errGet := getBootstrapDataForPersisterPath(persisterFactory, persisterPath, marshalizer) - if errGet != nil { - continue - } - - if bootstrapData.LastRound > highestRoundInStoredShards { - highestRoundInStoredShards = bootstrapData.LastRound - mostRecentBootstrapData = bootstrapData - mostRecentShard = shardIdStr - } - } - - if mostRecentBootstrapData == nil { - return 0, 0, 0, storage.ErrBootstrapDataNotFoundInStorage - } - shardIDAsUint32, err := convertShardIDToUint32(mostRecentShard) - if err != nil { - return 0, 0, 0, err - } - - return mostRecentBootstrapData.LastHeader.Epoch, shardIDAsUint32, mostRecentBootstrapData.LastRound, nil -} - -func getBootstrapDataForPersisterPath( - persisterFactory *PersisterFactory, - persisterPath string, - marshalizer marshal.Marshalizer, -) (*bootstrapStorage.BootstrapData, error) { - persister, err := persisterFactory.Create(persisterPath) - if err != nil { - return nil, err - } - - defer func() { - errClose := persister.Close() - log.LogIfError(errClose) - }() - - cacher, err := lrucache.NewCache(10) - if err != nil { - return nil, err - } - - storer, err := storageUnit.NewStorageUnit(cacher, persister) - if err != nil { - return nil, err - } - - bootStorer, err := bootstrapStorage.NewBootstrapStorer(marshalizer, storer) - if err != nil { - return nil, err - } - - highestRound := bootStorer.GetHighestRound() - bootstrapData, err := bootStorer.Get(highestRound) - if err != nil { - return nil, err - } - - return &bootstrapData, nil -} - -func getShardsFromDirectory(path string, defaultShardString string) ([]string, error) { - shardIDs := make([]string, 0) - f, err := os.Open(path) - if err != nil { - return nil, err - } - - files, err := f.Readdir(allFiles) - _ = f.Close() - - for _, file := range files { - fileName := file.Name() - stringToSplitBy := defaultShardString + "_" - splitSlice := strings.Split(fileName, stringToSplitBy) - if len(splitSlice) < 2 { - continue - } - - shardIDs = append(shardIDs, splitSlice[1]) - } - - return shardIDs, nil -} - func convertShardIDToUint32(shardIDStr string) (uint32, error) { if shardIDStr == "metachain" { return math.MaxUint32, nil diff --git a/storage/factory/openStorage.go b/storage/factory/openStorage.go new file mode 100644 index 00000000000..c9de1bd996f --- /dev/null +++ b/storage/factory/openStorage.go @@ -0,0 +1,358 @@ +package factory + +import ( + "fmt" + "os" + "path/filepath" + "regexp" + "sort" + "strconv" + "strings" + + "github.com/ElrondNetwork/elrond-go/config" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process/block/bootstrapStorage" + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/ElrondNetwork/elrond-go/storage/lrucache" + "github.com/ElrondNetwork/elrond-go/storage/storageUnit" +) + +// ArgsNewOpenStorageUnits defines the arguments in order to open a set of storage units from disk +type ArgsNewOpenStorageUnits struct { + GeneralConfig config.Config + Marshalizer marshal.Marshalizer + WorkingDir string + ChainID string + DefaultDBPath string + DefaultEpochString string 
+ DefaultShardString string +} + +type openStorageUnits struct { + generalConfig config.Config + marshalizer marshal.Marshalizer + workingDir string + chainID string + defaultDBPath string + defaultEpochString string + defaultShardString string +} + +// TODO refactor this and unit tests +// NewStorageUnitOpenHandler creates an openStorageUnits component +func NewStorageUnitOpenHandler(args ArgsNewOpenStorageUnits) (*openStorageUnits, error) { + o := &openStorageUnits{ + generalConfig: args.GeneralConfig, + marshalizer: args.Marshalizer, + workingDir: args.WorkingDir, + chainID: args.ChainID, + defaultDBPath: args.DefaultDBPath, + defaultEpochString: args.DefaultEpochString, + defaultShardString: args.DefaultShardString, + } + + return o, nil +} + +// OpenStorageUnits opens the defined storage units from the disk if they exists +func (o *openStorageUnits) OpenStorageUnits( + storageUnits []string, +) ([]storage.Storer, error) { + parentDir, lastEpoch, err := getParentDirAndLastEpoch( + o.workingDir, + o.chainID, + o.defaultDBPath, + o.defaultEpochString) + if err != nil { + return nil, err + } + + persisterFactory := NewPersisterFactory(o.generalConfig.BootstrapStorage.DB) + pathWithoutShard := filepath.Join( + parentDir, + fmt.Sprintf("%s_%d", o.defaultEpochString, lastEpoch), + ) + shardIdsStr, err := getShardsFromDirectory(pathWithoutShard, o.defaultShardString) + if err != nil { + return nil, err + } + + mostRecentShard, err := o.getMostUpToDateDirectory(pathWithoutShard, shardIdsStr, persisterFactory) + if err != nil { + return nil, err + } + + openedStorers := make([]storage.Storer, 0) + for _, filePath := range storageUnits { + persisterPath := filepath.Join( + pathWithoutShard, + fmt.Sprintf("%s_%s", o.defaultShardString, mostRecentShard), + filePath, + ) + + persister, err := persisterFactory.Create(persisterPath) + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + errClose := persister.Close() + log.LogIfError(errClose) + } + }() + + cacher, err := lrucache.NewCache(10) + if err != nil { + return nil, err + } + + storer, err := storageUnit.NewStorageUnit(cacher, persister) + if err != nil { + return nil, err + } + + openedStorers = append(openedStorers, storer) + } + + return openedStorers, nil +} + +func (o *openStorageUnits) getMostUpToDateDirectory( + pathWithoutShard string, + shardIdsStr []string, + persisterFactory *PersisterFactory, +) (string, error) { + var mostRecentShard string + highestRoundInStoredShards := int64(0) + + for _, shardIdStr := range shardIdsStr { + persisterPath := filepath.Join( + pathWithoutShard, + fmt.Sprintf("%s_%s", o.defaultShardString, shardIdStr), + o.generalConfig.BootstrapStorage.DB.FilePath, + ) + + bootstrapData, errGet := getBootstrapDataForPersisterPath(persisterFactory, persisterPath, o.marshalizer) + if errGet != nil { + continue + } + + if bootstrapData.LastRound > highestRoundInStoredShards { + highestRoundInStoredShards = bootstrapData.LastRound + mostRecentShard = shardIdStr + } + } + + if len(mostRecentShard) == 0 { + return "", storage.ErrBootstrapDataNotFoundInStorage + } + + return mostRecentShard, nil +} + +// TODO refactor this and test it + +// FindLatestDataFromStorage finds the last data (such as last epoch, shard ID or round) by searching over the +// storage folders and opening older databases +func FindLatestDataFromStorage( + generalConfig config.Config, + marshalizer marshal.Marshalizer, + workingDir string, + chainID string, + defaultDBPath string, + defaultEpochString string, + 
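A sketch of how the storage unit opener defined above might be wired by a caller. Only the ArgsNewOpenStorageUnits fields and the OpenStorageUnits signature come from the code above; the wrapper name and the literal default path strings are illustrative placeholders:

    package example

    import (
        "github.com/ElrondNetwork/elrond-go/config"
        "github.com/ElrondNetwork/elrond-go/marshal"
        "github.com/ElrondNetwork/elrond-go/storage"
        storageFactory "github.com/ElrondNetwork/elrond-go/storage/factory"
    )

    // openLatestStorers opens the given storage unit directories from the most
    // up-to-date shard folder of the latest epoch, using the new handler.
    func openLatestStorers(
        cfg config.Config,
        marshalizer marshal.Marshalizer,
        workingDir string,
        chainID string,
        unitPaths []string,
    ) ([]storage.Storer, error) {
        args := storageFactory.ArgsNewOpenStorageUnits{
            GeneralConfig:      cfg,
            Marshalizer:        marshalizer,
            WorkingDir:         workingDir,
            ChainID:            chainID,
            DefaultDBPath:      "db",    // illustrative value
            DefaultEpochString: "Epoch", // illustrative value
            DefaultShardString: "Shard", // illustrative value
        }

        opener, err := storageFactory.NewStorageUnitOpenHandler(args)
        if err != nil {
            return nil, err
        }

        return opener.OpenStorageUnits(unitPaths)
    }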
defaultShardString string, +) (uint32, uint32, int64, error) { + parentDir, lastEpoch, err := getParentDirAndLastEpoch(workingDir, chainID, defaultDBPath, defaultEpochString) + if err != nil { + return 0, 0, 0, err + } + + return getLastEpochAndRoundFromStorage(generalConfig, marshalizer, parentDir, defaultEpochString, defaultShardString, lastEpoch) +} + +func getParentDirAndLastEpoch( + workingDir string, + chainID string, + defaultDBPath string, + defaultEpochString string, +) (string, uint32, error) { + parentDir := filepath.Join( + workingDir, + defaultDBPath, + chainID) + + f, err := os.Open(parentDir) + if err != nil { + return "", 0, err + } + + files, err := f.Readdir(allFiles) + _ = f.Close() + + if err != nil { + return "", 0, err + } + + epochDirs := make([]string, 0, len(files)) + for _, file := range files { + if !file.IsDir() { + continue + } + + isEpochDir := strings.HasPrefix(file.Name(), defaultEpochString) + if !isEpochDir { + continue + } + + epochDirs = append(epochDirs, file.Name()) + } + + lastEpoch, err := getLastEpochFromDirNames(epochDirs) + if err != nil { + return "", 0, err + } + + return parentDir, lastEpoch, nil +} + +func getLastEpochFromDirNames(epochDirs []string) (uint32, error) { + if len(epochDirs) == 0 { + return 0, nil + } + + re := regexp.MustCompile("[0-9]+") + epochsInDirName := make([]uint32, 0, len(epochDirs)) + + for _, dirname := range epochDirs { + epochStr := re.FindString(dirname) + epoch, err := strconv.ParseInt(epochStr, 10, 64) + if err != nil { + return 0, err + } + + epochsInDirName = append(epochsInDirName, uint32(epoch)) + } + + sort.Slice(epochsInDirName, func(i, j int) bool { + return epochsInDirName[i] > epochsInDirName[j] + }) + + return epochsInDirName[0], nil +} + +func getLastEpochAndRoundFromStorage( + config config.Config, + marshalizer marshal.Marshalizer, + parentDir string, + defaultEpochString string, + defaultShardString string, + epoch uint32, +) (uint32, uint32, int64, error) { + persisterFactory := NewPersisterFactory(config.BootstrapStorage.DB) + pathWithoutShard := filepath.Join( + parentDir, + fmt.Sprintf("%s_%d", defaultEpochString, epoch), + ) + shardIdsStr, err := getShardsFromDirectory(pathWithoutShard, defaultShardString) + if err != nil { + return 0, 0, 0, err + } + + var mostRecentBootstrapData *bootstrapStorage.BootstrapData + var mostRecentShard string + highestRoundInStoredShards := int64(0) + + for _, shardIdStr := range shardIdsStr { + persisterPath := filepath.Join( + pathWithoutShard, + fmt.Sprintf("%s_%s", defaultShardString, shardIdStr), + config.BootstrapStorage.DB.FilePath, + ) + + bootstrapData, errGet := getBootstrapDataForPersisterPath(persisterFactory, persisterPath, marshalizer) + if errGet != nil { + continue + } + + if bootstrapData.LastRound > highestRoundInStoredShards { + highestRoundInStoredShards = bootstrapData.LastRound + mostRecentBootstrapData = bootstrapData + mostRecentShard = shardIdStr + } + } + + if mostRecentBootstrapData == nil { + return 0, 0, 0, storage.ErrBootstrapDataNotFoundInStorage + } + shardIDAsUint32, err := convertShardIDToUint32(mostRecentShard) + if err != nil { + return 0, 0, 0, err + } + + return mostRecentBootstrapData.LastHeader.Epoch, shardIDAsUint32, mostRecentBootstrapData.LastRound, nil +} + +func getBootstrapDataForPersisterPath( + persisterFactory *PersisterFactory, + persisterPath string, + marshalizer marshal.Marshalizer, +) (*bootstrapStorage.BootstrapData, error) { + persister, err := persisterFactory.Create(persisterPath) + if err != nil { + return 
nil, err + } + + defer func() { + errClose := persister.Close() + log.LogIfError(errClose) + }() + + cacher, err := lrucache.NewCache(10) + if err != nil { + return nil, err + } + + storer, err := storageUnit.NewStorageUnit(cacher, persister) + if err != nil { + return nil, err + } + + bootStorer, err := bootstrapStorage.NewBootstrapStorer(marshalizer, storer) + if err != nil { + return nil, err + } + + highestRound := bootStorer.GetHighestRound() + bootstrapData, err := bootStorer.Get(highestRound) + if err != nil { + return nil, err + } + + return &bootstrapData, nil +} + +func getShardsFromDirectory(path string, defaultShardString string) ([]string, error) { + shardIDs := make([]string, 0) + f, err := os.Open(path) + if err != nil { + return nil, err + } + + files, err := f.Readdir(allFiles) + _ = f.Close() + + for _, file := range files { + fileName := file.Name() + stringToSplitBy := defaultShardString + "_" + splitSlice := strings.Split(fileName, stringToSplitBy) + if len(splitSlice) < 2 { + continue + } + + shardIDs = append(shardIDs, splitSlice[1]) + } + + return shardIDs, nil +} diff --git a/update/errors.go b/update/errors.go index 605dc1c574b..bf8c51a0c7a 100644 --- a/update/errors.go +++ b/update/errors.go @@ -157,3 +157,6 @@ var ErrNilAccountsDBSyncContainer = errors.New("nil accounts db sync container") // ErrNilValidatorInfoProcessor signals that nil validator info was provided var ErrNilValidatorInfoProcessor = errors.New("nil validator info processor") + +// ErrTimeIsOut signals that time is out +var ErrTimeIsOut = errors.New("time is out") diff --git a/update/factory/trieSyncersContainerFactory.go b/update/factory/trieSyncersContainerFactory.go index 6d4af35e60a..ad6327c7efc 100644 --- a/update/factory/trieSyncersContainerFactory.go +++ b/update/factory/trieSyncersContainerFactory.go @@ -1,8 +1,6 @@ package factory import ( - "time" - "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/data/state" @@ -95,7 +93,7 @@ func (t *trieSyncersContainerFactory) createOneTrieSyncer( return update.ErrNilDataTrieContainer } - trieSyncer, err := trie.NewTrieSyncer(t.requestHandler, t.trieCacher, dataTrie, time.Minute, shId, trieTopicFromAccountType(accType)) + trieSyncer, err := trie.NewTrieSyncer(t.requestHandler, t.trieCacher, dataTrie, shId, trieTopicFromAccountType(accType)) if err != nil { return err } diff --git a/update/interface.go b/update/interface.go index 286f91a3358..a6c4eb36edf 100644 --- a/update/interface.go +++ b/update/interface.go @@ -1,6 +1,7 @@ package update import ( + "context" "time" "github.com/ElrondNetwork/elrond-go/data" @@ -21,7 +22,7 @@ type StateSyncer interface { // TrieSyncer synchronizes the trie, asking on the network for the missing nodes type TrieSyncer interface { - StartSyncing(rootHash []byte) error + StartSyncing(rootHash []byte, ctx context.Context) error Trie() data.Trie IsInterfaceNil() bool } @@ -88,6 +89,7 @@ type RequestHandler interface { RequestMetaHeaderByNonce(nonce uint64) RequestShardHeaderByNonce(shardId uint32, nonce uint64) RequestTrieNodes(destShardID uint32, hash []byte, topic string) + RequestInterval() time.Duration IsInterfaceNil() bool } @@ -121,18 +123,26 @@ type EpochStartTriesSyncHandler interface { // EpochStartPendingMiniBlocksSyncHandler defines the methods to sync all pending miniblocks type EpochStartPendingMiniBlocksSyncHandler interface { - SyncPendingMiniBlocksFromMeta(epochStart *block.MetaBlock, unFinished map[string]*block.MetaBlock, 
waitTime time.Duration) error + SyncPendingMiniBlocksFromMeta(epochStart *block.MetaBlock, unFinished map[string]*block.MetaBlock, ctx context.Context) error GetMiniBlocks() (map[string]*block.MiniBlock, error) IsInterfaceNil() bool } // PendingTransactionsSyncHandler defines the methods to sync all transactions from a set of miniblocks type PendingTransactionsSyncHandler interface { - SyncPendingTransactionsFor(miniBlocks map[string]*block.MiniBlock, epoch uint32, waitTime time.Duration) error + SyncPendingTransactionsFor(miniBlocks map[string]*block.MiniBlock, epoch uint32, ctx context.Context) error GetTransactions() (map[string]data.TransactionHandler, error) IsInterfaceNil() bool } +// MissingHeadersByHashSyncer defines the methods to sync all missing headers by hash +type MissingHeadersByHashSyncer interface { + SyncMissingHeadersByHash(shardIDs []uint32, headersHashes [][]byte, ctx context.Context) error + GetHeaders() (map[string]data.HeaderHandler, error) + ClearFields() + IsInterfaceNil() bool +} + // DataWriter defines the methods to write data type DataWriter interface { WriteString(s string) (int, error) diff --git a/update/mock/chainStorerMock.go b/update/mock/chainStorerMock.go index ba92d016424..1a18f9b8f50 100644 --- a/update/mock/chainStorerMock.go +++ b/update/mock/chainStorerMock.go @@ -6,7 +6,7 @@ import ( "github.com/pkg/errors" ) -// ChainStorerMock is a mock implementation of the ChianStorer interface +// ChainStorerMock is a mock implementation of the ChainStorer interface type ChainStorerMock struct { AddStorerCalled func(key dataRetriever.UnitType, s storage.Storer) GetStorerCalled func(unitType dataRetriever.UnitType) storage.Storer diff --git a/update/mock/epochMiniBlocksSyncHandlerMock.go b/update/mock/epochMiniBlocksSyncHandlerMock.go index 1b50417882f..ae4266241ea 100644 --- a/update/mock/epochMiniBlocksSyncHandlerMock.go +++ b/update/mock/epochMiniBlocksSyncHandlerMock.go @@ -1,19 +1,19 @@ package mock import ( - "time" + "context" "github.com/ElrondNetwork/elrond-go/data/block" ) type EpochStartPendingMiniBlocksSyncHandlerMock struct { - SyncPendingMiniBlocksFromMetaCalled func(epochStart *block.MetaBlock, unFinished map[string]*block.MetaBlock, waitTime time.Duration) error + SyncPendingMiniBlocksFromMetaCalled func(epochStart *block.MetaBlock, unFinished map[string]*block.MetaBlock, ctx context.Context) error GetMiniBlocksCalled func() (map[string]*block.MiniBlock, error) } -func (ep *EpochStartPendingMiniBlocksSyncHandlerMock) SyncPendingMiniBlocksFromMeta(epochStart *block.MetaBlock, unFinished map[string]*block.MetaBlock, waitTime time.Duration) error { +func (ep *EpochStartPendingMiniBlocksSyncHandlerMock) SyncPendingMiniBlocksFromMeta(epochStart *block.MetaBlock, unFinished map[string]*block.MetaBlock, ctx context.Context) error { if ep.SyncPendingMiniBlocksFromMetaCalled != nil { - return ep.SyncPendingMiniBlocksFromMetaCalled(epochStart, unFinished, waitTime) + return ep.SyncPendingMiniBlocksFromMetaCalled(epochStart, unFinished, ctx) } return nil } diff --git a/update/mock/pendingTransactionsSyncHandlerMock.go b/update/mock/pendingTransactionsSyncHandlerMock.go index 5a6f3c1de16..08b5266e725 100644 --- a/update/mock/pendingTransactionsSyncHandlerMock.go +++ b/update/mock/pendingTransactionsSyncHandlerMock.go @@ -1,20 +1,19 @@ package mock import ( - "time" - "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" + "golang.org/x/net/context" ) type PendingTransactionsSyncHandlerMock struct { - 
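Since the update interfaces now receive a context.Context instead of a waitTime duration, callers bound the work with context.WithTimeout and cancel it when done, as the updated sync coordinator further down does with time.Hour. A minimal sketch against a stand-in interface:

    package example

    import (
        "context"
        "time"
    )

    // contextSyncer stands in for any of the reworked syncers (TrieSyncer,
    // the pending miniblocks / transactions sync handlers, ...).
    type contextSyncer interface {
        StartSyncing(rootHash []byte, ctx context.Context) error
    }

    // syncWithTimeout derives a bounded context, passes it to the syncer and
    // always releases it, mirroring the calling pattern used by the updated
    // sync coordinator.
    func syncWithTimeout(s contextSyncer, rootHash []byte, timeout time.Duration) error {
        ctx, cancel := context.WithTimeout(context.Background(), timeout)
        defer cancel()

        return s.StartSyncing(rootHash, ctx)
    }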
SyncPendingTransactionsForCalled func(miniBlocks map[string]*block.MiniBlock, epoch uint32, waitTime time.Duration) error + SyncPendingTransactionsForCalled func(miniBlocks map[string]*block.MiniBlock, epoch uint32, ctx context.Context) error GetTransactionsCalled func() (map[string]data.TransactionHandler, error) } -func (et *PendingTransactionsSyncHandlerMock) SyncPendingTransactionsFor(miniBlocks map[string]*block.MiniBlock, epoch uint32, waitTime time.Duration) error { +func (et *PendingTransactionsSyncHandlerMock) SyncPendingTransactionsFor(miniBlocks map[string]*block.MiniBlock, epoch uint32, ctx context.Context) error { if et.SyncPendingTransactionsForCalled != nil { - return et.SyncPendingTransactionsForCalled(miniBlocks, epoch, waitTime) + return et.SyncPendingTransactionsForCalled(miniBlocks, epoch, ctx) } return nil } diff --git a/update/mock/requestHandlerStub.go b/update/mock/requestHandlerStub.go index 93273a719a4..69be9806699 100644 --- a/update/mock/requestHandlerStub.go +++ b/update/mock/requestHandlerStub.go @@ -1,5 +1,7 @@ package mock +import "time" + type RequestHandlerStub struct { RequestShardHeaderCalled func(shardId uint32, hash []byte) RequestMetaHeaderCalled func(hash []byte) @@ -13,15 +15,20 @@ type RequestHandlerStub struct { RequestStartOfEpochMetaBlockCalled func(epoch uint32) } -func (rhs *RequestHandlerStub) SetEpoch(epoch uint32) { +// RequestInterval - +func (rhs *RequestHandlerStub) RequestInterval() time.Duration { + return time.Second +} + +func (rhs *RequestHandlerStub) SetEpoch(_ uint32) { panic("implement me") } -func (rhs *RequestHandlerStub) RequestMiniBlocks(destShardID uint32, miniblocksHashes [][]byte) { +func (rhs *RequestHandlerStub) RequestMiniBlocks(_ uint32, _ [][]byte) { panic("implement me") } -func (rhs *RequestHandlerStub) RequestTrieNodes(destShardID uint32, hash []byte, topic string) { +func (rhs *RequestHandlerStub) RequestTrieNodes(_ uint32, _ []byte, _ string) { panic("implement me") } diff --git a/update/mock/trieSyncersStub.go b/update/mock/trieSyncersStub.go index 97153d4f5dd..db1b0884fb5 100644 --- a/update/mock/trieSyncersStub.go +++ b/update/mock/trieSyncersStub.go @@ -1,6 +1,8 @@ package mock import ( + "context" + "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/update" ) @@ -12,13 +14,13 @@ type TrieSyncersStub struct { ReplaceCalled func(key string, val update.TrieSyncer) error RemoveCalled func(key string) LenCalled func() int - StartSyncingCalled func(rootHash []byte) error + StartSyncingCalled func(rootHash []byte, ctx context.Context) error TrieCalled func() data.Trie } -func (tss *TrieSyncersStub) StartSyncing(rootHash []byte) error { +func (tss *TrieSyncersStub) StartSyncing(rootHash []byte, ctx context.Context) error { if tss.StartSyncingCalled != nil { - return tss.StartSyncingCalled(rootHash) + return tss.StartSyncingCalled(rootHash, ctx) } return nil } diff --git a/update/mock/triesHolderMock.go b/update/mock/triesHolderMock.go index 49101f331b6..c6332d0fe55 100644 --- a/update/mock/triesHolderMock.go +++ b/update/mock/triesHolderMock.go @@ -2,30 +2,46 @@ package mock import "github.com/ElrondNetwork/elrond-go/data" +// TriesHolderMock - type TriesHolderMock struct { PutCalled func([]byte, data.Trie) + RemoveCalled func([]byte, data.Trie) GetCalled func([]byte) data.Trie GetAllCalled func() []data.Trie ResetCalled func() } +// Put - func (thm *TriesHolderMock) Put(key []byte, trie data.Trie) { if thm.PutCalled != nil { thm.PutCalled(key, trie) } } + +// Replace - +func (thm 
*TriesHolderMock) Replace(key []byte, trie data.Trie) { + if thm.RemoveCalled != nil { + thm.RemoveCalled(key, trie) + } +} + +// Get - func (thm *TriesHolderMock) Get(key []byte) data.Trie { if thm.GetCalled != nil { return thm.GetCalled(key) } return nil } + +// GetAll - func (thm *TriesHolderMock) GetAll() []data.Trie { if thm.GetAllCalled != nil { return thm.GetAllCalled() } return nil } + +// Reset - func (thm *TriesHolderMock) Reset() { if thm.ResetCalled != nil { thm.ResetCalled() diff --git a/update/sync/coordinator.go b/update/sync/coordinator.go index ee003aeebb5..68d411f7a8b 100644 --- a/update/sync/coordinator.go +++ b/update/sync/coordinator.go @@ -1,6 +1,7 @@ package sync import ( + "context" "sync" "time" @@ -96,7 +97,9 @@ func (ss *syncState) SyncAllState(epoch uint32) error { go func() { defer wg.Done() - err := ss.miniBlocks.SyncPendingMiniBlocksFromMeta(meta, unFinished, time.Hour) + ctx, cancel := context.WithTimeout(context.Background(), time.Hour) + err := ss.miniBlocks.SyncPendingMiniBlocksFromMeta(meta, unFinished, ctx) + cancel() if err != nil { mutErr.Lock() errFound = err @@ -112,7 +115,9 @@ func (ss *syncState) SyncAllState(epoch uint32) error { return } - err = ss.transactions.SyncPendingTransactionsFor(syncedMiniBlocks, ss.syncingEpoch, time.Hour) + ctx, cancel = context.WithTimeout(context.Background(), time.Hour) + err = ss.transactions.SyncPendingTransactionsFor(syncedMiniBlocks, ss.syncingEpoch, ctx) + cancel() if err != nil { mutErr.Lock() errFound = err diff --git a/update/sync/coordinator_test.go b/update/sync/coordinator_test.go index 7ed436f932b..0b775db4de5 100644 --- a/update/sync/coordinator_test.go +++ b/update/sync/coordinator_test.go @@ -1,11 +1,11 @@ package sync import ( + "context" "encoding/json" "errors" "math/big" "testing" - "time" "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/data" @@ -216,7 +216,7 @@ func TestSyncState_SyncAllStatePendingMiniBlocksErr(t *testing.T) { }, Tries: &mock.EpochStartTriesSyncHandlerMock{}, MiniBlocks: &mock.EpochStartPendingMiniBlocksSyncHandlerMock{ - SyncPendingMiniBlocksFromMetaCalled: func(meta *block.MetaBlock, unFinished map[string]*block.MetaBlock, waitTime time.Duration) error { + SyncPendingMiniBlocksFromMetaCalled: func(meta *block.MetaBlock, unFinished map[string]*block.MetaBlock, ctx context.Context) error { return localErr }, }, @@ -275,7 +275,7 @@ func TestSyncState_SyncAllStateSyncTxsErr(t *testing.T) { Tries: &mock.EpochStartTriesSyncHandlerMock{}, MiniBlocks: &mock.EpochStartPendingMiniBlocksSyncHandlerMock{}, Transactions: &mock.PendingTransactionsSyncHandlerMock{ - SyncPendingTransactionsForCalled: func(miniBlocks map[string]*block.MiniBlock, epoch uint32, waitTime time.Duration) error { + SyncPendingTransactionsForCalled: func(miniBlocks map[string]*block.MiniBlock, epoch uint32, ctx context.Context) error { return localErr }, }, diff --git a/update/sync/syncAccountsDBs.go b/update/sync/syncAccountsDBs.go index 95d95027502..d02e276e7f4 100644 --- a/update/sync/syncAccountsDBs.go +++ b/update/sync/syncAccountsDBs.go @@ -50,7 +50,6 @@ func NewSyncAccountsDBsHandler(args ArgsNewSyncAccountsDBsHandler) (*syncAccount // SyncTriesFrom syncs all the state tries from an epoch start metachain func (st *syncAccountsDBs) SyncTriesFrom(meta *block.MetaBlock, waitTime time.Duration) error { - //TODO: use context instead of waitTime if !meta.IsStartOfEpochBlock() { return update.ErrNotEpochStartBlock } diff --git a/update/sync/syncHeaders.go b/update/sync/syncHeaders.go 
index be2b09882e1..a486f28e265 100644 --- a/update/sync/syncHeaders.go +++ b/update/sync/syncHeaders.go @@ -153,6 +153,7 @@ func (h *headersToSync) receivedUnFinishedMetaBlocks(headerHandler data.HeaderHa // SyncUnFinishedMetaHeaders syncs and validates all the unfinished metaHeaders for each shard func (h *headersToSync) SyncUnFinishedMetaHeaders(epoch uint32) error { + // TODO: do this with context.Context err := h.syncEpochStartMetaHeader(epoch, waitTimeForHeaders) if err != nil { return err @@ -259,7 +260,7 @@ func (h *headersToSync) syncFirstPendingMetaBlocks(waitTime time.Duration) error h.firstPendingMetaBlocks[metaHash] = metaHdr } - _ = process.EmptyChannel(h.chReceivedAll) + _ = core.EmptyChannel(h.chReceivedAll) for metaHash := range h.missingMetaBlocks { h.stopSyncing = false h.requestHandler.RequestMetaHeader([]byte(metaHash)) @@ -290,7 +291,7 @@ func (h *headersToSync) syncAllNeededMetaHeaders(waitTime time.Duration) error { lowestPendingNonce := h.lowestPendingNonceFrom(h.firstPendingMetaBlocks) h.computeMissingNonce(lowestPendingNonce, h.epochStartMetaBlock.Nonce) - _ = process.EmptyChannel(h.chReceivedAll) + _ = core.EmptyChannel(h.chReceivedAll) for nonce := range h.missingMetaNonces { h.stopSyncing = false h.requestHandler.RequestMetaHeaderByNonce(nonce) diff --git a/update/sync/syncHeadersByHash.go b/update/sync/syncHeadersByHash.go new file mode 100644 index 00000000000..bd1727c07ef --- /dev/null +++ b/update/sync/syncHeadersByHash.go @@ -0,0 +1,217 @@ +package sync + +import ( + "context" + "sync" + "time" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/ElrondNetwork/elrond-go/update" +) + +type syncHeadersByHash struct { + mutMissingHdrs sync.Mutex + mapHeaders map[string]data.HeaderHandler + mapHashes map[string]struct{} + pool dataRetriever.HeadersPool + storage update.HistoryStorer + chReceivedAll chan bool + marshalizer marshal.Marshalizer + stopSyncing bool + epochToSync uint32 + syncedAll bool + requestHandler process.RequestHandler + waitTimeBetweenRequests time.Duration +} + +// ArgsNewMissingHeadersByHashSyncer defines the arguments needed for the sycner +type ArgsNewMissingHeadersByHashSyncer struct { + Storage storage.Storer + Cache dataRetriever.HeadersPool + Marshalizer marshal.Marshalizer + RequestHandler process.RequestHandler +} + +// NewMissingheadersByHashSyncer creates a syncer for all missing headers +func NewMissingheadersByHashSyncer(args ArgsNewMissingHeadersByHashSyncer) (*syncHeadersByHash, error) { + if check.IfNil(args.Storage) { + return nil, dataRetriever.ErrNilHeadersStorage + } + if check.IfNil(args.Cache) { + return nil, update.ErrNilCacher + } + if check.IfNil(args.Marshalizer) { + return nil, dataRetriever.ErrNilMarshalizer + } + if check.IfNil(args.RequestHandler) { + return nil, process.ErrNilRequestHandler + } + + p := &syncHeadersByHash{ + mutMissingHdrs: sync.Mutex{}, + mapHeaders: make(map[string]data.HeaderHandler), + mapHashes: make(map[string]struct{}), + pool: args.Cache, + storage: args.Storage, + chReceivedAll: make(chan bool), + requestHandler: args.RequestHandler, + stopSyncing: true, + syncedAll: false, + marshalizer: args.Marshalizer, + waitTimeBetweenRequests: 
args.RequestHandler.RequestInterval(), + } + + p.pool.RegisterHandler(p.receivedHeader) + + return p, nil +} + +// SyncMissingHeadersByHash syncs the missing headers +func (m *syncHeadersByHash) SyncMissingHeadersByHash(shardIDs []uint32, headersHashes [][]byte, ctx context.Context) error { + _ = core.EmptyChannel(m.chReceivedAll) + + mapHashesToRequest := make(map[string]uint32) + for index, hash := range headersHashes { + mapHashesToRequest[string(hash)] = shardIDs[index] + } + + for { + requestedHdrs := 0 + + m.mutMissingHdrs.Lock() + m.stopSyncing = false + for hash, shardId := range mapHashesToRequest { + if _, ok := m.mapHeaders[hash]; ok { + delete(mapHashesToRequest, hash) + } + + m.mapHashes[hash] = struct{}{} + header, ok := m.getHeaderFromPoolOrStorage([]byte(hash)) + if ok { + m.mapHeaders[hash] = header + delete(mapHashesToRequest, hash) + continue + } + + requestedHdrs++ + if shardId == core.MetachainShardId { + m.requestHandler.RequestMetaHeader([]byte(hash)) + continue + } + + m.requestHandler.RequestShardHeader(shardId, []byte(hash)) + } + m.mutMissingHdrs.Unlock() + + if requestedHdrs == 0 { + m.mutMissingHdrs.Lock() + m.stopSyncing = true + m.syncedAll = true + m.mutMissingHdrs.Unlock() + return nil + } + + select { + case <-m.chReceivedAll: + m.mutMissingHdrs.Lock() + m.stopSyncing = true + m.syncedAll = true + m.mutMissingHdrs.Unlock() + return nil + case <-time.After(m.waitTimeBetweenRequests): + continue + case <-ctx.Done(): + m.mutMissingHdrs.Lock() + m.stopSyncing = true + m.mutMissingHdrs.Unlock() + return update.ErrTimeIsOut + } + } +} + +// receivedHeader is a callback function when a new header was received +// it will further ask for missing transactions +func (m *syncHeadersByHash) receivedHeader(hdrHandler data.HeaderHandler, hdrHash []byte) { + m.mutMissingHdrs.Lock() + if m.stopSyncing { + m.mutMissingHdrs.Unlock() + return + } + + if _, ok := m.mapHashes[string(hdrHash)]; !ok { + m.mutMissingHdrs.Unlock() + return + } + + if _, ok := m.mapHeaders[string(hdrHash)]; ok { + m.mutMissingHdrs.Unlock() + return + } + + m.mapHeaders[string(hdrHash)] = hdrHandler + receivedAll := len(m.mapHashes) == len(m.mapHeaders) + m.mutMissingHdrs.Unlock() + if receivedAll { + m.chReceivedAll <- true + } +} + +func (m *syncHeadersByHash) getHeaderFromPoolOrStorage(hash []byte) (data.HeaderHandler, bool) { + header, ok := m.getHeaderFromPool(hash) + if ok { + return header, true + } + + hdrData, err := GetDataFromStorage(hash, m.storage, m.epochToSync) + if err != nil { + return nil, false + } + + var hdr block.Header + err = m.marshalizer.Unmarshal(hdr, hdrData) + if err != nil { + return nil, false + } + + return &hdr, true +} + +func (m *syncHeadersByHash) getHeaderFromPool(hash []byte) (data.HeaderHandler, bool) { + val, err := m.pool.GetHeaderByHash(hash) + if err != nil { + return nil, false + } + + return val, true +} + +// GetHeaders returns the synced headers +func (m *syncHeadersByHash) GetHeaders() (map[string]data.HeaderHandler, error) { + m.mutMissingHdrs.Lock() + defer m.mutMissingHdrs.Unlock() + if !m.syncedAll { + return nil, update.ErrNotSynced + } + + return m.mapHeaders, nil +} + +// ClearFields will clear all the maps +func (m *syncHeadersByHash) ClearFields() { + m.mutMissingHdrs.Lock() + m.mapHashes = make(map[string]struct{}) + m.mapHeaders = make(map[string]data.HeaderHandler) + m.mutMissingHdrs.Unlock() +} + +// IsInterfaceNil returns nil if underlying object is nil +func (m *syncHeadersByHash) IsInterfaceNil() bool { + return m == nil +} diff --git 
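The header syncer above and the reworked miniblocks syncer that follows share the same request/retry shape: request whatever is still missing, then block on either the "all received" channel, the request interval (retry), or the caller's context (give up with ErrTimeIsOut). A distilled, stand-alone sketch of that loop:

    package example

    import (
        "context"
        "errors"
        "time"
    )

    // errTimeIsOut mirrors update.ErrTimeIsOut for this stand-alone sketch.
    var errTimeIsOut = errors.New("time is out")

    // requestUntilReceived issues requests for the still-missing items and then
    // blocks on one of three events: everything arrived, the per-request
    // interval elapsed (retry), or the context expired (give up).
    func requestUntilReceived(
        ctx context.Context,
        chReceivedAll chan bool,
        requestInterval time.Duration,
        requestMissing func() int, // returns how many items were requested this round
    ) error {
        for {
            if requestMissing() == 0 {
                return nil
            }

            select {
            case <-chReceivedAll:
                return nil
            case <-time.After(requestInterval):
                continue
            case <-ctx.Done():
                return errTimeIsOut
            }
        }
    }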
a/update/sync/syncMiniBlocks.go b/update/sync/syncMiniBlocks.go index 5284964412c..186ac9bc63e 100644 --- a/update/sync/syncMiniBlocks.go +++ b/update/sync/syncMiniBlocks.go @@ -1,9 +1,11 @@ package sync import ( + "context" "sync" "time" + "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/dataRetriever" @@ -14,17 +16,18 @@ import ( ) type pendingMiniBlocks struct { - mutPendingMb sync.Mutex - mapMiniBlocks map[string]*block.MiniBlock - mapHashes map[string]struct{} - pool storage.Cacher - storage update.HistoryStorer - chReceivedAll chan bool - marshalizer marshal.Marshalizer - stopSyncing bool - epochToSync uint32 - syncedAll bool - requestHandler process.RequestHandler + mutPendingMb sync.Mutex + mapMiniBlocks map[string]*block.MiniBlock + mapHashes map[string]struct{} + pool storage.Cacher + storage update.HistoryStorer + chReceivedAll chan bool + marshalizer marshal.Marshalizer + stopSyncing bool + epochToSync uint32 + syncedAll bool + requestHandler process.RequestHandler + waitTimeBetweenRequests time.Duration } // ArgsNewPendingMiniBlocksSyncer defines the arguments needed for the sycner @@ -51,16 +54,17 @@ func NewPendingMiniBlocksSyncer(args ArgsNewPendingMiniBlocksSyncer) (*pendingMi } p := &pendingMiniBlocks{ - mutPendingMb: sync.Mutex{}, - mapMiniBlocks: make(map[string]*block.MiniBlock), - mapHashes: make(map[string]struct{}), - pool: args.Cache, - storage: args.Storage, - chReceivedAll: make(chan bool), - requestHandler: args.RequestHandler, - stopSyncing: true, - syncedAll: false, - marshalizer: args.Marshalizer, + mutPendingMb: sync.Mutex{}, + mapMiniBlocks: make(map[string]*block.MiniBlock), + mapHashes: make(map[string]struct{}), + pool: args.Cache, + storage: args.Storage, + chReceivedAll: make(chan bool), + requestHandler: args.RequestHandler, + stopSyncing: true, + syncedAll: false, + marshalizer: args.Marshalizer, + waitTimeBetweenRequests: args.RequestHandler.RequestInterval(), } p.pool.RegisterHandler(p.receivedMiniBlock) @@ -69,11 +73,7 @@ func NewPendingMiniBlocksSyncer(args ArgsNewPendingMiniBlocksSyncer) (*pendingMi } // SyncPendingMiniBlocksFromMeta syncs the pending miniblocks from an epoch start metaBlock -func (p *pendingMiniBlocks) SyncPendingMiniBlocksFromMeta( - epochStart *block.MetaBlock, - unFinished map[string]*block.MetaBlock, - waitTime time.Duration, -) error { +func (p *pendingMiniBlocks) SyncPendingMiniBlocksFromMeta(epochStart *block.MetaBlock, unFinished map[string]*block.MetaBlock, ctx context.Context) error { if !epochStart.IsStartOfEpochBlock() { return update.ErrNotEpochStartBlock } @@ -93,42 +93,72 @@ func (p *pendingMiniBlocks) SyncPendingMiniBlocksFromMeta( listPendingMiniBlocks = append(listPendingMiniBlocks, computedPending...) 
} - _ = process.EmptyChannel(p.chReceivedAll) + return p.syncMiniBlocks(listPendingMiniBlocks, ctx) +} - requestedMBs := 0 - p.mutPendingMb.Lock() - p.stopSyncing = false - for _, mbHeader := range listPendingMiniBlocks { - p.mapHashes[string(mbHeader.Hash)] = struct{}{} - miniBlock, ok := p.getMiniBlockFromPoolOrStorage(mbHeader.Hash) - if ok { - p.mapMiniBlocks[string(mbHeader.Hash)] = miniBlock - continue - } +// SyncPendingMiniBlocks will sync the miniblocks for the given miniblock headers +func (p *pendingMiniBlocks) SyncPendingMiniBlocks(miniBlockHeaders []block.ShardMiniBlockHeader, ctx context.Context) error { + return p.syncMiniBlocks(miniBlockHeaders, ctx) +} - requestedMBs++ - p.requestHandler.RequestMiniBlock(mbHeader.SenderShardID, mbHeader.Hash) +func (p *pendingMiniBlocks) syncMiniBlocks(listPendingMiniBlocks []block.ShardMiniBlockHeader, ctx context.Context) error { + _ = core.EmptyChannel(p.chReceivedAll) + + mapHashesToRequest := make(map[string]uint32) + for _, mbHeader := range listPendingMiniBlocks { + mapHashesToRequest[string(mbHeader.Hash)] = mbHeader.SenderShardID } + + p.mutPendingMb.Lock() + p.stopSyncing = false p.mutPendingMb.Unlock() - var err error - defer func() { + for { + requestedMBs := 0 p.mutPendingMb.Lock() - p.stopSyncing = true - if err == nil { - p.syncedAll = true + p.stopSyncing = false + for hash, shardId := range mapHashesToRequest { + if _, ok := p.mapMiniBlocks[hash]; ok { + delete(mapHashesToRequest, hash) + } + + p.mapHashes[hash] = struct{}{} + miniBlock, ok := p.getMiniBlockFromPoolOrStorage([]byte(hash)) + if ok { + p.mapMiniBlocks[hash] = miniBlock + delete(mapHashesToRequest, hash) + continue + } + + p.requestHandler.RequestMiniBlock(shardId, []byte(hash)) + requestedMBs++ } p.mutPendingMb.Unlock() - }() - if requestedMBs > 0 { - err = WaitFor(p.chReceivedAll, waitTime) - if err != nil { - return err + if requestedMBs == 0 { + p.mutPendingMb.Lock() + p.stopSyncing = true + p.syncedAll = true + p.mutPendingMb.Unlock() + return nil } - } - return nil + select { + case <-p.chReceivedAll: + p.mutPendingMb.Lock() + p.stopSyncing = true + p.syncedAll = true + p.mutPendingMb.Unlock() + return nil + case <-time.After(p.waitTimeBetweenRequests): + continue + case <-ctx.Done(): + p.mutPendingMb.Lock() + p.stopSyncing = true + p.mutPendingMb.Unlock() + return update.ErrTimeIsOut + } + } } func (p *pendingMiniBlocks) createNonceToHashMap(unFinished map[string]*block.MetaBlock) map[uint64]string { @@ -283,6 +313,14 @@ func (p *pendingMiniBlocks) GetMiniBlocks() (map[string]*block.MiniBlock, error) return p.mapMiniBlocks, nil } +// ClearFields will clear all the maps +func (p *pendingMiniBlocks) ClearFields() { + p.mutPendingMb.Lock() + p.mapHashes = make(map[string]struct{}) + p.mapMiniBlocks = make(map[string]*block.MiniBlock) + p.mutPendingMb.Unlock() +} + // IsInterfaceNil returns nil if underlying object is nil func (p *pendingMiniBlocks) IsInterfaceNil() bool { return p == nil diff --git a/update/sync/syncMiniBlocks_test.go b/update/sync/syncMiniBlocks_test.go index 05b5661f87b..18f5074c2da 100644 --- a/update/sync/syncMiniBlocks_test.go +++ b/update/sync/syncMiniBlocks_test.go @@ -1,6 +1,7 @@ package sync import ( + "context" "errors" "testing" "time" @@ -116,7 +117,9 @@ func TestSyncPendingMiniBlocksFromMeta_MiniBlocksInPool(t *testing.T) { } unFinished := make(map[string]*block.MetaBlock) unFinished["firstPending"] = metaBlock - err = pendingMiniBlocksSyncer.SyncPendingMiniBlocksFromMeta(metaBlock, unFinished, time.Second) + ctx, cancel
:= context.WithTimeout(context.Background(), time.Second) + err = pendingMiniBlocksSyncer.SyncPendingMiniBlocksFromMeta(metaBlock, unFinished, ctx) + cancel() require.Nil(t, err) require.True(t, miniBlockInPool) @@ -167,7 +170,9 @@ func TestSyncPendingMiniBlocksFromMeta_MiniBlocksInPoolMissingTimeout(t *testing } unFinished := make(map[string]*block.MetaBlock) unFinished["firstPending"] = metaBlock - err = pendingMiniBlocksSyncer.SyncPendingMiniBlocksFromMeta(metaBlock, unFinished, time.Second) + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + err = pendingMiniBlocksSyncer.SyncPendingMiniBlocksFromMeta(metaBlock, unFinished, ctx) + cancel() require.Equal(t, process.ErrTimeIsOut, err) } @@ -215,6 +220,8 @@ func TestSyncPendingMiniBlocksFromMeta_MiniBlocksInPoolReceive(t *testing.T) { _ = pendingMiniBlocksSyncer.pool.Put(mbHash, mb) }() - err = pendingMiniBlocksSyncer.SyncPendingMiniBlocksFromMeta(metaBlock, unFinished, time.Second) + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + err = pendingMiniBlocksSyncer.SyncPendingMiniBlocksFromMeta(metaBlock, unFinished, ctx) + cancel() require.Nil(t, err) } diff --git a/update/sync/syncTransactions.go b/update/sync/syncTransactions.go index 0698cda2c6b..55f993a076c 100644 --- a/update/sync/syncTransactions.go +++ b/update/sync/syncTransactions.go @@ -1,9 +1,11 @@ package sync import ( + "context" "sync" "time" + "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" @@ -17,17 +19,18 @@ import ( ) type pendingTransactions struct { - mutPendingTx sync.Mutex - mapTransactions map[string]data.TransactionHandler - mapHashes map[string]*block.MiniBlock - txPools map[block.Type]dataRetriever.ShardedDataCacherNotifier - storage map[block.Type]update.HistoryStorer - chReceivedAll chan bool - requestHandler process.RequestHandler - marshalizer marshal.Marshalizer - epochToSync uint32 - stopSync bool - syncedAll bool + mutPendingTx sync.Mutex + mapTransactions map[string]data.TransactionHandler + mapHashes map[string]*block.MiniBlock + txPools map[block.Type]dataRetriever.ShardedDataCacherNotifier + storage map[block.Type]update.HistoryStorer + chReceivedAll chan bool + requestHandler process.RequestHandler + marshalizer marshal.Marshalizer + epochToSync uint32 + stopSync bool + syncedAll bool + waitTimeBetweenRequests time.Duration } // ArgsNewPendingTransactionsSyncer defines the arguments needed for a new transactions syncer @@ -54,14 +57,15 @@ func NewPendingTransactionsSyncer(args ArgsNewPendingTransactionsSyncer) (*pendi } p := &pendingTransactions{ - mutPendingTx: sync.Mutex{}, - mapTransactions: make(map[string]data.TransactionHandler), - mapHashes: make(map[string]*block.MiniBlock), - chReceivedAll: make(chan bool), - requestHandler: args.RequestHandler, - marshalizer: args.Marshalizer, - stopSync: true, - syncedAll: true, + mutPendingTx: sync.Mutex{}, + mapTransactions: make(map[string]data.TransactionHandler), + mapHashes: make(map[string]*block.MiniBlock), + chReceivedAll: make(chan bool), + requestHandler: args.RequestHandler, + marshalizer: args.Marshalizer, + stopSync: true, + syncedAll: true, + waitTimeBetweenRequests: args.RequestHandler.RequestInterval(), } p.txPools = make(map[block.Type]dataRetriever.ShardedDataCacherNotifier) @@ -82,42 +86,48 @@ func NewPendingTransactionsSyncer(args ArgsNewPendingTransactionsSyncer) (*pendi } // SyncPendingTransactionsFor syncs 
pending transactions for a list of miniblocks -func (p *pendingTransactions) SyncPendingTransactionsFor(miniBlocks map[string]*block.MiniBlock, epoch uint32, waitTime time.Duration) error { - _ = process.EmptyChannel(p.chReceivedAll) +func (p *pendingTransactions) SyncPendingTransactionsFor(miniBlocks map[string]*block.MiniBlock, epoch uint32, ctx context.Context) error { + _ = core.EmptyChannel(p.chReceivedAll) - p.mutPendingTx.Lock() - p.epochToSync = epoch - p.syncedAll = false - p.stopSync = false - - requestedTxs := 0 - for _, miniBlock := range miniBlocks { - for _, txHash := range miniBlock.TxHashes { - p.mapHashes[string(txHash)] = miniBlock + for { + p.mutPendingTx.Lock() + p.epochToSync = epoch + p.syncedAll = false + p.stopSync = false + + requestedTxs := 0 + for _, miniBlock := range miniBlocks { + for _, txHash := range miniBlock.TxHashes { + p.mapHashes[string(txHash)] = miniBlock + } + requestedTxs += p.requestTransactionsFor(miniBlock) } - requestedTxs += p.requestTransactionsFor(miniBlock) - } - p.mutPendingTx.Unlock() + p.mutPendingTx.Unlock() - var err error - defer func() { - p.mutPendingTx.Lock() - p.stopSync = true - if err == nil { + if requestedTxs == 0 { + p.mutPendingTx.Lock() + p.stopSync = true p.syncedAll = true + p.mutPendingTx.Unlock() + return nil } - p.mutPendingTx.Unlock() - }() - if requestedTxs > 0 { - err = WaitFor(p.chReceivedAll, waitTime) - if err != nil { - log.Warn("could not finish syncing", "error", err) - return err + select { + case <-p.chReceivedAll: + p.mutPendingTx.Lock() + p.stopSync = true + p.syncedAll = true + p.mutPendingTx.Unlock() + return nil + case <-time.After(p.waitTimeBetweenRequests): + continue + case <-ctx.Done(): + p.mutPendingTx.Lock() + p.stopSync = true + p.mutPendingTx.Unlock() + return update.ErrTimeIsOut } } - - return nil } func (p *pendingTransactions) requestTransactionsFor(miniBlock *block.MiniBlock) int { diff --git a/update/sync/syncTransactions_test.go b/update/sync/syncTransactions_test.go index 08d37354734..9248d397953 100644 --- a/update/sync/syncTransactions_test.go +++ b/update/sync/syncTransactions_test.go @@ -1,6 +1,7 @@ package sync import ( + "context" "encoding/json" "math/big" "testing" @@ -106,7 +107,9 @@ func TestSyncPendingTransactionsFor(t *testing.T) { miniBlocks := make(map[string]*block.MiniBlock) mb := &block.MiniBlock{TxHashes: [][]byte{[]byte("txHash")}} miniBlocks["key"] = mb - err = pendingTxsSyncer.SyncPendingTransactionsFor(miniBlocks, 1, time.Second) + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + err = pendingTxsSyncer.SyncPendingTransactionsFor(miniBlocks, 1, ctx) + cancel() require.Nil(t, err) } @@ -132,7 +135,9 @@ func TestSyncPendingTransactionsFor_MissingTxFromPool(t *testing.T) { mb := &block.MiniBlock{TxHashes: [][]byte{[]byte("txHash")}} miniBlocks["key"] = mb - err = pendingTxsSyncer.SyncPendingTransactionsFor(miniBlocks, 1, time.Second) + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + err = pendingTxsSyncer.SyncPendingTransactionsFor(miniBlocks, 1, ctx) + cancel() require.Equal(t, process.ErrTimeIsOut, err) } @@ -170,6 +175,8 @@ func TestSyncPendingTransactionsFor_ReceiveMissingTx(t *testing.T) { pendingTxsSyncer.receivedTransaction(txHash) }() - err = pendingTxsSyncer.SyncPendingTransactionsFor(miniBlocks, 1, time.Second) + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + err = pendingTxsSyncer.SyncPendingTransactionsFor(miniBlocks, 1, ctx) + cancel() require.Nil(t, err) }
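
Reader note: the new update/sync/syncHeadersByHash.go above only shows the syncer itself. Below is a minimal caller-side sketch of how it might be driven, assuming the dependencies (storer, headers pool, marshalizer, request handler) are wired up elsewhere; the helper function name and the one-minute deadline are illustrative and not part of this change.

```go
package example // illustrative only, not part of this diff

import (
	"context"
	"time"

	"github.com/ElrondNetwork/elrond-go/dataRetriever"
	"github.com/ElrondNetwork/elrond-go/marshal"
	"github.com/ElrondNetwork/elrond-go/process"
	"github.com/ElrondNetwork/elrond-go/storage"
	updateSync "github.com/ElrondNetwork/elrond-go/update/sync"
)

// syncHeadersByHashExample builds the new syncer and drives it with a context
// deadline instead of the old waitTime argument.
func syncHeadersByHashExample(
	store storage.Storer,
	pool dataRetriever.HeadersPool,
	marshalizer marshal.Marshalizer,
	requestHandler process.RequestHandler,
	shardIDs []uint32,
	hashes [][]byte,
) error {
	syncer, err := updateSync.NewMissingheadersByHashSyncer(updateSync.ArgsNewMissingHeadersByHashSyncer{
		Storage:        store,
		Cache:          pool,
		Marshalizer:    marshalizer,
		RequestHandler: requestHandler,
	})
	if err != nil {
		return err
	}

	// the caller, not the syncer, now decides how long the whole operation may take
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	err = syncer.SyncMissingHeadersByHash(shardIDs, hashes, ctx)
	if err != nil {
		return err
	}

	// GetHeaders returns update.ErrNotSynced until the sync above has completed
	headers, err := syncer.GetHeaders()
	if err != nil {
		return err
	}
	_ = headers // use the synced headers

	return nil
}
```

Because the per-request retry interval comes from RequestHandler.RequestInterval(), the context deadline is the only timing knob the caller still has to choose.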
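All three reworked syncers (headers by hash, pending miniblocks, pending transactions) now share the same wait/retry shape: re-request whatever is still missing, then block on either the "received all" channel, the per-request interval taken from RequestHandler.RequestInterval(), or the caller's context. The skeleton below paraphrases that pattern for reference; the function and parameter names are invented for illustration.

```go
package example // illustrative only, not part of this diff

import (
	"context"
	"time"

	"github.com/ElrondNetwork/elrond-go/update"
)

// retryUntilSyncedOrCancelled mirrors the loop used by the reworked syncers:
// requestMissing re-requests the outstanding items and reports how many are
// still missing, chReceivedAll is signalled by the received-data callback,
// and the context carries the overall deadline.
func retryUntilSyncedOrCancelled(
	ctx context.Context,
	chReceivedAll chan bool,
	requestInterval time.Duration,
	requestMissing func() int,
) error {
	for {
		if requestMissing() == 0 {
			// nothing left to ask for
			return nil
		}

		select {
		case <-chReceivedAll:
			// the callback saw the last missing item
			return nil
		case <-time.After(requestInterval):
			// re-request on the next iteration
			continue
		case <-ctx.Done():
			return update.ErrTimeIsOut
		}
	}
}
```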
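The miniblocks and transactions syncers are typically used back to back: first fetch the pending miniblocks for an epoch start meta block, then fetch the transactions those miniblocks reference. The sketch below shows how a caller might thread a single context through both steps now that the waitTime parameters are gone; the local interfaces simply restate the method signatures from this diff, and the surrounding wiring is assumed.

```go
package example // illustrative only, not part of this diff

import (
	"context"

	"github.com/ElrondNetwork/elrond-go/data/block"
)

// miniBlocksSyncHandler and transactionsSyncHandler restate the signatures
// introduced above so the sketch stays decoupled from the concrete types.
type miniBlocksSyncHandler interface {
	SyncPendingMiniBlocksFromMeta(epochStart *block.MetaBlock, unFinished map[string]*block.MetaBlock, ctx context.Context) error
	GetMiniBlocks() (map[string]*block.MiniBlock, error)
}

type transactionsSyncHandler interface {
	SyncPendingTransactionsFor(miniBlocks map[string]*block.MiniBlock, epoch uint32, ctx context.Context) error
}

// syncPendingData runs both syncers under the same deadline: if the context
// expires during either step, update.ErrTimeIsOut is propagated to the caller.
func syncPendingData(
	ctx context.Context,
	epochStart *block.MetaBlock,
	unFinished map[string]*block.MetaBlock,
	mbSyncer miniBlocksSyncHandler,
	txSyncer transactionsSyncHandler,
) error {
	err := mbSyncer.SyncPendingMiniBlocksFromMeta(epochStart, unFinished, ctx)
	if err != nil {
		return err
	}

	miniBlocks, err := mbSyncer.GetMiniBlocks()
	if err != nil {
		return err
	}

	return txSyncer.SyncPendingTransactionsFor(miniBlocks, epochStart.Epoch, ctx)
}
```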