From 3277e6b3f3623811aaa769543dcf282597e6b1e6 Mon Sep 17 00:00:00 2001 From: bogdan-rosianu Date: Thu, 27 Feb 2020 15:51:12 +0200 Subject: [PATCH 01/61] EN-5829: epoch start data provider first implementation --- cmd/node/factory/structs.go | 4 +- cmd/node/main.go | 140 +++++++++++++++--- .../bootstrap/epochStartDataProvider.go | 103 +++++++++++++ .../bootstrap/simpleMetaBlockInterceptor.go | 37 +++++ .../bootstrap/simpleShardHeaderInterceptor.go | 37 +++++ facade/elrondNodeFacade.go | 12 +- p2p/libp2p/netMessenger.go | 2 +- p2p/libp2p/netMessenger_test.go | 18 +-- 8 files changed, 315 insertions(+), 38 deletions(-) create mode 100644 epochStart/bootstrap/epochStartDataProvider.go create mode 100644 epochStart/bootstrap/simpleMetaBlockInterceptor.go create mode 100644 epochStart/bootstrap/simpleShardHeaderInterceptor.go diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index f5f29349524..442a9922255 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -473,10 +473,10 @@ func CryptoComponentsFactory(args *cryptoComponentsFactoryArgs) (*Crypto, error) } // NetworkComponentsFactory creates the network components -func NetworkComponentsFactory(p2pConfig *config.P2PConfig, log logger.Logger, core *Core) (*Network, error) { +func NetworkComponentsFactory(p2pConfig *config.P2PConfig, log logger.Logger, hasher hashing.Hasher) (*Network, error) { var randReader io.Reader if p2pConfig.Node.Seed != "" { - randReader = NewSeedRandReader(core.Hasher.Compute(p2pConfig.Node.Seed)) + randReader = NewSeedRandReader(hasher.Compute(p2pConfig.Node.Seed)) } else { randReader = rand.Reader } diff --git a/cmd/node/main.go b/cmd/node/main.go index aa08516ec9e..c12b447f683 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -10,7 +10,9 @@ import ( "os" "os/signal" "path/filepath" + "regexp" "runtime" + "sort" "strconv" "strings" "syscall" @@ -26,14 +28,17 @@ import ( "github.com/ElrondNetwork/elrond-go/crypto" "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber" "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/data/typeConverters" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/display" "github.com/ElrondNetwork/elrond-go/epochStart" + "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap" "github.com/ElrondNetwork/elrond-go/epochStart/notifier" "github.com/ElrondNetwork/elrond-go/facade" "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/hashing/blake2b" "github.com/ElrondNetwork/elrond-go/logger" "github.com/ElrondNetwork/elrond-go/logger/redirects" "github.com/ElrondNetwork/elrond-go/marshal" @@ -49,7 +54,6 @@ import ( "github.com/ElrondNetwork/elrond-go/process/smartContract/hooks" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/storage" - storageFactory "github.com/ElrondNetwork/elrond-go/storage/factory" "github.com/ElrondNetwork/elrond-go/storage/lrucache" "github.com/ElrondNetwork/elrond-go/storage/pathmanager" "github.com/ElrondNetwork/elrond-go/storage/timecache" @@ -292,6 +296,8 @@ var coreServiceContainer serviceContainer.Core // go build -i -v -ldflags="-X main.appVersion=%VERS%" var appVersion = core.UnVersionedAppString +var currentEpoch = uint32(0) + func main() { _ = display.SetDisplayByteSlice(display.ToHexShort) log := logger.GetOrCreate("main") @@ -500,12 +506,16 @@ func startNode(ctx *cli.Context, log 
logger.Logger, version string) error { preferencesConfig.Preferences.NodeDisplayName = ctx.GlobalString(nodeDisplayName.Name) } - shardCoordinator, nodeType, err := createShardCoordinator(nodesConfig, pubKey, preferencesConfig.Preferences, log) - if err != nil { - return err + if ctx.IsSet(workingDirectory.Name) { + workingDir = ctx.GlobalString(workingDirectory.Name) + } else { + workingDir, err = os.Getwd() + if err != nil { + log.LogIfError(err) + workingDir = "" + } } - - var shardId = core.GetShardIdString(shardCoordinator.SelfId()) + log.Trace("working directory", "path", workingDir) pathTemplateForPruningStorer := filepath.Join( workingDir, @@ -529,19 +539,56 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { return err } - var currentEpoch uint32 - var errNotCritical error - currentEpoch, errNotCritical = storageFactory.FindLastEpochFromStorage( - workingDir, - nodesConfig.ChainID, - defaultDBPath, - defaultEpochString, - ) - if errNotCritical != nil { + currentEpoch, err = findLastEpochFromStorage(workingDir, nodesConfig.ChainID) + if err != nil { currentEpoch = 0 - log.Debug("no epoch db found in storage", "error", errNotCritical.Error()) + log.Debug("no epoch db found in storage", "error", err.Error()) + } + + epochFoundInStorage := err == nil + + isCurrentTimeBeforeGenesis := time.Now().Sub(startTime) < 0 + timeInFirstEpochAtMinRoundsPerEpoch := startTime.Add(time.Duration(nodesConfig.RoundDuration * + uint64(generalConfig.EpochStartConfig.MinRoundsBetweenEpochs))) + isEpochZero := time.Now().Sub(timeInFirstEpochAtMinRoundsPerEpoch) < 0 + shouldSyncWithTheNetwork := !isCurrentTimeBeforeGenesis && !isEpochZero && !epochFoundInStorage + var networkComponents *factory.Network + // TODO: remove next line which is hardcoded for testing + //shouldSyncWithTheNetwork = true + if shouldSyncWithTheNetwork { + //TODO : if the code reaches here, then we should request current epoch from network and build all the + // stuff after we received the information. + // This section should be blocking. 
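+		// A minimal sketch of the intended blocking flow, using the provider this
+		// patch introduces (the local names messenger/marshalizer/interceptor/threshold
+		// are illustrative only):
+		//
+		//	provider := bootstrap.NewEpochStartDataProvider(messenger, marshalizer)
+		//	metaBlock, err := provider.RequestEpochStartMetaBlock(currentEpoch)
+		//
+		// RequestEpochStartMetaBlock itself blocks by polling its interceptor until
+		// enough metablocks have been received:
+		//
+		//	for len(interceptor.GetAllReceivedMetaBlocks()) < threshold {
+		//		time.Sleep(time.Second)
+		//	}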
+ networkComponents, err = factory.NetworkComponentsFactory(p2pConfig, log, &blake2b.Blake2b{}) + if err != nil { + return err + } + + err = networkComponents.NetMessenger.Bootstrap() + if err != nil { + return err + } + + epochRes := bootstrap.NewEpochStartDataProvider(networkComponents.NetMessenger, &marshal.JsonMarshalizer{}) + var metaBlockForEpochStart *block.MetaBlock + metaBlockForEpochStart, err = epochRes.RequestEpochStartMetaBlock(currentEpoch) + if err != nil { + return err + } + // TODO : using the same component, fetch the shard blocks based on received metablock + + log.Info("received epoch start metablock from network", + "nonce", metaBlockForEpochStart.GetNonce(), + "epoch", metaBlockForEpochStart.GetEpoch()) } + shardCoordinator, nodeType, err := createShardCoordinator(nodesConfig, pubKey, preferencesConfig.Preferences, log) + if err != nil { + return err + } + + var shardId = core.GetShardIdString(shardCoordinator.SelfId()) + storageCleanupFlagValue := ctx.GlobalBool(storageCleanup.Name) if storageCleanupFlagValue { dbPath := filepath.Join( @@ -694,10 +741,12 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { err = ioutil.WriteFile(statsFile, []byte(sessionInfoFileOutput), os.ModePerm) log.LogIfError(err) - log.Trace("creating network components") - networkComponents, err := factory.NetworkComponentsFactory(p2pConfig, log, coreComponents) - if err != nil { - return err + if !shouldSyncWithTheNetwork { + log.Trace("creating network components") + networkComponents, err = factory.NetworkComponentsFactory(p2pConfig, log, coreComponents.Hasher) + if err != nil { + return err + } } log.Trace("creating tps benchmark components") @@ -864,7 +913,7 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { ef.StartBackgroundServices() log.Debug("bootstrapping node...") - err = ef.StartNode(currentEpoch) + err = ef.StartNode(currentEpoch, !shouldSyncWithTheNetwork) if err != nil { log.Error("starting node failed", err.Error()) return err @@ -1482,3 +1531,52 @@ func createApiResolver( return external.NewNodeApiResolver(scQueryService, statusMetrics) } + +// TODO: something similar should be done for determining the correct shardId +// when booting from storage with an epoch > 0 or add ShardId in boot storer +func findLastEpochFromStorage(workingDir string, chainID string) (uint32, error) { + parentDir := filepath.Join( + workingDir, + defaultDBPath, + chainID) + + f, err := os.Open(parentDir) + if err != nil { + return 0, err + } + + files, err := f.Readdir(-1) + _ = f.Close() + + if err != nil { + return 0, err + } + + epochDirs := make([]string, 0, len(files)) + for _, file := range files { + if !file.IsDir() { + continue + } + + isEpochDir := strings.HasPrefix(file.Name(), defaultEpochString) + if !isEpochDir { + continue + } + + epochDirs = append(epochDirs, file.Name()) + } + + if len(epochDirs) == 0 { + return 0, nil + } + + sort.Slice(epochDirs, func(i, j int) bool { + return epochDirs[i] > epochDirs[j] + }) + + re := regexp.MustCompile("[0-9]+") + epochStr := re.FindString(epochDirs[0]) + epoch, err := strconv.ParseInt(epochStr, 10, 64) + + return uint32(epoch), err +} diff --git a/epochStart/bootstrap/epochStartDataProvider.go b/epochStart/bootstrap/epochStartDataProvider.go new file mode 100644 index 00000000000..844d18ed7c5 --- /dev/null +++ b/epochStart/bootstrap/epochStartDataProvider.go @@ -0,0 +1,103 @@ +package bootstrap + +import ( + "fmt" + "math" + "time" + + "github.com/ElrondNetwork/elrond-go/data/block" + 
"github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/logger" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/factory" +) + +var log = logger.GetOrCreate("registration") +var _ process.Interceptor = (*simpleMetaBlockInterceptor)(nil) + +const requestSuffix = "_REQUEST" + +type metaBlockInterceptorHandler interface { + process.Interceptor + GetAllReceivedMetaBlocks() []block.MetaBlock +} + +type shardHeaderInterceptorHandler interface { + process.Interceptor + GetAllReceivedShardHeaders() []block.ShardData +} + +type EpochStartDataProvider struct { + marshalizer marshal.Marshalizer + messenger p2p.Messenger + metaBlockInterceptor metaBlockInterceptorHandler + shardHeaderInterceptor shardHeaderInterceptorHandler +} + +func NewEpochStartDataProvider(messenger p2p.Messenger, marshalizer marshal.Marshalizer) *EpochStartDataProvider { + metaBlockInterceptor := NewSimpleMetaBlockInterceptor(marshalizer) + shardHdrInterceptor := NewSimpleShardHeaderInterceptor(marshalizer) + return &EpochStartDataProvider{ + marshalizer: marshalizer, + messenger: messenger, + metaBlockInterceptor: metaBlockInterceptor, + shardHeaderInterceptor: shardHdrInterceptor, + } +} + +func (ser *EpochStartDataProvider) RequestEpochStartMetaBlock(epoch uint32) (*block.MetaBlock, error) { + err := ser.messenger.CreateTopic(factory.MetachainBlocksTopic+requestSuffix, false) + if err != nil { + return nil, err + } + + err = ser.messenger.CreateTopic(factory.MetachainBlocksTopic, false) + if err != nil { + return nil, err + } + + err = ser.messenger.RegisterMessageProcessor(factory.MetachainBlocksTopic, ser.metaBlockInterceptor) + if err != nil { + return nil, err + } + defer func() { + err = ser.messenger.UnregisterMessageProcessor(factory.MetachainBlocksTopic) + if err != nil { + log.Info("error unregistering message processor", "error", err) + } + }() + + err = ser.requestMetaBlock() + if err != nil { + return nil, err + } + + // TODO: check if received block is correct by receiving the same block in majority + threshold := 1 + for { + if len(ser.metaBlockInterceptor.GetAllReceivedMetaBlocks()) >= threshold { + break + } + + time.Sleep(time.Second) + } + + return &ser.metaBlockInterceptor.GetAllReceivedMetaBlocks()[0], nil +} + +func (ser *EpochStartDataProvider) requestMetaBlock() error { + rd := dataRetriever.RequestData{ + Type: dataRetriever.EpochType, + Epoch: 0, + Value: []byte(fmt.Sprintf("epochStartBlock_%d", math.MaxUint32)), + } + rdBytes, err := ser.marshalizer.Marshal(rd) + if err != nil { + return err + } + + ser.messenger.Broadcast(factory.MetachainBlocksTopic+requestSuffix, rdBytes) + return nil +} diff --git a/epochStart/bootstrap/simpleMetaBlockInterceptor.go b/epochStart/bootstrap/simpleMetaBlockInterceptor.go new file mode 100644 index 00000000000..b8857c6b2c1 --- /dev/null +++ b/epochStart/bootstrap/simpleMetaBlockInterceptor.go @@ -0,0 +1,37 @@ +package bootstrap + +import ( + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/p2p" +) + +type simpleMetaBlockInterceptor struct { + marshalizer marshal.Marshalizer + receivedHandlers []block.MetaBlock +} + +func NewSimpleMetaBlockInterceptor(marshalizer marshal.Marshalizer) *simpleMetaBlockInterceptor { + return &simpleMetaBlockInterceptor{ + marshalizer: marshalizer, + receivedHandlers: make([]block.MetaBlock, 
0), + } +} + +func (s *simpleMetaBlockInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, _ func(buffToSend []byte)) error { + var hdr block.MetaBlock + err := s.marshalizer.Unmarshal(&hdr, message.Data()) + if err == nil { + s.receivedHandlers = append(s.receivedHandlers, hdr) + } + + return nil +} + +func (s *simpleMetaBlockInterceptor) GetAllReceivedMetaBlocks() []block.MetaBlock { + return s.receivedHandlers +} + +func (s *simpleMetaBlockInterceptor) IsInterfaceNil() bool { + return s == nil +} diff --git a/epochStart/bootstrap/simpleShardHeaderInterceptor.go b/epochStart/bootstrap/simpleShardHeaderInterceptor.go new file mode 100644 index 00000000000..e2599627793 --- /dev/null +++ b/epochStart/bootstrap/simpleShardHeaderInterceptor.go @@ -0,0 +1,37 @@ +package bootstrap + +import ( + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/p2p" +) + +type simpleShardHeaderInterceptor struct { + marshalizer marshal.Marshalizer + receivedHandlers []block.ShardData +} + +func NewSimpleShardHeaderInterceptor(marshalizer marshal.Marshalizer) *simpleShardHeaderInterceptor { + return &simpleShardHeaderInterceptor{ + marshalizer: marshalizer, + receivedHandlers: make([]block.ShardData, 0), + } +} + +func (s *simpleShardHeaderInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, broadcastHandler func(buffToSend []byte)) error { + var hdr block.ShardData + err := s.marshalizer.Unmarshal(&hdr, message.Data()) + if err == nil { + s.receivedHandlers = append(s.receivedHandlers, hdr) + } + + return nil +} + +func (s *simpleShardHeaderInterceptor) GetAllReceivedShardHeaders() []block.ShardData { + return s.receivedHandlers +} + +func (s *simpleShardHeaderInterceptor) IsInterfaceNil() bool { + return s == nil +} diff --git a/facade/elrondNodeFacade.go b/facade/elrondNodeFacade.go index ef9b454ad40..83c202c32d4 100644 --- a/facade/elrondNodeFacade.go +++ b/facade/elrondNodeFacade.go @@ -72,13 +72,15 @@ func (ef *ElrondNodeFacade) SetConfig(facadeConfig *config.FacadeConfig) { } // StartNode starts the underlying node -func (ef *ElrondNodeFacade) StartNode(epoch uint32) error { - err := ef.node.Start() - if err != nil { - return err +func (ef *ElrondNodeFacade) StartNode(epoch uint32, withP2pBootstrap bool) error { + if withP2pBootstrap { + err := ef.node.Start() + if err != nil { + return err + } } - err = ef.node.StartConsensus(epoch) + err := ef.node.StartConsensus(epoch) return err } diff --git a/p2p/libp2p/netMessenger.go b/p2p/libp2p/netMessenger.go index 5c6934a9360..6d3f62b5c40 100644 --- a/p2p/libp2p/netMessenger.go +++ b/p2p/libp2p/netMessenger.go @@ -365,7 +365,7 @@ func (netMes *networkMessenger) CreateTopic(name string, createChannelForTopic b _, found := netMes.topics[name] if found { netMes.mutTopics.Unlock() - return p2p.ErrTopicAlreadyExists + return nil } //TODO investigate if calling Subscribe on the pubsub impl does exactly the same thing as Topic.Subscribe diff --git a/p2p/libp2p/netMessenger_test.go b/p2p/libp2p/netMessenger_test.go index b8d12426689..37dcc02877d 100644 --- a/p2p/libp2p/netMessenger_test.go +++ b/p2p/libp2p/netMessenger_test.go @@ -501,15 +501,15 @@ func TestLibp2pMessenger_CreateTopicOkValsShouldWork(t *testing.T) { _ = mes.Close() } -func TestLibp2pMessenger_CreateTopicTwiceShouldErr(t *testing.T) { - mes := createMockMessenger() - - _ = mes.CreateTopic("test", false) - err := mes.CreateTopic("test", false) - assert.Equal(t, p2p.ErrTopicAlreadyExists, err) - - _ = 
mes.Close() -} +//func TestLibp2pMessenger_CreateTopicTwiceShouldErr(t *testing.T) { +// mes := createMockMessenger() +// +// _ = mes.CreateTopic("test", false) +// err := mes.CreateTopic("test", false) +// assert.Equal(t, p2p.ErrTopicAlreadyExists, err) +// +// _ = mes.Close() +//} func TestLibp2pMessenger_HasTopicIfHaveTopicShouldReturnTrue(t *testing.T) { mes := createMockMessenger() From 8a9acd07f6ab0430676b685aa319fb62a210e176 Mon Sep 17 00:00:00 2001 From: bogdan-rosianu Date: Thu, 27 Feb 2020 15:56:14 +0200 Subject: [PATCH 02/61] EN-5829: removed a func --- cmd/node/main.go | 66 ++++++++---------------------------------------- 1 file changed, 11 insertions(+), 55 deletions(-) diff --git a/cmd/node/main.go b/cmd/node/main.go index c12b447f683..91040ea0c26 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -10,9 +10,7 @@ import ( "os" "os/signal" "path/filepath" - "regexp" "runtime" - "sort" "strconv" "strings" "syscall" @@ -54,6 +52,7 @@ import ( "github.com/ElrondNetwork/elrond-go/process/smartContract/hooks" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/storage" + storageFactory "github.com/ElrondNetwork/elrond-go/storage/factory" "github.com/ElrondNetwork/elrond-go/storage/lrucache" "github.com/ElrondNetwork/elrond-go/storage/pathmanager" "github.com/ElrondNetwork/elrond-go/storage/timecache" @@ -539,10 +538,16 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { return err } - currentEpoch, err = findLastEpochFromStorage(workingDir, nodesConfig.ChainID) - if err != nil { + var errNotCritical error + currentEpoch, errNotCritical = storageFactory.FindLastEpochFromStorage( + workingDir, + nodesConfig.ChainID, + defaultDBPath, + defaultEpochString, + ) + if errNotCritical != nil { currentEpoch = 0 - log.Debug("no epoch db found in storage", "error", err.Error()) + log.Debug("no epoch db found in storage", "error", errNotCritical.Error()) } epochFoundInStorage := err == nil @@ -554,7 +559,7 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { shouldSyncWithTheNetwork := !isCurrentTimeBeforeGenesis && !isEpochZero && !epochFoundInStorage var networkComponents *factory.Network // TODO: remove next line which is hardcoded for testing - //shouldSyncWithTheNetwork = true + shouldSyncWithTheNetwork = true if shouldSyncWithTheNetwork { //TODO : if the code reaches here, then we should request current epoch from network and build all the // stuff after we received the information. 
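A worked example of the epoch-zero check kept as context above, assuming RoundDuration is expressed in milliseconds (as in nodesSetup.json). Since time.Duration counts nanoseconds, the raw product most likely needs a time.Millisecond factor; minFirstEpochDuration is an illustrative local:

	// with RoundDuration = 6000 ms and MinRoundsBetweenEpochs = 50, the first
	// epoch cannot end earlier than 6000 ms * 50 = 5 minutes after genesis
	minFirstEpochDuration := time.Duration(nodesConfig.RoundDuration*
		uint64(generalConfig.EpochStartConfig.MinRoundsBetweenEpochs)) * time.Millisecond
	isEpochZero := time.Now().Before(startTime.Add(minFirstEpochDuration))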
@@ -1531,52 +1536,3 @@ func createApiResolver( return external.NewNodeApiResolver(scQueryService, statusMetrics) } - -// TODO: something similar should be done for determining the correct shardId -// when booting from storage with an epoch > 0 or add ShardId in boot storer -func findLastEpochFromStorage(workingDir string, chainID string) (uint32, error) { - parentDir := filepath.Join( - workingDir, - defaultDBPath, - chainID) - - f, err := os.Open(parentDir) - if err != nil { - return 0, err - } - - files, err := f.Readdir(-1) - _ = f.Close() - - if err != nil { - return 0, err - } - - epochDirs := make([]string, 0, len(files)) - for _, file := range files { - if !file.IsDir() { - continue - } - - isEpochDir := strings.HasPrefix(file.Name(), defaultEpochString) - if !isEpochDir { - continue - } - - epochDirs = append(epochDirs, file.Name()) - } - - if len(epochDirs) == 0 { - return 0, nil - } - - sort.Slice(epochDirs, func(i, j int) bool { - return epochDirs[i] > epochDirs[j] - }) - - re := regexp.MustCompile("[0-9]+") - epochStr := re.FindString(epochDirs[0]) - epoch, err := strconv.ParseInt(epochStr, 10, 64) - - return uint32(epoch), err -} From 1bcf1e9ffdd2ffb72dc9ae4cf4a2e1f8407987d2 Mon Sep 17 00:00:00 2001 From: bogdan-rosianu Date: Fri, 28 Feb 2020 16:45:28 +0200 Subject: [PATCH 03/61] EN-5829: some changes --- cmd/node/main.go | 46 ++++---- dataRetriever/errors.go | 6 +- .../baseResolversContainerFactory.go | 5 + .../metaResolversContainerFactory.go | 11 +- .../shardResolversContainerFactory.go | 3 + .../topicResolverSender.go | 11 +- .../topicResolverSender_test.go | 26 +++++ epochStart/bootstrap/common.go | 25 +++++ .../bootstrap/epochStartDataProvider.go | 102 ++++++++++++------ .../bootstrap/simpleMetaBlockInterceptor.go | 81 ++++++++++++-- .../bootstrap/simpleMetaBlocksResolver.go | 95 ++++++++++++++++ p2p/libp2p/netMessenger_test.go | 18 ++-- 12 files changed, 345 insertions(+), 84 deletions(-) create mode 100644 epochStart/bootstrap/common.go create mode 100644 epochStart/bootstrap/simpleMetaBlocksResolver.go diff --git a/cmd/node/main.go b/cmd/node/main.go index 91040ea0c26..93f80216ad4 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -26,7 +26,6 @@ import ( "github.com/ElrondNetwork/elrond-go/crypto" "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber" "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/data/typeConverters" "github.com/ElrondNetwork/elrond-go/dataRetriever" @@ -297,6 +296,8 @@ var appVersion = core.UnVersionedAppString var currentEpoch = uint32(0) +var networkComponents *factory.Network + func main() { _ = display.SetDisplayByteSlice(display.ToHexShort) log := logger.GetOrCreate("main") @@ -550,20 +551,16 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { log.Debug("no epoch db found in storage", "error", errNotCritical.Error()) } - epochFoundInStorage := err == nil - - isCurrentTimeBeforeGenesis := time.Now().Sub(startTime) < 0 - timeInFirstEpochAtMinRoundsPerEpoch := startTime.Add(time.Duration(nodesConfig.RoundDuration * - uint64(generalConfig.EpochStartConfig.MinRoundsBetweenEpochs))) - isEpochZero := time.Now().Sub(timeInFirstEpochAtMinRoundsPerEpoch) < 0 - shouldSyncWithTheNetwork := !isCurrentTimeBeforeGenesis && !isEpochZero && !epochFoundInStorage - var networkComponents *factory.Network - // TODO: remove next line which is hardcoded for testing - shouldSyncWithTheNetwork = true - if 
shouldSyncWithTheNetwork { - //TODO : if the code reaches here, then we should request current epoch from network and build all the - // stuff after we received the information. - // This section should be blocking. + epochFoundInStorage := errNotCritical == nil + + shouldCallEpochStartDataProvider := bootstrap.ShouldSyncWithTheNetwork( + startTime, + epochFoundInStorage, + nodesConfig, + generalConfig, + ) + shouldCallEpochStartDataProvider = true + if shouldCallEpochStartDataProvider { networkComponents, err = factory.NetworkComponentsFactory(p2pConfig, log, &blake2b.Blake2b{}) if err != nil { return err @@ -573,18 +570,21 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { if err != nil { return err } + time.Sleep(2 * time.Second) - epochRes := bootstrap.NewEpochStartDataProvider(networkComponents.NetMessenger, &marshal.JsonMarshalizer{}) - var metaBlockForEpochStart *block.MetaBlock - metaBlockForEpochStart, err = epochRes.RequestEpochStartMetaBlock(currentEpoch) + epochRes, err := bootstrap.NewEpochStartDataProvider(networkComponents.NetMessenger, &marshal.JsonMarshalizer{}, &blake2b.Blake2b{}) + if err != nil { + return err + } + var bootstrapComponents *bootstrap.ComponentsNeededForBootstrap + bootstrapComponents, err = epochRes.Bootstrap() if err != nil { return err } - // TODO : using the same component, fetch the shard blocks based on received metablock log.Info("received epoch start metablock from network", - "nonce", metaBlockForEpochStart.GetNonce(), - "epoch", metaBlockForEpochStart.GetEpoch()) + "nonce", bootstrapComponents.EpochStartMetaBlock.GetNonce(), + "epoch", bootstrapComponents.EpochStartMetaBlock.GetEpoch()) } shardCoordinator, nodeType, err := createShardCoordinator(nodesConfig, pubKey, preferencesConfig.Preferences, log) @@ -746,7 +746,7 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { err = ioutil.WriteFile(statsFile, []byte(sessionInfoFileOutput), os.ModePerm) log.LogIfError(err) - if !shouldSyncWithTheNetwork { + if !shouldCallEpochStartDataProvider { log.Trace("creating network components") networkComponents, err = factory.NetworkComponentsFactory(p2pConfig, log, coreComponents.Hasher) if err != nil { @@ -918,7 +918,7 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { ef.StartBackgroundServices() log.Debug("bootstrapping node...") - err = ef.StartNode(currentEpoch, !shouldSyncWithTheNetwork) + err = ef.StartNode(currentEpoch, !shouldCallEpochStartDataProvider) if err != nil { log.Error("starting node failed", err.Error()) return err diff --git a/dataRetriever/errors.go b/dataRetriever/errors.go index d4d2468b68f..c5c183d4254 100644 --- a/dataRetriever/errors.go +++ b/dataRetriever/errors.go @@ -125,9 +125,6 @@ var ErrCacheConfigInvalidEconomics = errors.New("cache-economics parameter is no // ErrCacheConfigInvalidSharding signals that a sharding parameter required by the cache is invalid var ErrCacheConfigInvalidSharding = errors.New("cache-sharding parameter is not valid") -// ErrNilMetaBlockPool signals that a nil meta block data pool was provided -var ErrNilMetaBlockPool = errors.New("nil meta block data pool") - // ErrNilTrieNodesPool signals that a nil trie nodes data pool was provided var ErrNilTrieNodesPool = errors.New("nil trie nodes data pool") @@ -152,6 +149,9 @@ var ErrInvalidMaxTxRequest = errors.New("max tx request number is invalid") // ErrNilPeerListCreator signals that a nil peer list creator implementation has been provided var ErrNilPeerListCreator = 
errors.New("nil peer list creator provided") +// ErrInvalidNumberOfPeersToQuery signals that an invalid number of peers to query has been provided +var ErrInvalidNumberOfPeersToQuery = errors.New("invalid number of peers to query provided") + // ErrNilTrieDataGetter signals that a nil trie data getter has been provided var ErrNilTrieDataGetter = errors.New("nil trie data getter provided") diff --git a/dataRetriever/factory/resolverscontainer/baseResolversContainerFactory.go b/dataRetriever/factory/resolverscontainer/baseResolversContainerFactory.go index 5bbc7f9167d..0b8b3b41b31 100644 --- a/dataRetriever/factory/resolverscontainer/baseResolversContainerFactory.go +++ b/dataRetriever/factory/resolverscontainer/baseResolversContainerFactory.go @@ -13,6 +13,9 @@ import ( "github.com/ElrondNetwork/elrond-go/sharding" ) +// numPeersToQuery number of peers to send the message +const numPeersToQuery = 2 + const emptyExcludePeersOnTopic = "" type baseResolversContainerFactory struct { @@ -218,6 +221,7 @@ func (brcf *baseResolversContainerFactory) createOneResolverSender( peerListCreator, brcf.marshalizer, brcf.intRandomizer, + numPeersToQuery, uint32(0), ) if err != nil { @@ -239,6 +243,7 @@ func (brcf *baseResolversContainerFactory) createTrieNodesResolver(topic string, peerListCreator, brcf.marshalizer, brcf.intRandomizer, + numPeersToQuery, brcf.shardCoordinator.SelfId(), ) if err != nil { diff --git a/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory.go b/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory.go index 48cc5dd9b81..d34e17beead 100644 --- a/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory.go +++ b/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory.go @@ -132,6 +132,7 @@ func (mrcf *metaResolversContainerFactory) createShardHeaderResolver(topic strin peerListCreator, mrcf.marshalizer, mrcf.intRandomizer, + numPeersToQuery, shardID, ) if err != nil { @@ -164,7 +165,7 @@ func (mrcf *metaResolversContainerFactory) createShardHeaderResolver(topic strin func (mrcf *metaResolversContainerFactory) generateMetaChainHeaderResolvers() error { identifierHeader := factory.MetachainBlocksTopic - resolver, err := mrcf.createMetaChainHeaderResolver(identifierHeader, core.MetachainShardId) + resolver, err := mrcf.CreateMetaChainHeaderResolver(identifierHeader, numPeersToQuery, core.MetachainShardId) if err != nil { return err } @@ -172,7 +173,12 @@ func (mrcf *metaResolversContainerFactory) generateMetaChainHeaderResolvers() er return mrcf.container.Add(identifierHeader, resolver) } -func (mrcf *metaResolversContainerFactory) createMetaChainHeaderResolver(identifier string, shardId uint32) (dataRetriever.Resolver, error) { +// CreateMetaChainHeaderResolver will return a resolver for metachain headers +func (mrcf *metaResolversContainerFactory) CreateMetaChainHeaderResolver( + identifier string, + numPeersToQuery int, + shardId uint32, +) (dataRetriever.Resolver, error) { hdrStorer := mrcf.store.GetStorer(dataRetriever.MetaBlockUnit) peerListCreator, err := topicResolverSender.NewDiffPeerListCreator(mrcf.messenger, identifier, emptyExcludePeersOnTopic) @@ -186,6 +192,7 @@ func (mrcf *metaResolversContainerFactory) createMetaChainHeaderResolver(identif peerListCreator, mrcf.marshalizer, mrcf.intRandomizer, + numPeersToQuery, shardId, ) if err != nil { diff --git a/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory.go b/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory.go index 
bbc3bfcd971..62d64ac13ff 100644 --- a/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory.go +++ b/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory.go @@ -127,6 +127,7 @@ func (srcf *shardResolversContainerFactory) generateHeaderResolvers() error { peerListCreator, srcf.marshalizer, srcf.intRandomizer, + numPeersToQuery, shardC.SelfId(), ) if err != nil { @@ -178,6 +179,7 @@ func (srcf *shardResolversContainerFactory) generatePeerChBlockBodyResolvers() e peerListCreator, srcf.marshalizer, srcf.intRandomizer, + numPeersToQuery, shardC.SelfId(), ) if err != nil { @@ -229,6 +231,7 @@ func (srcf *shardResolversContainerFactory) generateMetablockHeaderResolvers() e peerListCreator, srcf.marshalizer, srcf.intRandomizer, + numPeersToQuery, core.MetachainShardId, ) if err != nil { diff --git a/dataRetriever/resolvers/topicResolverSender/topicResolverSender.go b/dataRetriever/resolvers/topicResolverSender/topicResolverSender.go index ee4601e6ade..cc6858dede0 100644 --- a/dataRetriever/resolvers/topicResolverSender/topicResolverSender.go +++ b/dataRetriever/resolvers/topicResolverSender/topicResolverSender.go @@ -9,15 +9,13 @@ import ( // topicRequestSuffix represents the topic name suffix const topicRequestSuffix = "_REQUEST" -// NumPeersToQuery number of peers to send the message -const NumPeersToQuery = 2 - type topicResolverSender struct { messenger dataRetriever.MessageHandler marshalizer marshal.Marshalizer topicName string peerListCreator dataRetriever.PeerListCreator randomizer dataRetriever.IntRandomizer + numPeersToQuery int targetShardId uint32 } @@ -28,6 +26,7 @@ func NewTopicResolverSender( peerListCreator dataRetriever.PeerListCreator, marshalizer marshal.Marshalizer, randomizer dataRetriever.IntRandomizer, + numPeersToQuery int, targetShardId uint32, ) (*topicResolverSender, error) { @@ -43,6 +42,9 @@ func NewTopicResolverSender( if peerListCreator == nil || peerListCreator.IsInterfaceNil() { return nil, dataRetriever.ErrNilPeerListCreator } + if numPeersToQuery < 1 { + return nil, dataRetriever.ErrInvalidNumberOfPeersToQuery + } resolver := &topicResolverSender{ messenger: messenger, @@ -51,6 +53,7 @@ func NewTopicResolverSender( marshalizer: marshalizer, randomizer: randomizer, targetShardId: targetShardId, + numPeersToQuery: numPeersToQuery, } return resolver, nil @@ -87,7 +90,7 @@ func (trs *topicResolverSender) SendOnRequestTopic(rd *dataRetriever.RequestData } msgSentCounter++ - if msgSentCounter == NumPeersToQuery { + if msgSentCounter == trs.numPeersToQuery { break } } diff --git a/dataRetriever/resolvers/topicResolverSender/topicResolverSender_test.go b/dataRetriever/resolvers/topicResolverSender/topicResolverSender_test.go index 7674ad0f952..390dfcd62ba 100644 --- a/dataRetriever/resolvers/topicResolverSender/topicResolverSender_test.go +++ b/dataRetriever/resolvers/topicResolverSender/topicResolverSender_test.go @@ -23,6 +23,7 @@ func TestNewTopicResolverSender_NilMessengerShouldErr(t *testing.T) { &mock.PeerListCreatorStub{}, &mock.MarshalizerMock{}, &mock.IntRandomizerMock{}, + 2, 0, ) @@ -39,6 +40,7 @@ func TestNewTopicResolverSender_NilPeersListCreatorShouldErr(t *testing.T) { nil, &mock.MarshalizerMock{}, &mock.IntRandomizerMock{}, + 2, 0, ) @@ -55,6 +57,7 @@ func TestNewTopicResolverSender_NilMarshalizerShouldErr(t *testing.T) { &mock.PeerListCreatorStub{}, nil, &mock.IntRandomizerMock{}, + 2, 0, ) @@ -71,6 +74,7 @@ func TestNewTopicResolverSender_NilRandomizerShouldErr(t *testing.T) { &mock.PeerListCreatorStub{}, 
&mock.MarshalizerMock{}, nil, + 2, 0, ) @@ -78,6 +82,23 @@ func TestNewTopicResolverSender_NilRandomizerShouldErr(t *testing.T) { assert.Equal(t, dataRetriever.ErrNilRandomizer, err) } +func TestNewTopicResolverSender_InvalidNumberOfPeersToQueryShouldErr(t *testing.T) { + t.Parallel() + + trs, err := topicResolverSender.NewTopicResolverSender( + &mock.MessageHandlerStub{}, + "topic", + &mock.PeerListCreatorStub{}, + &mock.MarshalizerMock{}, + &mock.IntRandomizerMock{}, + 0, + 0, + ) + + assert.Nil(t, trs) + assert.Equal(t, dataRetriever.ErrInvalidNumberOfPeersToQuery, err) +} + func TestNewTopicResolverSender_OkValsShouldWork(t *testing.T) { t.Parallel() @@ -87,6 +108,7 @@ func TestNewTopicResolverSender_OkValsShouldWork(t *testing.T) { &mock.PeerListCreatorStub{}, &mock.MarshalizerMock{}, &mock.IntRandomizerMock{}, + 2, 0, ) @@ -112,6 +134,7 @@ func TestTopicResolverSender_SendOnRequestTopicMarshalizerFailsShouldErr(t *test }, }, &mock.IntRandomizerMock{}, + 2, 0, ) @@ -133,6 +156,7 @@ func TestTopicResolverSender_SendOnRequestTopicNoOneToSendShouldErr(t *testing.T }, &mock.MarshalizerMock{}, &mock.IntRandomizerMock{}, + 2, 0, ) @@ -165,6 +189,7 @@ func TestTopicResolverSender_SendOnRequestTopicShouldWork(t *testing.T) { }, &mock.MarshalizerMock{}, &mock.IntRandomizerMock{}, + 2, 0, ) @@ -198,6 +223,7 @@ func TestTopicResolverSender_SendShouldWork(t *testing.T) { &mock.PeerListCreatorStub{}, &mock.MarshalizerMock{}, &mock.IntRandomizerMock{}, + 2, 0, ) diff --git a/epochStart/bootstrap/common.go b/epochStart/bootstrap/common.go new file mode 100644 index 00000000000..1877e0d5a81 --- /dev/null +++ b/epochStart/bootstrap/common.go @@ -0,0 +1,25 @@ +package bootstrap + +import ( + "time" + + "github.com/ElrondNetwork/elrond-go/config" + "github.com/ElrondNetwork/elrond-go/sharding" +) + +// ShouldSyncWithTheNetwork returns true if a peer is not synced with the latest epoch (especially used when a peer +// wants to join the network after the genesis) +func ShouldSyncWithTheNetwork( + startTime time.Time, + epochFoundInStorage bool, + nodesConfig *sharding.NodesSetup, + config *config.Config, +) bool { + isCurrentTimeBeforeGenesis := time.Now().Sub(startTime) < 0 + timeInFirstEpochAtMinRoundsPerEpoch := startTime.Add(time.Duration(nodesConfig.RoundDuration * + uint64(config.EpochStartConfig.MinRoundsBetweenEpochs))) + isEpochZero := time.Now().Sub(timeInFirstEpochAtMinRoundsPerEpoch) < 0 + shouldSyncWithTheNetwork := !isCurrentTimeBeforeGenesis && !isEpochZero && !epochFoundInStorage + + return shouldSyncWithTheNetwork +} diff --git a/epochStart/bootstrap/epochStartDataProvider.go b/epochStart/bootstrap/epochStartDataProvider.go index 844d18ed7c5..8ced6faa0bd 100644 --- a/epochStart/bootstrap/epochStartDataProvider.go +++ b/epochStart/bootstrap/epochStartDataProvider.go @@ -1,12 +1,10 @@ package bootstrap import ( - "fmt" - "math" "time" "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/hashing" "github.com/ElrondNetwork/elrond-go/logger" "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/p2p" @@ -18,10 +16,18 @@ var log = logger.GetOrCreate("registration") var _ process.Interceptor = (*simpleMetaBlockInterceptor)(nil) const requestSuffix = "_REQUEST" +const delayBetweenRequests = 200 * time.Millisecond +const thresholdForConsideringMetaBlockCorrect = 0.4 +const numRequestsToSendOnce = 4 + +// ComponentsNeededForBootstrap holds the components which need to be initialized from 
network +type ComponentsNeededForBootstrap struct { + EpochStartMetaBlock *block.MetaBlock +} type metaBlockInterceptorHandler interface { process.Interceptor - GetAllReceivedMetaBlocks() []block.MetaBlock + GetMetaBlock(target int) (*block.MetaBlock, error) } type shardHeaderInterceptorHandler interface { @@ -29,75 +35,105 @@ type shardHeaderInterceptorHandler interface { GetAllReceivedShardHeaders() []block.ShardData } -type EpochStartDataProvider struct { +type metaBlockResolverHandler interface { + RequestEpochStartMetaBlock() error +} + +// epochStartDataProvider will handle requesting the needed data to start when joining late the network +type epochStartDataProvider struct { marshalizer marshal.Marshalizer + hasher hashing.Hasher messenger p2p.Messenger metaBlockInterceptor metaBlockInterceptorHandler shardHeaderInterceptor shardHeaderInterceptorHandler + metaBlockResolver metaBlockResolverHandler } -func NewEpochStartDataProvider(messenger p2p.Messenger, marshalizer marshal.Marshalizer) *EpochStartDataProvider { - metaBlockInterceptor := NewSimpleMetaBlockInterceptor(marshalizer) +// NewEpochStartDataProvider will return a new instance of epochStartDataProvider +func NewEpochStartDataProvider( + messenger p2p.Messenger, + marshalizer marshal.Marshalizer, + hasher hashing.Hasher, +) (*epochStartDataProvider, error) { + metaBlockInterceptor := NewSimpleMetaBlockInterceptor(marshalizer, hasher) shardHdrInterceptor := NewSimpleShardHeaderInterceptor(marshalizer) - return &EpochStartDataProvider{ + metaBlockResolver, err := NewSimpleMetaBlocksResolver(messenger, marshalizer) + if err != nil { + return nil, err + } + + return &epochStartDataProvider{ marshalizer: marshalizer, + hasher: hasher, messenger: messenger, metaBlockInterceptor: metaBlockInterceptor, shardHeaderInterceptor: shardHdrInterceptor, - } + metaBlockResolver: metaBlockResolver, + }, nil } -func (ser *EpochStartDataProvider) RequestEpochStartMetaBlock(epoch uint32) (*block.MetaBlock, error) { - err := ser.messenger.CreateTopic(factory.MetachainBlocksTopic+requestSuffix, false) +// Bootstrap will handle requesting and receiving the needed information the node will bootstrap from +func (esdp *epochStartDataProvider) Bootstrap() (*ComponentsNeededForBootstrap, error) { + metaBlock, err := esdp.getEpochStartMetaBlock() if err != nil { return nil, err } - err = ser.messenger.CreateTopic(factory.MetachainBlocksTopic, false) + return &ComponentsNeededForBootstrap{ + EpochStartMetaBlock: metaBlock, + }, nil +} + +func (esdp *epochStartDataProvider) getEpochStartMetaBlock() (*block.MetaBlock, error) { + err := esdp.messenger.CreateTopic(factory.MetachainBlocksTopic, true) if err != nil { return nil, err } - err = ser.messenger.RegisterMessageProcessor(factory.MetachainBlocksTopic, ser.metaBlockInterceptor) + err = esdp.messenger.RegisterMessageProcessor(factory.MetachainBlocksTopic, esdp.metaBlockInterceptor) if err != nil { return nil, err } defer func() { - err = ser.messenger.UnregisterMessageProcessor(factory.MetachainBlocksTopic) + err = esdp.messenger.UnregisterMessageProcessor(factory.MetachainBlocksTopic) + if err != nil { + log.Info("error unregistering message processor", "error", err) + } + err = esdp.messenger.UnregisterMessageProcessor(factory.MetachainBlocksTopic + requestSuffix) if err != nil { log.Info("error unregistering message processor", "error", err) } }() - err = ser.requestMetaBlock() + err = esdp.requestMetaBlock() if err != nil { return nil, err } - // TODO: check if received block is correct by 
receiving the same block in majority - threshold := 1 for { - if len(ser.metaBlockInterceptor.GetAllReceivedMetaBlocks()) >= threshold { - break + threshold := int(thresholdForConsideringMetaBlockCorrect * float64(len(esdp.messenger.Peers()))) + mb, errConsensusNotReached := esdp.metaBlockInterceptor.GetMetaBlock(threshold) + if errConsensusNotReached == nil { + return mb, nil + } + log.Info("consensus not reached for epoch start meta block. re-requesting and trying again...") + err = esdp.requestMetaBlock() + if err != nil { + return nil, err } - - time.Sleep(time.Second) } - - return &ser.metaBlockInterceptor.GetAllReceivedMetaBlocks()[0], nil } -func (ser *EpochStartDataProvider) requestMetaBlock() error { - rd := dataRetriever.RequestData{ - Type: dataRetriever.EpochType, - Epoch: 0, - Value: []byte(fmt.Sprintf("epochStartBlock_%d", math.MaxUint32)), - } - rdBytes, err := ser.marshalizer.Marshal(rd) - if err != nil { - return err +func (esdp *epochStartDataProvider) requestMetaBlock() error { + // send more requests + for i := 0; i < numRequestsToSendOnce; i++ { + time.Sleep(delayBetweenRequests) + log.Debug("sent request for epoch start metablock...") + err := esdp.metaBlockResolver.RequestEpochStartMetaBlock() + if err != nil { + return err + } } - ser.messenger.Broadcast(factory.MetachainBlocksTopic+requestSuffix, rdBytes) return nil } diff --git a/epochStart/bootstrap/simpleMetaBlockInterceptor.go b/epochStart/bootstrap/simpleMetaBlockInterceptor.go index b8857c6b2c1..c21ec37d792 100644 --- a/epochStart/bootstrap/simpleMetaBlockInterceptor.go +++ b/epochStart/bootstrap/simpleMetaBlockInterceptor.go @@ -1,37 +1,98 @@ package bootstrap import ( + "errors" + "sync" + "time" + + "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/hashing" "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/p2p" ) +const timeToWaitBeforeCheckingReceivedMetaBlocks = 500 * time.Millisecond +const numTriesUntilExit = 5 + type simpleMetaBlockInterceptor struct { - marshalizer marshal.Marshalizer - receivedHandlers []block.MetaBlock + marshalizer marshal.Marshalizer + hasher hashing.Hasher + mutReceivedMetaBlocks sync.RWMutex + mapReceivedMetaBlocks map[string]*block.MetaBlock + mapMetaBlocksFromPeers map[string][]p2p.PeerID } -func NewSimpleMetaBlockInterceptor(marshalizer marshal.Marshalizer) *simpleMetaBlockInterceptor { +// NewSimpleMetaBlockInterceptor will return a new instance of simpleMetaBlockInterceptor +func NewSimpleMetaBlockInterceptor(marshalizer marshal.Marshalizer, hasher hashing.Hasher) *simpleMetaBlockInterceptor { return &simpleMetaBlockInterceptor{ - marshalizer: marshalizer, - receivedHandlers: make([]block.MetaBlock, 0), + marshalizer: marshalizer, + hasher: hasher, + mutReceivedMetaBlocks: sync.RWMutex{}, + mapReceivedMetaBlocks: make(map[string]*block.MetaBlock), + mapMetaBlocksFromPeers: make(map[string][]p2p.PeerID), } } +// ProcessReceivedMessage will receive the metablocks and will add them to the maps func (s *simpleMetaBlockInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, _ func(buffToSend []byte)) error { - var hdr block.MetaBlock - err := s.marshalizer.Unmarshal(&hdr, message.Data()) + var mb block.MetaBlock + err := s.marshalizer.Unmarshal(&mb, message.Data()) if err == nil { - s.receivedHandlers = append(s.receivedHandlers, hdr) + s.mutReceivedMetaBlocks.Lock() + mbHash, err := core.CalculateHash(s.marshalizer, s.hasher, mb) + if err != nil { + 
s.mutReceivedMetaBlocks.Unlock()
+			return nil
+		}
+		s.mapReceivedMetaBlocks[string(mbHash)] = &mb
+		s.addToPeerList(string(mbHash), message.Peer())
+		s.mutReceivedMetaBlocks.Unlock()
 	}
 
 	return nil
 }
 
-func (s *simpleMetaBlockInterceptor) GetAllReceivedMetaBlocks() []block.MetaBlock {
-	return s.receivedHandlers
+// this func should be called under mutex protection
+func (s *simpleMetaBlockInterceptor) addToPeerList(hash string, id p2p.PeerID) {
+	peersListForHash, ok := s.mapMetaBlocksFromPeers[hash]
+
+	// no entry for this hash. add it directly
+	if !ok {
+		s.mapMetaBlocksFromPeers[hash] = append(s.mapMetaBlocksFromPeers[hash], id)
+		return
+	}
+
+	// entries exist for this hash. search so we don't have duplicates
+	for _, peer := range peersListForHash {
+		if peer == id {
+			return
+		}
+	}
+
+	// entry not found so add it
+	s.mapMetaBlocksFromPeers[hash] = append(s.mapMetaBlocksFromPeers[hash], id)
+}
+
+// GetMetaBlock will return the metablock after it is confirmed or an error if the number of tries was exceeded
+func (s *simpleMetaBlockInterceptor) GetMetaBlock(target int) (*block.MetaBlock, error) {
+	for count := 0; count < numTriesUntilExit; count++ {
+		time.Sleep(timeToWaitBeforeCheckingReceivedMetaBlocks)
+		s.mutReceivedMetaBlocks.RLock()
+		for hash, peersList := range s.mapMetaBlocksFromPeers {
+			if len(peersList) >= target {
+				s.mutReceivedMetaBlocks.RUnlock()
+				log.Info("got consensus for metablock", "len", len(peersList))
+				return s.mapReceivedMetaBlocks[hash], nil
+			}
+		}
+		s.mutReceivedMetaBlocks.RUnlock()
+	}
+
+	return nil, errors.New("number of tries exceeded; try re-requesting")
 }
 
+// IsInterfaceNil returns true if there is no value under the interface
 func (s *simpleMetaBlockInterceptor) IsInterfaceNil() bool {
 	return s == nil
 }
diff --git a/epochStart/bootstrap/simpleMetaBlocksResolver.go b/epochStart/bootstrap/simpleMetaBlocksResolver.go
new file mode 100644
index 00000000000..bcca9e34a5b
--- /dev/null
+++ b/epochStart/bootstrap/simpleMetaBlocksResolver.go
@@ -0,0 +1,95 @@
+package bootstrap
+
+import (
+	"errors"
+	"fmt"
+	"math"
+
+	"github.com/ElrondNetwork/elrond-go/core/partitioning"
+	"github.com/ElrondNetwork/elrond-go/data/state"
+	"github.com/ElrondNetwork/elrond-go/data/typeConverters/uint64ByteSlice"
+	"github.com/ElrondNetwork/elrond-go/dataRetriever"
+	"github.com/ElrondNetwork/elrond-go/dataRetriever/factory/resolverscontainer"
+	"github.com/ElrondNetwork/elrond-go/marshal"
+	mock2 "github.com/ElrondNetwork/elrond-go/node/mock"
+	"github.com/ElrondNetwork/elrond-go/p2p"
+	"github.com/ElrondNetwork/elrond-go/process/factory"
+	mock3 "github.com/ElrondNetwork/elrond-go/process/mock"
+	"github.com/ElrondNetwork/elrond-go/storage"
+)
+
+// simpleMetaBlocksResolver initializes a HeaderResolver and sends requests from it
+type simpleMetaBlocksResolver struct {
+	messenger p2p.Messenger
+	marshalizer marshal.Marshalizer
+	mbResolver dataRetriever.HeaderResolver
+}
+
+// NewSimpleMetaBlocksResolver returns a new instance of simpleMetaBlocksResolver
+func NewSimpleMetaBlocksResolver(
+	messenger p2p.Messenger,
+	marshalizer marshal.Marshalizer,
+) (*simpleMetaBlocksResolver, error) {
+	smbr := &simpleMetaBlocksResolver{
+		messenger: messenger,
+		marshalizer: marshalizer,
+	}
+	err := smbr.init()
+	if err != nil {
+		return nil, err
+	}
+
+	return smbr, nil
+}
+
+func (smbr *simpleMetaBlocksResolver) init() error {
+	storageService := &mock2.ChainStorerMock{
+		GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer {
+			return &mock2.StorerMock{}
+		},
+	}
+	
cacher := mock3.NewPoolsHolderMock()
+	dataPacker, err := partitioning.NewSimpleDataPacker(smbr.marshalizer)
+	if err != nil {
+		return err
+	}
+	triesHolder := state.NewDataTriesHolder()
+
+	resolversContainerArgs := resolverscontainer.FactoryArgs{
+		ShardCoordinator: mock2.NewOneShardCoordinatorMock(),
+		Messenger: smbr.messenger,
+		Store: storageService,
+		Marshalizer: smbr.marshalizer,
+		DataPools: cacher,
+		Uint64ByteSliceConverter: uint64ByteSlice.NewBigEndianConverter(),
+		DataPacker: dataPacker,
+		TriesContainer: triesHolder,
+		SizeCheckDelta: 0,
+	}
+	metaChainResolverContainer, err := resolverscontainer.NewMetaResolversContainerFactory(resolversContainerArgs)
+	if err != nil {
+		return err
+	}
+
+	numPeersToQuery := int(0.4 * float64(len(smbr.messenger.Peers())))
+	if numPeersToQuery == 0 {
+		numPeersToQuery = 2
+	}
+	resolver, err := metaChainResolverContainer.CreateMetaChainHeaderResolver(factory.MetachainBlocksTopic, numPeersToQuery, 0)
+	if err != nil {
+		return err
+	}
+
+	castedResolver, ok := resolver.(dataRetriever.HeaderResolver)
+	if !ok {
+		return errors.New("invalid resolver type")
+	}
+	smbr.mbResolver = castedResolver
+
+	return nil
+}
+
+// RequestEpochStartMetaBlock will request the metablock from the peers
+func (smbr *simpleMetaBlocksResolver) RequestEpochStartMetaBlock() error {
+	return smbr.mbResolver.RequestDataFromEpoch([]byte(fmt.Sprintf("epochStartBlock_%d", math.MaxUint32)))
+}
diff --git a/p2p/libp2p/netMessenger_test.go b/p2p/libp2p/netMessenger_test.go
index 37dcc02877d..f05b63f9d58 100644
--- a/p2p/libp2p/netMessenger_test.go
+++ b/p2p/libp2p/netMessenger_test.go
@@ -501,15 +501,15 @@ func TestLibp2pMessenger_CreateTopicOkValsShouldWork(t *testing.T) {
 	_ = mes.Close()
 }
 
-//func TestLibp2pMessenger_CreateTopicTwiceShouldErr(t *testing.T) {
-//	mes := createMockMessenger()
-//
-//	_ = mes.CreateTopic("test", false)
-//	err := mes.CreateTopic("test", false)
-//	assert.Equal(t, p2p.ErrTopicAlreadyExists, err)
-//
-//	_ = mes.Close()
-//}
+func TestLibp2pMessenger_CreateTopicTwiceShouldNotErr(t *testing.T) {
+	mes := createMockMessenger()
+
+	_ = mes.CreateTopic("test", false)
+	err := mes.CreateTopic("test", false)
+	assert.Nil(t, err)
+
+	_ = mes.Close()
+}
 
 func TestLibp2pMessenger_HasTopicIfHaveTopicShouldReturnTrue(t *testing.T) {
 	mes := createMockMessenger()

From 335bdcb115ff0e3cadec40d22aa571d8e6d74122 Mon Sep 17 00:00:00 2001
From: bogdan-rosianu 
Date: Mon, 2 Mar 2020 16:33:20 +0200
Subject: [PATCH 04/61] EN-5829: added requests for previous epoch start

---
 cmd/node/main.go                              | 12 ++-
 .../bootstrap/epochStartDataProvider.go       | 83 +++++++++++--------
 epochStart/bootstrap/interface.go             | 26 ++++++
 .../simpleNodesConfigProvider.go              | 27 ++++++
 .../bootstrap/simpleMetaBlockInterceptor.go   |  8 +-
 .../bootstrap/simpleMetaBlocksResolver.go     |  6 +-
 facade/elrondNodeFacade_test.go               |  6 +-
 7 files changed, 124 insertions(+), 44 deletions(-)
 create mode 100644 epochStart/bootstrap/interface.go
 create mode 100644 epochStart/bootstrap/nodesconfigprovider/simpleNodesConfigProvider.go

diff --git a/cmd/node/main.go b/cmd/node/main.go
index 93f80216ad4..d864a5ae898 100644
--- a/cmd/node/main.go
+++ b/cmd/node/main.go
@@ -32,6 +32,7 @@ import (
 	"github.com/ElrondNetwork/elrond-go/display"
 	"github.com/ElrondNetwork/elrond-go/epochStart"
 	"github.com/ElrondNetwork/elrond-go/epochStart/bootstrap"
+	"github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/nodesconfigprovider"
 	"github.com/ElrondNetwork/elrond-go/epochStart/notifier"
 	"github.com/ElrondNetwork/elrond-go/facade"
 
"github.com/ElrondNetwork/elrond-go/hashing" @@ -572,7 +573,13 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { } time.Sleep(2 * time.Second) - epochRes, err := bootstrap.NewEpochStartDataProvider(networkComponents.NetMessenger, &marshal.JsonMarshalizer{}, &blake2b.Blake2b{}) + simpleNodesConfigProvider := nodesconfigprovider.NewSimpleNodesConfigProvider(nodesConfig) + epochRes, err := bootstrap.NewEpochStartDataProvider( + networkComponents.NetMessenger, + &marshal.JsonMarshalizer{}, + &blake2b.Blake2b{}, + simpleNodesConfigProvider, + ) if err != nil { return err } @@ -582,6 +589,9 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { return err } + // override already defined node config + nodesConfig = bootstrapComponents.NodesConfig + log.Info("received epoch start metablock from network", "nonce", bootstrapComponents.EpochStartMetaBlock.GetNonce(), "epoch", bootstrapComponents.EpochStartMetaBlock.GetEpoch()) diff --git a/epochStart/bootstrap/epochStartDataProvider.go b/epochStart/bootstrap/epochStartDataProvider.go index 8ced6faa0bd..cb19997f1a8 100644 --- a/epochStart/bootstrap/epochStartDataProvider.go +++ b/epochStart/bootstrap/epochStartDataProvider.go @@ -1,6 +1,7 @@ package bootstrap import ( + "math" "time" "github.com/ElrondNetwork/elrond-go/data/block" @@ -10,6 +11,7 @@ import ( "github.com/ElrondNetwork/elrond-go/p2p" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/factory" + "github.com/ElrondNetwork/elrond-go/sharding" ) var log = logger.GetOrCreate("registration") @@ -23,20 +25,7 @@ const numRequestsToSendOnce = 4 // ComponentsNeededForBootstrap holds the components which need to be initialized from network type ComponentsNeededForBootstrap struct { EpochStartMetaBlock *block.MetaBlock -} - -type metaBlockInterceptorHandler interface { - process.Interceptor - GetMetaBlock(target int) (*block.MetaBlock, error) -} - -type shardHeaderInterceptorHandler interface { - process.Interceptor - GetAllReceivedShardHeaders() []block.ShardData -} - -type metaBlockResolverHandler interface { - RequestEpochStartMetaBlock() error + NodesConfig *sharding.NodesSetup } // epochStartDataProvider will handle requesting the needed data to start when joining late the network @@ -44,6 +33,7 @@ type epochStartDataProvider struct { marshalizer marshal.Marshalizer hasher hashing.Hasher messenger p2p.Messenger + nodesConfigProvider NodesConfigProviderHandler metaBlockInterceptor metaBlockInterceptorHandler shardHeaderInterceptor shardHeaderInterceptorHandler metaBlockResolver metaBlockResolverHandler @@ -54,6 +44,7 @@ func NewEpochStartDataProvider( messenger p2p.Messenger, marshalizer marshal.Marshalizer, hasher hashing.Hasher, + nodesConfigProvider NodesConfigProviderHandler, ) (*epochStartDataProvider, error) { metaBlockInterceptor := NewSimpleMetaBlockInterceptor(marshalizer, hasher) shardHdrInterceptor := NewSimpleShardHeaderInterceptor(marshalizer) @@ -66,6 +57,7 @@ func NewEpochStartDataProvider( marshalizer: marshalizer, hasher: hasher, messenger: messenger, + nodesConfigProvider: nodesConfigProvider, metaBlockInterceptor: metaBlockInterceptor, shardHeaderInterceptor: shardHdrInterceptor, metaBlockResolver: metaBlockResolver, @@ -74,62 +66,85 @@ func NewEpochStartDataProvider( // Bootstrap will handle requesting and receiving the needed information the node will bootstrap from func (esdp *epochStartDataProvider) Bootstrap() (*ComponentsNeededForBootstrap, error) { - metaBlock, err := 
esdp.getEpochStartMetaBlock() + err := esdp.initTopicsAndInterceptors() + if err != nil { + return nil, err + } + defer func() { + esdp.resetTopicsAndInterceptors() + }() + + epochNumForRequestingTheLatestAvailable := uint32(math.MaxUint32) + metaBlock, err := esdp.getEpochStartMetaBlock(epochNumForRequestingTheLatestAvailable) + if err != nil { + return nil, err + } + prevMetaBlock, err := esdp.getEpochStartMetaBlock(metaBlock.Epoch - 1) + if err != nil { + return nil, err + } + log.Info("previous meta block", "epoch", prevMetaBlock.Epoch) + nodesConfig, err := esdp.nodesConfigProvider.GetNodesConfigForMetaBlock(metaBlock) if err != nil { return nil, err } return &ComponentsNeededForBootstrap{ EpochStartMetaBlock: metaBlock, + NodesConfig: nodesConfig, }, nil } -func (esdp *epochStartDataProvider) getEpochStartMetaBlock() (*block.MetaBlock, error) { +func (esdp *epochStartDataProvider) initTopicsAndInterceptors() error { err := esdp.messenger.CreateTopic(factory.MetachainBlocksTopic, true) if err != nil { - return nil, err + return err } err = esdp.messenger.RegisterMessageProcessor(factory.MetachainBlocksTopic, esdp.metaBlockInterceptor) if err != nil { - return nil, err + return err } - defer func() { - err = esdp.messenger.UnregisterMessageProcessor(factory.MetachainBlocksTopic) - if err != nil { - log.Info("error unregistering message processor", "error", err) - } - err = esdp.messenger.UnregisterMessageProcessor(factory.MetachainBlocksTopic + requestSuffix) - if err != nil { - log.Info("error unregistering message processor", "error", err) - } - }() - err = esdp.requestMetaBlock() + return nil +} + +func (esdp *epochStartDataProvider) resetTopicsAndInterceptors() { + err := esdp.messenger.UnregisterMessageProcessor(factory.MetachainBlocksTopic) if err != nil { - return nil, err + log.Info("error unregistering message processor", "error", err) + } + err = esdp.messenger.UnregisterMessageProcessor(factory.MetachainBlocksTopic + requestSuffix) + if err != nil { + log.Info("error unregistering message processor", "error", err) } +} +func (esdp *epochStartDataProvider) getEpochStartMetaBlock(epoch uint32) (*block.MetaBlock, error) { + err := esdp.requestMetaBlock(epoch) + if err != nil { + return nil, err + } for { threshold := int(thresholdForConsideringMetaBlockCorrect * float64(len(esdp.messenger.Peers()))) - mb, errConsensusNotReached := esdp.metaBlockInterceptor.GetMetaBlock(threshold) + mb, errConsensusNotReached := esdp.metaBlockInterceptor.GetMetaBlock(threshold, epoch) if errConsensusNotReached == nil { return mb, nil } log.Info("consensus not reached for epoch start meta block. 
re-requesting and trying again...") - err = esdp.requestMetaBlock() + err = esdp.requestMetaBlock(epoch) if err != nil { return nil, err } } } -func (esdp *epochStartDataProvider) requestMetaBlock() error { +func (esdp *epochStartDataProvider) requestMetaBlock(epoch uint32) error { // send more requests for i := 0; i < numRequestsToSendOnce; i++ { time.Sleep(delayBetweenRequests) log.Debug("sent request for epoch start metablock...") - err := esdp.metaBlockResolver.RequestEpochStartMetaBlock() + err := esdp.metaBlockResolver.RequestEpochStartMetaBlock(epoch) if err != nil { return err } diff --git a/epochStart/bootstrap/interface.go b/epochStart/bootstrap/interface.go new file mode 100644 index 00000000000..3ac2d9e2bf0 --- /dev/null +++ b/epochStart/bootstrap/interface.go @@ -0,0 +1,26 @@ +package bootstrap + +import ( + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" +) + +type metaBlockInterceptorHandler interface { + process.Interceptor + GetMetaBlock(target int, epoch uint32) (*block.MetaBlock, error) +} + +type shardHeaderInterceptorHandler interface { + process.Interceptor + GetAllReceivedShardHeaders() []block.ShardData +} + +type metaBlockResolverHandler interface { + RequestEpochStartMetaBlock(epoch uint32) error +} + +// NodesConfigProviderHandler defines what a component which will handle the nodes config should be able to do +type NodesConfigProviderHandler interface { + GetNodesConfigForMetaBlock(metaBlock *block.MetaBlock) (*sharding.NodesSetup, error) +} diff --git a/epochStart/bootstrap/nodesconfigprovider/simpleNodesConfigProvider.go b/epochStart/bootstrap/nodesconfigprovider/simpleNodesConfigProvider.go new file mode 100644 index 00000000000..710ddff5d30 --- /dev/null +++ b/epochStart/bootstrap/nodesconfigprovider/simpleNodesConfigProvider.go @@ -0,0 +1,27 @@ +package nodesconfigprovider + +import ( + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/sharding" +) + +type simpleNodesConfigProvider struct { + originalNodesConfig *sharding.NodesSetup +} + +// NewSimpleNodesConfigProvider returns a new instance of simpleNodesConfigProvider +func NewSimpleNodesConfigProvider(originalNodesConfig *sharding.NodesSetup) *simpleNodesConfigProvider { + return &simpleNodesConfigProvider{ + originalNodesConfig: originalNodesConfig, + } +} + +// GetNodesConfigForMetaBlock will return the original nodes setup +func (sncp *simpleNodesConfigProvider) GetNodesConfigForMetaBlock(_ *block.MetaBlock) (*sharding.NodesSetup, error) { + return sncp.originalNodesConfig, nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (sncp *simpleNodesConfigProvider) IsInterfaceNil() bool { + return sncp == nil +} diff --git a/epochStart/bootstrap/simpleMetaBlockInterceptor.go b/epochStart/bootstrap/simpleMetaBlockInterceptor.go index c21ec37d792..5b0170bc456 100644 --- a/epochStart/bootstrap/simpleMetaBlockInterceptor.go +++ b/epochStart/bootstrap/simpleMetaBlockInterceptor.go @@ -2,6 +2,7 @@ package bootstrap import ( "errors" + "math" "sync" "time" @@ -75,12 +76,15 @@ func (s *simpleMetaBlockInterceptor) addToPeerList(hash string, id p2p.PeerID) { } // GetMetaBlock will return the metablock after it is confirmed or an error if the number of tries was exceeded -func (s *simpleMetaBlockInterceptor) GetMetaBlock(target int) (*block.MetaBlock, error) { +func (s *simpleMetaBlockInterceptor) GetMetaBlock(target int, epoch uint32) (*block.MetaBlock, 
error) {
 	for count := 0; count < numTriesUntilExit; count++ {
 		time.Sleep(timeToWaitBeforeCheckingReceivedMetaBlocks)
 		s.mutReceivedMetaBlocks.RLock()
 		for hash, peersList := range s.mapMetaBlocksFromPeers {
-			if len(peersList) >= target {
+			mb := s.mapReceivedMetaBlocks[hash]
+			epochCheckNotRequired := epoch == math.MaxUint32
+			isEpochOk := epochCheckNotRequired || mb.Epoch == epoch
+			if len(peersList) >= target && isEpochOk {
 				s.mutReceivedMetaBlocks.RUnlock()
 				log.Info("got consensus for metablock", "len", len(peersList))
 				return s.mapReceivedMetaBlocks[hash], nil
diff --git a/epochStart/bootstrap/simpleMetaBlocksResolver.go b/epochStart/bootstrap/simpleMetaBlocksResolver.go
index bcca9e34a5b..41c986517bc 100644
--- a/epochStart/bootstrap/simpleMetaBlocksResolver.go
+++ b/epochStart/bootstrap/simpleMetaBlocksResolver.go
@@ -3,8 +3,6 @@ package bootstrap
 import (
 	"errors"
 	"fmt"
-	"math"
-
 	"github.com/ElrondNetwork/elrond-go/core/partitioning"
 	"github.com/ElrondNetwork/elrond-go/data/state"
 	"github.com/ElrondNetwork/elrond-go/data/typeConverters/uint64ByteSlice"
@@ -90,6 +88,6 @@ func (smbr *simpleMetaBlocksResolver) init() error {
 }
 
 // RequestEpochStartMetaBlock will request the metablock from the peers
-func (smbr *simpleMetaBlocksResolver) RequestEpochStartMetaBlock() error {
-	return smbr.mbResolver.RequestDataFromEpoch([]byte(fmt.Sprintf("epochStartBlock_%d", math.MaxUint32)))
+func (smbr *simpleMetaBlocksResolver) RequestEpochStartMetaBlock(epoch uint32) error {
+	return smbr.mbResolver.RequestDataFromEpoch([]byte(fmt.Sprintf("epochStartBlock_%d", epoch)))
 }
diff --git a/facade/elrondNodeFacade_test.go b/facade/elrondNodeFacade_test.go
index dde2a58d6d3..fc2f05a8954 100644
--- a/facade/elrondNodeFacade_test.go
+++ b/facade/elrondNodeFacade_test.go
@@ -63,7 +63,7 @@ func TestElrondFacade_StartNodeWithNodeNotNullShouldNotReturnError(t *testing.T)
 
 	ef := createElrondNodeFacadeWithMockResolver(node)
 
-	err := ef.StartNode(0)
+	err := ef.StartNode(0, true)
 	assert.Nil(t, err)
 
 	isRunning := ef.IsNodeRunning()
@@ -83,7 +83,7 @@ func TestElrondFacade_StartNodeWithErrorOnStartNodeShouldReturnError(t *testing.
ef := createElrondNodeFacadeWithMockResolver(node) - err := ef.StartNode(0) + err := ef.StartNode(0, true) assert.NotNil(t, err) isRunning := ef.IsNodeRunning() @@ -111,7 +111,7 @@ func TestElrondFacade_StartNodeWithErrorOnStartConsensusShouldReturnError(t *tes ef := createElrondNodeFacadeWithMockResolver(node) - err := ef.StartNode(0) + err := ef.StartNode(0, true) assert.NotNil(t, err) isRunning := ef.IsNodeRunning() From 8dec7d7303161bd373c3ba1c67b2c63125c29d97 Mon Sep 17 00:00:00 2001 From: bogdan-rosianu Date: Tue, 3 Mar 2020 16:54:17 +0200 Subject: [PATCH 05/61] EN-5829: added unit tests and some changes --- cmd/node/main.go | 24 ++- .../bootstrap/disabled/disabledChainStorer.go | 100 ++++++++++ .../bootstrap/disabled/disabledPoolsHolder.go | 106 +++++++++++ .../disabled/disabledShardCoordinator.go | 60 ++++++ .../bootstrap/disabled/disabledStorer.go | 87 +++++++++ .../bootstrap/epochStartDataProvider.go | 73 ++++--- .../bootstrap/epochStartDataProvider_test.go | 178 ++++++++++++++++++ epochStart/bootstrap/errors.go | 27 +++ epochStart/bootstrap/export_test.go | 20 ++ epochStart/bootstrap/interface.go | 16 +- epochStart/bootstrap/mock/messengerStub.go | 165 ++++++++++++++++ .../mock/metaBlockInterceptorStub.go | 35 ++++ .../bootstrap/mock/metaBlockResolverStub.go | 20 ++ .../bootstrap/mock/nodesConfigProviderStub.go | 25 +++ epochStart/bootstrap/mock/p2pMessageMock.go | 54 ++++++ .../mock/shardHeaderInterceptorStub.go | 35 ++++ .../bootstrap/simpleMetaBlockInterceptor.go | 15 +- .../simpleMetaBlockInterceptor_test.go | 171 +++++++++++++++++ ...Resolver.go => simpleMetaBlockResolver.go} | 25 ++- .../bootstrap/simpleMetaBlockResolver_test.go | 57 ++++++ node/mock/chainStorerMock.go | 5 +- 21 files changed, 1252 insertions(+), 46 deletions(-) create mode 100644 epochStart/bootstrap/disabled/disabledChainStorer.go create mode 100644 epochStart/bootstrap/disabled/disabledPoolsHolder.go create mode 100644 epochStart/bootstrap/disabled/disabledShardCoordinator.go create mode 100644 epochStart/bootstrap/disabled/disabledStorer.go create mode 100644 epochStart/bootstrap/epochStartDataProvider_test.go create mode 100644 epochStart/bootstrap/errors.go create mode 100644 epochStart/bootstrap/export_test.go create mode 100644 epochStart/bootstrap/mock/messengerStub.go create mode 100644 epochStart/bootstrap/mock/metaBlockInterceptorStub.go create mode 100644 epochStart/bootstrap/mock/metaBlockResolverStub.go create mode 100644 epochStart/bootstrap/mock/nodesConfigProviderStub.go create mode 100644 epochStart/bootstrap/mock/p2pMessageMock.go create mode 100644 epochStart/bootstrap/mock/shardHeaderInterceptorStub.go create mode 100644 epochStart/bootstrap/simpleMetaBlockInterceptor_test.go rename epochStart/bootstrap/{simpleMetaBlocksResolver.go => simpleMetaBlockResolver.go} (82%) create mode 100644 epochStart/bootstrap/simpleMetaBlockResolver_test.go diff --git a/cmd/node/main.go b/cmd/node/main.go index d864a5ae898..11c373f6731 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -573,13 +573,25 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { } time.Sleep(2 * time.Second) + marshalizer := &marshal.JsonMarshalizer{} + hasher := &blake2b.Blake2b{} simpleNodesConfigProvider := nodesconfigprovider.NewSimpleNodesConfigProvider(nodesConfig) - epochRes, err := bootstrap.NewEpochStartDataProvider( - networkComponents.NetMessenger, - &marshal.JsonMarshalizer{}, - &blake2b.Blake2b{}, - simpleNodesConfigProvider, - ) + metaBlockInterceptor, err := 
bootstrap.NewSimpleMetaBlockInterceptor(marshalizer, hasher)
+	if err != nil {
+		return err
+	}
+	shardHdrInterceptor := bootstrap.NewSimpleShardHeaderInterceptor(marshalizer)
+	metaBlockResolver, err := bootstrap.NewSimpleMetaBlocksResolver(networkComponents.NetMessenger, marshalizer)
+	if err != nil {
+		return err
+	}
+	epochStartDataArgs := bootstrap.ArgsEpochStartDataProvider{
+		Messenger:              networkComponents.NetMessenger,
+		Marshalizer:            marshalizer,
+		Hasher:                 hasher,
+		NodesConfigProvider:    simpleNodesConfigProvider,
+		MetaBlockInterceptor:   metaBlockInterceptor,
+		ShardHeaderInterceptor: shardHdrInterceptor,
+		MetaBlockResolver:      metaBlockResolver,
+	}
+	epochRes, err := bootstrap.NewEpochStartDataProvider(epochStartDataArgs)
 	if err != nil {
 		return err
 	}
diff --git a/epochStart/bootstrap/disabled/disabledChainStorer.go b/epochStart/bootstrap/disabled/disabledChainStorer.go
new file mode 100644
index 00000000000..c0b6a06328e
--- /dev/null
+++ b/epochStart/bootstrap/disabled/disabledChainStorer.go
@@ -0,0 +1,100 @@
+package disabled
+
+import (
+	"errors"
+
+	"github.com/ElrondNetwork/elrond-go/dataRetriever"
+	"github.com/ElrondNetwork/elrond-go/storage"
+)
+
+// ChainStorer is a mock implementation of the ChainStorer interface
+type ChainStorer struct {
+	AddStorerCalled func(key dataRetriever.UnitType, s storage.Storer)
+	GetStorerCalled func(unitType dataRetriever.UnitType) storage.Storer
+	HasCalled       func(unitType dataRetriever.UnitType, key []byte) error
+	GetCalled       func(unitType dataRetriever.UnitType, key []byte) ([]byte, error)
+	PutCalled       func(unitType dataRetriever.UnitType, key []byte, value []byte) error
+	GetAllCalled    func(unitType dataRetriever.UnitType, keys [][]byte) (map[string][]byte, error)
+	DestroyCalled   func() error
+	CloseAllCalled  func() error
+}
+
+// CloseAll -
+func (bc *ChainStorer) CloseAll() error {
+	if bc.CloseAllCalled != nil {
+		return bc.CloseAllCalled()
+	}
+
+	return nil
+}
+
+// AddStorer will add a new storer to the chain map
+func (bc *ChainStorer) AddStorer(key dataRetriever.UnitType, s storage.Storer) {
+	if bc.AddStorerCalled != nil {
+		bc.AddStorerCalled(key, s)
+	}
+}
+
+// GetStorer returns the storer from the chain map or nil if the storer was not found
+func (bc *ChainStorer) GetStorer(unitType dataRetriever.UnitType) storage.Storer {
+	if bc.GetStorerCalled != nil {
+		return bc.GetStorerCalled(unitType)
+	}
+	return nil
+}
+
+// Has returns true if the key is found in the selected Unit or false otherwise
+// It can return an error if the provided unit type is not supported or if the
+// underlying implementation of the storage unit reports an error.
+func (bc *ChainStorer) Has(unitType dataRetriever.UnitType, key []byte) error {
+	if bc.HasCalled != nil {
+		return bc.HasCalled(unitType, key)
+	}
+	return errors.New("key not found")
+}
+
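+// Illustrative sketch: with no callbacks set, lookups come back empty and Has
+// reports a miss, which is all the bootstrap-time resolvers need from their
+// storage dependency. A caller could wire it up roughly like this (mirroring the
+// resolver wiring later in this series):
+//
+//	storageService := &ChainStorer{
+//		GetStorerCalled: func(_ dataRetriever.UnitType) storage.Storer {
+//			return NewDisabledStorer()
+//		},
+//	}
+
+// Get returns the value for the given key if found in the selected storage unit,
+// nil otherwise. 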
It can return an error if the provided unit type is not supported +// or if the storage unit underlying implementation reports an error +func (bc *ChainStorer) Get(unitType dataRetriever.UnitType, key []byte) ([]byte, error) { + if bc.GetCalled != nil { + return bc.GetCalled(unitType, key) + } + return nil, nil +} + +// Put stores the key, value pair in the selected storage unit +// It can return an error if the provided unit type is not supported +// or if the storage unit underlying implementation reports an error +func (bc *ChainStorer) Put(unitType dataRetriever.UnitType, key []byte, value []byte) error { + if bc.PutCalled != nil { + return bc.PutCalled(unitType, key, value) + } + return nil +} + +// GetAll gets all the elements with keys in the keys array, from the selected storage unit +// It can report an error if the provided unit type is not supported, if there is a missing +// key in the unit, or if the underlying implementation of the storage unit reports an error. +func (bc *ChainStorer) GetAll(unitType dataRetriever.UnitType, keys [][]byte) (map[string][]byte, error) { + if bc.GetAllCalled != nil { + return bc.GetAllCalled(unitType, keys) + } + return nil, nil +} + +// Destroy removes the underlying files/resources used by the storage service +func (bc *ChainStorer) Destroy() error { + if bc.DestroyCalled != nil { + return bc.DestroyCalled() + } + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (bc *ChainStorer) IsInterfaceNil() bool { + if bc == nil { + return true + } + return false +} diff --git a/epochStart/bootstrap/disabled/disabledPoolsHolder.go b/epochStart/bootstrap/disabled/disabledPoolsHolder.go new file mode 100644 index 00000000000..878d89e525a --- /dev/null +++ b/epochStart/bootstrap/disabled/disabledPoolsHolder.go @@ -0,0 +1,106 @@ +package disabled + +import ( + "github.com/ElrondNetwork/elrond-go/config" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool" + "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool/headersCache" + "github.com/ElrondNetwork/elrond-go/dataRetriever/shardedData" + "github.com/ElrondNetwork/elrond-go/dataRetriever/txpool" + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/ElrondNetwork/elrond-go/storage/storageUnit" +) + +// PoolsHolder - +type PoolsHolder struct { + transactions dataRetriever.ShardedDataCacherNotifier + unsignedTransactions dataRetriever.ShardedDataCacherNotifier + rewardTransactions dataRetriever.ShardedDataCacherNotifier + headers dataRetriever.HeadersPool + miniBlocks storage.Cacher + peerChangesBlocks storage.Cacher + trieNodes storage.Cacher + currBlockTxs dataRetriever.TransactionCacher +} + +// NewDisabledPoolsHolder - +func NewDisabledPoolsHolder() *PoolsHolder { + phf := &PoolsHolder{} + + phf.transactions, _ = txpool.NewShardedTxPool( + txpool.ArgShardedTxPool{ + Config: storageUnit.CacheConfig{ + Size: 10000, + SizeInBytes: 1000000000, + Shards: 16, + }, + MinGasPrice: 100000000000000, + NumberOfShards: 1, + }, + ) + + phf.unsignedTransactions, _ = shardedData.NewShardedData(storageUnit.CacheConfig{Size: 10000, Type: storageUnit.LRUCache}) + phf.rewardTransactions, _ = shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache}) + phf.headers, _ = headersCache.NewHeadersPool(config.HeadersPoolConfig{MaxHeadersPerShard: 1000, NumElementsToRemoveOnEviction: 100}) + phf.miniBlocks, _ = storageUnit.NewCache(storageUnit.LRUCache, 10000, 1) + 
phf.peerChangesBlocks, _ = storageUnit.NewCache(storageUnit.LRUCache, 10000, 1) + phf.currBlockTxs, _ = dataPool.NewCurrentBlockPool() + phf.trieNodes, _ = storageUnit.NewCache(storageUnit.LRUCache, 10000, 1) + + return phf +} + +// CurrentBlockTxs - +func (phm *PoolsHolder) CurrentBlockTxs() dataRetriever.TransactionCacher { + return phm.currBlockTxs +} + +// Transactions - +func (phm *PoolsHolder) Transactions() dataRetriever.ShardedDataCacherNotifier { + return phm.transactions +} + +// UnsignedTransactions - +func (phm *PoolsHolder) UnsignedTransactions() dataRetriever.ShardedDataCacherNotifier { + return phm.unsignedTransactions +} + +// RewardTransactions - +func (phm *PoolsHolder) RewardTransactions() dataRetriever.ShardedDataCacherNotifier { + return phm.rewardTransactions +} + +// Headers - +func (phm *PoolsHolder) Headers() dataRetriever.HeadersPool { + return phm.headers +} + +// MiniBlocks - +func (phm *PoolsHolder) MiniBlocks() storage.Cacher { + return phm.miniBlocks +} + +// PeerChangesBlocks - +func (phm *PoolsHolder) PeerChangesBlocks() storage.Cacher { + return phm.peerChangesBlocks +} + +// SetTransactions - +func (phm *PoolsHolder) SetTransactions(transactions dataRetriever.ShardedDataCacherNotifier) { + phm.transactions = transactions +} + +// SetUnsignedTransactions - +func (phm *PoolsHolder) SetUnsignedTransactions(scrs dataRetriever.ShardedDataCacherNotifier) { + phm.unsignedTransactions = scrs +} + +// TrieNodes - +func (phm *PoolsHolder) TrieNodes() storage.Cacher { + return phm.trieNodes +} + +// IsInterfaceNil returns true if there is no value under the interface +func (phm *PoolsHolder) IsInterfaceNil() bool { + return phm == nil +} diff --git a/epochStart/bootstrap/disabled/disabledShardCoordinator.go b/epochStart/bootstrap/disabled/disabledShardCoordinator.go new file mode 100644 index 00000000000..8e97dd44541 --- /dev/null +++ b/epochStart/bootstrap/disabled/disabledShardCoordinator.go @@ -0,0 +1,60 @@ +package disabled + +import ( + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/data/state" +) + +type shardCoordinator struct { + numShards uint32 +} + +// NewShardCoordinator - +func NewShardCoordinator() *shardCoordinator { + return &shardCoordinator{numShards: 1} +} + +// NumberOfShards - +func (scm *shardCoordinator) NumberOfShards() uint32 { + return scm.numShards +} + +// SetNoShards - +func (scm *shardCoordinator) SetNoShards(shards uint32) { + scm.numShards = shards +} + +// ComputeId - +func (scm *shardCoordinator) ComputeId(address state.AddressContainer) uint32 { + + return uint32(0) +} + +// SelfId - +func (scm *shardCoordinator) SelfId() uint32 { + return 0 +} + +// SetSelfId - +func (scm *shardCoordinator) SetSelfId(shardId uint32) error { + return nil +} + +// SameShard - +func (scm *shardCoordinator) SameShard(firstAddress, secondAddress state.AddressContainer) bool { + return true +} + +// CommunicationIdentifier - +func (scm *shardCoordinator) CommunicationIdentifier(destShardID uint32) string { + if destShardID == core.MetachainShardId { + return "_0_META" + } + + return "_0" +} + +// IsInterfaceNil returns true if there is no value under the interface +func (scm *shardCoordinator) IsInterfaceNil() bool { + return scm == nil +} diff --git a/epochStart/bootstrap/disabled/disabledStorer.go b/epochStart/bootstrap/disabled/disabledStorer.go new file mode 100644 index 00000000000..61d7b8d9ce0 --- /dev/null +++ b/epochStart/bootstrap/disabled/disabledStorer.go @@ -0,0 +1,87 @@ +package disabled + +import ( + 
"encoding/base64" + "errors" + "fmt" + "sync" +) + +// Storer - +type Storer struct { + mut sync.Mutex + data map[string][]byte +} + +// NewDisabledStorer - +func NewDisabledStorer() *Storer { + return &Storer{ + data: make(map[string][]byte), + } +} + +// Put - +func (sm *Storer) Put(key, data []byte) error { + sm.mut.Lock() + defer sm.mut.Unlock() + sm.data[string(key)] = data + + return nil +} + +// Get - +func (sm *Storer) Get(key []byte) ([]byte, error) { + sm.mut.Lock() + defer sm.mut.Unlock() + + val, ok := sm.data[string(key)] + if !ok { + return nil, fmt.Errorf("key: %s not found", base64.StdEncoding.EncodeToString(key)) + } + + return val, nil +} + +// GetFromEpoch - +func (sm *Storer) GetFromEpoch(key []byte, _ uint32) ([]byte, error) { + return sm.Get(key) +} + +// HasInEpoch - +func (sm *Storer) HasInEpoch(key []byte, epoch uint32) error { + return errors.New("not implemented") +} + +// SearchFirst - +func (sm *Storer) SearchFirst(key []byte) ([]byte, error) { + return nil, errors.New("not implemented") +} + +// Close - +func (sm *Storer) Close() error { + return nil +} + +// Has - +func (sm *Storer) Has(key []byte) error { + return errors.New("not implemented") +} + +// Remove - +func (sm *Storer) Remove(key []byte) error { + return errors.New("not implemented") +} + +// ClearCache - +func (sm *Storer) ClearCache() { +} + +// DestroyUnit - +func (sm *Storer) DestroyUnit() error { + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (sm *Storer) IsInterfaceNil() bool { + return sm == nil +} diff --git a/epochStart/bootstrap/epochStartDataProvider.go b/epochStart/bootstrap/epochStartDataProvider.go index cb19997f1a8..8a8f6e5f5ae 100644 --- a/epochStart/bootstrap/epochStartDataProvider.go +++ b/epochStart/bootstrap/epochStartDataProvider.go @@ -4,6 +4,7 @@ import ( "math" "time" + "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/hashing" "github.com/ElrondNetwork/elrond-go/logger" @@ -34,33 +35,53 @@ type epochStartDataProvider struct { hasher hashing.Hasher messenger p2p.Messenger nodesConfigProvider NodesConfigProviderHandler - metaBlockInterceptor metaBlockInterceptorHandler - shardHeaderInterceptor shardHeaderInterceptorHandler - metaBlockResolver metaBlockResolverHandler + metaBlockInterceptor MetaBlockInterceptorHandler + shardHeaderInterceptor ShardHeaderInterceptorHandler + metaBlockResolver MetaBlockResolverHandler +} + +// ArgsEpochStartDataProvider holds the arguments needed for creating an epoch start data provider component +type ArgsEpochStartDataProvider struct { + Messenger p2p.Messenger + Marshalizer marshal.Marshalizer + Hasher hashing.Hasher + NodesConfigProvider NodesConfigProviderHandler + MetaBlockInterceptor MetaBlockInterceptorHandler + ShardHeaderInterceptor ShardHeaderInterceptorHandler + MetaBlockResolver MetaBlockResolverHandler } // NewEpochStartDataProvider will return a new instance of epochStartDataProvider -func NewEpochStartDataProvider( - messenger p2p.Messenger, - marshalizer marshal.Marshalizer, - hasher hashing.Hasher, - nodesConfigProvider NodesConfigProviderHandler, -) (*epochStartDataProvider, error) { - metaBlockInterceptor := NewSimpleMetaBlockInterceptor(marshalizer, hasher) - shardHdrInterceptor := NewSimpleShardHeaderInterceptor(marshalizer) - metaBlockResolver, err := NewSimpleMetaBlocksResolver(messenger, marshalizer) - if err != nil { - return nil, err +func NewEpochStartDataProvider(args 
ArgsEpochStartDataProvider) (*epochStartDataProvider, error) {
+	if check.IfNil(args.Messenger) {
+		return nil, ErrNilMessenger
+	}
+	if check.IfNil(args.Marshalizer) {
+		return nil, ErrNilMarshalizer
+	}
+	if check.IfNil(args.Hasher) {
+		return nil, ErrNilHasher
+	}
+	if check.IfNil(args.NodesConfigProvider) {
+		return nil, ErrNilNodesConfigProvider
+	}
+	if check.IfNil(args.MetaBlockInterceptor) {
+		return nil, ErrNilMetaBlockInterceptor
+	}
+	if check.IfNil(args.ShardHeaderInterceptor) {
+		return nil, ErrNilShardHeaderInterceptor
+	}
+	if check.IfNil(args.MetaBlockResolver) {
+		return nil, ErrNilMetaBlockResolver
 	}
 
 	return &epochStartDataProvider{
-		marshalizer:             marshalizer,
-		hasher:                  hasher,
-		messenger:               messenger,
-		nodesConfigProvider:     nodesConfigProvider,
-		metaBlockInterceptor:    metaBlockInterceptor,
-		shardHeaderInterceptor:  shardHdrInterceptor,
-		metaBlockResolver:       metaBlockResolver,
+		marshalizer:             args.Marshalizer,
+		hasher:                  args.Hasher,
+		messenger:               args.Messenger,
+		nodesConfigProvider:     args.NodesConfigProvider,
+		metaBlockInterceptor:    args.MetaBlockInterceptor,
+		shardHeaderInterceptor:  args.ShardHeaderInterceptor,
+		metaBlockResolver:       args.MetaBlockResolver,
 	}, nil
 }
@@ -126,7 +147,8 @@ func (esdp *epochStartDataProvider) getEpochStartMetaBlock(epoch uint32) (*block
 		return nil, err
 	}
 	for {
-		threshold := int(thresholdForConsideringMetaBlockCorrect * float64(len(esdp.messenger.Peers())))
+		numConnectedPeers := len(esdp.messenger.Peers())
+		threshold := int(thresholdForConsideringMetaBlockCorrect * float64(numConnectedPeers))
 		mb, errConsensusNotReached := esdp.metaBlockInterceptor.GetMetaBlock(threshold, epoch)
 		if errConsensusNotReached == nil {
 			return mb, nil
@@ -152,3 +174,8 @@ func (esdp *epochStartDataProvider) requestMetaBlock(epoch uint32) error {
 
 	return nil
 }
+
+// IsInterfaceNil returns true if there is no value under the interface
+func (esdp *epochStartDataProvider) IsInterfaceNil() bool {
+	return esdp == nil
+}
diff --git a/epochStart/bootstrap/epochStartDataProvider_test.go b/epochStart/bootstrap/epochStartDataProvider_test.go
new file mode 100644
index 00000000000..1e748611c88
--- /dev/null
+++ b/epochStart/bootstrap/epochStartDataProvider_test.go
@@ -0,0 +1,178 @@
+package bootstrap_test
+
+import (
+	"errors"
+	"testing"
+
+	"github.com/ElrondNetwork/elrond-go/core/check"
+	"github.com/ElrondNetwork/elrond-go/data/block"
+	"github.com/ElrondNetwork/elrond-go/epochStart/bootstrap"
+	"github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/mock"
+	mock2 "github.com/ElrondNetwork/elrond-go/epochStart/mock"
+	"github.com/ElrondNetwork/elrond-go/sharding"
+	"github.com/stretchr/testify/require"
+)
+
+func TestNewEpochStartDataProvider_NilMessengerShouldErr(t *testing.T) {
+	t.Parallel()
+
+	args := getArguments()
+	args.Messenger = nil
+	epStart, err := bootstrap.NewEpochStartDataProvider(args)
+
+	require.Nil(t, epStart)
+	require.Equal(t, bootstrap.ErrNilMessenger, err)
+}
+
+func TestNewEpochStartDataProvider_NilMarshalizerShouldErr(t *testing.T) {
+	t.Parallel()
+
+	args := getArguments()
+	args.Marshalizer = nil
+	epStart, err := bootstrap.NewEpochStartDataProvider(args)
+
+	require.Nil(t, epStart)
+	require.Equal(t, bootstrap.ErrNilMarshalizer, err)
+}
+func TestNewEpochStartDataProvider_NilHasherShouldErr(t *testing.T) {
+	t.Parallel()
+
+	args := getArguments()
+	args.Hasher = nil
+	epStart, err := bootstrap.NewEpochStartDataProvider(args)
+
+	require.Nil(t, epStart)
+	require.Equal(t, bootstrap.ErrNilHasher, err)
+}
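+// The constructor guard tests here all share one sketch: start from a fully valid
+// argument struct, nil out a single dependency and expect the matching sentinel
+// error, roughly:
+//
+//	args := getArguments() // valid baseline, defined at the end of this file
+//	args.Hasher = nil      // break exactly one dependency
+//	_, err := bootstrap.NewEpochStartDataProvider(args)
+//	// err is expected to equal bootstrap.ErrNilHasher
+
+func 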
TestNewEpochStartDataProvider_NilNodesConfigProviderShouldErr(t *testing.T) { + t.Parallel() + + args := getArguments() + args.NodesConfigProvider = nil + epStart, err := bootstrap.NewEpochStartDataProvider(args) + + require.Nil(t, epStart) + require.Equal(t, bootstrap.ErrNilNodesConfigProvider, err) +} +func TestNewEpochStartDataProvider_NilMetablockInterceptorShouldErr(t *testing.T) { + t.Parallel() + + args := getArguments() + args.MetaBlockInterceptor = nil + epStart, err := bootstrap.NewEpochStartDataProvider(args) + + require.Nil(t, epStart) + require.Equal(t, bootstrap.ErrNilMetaBlockInterceptor, err) +} +func TestNewEpochStartDataProvider_NilShardHeaderInterceptorShouldErr(t *testing.T) { + t.Parallel() + + args := getArguments() + args.ShardHeaderInterceptor = nil + epStart, err := bootstrap.NewEpochStartDataProvider(args) + + require.Nil(t, epStart) + require.Equal(t, bootstrap.ErrNilShardHeaderInterceptor, err) +} +func TestNewEpochStartDataProvider_NilMetaBlockResolverShouldErr(t *testing.T) { + t.Parallel() + + args := getArguments() + args.MetaBlockResolver = nil + epStart, err := bootstrap.NewEpochStartDataProvider(args) + + require.Nil(t, epStart) + require.Equal(t, bootstrap.ErrNilMetaBlockResolver, err) +} +func TestNewEpochStartDataProvider_OkValsShouldWork(t *testing.T) { + t.Parallel() + + args := getArguments() + epStart, err := bootstrap.NewEpochStartDataProvider(args) + + require.Nil(t, err) + require.False(t, check.IfNil(epStart)) +} + +func TestEpochStartDataProvider_Bootstrap_TopicCreationFailsShouldErr(t *testing.T) { + t.Parallel() + + expectedErr := errors.New("error while creating topic") + args := getArguments() + args.Messenger = &mock.MessengerStub{ + CreateTopicCalled: func(_ string, _ bool) error { + return expectedErr + }, + } + epStart, _ := bootstrap.NewEpochStartDataProvider(args) + + res, err := epStart.Bootstrap() + + require.Nil(t, res) + require.Equal(t, expectedErr, err) +} + +func TestEpochStartDataProvider_Bootstrap_MetaBlockRequestFailsShouldErr(t *testing.T) { + t.Parallel() + + expectedErr := errors.New("error while creating topic") + args := getArguments() + args.MetaBlockResolver = &mock.MetaBlockResolverStub{ + RequestEpochStartMetaBlockCalled: func(_ uint32) error { + return expectedErr + }, + } + epStart, _ := bootstrap.NewEpochStartDataProvider(args) + + res, err := epStart.Bootstrap() + + require.Nil(t, res) + require.Equal(t, expectedErr, err) +} + +func TestEpochStartDataProvider_Bootstrap_GetNodesConfigFailsShouldErr(t *testing.T) { + t.Parallel() + + expectedErr := errors.New("error while creating topic") + args := getArguments() + args.NodesConfigProvider = &mock.NodesConfigProviderStub{ + GetNodesConfigForMetaBlockCalled: func(_ *block.MetaBlock) (*sharding.NodesSetup, error) { + return &sharding.NodesSetup{}, expectedErr + }, + } + epStart, _ := bootstrap.NewEpochStartDataProvider(args) + + res, err := epStart.Bootstrap() + + require.Nil(t, res) + require.Equal(t, expectedErr, err) +} + +func TestEpochStartDataProvider_Bootstrap_ShouldWork(t *testing.T) { + t.Parallel() + + args := getArguments() + args.NodesConfigProvider = &mock.NodesConfigProviderStub{ + GetNodesConfigForMetaBlockCalled: func(_ *block.MetaBlock) (*sharding.NodesSetup, error) { + return &sharding.NodesSetup{}, nil + }, + } + epStart, _ := bootstrap.NewEpochStartDataProvider(args) + + res, err := epStart.Bootstrap() + + require.Nil(t, err) + require.NotNil(t, res) +} + +func getArguments() bootstrap.ArgsEpochStartDataProvider { + return 
bootstrap.ArgsEpochStartDataProvider{
+		Messenger:              &mock.MessengerStub{},
+		Marshalizer:            &mock2.MarshalizerMock{},
+		Hasher:                 mock2.HasherMock{},
+		NodesConfigProvider:    &mock.NodesConfigProviderStub{},
+		MetaBlockInterceptor:   &mock.MetaBlockInterceptorStub{},
+		ShardHeaderInterceptor: &mock.ShardHeaderInterceptorStub{},
+		MetaBlockResolver:      &mock.MetaBlockResolverStub{},
+	}
+}
diff --git a/epochStart/bootstrap/errors.go b/epochStart/bootstrap/errors.go
new file mode 100644
index 00000000000..bad59989d93
--- /dev/null
+++ b/epochStart/bootstrap/errors.go
@@ -0,0 +1,27 @@
+package bootstrap
+
+import "errors"
+
+// ErrNilMessenger signals that a nil messenger has been provided
+var ErrNilMessenger = errors.New("nil messenger")
+
+// ErrNilMarshalizer signals that a nil marshalizer has been provided
+var ErrNilMarshalizer = errors.New("nil marshalizer")
+
+// ErrNilHasher signals that a nil hasher has been provided
+var ErrNilHasher = errors.New("nil hasher")
+
+// ErrNilNodesConfigProvider signals that a nil nodes config provider has been given
+var ErrNilNodesConfigProvider = errors.New("nil nodes config provider")
+
+// ErrNilMetaBlockInterceptor signals that a nil metablock interceptor has been provided
+var ErrNilMetaBlockInterceptor = errors.New("nil metablock interceptor")
+
+// ErrNilShardHeaderInterceptor signals that a nil shard header interceptor has been provided
+var ErrNilShardHeaderInterceptor = errors.New("nil shard header interceptor")
+
+// ErrNilMetaBlockResolver signals that a nil metablock resolver has been provided
+var ErrNilMetaBlockResolver = errors.New("nil metablock resolver")
+
+// ErrNumTriesExceeded signals that there were too many tries for fetching a metablock
+var ErrNumTriesExceeded = errors.New("num of tries exceeded. try re-request")
diff --git a/epochStart/bootstrap/export_test.go b/epochStart/bootstrap/export_test.go
new file mode 100644
index 00000000000..bdea755593c
--- /dev/null
+++ b/epochStart/bootstrap/export_test.go
@@ -0,0 +1,20 @@
+package bootstrap
+
+import (
+	"github.com/ElrondNetwork/elrond-go/data/block"
+	"github.com/ElrondNetwork/elrond-go/p2p"
+)
+
+func (smbi *simpleMetaBlockInterceptor) GetReceivedMetablocks() map[string]*block.MetaBlock {
+	smbi.mutReceivedMetaBlocks.RLock()
+	defer smbi.mutReceivedMetaBlocks.RUnlock()
+
+	return smbi.mapReceivedMetaBlocks
+}
+
+func (smbi *simpleMetaBlockInterceptor) GetPeersSliceForMetablocks() map[string][]p2p.PeerID {
+	smbi.mutReceivedMetaBlocks.RLock()
+	defer smbi.mutReceivedMetaBlocks.RUnlock()
+
+	return smbi.mapMetaBlocksFromPeers
+}
diff --git a/epochStart/bootstrap/interface.go b/epochStart/bootstrap/interface.go
index 3ac2d9e2bf0..21992ddfa4a 100644
--- a/epochStart/bootstrap/interface.go
+++ b/epochStart/bootstrap/interface.go
@@ -6,21 +6,31 @@ import (
 	"github.com/ElrondNetwork/elrond-go/sharding"
 )
 
-type metaBlockInterceptorHandler interface {
+// MetaBlockInterceptorHandler defines what a component which will handle receiving the meta blocks should do
+type MetaBlockInterceptorHandler interface {
 	process.Interceptor
 	GetMetaBlock(target int, epoch uint32) (*block.MetaBlock, error)
 }
 
-type shardHeaderInterceptorHandler interface {
+// ShardHeaderInterceptorHandler defines what a component which will handle receiving the shard headers should do
+type ShardHeaderInterceptorHandler interface {
 	process.Interceptor
 	GetAllReceivedShardHeaders() []block.ShardData
 }
 
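+// Illustrative usage: the provider drives a resolver/interceptor pair on the same
+// epoch value, roughly as follows, where target is the number of peers that must
+// agree on a metablock before it is accepted:
+//
+//	_ = metaBlockResolver.RequestEpochStartMetaBlock(epoch)
+//	mb, err := metaBlockInterceptor.GetMetaBlock(target, epoch)
+
-type metaBlockResolverHandler interface {
+// MetaBlockResolverHandler defines what a component which will handle requesting a 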
metablock should do +type MetaBlockResolverHandler interface { RequestEpochStartMetaBlock(epoch uint32) error + IsInterfaceNil() bool } // NodesConfigProviderHandler defines what a component which will handle the nodes config should be able to do type NodesConfigProviderHandler interface { GetNodesConfigForMetaBlock(metaBlock *block.MetaBlock) (*sharding.NodesSetup, error) + IsInterfaceNil() bool +} + +// EpochStartDataProviderHandler defines what a component which fetches the data needed for starting in an epoch should do +type EpochStartDataProviderHandler interface { + Bootstrap() (*ComponentsNeededForBootstrap, error) } diff --git a/epochStart/bootstrap/mock/messengerStub.go b/epochStart/bootstrap/mock/messengerStub.go new file mode 100644 index 00000000000..3aac14f2ad0 --- /dev/null +++ b/epochStart/bootstrap/mock/messengerStub.go @@ -0,0 +1,165 @@ +package mock + +import "github.com/ElrondNetwork/elrond-go/p2p" + +// MessengerStub - +type MessengerStub struct { + CloseCalled func() error + IDCalled func() p2p.PeerID + PeersCalled func() []p2p.PeerID + AddressesCalled func() []string + ConnectToPeerCalled func(address string) error + ConnectedPeersOnTopicCalled func(topic string) []p2p.PeerID + TrimConnectionsCalled func() + IsConnectedCalled func(peerID p2p.PeerID) bool + ConnectedPeersCalled func() []p2p.PeerID + CreateTopicCalled func(name string, createChannelForTopic bool) error + HasTopicCalled func(name string) bool + HasTopicValidatorCalled func(name string) bool + BroadcastOnChannelCalled func(channel string, topic string, buff []byte) + BroadcastCalled func(topic string, buff []byte) + RegisterMessageProcessorCalled func(topic string, handler p2p.MessageProcessor) error + UnregisterMessageProcessorCalled func(topic string) error + SendToConnectedPeerCalled func(topic string, buff []byte, peerID p2p.PeerID) error + OutgoingChannelLoadBalancerCalled func() p2p.ChannelLoadBalancer + BootstrapCalled func() error +} + +func (ms *MessengerStub) ConnectedAddresses() []string { + panic("implement me") +} + +func (ms *MessengerStub) PeerAddress(pid p2p.PeerID) string { + panic("implement me") +} + +func (ms *MessengerStub) ConnectedPeersOnTopic(topic string) []p2p.PeerID { + return ms.ConnectedPeersOnTopicCalled(topic) +} + +func (ms *MessengerStub) BroadcastOnChannelBlocking(channel string, topic string, buff []byte) error { + panic("implement me") +} + +func (ms *MessengerStub) IsConnectedToTheNetwork() bool { + panic("implement me") +} + +func (ms *MessengerStub) ThresholdMinConnectedPeers() int { + panic("implement me") +} + +func (ms *MessengerStub) SetThresholdMinConnectedPeers(minConnectedPeers int) error { + panic("implement me") +} + +// RegisterMessageProcessor - +func (ms *MessengerStub) RegisterMessageProcessor(topic string, handler p2p.MessageProcessor) error { + if ms.RegisterMessageProcessorCalled != nil { + return ms.RegisterMessageProcessorCalled(topic, handler) + } + + return nil +} + +// UnregisterMessageProcessor - +func (ms *MessengerStub) UnregisterMessageProcessor(topic string) error { + if ms.UnregisterMessageProcessorCalled != nil { + return ms.UnregisterMessageProcessorCalled(topic) + } + + return nil +} + +// Broadcast - +func (ms *MessengerStub) Broadcast(topic string, buff []byte) { + ms.BroadcastCalled(topic, buff) +} + +// OutgoingChannelLoadBalancer - +func (ms *MessengerStub) OutgoingChannelLoadBalancer() p2p.ChannelLoadBalancer { + return ms.OutgoingChannelLoadBalancerCalled() +} + +// Close - +func (ms *MessengerStub) Close() error { + return 
ms.CloseCalled() +} + +// ID - +func (ms *MessengerStub) ID() p2p.PeerID { + return ms.IDCalled() +} + +// Peers - +func (ms *MessengerStub) Peers() []p2p.PeerID { + if ms.PeersCalled != nil { + return ms.PeersCalled() + } + + return []p2p.PeerID{"peer1", "peer2", "peer3", "peer4", "peer5", "peer6"} +} + +// Addresses - +func (ms *MessengerStub) Addresses() []string { + return ms.AddressesCalled() +} + +// ConnectToPeer - +func (ms *MessengerStub) ConnectToPeer(address string) error { + return ms.ConnectToPeerCalled(address) +} + +// TrimConnections - +func (ms *MessengerStub) TrimConnections() { + ms.TrimConnectionsCalled() +} + +// IsConnected - +func (ms *MessengerStub) IsConnected(peerID p2p.PeerID) bool { + return ms.IsConnectedCalled(peerID) +} + +// ConnectedPeers - +func (ms *MessengerStub) ConnectedPeers() []p2p.PeerID { + return ms.ConnectedPeersCalled() +} + +// CreateTopic - +func (ms *MessengerStub) CreateTopic(name string, createChannelForTopic bool) error { + if ms.CreateTopicCalled != nil { + return ms.CreateTopicCalled(name, createChannelForTopic) + } + + return nil +} + +// HasTopic - +func (ms *MessengerStub) HasTopic(name string) bool { + return ms.HasTopicCalled(name) +} + +// HasTopicValidator - +func (ms *MessengerStub) HasTopicValidator(name string) bool { + return ms.HasTopicValidatorCalled(name) +} + +// BroadcastOnChannel - +func (ms *MessengerStub) BroadcastOnChannel(channel string, topic string, buff []byte) { + ms.BroadcastOnChannelCalled(channel, topic, buff) +} + +// SendToConnectedPeer - +func (ms *MessengerStub) SendToConnectedPeer(topic string, buff []byte, peerID p2p.PeerID) error { + return ms.SendToConnectedPeerCalled(topic, buff, peerID) +} + +// Bootstrap - +func (ms *MessengerStub) Bootstrap() error { + return ms.BootstrapCalled() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (ms *MessengerStub) IsInterfaceNil() bool { + return ms == nil +} diff --git a/epochStart/bootstrap/mock/metaBlockInterceptorStub.go b/epochStart/bootstrap/mock/metaBlockInterceptorStub.go new file mode 100644 index 00000000000..55e826ad89f --- /dev/null +++ b/epochStart/bootstrap/mock/metaBlockInterceptorStub.go @@ -0,0 +1,35 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/p2p" +) + +// MetaBlockInterceptorStub - +type MetaBlockInterceptorStub struct { + ProcessReceivedMessageCalled func(message p2p.MessageP2P, broadcastHandler func(buffToSend []byte)) error + GetMetaBlockCalled func(target int, epoch uint32) (*block.MetaBlock, error) +} + +// ProcessReceivedMessage - +func (m *MetaBlockInterceptorStub) ProcessReceivedMessage(message p2p.MessageP2P, broadcastHandler func(buffToSend []byte)) error { + if m.ProcessReceivedMessageCalled != nil { + return m.ProcessReceivedMessageCalled(message, broadcastHandler) + } + + return nil +} + +// GetMetaBlock - +func (m *MetaBlockInterceptorStub) GetMetaBlock(target int, epoch uint32) (*block.MetaBlock, error) { + if m.GetMetaBlockCalled != nil { + return m.GetMetaBlockCalled(target, epoch) + } + + return &block.MetaBlock{}, nil +} + +// IsInterfaceNil - +func (m *MetaBlockInterceptorStub) IsInterfaceNil() bool { + return m == nil +} diff --git a/epochStart/bootstrap/mock/metaBlockResolverStub.go b/epochStart/bootstrap/mock/metaBlockResolverStub.go new file mode 100644 index 00000000000..ed868592f40 --- /dev/null +++ b/epochStart/bootstrap/mock/metaBlockResolverStub.go @@ -0,0 +1,20 @@ +package mock + +// MetaBlockResolverStub - 
+type MetaBlockResolverStub struct { + RequestEpochStartMetaBlockCalled func(epoch uint32) error +} + +// RequestEpochStartMetaBlock - +func (m *MetaBlockResolverStub) RequestEpochStartMetaBlock(epoch uint32) error { + if m.RequestEpochStartMetaBlockCalled != nil { + return m.RequestEpochStartMetaBlockCalled(epoch) + } + + return nil +} + +// IsInterfaceNil - +func (m *MetaBlockResolverStub) IsInterfaceNil() bool { + return m == nil +} diff --git a/epochStart/bootstrap/mock/nodesConfigProviderStub.go b/epochStart/bootstrap/mock/nodesConfigProviderStub.go new file mode 100644 index 00000000000..fd8966549d9 --- /dev/null +++ b/epochStart/bootstrap/mock/nodesConfigProviderStub.go @@ -0,0 +1,25 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/sharding" +) + +// NodesConfigProviderStub - +type NodesConfigProviderStub struct { + GetNodesConfigForMetaBlockCalled func(metaBlock *block.MetaBlock) (*sharding.NodesSetup, error) +} + +// GetNodesConfigForMetaBlock - +func (n *NodesConfigProviderStub) GetNodesConfigForMetaBlock(metaBlock *block.MetaBlock) (*sharding.NodesSetup, error) { + if n.GetNodesConfigForMetaBlockCalled != nil { + return n.GetNodesConfigForMetaBlockCalled(metaBlock) + } + + return &sharding.NodesSetup{}, nil +} + +// IsInterfaceNil - +func (n *NodesConfigProviderStub) IsInterfaceNil() bool { + return n == nil +} diff --git a/epochStart/bootstrap/mock/p2pMessageMock.go b/epochStart/bootstrap/mock/p2pMessageMock.go new file mode 100644 index 00000000000..99bd2f862c2 --- /dev/null +++ b/epochStart/bootstrap/mock/p2pMessageMock.go @@ -0,0 +1,54 @@ +package mock + +import "github.com/ElrondNetwork/elrond-go/p2p" + +// P2PMessageMock - +type P2PMessageMock struct { + FromField []byte + DataField []byte + SeqNoField []byte + TopicIDsField []string + SignatureField []byte + KeyField []byte + PeerField p2p.PeerID +} + +// From - +func (msg *P2PMessageMock) From() []byte { + return msg.FromField +} + +// Data - +func (msg *P2PMessageMock) Data() []byte { + return msg.DataField +} + +// SeqNo - +func (msg *P2PMessageMock) SeqNo() []byte { + return msg.SeqNoField +} + +// TopicIDs - +func (msg *P2PMessageMock) TopicIDs() []string { + return msg.TopicIDsField +} + +// Signature - +func (msg *P2PMessageMock) Signature() []byte { + return msg.SignatureField +} + +// Key - +func (msg *P2PMessageMock) Key() []byte { + return msg.KeyField +} + +// Peer - +func (msg *P2PMessageMock) Peer() p2p.PeerID { + return msg.PeerField +} + +// IsInterfaceNil returns true if there is no value under the interface +func (msg *P2PMessageMock) IsInterfaceNil() bool { + return msg == nil +} diff --git a/epochStart/bootstrap/mock/shardHeaderInterceptorStub.go b/epochStart/bootstrap/mock/shardHeaderInterceptorStub.go new file mode 100644 index 00000000000..181a95089a8 --- /dev/null +++ b/epochStart/bootstrap/mock/shardHeaderInterceptorStub.go @@ -0,0 +1,35 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/p2p" +) + +// ShardHeaderInterceptorStub - +type ShardHeaderInterceptorStub struct { + ProcessReceivedMessageCalled func(message p2p.MessageP2P, broadcastHandler func(buffToSend []byte)) error + GetAllReceivedShardHeadersCalled func() []block.ShardData +} + +// ProcessReceivedMessage - +func (s *ShardHeaderInterceptorStub) ProcessReceivedMessage(message p2p.MessageP2P, broadcastHandler func(buffToSend []byte)) error { + if s.ProcessReceivedMessageCalled != nil { + return 
s.ProcessReceivedMessageCalled(message, broadcastHandler) + } + + return nil +} + +// GetAllReceivedShardHeaders - +func (s *ShardHeaderInterceptorStub) GetAllReceivedShardHeaders() []block.ShardData { + if s.GetAllReceivedShardHeadersCalled != nil { + return s.GetAllReceivedShardHeadersCalled() + } + + return nil +} + +// IsInterfaceNil - +func (s *ShardHeaderInterceptorStub) IsInterfaceNil() bool { + return s == nil +} diff --git a/epochStart/bootstrap/simpleMetaBlockInterceptor.go b/epochStart/bootstrap/simpleMetaBlockInterceptor.go index 5b0170bc456..6df21622f4b 100644 --- a/epochStart/bootstrap/simpleMetaBlockInterceptor.go +++ b/epochStart/bootstrap/simpleMetaBlockInterceptor.go @@ -1,12 +1,12 @@ package bootstrap import ( - "errors" "math" "sync" "time" "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/hashing" "github.com/ElrondNetwork/elrond-go/marshal" @@ -25,14 +25,21 @@ type simpleMetaBlockInterceptor struct { } // NewSimpleMetaBlockInterceptor will return a new instance of simpleMetaBlockInterceptor -func NewSimpleMetaBlockInterceptor(marshalizer marshal.Marshalizer, hasher hashing.Hasher) *simpleMetaBlockInterceptor { +func NewSimpleMetaBlockInterceptor(marshalizer marshal.Marshalizer, hasher hashing.Hasher) (*simpleMetaBlockInterceptor, error) { + if check.IfNil(marshalizer) { + return nil, ErrNilMarshalizer + } + if check.IfNil(hasher) { + return nil, ErrNilHasher + } + return &simpleMetaBlockInterceptor{ marshalizer: marshalizer, hasher: hasher, mutReceivedMetaBlocks: sync.RWMutex{}, mapReceivedMetaBlocks: make(map[string]*block.MetaBlock), mapMetaBlocksFromPeers: make(map[string][]p2p.PeerID), - } + }, nil } // ProcessReceivedMessage will receive the metablocks and will add them to the maps @@ -93,7 +100,7 @@ func (s *simpleMetaBlockInterceptor) GetMetaBlock(target int, epoch uint32) (*bl s.mutReceivedMetaBlocks.RUnlock() } - return nil, errors.New("num of tries exceeded. 
try re-request") + return nil, ErrNumTriesExceeded } // IsInterfaceNil returns true if there is no value under the interface diff --git a/epochStart/bootstrap/simpleMetaBlockInterceptor_test.go b/epochStart/bootstrap/simpleMetaBlockInterceptor_test.go new file mode 100644 index 00000000000..7e2bb103ac3 --- /dev/null +++ b/epochStart/bootstrap/simpleMetaBlockInterceptor_test.go @@ -0,0 +1,171 @@ +package bootstrap_test + +import ( + "testing" + + "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap" + mock2 "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/mock" + "github.com/ElrondNetwork/elrond-go/epochStart/mock" + "github.com/stretchr/testify/require" +) + +func TestNewSimpleMetaBlockInterceptor_NilMarshalizerShouldErr(t *testing.T) { + t.Parallel() + + smbi, err := bootstrap.NewSimpleMetaBlockInterceptor(nil, &mock.HasherMock{}) + require.Nil(t, smbi) + require.Equal(t, bootstrap.ErrNilMarshalizer, err) +} + +func TestNewSimpleMetaBlockInterceptor_NilHasherShouldErr(t *testing.T) { + t.Parallel() + + smbi, err := bootstrap.NewSimpleMetaBlockInterceptor(&mock.MarshalizerMock{}, nil) + require.Nil(t, smbi) + require.Equal(t, bootstrap.ErrNilHasher, err) +} + +func TestNewSimpleMetaBlockInterceptor_OkValsShouldWork(t *testing.T) { + t.Parallel() + + smbi, err := bootstrap.NewSimpleMetaBlockInterceptor(&mock.MarshalizerMock{}, &mock.HasherMock{}) + require.Nil(t, err) + require.False(t, check.IfNil(smbi)) +} + +func TestSimpleMetaBlockInterceptor_ProcessReceivedMessage_ReceivedMessageIsNotAMetaBlockShouldNotAdd(t *testing.T) { + t.Parallel() + + smbi, _ := bootstrap.NewSimpleMetaBlockInterceptor(&mock.MarshalizerMock{}, &mock.HasherMock{}) + + message := mock2.P2PMessageMock{ + DataField: []byte("not a metablock"), + } + + _ = smbi.ProcessReceivedMessage(&message, nil) + + require.Zero(t, len(smbi.GetReceivedMetablocks())) +} + +func TestSimpleMetaBlockInterceptor_ProcessReceivedMessage_UnmarshalFailsShouldErr(t *testing.T) { + t.Parallel() + + marshalizer := &mock.MarshalizerMock{Fail: true} + smbi, _ := bootstrap.NewSimpleMetaBlockInterceptor(marshalizer, &mock.HasherMock{}) + + mb := &block.MetaBlock{Epoch: 5} + mbBytes, _ := marshalizer.Marshal(mb) + message := mock2.P2PMessageMock{ + DataField: mbBytes, + } + + _ = smbi.ProcessReceivedMessage(&message, nil) + + require.Zero(t, len(smbi.GetReceivedMetablocks())) +} + +func TestSimpleMetaBlockInterceptor_ProcessReceivedMessage_ReceivedMessageIsAMetaBlockShouldAdd(t *testing.T) { + t.Parallel() + + marshalizer := &mock.MarshalizerMock{} + smbi, _ := bootstrap.NewSimpleMetaBlockInterceptor(marshalizer, &mock.HasherMock{}) + + mb := &block.MetaBlock{Epoch: 5} + mbBytes, _ := marshalizer.Marshal(mb) + message := mock2.P2PMessageMock{ + DataField: mbBytes, + } + + _ = smbi.ProcessReceivedMessage(&message, nil) + + require.Equal(t, 1, len(smbi.GetReceivedMetablocks())) +} + +func TestSimpleMetaBlockInterceptor_ProcessReceivedMessage_ShouldAddForMorePeers(t *testing.T) { + t.Parallel() + + marshalizer := &mock.MarshalizerMock{} + smbi, _ := bootstrap.NewSimpleMetaBlockInterceptor(marshalizer, &mock.HasherMock{}) + + mb := &block.MetaBlock{Epoch: 5} + mbBytes, _ := marshalizer.Marshal(mb) + message1 := &mock2.P2PMessageMock{ + DataField: mbBytes, + PeerField: "peer1", + } + message2 := &mock2.P2PMessageMock{ + DataField: mbBytes, + PeerField: "peer2", + } + + _ = smbi.ProcessReceivedMessage(message1, nil) + _ = 
smbi.ProcessReceivedMessage(message2, nil) + + for _, res := range smbi.GetPeersSliceForMetablocks() { + require.Equal(t, 2, len(res)) + } +} + +func TestSimpleMetaBlockInterceptor_ProcessReceivedMessage_ShouldNotAddTwiceForTheSamePeer(t *testing.T) { + t.Parallel() + + marshalizer := &mock.MarshalizerMock{} + smbi, _ := bootstrap.NewSimpleMetaBlockInterceptor(marshalizer, &mock.HasherMock{}) + + mb := &block.MetaBlock{Epoch: 5} + mbBytes, _ := marshalizer.Marshal(mb) + message1 := &mock2.P2PMessageMock{ + DataField: mbBytes, + PeerField: "peer1", + } + message2 := &mock2.P2PMessageMock{ + DataField: mbBytes, + PeerField: "peer1", + } + + _ = smbi.ProcessReceivedMessage(message1, nil) + _ = smbi.ProcessReceivedMessage(message2, nil) + + for _, res := range smbi.GetPeersSliceForMetablocks() { + require.Equal(t, 1, len(res)) + } +} + +func TestSimpleMetaBlockInterceptor_GetMetaBlock_NumTriesExceededShouldErr(t *testing.T) { + t.Parallel() + + marshalizer := &mock.MarshalizerMock{} + smbi, _ := bootstrap.NewSimpleMetaBlockInterceptor(marshalizer, &mock.HasherMock{}) + + // no message received, so should exit with err + mb, err := smbi.GetMetaBlock(2, 5) + require.Zero(t, mb) + require.Equal(t, bootstrap.ErrNumTriesExceeded, err) +} + +func TestSimpleMetaBlockInterceptor_GetMetaBlockShouldWork(t *testing.T) { + t.Parallel() + + marshalizer := &mock.MarshalizerMock{} + smbi, _ := bootstrap.NewSimpleMetaBlockInterceptor(marshalizer, &mock.HasherMock{}) + + mb := &block.MetaBlock{Epoch: 5} + mbBytes, _ := marshalizer.Marshal(mb) + message1 := &mock2.P2PMessageMock{ + DataField: mbBytes, + PeerField: "peer1", + } + message2 := &mock2.P2PMessageMock{ + DataField: mbBytes, + PeerField: "peer2", + } + + _ = smbi.ProcessReceivedMessage(message1, nil) + _ = smbi.ProcessReceivedMessage(message2, nil) + + mb, err := smbi.GetMetaBlock(2, 5) + require.Nil(t, err) + require.NotNil(t, mb) +} diff --git a/epochStart/bootstrap/simpleMetaBlocksResolver.go b/epochStart/bootstrap/simpleMetaBlockResolver.go similarity index 82% rename from epochStart/bootstrap/simpleMetaBlocksResolver.go rename to epochStart/bootstrap/simpleMetaBlockResolver.go index 41c986517bc..99827a9e5cc 100644 --- a/epochStart/bootstrap/simpleMetaBlocksResolver.go +++ b/epochStart/bootstrap/simpleMetaBlockResolver.go @@ -3,16 +3,17 @@ package bootstrap import ( "errors" "fmt" + + "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/core/partitioning" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/data/typeConverters/uint64ByteSlice" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/resolverscontainer" + "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/disabled" "github.com/ElrondNetwork/elrond-go/marshal" - mock2 "github.com/ElrondNetwork/elrond-go/node/mock" "github.com/ElrondNetwork/elrond-go/p2p" "github.com/ElrondNetwork/elrond-go/process/factory" - mock3 "github.com/ElrondNetwork/elrond-go/process/mock" "github.com/ElrondNetwork/elrond-go/storage" ) @@ -28,6 +29,13 @@ func NewSimpleMetaBlocksResolver( messenger p2p.Messenger, marshalizer marshal.Marshalizer, ) (*simpleMetaBlocksResolver, error) { + if check.IfNil(messenger) { + return nil, ErrNilMessenger + } + if check.IfNil(marshalizer) { + return nil, ErrNilMarshalizer + } + smbr := &simpleMetaBlocksResolver{ messenger: messenger, marshalizer: marshalizer, @@ -41,12 +49,12 @@ func NewSimpleMetaBlocksResolver( } func (smbr *simpleMetaBlocksResolver) 
init() error { - storageService := &mock2.ChainStorerMock{ + storageService := &disabled.ChainStorer{ GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { - return &mock2.StorerMock{} + return disabled.NewDisabledStorer() }, } - cacher := mock3.NewPoolsHolderMock() + cacher := disabled.NewDisabledPoolsHolder() dataPacker, err := partitioning.NewSimpleDataPacker(smbr.marshalizer) if err != nil { return err @@ -54,7 +62,7 @@ func (smbr *simpleMetaBlocksResolver) init() error { triesHolder := state.NewDataTriesHolder() resolversContainerArgs := resolverscontainer.FactoryArgs{ - ShardCoordinator: mock2.NewOneShardCoordinatorMock(), + ShardCoordinator: disabled.NewShardCoordinator(), Messenger: smbr.messenger, Store: storageService, Marshalizer: smbr.marshalizer, @@ -91,3 +99,8 @@ func (smbr *simpleMetaBlocksResolver) init() error { func (smbr *simpleMetaBlocksResolver) RequestEpochStartMetaBlock(epoch uint32) error { return smbr.mbResolver.RequestDataFromEpoch([]byte(fmt.Sprintf("epochStartBlock_%d", epoch))) } + +// IsInterfaceNil returns true if there is no value under the interface +func (smbr *simpleMetaBlocksResolver) IsInterfaceNil() bool { + return smbr == nil +} diff --git a/epochStart/bootstrap/simpleMetaBlockResolver_test.go b/epochStart/bootstrap/simpleMetaBlockResolver_test.go new file mode 100644 index 00000000000..e4d194f640b --- /dev/null +++ b/epochStart/bootstrap/simpleMetaBlockResolver_test.go @@ -0,0 +1,57 @@ +package bootstrap_test + +import ( + "testing" + + "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap" + mock2 "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/mock" + "github.com/ElrondNetwork/elrond-go/epochStart/mock" + "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/stretchr/testify/require" +) + +func TestNewSimpleMetaBlocksResolver_NilMessengerShouldErr(t *testing.T) { + t.Parallel() + + smbr, err := bootstrap.NewSimpleMetaBlocksResolver(nil, &mock.MarshalizerMock{}) + require.Nil(t, smbr) + require.Equal(t, bootstrap.ErrNilMessenger, err) +} + +func TestNewSimpleMetaBlocksResolver_NilMarshalizerShouldErr(t *testing.T) { + t.Parallel() + + smbr, err := bootstrap.NewSimpleMetaBlocksResolver(&mock2.MessengerStub{}, nil) + require.Nil(t, smbr) + require.Equal(t, bootstrap.ErrNilMarshalizer, err) +} + +func TestNewSimpleMetaBlocksResolver_OkValsShouldWork(t *testing.T) { + t.Parallel() + + smbr, err := bootstrap.NewSimpleMetaBlocksResolver(&mock2.MessengerStub{}, &mock.MarshalizerMock{}) + require.Nil(t, err) + require.False(t, check.IfNil(smbr)) +} + +func TestSimpleMetaBlocksResolver_RequestEpochStartMetaBlock(t *testing.T) { + t.Parallel() + + requestWasSent := false + + messenger := &mock2.MessengerStub{ + ConnectedPeersOnTopicCalled: func(_ string) []p2p.PeerID { + return []p2p.PeerID{"peer1", "peer2", "peer2"} + }, + SendToConnectedPeerCalled: func(_ string, _ []byte, _ p2p.PeerID) error { + requestWasSent = true + return nil + }, + } + smbr, _ := bootstrap.NewSimpleMetaBlocksResolver(messenger, &mock.MarshalizerMock{}) + + err := smbr.RequestEpochStartMetaBlock(0) + require.Nil(t, err) + require.True(t, requestWasSent) +} diff --git a/node/mock/chainStorerMock.go b/node/mock/chainStorerMock.go index 3d6c48d333c..51d5de42132 100644 --- a/node/mock/chainStorerMock.go +++ b/node/mock/chainStorerMock.go @@ -92,8 +92,5 @@ func (bc *ChainStorerMock) Destroy() error { // IsInterfaceNil returns true if there is no value under the interface func (bc *ChainStorerMock) 
IsInterfaceNil() bool {
-	if bc == nil {
-		return true
-	}
-	return false
+	return bc == nil
 }

From 346daa5261998162227509c9e7b60950ef7eab55 Mon Sep 17 00:00:00 2001
From: bogdan-rosianu
Date: Wed, 4 Mar 2020 15:31:29 +0200
Subject: [PATCH 06/61] EN-5829: fixed bug when requesting epoch start metablock
 from epoch 0

---
 cmd/node/factory/structs.go                   | 34 ++-------
 epochStart/bootstrap/export_test.go           | 16 ++---
 .../factory/epochStartDataProviderFactory.go  | 71 +++++++++++++++++++
 integrationTests/testInitializer.go           |  3 -
 4 files changed, 83 insertions(+), 41 deletions(-)
 create mode 100644 epochStart/bootstrap/factory/epochStartDataProviderFactory.go

diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go
index 442a9922255..9c6d67a8097 100644
--- a/cmd/node/factory/structs.go
+++ b/cmd/node/factory/structs.go
@@ -1524,14 +1524,14 @@ func generateGenesisHeadersAndApplyInitialBalances(args *processComponentsFactor
 		return nil, err
 	}
 
-	newStore, newBlkc, errPoolCreation := createInMemoryStoreBlkc(newShardCoordinator)
-	if errPoolCreation != nil {
-		return nil, errPoolCreation
+	cache, _ := storageUnit.NewCache(storageUnit.LRUCache, 10, 1)
+	newBlkc, errNewMetachain := blockchain.NewMetaChain(cache)
+	if errNewMetachain != nil {
+		return nil, errNewMetachain
 	}
 
 	argsMetaGenesis.ShardCoordinator = newShardCoordinator
 	argsMetaGenesis.Accounts = newAccounts
-	argsMetaGenesis.Store = newStore
 	argsMetaGenesis.Blkc = newBlkc
 }
@@ -1552,32 +1552,6 @@ func generateGenesisHeadersAndApplyInitialBalances(args *processComponentsFactor
 	return genesisBlocks, nil
 }
 
-func createInMemoryStoreBlkc(
-	shardCoordinator sharding.Coordinator,
-) (dataRetriever.StorageService, data.ChainHandler, error) {
-
-	cache, _ := storageUnit.NewCache(storageUnit.LRUCache, 10, 1)
-	blkc, err := blockchain.NewMetaChain(cache)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	store := dataRetriever.NewChainStorer()
-	store.AddStorer(dataRetriever.MetaBlockUnit, createMemUnit())
-	store.AddStorer(dataRetriever.BlockHeaderUnit, createMemUnit())
-	store.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, createMemUnit())
-	store.AddStorer(dataRetriever.TransactionUnit, createMemUnit())
-	store.AddStorer(dataRetriever.UnsignedTransactionUnit, createMemUnit())
-	store.AddStorer(dataRetriever.MiniBlockUnit, createMemUnit())
-	for i := uint32(0); i < shardCoordinator.NumberOfShards(); i++ {
-		hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(i)
-		store.AddStorer(hdrNonceHashDataUnit, createMemUnit())
-	}
-	store.AddStorer(dataRetriever.HeartbeatUnit, createMemUnit())
-
-	return store, blkc, nil
-}
-
 func createGenesisBlockAndApplyInitialBalances(
 	accounts state.AccountsAdapter,
 	shardCoordinator sharding.Coordinator,
diff --git a/epochStart/bootstrap/export_test.go b/epochStart/bootstrap/export_test.go
index bdea755593c..ce44c401fda 100644
--- a/epochStart/bootstrap/export_test.go
+++ b/epochStart/bootstrap/export_test.go
@@ -5,16 +5,16 @@ import (
 	"github.com/ElrondNetwork/elrond-go/p2p"
 )
 
-func (smbi *simpleMetaBlockInterceptor) GetReceivedMetablocks() map[string]*block.MetaBlock {
-	smbi.mutReceivedMetaBlocks.RLock()
-	defer smbi.mutReceivedMetaBlocks.RUnlock()
+func (s *simpleMetaBlockInterceptor) GetReceivedMetablocks() map[string]*block.MetaBlock {
+	s.mutReceivedMetaBlocks.RLock()
+	defer s.mutReceivedMetaBlocks.RUnlock()
 
-	return smbi.mapReceivedMetaBlocks
+	return s.mapReceivedMetaBlocks
 }
 
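+// A file named export_test.go compiles only together with the package's tests, so
+// these accessors expose internal state to the test package without widening the
+// production API; a test can then assert on the maps directly, e.g.:
+//
+//	received := smbi.GetReceivedMetablocks()
+//	require.Equal(t, 1, len(received))
+
-func (smbi *simpleMetaBlockInterceptor) GetPeersSliceForMetablocks() map[string][]p2p.PeerID {
-	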
smbi.mutReceivedMetaBlocks.RLock() - defer smbi.mutReceivedMetaBlocks.RUnlock() +func (s *simpleMetaBlockInterceptor) GetPeersSliceForMetablocks() map[string][]p2p.PeerID { + s.mutReceivedMetaBlocks.RLock() + defer s.mutReceivedMetaBlocks.RUnlock() - return smbi.mapMetaBlocksFromPeers + return s.mapMetaBlocksFromPeers } diff --git a/epochStart/bootstrap/factory/epochStartDataProviderFactory.go b/epochStart/bootstrap/factory/epochStartDataProviderFactory.go new file mode 100644 index 00000000000..fb022f5f217 --- /dev/null +++ b/epochStart/bootstrap/factory/epochStartDataProviderFactory.go @@ -0,0 +1,71 @@ +package factory + +// +//import ( +// "github.com/ElrondNetwork/elrond-go/core/check" +// "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap" +// "github.com/ElrondNetwork/elrond-go/hashing" +// "github.com/ElrondNetwork/elrond-go/marshal" +// "github.com/ElrondNetwork/elrond-go/p2p" +//) +// +//type epochStartDataProviderFactory struct { +// messenger p2p.Messenger +// marshalizer marshal.Marshalizer +// hasher hashing.Hasher +// nodesConfigProvider bootstrap.NodesConfigProviderHandler +//} +// +//// EpochStartDataProviderFactoryArgs holds the arguments needed for creating aa factory for the epoch start data +//// provider component +//type EpochStartDataProviderFactoryArgs struct { +// Messenger p2p.Messenger +// Marshalizer marshal.Marshalizer +// Hasher hashing.Hasher +// NodesConfigProvider bootstrap.NodesConfigProviderHandler +//} +// +//// NewEpochStartDataProviderFactory returns a new instance of epochStartDataProviderFactory +//func NewEpochStartDataProviderFactory(args EpochStartDataProviderFactoryArgs) (*epochStartDataProviderFactory, error) { +// if check.IfNil(args.Messenger) { +// return nil, bootstrap.ErrNilMessenger +// } +// if check.IfNil(args.Marshalizer) { +// return nil, bootstrap.ErrNilMarshalizer +// } +// if check.IfNil(args.Hasher) { +// return nil, bootstrap.ErrNilHasher +// } +// if check.IfNil(args.NodesConfigProvider) { +// return nil, bootstrap.ErrNilNodesConfigProvider +// } +// +// return &epochStartDataProviderFactory{ +// messenger: args.Messenger, +// marshalizer: args.Marshalizer, +// hasher: args.Hasher, +// nodesConfigProvider: args.NodesConfigProvider, +// }, nil +//} +// +//// Create will init and return an instance of an epoch start data provider +//func (esdpf *epochStartDataProviderFactory) Create() (bootstrap.EpochStartDataProviderHandler, error) { +// metaBlockInterceptor := bootstrap.NewSimpleMetaBlockInterceptor(esdpf.marshalizer, esdpf.hasher) +// shardHdrInterceptor := bootstrap.NewSimpleShardHeaderInterceptor(esdpf.marshalizer) +// metaBlockResolver, err := bootstrap.NewSimpleMetaBlocksResolver(esdpf.messenger, esdpf.marshalizer) +// if err != nil { +// return nil, err +// } +// +// epochStartDataProvider, err := bootstrap.NewEpochStartDataProvider( +// esdpf.messenger, +// esdpf.marshalizer, +// esdpf.hasher, +// esdpf.nodesConfigProvider, +// metaBlockInterceptor, +// shardHdrInterceptor, +// metaBlockResolver, +// ) +// +// return epochStartDataProvider, nil +//} diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index 47b17a453e3..30cf255621c 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -382,8 +382,6 @@ func CreateGenesisMetaBlock( core.MetachainShardId, ) - newStore := CreateMetaStore(newShardCoordinator) - newDataPool := CreateTestDataPool(nil) cache, _ := storageUnit.NewCache(storageUnit.LRUCache, 10, 1) @@ -392,7 +390,6 @@ func 
CreateGenesisMetaBlock( argsMetaGenesis.ShardCoordinator = newShardCoordinator argsMetaGenesis.Accounts = newAccounts - argsMetaGenesis.Store = newStore argsMetaGenesis.Blkc = newBlkc argsMetaGenesis.DataPool = newDataPool } From 321ec2f6ab0019ce5644d25e9baf79b8afd90005 Mon Sep 17 00:00:00 2001 From: bogdan-rosianu Date: Thu, 5 Mar 2020 16:36:28 +0200 Subject: [PATCH 07/61] EN-5736: fix after review --- cmd/node/main.go | 94 +++++----- .../factory/disabledEpochStartDataProvider.go | 15 ++ .../factory/epochStartDataProviderFactory.go | 163 ++++++++++-------- .../bootstrap/simpleMetaBlockInterceptor.go | 46 +++-- .../bootstrap/simpleMetaBlockResolver.go | 15 +- 5 files changed, 191 insertions(+), 142 deletions(-) create mode 100644 epochStart/bootstrap/factory/disabledEpochStartDataProvider.go diff --git a/cmd/node/main.go b/cmd/node/main.go index 11c373f6731..fcdddff4665 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -31,7 +31,7 @@ import ( "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/display" "github.com/ElrondNetwork/elrond-go/epochStart" - "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap" + factory2 "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/factory" "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/nodesconfigprovider" "github.com/ElrondNetwork/elrond-go/epochStart/notifier" "github.com/ElrondNetwork/elrond-go/facade" @@ -297,8 +297,6 @@ var appVersion = core.UnVersionedAppString var currentEpoch = uint32(0) -var networkComponents *factory.Network - func main() { _ = display.SetDisplayByteSlice(display.ToHexShort) log := logger.GetOrCreate("main") @@ -369,6 +367,7 @@ func getSuite(config *config.Config) (crypto.Suite, error) { func startNode(ctx *cli.Context, log logger.Logger, version string) error { log.Trace("startNode called") workingDir := getWorkingDir(ctx, log) + var networkComponents *factory.Network var err error withLogFile := ctx.GlobalBool(logSaveFile.Name) @@ -554,59 +553,48 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { epochFoundInStorage := errNotCritical == nil - shouldCallEpochStartDataProvider := bootstrap.ShouldSyncWithTheNetwork( - startTime, - epochFoundInStorage, - nodesConfig, - generalConfig, - ) - shouldCallEpochStartDataProvider = true - if shouldCallEpochStartDataProvider { - networkComponents, err = factory.NetworkComponentsFactory(p2pConfig, log, &blake2b.Blake2b{}) - if err != nil { - return err - } + networkComponents, err = factory.NetworkComponentsFactory(p2pConfig, log, &blake2b.Blake2b{}) + if err != nil { + return err + } + err = networkComponents.NetMessenger.Bootstrap() + if err != nil { + return err + } + time.Sleep(2 * time.Second) - err = networkComponents.NetMessenger.Bootstrap() - if err != nil { - return err - } - time.Sleep(2 * time.Second) + marshalizer := &marshal.JsonMarshalizer{} + hasher := &blake2b.Blake2b{} + epochStartComponentArgs := factory2.EpochStartDataProviderFactoryArgs{ + Messenger: networkComponents.NetMessenger, + Marshalizer: marshalizer, + Hasher: hasher, + NodesConfigProvider: nodesconfigprovider.NewSimpleNodesConfigProvider(nodesConfig), + StartTime: startTime, + OriginalNodesConfig: nodesConfig, + GeneralConfig: generalConfig, + IsEpochFoundInStorage: epochFoundInStorage, + } - marshalizer := &marshal.JsonMarshalizer{} - hasher := &blake2b.Blake2b{} - simpleNodesConfigProvider := nodesconfigprovider.NewSimpleNodesConfigProvider(nodesConfig) - metaBlockInterceptor, err := 
bootstrap.NewSimpleMetaBlockInterceptor(marshalizer, hasher) - if err != nil { - return err - } - shardHdrInterceptor := bootstrap.NewSimpleShardHeaderInterceptor(marshalizer) - metaBlockResolver, err := bootstrap.NewSimpleMetaBlocksResolver(networkComponents.NetMessenger, marshalizer) - epochStartDataArgs := bootstrap.ArgsEpochStartDataProvider{ - Messenger: networkComponents.NetMessenger, - Marshalizer: marshalizer, - Hasher: hasher, - NodesConfigProvider: simpleNodesConfigProvider, - MetaBlockInterceptor: metaBlockInterceptor, - ShardHeaderInterceptor: shardHdrInterceptor, - MetaBlockResolver: metaBlockResolver, - } - epochRes, err := bootstrap.NewEpochStartDataProvider(epochStartDataArgs) - if err != nil { - return err - } - var bootstrapComponents *bootstrap.ComponentsNeededForBootstrap - bootstrapComponents, err = epochRes.Bootstrap() - if err != nil { - return err - } + epochStartComponentFactory, err := factory2.NewEpochStartDataProviderFactory(epochStartComponentArgs) + if err != nil { + return err + } - // override already defined node config - nodesConfig = bootstrapComponents.NodesConfig + epochStartDataProvider, err := epochStartComponentFactory.Create() + if err != nil { + return err + } + res, err := epochStartDataProvider.Bootstrap() + isFreshStart := err != nil + if !isFreshStart { + nodesConfig = res.NodesConfig log.Info("received epoch start metablock from network", - "nonce", bootstrapComponents.EpochStartMetaBlock.GetNonce(), - "epoch", bootstrapComponents.EpochStartMetaBlock.GetEpoch()) + "nonce", res.EpochStartMetaBlock.GetNonce(), + "epoch", res.EpochStartMetaBlock.GetEpoch()) + } else { + log.Error("error bootstrapping", "error", err) } shardCoordinator, nodeType, err := createShardCoordinator(nodesConfig, pubKey, preferencesConfig.Preferences, log) @@ -768,7 +756,7 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { err = ioutil.WriteFile(statsFile, []byte(sessionInfoFileOutput), os.ModePerm) log.LogIfError(err) - if !shouldCallEpochStartDataProvider { + if isFreshStart { log.Trace("creating network components") networkComponents, err = factory.NetworkComponentsFactory(p2pConfig, log, coreComponents.Hasher) if err != nil { @@ -940,7 +928,7 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { ef.StartBackgroundServices() log.Debug("bootstrapping node...") - err = ef.StartNode(currentEpoch, !shouldCallEpochStartDataProvider) + err = ef.StartNode(currentEpoch, isFreshStart) if err != nil { log.Error("starting node failed", err.Error()) return err diff --git a/epochStart/bootstrap/factory/disabledEpochStartDataProvider.go b/epochStart/bootstrap/factory/disabledEpochStartDataProvider.go new file mode 100644 index 00000000000..f3085454b47 --- /dev/null +++ b/epochStart/bootstrap/factory/disabledEpochStartDataProvider.go @@ -0,0 +1,15 @@ +package factory + +import ( + "errors" + + "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap" +) + +type disabledEpochStartDataProvider struct { +} + +// Bootstrap will return an error indicating that the sync is not needed +func (d *disabledEpochStartDataProvider) Bootstrap() (*bootstrap.ComponentsNeededForBootstrap, error) { + return nil, errors.New("sync not needed") +} diff --git a/epochStart/bootstrap/factory/epochStartDataProviderFactory.go b/epochStart/bootstrap/factory/epochStartDataProviderFactory.go index fb022f5f217..b0655770a8b 100644 --- a/epochStart/bootstrap/factory/epochStartDataProviderFactory.go +++ 
b/epochStart/bootstrap/factory/epochStartDataProviderFactory.go @@ -1,71 +1,96 @@ package factory -// -//import ( -// "github.com/ElrondNetwork/elrond-go/core/check" -// "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap" -// "github.com/ElrondNetwork/elrond-go/hashing" -// "github.com/ElrondNetwork/elrond-go/marshal" -// "github.com/ElrondNetwork/elrond-go/p2p" -//) -// -//type epochStartDataProviderFactory struct { -// messenger p2p.Messenger -// marshalizer marshal.Marshalizer -// hasher hashing.Hasher -// nodesConfigProvider bootstrap.NodesConfigProviderHandler -//} -// -//// EpochStartDataProviderFactoryArgs holds the arguments needed for creating aa factory for the epoch start data -//// provider component -//type EpochStartDataProviderFactoryArgs struct { -// Messenger p2p.Messenger -// Marshalizer marshal.Marshalizer -// Hasher hashing.Hasher -// NodesConfigProvider bootstrap.NodesConfigProviderHandler -//} -// -//// NewEpochStartDataProviderFactory returns a new instance of epochStartDataProviderFactory -//func NewEpochStartDataProviderFactory(args EpochStartDataProviderFactoryArgs) (*epochStartDataProviderFactory, error) { -// if check.IfNil(args.Messenger) { -// return nil, bootstrap.ErrNilMessenger -// } -// if check.IfNil(args.Marshalizer) { -// return nil, bootstrap.ErrNilMarshalizer -// } -// if check.IfNil(args.Hasher) { -// return nil, bootstrap.ErrNilHasher -// } -// if check.IfNil(args.NodesConfigProvider) { -// return nil, bootstrap.ErrNilNodesConfigProvider -// } -// -// return &epochStartDataProviderFactory{ -// messenger: args.Messenger, -// marshalizer: args.Marshalizer, -// hasher: args.Hasher, -// nodesConfigProvider: args.NodesConfigProvider, -// }, nil -//} -// -//// Create will init and return an instance of an epoch start data provider -//func (esdpf *epochStartDataProviderFactory) Create() (bootstrap.EpochStartDataProviderHandler, error) { -// metaBlockInterceptor := bootstrap.NewSimpleMetaBlockInterceptor(esdpf.marshalizer, esdpf.hasher) -// shardHdrInterceptor := bootstrap.NewSimpleShardHeaderInterceptor(esdpf.marshalizer) -// metaBlockResolver, err := bootstrap.NewSimpleMetaBlocksResolver(esdpf.messenger, esdpf.marshalizer) -// if err != nil { -// return nil, err -// } -// -// epochStartDataProvider, err := bootstrap.NewEpochStartDataProvider( -// esdpf.messenger, -// esdpf.marshalizer, -// esdpf.hasher, -// esdpf.nodesConfigProvider, -// metaBlockInterceptor, -// shardHdrInterceptor, -// metaBlockResolver, -// ) -// -// return epochStartDataProvider, nil -//} +import ( + "time" + + "github.com/ElrondNetwork/elrond-go/config" + "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/sharding" +) + +type epochStartDataProviderFactory struct { + messenger p2p.Messenger + marshalizer marshal.Marshalizer + hasher hashing.Hasher + nodesConfigProvider bootstrap.NodesConfigProviderHandler + shouldSync bool +} + +// EpochStartDataProviderFactoryArgs holds the arguments needed for creating a factory for the epoch start data +// provider component +type EpochStartDataProviderFactoryArgs struct { + Messenger p2p.Messenger + Marshalizer marshal.Marshalizer + Hasher hashing.Hasher + NodesConfigProvider bootstrap.NodesConfigProviderHandler + StartTime time.Time + OriginalNodesConfig *sharding.NodesSetup + GeneralConfig *config.Config + IsEpochFoundInStorage bool +}
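+// For orientation, a caller would fill these arguments roughly as the node
+// start-up code does; the concrete marshalizer/hasher choices below mirror the
+// wiring in cmd/node/main.go and are illustrative assumptions, not
+// requirements of this package:
+//
+//	args := EpochStartDataProviderFactoryArgs{
+//		Messenger:             netMessenger,
+//		Marshalizer:           &marshal.JsonMarshalizer{},
+//		Hasher:                &blake2b.Blake2b{},
+//		NodesConfigProvider:   nodesconfigprovider.NewSimpleNodesConfigProvider(nodesConfig),
+//		StartTime:             startTime,
+//		OriginalNodesConfig:   nodesConfig,
+//		GeneralConfig:         generalConfig,
+//		IsEpochFoundInStorage: epochFoundInStorage,
+//	}
+//	epochStartFactory, err := NewEpochStartDataProviderFactory(args)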
+ +// NewEpochStartDataProviderFactory returns a new instance of epochStartDataProviderFactory +func NewEpochStartDataProviderFactory(args EpochStartDataProviderFactoryArgs) (*epochStartDataProviderFactory, error) { + if check.IfNil(args.Messenger) { + return nil, bootstrap.ErrNilMessenger + } + if check.IfNil(args.Marshalizer) { + return nil, bootstrap.ErrNilMarshalizer + } + if check.IfNil(args.Hasher) { + return nil, bootstrap.ErrNilHasher + } + if check.IfNil(args.NodesConfigProvider) { + return nil, bootstrap.ErrNilNodesConfigProvider + } + + shouldSync := bootstrap.ShouldSyncWithTheNetwork( + args.StartTime, + args.IsEpochFoundInStorage, + args.OriginalNodesConfig, + args.GeneralConfig, + ) + shouldSync = true // hardcoded so we can test the sync + + return &epochStartDataProviderFactory{ + messenger: args.Messenger, + marshalizer: args.Marshalizer, + hasher: args.Hasher, + nodesConfigProvider: args.NodesConfigProvider, + shouldSync: shouldSync, + }, nil +} + +// Create will init and return an instance of an epoch start data provider +func (esdpf *epochStartDataProviderFactory) Create() (bootstrap.EpochStartDataProviderHandler, error) { + if !esdpf.shouldSync { + return &disabledEpochStartDataProvider{}, nil + } + + metaBlockInterceptor, err := bootstrap.NewSimpleMetaBlockInterceptor(esdpf.marshalizer, esdpf.hasher) + if err != nil { + return nil, err + } + shardHdrInterceptor := bootstrap.NewSimpleShardHeaderInterceptor(esdpf.marshalizer) + metaBlockResolver, err := bootstrap.NewSimpleMetaBlocksResolver(esdpf.messenger, esdpf.marshalizer) + if err != nil { + return nil, err + } + + argsEpochStart := bootstrap.ArgsEpochStartDataProvider{ + Messenger: esdpf.messenger, + Marshalizer: esdpf.marshalizer, + Hasher: esdpf.hasher, + NodesConfigProvider: esdpf.nodesConfigProvider, + MetaBlockInterceptor: metaBlockInterceptor, + ShardHeaderInterceptor: shardHdrInterceptor, + MetaBlockResolver: metaBlockResolver, + } + epochStartDataProvider, err := bootstrap.NewEpochStartDataProvider(argsEpochStart) + if err != nil { + return nil, err + } + + return epochStartDataProvider, nil +} diff --git a/epochStart/bootstrap/simpleMetaBlockInterceptor.go b/epochStart/bootstrap/simpleMetaBlockInterceptor.go index 6df21622f4b..ba7649d6c54 100644 --- a/epochStart/bootstrap/simpleMetaBlockInterceptor.go +++ b/epochStart/bootstrap/simpleMetaBlockInterceptor.go @@ -46,17 +46,18 @@ func NewSimpleMetaBlockInterceptor(marshalizer marshal.Marshalizer, hasher hashi func (s *simpleMetaBlockInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, _ func(buffToSend []byte)) error { var mb block.MetaBlock err := s.marshalizer.Unmarshal(&mb, message.Data()) - if err == nil { - s.mutReceivedMetaBlocks.Lock() - mbHash, err := core.CalculateHash(s.marshalizer, s.hasher, mb) - if err != nil { - s.mutReceivedMetaBlocks.Unlock() - return nil - } - s.mapReceivedMetaBlocks[string(mbHash)] = &mb - s.addToPeerList(string(mbHash), message.Peer()) + if err != nil { + return err + } + s.mutReceivedMetaBlocks.Lock() + mbHash, err := core.CalculateHash(s.marshalizer, s.hasher, mb) + if err != nil { s.mutReceivedMetaBlocks.Unlock() + return err } + s.mapReceivedMetaBlocks[string(mbHash)] = &mb + s.addToPeerList(string(mbHash), message.Peer()) + s.mutReceivedMetaBlocks.Unlock() return nil } @@ -65,20 +66,17 @@ func (s *simpleMetaBlockInterceptor) ProcessReceivedMessage(message p2p.MessageP func (s *simpleMetaBlockInterceptor) addToPeerList(hash string, id p2p.PeerID) { peersListForHash, ok := s.mapMetaBlocksFromPeers[hash] - // no entry for
this hash. add it directly if !ok { s.mapMetaBlocksFromPeers[hash] = append(s.mapMetaBlocksFromPeers[hash], id) return } - // entries exist for this hash. search so we don't have duplicates for _, peer := range peersListForHash { if peer == id { return } } - // entry not found so add it s.mapMetaBlocksFromPeers[hash] = append(s.mapMetaBlocksFromPeers[hash], id) } @@ -88,12 +86,9 @@ func (s *simpleMetaBlockInterceptor) GetMetaBlock(target int, epoch uint32) (*bl time.Sleep(timeToWaitBeforeCheckingReceivedMetaBlocks) s.mutReceivedMetaBlocks.RLock() for hash, peersList := range s.mapMetaBlocksFromPeers { - mb := s.mapReceivedMetaBlocks[hash] - epochCheckNotRequired := epoch == math.MaxUint32 - isEpochOk := epochCheckNotRequired || mb.Epoch == epoch - if len(peersList) >= target && isEpochOk { + isOk := s.isMapEntryOk(peersList, hash, target, epoch) + if isOk { s.mutReceivedMetaBlocks.RUnlock() - log.Info("got consensus for metablock", "len", len(peersList)) return s.mapReceivedMetaBlocks[hash], nil } } @@ -103,6 +98,23 @@ func (s *simpleMetaBlockInterceptor) GetMetaBlock(target int, epoch uint32) (*bl return nil, ErrNumTriesExceeded } +func (s *simpleMetaBlockInterceptor) isMapEntryOk( + peersList []p2p.PeerID, + hash string, + target int, + epoch uint32, +) bool { + mb := s.mapReceivedMetaBlocks[hash] + epochCheckNotRequired := epoch == math.MaxUint32 + isEpochOk := epochCheckNotRequired || mb.Epoch == epoch + if len(peersList) >= target && isEpochOk { + log.Info("got consensus for metablock", "len", len(peersList)) + return true + } + + return false +} + // IsInterfaceNil returns true if there is no value under the interface func (s *simpleMetaBlockInterceptor) IsInterfaceNil() bool { return s == nil diff --git a/epochStart/bootstrap/simpleMetaBlockResolver.go b/epochStart/bootstrap/simpleMetaBlockResolver.go index 99827a9e5cc..149ca3cd01a 100644 --- a/epochStart/bootstrap/simpleMetaBlockResolver.go +++ b/epochStart/bootstrap/simpleMetaBlockResolver.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" + "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/core/partitioning" "github.com/ElrondNetwork/elrond-go/data/state" @@ -14,9 +15,13 @@ import ( "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/p2p" "github.com/ElrondNetwork/elrond-go/process/factory" + "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/storage" ) +const percentageOfPeersToSendRequests = 0.4 +const defaultNumOfPeersToSendRequests = 2 + // simpleMetaBlocksResolver initializes a HeaderResolver and sends requests from it type simpleMetaBlocksResolver struct { messenger p2p.Messenger @@ -60,9 +65,13 @@ func (smbr *simpleMetaBlocksResolver) init() error { return err } triesHolder := state.NewDataTriesHolder() + shardCoordinator, err := sharding.NewMultiShardCoordinator(3, core.MetachainShardId) + if err != nil { + return err + } resolversContainerArgs := resolverscontainer.FactoryArgs{ - ShardCoordinator: disabled.NewShardCoordinator(), + ShardCoordinator: shardCoordinator, Messenger: smbr.messenger, Store: storageService, Marshalizer: smbr.marshalizer, @@ -77,9 +86,9 @@ func (smbr *simpleMetaBlocksResolver) init() error { return err } - numPeersToQuery := int(0.4 * float64(len(smbr.messenger.Peers()))) + numPeersToQuery := int(percentageOfPeersToSendRequests * float64(len(smbr.messenger.Peers()))) if numPeersToQuery == 0 { - numPeersToQuery = 2 + numPeersToQuery = defaultNumOfPeersToSendRequests } 
resolver, err := metaChainResolverContainer.CreateMetaChainHeaderResolver(factory.MetachainBlocksTopic, numPeersToQuery, 0) if err != nil { From 79e1985d91e7a9edfa18b61525d532df9742e0c3 Mon Sep 17 00:00:00 2001 From: bogdan-rosianu Date: Tue, 10 Mar 2020 18:15:21 +0200 Subject: [PATCH 08/61] further work (WIP) --- cmd/node/main.go | 17 +- .../metaResolversContainerFactory.go | 16 +- .../bootstrap/epochStartDataProvider.go | 300 ++++++++++++++++-- .../bootstrap/epochStartDataProvider_test.go | 12 + epochStart/bootstrap/errors.go | 10 +- .../factory/epochStartDataProviderFactory.go | 13 +- epochStart/bootstrap/interface.go | 16 +- epochStart/bootstrap/mock/publicKeyMock.go | 30 ++ .../bootstrap/mock/shardCoordinatorMock.go | 76 +++++ .../bootstrap/simpleMetaBlockInterceptor.go | 5 +- .../bootstrap/simpleMetaBlockResolver.go | 115 ------- .../bootstrap/simpleMetaBlockResolver_test.go | 57 ---- .../bootstrap/simpleMiniBlockInterceptor.go | 126 ++++++++ .../bootstrap/simpleShardHeaderInterceptor.go | 104 +++++- facade/elrondNodeFacade.go | 12 +- integrationTests/testProcessorNode.go | 2 +- process/block/shardblock.go | 2 +- process/common.go | 2 +- process/track/shardBlockTrack.go | 2 +- 19 files changed, 675 insertions(+), 242 deletions(-) create mode 100644 epochStart/bootstrap/mock/publicKeyMock.go create mode 100644 epochStart/bootstrap/mock/shardCoordinatorMock.go delete mode 100644 epochStart/bootstrap/simpleMetaBlockResolver.go delete mode 100644 epochStart/bootstrap/simpleMetaBlockResolver_test.go create mode 100644 epochStart/bootstrap/simpleMiniBlockInterceptor.go diff --git a/cmd/node/main.go b/cmd/node/main.go index fcdddff4665..d3a61cf2817 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -561,11 +561,12 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { if err != nil { return err } - time.Sleep(2 * time.Second) + time.Sleep(3 * time.Second) marshalizer := &marshal.JsonMarshalizer{} hasher := &blake2b.Blake2b{} epochStartComponentArgs := factory2.EpochStartDataProviderFactoryArgs{ + PubKey: pubKey, Messenger: networkComponents.NetMessenger, Marshalizer: marshalizer, Hasher: hasher, @@ -756,13 +757,13 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { err = ioutil.WriteFile(statsFile, []byte(sessionInfoFileOutput), os.ModePerm) log.LogIfError(err) - if isFreshStart { - log.Trace("creating network components") - networkComponents, err = factory.NetworkComponentsFactory(p2pConfig, log, coreComponents.Hasher) - if err != nil { - return err - } - } + //if isFreshStart { + // log.Trace("creating network components") + // networkComponents, err = factory.NetworkComponentsFactory(p2pConfig, log, coreComponents.Hasher) + // if err != nil { + // return err + // } + //} log.Trace("creating tps benchmark components") tpsBenchmark, err := statistics.NewTPSBenchmark(shardCoordinator.NumberOfShards(), nodesConfig.RoundDuration/1000) diff --git a/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory.go b/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory.go index d34e17beead..d5548af864d 100644 --- a/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory.go +++ b/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory.go @@ -106,7 +106,7 @@ func (mrcf *metaResolversContainerFactory) generateShardHeaderResolvers() error identifierHeader := factory.ShardBlocksTopic + shardC.CommunicationIdentifier(idx) excludePeersFromTopic := emptyExcludePeersOnTopic - resolver, err := 
mrcf.createShardHeaderResolver(identifierHeader, excludePeersFromTopic, idx) + resolver, err := mrcf.createShardHeaderResolver(identifierHeader, excludePeersFromTopic, idx, numPeersToQuery) if err != nil { return err } @@ -118,7 +118,13 @@ func (mrcf *metaResolversContainerFactory) generateShardHeaderResolvers() error return mrcf.container.AddMultiple(keys, resolversSlice) } -func (mrcf *metaResolversContainerFactory) createShardHeaderResolver(topic string, excludedTopic string, shardID uint32) (dataRetriever.Resolver, error) { +// createShardHeaderResolver will return a shard header resolver for the given shard ID +func (mrcf *metaResolversContainerFactory) createShardHeaderResolver( + topic string, + excludedTopic string, + shardID uint32, + numPeersToQuery int, +) (dataRetriever.Resolver, error) { hdrStorer := mrcf.store.GetStorer(dataRetriever.BlockHeaderUnit) peerListCreator, err := topicResolverSender.NewDiffPeerListCreator(mrcf.messenger, topic, excludedTopic) @@ -165,7 +171,7 @@ func (mrcf *metaResolversContainerFactory) createShardHeaderResolver(topic strin func (mrcf *metaResolversContainerFactory) generateMetaChainHeaderResolvers() error { identifierHeader := factory.MetachainBlocksTopic - resolver, err := mrcf.CreateMetaChainHeaderResolver(identifierHeader, numPeersToQuery, core.MetachainShardId) + resolver, err := mrcf.createMetaChainHeaderResolver(identifierHeader, numPeersToQuery, core.MetachainShardId) if err != nil { return err } @@ -173,8 +179,8 @@ func (mrcf *metaResolversContainerFactory) generateMetaChainHeaderResolvers() er return mrcf.container.Add(identifierHeader, resolver) } -// CreateMetaChainHeaderResolver will return a resolver for metachain headers -func (mrcf *metaResolversContainerFactory) CreateMetaChainHeaderResolver( +// createMetaChainHeaderResolver will return a resolver for metachain headers +func (mrcf *metaResolversContainerFactory) createMetaChainHeaderResolver( identifier string, numPeersToQuery int, shardId uint32, diff --git a/epochStart/bootstrap/epochStartDataProvider.go b/epochStart/bootstrap/epochStartDataProvider.go index 8a8f6e5f5ae..f0bb6b838cd 100644 --- a/epochStart/bootstrap/epochStartDataProvider.go +++ b/epochStart/bootstrap/epochStartDataProvider.go @@ -1,36 +1,64 @@ package bootstrap import ( + "encoding/hex" + "errors" "math" "time" + "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/core/partitioning" + "github.com/ElrondNetwork/elrond-go/crypto" + "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/state" + factory2 "github.com/ElrondNetwork/elrond-go/data/state/factory" + factory3 "github.com/ElrondNetwork/elrond-go/data/trie/factory" + "github.com/ElrondNetwork/elrond-go/data/typeConverters/uint64ByteSlice" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/containers" + "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/resolverscontainer" + "github.com/ElrondNetwork/elrond-go/dataRetriever/requestHandlers" + "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/disabled" "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/integrationTests" "github.com/ElrondNetwork/elrond-go/logger" "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/p2p" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/factory" 
"github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/ElrondNetwork/elrond-go/storage/timecache" ) var log = logger.GetOrCreate("registration") var _ process.Interceptor = (*simpleMetaBlockInterceptor)(nil) const requestSuffix = "_REQUEST" -const delayBetweenRequests = 200 * time.Millisecond -const thresholdForConsideringMetaBlockCorrect = 0.4 +const delayBetweenRequests = 1 * time.Second +const delayAfterRequesting = 1 * time.Second +const thresholdForConsideringMetaBlockCorrect = 0.2 const numRequestsToSendOnce = 4 // ComponentsNeededForBootstrap holds the components which need to be initialized from network type ComponentsNeededForBootstrap struct { EpochStartMetaBlock *block.MetaBlock NodesConfig *sharding.NodesSetup + ShardHeaders map[uint32]*block.Header + ShardCoordinator sharding.Coordinator + Tries state.TriesHolder +} + +type shardData struct { + headerResolver ShardHeaderResolverHandler + epochStartData *block.EpochStartShardData } // epochStartDataProvider will handle requesting the needed data to start when joining late the network type epochStartDataProvider struct { + publicKey crypto.PublicKey marshalizer marshal.Marshalizer hasher hashing.Hasher messenger p2p.Messenger @@ -38,21 +66,25 @@ type epochStartDataProvider struct { metaBlockInterceptor MetaBlockInterceptorHandler shardHeaderInterceptor ShardHeaderInterceptorHandler metaBlockResolver MetaBlockResolverHandler + requestHandlerMeta process.RequestHandler } // ArgsEpochStartDataProvider holds the arguments needed for creating an epoch start data provider component type ArgsEpochStartDataProvider struct { + PublicKey crypto.PublicKey Messenger p2p.Messenger Marshalizer marshal.Marshalizer Hasher hashing.Hasher NodesConfigProvider NodesConfigProviderHandler MetaBlockInterceptor MetaBlockInterceptorHandler ShardHeaderInterceptor ShardHeaderInterceptorHandler - MetaBlockResolver MetaBlockResolverHandler } // NewEpochStartDataProvider will return a new instance of epochStartDataProvider func NewEpochStartDataProvider(args ArgsEpochStartDataProvider) (*epochStartDataProvider, error) { + if check.IfNil(args.PublicKey) { + return nil, ErrNilPublicKey + } if check.IfNil(args.Messenger) { return nil, ErrNilMessenger } @@ -71,17 +103,14 @@ func NewEpochStartDataProvider(args ArgsEpochStartDataProvider) (*epochStartData if check.IfNil(args.ShardHeaderInterceptor) { return nil, ErrNilShardHeaderInterceptor } - if check.IfNil(args.MetaBlockResolver) { - return nil, ErrNilMetaBlockResolver - } return &epochStartDataProvider{ + publicKey: args.PublicKey, marshalizer: args.Marshalizer, hasher: args.Hasher, messenger: args.Messenger, nodesConfigProvider: args.NodesConfigProvider, metaBlockInterceptor: args.MetaBlockInterceptor, shardHeaderInterceptor: args.ShardHeaderInterceptor, - metaBlockResolver: args.MetaBlockResolver, }, nil } @@ -95,30 +124,147 @@ func (esdp *epochStartDataProvider) Bootstrap() (*ComponentsNeededForBootstrap, esdp.resetTopicsAndInterceptors() }() + requestHandlerMeta, err := esdp.createRequestHandler() + if err != nil { + return nil, err + } + + esdp.requestHandlerMeta = requestHandlerMeta + epochNumForRequestingTheLatestAvailable := uint32(math.MaxUint32) metaBlock, err := esdp.getEpochStartMetaBlock(epochNumForRequestingTheLatestAvailable) if err != nil { return nil, err } + prevMetaBlock, err := esdp.getEpochStartMetaBlock(metaBlock.Epoch - 1) if err != nil { return nil, err } + log.Info("previous meta block", "epoch", prevMetaBlock.Epoch) nodesConfig, err 
:= esdp.nodesConfigProvider.GetNodesConfigForMetaBlock(metaBlock) if err != nil { return nil, err } + shardCoordinator, err := esdp.getShardCoordinator(metaBlock, nodesConfig) + if err != nil { + return nil, err + } + + shardHeaders, err := esdp.getShardHeaders(metaBlock, nodesConfig, shardCoordinator) + if err != nil { + return nil, err + } + + epochStartData, err := esdp.getCurrentEpochStartData(shardCoordinator, metaBlock) + if err != nil { + return nil, err + } + + trie, err := esdp.getTrieFromRootHash(epochStartData.RootHash) + if err != nil { + return nil, err + } + return &ComponentsNeededForBootstrap{ EpochStartMetaBlock: metaBlock, NodesConfig: nodesConfig, + ShardHeaders: shardHeaders, + ShardCoordinator: shardCoordinator, + Tries: trie, }, nil } +func (esdp *epochStartDataProvider) createRequestHandler() (process.RequestHandler, error) { + dataPacker, err := partitioning.NewSimpleDataPacker(esdp.marshalizer) + if err != nil { + return nil, err + } + + shardC, err := sharding.NewMultiShardCoordinator(2, core.MetachainShardId) + if err != nil { + return nil, err + } + + storageService := &disabled.ChainStorer{ + GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { + return disabled.NewDisabledStorer() + }, + } + + cacher := disabled.NewDisabledPoolsHolder() + triesHolder := state.NewDataTriesHolder() + var stateTrie data.Trie + // TODO: change from integrationTests.CreateAccountsDB + _, stateTrie, _ = integrationTests.CreateAccountsDB(factory2.UserAccount) + triesHolder.Put([]byte(factory3.UserAccountTrie), stateTrie) + + var peerTrie data.Trie + _, peerTrie, _ = integrationTests.CreateAccountsDB(factory2.ValidatorAccount) + triesHolder.Put([]byte(factory3.PeerAccountTrie), peerTrie) + + resolversContainerArgs := resolverscontainer.FactoryArgs{ + ShardCoordinator: shardC, + Messenger: esdp.messenger, + Store: storageService, + Marshalizer: esdp.marshalizer, + DataPools: cacher, + Uint64ByteSliceConverter: uint64ByteSlice.NewBigEndianConverter(), + DataPacker: dataPacker, + TriesContainer: triesHolder, + SizeCheckDelta: 0, + } + + resolverFactory, err := resolverscontainer.NewMetaResolversContainerFactory(resolversContainerArgs) + if err != nil { + return nil, err + } + + container, err := resolverFactory.Create() + if err != nil { + return nil, err + } + + finder, err := containers.NewResolversFinder(container, shardC) + if err != nil { + return nil, err + } + + requestedItemsHandler := timecache.NewTimeCache(100) + + maxToRequest := 100 + + return requestHandlers.NewMetaResolverRequestHandler(finder, requestedItemsHandler, maxToRequest) +} + +func (esdp *epochStartDataProvider) getMiniBlock(miniBlockHeader *block.ShardMiniBlockHeader) (*block.MiniBlock, error) { + esdp.requestMiniBlock(miniBlockHeader) + + // TODO: wait for the requested mini block to be received and return it + return nil, nil +} + +func (esdp *epochStartDataProvider) requestMiniBlock(miniBlockHeader *block.ShardMiniBlockHeader) { + esdp.requestHandlerMeta.RequestMiniBlock(miniBlockHeader.ReceiverShardID, miniBlockHeader.Hash) +} + +func (esdp *epochStartDataProvider) getCurrentEpochStartData( + shardCoordinator sharding.Coordinator, + metaBlock *block.MetaBlock, +) (*block.EpochStartShardData, error) { + shardID := shardCoordinator.SelfId() + for _, epochStartData := range metaBlock.EpochStart.LastFinalizedHeaders { + if epochStartData.ShardId == shardID { + return &epochStartData, nil + } + } + + return nil, errors.New("epoch start data not found") +} +
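+// Sketch of how getMiniBlock is intended to be completed once a mini block
+// interceptor is wired in; the interceptor field and threshold reuse below are
+// assumptions based on the rest of this series, not code in this patch:
+//
+//	func (esdp *epochStartDataProvider) getMiniBlockWithConsensus(hdr *block.ShardMiniBlockHeader) (*block.MiniBlock, error) {
+//		esdp.requestMiniBlock(hdr)
+//		threshold := int(thresholdForConsideringMetaBlockCorrect * float64(len(esdp.messenger.Peers())))
+//		return esdp.miniBlockInterceptor.GetMiniBlock(hdr.Hash, threshold)
+//	}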
func (esdp *epochStartDataProvider) initTopicsAndInterceptors() error { err := esdp.messenger.CreateTopic(factory.MetachainBlocksTopic, true) if err != nil { + log.Info("error creating topic", "error", err) return err } @@ -127,52 +273,158 @@ func (esdp *epochStartDataProvider) initTopicsAndInterceptors() error { return err } + err = esdp.messenger.CreateTopic(factory.ShardBlocksTopic+"_1_META", true) + if err != nil { + log.Info("error creating topic", "error", err) + return err + } + + err = esdp.messenger.RegisterMessageProcessor(factory.ShardBlocksTopic+"_1_META", esdp.shardHeaderInterceptor) + if err != nil { + return err + } + return nil } +func (esdp *epochStartDataProvider) getShardID(nodesConfig *sharding.NodesSetup) (uint32, error) { + pubKeyBytes, err := esdp.publicKey.ToByteArray() + if err != nil { + return 0, err + } + pubKeyStr := hex.EncodeToString(pubKeyBytes) + for shardID, nodesPerShard := range nodesConfig.InitialNodesPubKeys() { + for _, nodePubKey := range nodesPerShard { + if nodePubKey == pubKeyStr { + return shardID, nil + } + } + } + + return 0, nil +} + +func (esdp *epochStartDataProvider) getTrieFromRootHash(_ []byte) (state.TriesHolder, error) { + // TODO: get trie from trie syncer + return state.NewDataTriesHolder(), nil +} + func (esdp *epochStartDataProvider) resetTopicsAndInterceptors() { err := esdp.messenger.UnregisterMessageProcessor(factory.MetachainBlocksTopic) if err != nil { log.Info("error unregistering message processor", "error", err) } - err = esdp.messenger.UnregisterMessageProcessor(factory.MetachainBlocksTopic + requestSuffix) +} + +func (esdp *epochStartDataProvider) getEpochStartMetaBlock(epoch uint32) (*block.MetaBlock, error) { + esdp.requestMetaBlock(epoch) + + time.Sleep(delayAfterRequesting) + + for { + numConnectedPeers := len(esdp.messenger.Peers()) + threshold := int(thresholdForConsideringMetaBlockCorrect * float64(numConnectedPeers)) + mb, errConsensusNotReached := esdp.metaBlockInterceptor.GetMetaBlock(threshold, epoch) + if errConsensusNotReached == nil { + return mb, nil + } + log.Info("consensus not reached for epoch start meta block. re-requesting and trying again...") + esdp.requestMetaBlock(epoch) + } +}
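+// For intuition: with 20 connected peers and
+// thresholdForConsideringMetaBlockCorrect = 0.2, the metablock is accepted once
+// int(0.2 * 20) = 4 distinct peers have delivered the same hash (the peer
+// count here is illustrative only, not taken from this patch).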
+ +func (esdp *epochStartDataProvider) getShardCoordinator(metaBlock *block.MetaBlock, nodesConfig *sharding.NodesSetup) (sharding.Coordinator, error) { + shardID, err := esdp.getShardID(nodesConfig) if err - log.Info("error unregistering message processor", "error", err) + return nil, err } + + numOfShards := len(metaBlock.EpochStart.LastFinalizedHeaders) + if numOfShards == 1 { + return &sharding.OneShardCoordinator{}, nil + } + + return sharding.NewMultiShardCoordinator(uint32(numOfShards), shardID) } -func (esdp *epochStartDataProvider) getEpochStartMetaBlock(epoch uint32) (*block.MetaBlock, error) { - err := esdp.requestMetaBlock(epoch) +func (esdp *epochStartDataProvider) getShardHeaders( + metaBlock *block.MetaBlock, + nodesConfig *sharding.NodesSetup, + shardCoordinator sharding.Coordinator, +) (map[uint32]*block.Header, error) { + headersMap := make(map[uint32]*block.Header) + + shardID := shardCoordinator.SelfId() + if shardID == core.MetachainShardId { + for _, entry := range metaBlock.EpochStart.LastFinalizedHeaders { + var hdr *block.Header + hdr, err := esdp.getShardHeader(entry.HeaderHash, entry.ShardId) + if err != nil { + return nil, err + } + headersMap[entry.ShardId] = hdr + } + + return headersMap, nil + } + + var entryForShard *block.EpochStartShardData + for _, entry := range metaBlock.EpochStart.LastFinalizedHeaders { + if entry.ShardId == shardID { + shardData := entry // copy the range variable: its address would otherwise end up pointing at the last iterated element + entryForShard = &shardData + break + } + } + + if entryForShard == nil { + return nil, errors.New("shard data not found") + } + + hdr, err := esdp.getShardHeader( + entryForShard.HeaderHash, + entryForShard.ShardId, + ) if err != nil { return nil, err } + + headersMap[shardID] = hdr + return headersMap, nil +} + +func (esdp *epochStartDataProvider) getShardHeader( + hash []byte, + shardID uint32, +) (*block.Header, error) { + esdp.requestShardHeader(shardID, hash) + time.Sleep(delayBetweenRequests) + for { numConnectedPeers := len(esdp.messenger.Peers()) threshold := int(thresholdForConsideringMetaBlockCorrect * float64(numConnectedPeers)) - mb, errConsensusNotReached := esdp.metaBlockInterceptor.GetMetaBlock(threshold, epoch) + mb, errConsensusNotReached := esdp.shardHeaderInterceptor.GetShardHeader(threshold) if errConsensusNotReached == nil { return mb, nil } - log.Info("consensus not reached for epoch start meta block. re-requesting and trying again...") - err = esdp.requestMetaBlock(epoch) - if err != nil { - return nil, err - } + log.Info("consensus not reached for shard header. 
re-requesting and trying again...") + esdp.requestShardHeader(shardID, hash) } } -func (esdp *epochStartDataProvider) requestMetaBlock(epoch uint32) error { +func (esdp *epochStartDataProvider) requestShardHeader(shardID uint32, hash []byte) { // send more requests + log.Debug("requested shard header", "shard ID", shardID, "hash", hash) for i := 0; i < numRequestsToSendOnce; i++ { time.Sleep(delayBetweenRequests) - log.Debug("sent request for epoch start metablock...") - err := esdp.metaBlockResolver.RequestEpochStartMetaBlock(epoch) - if err != nil { - return err - } + esdp.requestHandlerMeta.RequestShardHeader(shardID, hash) } +} - return nil +func (esdp *epochStartDataProvider) requestMetaBlock(epoch uint32) { + // send more requests + for i := 0; i < numRequestsToSendOnce; i++ { + time.Sleep(delayBetweenRequests) + esdp.requestHandlerMeta.RequestStartOfEpochMetaBlock(epoch) + } } // IsInterfaceNil returns true if there is no value under the interface diff --git a/epochStart/bootstrap/epochStartDataProvider_test.go b/epochStart/bootstrap/epochStartDataProvider_test.go index 1e748611c88..485da61c690 100644 --- a/epochStart/bootstrap/epochStartDataProvider_test.go +++ b/epochStart/bootstrap/epochStartDataProvider_test.go @@ -13,6 +13,17 @@ import ( "github.com/stretchr/testify/require" ) +func TestNewEpochStartDataProvider_NilPublicKeyShouldErr(t *testing.T) { + t.Parallel() + + args := getArguments() + args.PublicKey = nil + epStart, err := bootstrap.NewEpochStartDataProvider(args) + + require.Nil(t, epStart) + require.Equal(t, bootstrap.ErrNilPublicKey, err) +} + func TestNewEpochStartDataProvider_NilMessengerShouldErr(t *testing.T) { t.Parallel() @@ -167,6 +178,7 @@ func TestEpochStartDataProvider_Bootstrap_ShouldWork(t *testing.T) { func getArguments() bootstrap.ArgsEpochStartDataProvider { return bootstrap.ArgsEpochStartDataProvider{ + PublicKey: &mock.PublicKeyMock{}, Messenger: &mock.MessengerStub{}, Marshalizer: &mock2.MarshalizerMock{}, Hasher: mock2.HasherMock{}, diff --git a/epochStart/bootstrap/errors.go b/epochStart/bootstrap/errors.go index bad59989d93..e87204a0d81 100644 --- a/epochStart/bootstrap/errors.go +++ b/epochStart/bootstrap/errors.go @@ -2,10 +2,13 @@ package bootstrap import "errors" -// ErrNilMessenger signals that a nil messenger has been provider +// ErrNilPublicKey signals that a nil public key has been provided +var ErrNilPublicKey = errors.New("nil public key") + +// ErrNilMessenger signals that a nil messenger has been provided var ErrNilMessenger = errors.New("nil messenger") -// ErrNilMarshalizer signals that a nil marshalizer has been provider +// ErrNilMarshalizer signals that a nil marshalizer has been provided var ErrNilMarshalizer = errors.New("nil marshalizer") // ErrNilHasher signals that a nil hasher has been provider @@ -25,3 +28,6 @@ var ErrNilMetaBlockResolver = errors.New("nil metablock resolver") // ErrNumTriesExceeded signals that there were too many tries for fetching a metablock var ErrNumTriesExceeded = errors.New("num of tries exceeded. 
try re-request") + +// ErrNilShardCoordinator signals that a nil shard coordinator has been provided +var ErrNilShardCoordinator = errors.New("nil shard coordinator") diff --git a/epochStart/bootstrap/factory/epochStartDataProviderFactory.go b/epochStart/bootstrap/factory/epochStartDataProviderFactory.go index b0655770a8b..eeb59a9adfe 100644 --- a/epochStart/bootstrap/factory/epochStartDataProviderFactory.go +++ b/epochStart/bootstrap/factory/epochStartDataProviderFactory.go @@ -5,6 +5,7 @@ import ( "github.com/ElrondNetwork/elrond-go/config" "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/crypto" "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap" "github.com/ElrondNetwork/elrond-go/hashing" "github.com/ElrondNetwork/elrond-go/marshal" @@ -13,6 +14,7 @@ import ( ) type epochStartDataProviderFactory struct { + pubKey crypto.PublicKey messenger p2p.Messenger marshalizer marshal.Marshalizer hasher hashing.Hasher @@ -23,6 +25,7 @@ type epochStartDataProviderFactory struct { // EpochStartDataProviderFactoryArgs holds the arguments needed for creating a factory for the epoch start data // provider component type EpochStartDataProviderFactoryArgs struct { + PubKey crypto.PublicKey Messenger p2p.Messenger Marshalizer marshal.Marshalizer Hasher hashing.Hasher @@ -35,6 +38,9 @@ type EpochStartDataProviderFactoryArgs struct { // NewEpochStartDataProviderFactory returns a new instance of epochStartDataProviderFactory func NewEpochStartDataProviderFactory(args EpochStartDataProviderFactoryArgs) (*epochStartDataProviderFactory, error) { + if check.IfNil(args.PubKey) { + return nil, bootstrap.ErrNilPublicKey + } if check.IfNil(args.Messenger) { return nil, bootstrap.ErrNilMessenger } @@ -57,6 +63,7 @@ func NewEpochStartDataProviderFactory(args EpochStartDataProviderFactoryArgs) (* shouldSync = true // hardcoded so we can test the sync return &epochStartDataProviderFactory{ + pubKey: args.PubKey, messenger: args.Messenger, marshalizer: args.Marshalizer, hasher: args.Hasher, @@ -75,20 +82,18 @@ func (esdpf *epochStartDataProviderFactory) Create() (bootstrap.EpochStartDataPr if err != nil { return nil, err } - shardHdrInterceptor := bootstrap.NewSimpleShardHeaderInterceptor(esdpf.marshalizer) - metaBlockResolver, err := bootstrap.NewSimpleMetaBlocksResolver(esdpf.messenger, esdpf.marshalizer) + shardHdrInterceptor, err := bootstrap.NewSimpleShardHeaderInterceptor(esdpf.marshalizer, esdpf.hasher) if err != nil { return nil, err } - argsEpochStart := bootstrap.ArgsEpochStartDataProvider{ + PublicKey: esdpf.pubKey, Messenger: esdpf.messenger, Marshalizer: esdpf.marshalizer, Hasher: esdpf.hasher, NodesConfigProvider: esdpf.nodesConfigProvider, MetaBlockInterceptor: metaBlockInterceptor, ShardHeaderInterceptor: shardHdrInterceptor, - MetaBlockResolver: metaBlockResolver, } epochStartDataProvider, err := bootstrap.NewEpochStartDataProvider(argsEpochStart) diff --git a/epochStart/bootstrap/interface.go b/epochStart/bootstrap/interface.go index 21992ddfa4a..e1e766599f0 100644 --- a/epochStart/bootstrap/interface.go +++ b/epochStart/bootstrap/interface.go @@ -15,15 +15,27 @@ type MetaBlockInterceptorHandler interface { // ShardHeaderInterceptorHandler defines what a component which will handle receiving the shard headers should do type ShardHeaderInterceptorHandler interface { process.Interceptor - GetAllReceivedShardHeaders() []block.ShardData + GetShardHeader(target int) (*block.Header, error) } -// MetaBlockResolverHandler defines what a component which will 
handle requesting a metablock should do +// MetaBlockResolverHandler defines what a component which will handle requesting a meta block should do type MetaBlockResolverHandler interface { RequestEpochStartMetaBlock(epoch uint32) error IsInterfaceNil() bool } +// ShardHeaderResolverHandler defines what a component which will handle requesting a shard block should do +type ShardHeaderResolverHandler interface { + RequestHeaderByHash(hash []byte, epoch uint32) error + IsInterfaceNil() bool +} + +// MiniBlockResolverHandler defines what a component which will handle requesting a mini block should do +type MiniBlockResolverHandler interface { + RequestHeaderByHash(hash []byte, epoch uint32) error + IsInterfaceNil() bool +} + // NodesConfigProviderHandler defines what a component which will handle the nodes config should be able to do type NodesConfigProviderHandler interface { GetNodesConfigForMetaBlock(metaBlock *block.MetaBlock) (*sharding.NodesSetup, error) diff --git a/epochStart/bootstrap/mock/publicKeyMock.go b/epochStart/bootstrap/mock/publicKeyMock.go new file mode 100644 index 00000000000..e018a183715 --- /dev/null +++ b/epochStart/bootstrap/mock/publicKeyMock.go @@ -0,0 +1,30 @@ +package mock + +import "github.com/ElrondNetwork/elrond-go/crypto" + +// PublicKeyMock mocks a public key implementation +type PublicKeyMock struct { + ToByteArrayMock func() ([]byte, error) + SuiteMock func() crypto.Suite + PointMock func() crypto.Point +} + +// ToByteArray mocks converting a public key to a byte array +func (pubKey *PublicKeyMock) ToByteArray() ([]byte, error) { + return []byte("publicKeyMock"), nil +} + +// Suite - +func (pubKey *PublicKeyMock) Suite() crypto.Suite { + return pubKey.SuiteMock() +} + +// Point - +func (pubKey *PublicKeyMock) Point() crypto.Point { + return pubKey.PointMock() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (pubKey *PublicKeyMock) IsInterfaceNil() bool { + return pubKey == nil +} diff --git a/epochStart/bootstrap/mock/shardCoordinatorMock.go b/epochStart/bootstrap/mock/shardCoordinatorMock.go new file mode 100644 index 00000000000..12dee4aad79 --- /dev/null +++ b/epochStart/bootstrap/mock/shardCoordinatorMock.go @@ -0,0 +1,76 @@ +package mock + +import ( + "fmt" + + "github.com/ElrondNetwork/elrond-go/data/state" +) + +// MultipleShardsCoordinatorMock - +type MultipleShardsCoordinatorMock struct { + NoShards uint32 + ComputeIdCalled func(address state.AddressContainer) uint32 + SelfIDCalled func() uint32 + CurrentShard uint32 +} + +// NewMultiShardsCoordinatorMock - +func NewMultiShardsCoordinatorMock(nrShard uint32) *MultipleShardsCoordinatorMock { + return &MultipleShardsCoordinatorMock{NoShards: nrShard} +} + +// NumberOfShards - +func (scm *MultipleShardsCoordinatorMock) NumberOfShards() uint32 { + return scm.NoShards +} + +// ComputeId - +func (scm *MultipleShardsCoordinatorMock) ComputeId(address state.AddressContainer) uint32 { + if scm.ComputeIdCalled == nil { + return scm.SelfId() + } + return scm.ComputeIdCalled(address) +} + +// SelfId - +func (scm *MultipleShardsCoordinatorMock) SelfId() uint32 { + if scm.SelfIDCalled != nil { + return scm.SelfIDCalled() + } + + return scm.CurrentShard +} + +// SetSelfId - +func (scm *MultipleShardsCoordinatorMock) SetSelfId(shardId uint32) error { + return nil +} + +// SameShard - +func (scm *MultipleShardsCoordinatorMock) SameShard(firstAddress, secondAddress state.AddressContainer) bool { + return true +} + +// SetNoShards - +func (scm *MultipleShardsCoordinatorMock) 
SetNoShards(noShards uint32) { + scm.NoShards = noShards +} + +// CommunicationIdentifier returns the identifier between current shard ID and destination shard ID +// identifier is generated such as the first shard from identifier is always smaller than the last +func (scm *MultipleShardsCoordinatorMock) CommunicationIdentifier(destShardID uint32) string { + if destShardID == scm.CurrentShard { + return fmt.Sprintf("_%d", scm.CurrentShard) + } + + if destShardID < scm.CurrentShard { + return fmt.Sprintf("_%d_%d", destShardID, scm.CurrentShard) + } + + return fmt.Sprintf("_%d_%d", scm.CurrentShard, destShardID) +} + +// IsInterfaceNil returns true if there is no value under the interface +func (scm *MultipleShardsCoordinatorMock) IsInterfaceNil() bool { + return scm == nil +} diff --git a/epochStart/bootstrap/simpleMetaBlockInterceptor.go b/epochStart/bootstrap/simpleMetaBlockInterceptor.go index ba7649d6c54..f3a715f3449 100644 --- a/epochStart/bootstrap/simpleMetaBlockInterceptor.go +++ b/epochStart/bootstrap/simpleMetaBlockInterceptor.go @@ -13,7 +13,7 @@ import ( "github.com/ElrondNetwork/elrond-go/p2p" ) -const timeToWaitBeforeCheckingReceivedMetaBlocks = 500 * time.Millisecond +const timeToWaitBeforeCheckingReceivedHeaders = 1 * time.Second const numTriesUntilExit = 5 type simpleMetaBlockInterceptor struct { @@ -83,9 +83,10 @@ func (s *simpleMetaBlockInterceptor) addToPeerList(hash string, id p2p.PeerID) { // GetMetaBlock will return the metablock after it is confirmed or an error if the number of tries was exceeded func (s *simpleMetaBlockInterceptor) GetMetaBlock(target int, epoch uint32) (*block.MetaBlock, error) { for count := 0; count < numTriesUntilExit; count++ { - time.Sleep(timeToWaitBeforeCheckingReceivedMetaBlocks) + time.Sleep(timeToWaitBeforeCheckingReceivedHeaders) s.mutReceivedMetaBlocks.RLock() for hash, peersList := range s.mapMetaBlocksFromPeers { + log.Debug("metablock from peers", "num peers", len(peersList), "target", target, "hash", []byte(hash)) isOk := s.isMapEntryOk(peersList, hash, target, epoch) if isOk { s.mutReceivedMetaBlocks.RUnlock() diff --git a/epochStart/bootstrap/simpleMetaBlockResolver.go b/epochStart/bootstrap/simpleMetaBlockResolver.go deleted file mode 100644 index 149ca3cd01a..00000000000 --- a/epochStart/bootstrap/simpleMetaBlockResolver.go +++ /dev/null @@ -1,115 +0,0 @@ -package bootstrap - -import ( - "errors" - "fmt" - - "github.com/ElrondNetwork/elrond-go/core" - "github.com/ElrondNetwork/elrond-go/core/check" - "github.com/ElrondNetwork/elrond-go/core/partitioning" - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/data/typeConverters/uint64ByteSlice" - "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/resolverscontainer" - "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/disabled" - "github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/p2p" - "github.com/ElrondNetwork/elrond-go/process/factory" - "github.com/ElrondNetwork/elrond-go/sharding" - "github.com/ElrondNetwork/elrond-go/storage" -) - -const percentageOfPeersToSendRequests = 0.4 -const defaultNumOfPeersToSendRequests = 2 - -// simpleMetaBlocksResolver initializes a HeaderResolver and sends requests from it -type simpleMetaBlocksResolver struct { - messenger p2p.Messenger - marshalizer marshal.Marshalizer - mbResolver dataRetriever.HeaderResolver -} - -// NewSimpleMetaBlocksResolver returns a new instance of simpleMetaBlocksResolver -func 
NewSimpleMetaBlocksResolver( - messenger p2p.Messenger, - marshalizer marshal.Marshalizer, -) (*simpleMetaBlocksResolver, error) { - if check.IfNil(messenger) { - return nil, ErrNilMessenger - } - if check.IfNil(marshalizer) { - return nil, ErrNilMarshalizer - } - - smbr := &simpleMetaBlocksResolver{ - messenger: messenger, - marshalizer: marshalizer, - } - err := smbr.init() - if err != nil { - return nil, err - } - - return smbr, nil -} - -func (smbr *simpleMetaBlocksResolver) init() error { - storageService := &disabled.ChainStorer{ - GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { - return disabled.NewDisabledStorer() - }, - } - cacher := disabled.NewDisabledPoolsHolder() - dataPacker, err := partitioning.NewSimpleDataPacker(smbr.marshalizer) - if err != nil { - return err - } - triesHolder := state.NewDataTriesHolder() - shardCoordinator, err := sharding.NewMultiShardCoordinator(3, core.MetachainShardId) - if err != nil { - return err - } - - resolversContainerArgs := resolverscontainer.FactoryArgs{ - ShardCoordinator: shardCoordinator, - Messenger: smbr.messenger, - Store: storageService, - Marshalizer: smbr.marshalizer, - DataPools: cacher, - Uint64ByteSliceConverter: uint64ByteSlice.NewBigEndianConverter(), - DataPacker: dataPacker, - TriesContainer: triesHolder, - SizeCheckDelta: 0, - } - metaChainResolverContainer, err := resolverscontainer.NewMetaResolversContainerFactory(resolversContainerArgs) - if err != nil { - return err - } - - numPeersToQuery := int(percentageOfPeersToSendRequests * float64(len(smbr.messenger.Peers()))) - if numPeersToQuery == 0 { - numPeersToQuery = defaultNumOfPeersToSendRequests - } - resolver, err := metaChainResolverContainer.CreateMetaChainHeaderResolver(factory.MetachainBlocksTopic, numPeersToQuery, 0) - if err != nil { - return err - } - - castedResolver, ok := resolver.(dataRetriever.HeaderResolver) - if !ok { - return errors.New("invalid resolver type") - } - smbr.mbResolver = castedResolver - - return nil -} - -// RequestEpochStartMetaBlock will request the metablock to the peers -func (smbr *simpleMetaBlocksResolver) RequestEpochStartMetaBlock(epoch uint32) error { - return smbr.mbResolver.RequestDataFromEpoch([]byte(fmt.Sprintf("epochStartBlock_%d", epoch))) -} - -// IsInterfaceNil returns true if there is no value under the interface -func (smbr *simpleMetaBlocksResolver) IsInterfaceNil() bool { - return smbr == nil -} diff --git a/epochStart/bootstrap/simpleMetaBlockResolver_test.go b/epochStart/bootstrap/simpleMetaBlockResolver_test.go deleted file mode 100644 index e4d194f640b..00000000000 --- a/epochStart/bootstrap/simpleMetaBlockResolver_test.go +++ /dev/null @@ -1,57 +0,0 @@ -package bootstrap_test - -import ( - "testing" - - "github.com/ElrondNetwork/elrond-go/core/check" - "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap" - mock2 "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/mock" - "github.com/ElrondNetwork/elrond-go/epochStart/mock" - "github.com/ElrondNetwork/elrond-go/p2p" - "github.com/stretchr/testify/require" -) - -func TestNewSimpleMetaBlocksResolver_NilMessengerShouldErr(t *testing.T) { - t.Parallel() - - smbr, err := bootstrap.NewSimpleMetaBlocksResolver(nil, &mock.MarshalizerMock{}) - require.Nil(t, smbr) - require.Equal(t, bootstrap.ErrNilMessenger, err) -} - -func TestNewSimpleMetaBlocksResolver_NilMarshalizerShouldErr(t *testing.T) { - t.Parallel() - - smbr, err := bootstrap.NewSimpleMetaBlocksResolver(&mock2.MessengerStub{}, nil) - require.Nil(t, smbr) - require.Equal(t, 
bootstrap.ErrNilMarshalizer, err)
-}
-
-func TestNewSimpleMetaBlocksResolver_OkValsShouldWork(t *testing.T) {
-	t.Parallel()
-
-	smbr, err := bootstrap.NewSimpleMetaBlocksResolver(&mock2.MessengerStub{}, &mock.MarshalizerMock{})
-	require.Nil(t, err)
-	require.False(t, check.IfNil(smbr))
-}
-
-func TestSimpleMetaBlocksResolver_RequestEpochStartMetaBlock(t *testing.T) {
-	t.Parallel()
-
-	requestWasSent := false
-
-	messenger := &mock2.MessengerStub{
-		ConnectedPeersOnTopicCalled: func(_ string) []p2p.PeerID {
-			return []p2p.PeerID{"peer1", "peer2", "peer2"}
-		},
-		SendToConnectedPeerCalled: func(_ string, _ []byte, _ p2p.PeerID) error {
-			requestWasSent = true
-			return nil
-		},
-	}
-	smbr, _ := bootstrap.NewSimpleMetaBlocksResolver(messenger, &mock.MarshalizerMock{})
-
-	err := smbr.RequestEpochStartMetaBlock(0)
-	require.Nil(t, err)
-	require.True(t, requestWasSent)
-}
diff --git a/epochStart/bootstrap/simpleMiniBlockInterceptor.go b/epochStart/bootstrap/simpleMiniBlockInterceptor.go
new file mode 100644
index 00000000000..391c9db9b50
--- /dev/null
+++ b/epochStart/bootstrap/simpleMiniBlockInterceptor.go
@@ -0,0 +1,126 @@
+package bootstrap
+
+import (
+	"bytes"
+	"sync"
+	"time"
+
+	"github.com/ElrondNetwork/elrond-go/core"
+	"github.com/ElrondNetwork/elrond-go/core/check"
+	"github.com/ElrondNetwork/elrond-go/data/block"
+	"github.com/ElrondNetwork/elrond-go/hashing"
+	"github.com/ElrondNetwork/elrond-go/marshal"
+	"github.com/ElrondNetwork/elrond-go/p2p"
+)
+
+type simpleMiniBlockInterceptor struct {
+	marshalizer            marshal.Marshalizer
+	hasher                 hashing.Hasher
+	mutReceivedMiniBlocks  sync.RWMutex
+	mapReceivedMiniBlocks  map[string]*block.MiniBlock
+	mapMiniBlocksFromPeers map[string][]p2p.PeerID
+}
+
+// NewSimpleMiniBlockInterceptor will return a new instance of simpleMiniBlockInterceptor
+func NewSimpleMiniBlockInterceptor(marshalizer marshal.Marshalizer, hasher hashing.Hasher) (*simpleMiniBlockInterceptor, error) {
+	if check.IfNil(marshalizer) {
+		return nil, ErrNilMarshalizer
+	}
+	if check.IfNil(hasher) {
+		return nil, ErrNilHasher
+	}
+
+	return &simpleMiniBlockInterceptor{
+		marshalizer:            marshalizer,
+		hasher:                 hasher,
+		mutReceivedMiniBlocks:  sync.RWMutex{},
+		mapReceivedMiniBlocks:  make(map[string]*block.MiniBlock),
+		mapMiniBlocksFromPeers: make(map[string][]p2p.PeerID),
+	}, nil
+}
+
+// ProcessReceivedMessage will receive the mini blocks and will add them to the maps
+func (s *simpleMiniBlockInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, _ func(buffToSend []byte)) error {
+	log.Info("received mini block")
+	var mb block.MiniBlock
+	err := s.marshalizer.Unmarshal(&mb, message.Data())
+	if err != nil {
+		return err
+	}
+	s.mutReceivedMiniBlocks.Lock()
+	mbHash, err := core.CalculateHash(s.marshalizer, s.hasher, mb)
+	if err != nil {
+		s.mutReceivedMiniBlocks.Unlock()
+		return err
+	}
+	s.mapReceivedMiniBlocks[string(mbHash)] = &mb
+	s.addToPeerList(string(mbHash), message.Peer())
+	s.mutReceivedMiniBlocks.Unlock()
+
+	return nil
+}
+
+// this func should be called under mutex protection
+func (s *simpleMiniBlockInterceptor) addToPeerList(hash string, id p2p.PeerID) {
+	peersListForHash, ok := s.mapMiniBlocksFromPeers[hash]
+
+	if !ok {
+		s.mapMiniBlocksFromPeers[hash] = append(s.mapMiniBlocksFromPeers[hash], id)
+		return
+	}
+
+	for _, peer := range peersListForHash {
+		if peer == id {
+			return
+		}
+	}
+
+	s.mapMiniBlocksFromPeers[hash] = append(s.mapMiniBlocksFromPeers[hash], id)
+}
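// Illustrative sketch (not part of this patch): the interceptor confirms a
// mini block by counting distinct peers that delivered the same hash, so a
// caller has to pick a target before asking for the block. The helper below
// shows one plausible call pattern using only identifiers already imported in
// this file; the topic argument and the half-of-connected-peers quorum are
// assumptions, not the patch's actual wiring.

func fetchConfirmedMiniBlock(
	messenger p2p.Messenger,
	marshalizer marshal.Marshalizer,
	hasher hashing.Hasher,
	topic string,
	hash []byte,
) (*block.MiniBlock, error) {
	interceptor, err := NewSimpleMiniBlockInterceptor(marshalizer, hasher)
	if err != nil {
		return nil, err
	}
	// every message received on the topic feeds the hash -> peers map above
	err = messenger.RegisterMessageProcessor(topic, interceptor)
	if err != nil {
		return nil, err
	}
	// assumed quorum: at least half of the currently connected peers
	target := len(messenger.Peers()) / 2
	if target < 1 {
		target = 1
	}
	return interceptor.GetMiniBlock(hash, target)
}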
+// GetMiniBlock will return the mini block after it is confirmed or an error if the number of tries was exceeded
+func (s *simpleMiniBlockInterceptor) GetMiniBlock(hash []byte, target int) (*block.MiniBlock, error) {
+	for count := 0; count < numTriesUntilExit; count++ {
+		time.Sleep(timeToWaitBeforeCheckingReceivedHeaders)
+		s.mutReceivedMiniBlocks.RLock()
+		for hashInMap, peersList := range s.mapMiniBlocksFromPeers {
+			isOk := s.isMapEntryOk(hash, peersList, hashInMap, target)
+			if isOk {
+				s.mutReceivedMiniBlocks.RUnlock()
+				return s.mapReceivedMiniBlocks[hashInMap], nil
+			}
+		}
+		s.mutReceivedMiniBlocks.RUnlock()
+	}
+
+	return nil, ErrNumTriesExceeded
+}
+
+func (s *simpleMiniBlockInterceptor) isMapEntryOk(
+	expectedHash []byte,
+	peersList []p2p.PeerID,
+	hash string,
+	target int,
+) bool {
+	mb, ok := s.mapReceivedMiniBlocks[string(expectedHash)]
+	if !ok {
+		return false
+	}
+
+	mbHash, err := core.CalculateHash(s.marshalizer, s.hasher, mb)
+	if err != nil {
+		return false
+	}
+	log.Info("peers map for mini block", "target", target, "num", len(peersList))
+	if bytes.Equal(expectedHash, mbHash) && len(peersList) >= target {
+		log.Info("got consensus for mini block", "len", len(peersList))
+		return true
+	}
+
+	return false
+}
+
+// IsInterfaceNil returns true if there is no value under the interface
+func (s *simpleMiniBlockInterceptor) IsInterfaceNil() bool {
+	return s == nil
+}
diff --git a/epochStart/bootstrap/simpleShardHeaderInterceptor.go b/epochStart/bootstrap/simpleShardHeaderInterceptor.go
index e2599627793..17758062920 100644
--- a/epochStart/bootstrap/simpleShardHeaderInterceptor.go
+++ b/epochStart/bootstrap/simpleShardHeaderInterceptor.go
@@ -1,37 +1,115 @@
 package bootstrap
 
 import (
+	"sync"
+	"time"
+
+	"github.com/ElrondNetwork/elrond-go/core"
+	"github.com/ElrondNetwork/elrond-go/core/check"
 	"github.com/ElrondNetwork/elrond-go/data/block"
+	"github.com/ElrondNetwork/elrond-go/hashing"
 	"github.com/ElrondNetwork/elrond-go/marshal"
 	"github.com/ElrondNetwork/elrond-go/p2p"
 )
 
 type simpleShardHeaderInterceptor struct {
-	marshalizer      marshal.Marshalizer
-	receivedHandlers []block.ShardData
+	marshalizer              marshal.Marshalizer
+	hasher                   hashing.Hasher
+	mutReceivedShardHeaders  sync.RWMutex
+	mapReceivedShardHeaders  map[string]*block.Header
+	mapShardHeadersFromPeers map[string][]p2p.PeerID
 }
 
-func NewSimpleShardHeaderInterceptor(marshalizer marshal.Marshalizer) *simpleShardHeaderInterceptor {
-	return &simpleShardHeaderInterceptor{
-		marshalizer:      marshalizer,
-		receivedHandlers: make([]block.ShardData, 0),
+// NewSimpleShardHeaderInterceptor will return a new instance of simpleShardHeaderInterceptor
+func NewSimpleShardHeaderInterceptor(marshalizer marshal.Marshalizer, hasher hashing.Hasher) (*simpleShardHeaderInterceptor, error) {
+	if check.IfNil(marshalizer) {
+		return nil, ErrNilMarshalizer
+	}
+	if check.IfNil(hasher) {
+		return nil, ErrNilHasher
 	}
+
+	return &simpleShardHeaderInterceptor{
+		marshalizer:              marshalizer,
+		hasher:                   hasher,
+		mutReceivedShardHeaders:  sync.RWMutex{},
+		mapReceivedShardHeaders:  make(map[string]*block.Header),
+		mapShardHeadersFromPeers: make(map[string][]p2p.PeerID),
+	}, nil
 }
 
-func (s *simpleShardHeaderInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, broadcastHandler func(buffToSend []byte)) error {
-	var hdr block.ShardData
-	err := s.marshalizer.Unmarshal(&hdr, message.Data())
-	if err == nil {
-		s.receivedHandlers = append(s.receivedHandlers, hdr)
+// ProcessReceivedMessage will receive the shard headers and will add them to the maps
+func (s *simpleShardHeaderInterceptor) ProcessReceivedMessage(message 
p2p.MessageP2P, _ func(buffToSend []byte)) error { + log.Info("received shard header") + var mb block.Header + err := s.marshalizer.Unmarshal(&mb, message.Data()) + if err != nil { + return err } + s.mutReceivedShardHeaders.Lock() + mbHash, err := core.CalculateHash(s.marshalizer, s.hasher, mb) + if err != nil { + s.mutReceivedShardHeaders.Unlock() + return err + } + s.mapReceivedShardHeaders[string(mbHash)] = &mb + s.addToPeerList(string(mbHash), message.Peer()) + s.mutReceivedShardHeaders.Unlock() return nil } -func (s *simpleShardHeaderInterceptor) GetAllReceivedShardHeaders() []block.ShardData { - return s.receivedHandlers +// this func should be called under mutex protection +func (s *simpleShardHeaderInterceptor) addToPeerList(hash string, id p2p.PeerID) { + peersListForHash, ok := s.mapShardHeadersFromPeers[hash] + + if !ok { + s.mapShardHeadersFromPeers[hash] = append(s.mapShardHeadersFromPeers[hash], id) + return + } + + for _, peer := range peersListForHash { + if peer == id { + return + } + } + + s.mapShardHeadersFromPeers[hash] = append(s.mapShardHeadersFromPeers[hash], id) +} + +// GetMiniBlock will return the metablock after it is confirmed or an error if the number of tries was exceeded +func (s *simpleShardHeaderInterceptor) GetShardHeader(target int) (*block.Header, error) { + for count := 0; count < numTriesUntilExit; count++ { + time.Sleep(timeToWaitBeforeCheckingReceivedHeaders) + s.mutReceivedShardHeaders.RLock() + for hash, peersList := range s.mapShardHeadersFromPeers { + isOk := s.isMapEntryOk(peersList, hash, target) + if isOk { + s.mutReceivedShardHeaders.RUnlock() + return s.mapReceivedShardHeaders[hash], nil + } + } + s.mutReceivedShardHeaders.RUnlock() + } + + return nil, ErrNumTriesExceeded +} + +func (s *simpleShardHeaderInterceptor) isMapEntryOk( + peersList []p2p.PeerID, + hash string, + target int, +) bool { + log.Info("peers map for shard hdr", "target", target, "num", len(peersList)) + if len(peersList) >= target { + log.Info("got consensus for metablock", "len", len(peersList)) + return true + } + + return false } +// IsInterfaceNil returns true if there is no value under the interface func (s *simpleShardHeaderInterceptor) IsInterfaceNil() bool { return s == nil } diff --git a/facade/elrondNodeFacade.go b/facade/elrondNodeFacade.go index 83c202c32d4..7d0ae3d1aec 100644 --- a/facade/elrondNodeFacade.go +++ b/facade/elrondNodeFacade.go @@ -73,12 +73,12 @@ func (ef *ElrondNodeFacade) SetConfig(facadeConfig *config.FacadeConfig) { // StartNode starts the underlying node func (ef *ElrondNodeFacade) StartNode(epoch uint32, withP2pBootstrap bool) error { - if withP2pBootstrap { - err := ef.node.Start() - if err != nil { - return err - } - } + //if withP2pBootstrap { + // err := ef.node.Start() + // if err != nil { + // return err + // } + //} err := ef.node.StartConsensus(epoch) return err diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 7e3f9ab24e1..10bbe2fc2bd 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -1113,7 +1113,7 @@ func (tpn *TestProcessorNode) CommitBlock(body data.BodyHandler, header data.Hea _ = tpn.BlockProcessor.CommitBlock(tpn.BlockChain, header, body) } -// GetShardHeader returns the first *dataBlock.Header stored in datapools having the nonce provided as parameter +// GetMiniBlock returns the first *dataBlock.Header stored in datapools having the nonce provided as parameter func (tpn *TestProcessorNode) GetShardHeader(nonce uint64) 
(*dataBlock.Header, error) { invalidCachers := tpn.DataPool == nil || tpn.DataPool.Headers() == nil if invalidCachers { diff --git a/process/block/shardblock.go b/process/block/shardblock.go index f82d9e4a102..49ff09bd294 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -1024,7 +1024,7 @@ func (sp *shardProcessor) getLastSelfNotarizedHeaderByMetachain(chainHandler dat hash := sp.forkDetector.GetHighestFinalBlockHash() header, err := process.GetShardHeader(hash, sp.dataPool.Headers(), sp.marshalizer, sp.store) if err != nil { - log.Warn("getLastSelfNotarizedHeaderByMetachain.GetShardHeader", "error", err.Error(), "hash", hash, "nonce", sp.forkDetector.GetHighestFinalBlockNonce()) + log.Warn("getLastSelfNotarizedHeaderByMetachain.GetMiniBlock", "error", err.Error(), "hash", hash, "nonce", sp.forkDetector.GetHighestFinalBlockNonce()) return nil, nil } diff --git a/process/common.go b/process/common.go index 2bb9a6cee02..0b0fa26bc5d 100644 --- a/process/common.go +++ b/process/common.go @@ -32,7 +32,7 @@ func EmptyChannel(ch chan bool) int { } } -// GetShardHeader gets the header, which is associated with the given hash, from pool or storage +// GetMiniBlock gets the header, which is associated with the given hash, from pool or storage func GetShardHeader( hash []byte, headersCacher dataRetriever.HeadersPool, diff --git a/process/track/shardBlockTrack.go b/process/track/shardBlockTrack.go index 98e23776848..6c9b3a2663b 100644 --- a/process/track/shardBlockTrack.go +++ b/process/track/shardBlockTrack.go @@ -115,7 +115,7 @@ func (sbt *shardBlockTrack) GetSelfHeaders(headerHandler data.HeaderHandler) []* header, err := process.GetShardHeader(shardInfo.HeaderHash, sbt.headersPool, sbt.marshalizer, sbt.store) if err != nil { - log.Trace("GetSelfHeaders.GetShardHeader", "error", err.Error()) + log.Trace("GetSelfHeaders.GetMiniBlock", "error", err.Error()) continue } From 4147784623ad0dbeebd57fbf62b8029c3908fbb1 Mon Sep 17 00:00:00 2001 From: bogdan-rosianu Date: Wed, 11 Mar 2020 14:15:05 +0200 Subject: [PATCH 09/61] some refactoring + added more interceptors --- cmd/node/main.go | 4 +- .../bootstrap/epochStartDataProvider.go | 154 +++++++++++++----- .../bootstrap/epochStartDataProvider_test.go | 95 ++++------- epochStart/bootstrap/errors.go | 7 +- epochStart/bootstrap/export_test.go | 4 +- .../factory/epochStartDataProviderFactory.go | 25 ++- epochStart/bootstrap/interface.go | 28 ++-- .../epochStartMetaBlockInterceptorStub.go | 35 ++++ epochStart/bootstrap/mock/messengerStub.go | 61 ++++--- .../mock/metaBlockInterceptorStub.go | 6 +- .../mock/miniBlockInterceptorStub.go | 35 ++++ .../mock/shardHeaderInterceptorStub.go | 10 ++ .../simpleEpochStartMetaBlockInterceptor.go | 131 +++++++++++++++ ...pleEpochStartMetaBlockInterceptor_test.go} | 24 +-- .../bootstrap/simpleMetaBlockInterceptor.go | 32 ++-- .../bootstrap/simpleShardHeaderInterceptor.go | 4 +- facade/elrondNodeFacade.go | 10 +- facade/elrondNodeFacade_test.go | 30 +--- facade/interface.go | 4 +- facade/mock/nodeMock.go | 6 +- integrationTests/consensus/testInitializer.go | 2 +- integrationTests/resolvers/testInitializer.go | 4 +- .../block/interceptedRequestHdr_test.go | 8 +- .../interceptedRequestTxBlockBody_test.go | 4 +- .../transaction/interceptedBulkTx_test.go | 2 +- .../interceptedBulkUnsignedTx_test.go | 2 +- .../transaction/interceptedResolvedTx_test.go | 8 +- .../interceptedResolvedUnsignedTx_test.go | 4 +- integrationTests/state/stateTrieSync_test.go | 4 +- integrationTests/testInitializer.go | 2 +- 
node/node.go | 10 +- node/node_test.go | 53 ++---- p2p/libp2p/netMessenger.go | 23 ++- p2p/p2p.go | 5 + 34 files changed, 550 insertions(+), 286 deletions(-) create mode 100644 epochStart/bootstrap/mock/epochStartMetaBlockInterceptorStub.go create mode 100644 epochStart/bootstrap/mock/miniBlockInterceptorStub.go create mode 100644 epochStart/bootstrap/simpleEpochStartMetaBlockInterceptor.go rename epochStart/bootstrap/{simpleMetaBlockInterceptor_test.go => simpleEpochStartMetaBlockInterceptor_test.go} (78%) diff --git a/cmd/node/main.go b/cmd/node/main.go index d3a61cf2817..0c28006615f 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -928,8 +928,8 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { log.Trace("starting background services") ef.StartBackgroundServices() - log.Debug("bootstrapping node...") - err = ef.StartNode(currentEpoch, isFreshStart) + log.Debug("starting node...") + err = ef.StartNode(currentEpoch) if err != nil { log.Error("starting node failed", err.Error()) return err diff --git a/epochStart/bootstrap/epochStartDataProvider.go b/epochStart/bootstrap/epochStartDataProvider.go index f0bb6b838cd..d3879d36625 100644 --- a/epochStart/bootstrap/epochStartDataProvider.go +++ b/epochStart/bootstrap/epochStartDataProvider.go @@ -51,37 +51,36 @@ type ComponentsNeededForBootstrap struct { Tries state.TriesHolder } -type shardData struct { - headerResolver ShardHeaderResolverHandler - epochStartData *block.EpochStartShardData -} - // epochStartDataProvider will handle requesting the needed data to start when joining late the network type epochStartDataProvider struct { - publicKey crypto.PublicKey - marshalizer marshal.Marshalizer - hasher hashing.Hasher - messenger p2p.Messenger - nodesConfigProvider NodesConfigProviderHandler - metaBlockInterceptor MetaBlockInterceptorHandler - shardHeaderInterceptor ShardHeaderInterceptorHandler - metaBlockResolver MetaBlockResolverHandler - requestHandlerMeta process.RequestHandler + publicKey crypto.PublicKey + marshalizer marshal.Marshalizer + hasher hashing.Hasher + messenger p2p.Messenger + nodesConfigProvider NodesConfigProviderHandler + epochStartMetaBlockInterceptor EpochStartMetaBlockInterceptorHandler + metaBlockInterceptor MetaBlockInterceptorHandler + shardHeaderInterceptor ShardHeaderInterceptorHandler + miniBlockInterceptor MiniBlockInterceptorHandler + requestHandlerMeta process.RequestHandler } // ArgsEpochStartDataProvider holds the arguments needed for creating an epoch start data provider component type ArgsEpochStartDataProvider struct { - PublicKey crypto.PublicKey - Messenger p2p.Messenger - Marshalizer marshal.Marshalizer - Hasher hashing.Hasher - NodesConfigProvider NodesConfigProviderHandler - MetaBlockInterceptor MetaBlockInterceptorHandler - ShardHeaderInterceptor ShardHeaderInterceptorHandler + PublicKey crypto.PublicKey + Messenger p2p.Messenger + Marshalizer marshal.Marshalizer + Hasher hashing.Hasher + NodesConfigProvider NodesConfigProviderHandler + EpochStartMetaBlockInterceptor EpochStartMetaBlockInterceptorHandler + MetaBlockInterceptor MetaBlockInterceptorHandler + ShardHeaderInterceptor ShardHeaderInterceptorHandler + MiniBlockInterceptor MiniBlockInterceptorHandler } // NewEpochStartDataProvider will return a new instance of epochStartDataProvider func NewEpochStartDataProvider(args ArgsEpochStartDataProvider) (*epochStartDataProvider, error) { + // TODO: maybe remove these nil checks as all of them have been done in the factory if check.IfNil(args.PublicKey) { return 
nil, ErrNilPublicKey
 	}
@@ -97,20 +96,28 @@ func NewEpochStartDataProvider(args ArgsEpochStartDataProvider) (*epochStartData
 	if check.IfNil(args.NodesConfigProvider) {
 		return nil, ErrNilNodesConfigProvider
 	}
+	if check.IfNil(args.EpochStartMetaBlockInterceptor) {
+		return nil, ErrNilEpochStartMetaBlockInterceptor
+	}
 	if check.IfNil(args.MetaBlockInterceptor) {
 		return nil, ErrNilMetaBlockInterceptor
 	}
 	if check.IfNil(args.ShardHeaderInterceptor) {
 		return nil, ErrNilShardHeaderInterceptor
 	}
+	if check.IfNil(args.MiniBlockInterceptor) {
+		return nil, ErrNilMiniBlockInterceptor
+	}
 	return &epochStartDataProvider{
-		publicKey:              args.PublicKey,
-		marshalizer:            args.Marshalizer,
-		hasher:                 args.Hasher,
-		messenger:              args.Messenger,
-		nodesConfigProvider:    args.NodesConfigProvider,
-		metaBlockInterceptor:   args.MetaBlockInterceptor,
-		shardHeaderInterceptor: args.ShardHeaderInterceptor,
+		publicKey:                      args.PublicKey,
+		marshalizer:                    args.Marshalizer,
+		hasher:                         args.Hasher,
+		messenger:                      args.Messenger,
+		nodesConfigProvider:            args.NodesConfigProvider,
+		epochStartMetaBlockInterceptor: args.EpochStartMetaBlockInterceptor,
+		metaBlockInterceptor:           args.MetaBlockInterceptor,
+		shardHeaderInterceptor:         args.ShardHeaderInterceptor,
+		miniBlockInterceptor:           args.MiniBlockInterceptor,
 	}, nil
 }
@@ -142,6 +149,8 @@ func (esdp *epochStartDataProvider) Bootstrap() (*ComponentsNeededForBootstrap,
 		return nil, err
 	}
 
+	esdp.changeMessageProcessorsForMetaBlocks()
+
 	log.Info("previous meta block", "epoch", prevMetaBlock.Epoch)
 	nodesConfig, err := esdp.nodesConfigProvider.GetNodesConfigForMetaBlock(metaBlock)
 	if err != nil {
@@ -163,6 +172,26 @@ func (esdp *epochStartDataProvider) Bootstrap() (*ComponentsNeededForBootstrap,
 		return nil, err
 	}
 
+	for _, mb := range epochStartData.PendingMiniBlockHeaders {
+		receivedMb, err := esdp.getMiniBlock(&mb)
+		if err != nil {
+			return nil, err
+		}
+		log.Info("received miniblock", "type", receivedMb.Type)
+	}
+
+	lastFinalizedMetaBlock, err := esdp.getMetaBlock(epochStartData.LastFinishedMetaBlock)
+	if err != nil {
+		return nil, err
+	}
+	log.Info("received last finalized meta block", "nonce", lastFinalizedMetaBlock.Nonce)
+
+	firstPendingMetaBlock, err := esdp.getMetaBlock(epochStartData.FirstPendingMetaBlock)
+	if err != nil {
+		return nil, err
+	}
+	log.Info("received first pending meta block", "nonce", firstPendingMetaBlock.Nonce)
+
 	trie, err := esdp.getTrieFromRootHash(epochStartData.RootHash)
 	if err != nil {
 		return nil, err
@@ -177,6 +206,18 @@ func (esdp *epochStartDataProvider) Bootstrap() (*ComponentsNeededForBootstrap,
 	}, nil
 }
 
+func (esdp *epochStartDataProvider) changeMessageProcessorsForMetaBlocks() {
+	err := esdp.messenger.UnregisterMessageProcessor(factory.MetachainBlocksTopic)
+	if err != nil {
+		log.Info("error unregistering message processor", "error", err)
+	}
+
+	err = esdp.messenger.RegisterMessageProcessor(factory.MetachainBlocksTopic, esdp.metaBlockInterceptor)
+	if err != nil {
+		log.Info("error registering message processor", "error", err)
+	}
+}
+
 func (esdp *epochStartDataProvider) createRequestHandler() (process.RequestHandler, error) {
 	dataPacker, err := partitioning.NewSimpleDataPacker(esdp.marshalizer)
 	if err != nil {
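// Illustrative sketch (not part of this patch): every fetch below derives its
// confirmation target from the live peer count. The helper makes the
// arithmetic concrete; the 0.4 factor is an assumed stand-in for the package's
// thresholdForConsideringMetaBlockCorrect constant.

func confirmationTarget(numConnectedPeers int) int {
	// int() truncates: 25 peers * 0.4 -> 10 matching peers required,
	// while 7 peers * 0.4 = 2.8 -> a target of only 2
	return int(0.4 * float64(numConnectedPeers))
}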
@@ -241,6 +282,19 @@ func (esdp *epochStartDataProvider) createRequestHandl
 func (esdp *epochStartDataProvider) getMiniBlock(miniBlockHeader *block.ShardMiniBlockHeader) (*block.MiniBlock, error) {
 	esdp.requestMiniBlock(miniBlockHeader)
+
+	time.Sleep(delayAfterRequesting)
+
+	for {
+		numConnectedPeers := len(esdp.messenger.Peers())
+		threshold := int(thresholdForConsideringMetaBlockCorrect * float64(numConnectedPeers))
+		mb, errConsensusNotReached := esdp.miniBlockInterceptor.GetMiniBlock(miniBlockHeader.Hash, threshold)
+		if errConsensusNotReached == nil {
+			return mb, nil
+		}
+		log.Info("consensus not reached for mini block. re-requesting and trying again...")
+		esdp.requestMiniBlock(miniBlockHeader)
+	}
 }
 
 func (esdp *epochStartDataProvider) requestMiniBlock(miniBlockHeader *block.ShardMiniBlockHeader) {
@@ -268,7 +322,7 @@ func (esdp *epochStartDataProvider) initTopicsAndInterceptors() error {
 		return err
 	}
 
-	err = esdp.messenger.RegisterMessageProcessor(factory.MetachainBlocksTopic, esdp.metaBlockInterceptor)
+	err = esdp.messenger.RegisterMessageProcessor(factory.MetachainBlocksTopic, esdp.epochStartMetaBlockInterceptor)
 	if err != nil {
 		return err
 	}
@@ -310,26 +364,43 @@ func (esdp *epochStartDataProvider) getTrieFromRootHash(_ []byte) (state.TriesHo
 }
 
 func (esdp *epochStartDataProvider) resetTopicsAndInterceptors() {
-	err := esdp.messenger.UnregisterMessageProcessor(factory.MetachainBlocksTopic)
+	err := esdp.messenger.UnregisterAllMessageProcessors()
 	if err != nil {
-		log.Info("error unregistering message processor", "error", err)
+		log.Info("error unregistering message processors", "error", err)
+	}
+}
+
+func (esdp *epochStartDataProvider) getMetaBlock(hash []byte) (*block.MetaBlock, error) {
+	esdp.requestMetaBlock(hash)
+
+	time.Sleep(delayAfterRequesting)
+
+	for {
+		numConnectedPeers := len(esdp.messenger.Peers())
+		threshold := int(thresholdForConsideringMetaBlockCorrect * float64(numConnectedPeers))
+		mb, errConsensusNotReached := esdp.metaBlockInterceptor.GetMetaBlock(hash, threshold)
+		if errConsensusNotReached == nil {
+			return mb, nil
+		}
+		log.Info("consensus not reached for meta block. re-requesting and trying again...")
+		esdp.requestMetaBlock(hash)
 	}
 }
 
 func (esdp *epochStartDataProvider) getEpochStartMetaBlock(epoch uint32) (*block.MetaBlock, error) {
-	esdp.requestMetaBlock(epoch)
+	esdp.requestEpochStartMetaBlock(epoch)
 
 	time.Sleep(delayAfterRequesting)
 
 	for {
 		numConnectedPeers := len(esdp.messenger.Peers())
 		threshold := int(thresholdForConsideringMetaBlockCorrect * float64(numConnectedPeers))
-		mb, errConsensusNotReached := esdp.metaBlockInterceptor.GetMetaBlock(threshold, epoch)
+		mb, errConsensusNotReached := esdp.epochStartMetaBlockInterceptor.GetEpochStartMetaBlock(threshold, epoch)
 		if errConsensusNotReached == nil {
 			return mb, nil
 		}
-		log.Info("consensus not reached for epoch start meta block. re-requesting and trying again...")
-		esdp.requestMetaBlock(epoch)
+		log.Info("consensus not reached for meta block. 
re-requesting and trying again...") + esdp.requestEpochStartMetaBlock(epoch) } } @@ -410,16 +481,25 @@ func (esdp *epochStartDataProvider) getShardHeader( } } +func (esdp *epochStartDataProvider) requestMetaBlock(hash []byte) { + // send more requests + log.Debug("requested meta block", "hash", hash) + for i := 0; i < numRequestsToSendOnce; i++ { + time.Sleep(delayBetweenRequests) + esdp.requestHandlerMeta.RequestMetaHeader(hash) + } +} + func (esdp *epochStartDataProvider) requestShardHeader(shardID uint32, hash []byte) { // send more requests - log.Debug("requsted shard block", "shard ID", shardID, "hash", hash) + log.Debug("requested shard block", "shard ID", shardID, "hash", hash) for i := 0; i < numRequestsToSendOnce; i++ { time.Sleep(delayBetweenRequests) esdp.requestHandlerMeta.RequestShardHeader(shardID, hash) } } -func (esdp *epochStartDataProvider) requestMetaBlock(epoch uint32) { +func (esdp *epochStartDataProvider) requestEpochStartMetaBlock(epoch uint32) { // send more requests for i := 0; i < numRequestsToSendOnce; i++ { time.Sleep(delayBetweenRequests) diff --git a/epochStart/bootstrap/epochStartDataProvider_test.go b/epochStart/bootstrap/epochStartDataProvider_test.go index 485da61c690..681e7a98b3b 100644 --- a/epochStart/bootstrap/epochStartDataProvider_test.go +++ b/epochStart/bootstrap/epochStartDataProvider_test.go @@ -5,11 +5,9 @@ import ( "testing" "github.com/ElrondNetwork/elrond-go/core/check" - "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap" "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/mock" mock2 "github.com/ElrondNetwork/elrond-go/epochStart/mock" - "github.com/ElrondNetwork/elrond-go/sharding" "github.com/stretchr/testify/require" ) @@ -45,6 +43,7 @@ func TestNewEpochStartDataProvider_NilMarshalizerShouldErr(t *testing.T) { require.Nil(t, epStart) require.Equal(t, bootstrap.ErrNilMarshalizer, err) } + func TestNewEpochStartDataProvider_NilHasherShouldErr(t *testing.T) { t.Parallel() @@ -55,6 +54,7 @@ func TestNewEpochStartDataProvider_NilHasherShouldErr(t *testing.T) { require.Nil(t, epStart) require.Equal(t, bootstrap.ErrNilHasher, err) } + func TestNewEpochStartDataProvider_NilNodesConfigProviderShouldErr(t *testing.T) { t.Parallel() @@ -65,6 +65,7 @@ func TestNewEpochStartDataProvider_NilNodesConfigProviderShouldErr(t *testing.T) require.Nil(t, epStart) require.Equal(t, bootstrap.ErrNilNodesConfigProvider, err) } + func TestNewEpochStartDataProvider_NilMetablockInterceptorShouldErr(t *testing.T) { t.Parallel() @@ -75,6 +76,7 @@ func TestNewEpochStartDataProvider_NilMetablockInterceptorShouldErr(t *testing.T require.Nil(t, epStart) require.Equal(t, bootstrap.ErrNilMetaBlockInterceptor, err) } + func TestNewEpochStartDataProvider_NilShardHeaderInterceptorShouldErr(t *testing.T) { t.Parallel() @@ -85,51 +87,46 @@ func TestNewEpochStartDataProvider_NilShardHeaderInterceptorShouldErr(t *testing require.Nil(t, epStart) require.Equal(t, bootstrap.ErrNilShardHeaderInterceptor, err) } -func TestNewEpochStartDataProvider_NilMetaBlockResolverShouldErr(t *testing.T) { + +func TestNewEpochStartDataProvider_NilMetaBlockInterceptorShouldErr(t *testing.T) { t.Parallel() args := getArguments() - args.MetaBlockResolver = nil + args.MetaBlockInterceptor = nil epStart, err := bootstrap.NewEpochStartDataProvider(args) require.Nil(t, epStart) - require.Equal(t, bootstrap.ErrNilMetaBlockResolver, err) + require.Equal(t, bootstrap.ErrNilMetaBlockInterceptor, err) } -func 
TestNewEpochStartDataProvider_OkValsShouldWork(t *testing.T) { + +func TestNewEpochStartDataProvider_NilMiniBlockInterceptorShouldErr(t *testing.T) { t.Parallel() args := getArguments() + args.MiniBlockInterceptor = nil epStart, err := bootstrap.NewEpochStartDataProvider(args) - require.Nil(t, err) - require.False(t, check.IfNil(epStart)) + require.Nil(t, epStart) + require.Equal(t, bootstrap.ErrNilMiniBlockInterceptor, err) } -func TestEpochStartDataProvider_Bootstrap_TopicCreationFailsShouldErr(t *testing.T) { +func TestNewEpochStartDataProvider_OkValsShouldWork(t *testing.T) { t.Parallel() - expectedErr := errors.New("error while creating topic") args := getArguments() - args.Messenger = &mock.MessengerStub{ - CreateTopicCalled: func(_ string, _ bool) error { - return expectedErr - }, - } - epStart, _ := bootstrap.NewEpochStartDataProvider(args) - - res, err := epStart.Bootstrap() + epStart, err := bootstrap.NewEpochStartDataProvider(args) - require.Nil(t, res) - require.Equal(t, expectedErr, err) + require.Nil(t, err) + require.False(t, check.IfNil(epStart)) } -func TestEpochStartDataProvider_Bootstrap_MetaBlockRequestFailsShouldErr(t *testing.T) { +func TestEpochStartDataProvider_Bootstrap_TopicCreationFailsShouldErr(t *testing.T) { t.Parallel() expectedErr := errors.New("error while creating topic") args := getArguments() - args.MetaBlockResolver = &mock.MetaBlockResolverStub{ - RequestEpochStartMetaBlockCalled: func(_ uint32) error { + args.Messenger = &mock.MessengerStub{ + CreateTopicCalled: func(_ string, _ bool) error { return expectedErr }, } @@ -141,50 +138,16 @@ func TestEpochStartDataProvider_Bootstrap_MetaBlockRequestFailsShouldErr(t *test require.Equal(t, expectedErr, err) } -func TestEpochStartDataProvider_Bootstrap_GetNodesConfigFailsShouldErr(t *testing.T) { - t.Parallel() - - expectedErr := errors.New("error while creating topic") - args := getArguments() - args.NodesConfigProvider = &mock.NodesConfigProviderStub{ - GetNodesConfigForMetaBlockCalled: func(_ *block.MetaBlock) (*sharding.NodesSetup, error) { - return &sharding.NodesSetup{}, expectedErr - }, - } - epStart, _ := bootstrap.NewEpochStartDataProvider(args) - - res, err := epStart.Bootstrap() - - require.Nil(t, res) - require.Equal(t, expectedErr, err) -} - -func TestEpochStartDataProvider_Bootstrap_ShouldWork(t *testing.T) { - t.Parallel() - - args := getArguments() - args.NodesConfigProvider = &mock.NodesConfigProviderStub{ - GetNodesConfigForMetaBlockCalled: func(_ *block.MetaBlock) (*sharding.NodesSetup, error) { - return &sharding.NodesSetup{}, nil - }, - } - epStart, _ := bootstrap.NewEpochStartDataProvider(args) - - res, err := epStart.Bootstrap() - - require.Nil(t, err) - require.NotNil(t, res) -} - func getArguments() bootstrap.ArgsEpochStartDataProvider { return bootstrap.ArgsEpochStartDataProvider{ - PublicKey: &mock.PublicKeyMock{}, - Messenger: &mock.MessengerStub{}, - Marshalizer: &mock2.MarshalizerMock{}, - Hasher: mock2.HasherMock{}, - NodesConfigProvider: &mock.NodesConfigProviderStub{}, - MetaBlockInterceptor: &mock.MetaBlockInterceptorStub{}, - ShardHeaderInterceptor: &mock.ShardHeaderInterceptorStub{}, - MetaBlockResolver: &mock.MetaBlockResolverStub{}, + PublicKey: &mock.PublicKeyMock{}, + Messenger: &mock.MessengerStub{}, + Marshalizer: &mock2.MarshalizerMock{}, + Hasher: mock2.HasherMock{}, + NodesConfigProvider: &mock.NodesConfigProviderStub{}, + EpochStartMetaBlockInterceptor: &mock.EpochStartMetaBlockInterceptorStub{}, + MetaBlockInterceptor: &mock.MetaBlockInterceptorStub{}, + 
ShardHeaderInterceptor:         &mock.ShardHeaderInterceptorStub{},
+		MiniBlockInterceptor:           &mock.MiniBlockInterceptorStub{},
 	}
 }
diff --git a/epochStart/bootstrap/errors.go b/epochStart/bootstrap/errors.go
index e87204a0d81..0734ee070fe 100644
--- a/epochStart/bootstrap/errors.go
+++ b/epochStart/bootstrap/errors.go
@@ -17,14 +17,17 @@ var ErrNilHasher = errors.New("nil hasher")
 // ErrNilNodesConfigProvider signals that a nil nodes config provider has been given
 var ErrNilNodesConfigProvider = errors.New("nil nodes config provider")
 
+// ErrNilEpochStartMetaBlockInterceptor signals that a nil epoch start metablock interceptor has been provided
+var ErrNilEpochStartMetaBlockInterceptor = errors.New("nil epoch start metablock interceptor")
+
 // ErrNilMetaBlockInterceptor signals that a nil metablock interceptor has been provided
 var ErrNilMetaBlockInterceptor = errors.New("nil metablock interceptor")
 
 // ErrNilShardHeaderInterceptor signals that a nil shard header interceptor has been provided
 var ErrNilShardHeaderInterceptor = errors.New("nil shard header interceptor")
 
-// ErrNilMetaBlockResolver signals that a nil metablock resolver has been provided
-var ErrNilMetaBlockResolver = errors.New("nil metablock resolver")
+// ErrNilMiniBlockInterceptor signals that a nil mini block interceptor has been provided
+var ErrNilMiniBlockInterceptor = errors.New("nil mini block interceptor")
 
 // ErrNumTriesExceeded signals that there were too many tries for fetching a metablock
 var ErrNumTriesExceeded = errors.New("num of tries exceeded. try re-request")
diff --git a/epochStart/bootstrap/export_test.go b/epochStart/bootstrap/export_test.go
index ce44c401fda..7920331caef 100644
--- a/epochStart/bootstrap/export_test.go
+++ b/epochStart/bootstrap/export_test.go
@@ -5,14 +5,14 @@ import (
 	"github.com/ElrondNetwork/elrond-go/p2p"
 )
 
-func (s *simpleMetaBlockInterceptor) GetReceivedMetablocks() map[string]*block.MetaBlock {
+func (s *simpleEpochStartMetaBlockInterceptor) GetReceivedMetablocks() map[string]*block.MetaBlock {
 	s.mutReceivedMetaBlocks.RLock()
 	defer s.mutReceivedMetaBlocks.RUnlock()
 
 	return s.mapReceivedMetaBlocks
 }
 
-func (s *simpleMetaBlockInterceptor) GetPeersSliceForMetablocks() map[string][]p2p.PeerID {
+func (s *simpleEpochStartMetaBlockInterceptor) GetPeersSliceForMetablocks() map[string][]p2p.PeerID {
 	s.mutReceivedMetaBlocks.RLock()
 	defer s.mutReceivedMetaBlocks.RUnlock()
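The export_test.go accessors above stay in package bootstrap, which is what lets the black-box tests inspect the interceptor's internal maps. A hypothetical assertion built on them (imports and mock aliases as in the interceptor tests later in this patch; the fixture values are made up):

func TestProcessReceivedMessage_RecordsBlockAndPeer(t *testing.T) {
	t.Parallel()

	marshalizer := &mock.MarshalizerMock{}
	smbi, _ := bootstrap.NewSimpleEpochStartMetaBlockInterceptor(marshalizer, &mock.HasherMock{})

	mbBytes, _ := marshalizer.Marshal(&block.MetaBlock{Epoch: 5})
	message := mock2.P2PMessageMock{DataField: mbBytes}
	_ = smbi.ProcessReceivedMessage(&message, nil)

	// one hash recorded, delivered by exactly one peer
	require.Len(t, smbi.GetReceivedMetablocks(), 1)
	for _, peers := range smbi.GetPeersSliceForMetablocks() {
		require.Len(t, peers, 1)
	}
}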
diff --git a/epochStart/bootstrap/factory/epochStartDataProviderFactory.go b/epochStart/bootstrap/factory/epochStartDataProviderFactory.go
index eeb59a9adfe..3634fbc6dd6 100644
--- a/epochStart/bootstrap/factory/epochStartDataProviderFactory.go
+++ b/epochStart/bootstrap/factory/epochStartDataProviderFactory.go
@@ -78,6 +78,10 @@ func (esdpf *epochStartDataProviderFactory) Create() (bootstrap.EpochStartDataPr
 		return &disabledEpochStartDataProvider{}, nil
 	}
 
+	epochStartMetaBlockInterceptor, err := bootstrap.NewSimpleEpochStartMetaBlockInterceptor(esdpf.marshalizer, esdpf.hasher)
+	if err != nil {
+		return nil, err
+	}
 	metaBlockInterceptor, err := bootstrap.NewSimpleMetaBlockInterceptor(esdpf.marshalizer, esdpf.hasher)
 	if err != nil {
 		return nil, err
@@ -86,14 +90,21 @@ func (esdpf *epochStartDataProviderFactory) Create() (bootstrap.EpochStartDataPr
 	if err != nil {
 		return nil, err
 	}
+	miniBlockInterceptor, err := bootstrap.NewSimpleMiniBlockInterceptor(esdpf.marshalizer, esdpf.hasher)
+	if err != nil {
+		return nil, err
+	}
+
 	argsEpochStart := bootstrap.ArgsEpochStartDataProvider{
-		PublicKey:              esdpf.pubKey,
-		Messenger:              esdpf.messenger,
-		Marshalizer:            esdpf.marshalizer,
-		Hasher:                 esdpf.hasher,
-		NodesConfigProvider:    esdpf.nodesConfigProvider,
-		MetaBlockInterceptor:   metaBlockInterceptor,
-		ShardHeaderInterceptor: shardHdrInterceptor,
+		PublicKey:                      esdpf.pubKey,
+		Messenger:                      esdpf.messenger,
+		Marshalizer:                    esdpf.marshalizer,
+		Hasher:                         esdpf.hasher,
+		NodesConfigProvider:            esdpf.nodesConfigProvider,
+		EpochStartMetaBlockInterceptor: epochStartMetaBlockInterceptor,
+		MetaBlockInterceptor:           metaBlockInterceptor,
+		ShardHeaderInterceptor:         shardHdrInterceptor,
+		MiniBlockInterceptor:           miniBlockInterceptor,
 	}
 
 	epochStartDataProvider, err := bootstrap.NewEpochStartDataProvider(argsEpochStart)
diff --git a/epochStart/bootstrap/interface.go b/epochStart/bootstrap/interface.go
index e1e766599f0..7bb3dd4b1f4 100644
--- a/epochStart/bootstrap/interface.go
+++ b/epochStart/bootstrap/interface.go
@@ -6,10 +6,16 @@ import (
 	"github.com/ElrondNetwork/elrond-go/sharding"
 )
 
+// EpochStartMetaBlockInterceptorHandler defines what a component which will handle receiving the epoch start meta blocks should do
+type EpochStartMetaBlockInterceptorHandler interface {
+	process.Interceptor
+	GetEpochStartMetaBlock(target int, epoch uint32) (*block.MetaBlock, error)
+}
+
 // MetaBlockInterceptorHandler defines what a component which will handle receiving the meta blocks should do
 type MetaBlockInterceptorHandler interface {
 	process.Interceptor
-	GetMetaBlock(target int, epoch uint32) (*block.MetaBlock, error)
+	GetMetaBlock(hash []byte, target int) (*block.MetaBlock, error)
 }
 
 // ShardHeaderInterceptorHandler defines what a component which will handle receiving the shard headers should do
@@ -18,22 +24,10 @@
 type ShardHeaderInterceptorHandler interface {
 	process.Interceptor
 	GetShardHeader(target int) (*block.Header, error)
 }
 
-// MetaBlockResolverHandler defines what a component which will handle requesting a meta block should do
-type MetaBlockResolverHandler interface {
-	RequestEpochStartMetaBlock(epoch uint32) error
-	IsInterfaceNil() bool
-}
-
-// ShardHeaderResolverHandler defines what a component which will handle requesting a shard block should do
-type ShardHeaderResolverHandler interface {
-	RequestHeaderByHash(hash []byte, epoch uint32) error
-	IsInterfaceNil() bool
-}
-
-// MiniBlockResolverHandler defines what a component which will handle requesting a mini block should do
-type MiniBlockResolverHandler interface {
-	RequestHeaderByHash(hash []byte, epoch uint32) error
-	IsInterfaceNil() bool
+// MiniBlockInterceptorHandler defines what a component which will handle receiving the mini blocks should do
+type MiniBlockInterceptorHandler interface {
+	process.Interceptor
+	GetMiniBlock(hash []byte, target int) (*block.MiniBlock, error)
 }
 
 // NodesConfigProviderHandler defines what a component which will handle the nodes config should be able to do
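The mock added below follows the package's function-field stub pattern: a test overrides only the call it cares about and every other method falls back to a harmless default. A hypothetical wiring (the values and the test name are invented for illustration):

func TestStubOverride(t *testing.T) {
	stub := &mock.EpochStartMetaBlockInterceptorStub{
		GetMetaBlockCalled: func(target int, epoch uint32) (*block.MetaBlock, error) {
			return &block.MetaBlock{Epoch: epoch}, nil
		},
	}

	mb, err := stub.GetEpochStartMetaBlock(2, 7) // dispatches to the override
	require.Nil(t, err)
	require.Equal(t, uint32(7), mb.Epoch)
}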
diff --git a/epochStart/bootstrap/mock/epochStartMetaBlockInterceptorStub.go b/epochStart/bootstrap/mock/epochStartMetaBlockInterceptorStub.go
new file mode 100644
index 00000000000..9e64b75e25a
--- /dev/null
+++ b/epochStart/bootstrap/mock/epochStartMetaBlockInterceptorStub.go
@@ -0,0 +1,35 @@
+package mock
+
+import (
+	"github.com/ElrondNetwork/elrond-go/data/block"
+	"github.com/ElrondNetwork/elrond-go/p2p"
+)
+
+// EpochStartMetaBlockInterceptorStub -
+type EpochStartMetaBlockInterceptorStub struct {
+	ProcessReceivedMessageCalled func(message p2p.MessageP2P, broadcastHandler func(buffToSend []byte)) error
+	GetMetaBlockCalled           func(target int, epoch uint32) (*block.MetaBlock, error)
+}
+
+// ProcessReceivedMessage -
+func (m *EpochStartMetaBlockInterceptorStub) ProcessReceivedMessage(message p2p.MessageP2P, broadcastHandler func(buffToSend []byte)) error {
+	if m.ProcessReceivedMessageCalled != nil {
+		return m.ProcessReceivedMessageCalled(message, broadcastHandler)
+	}
+
+	return nil
+}
+
+// GetEpochStartMetaBlock -
+func (m *EpochStartMetaBlockInterceptorStub) GetEpochStartMetaBlock(target int, epoch uint32) (*block.MetaBlock, error) {
+	if m.GetMetaBlockCalled != nil {
+		return m.GetMetaBlockCalled(target, epoch)
+	}
+
+	return &block.MetaBlock{}, nil
+}
+
+// IsInterfaceNil -
+func (m *EpochStartMetaBlockInterceptorStub) IsInterfaceNil() bool {
+	return m == nil
+}
diff --git a/epochStart/bootstrap/mock/messengerStub.go b/epochStart/bootstrap/mock/messengerStub.go
index 3aac14f2ad0..b796fcc5066 100644
--- a/epochStart/bootstrap/mock/messengerStub.go
+++ b/epochStart/bootstrap/mock/messengerStub.go
@@ -4,51 +4,72 @@ import "github.com/ElrondNetwork/elrond-go/p2p"
 
 // MessengerStub -
 type MessengerStub struct {
-	CloseCalled                       func() error
-	IDCalled                          func() p2p.PeerID
-	PeersCalled                       func() []p2p.PeerID
-	AddressesCalled                   func() []string
-	ConnectToPeerCalled               func(address string) error
-	ConnectedPeersOnTopicCalled       func(topic string) []p2p.PeerID
-	TrimConnectionsCalled             func()
-	IsConnectedCalled                 func(peerID p2p.PeerID) bool
-	ConnectedPeersCalled              func() []p2p.PeerID
-	CreateTopicCalled                 func(name string, createChannelForTopic bool) error
-	HasTopicCalled                    func(name string) bool
-	HasTopicValidatorCalled           func(name string) bool
-	BroadcastOnChannelCalled          func(channel string, topic string, buff []byte)
-	BroadcastCalled                   func(topic string, buff []byte)
-	RegisterMessageProcessorCalled    func(topic string, handler p2p.MessageProcessor) error
-	UnregisterMessageProcessorCalled  func(topic string) error
-	SendToConnectedPeerCalled         func(topic string, buff []byte, peerID p2p.PeerID) error
-	OutgoingChannelLoadBalancerCalled func() p2p.ChannelLoadBalancer
-	BootstrapCalled                   func() error
+	CloseCalled                          func() error
+	IDCalled                             func() p2p.PeerID
+	PeersCalled                          func() []p2p.PeerID
+	AddressesCalled                      func() []string
+	ConnectToPeerCalled                  func(address string) error
+	ConnectedPeersOnTopicCalled          func(topic string) []p2p.PeerID
+	TrimConnectionsCalled                func()
+	IsConnectedCalled                    func(peerID p2p.PeerID) bool
+	ConnectedPeersCalled                 func() []p2p.PeerID
+	CreateTopicCalled                    func(name string, createChannelForTopic bool) error
+	HasTopicCalled                       func(name string) bool
+	HasTopicValidatorCalled              func(name string) bool
+	BroadcastOnChannelCalled             func(channel string, topic string, buff []byte)
+	BroadcastCalled                      func(topic string, buff []byte)
+	RegisterMessageProcessorCalled       func(topic string, handler p2p.MessageProcessor) error
+	UnregisterAllMessageProcessorsCalled func() error
+	UnregisterMessageProcessorCalled     func(topic string) error
+	SendToConnectedPeerCalled            func(topic string, buff []byte, peerID p2p.PeerID) error
+	OutgoingChannelLoadBalancerCalled    func() p2p.ChannelLoadBalancer
+	BootstrapCalled                      func() error
+}
+
+// UnregisterAllMessageProcessors -
+func (ms *MessengerStub) UnregisterAllMessageProcessors() error {
+	if ms.UnregisterAllMessageProcessorsCalled != nil {
+		return ms.UnregisterAllMessageProcessorsCalled()
+	}
+
+	return nil
 }
 
+// ConnectedAddresses -
 func (ms *MessengerStub) ConnectedAddresses() []string {
 	panic("implement me")
 }
 
+// PeerAddress -
 func (ms *MessengerStub) PeerAddress(pid p2p.PeerID) string {
 	panic("implement me")
 }
 
+// ConnectedPeersOnTopic -
 func (ms *MessengerStub) ConnectedPeersOnTopic(topic string) 
[]p2p.PeerID { - return ms.ConnectedPeersOnTopicCalled(topic) + if ms.ConnectedPeersOnTopicCalled != nil { + return ms.ConnectedPeersOnTopicCalled(topic) + } + + return nil } +// BroadcastOnChannelBlocking - func (ms *MessengerStub) BroadcastOnChannelBlocking(channel string, topic string, buff []byte) error { panic("implement me") } +// IsConnectedToTheNetwork - func (ms *MessengerStub) IsConnectedToTheNetwork() bool { panic("implement me") } +// ThresholdMinConnectedPeers - func (ms *MessengerStub) ThresholdMinConnectedPeers() int { panic("implement me") } +// SetThresholdMinConnectedPeers - func (ms *MessengerStub) SetThresholdMinConnectedPeers(minConnectedPeers int) error { panic("implement me") } diff --git a/epochStart/bootstrap/mock/metaBlockInterceptorStub.go b/epochStart/bootstrap/mock/metaBlockInterceptorStub.go index 55e826ad89f..b6697372f71 100644 --- a/epochStart/bootstrap/mock/metaBlockInterceptorStub.go +++ b/epochStart/bootstrap/mock/metaBlockInterceptorStub.go @@ -8,7 +8,7 @@ import ( // MetaBlockInterceptorStub - type MetaBlockInterceptorStub struct { ProcessReceivedMessageCalled func(message p2p.MessageP2P, broadcastHandler func(buffToSend []byte)) error - GetMetaBlockCalled func(target int, epoch uint32) (*block.MetaBlock, error) + GetMetaBlockCalled func(hash []byte, target int) (*block.MetaBlock, error) } // ProcessReceivedMessage - @@ -21,9 +21,9 @@ func (m *MetaBlockInterceptorStub) ProcessReceivedMessage(message p2p.MessageP2P } // GetMetaBlock - -func (m *MetaBlockInterceptorStub) GetMetaBlock(target int, epoch uint32) (*block.MetaBlock, error) { +func (m *MetaBlockInterceptorStub) GetMetaBlock(hash []byte, target int) (*block.MetaBlock, error) { if m.GetMetaBlockCalled != nil { - return m.GetMetaBlockCalled(target, epoch) + return m.GetMetaBlockCalled(hash, target) } return &block.MetaBlock{}, nil diff --git a/epochStart/bootstrap/mock/miniBlockInterceptorStub.go b/epochStart/bootstrap/mock/miniBlockInterceptorStub.go new file mode 100644 index 00000000000..4b6569f9435 --- /dev/null +++ b/epochStart/bootstrap/mock/miniBlockInterceptorStub.go @@ -0,0 +1,35 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/p2p" +) + +// MiniBlockInterceptorStub - +type MiniBlockInterceptorStub struct { + ProcessReceivedMessageCalled func(message p2p.MessageP2P, broadcastHandler func(buffToSend []byte)) error + GetMiniBlockCalled func(hash []byte, target int) (*block.MiniBlock, error) +} + +// ProcessReceivedMessage - +func (m *MiniBlockInterceptorStub) ProcessReceivedMessage(message p2p.MessageP2P, broadcastHandler func(buffToSend []byte)) error { + if m.ProcessReceivedMessageCalled != nil { + return m.ProcessReceivedMessageCalled(message, broadcastHandler) + } + + return nil +} + +// GetMiniBlock - +func (m *MiniBlockInterceptorStub) GetMiniBlock(hash []byte, target int) (*block.MiniBlock, error) { + if m.GetMiniBlockCalled != nil { + return m.GetMiniBlockCalled(hash, target) + } + + return &block.MiniBlock{}, nil +} + +// IsInterfaceNil - +func (m *MiniBlockInterceptorStub) IsInterfaceNil() bool { + return m == nil +} diff --git a/epochStart/bootstrap/mock/shardHeaderInterceptorStub.go b/epochStart/bootstrap/mock/shardHeaderInterceptorStub.go index 181a95089a8..69c8f0ad6f3 100644 --- a/epochStart/bootstrap/mock/shardHeaderInterceptorStub.go +++ b/epochStart/bootstrap/mock/shardHeaderInterceptorStub.go @@ -9,6 +9,16 @@ import ( type ShardHeaderInterceptorStub struct { ProcessReceivedMessageCalled func(message 
p2p.MessageP2P, broadcastHandler func(buffToSend []byte)) error GetAllReceivedShardHeadersCalled func() []block.ShardData + GetShardHeaderCalled func(target int) (*block.Header, error) +} + +// GetShardHeader - +func (s *ShardHeaderInterceptorStub) GetShardHeader(target int) (*block.Header, error) { + if s.GetShardHeaderCalled != nil { + return s.GetShardHeaderCalled(target) + } + + return &block.Header{}, nil } // ProcessReceivedMessage - diff --git a/epochStart/bootstrap/simpleEpochStartMetaBlockInterceptor.go b/epochStart/bootstrap/simpleEpochStartMetaBlockInterceptor.go new file mode 100644 index 00000000000..022b88245bb --- /dev/null +++ b/epochStart/bootstrap/simpleEpochStartMetaBlockInterceptor.go @@ -0,0 +1,131 @@ +package bootstrap + +import ( + "math" + "sync" + "time" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/p2p" +) + +const timeToWaitBeforeCheckingReceivedHeaders = 1 * time.Second +const numTriesUntilExit = 5 + +type simpleEpochStartMetaBlockInterceptor struct { + marshalizer marshal.Marshalizer + hasher hashing.Hasher + mutReceivedMetaBlocks sync.RWMutex + mapReceivedMetaBlocks map[string]*block.MetaBlock + mapMetaBlocksFromPeers map[string][]p2p.PeerID +} + +// NewSimpleEpochStartMetaBlockInterceptor will return a new instance of simpleEpochStartMetaBlockInterceptor +func NewSimpleEpochStartMetaBlockInterceptor(marshalizer marshal.Marshalizer, hasher hashing.Hasher) (*simpleEpochStartMetaBlockInterceptor, error) { + if check.IfNil(marshalizer) { + return nil, ErrNilMarshalizer + } + if check.IfNil(hasher) { + return nil, ErrNilHasher + } + + return &simpleEpochStartMetaBlockInterceptor{ + marshalizer: marshalizer, + hasher: hasher, + mutReceivedMetaBlocks: sync.RWMutex{}, + mapReceivedMetaBlocks: make(map[string]*block.MetaBlock), + mapMetaBlocksFromPeers: make(map[string][]p2p.PeerID), + }, nil +} + +// ProcessReceivedMessage will receive the metablocks and will add them to the maps +func (s *simpleEpochStartMetaBlockInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, _ func(buffToSend []byte)) error { + var mb block.MetaBlock + err := s.marshalizer.Unmarshal(&mb, message.Data()) + if err != nil { + return err + } + s.mutReceivedMetaBlocks.Lock() + mbHash, err := core.CalculateHash(s.marshalizer, s.hasher, mb) + if err != nil { + s.mutReceivedMetaBlocks.Unlock() + return err + } + s.mapReceivedMetaBlocks[string(mbHash)] = &mb + s.addToPeerList(string(mbHash), message.Peer()) + s.mutReceivedMetaBlocks.Unlock() + + return nil +} + +// this func should be called under mutex protection +func (s *simpleEpochStartMetaBlockInterceptor) addToPeerList(hash string, id p2p.PeerID) { + peersListForHash, ok := s.mapMetaBlocksFromPeers[hash] + + if !ok { + s.mapMetaBlocksFromPeers[hash] = append(s.mapMetaBlocksFromPeers[hash], id) + return + } + + for _, peer := range peersListForHash { + if peer == id { + return + } + } + + s.mapMetaBlocksFromPeers[hash] = append(s.mapMetaBlocksFromPeers[hash], id) +} + +// GetEpochStartMetaBlock will return the metablock after it is confirmed or an error if the number of tries was exceeded +func (s *simpleEpochStartMetaBlockInterceptor) GetEpochStartMetaBlock(target int, epoch uint32) (*block.MetaBlock, error) { + for count := 0; count < numTriesUntilExit; count++ { + 
time.Sleep(timeToWaitBeforeCheckingReceivedHeaders) + s.mutReceivedMetaBlocks.RLock() + for hash, peersList := range s.mapMetaBlocksFromPeers { + log.Debug("metablock from peers", "num peers", len(peersList), "target", target, "hash", []byte(hash)) + isOk := s.isMapEntryOk(peersList, hash, target, epoch) + if isOk { + s.mutReceivedMetaBlocks.RUnlock() + metaBlockToReturn := s.mapReceivedMetaBlocks[hash] + s.clearFields() + return metaBlockToReturn, nil + } + } + s.mutReceivedMetaBlocks.RUnlock() + } + + return nil, ErrNumTriesExceeded +} + +func (s *simpleEpochStartMetaBlockInterceptor) isMapEntryOk( + peersList []p2p.PeerID, + hash string, + target int, + epoch uint32, +) bool { + mb := s.mapReceivedMetaBlocks[hash] + epochCheckNotRequired := epoch == math.MaxUint32 + isEpochOk := epochCheckNotRequired || mb.Epoch == epoch + if len(peersList) >= target && isEpochOk { + log.Info("got consensus for epoch start metablock", "len", len(peersList)) + return true + } + + return false +} + +func (s *simpleEpochStartMetaBlockInterceptor) clearFields() { + s.mutReceivedMetaBlocks.Lock() + s.mapReceivedMetaBlocks = make(map[string]*block.MetaBlock) + s.mapMetaBlocksFromPeers = make(map[string][]p2p.PeerID) + s.mutReceivedMetaBlocks.Unlock() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (s *simpleEpochStartMetaBlockInterceptor) IsInterfaceNil() bool { + return s == nil +} diff --git a/epochStart/bootstrap/simpleMetaBlockInterceptor_test.go b/epochStart/bootstrap/simpleEpochStartMetaBlockInterceptor_test.go similarity index 78% rename from epochStart/bootstrap/simpleMetaBlockInterceptor_test.go rename to epochStart/bootstrap/simpleEpochStartMetaBlockInterceptor_test.go index 7e2bb103ac3..675f14571ac 100644 --- a/epochStart/bootstrap/simpleMetaBlockInterceptor_test.go +++ b/epochStart/bootstrap/simpleEpochStartMetaBlockInterceptor_test.go @@ -14,7 +14,7 @@ import ( func TestNewSimpleMetaBlockInterceptor_NilMarshalizerShouldErr(t *testing.T) { t.Parallel() - smbi, err := bootstrap.NewSimpleMetaBlockInterceptor(nil, &mock.HasherMock{}) + smbi, err := bootstrap.NewSimpleEpochStartMetaBlockInterceptor(nil, &mock.HasherMock{}) require.Nil(t, smbi) require.Equal(t, bootstrap.ErrNilMarshalizer, err) } @@ -22,7 +22,7 @@ func TestNewSimpleMetaBlockInterceptor_NilMarshalizerShouldErr(t *testing.T) { func TestNewSimpleMetaBlockInterceptor_NilHasherShouldErr(t *testing.T) { t.Parallel() - smbi, err := bootstrap.NewSimpleMetaBlockInterceptor(&mock.MarshalizerMock{}, nil) + smbi, err := bootstrap.NewSimpleEpochStartMetaBlockInterceptor(&mock.MarshalizerMock{}, nil) require.Nil(t, smbi) require.Equal(t, bootstrap.ErrNilHasher, err) } @@ -30,7 +30,7 @@ func TestNewSimpleMetaBlockInterceptor_NilHasherShouldErr(t *testing.T) { func TestNewSimpleMetaBlockInterceptor_OkValsShouldWork(t *testing.T) { t.Parallel() - smbi, err := bootstrap.NewSimpleMetaBlockInterceptor(&mock.MarshalizerMock{}, &mock.HasherMock{}) + smbi, err := bootstrap.NewSimpleEpochStartMetaBlockInterceptor(&mock.MarshalizerMock{}, &mock.HasherMock{}) require.Nil(t, err) require.False(t, check.IfNil(smbi)) } @@ -38,7 +38,7 @@ func TestNewSimpleMetaBlockInterceptor_OkValsShouldWork(t *testing.T) { func TestSimpleMetaBlockInterceptor_ProcessReceivedMessage_ReceivedMessageIsNotAMetaBlockShouldNotAdd(t *testing.T) { t.Parallel() - smbi, _ := bootstrap.NewSimpleMetaBlockInterceptor(&mock.MarshalizerMock{}, &mock.HasherMock{}) + smbi, _ := bootstrap.NewSimpleEpochStartMetaBlockInterceptor(&mock.MarshalizerMock{}, 
&mock.HasherMock{}) message := mock2.P2PMessageMock{ DataField: []byte("not a metablock"), @@ -53,7 +53,7 @@ func TestSimpleMetaBlockInterceptor_ProcessReceivedMessage_UnmarshalFailsShouldE t.Parallel() marshalizer := &mock.MarshalizerMock{Fail: true} - smbi, _ := bootstrap.NewSimpleMetaBlockInterceptor(marshalizer, &mock.HasherMock{}) + smbi, _ := bootstrap.NewSimpleEpochStartMetaBlockInterceptor(marshalizer, &mock.HasherMock{}) mb := &block.MetaBlock{Epoch: 5} mbBytes, _ := marshalizer.Marshal(mb) @@ -70,7 +70,7 @@ func TestSimpleMetaBlockInterceptor_ProcessReceivedMessage_ReceivedMessageIsAMet t.Parallel() marshalizer := &mock.MarshalizerMock{} - smbi, _ := bootstrap.NewSimpleMetaBlockInterceptor(marshalizer, &mock.HasherMock{}) + smbi, _ := bootstrap.NewSimpleEpochStartMetaBlockInterceptor(marshalizer, &mock.HasherMock{}) mb := &block.MetaBlock{Epoch: 5} mbBytes, _ := marshalizer.Marshal(mb) @@ -87,7 +87,7 @@ func TestSimpleMetaBlockInterceptor_ProcessReceivedMessage_ShouldAddForMorePeers t.Parallel() marshalizer := &mock.MarshalizerMock{} - smbi, _ := bootstrap.NewSimpleMetaBlockInterceptor(marshalizer, &mock.HasherMock{}) + smbi, _ := bootstrap.NewSimpleEpochStartMetaBlockInterceptor(marshalizer, &mock.HasherMock{}) mb := &block.MetaBlock{Epoch: 5} mbBytes, _ := marshalizer.Marshal(mb) @@ -112,7 +112,7 @@ func TestSimpleMetaBlockInterceptor_ProcessReceivedMessage_ShouldNotAddTwiceForT t.Parallel() marshalizer := &mock.MarshalizerMock{} - smbi, _ := bootstrap.NewSimpleMetaBlockInterceptor(marshalizer, &mock.HasherMock{}) + smbi, _ := bootstrap.NewSimpleEpochStartMetaBlockInterceptor(marshalizer, &mock.HasherMock{}) mb := &block.MetaBlock{Epoch: 5} mbBytes, _ := marshalizer.Marshal(mb) @@ -137,10 +137,10 @@ func TestSimpleMetaBlockInterceptor_GetMetaBlock_NumTriesExceededShouldErr(t *te t.Parallel() marshalizer := &mock.MarshalizerMock{} - smbi, _ := bootstrap.NewSimpleMetaBlockInterceptor(marshalizer, &mock.HasherMock{}) + smbi, _ := bootstrap.NewSimpleEpochStartMetaBlockInterceptor(marshalizer, &mock.HasherMock{}) // no message received, so should exit with err - mb, err := smbi.GetMetaBlock(2, 5) + mb, err := smbi.GetEpochStartMetaBlock(2, 5) require.Zero(t, mb) require.Equal(t, bootstrap.ErrNumTriesExceeded, err) } @@ -149,7 +149,7 @@ func TestSimpleMetaBlockInterceptor_GetMetaBlockShouldWork(t *testing.T) { t.Parallel() marshalizer := &mock.MarshalizerMock{} - smbi, _ := bootstrap.NewSimpleMetaBlockInterceptor(marshalizer, &mock.HasherMock{}) + smbi, _ := bootstrap.NewSimpleEpochStartMetaBlockInterceptor(marshalizer, &mock.HasherMock{}) mb := &block.MetaBlock{Epoch: 5} mbBytes, _ := marshalizer.Marshal(mb) @@ -165,7 +165,7 @@ func TestSimpleMetaBlockInterceptor_GetMetaBlockShouldWork(t *testing.T) { _ = smbi.ProcessReceivedMessage(message1, nil) _ = smbi.ProcessReceivedMessage(message2, nil) - mb, err := smbi.GetMetaBlock(2, 5) + mb, err := smbi.GetEpochStartMetaBlock(2, 5) require.Nil(t, err) require.NotNil(t, mb) } diff --git a/epochStart/bootstrap/simpleMetaBlockInterceptor.go b/epochStart/bootstrap/simpleMetaBlockInterceptor.go index f3a715f3449..161fb2e64ba 100644 --- a/epochStart/bootstrap/simpleMetaBlockInterceptor.go +++ b/epochStart/bootstrap/simpleMetaBlockInterceptor.go @@ -1,7 +1,7 @@ package bootstrap import ( - "math" + "bytes" "sync" "time" @@ -13,9 +13,6 @@ import ( "github.com/ElrondNetwork/elrond-go/p2p" ) -const timeToWaitBeforeCheckingReceivedHeaders = 1 * time.Second -const numTriesUntilExit = 5 - type simpleMetaBlockInterceptor struct { marshalizer 
marshal.Marshalizer
 	hasher      hashing.Hasher
@@ -44,6 +41,7 @@ func NewSimpleMetaBlockInterceptor(marshalizer marshal.Marshalizer, hasher hashi
 
 // ProcessReceivedMessage will receive the metablocks and will add them to the maps
 func (s *simpleMetaBlockInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, _ func(buffToSend []byte)) error {
+	log.Info("received meta block")
 	var mb block.MetaBlock
 	err := s.marshalizer.Unmarshal(&mb, message.Data())
 	if err != nil {
@@ -81,16 +79,15 @@ func (s *simpleMetaBlockInterceptor) addToPeerList(hash string, id p2p.PeerID) {
 }
 
 // GetMetaBlock will return the metablock after it is confirmed or an error if the number of tries was exceeded
-func (s *simpleMetaBlockInterceptor) GetMetaBlock(target int, epoch uint32) (*block.MetaBlock, error) {
+func (s *simpleMetaBlockInterceptor) GetMetaBlock(hash []byte, target int) (*block.MetaBlock, error) {
 	for count := 0; count < numTriesUntilExit; count++ {
 		time.Sleep(timeToWaitBeforeCheckingReceivedHeaders)
 		s.mutReceivedMetaBlocks.RLock()
-		for hash, peersList := range s.mapMetaBlocksFromPeers {
-			log.Debug("metablock from peers", "num peers", len(peersList), "target", target, "hash", []byte(hash))
-			isOk := s.isMapEntryOk(peersList, hash, target, epoch)
+		for hashInMap, peersList := range s.mapMetaBlocksFromPeers {
+			isOk := s.isMapEntryOk(hash, peersList, hashInMap, target)
 			if isOk {
 				s.mutReceivedMetaBlocks.RUnlock()
-				return s.mapReceivedMetaBlocks[hash], nil
+				return s.mapReceivedMetaBlocks[hashInMap], nil
 			}
 		}
 		s.mutReceivedMetaBlocks.RUnlock()
@@ -100,15 +97,22 @@ func (s *simpleMetaBlockInterceptor) GetMetaBlock(target int, epoch uint32) (*bl
 }
 
 func (s *simpleMetaBlockInterceptor) isMapEntryOk(
+	expectedHash []byte,
 	peersList []p2p.PeerID,
 	hash string,
 	target int,
-	epoch uint32,
 ) bool {
-	mb := s.mapReceivedMetaBlocks[hash]
-	epochCheckNotRequired := epoch == math.MaxUint32
-	isEpochOk := epochCheckNotRequired || mb.Epoch == epoch
-	if len(peersList) >= target && isEpochOk {
+	mb, ok := s.mapReceivedMetaBlocks[string(expectedHash)]
+	if !ok {
+		return false
+	}
+
+	mbHash, err := core.CalculateHash(s.marshalizer, s.hasher, mb)
+	if err != nil {
+		return false
+	}
+	log.Info("peers map for meta block", "target", target, "num", len(peersList))
+	if bytes.Equal(expectedHash, mbHash) && len(peersList) >= target {
 		log.Info("got consensus for metablock", "len", len(peersList))
 		return true
 	}
diff --git a/epochStart/bootstrap/simpleShardHeaderInterceptor.go b/epochStart/bootstrap/simpleShardHeaderInterceptor.go
index 17758062920..5ca9f00535c 100644
--- a/epochStart/bootstrap/simpleShardHeaderInterceptor.go
+++ b/epochStart/bootstrap/simpleShardHeaderInterceptor.go
@@ -77,7 +77,7 @@ func (s *simpleShardHeaderInterceptor) addToPeerList(hash string, id p2p.PeerID)
 	s.mapShardHeadersFromPeers[hash] = append(s.mapShardHeadersFromPeers[hash], id)
 }
 
-// GetMiniBlock will return the metablock after it is confirmed or an error if the number of tries was exceeded
+// GetShardHeader will return the shard header after it is confirmed or an error if the number of tries was exceeded
 func (s *simpleShardHeaderInterceptor) GetShardHeader(target int) (*block.Header, error) {
 	for count := 0; count < numTriesUntilExit; count++ {
 		time.Sleep(timeToWaitBeforeCheckingReceivedHeaders)
@@ -102,7 +102,7 @@ func (s *simpleShardHeaderInterceptor) isMapEntryOk(
 ) bool {
 	log.Info("peers map for shard hdr", "target", target, "num", len(peersList))
 	if len(peersList) >= target {
-		log.Info("got consensus for metablock", "len", len(peersList))
+		
log.Info("got consensus for shard header", "len", len(peersList)) return true } diff --git a/facade/elrondNodeFacade.go b/facade/elrondNodeFacade.go index 7d0ae3d1aec..fad500c2fc1 100644 --- a/facade/elrondNodeFacade.go +++ b/facade/elrondNodeFacade.go @@ -72,14 +72,8 @@ func (ef *ElrondNodeFacade) SetConfig(facadeConfig *config.FacadeConfig) { } // StartNode starts the underlying node -func (ef *ElrondNodeFacade) StartNode(epoch uint32, withP2pBootstrap bool) error { - //if withP2pBootstrap { - // err := ef.node.Start() - // if err != nil { - // return err - // } - //} - +func (ef *ElrondNodeFacade) StartNode(epoch uint32) error { + ef.node.Start() err := ef.node.StartConsensus(epoch) return err } diff --git a/facade/elrondNodeFacade_test.go b/facade/elrondNodeFacade_test.go index fc2f05a8954..f1482567232 100644 --- a/facade/elrondNodeFacade_test.go +++ b/facade/elrondNodeFacade_test.go @@ -46,9 +46,8 @@ func TestNewElrondFacade_FromNilApiResolverShouldReturnNil(t *testing.T) { func TestElrondFacade_StartNodeWithNodeNotNullShouldNotReturnError(t *testing.T) { started := false node := &mock.NodeMock{ - StartHandler: func() error { + StartHandler: func() { started = true - return nil }, P2PBootstrapHandler: func() error { return nil @@ -63,39 +62,18 @@ func TestElrondFacade_StartNodeWithNodeNotNullShouldNotReturnError(t *testing.T) ef := createElrondNodeFacadeWithMockResolver(node) - err := ef.StartNode(0, true) + err := ef.StartNode(0) assert.Nil(t, err) isRunning := ef.IsNodeRunning() assert.True(t, isRunning) } -func TestElrondFacade_StartNodeWithErrorOnStartNodeShouldReturnError(t *testing.T) { - started := false - node := &mock.NodeMock{ - StartHandler: func() error { - return fmt.Errorf("error on start node") - }, - IsRunningHandler: func() bool { - return started - }, - } - - ef := createElrondNodeFacadeWithMockResolver(node) - - err := ef.StartNode(0, true) - assert.NotNil(t, err) - - isRunning := ef.IsNodeRunning() - assert.False(t, isRunning) -} - func TestElrondFacade_StartNodeWithErrorOnStartConsensusShouldReturnError(t *testing.T) { started := false node := &mock.NodeMock{ - StartHandler: func() error { + StartHandler: func() { started = true - return nil }, P2PBootstrapHandler: func() error { return nil @@ -111,7 +89,7 @@ func TestElrondFacade_StartNodeWithErrorOnStartConsensusShouldReturnError(t *tes ef := createElrondNodeFacadeWithMockResolver(node) - err := ef.StartNode(0, true) + err := ef.StartNode(0) assert.NotNil(t, err) isRunning := ef.IsNodeRunning() diff --git a/facade/interface.go b/facade/interface.go index 6df1946a270..be7698c2e4d 100644 --- a/facade/interface.go +++ b/facade/interface.go @@ -13,8 +13,8 @@ import ( //NodeWrapper contains all functions that a node should contain. 
type NodeWrapper interface { - // Start will create a new messenger and and set up the Node state as running - Start() error + // Start will set up the Node state as running + Start() // P2PBootstrap starts the peer discovery process and peer connection filtering P2PBootstrap() error diff --git a/facade/mock/nodeMock.go b/facade/mock/nodeMock.go index 2b96ec8d3c6..c839b53752e 100644 --- a/facade/mock/nodeMock.go +++ b/facade/mock/nodeMock.go @@ -11,7 +11,7 @@ import ( // NodeMock - type NodeMock struct { AddressHandler func() (string, error) - StartHandler func() error + StartHandler func() StopHandler func() error P2PBootstrapHandler func() error IsRunningHandler func() bool @@ -38,8 +38,8 @@ func (nm *NodeMock) Address() (string, error) { } // Start - -func (nm *NodeMock) Start() error { - return nm.StartHandler() +func (nm *NodeMock) Start() { + nm.StartHandler() } // P2PBootstrap - diff --git a/integrationTests/consensus/testInitializer.go b/integrationTests/consensus/testInitializer.go index 400a4909a3a..1a2958b9807 100644 --- a/integrationTests/consensus/testInitializer.go +++ b/integrationTests/consensus/testInitializer.go @@ -154,7 +154,7 @@ func displayAndStartNodes(nodes []*testNode) { hex.EncodeToString(skBuff), hex.EncodeToString(pkBuff), ) - _ = n.node.Start() + n.node.Start() _ = n.node.P2PBootstrap() } } diff --git a/integrationTests/resolvers/testInitializer.go b/integrationTests/resolvers/testInitializer.go index c44c365db56..52f9d348f6f 100644 --- a/integrationTests/resolvers/testInitializer.go +++ b/integrationTests/resolvers/testInitializer.go @@ -26,8 +26,8 @@ func createResolverRequester( nResolver := integrationTests.NewTestProcessorNode(numShards, resolverShardID, txSignShardId, advertiserAddress) nRequester := integrationTests.NewTestProcessorNode(numShards, requesterShardID, txSignShardId, advertiserAddress) - _ = nRequester.Node.Start() - _ = nResolver.Node.Start() + nRequester.Node.Start() + nResolver.Node.Start() time.Sleep(time.Second) err := nRequester.Messenger.ConnectToPeer(integrationTests.GetConnectableAddress(nResolver.Messenger)) diff --git a/integrationTests/singleShard/block/interceptedRequestHdr_test.go b/integrationTests/singleShard/block/interceptedRequestHdr_test.go index a684b8a5587..4e2d8752f2a 100644 --- a/integrationTests/singleShard/block/interceptedRequestHdr_test.go +++ b/integrationTests/singleShard/block/interceptedRequestHdr_test.go @@ -39,8 +39,8 @@ func TestNode_GenerateSendInterceptHeaderByNonceWithNetMessenger(t *testing.T) { fmt.Println("Resolver:") nResolver := integrationTests.NewTestProcessorNode(nrOfShards, shardID, txSignPrivKeyShardId, resolverNodeAddr) - _ = nRequester.Node.Start() - _ = nResolver.Node.Start() + nRequester.Node.Start() + nResolver.Node.Start() defer func() { _ = nRequester.Node.Stop() _ = nResolver.Node.Stop() @@ -98,8 +98,8 @@ func TestNode_InterceptedHeaderWithWrongChainIDShouldBeDiscarded(t *testing.T) { fmt.Println("Resolver:") nResolver := integrationTests.NewTestProcessorNode(nrOfShards, shardID, txSignPrivKeyShardId, resolverNodeAddr) - _ = nRequester.Node.Start() - _ = nResolver.Node.Start() + nRequester.Node.Start() + nResolver.Node.Start() defer func() { _ = nRequester.Node.Stop() _ = nResolver.Node.Stop() diff --git a/integrationTests/singleShard/block/interceptedRequestTxBlockBody_test.go b/integrationTests/singleShard/block/interceptedRequestTxBlockBody_test.go index 43a611bc347..f33c6908ddc 100644 --- a/integrationTests/singleShard/block/interceptedRequestTxBlockBody_test.go +++ 
b/integrationTests/singleShard/block/interceptedRequestTxBlockBody_test.go @@ -32,8 +32,8 @@ func TestNode_GenerateSendInterceptTxBlockBodyWithNetMessenger(t *testing.T) { fmt.Println("Resolver:") nResolver := integrationTests.NewTestProcessorNode(nrOfShards, shardID, txSignPrivKeyShardId, resolverNodeAddr) - _ = nRequester.Node.Start() - _ = nResolver.Node.Start() + nRequester.Node.Start() + nResolver.Node.Start() defer func() { _ = nRequester.Node.Stop() diff --git a/integrationTests/singleShard/transaction/interceptedBulkTx_test.go b/integrationTests/singleShard/transaction/interceptedBulkTx_test.go index 575236e8559..d914b61eccf 100644 --- a/integrationTests/singleShard/transaction/interceptedBulkTx_test.go +++ b/integrationTests/singleShard/transaction/interceptedBulkTx_test.go @@ -30,7 +30,7 @@ func TestNode_GenerateSendInterceptBulkTransactionsWithMessenger(t *testing.T) { nodeAddr := "0" n := integrationTests.NewTestProcessorNode(nrOfShards, shardID, txSignPrivKeyShardId, nodeAddr) - _ = n.Node.Start() + n.Node.Start() defer func() { _ = n.Node.Stop() diff --git a/integrationTests/singleShard/transaction/interceptedBulkUnsignedTx_test.go b/integrationTests/singleShard/transaction/interceptedBulkUnsignedTx_test.go index 2f029bc69e1..69816da6b30 100644 --- a/integrationTests/singleShard/transaction/interceptedBulkUnsignedTx_test.go +++ b/integrationTests/singleShard/transaction/interceptedBulkUnsignedTx_test.go @@ -36,7 +36,7 @@ func TestNode_GenerateSendInterceptBulkUnsignedTransactionsWithMessenger(t *test nodeAddr := "0" n := integrationTests.NewTestProcessorNode(nrOfShards, shardID, txSignPrivKeyShardId, nodeAddr) - _ = n.Node.Start() + n.Node.Start() defer func() { _ = n.Node.Stop() diff --git a/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go b/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go index 941701735db..3f8cbae7ebf 100644 --- a/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go +++ b/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go @@ -33,8 +33,8 @@ func TestNode_RequestInterceptTransactionWithMessenger(t *testing.T) { fmt.Println("Resolver:") nResolver := integrationTests.NewTestProcessorNode(nrOfShards, shardID, txSignPrivKeyShardId, resolverNodeAddr) - _ = nRequester.Node.Start() - _ = nResolver.Node.Start() + nRequester.Node.Start() + nResolver.Node.Start() defer func() { _ = nRequester.Node.Stop() _ = nResolver.Node.Stop() @@ -125,8 +125,8 @@ func TestNode_RequestInterceptRewardTransactionWithMessenger(t *testing.T) { fmt.Println("Resolver:") nResolver := integrationTests.NewTestProcessorNode(nrOfShards, shardID, txSignPrivKeyShardId, resolverNodeAddr) - _ = nRequester.Node.Start() - _ = nResolver.Node.Start() + nRequester.Node.Start() + nResolver.Node.Start() defer func() { _ = nRequester.Node.Stop() _ = nResolver.Node.Stop() diff --git a/integrationTests/singleShard/transaction/interceptedResolvedUnsignedTx_test.go b/integrationTests/singleShard/transaction/interceptedResolvedUnsignedTx_test.go index 563a8b5e493..2fd1bd73f54 100644 --- a/integrationTests/singleShard/transaction/interceptedResolvedUnsignedTx_test.go +++ b/integrationTests/singleShard/transaction/interceptedResolvedUnsignedTx_test.go @@ -30,8 +30,8 @@ func TestNode_RequestInterceptUnsignedTransactionWithMessenger(t *testing.T) { fmt.Println("Resolver:") nResolver := integrationTests.NewTestProcessorNode(nrOfShards, shardID, txSignPrivKeyShardId, resolverNodeAddr) - _ = nRequester.Node.Start() - _ = 
nResolver.Node.Start() + nRequester.Node.Start() + nResolver.Node.Start() defer func() { _ = nRequester.Node.Stop() _ = nResolver.Node.Stop() diff --git a/integrationTests/state/stateTrieSync_test.go b/integrationTests/state/stateTrieSync_test.go index ed5aa0a9d5a..4756dc0cc10 100644 --- a/integrationTests/state/stateTrieSync_test.go +++ b/integrationTests/state/stateTrieSync_test.go @@ -29,8 +29,8 @@ func TestNode_RequestInterceptTrieNodesWithMessenger(t *testing.T) { fmt.Println("Resolver:") nResolver := integrationTests.NewTestProcessorNode(nrOfShards, shardID, txSignPrivKeyShardId, resolverNodeAddr) - _ = nRequester.Node.Start() - _ = nResolver.Node.Start() + nRequester.Node.Start() + nResolver.Node.Start() defer func() { _ = nRequester.Node.Stop() _ = nResolver.Node.Stop() diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index 30cf255621c..19d569b58f1 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -860,7 +860,7 @@ func DisplayAndStartNodes(nodes []*TestProcessorNode) { hex.EncodeToString(skBuff), hex.EncodeToString(pkBuff), ) - _ = n.Node.Start() + n.Node.Start() _ = n.Node.P2PBootstrap() } diff --git a/node/node.go b/node/node.go index dfc47e620f9..16fc052b3a6 100644 --- a/node/node.go +++ b/node/node.go @@ -161,13 +161,9 @@ func (n *Node) IsRunning() bool { return n.isRunning } -// Start will create a new messenger and and set up the Node state as running -func (n *Node) Start() error { - err := n.P2PBootstrap() - if err == nil { - n.isRunning = true - } - return err +// Start will set up the Node state as running +func (n *Node) Start() { + n.isRunning = true } // Stop closes the messenger and undos everything done in Start diff --git a/node/node_test.go b/node/node_test.go index 4aa9acb3d29..d3558f1114e 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -112,13 +112,6 @@ func TestNewNode_ApplyNilOptionShouldError(t *testing.T) { assert.NotNil(t, err) } -func TestStart_NoMessenger(t *testing.T) { - n, _ := node.NewNode() - err := n.Start() - defer func() { _ = n.Stop() }() - assert.NotNil(t, err) -} - func TestStart_CorrectParams(t *testing.T) { messenger := getMessenger() n, _ := node.NewNode( @@ -128,9 +121,8 @@ func TestStart_CorrectParams(t *testing.T) { node.WithAddressConverter(&mock.AddressConverterStub{}), node.WithAccountsAdapter(&mock.AccountsStub{}), ) - err := n.Start() + n.Start() defer func() { _ = n.Stop() }() - assert.Nil(t, err) assert.True(t, n.IsRunning()) } @@ -144,11 +136,10 @@ func TestStart_CannotApplyOptions(t *testing.T) { node.WithAddressConverter(&mock.AddressConverterStub{}), node.WithAccountsAdapter(&mock.AccountsStub{}), ) - err := n.Start() - require.Nil(t, err) + n.Start() defer func() { _ = n.Stop() }() - err = n.ApplyOptions(node.WithDataPool(&mock.PoolsHolderStub{})) + err := n.ApplyOptions(node.WithDataPool(&mock.PoolsHolderStub{})) require.Error(t, err) } @@ -166,9 +157,8 @@ func TestStart_CorrectParamsApplyingOptions(t *testing.T) { logError(err) - err = n.Start() + n.Start() defer func() { _ = n.Stop() }() - assert.Nil(t, err) assert.True(t, n.IsRunning()) } @@ -180,9 +170,8 @@ func TestApplyOptions_NodeStarted(t *testing.T) { node.WithMarshalizer(getMarshalizer(), testSizeCheckDelta), node.WithHasher(getHasher()), ) - err := n.Start() + n.Start() defer func() { _ = n.Stop() }() - logError(err) assert.True(t, n.IsRunning()) } @@ -211,7 +200,7 @@ func TestStop_MessengerCloseErrors(t *testing.T) { node.WithHasher(getHasher()), ) - _ = n.Start() + 
n.Start()

 	err := n.Stop()
 	assert.NotNil(t, err)
@@ -224,10 +213,9 @@ func TestStop(t *testing.T) {
 		node.WithMarshalizer(getMarshalizer(), testSizeCheckDelta),
 		node.WithHasher(getHasher()),
 	)
-	err := n.Start()
-	logError(err)
+	n.Start()

-	err = n.Stop()
+	err := n.Stop()
 	assert.Nil(t, err)
 	assert.False(t, n.IsRunning())
 }
@@ -765,10 +753,9 @@ func TestCreateShardedStores_NilShardCoordinatorShouldError(t *testing.T) {
 		node.WithAddressConverter(&mock.AddressConverterStub{}),
 		node.WithAccountsAdapter(&mock.AccountsStub{}),
 	)
-	err := n.Start()
-	logError(err)
+	n.Start()
 	defer func() { _ = n.Stop() }()
-	err = n.CreateShardedStores()
+	err := n.CreateShardedStores()
 	assert.NotNil(t, err)
 	assert.Contains(t, err.Error(), "nil shard coordinator")
 }
@@ -784,10 +771,9 @@ func TestCreateShardedStores_NilDataPoolShouldError(t *testing.T) {
 		node.WithAddressConverter(&mock.AddressConverterStub{}),
 		node.WithAccountsAdapter(&mock.AccountsStub{}),
 	)
-	err := n.Start()
-	logError(err)
+	n.Start()
 	defer func() { _ = n.Stop() }()
-	err = n.CreateShardedStores()
+	err := n.CreateShardedStores()
 	assert.NotNil(t, err)
 	assert.Contains(t, err.Error(), "nil data pool")
 }
@@ -811,10 +797,9 @@ func TestCreateShardedStores_NilTransactionDataPoolShouldError(t *testing.T) {
 		node.WithAddressConverter(&mock.AddressConverterStub{}),
 		node.WithAccountsAdapter(&mock.AccountsStub{}),
 	)
-	err := n.Start()
-	logError(err)
+	n.Start()
 	defer func() { _ = n.Stop() }()
-	err = n.CreateShardedStores()
+	err := n.CreateShardedStores()
 	assert.NotNil(t, err)
 	assert.Contains(t, err.Error(), "nil transaction sharded data store")
 }
@@ -839,10 +824,9 @@ func TestCreateShardedStores_NilHeaderDataPoolShouldError(t *testing.T) {
 		node.WithAddressConverter(&mock.AddressConverterStub{}),
 		node.WithAccountsAdapter(&mock.AccountsStub{}),
 	)
-	err := n.Start()
-	logError(err)
+	n.Start()
 	defer func() { _ = n.Stop() }()
-	err = n.CreateShardedStores()
+	err := n.CreateShardedStores()
 	assert.NotNil(t, err)
 	assert.Contains(t, err.Error(), "nil header sharded data store")
 }
@@ -874,10 +858,9 @@ func TestCreateShardedStores_ReturnsSuccessfully(t *testing.T) {
 		node.WithAddressConverter(&mock.AddressConverterStub{}),
 		node.WithAccountsAdapter(&mock.AccountsStub{}),
 	)
-	err := n.Start()
-	logError(err)
+	n.Start()
 	defer func() { _ = n.Stop() }()
-	err = n.CreateShardedStores()
+	err := n.CreateShardedStores()
 	assert.Nil(t, err)

 	assert.True(t, containString(process.ShardCacherIdentifier(0, 0), txShardedStores))
diff --git a/p2p/libp2p/netMessenger.go b/p2p/libp2p/netMessenger.go
index 6d3f62b5c40..e8407bb3669 100644
--- a/p2p/libp2p/netMessenger.go
+++ b/p2p/libp2p/netMessenger.go
@@ -500,7 +500,28 @@ func (netMes *networkMessenger) RegisterMessageProcessor(topic string, handler p
 	return nil
 }

-// UnregisterMessageProcessor registers a message processes on a topic
+// UnregisterAllMessageProcessors will unregister all message processors for all topics
+func (netMes *networkMessenger) UnregisterAllMessageProcessors() error {
+	netMes.mutTopics.Lock()
+	defer netMes.mutTopics.Unlock()
+
+	for topic, validator := range netMes.topics {
+		if validator == nil {
+			return p2p.ErrTopicValidatorOperationNotSupported
+		}
+
+		err := netMes.pb.UnregisterTopicValidator(topic)
+		if err != nil {
+			return err
+		}
+
+		netMes.topics[topic] = nil
+	}
+
+	return nil
+}
+
+// UnregisterMessageProcessor unregisters a message processor from a topic
 func (netMes *networkMessenger) UnregisterMessageProcessor(topic string) error {
 	netMes.mutTopics.Lock()
 	defer netMes.mutTopics.Unlock()
diff --git a/p2p/p2p.go b/p2p/p2p.go
index 0ee781b2db9..e49eb01ffce 100644
--- a/p2p/p2p.go
+++ b/p2p/p2p.go
@@ -133,6 +133,11 @@ type Messenger interface {
 	// specified topic.
 	RegisterMessageProcessor(topic string, handler MessageProcessor) error

+	// UnregisterAllMessageProcessors removes all the MessageProcessors set by the
+	// Messenger from the list of registered handlers for the messages on
+	// all topics.
+	UnregisterAllMessageProcessors() error
+
 	// UnregisterMessageProcessor removes the MessageProcessor set by the
 	// Messenger from the list of registered handlers for the messages on the
 	// given topic.

From 39f89f5ee99f8608c5c47f9aea5826dd7ba28a27 Mon Sep 17 00:00:00 2001
From: bogdan-rosianu
Date: Wed, 11 Mar 2020 14:20:58 +0200
Subject: [PATCH 10/61] removed unused const

---
 epochStart/bootstrap/epochStartDataProvider.go | 1 -
 1 file changed, 1 deletion(-)

diff --git a/epochStart/bootstrap/epochStartDataProvider.go b/epochStart/bootstrap/epochStartDataProvider.go
index d3879d36625..86bbeba3a22 100644
--- a/epochStart/bootstrap/epochStartDataProvider.go
+++ b/epochStart/bootstrap/epochStartDataProvider.go
@@ -36,7 +36,6 @@ var log = logger.GetOrCreate("registration")

 var _ process.Interceptor = (*simpleMetaBlockInterceptor)(nil)

-const requestSuffix = "_REQUEST"
 const delayBetweenRequests = 1 * time.Second
 const delayAfterRequesting = 1 * time.Second
 const thresholdForConsideringMetaBlockCorrect = 0.2

From ca5384ae32c4970854933393b63f8c1fb4c6461d Mon Sep 17 00:00:00 2001
From: bogdan-rosianu
Date: Wed, 11 Mar 2020 15:14:15 +0200
Subject: [PATCH 11/61] fix after merge

---
 cmd/node/config/config.toml                            | 8 ++++----
 cmd/node/main.go                                       | 3 ++-
 epochStart/bootstrap/epochStartDataProvider.go         | 10 +++++-----
 .../bootstrap/factory/epochStartDataProviderFactory.go | 2 +-
 .../bootstrap/simpleEpochStartMetaBlockInterceptor.go  | 2 +-
 epochStart/bootstrap/simpleMetaBlockInterceptor.go     | 2 +-
 epochStart/bootstrap/simpleMiniBlockInterceptor.go     | 2 +-
 epochStart/bootstrap/simpleShardHeaderInterceptor.go   | 2 +-
 node/node_test.go                                      | 10 +++++++++-
 9 files changed, 25 insertions(+), 16 deletions(-)

diff --git a/cmd/node/config/config.toml b/cmd/node/config/config.toml
index 67ad7f0c4f7..b748e5a6d65 100644
--- a/cmd/node/config/config.toml
+++ b/cmd/node/config/config.toml
@@ -241,8 +241,8 @@
     [MultisigHasher]
         Type = "blake2b"

-# The main marshalizer, used in internod communication
-# Type idenftifies the marshalizer
+# The main marshalizer, used in internodes communication
+# Type identifies the marshalizer
 # SizeCheckDelta the maximum allow drift between the input data buffer and
 # the reencoded version (in percents).
 # 0 disables the feature.
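The Type values in this file are resolved to concrete implementations at node start-up: patch 11 below hardcodes &marshal.GogoProtoMarshalizer{} in main.go, and patch 12 later replaces that with factoryMarshal.NewMarshalizer(generalConfig.Marshalizer.Type). A minimal sketch of what such a factory can look like, assuming the same switch-on-name shape as the hasher factory added in patch 12 (the function body and the error variable below are illustrative, not copied from the repository):

package factory

import (
	"errors"

	"github.com/ElrondNetwork/elrond-go/marshal"
)

// ErrNoMarshalizerInConfig mirrors ErrNoHasherInConfig from the hasher factory
var ErrNoMarshalizerInConfig = errors.New("no marshalizer provided in config file")

// NewMarshalizer maps the configured Type string to a concrete marshalizer
func NewMarshalizer(name string) (marshal.Marshalizer, error) {
	switch name {
	case "json":
		return &marshal.JsonMarshalizer{}, nil
	case "protobuf":
		return &marshal.GogoProtoMarshalizer{}, nil
	}

	return nil, ErrNoMarshalizerInConfig
}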
@@ -250,11 +250,11 @@
         Type = "protobuf"
         SizeCheckDelta = 0

-# The marshalizer used for smartcontracts data exchage
+# The marshalizer used for smart contracts data exchange
 [VmMarshalizer]
     Type = "json"

-# The marshalizer used in transction signing
+# The marshalizer used in transaction signing
 [TxSignMarshalizer]
     Type = "json"

diff --git a/cmd/node/main.go b/cmd/node/main.go
index 221afad7c14..bf3722551a9 100644
--- a/cmd/node/main.go
+++ b/cmd/node/main.go
@@ -564,7 +564,8 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error {
 	}
 	time.Sleep(3 * time.Second)

-	marshalizer := &marshal.JsonMarshalizer{}
+	// TODO : get marshalizer and hasher based on config and maybe move the functions for extracting these 2 in the core package
+	marshalizer := &marshal.GogoProtoMarshalizer{}
 	hasher := &blake2b.Blake2b{}
 	epochStartComponentArgs := factory2.EpochStartDataProviderFactoryArgs{
 		PubKey: pubKey,
diff --git a/epochStart/bootstrap/epochStartDataProvider.go b/epochStart/bootstrap/epochStartDataProvider.go
index 86bbeba3a22..f059931201f 100644
--- a/epochStart/bootstrap/epochStartDataProvider.go
+++ b/epochStart/bootstrap/epochStartDataProvider.go
@@ -306,7 +306,7 @@ func (esdp *epochStartDataProvider) getCurrentEpochStartData(
 ) (*block.EpochStartShardData, error) {
 	shardID := shardCoordinator.SelfId()
 	for _, epochStartData := range metaBlock.EpochStart.LastFinalizedHeaders {
-		if epochStartData.ShardId == shardID {
+		if epochStartData.ShardID == shardID {
 			return &epochStartData, nil
 		}
 	}
@@ -428,11 +428,11 @@ func (esdp *epochStartDataProvider) getShardHeaders(
 	if shardID == core.MetachainShardId {
 		for _, entry := range metaBlock.EpochStart.LastFinalizedHeaders {
 			var hdr *block.Header
-			hdr, err := esdp.getShardHeader(entry.HeaderHash, entry.ShardId)
+			hdr, err := esdp.getShardHeader(entry.HeaderHash, entry.ShardID)
 			if err != nil {
 				return nil, err
 			}
-			headersMap[entry.ShardId] = hdr
+			headersMap[entry.ShardID] = hdr
 		}

 		return headersMap, nil
@@ -440,7 +440,7 @@
 	var entryForShard *block.EpochStartShardData
 	for _, entry := range metaBlock.EpochStart.LastFinalizedHeaders {
-		if entry.ShardId == shardID {
+		if entry.ShardID == shardID {
 			entryForShard = &entry
 		}
 	}
@@ -451,7 +451,7 @@
 	hdr, err := esdp.getShardHeader(
 		entryForShard.HeaderHash,
-		entryForShard.ShardId,
+		entryForShard.ShardID,
 	)
 	if err != nil {
 		return nil, err
diff --git a/epochStart/bootstrap/factory/epochStartDataProviderFactory.go b/epochStart/bootstrap/factory/epochStartDataProviderFactory.go
index 3634fbc6dd6..84f3626bff1 100644
--- a/epochStart/bootstrap/factory/epochStartDataProviderFactory.go
+++ b/epochStart/bootstrap/factory/epochStartDataProviderFactory.go
@@ -60,7 +60,7 @@ func NewEpochStartDataProviderFactory(args EpochStartDataProviderFactoryArgs) (*
 		args.OriginalNodesConfig,
 		args.GeneralConfig,
 	)
-	shouldSync = true // harcoded so we can test we can sync
+	shouldSync = true // hardcoded so we can test that we can sync

 	return &epochStartDataProviderFactory{
 		pubKey: args.PubKey,
diff --git a/epochStart/bootstrap/simpleEpochStartMetaBlockInterceptor.go b/epochStart/bootstrap/simpleEpochStartMetaBlockInterceptor.go
index 022b88245bb..9a5a4527802 100644
--- a/epochStart/bootstrap/simpleEpochStartMetaBlockInterceptor.go
+++ b/epochStart/bootstrap/simpleEpochStartMetaBlockInterceptor.go
@@ -50,7 +50,7 @@ func (s *simpleEpochStartMetaBlockInterceptor) ProcessReceivedMessage(message p2
 		return err
 	}
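// A note on the hunks that follow: core.CalculateHash marshals the object it
// receives before hashing it. The gogo-protobuf marshalizer wired in by this
// commit relies on the generated proto methods, which are defined on the
// pointer types (*block.MetaBlock and the other block types), which is
// presumably why each mb argument passed to CalculateHash becomes &mb.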
s.mutReceivedMetaBlocks.Lock() - mbHash, err := core.CalculateHash(s.marshalizer, s.hasher, mb) + mbHash, err := core.CalculateHash(s.marshalizer, s.hasher, &mb) if err != nil { s.mutReceivedMetaBlocks.Unlock() return err diff --git a/epochStart/bootstrap/simpleMetaBlockInterceptor.go b/epochStart/bootstrap/simpleMetaBlockInterceptor.go index 161fb2e64ba..8192a0254a6 100644 --- a/epochStart/bootstrap/simpleMetaBlockInterceptor.go +++ b/epochStart/bootstrap/simpleMetaBlockInterceptor.go @@ -48,7 +48,7 @@ func (s *simpleMetaBlockInterceptor) ProcessReceivedMessage(message p2p.MessageP return err } s.mutReceivedMetaBlocks.Lock() - mbHash, err := core.CalculateHash(s.marshalizer, s.hasher, mb) + mbHash, err := core.CalculateHash(s.marshalizer, s.hasher, &mb) if err != nil { s.mutReceivedMetaBlocks.Unlock() return err diff --git a/epochStart/bootstrap/simpleMiniBlockInterceptor.go b/epochStart/bootstrap/simpleMiniBlockInterceptor.go index 391c9db9b50..00dadedcb51 100644 --- a/epochStart/bootstrap/simpleMiniBlockInterceptor.go +++ b/epochStart/bootstrap/simpleMiniBlockInterceptor.go @@ -48,7 +48,7 @@ func (s *simpleMiniBlockInterceptor) ProcessReceivedMessage(message p2p.MessageP return err } s.mutReceivedMiniBlocks.Lock() - mbHash, err := core.CalculateHash(s.marshalizer, s.hasher, mb) + mbHash, err := core.CalculateHash(s.marshalizer, s.hasher, &mb) if err != nil { s.mutReceivedMiniBlocks.Unlock() return err diff --git a/epochStart/bootstrap/simpleShardHeaderInterceptor.go b/epochStart/bootstrap/simpleShardHeaderInterceptor.go index 5ca9f00535c..339f9781ed7 100644 --- a/epochStart/bootstrap/simpleShardHeaderInterceptor.go +++ b/epochStart/bootstrap/simpleShardHeaderInterceptor.go @@ -47,7 +47,7 @@ func (s *simpleShardHeaderInterceptor) ProcessReceivedMessage(message p2p.Messag return err } s.mutReceivedShardHeaders.Lock() - mbHash, err := core.CalculateHash(s.marshalizer, s.hasher, mb) + mbHash, err := core.CalculateHash(s.marshalizer, s.hasher, &mb) if err != nil { s.mutReceivedShardHeaders.Unlock() return err diff --git a/node/node_test.go b/node/node_test.go index 2b8e94332a7..923d38269e1 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -220,16 +220,24 @@ func TestStop_MessengerCloseErrors(t *testing.T) { func TestStop(t *testing.T) { + messengerCloseWasCalled := false + + messenger := getMessenger() + messenger.CloseCalled = func() error { + messengerCloseWasCalled = true + return nil + } n, _ := node.NewNode( node.WithProtoMarshalizer(getMarshalizer(), testSizeCheckDelta), node.WithVmMarshalizer(getMarshalizer()), node.WithHasher(getHasher()), + node.WithMessenger(messenger), ) n.Start() err := n.Stop() assert.Nil(t, err) - assert.False(t, n.IsRunning()) + assert.True(t, messengerCloseWasCalled) } func TestGetBalance_NoAddrConverterShouldError(t *testing.T) { From 38c5f2a03216a8f8fe0203735d68ca7b8ba752da Mon Sep 17 00:00:00 2001 From: bogdan-rosianu Date: Wed, 11 Mar 2020 17:42:16 +0200 Subject: [PATCH 12/61] fix after second merge + cleanup --- cmd/node/factory/structs.go | 15 ++----- cmd/node/main.go | 19 ++++++--- .../bootstrap/simpleMetaBlockInterceptor.go | 2 - .../bootstrap/simpleMiniBlockInterceptor.go | 2 - .../bootstrap/simpleShardHeaderInterceptor.go | 2 - hashing/factory/errors.go | 6 +++ hashing/factory/hasherFactory.go | 19 +++++++++ hashing/factory/hasherFactory_test.go | 42 +++++++++++++++++++ 8 files changed, 83 insertions(+), 24 deletions(-) create mode 100644 hashing/factory/errors.go create mode 100644 hashing/factory/hasherFactory.go create mode 100644 
hashing/factory/hasherFactory_test.go diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index b315ed95e9d..c5004a79bd9 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -52,6 +52,7 @@ import ( "github.com/ElrondNetwork/elrond-go/epochStart/shardchain" "github.com/ElrondNetwork/elrond-go/hashing" "github.com/ElrondNetwork/elrond-go/hashing/blake2b" + factoryHasher "github.com/ElrondNetwork/elrond-go/hashing/factory" "github.com/ElrondNetwork/elrond-go/hashing/sha256" "github.com/ElrondNetwork/elrond-go/logger" "github.com/ElrondNetwork/elrond-go/marshal" @@ -203,7 +204,7 @@ func NewCoreComponentsFactoryArgs(config *config.Config, pathManager storage.Pat // CoreComponentsFactory creates the core components func CoreComponentsFactory(args *coreComponentsFactoryArgs) (*Core, error) { - hasher, err := getHasherFromConfig(args.config) + hasher, err := factoryHasher.NewHasher(args.config.Hasher.Type) if err != nil { return nil, errors.New("could not create hasher: " + err.Error()) } @@ -959,17 +960,6 @@ func CreateSoftwareVersionChecker(statusHandler core.AppStatusHandler) (*softwar return softwareVersionChecker, nil } -func getHasherFromConfig(cfg *config.Config) (hashing.Hasher, error) { - switch cfg.Hasher.Type { - case "sha256": - return sha256.Sha256{}, nil - case "blake2b": - return &blake2b.Blake2b{}, nil - } - - return nil, errors.New("no hasher provided in config file") -} - func createBlockChainFromConfig(config *config.Config, coordinator sharding.Coordinator, ash core.AppStatusHandler) (data.ChainHandler, error) { badBlockCache, err := storageUnit.NewCache( storageUnit.CacheType(config.BadBlocksCache.Type), @@ -2203,6 +2193,7 @@ func newMetaBlockProcessor( argsEpochEconomics := metachainEpochStart.ArgsNewEpochEconomics{ Marshalizer: core.InternalMarshalizer, + Hasher: core.Hasher, Store: data.Store, ShardCoordinator: shardCoordinator, NodesCoordinator: nodesCoordinator, diff --git a/cmd/node/main.go b/cmd/node/main.go index 0a919c47fda..c3060371aa1 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -31,15 +31,17 @@ import ( "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/display" "github.com/ElrondNetwork/elrond-go/epochStart" - factory2 "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/factory" + factoryEpochBootstrap "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/factory" "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/nodesconfigprovider" "github.com/ElrondNetwork/elrond-go/epochStart/notifier" "github.com/ElrondNetwork/elrond-go/facade" "github.com/ElrondNetwork/elrond-go/hashing" "github.com/ElrondNetwork/elrond-go/hashing/blake2b" + factoryHasher "github.com/ElrondNetwork/elrond-go/hashing/factory" "github.com/ElrondNetwork/elrond-go/logger" "github.com/ElrondNetwork/elrond-go/logger/redirects" "github.com/ElrondNetwork/elrond-go/marshal" + factoryMarshal "github.com/ElrondNetwork/elrond-go/marshal/factory" "github.com/ElrondNetwork/elrond-go/node" "github.com/ElrondNetwork/elrond-go/node/external" "github.com/ElrondNetwork/elrond-go/ntp" @@ -566,10 +568,15 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { } time.Sleep(3 * time.Second) - // TODO : get marshalizer and hasher based on config and maybe move the functions for extracting these 2 in the core package - marshalizer := &marshal.GogoProtoMarshalizer{} - hasher := &blake2b.Blake2b{} - epochStartComponentArgs := factory2.EpochStartDataProviderFactoryArgs{ + marshalizer, err := 
factoryMarshal.NewMarshalizer(generalConfig.Marshalizer.Type) + if err != nil { + return err + } + hasher, err := factoryHasher.NewHasher(generalConfig.Hasher.Type) + if err != nil { + return err + } + epochStartComponentArgs := factoryEpochBootstrap.EpochStartDataProviderFactoryArgs{ PubKey: pubKey, Messenger: networkComponents.NetMessenger, Marshalizer: marshalizer, @@ -581,7 +588,7 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { IsEpochFoundInStorage: epochFoundInStorage, } - epochStartComponentFactory, err := factory2.NewEpochStartDataProviderFactory(epochStartComponentArgs) + epochStartComponentFactory, err := factoryEpochBootstrap.NewEpochStartDataProviderFactory(epochStartComponentArgs) if err != nil { return err } diff --git a/epochStart/bootstrap/simpleMetaBlockInterceptor.go b/epochStart/bootstrap/simpleMetaBlockInterceptor.go index 8192a0254a6..c7540cb7a65 100644 --- a/epochStart/bootstrap/simpleMetaBlockInterceptor.go +++ b/epochStart/bootstrap/simpleMetaBlockInterceptor.go @@ -41,7 +41,6 @@ func NewSimpleMetaBlockInterceptor(marshalizer marshal.Marshalizer, hasher hashi // ProcessReceivedMessage will receive the metablocks and will add them to the maps func (s *simpleMetaBlockInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, _ func(buffToSend []byte)) error { - log.Info("received meta block") var mb block.MetaBlock err := s.marshalizer.Unmarshal(&mb, message.Data()) if err != nil { @@ -111,7 +110,6 @@ func (s *simpleMetaBlockInterceptor) isMapEntryOk( if err != nil { return false } - log.Info("peers map for meta block", "target", target, "num", len(peersList)) if bytes.Equal(expectedHash, mbHash) && len(peersList) >= target { log.Info("got consensus for metablock", "len", len(peersList)) return true diff --git a/epochStart/bootstrap/simpleMiniBlockInterceptor.go b/epochStart/bootstrap/simpleMiniBlockInterceptor.go index 00dadedcb51..c9a631cbad9 100644 --- a/epochStart/bootstrap/simpleMiniBlockInterceptor.go +++ b/epochStart/bootstrap/simpleMiniBlockInterceptor.go @@ -41,7 +41,6 @@ func NewSimpleMiniBlockInterceptor(marshalizer marshal.Marshalizer, hasher hashi // ProcessReceivedMessage will receive the metablocks and will add them to the maps func (s *simpleMiniBlockInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, _ func(buffToSend []byte)) error { - log.Info("received shard header") var mb block.MiniBlock err := s.marshalizer.Unmarshal(&mb, message.Data()) if err != nil { @@ -111,7 +110,6 @@ func (s *simpleMiniBlockInterceptor) isMapEntryOk( if err != nil { return false } - log.Info("peers map for shard hdr", "target", target, "num", len(peersList)) if bytes.Equal(expectedHash, mbHash) && len(peersList) >= target { log.Info("got consensus for metablock", "len", len(peersList)) return true diff --git a/epochStart/bootstrap/simpleShardHeaderInterceptor.go b/epochStart/bootstrap/simpleShardHeaderInterceptor.go index 339f9781ed7..f4fb9ff63d7 100644 --- a/epochStart/bootstrap/simpleShardHeaderInterceptor.go +++ b/epochStart/bootstrap/simpleShardHeaderInterceptor.go @@ -40,7 +40,6 @@ func NewSimpleShardHeaderInterceptor(marshalizer marshal.Marshalizer, hasher has // ProcessReceivedMessage will receive the metablocks and will add them to the maps func (s *simpleShardHeaderInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, _ func(buffToSend []byte)) error { - log.Info("received shard header") var mb block.Header err := s.marshalizer.Unmarshal(&mb, message.Data()) if err != nil { @@ -100,7 +99,6 @@ func (s 
*simpleShardHeaderInterceptor) isMapEntryOk( hash string, target int, ) bool { - log.Info("peers map for shard hdr", "target", target, "num", len(peersList)) if len(peersList) >= target { log.Info("got consensus for shard header", "len", len(peersList)) return true diff --git a/hashing/factory/errors.go b/hashing/factory/errors.go new file mode 100644 index 00000000000..f990e5fbff1 --- /dev/null +++ b/hashing/factory/errors.go @@ -0,0 +1,6 @@ +package factory + +import "errors" + +// ErrNoHasherInConfig signals that no hasher was provided in the config file +var ErrNoHasherInConfig = errors.New("no hasher provided in config file") diff --git a/hashing/factory/hasherFactory.go b/hashing/factory/hasherFactory.go new file mode 100644 index 00000000000..266c3d9be16 --- /dev/null +++ b/hashing/factory/hasherFactory.go @@ -0,0 +1,19 @@ +package factory + +import ( + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/hashing/blake2b" + "github.com/ElrondNetwork/elrond-go/hashing/sha256" +) + +// NewHasher will return a new instance of hasher based on the value stored in config +func NewHasher(name string) (hashing.Hasher, error) { + switch name { + case "sha256": + return sha256.Sha256{}, nil + case "blake2b": + return &blake2b.Blake2b{}, nil + } + + return nil, ErrNoHasherInConfig +} diff --git a/hashing/factory/hasherFactory_test.go b/hashing/factory/hasherFactory_test.go new file mode 100644 index 00000000000..08661e35ffd --- /dev/null +++ b/hashing/factory/hasherFactory_test.go @@ -0,0 +1,42 @@ +package factory + +import ( + "testing" + + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/hashing/blake2b" + "github.com/ElrondNetwork/elrond-go/hashing/sha256" + "github.com/stretchr/testify/assert" +) + +func TestNewHasher(t *testing.T) { + t.Parallel() + + type res struct { + hasher hashing.Hasher + err error + } + testData := make(map[string]res) + testData["sha256"] = res{ + hasher: sha256.Sha256{}, + err: nil, + } + testData["blake2b"] = res{ + hasher: &blake2b.Blake2b{}, + err: nil, + } + testData[""] = res{ + hasher: nil, + err: ErrNoHasherInConfig, + } + testData["invalid hasher name"] = res{ + hasher: nil, + err: ErrNoHasherInConfig, + } + + for key, value := range testData { + hasher, err := NewHasher(key) + assert.Equal(t, value.err, err) + assert.Equal(t, value.hasher, hasher) + } +} From f6b80e1f0b4e54b7c754e8f83e4526c36d103e73 Mon Sep 17 00:00:00 2001 From: bogdan-rosianu Date: Fri, 13 Mar 2020 16:32:21 +0200 Subject: [PATCH 13/61] EN-5829: integration test for start in epoch --- .../bootstrap/epochStartDataProvider.go | 8 +- epochStart/bootstrap/errors.go | 3 + .../endOfEpoch/startInEpoch_test.go | 174 ++++++++++++++++++ sharding/nodesSetup.go | 5 + 4 files changed, 186 insertions(+), 4 deletions(-) create mode 100644 integrationTests/multiShard/endOfEpoch/startInEpoch_test.go diff --git a/epochStart/bootstrap/epochStartDataProvider.go b/epochStart/bootstrap/epochStartDataProvider.go index f059931201f..b7fadf9c3cf 100644 --- a/epochStart/bootstrap/epochStartDataProvider.go +++ b/epochStart/bootstrap/epochStartDataProvider.go @@ -163,7 +163,7 @@ func (esdp *epochStartDataProvider) Bootstrap() (*ComponentsNeededForBootstrap, shardHeaders, err := esdp.getShardHeaders(metaBlock, nodesConfig, shardCoordinator) if err != nil { - return nil, err + log.Debug("shard headers not found", "error", err) } epochStartData, err := esdp.getCurrentEpochStartData(shardCoordinator, metaBlock) @@ -409,12 +409,12 @@ func (esdp 
*epochStartDataProvider) getShardCoordinator(metaBlock *block.MetaBlo
 		return nil, err
 	}

-	numOfShards := len(metaBlock.EpochStart.LastFinalizedHeaders)
+	numOfShards := nodesConfig.NumberOfShards()
 	if numOfShards == 1 {
 		return &sharding.OneShardCoordinator{}, nil
 	}

-	return sharding.NewMultiShardCoordinator(uint32(numOfShards), shardID)
+	return sharding.NewMultiShardCoordinator(numOfShards, shardID)
 }

 func (esdp *epochStartDataProvider) getShardHeaders(
@@ -446,7 +446,7 @@
 	}

 	if entryForShard == nil {
-		return nil, errors.New("shard data not found")
+		return nil, ErrShardDataNotFound
 	}

 	hdr, err := esdp.getShardHeader(
diff --git a/epochStart/bootstrap/errors.go b/epochStart/bootstrap/errors.go
index 0734ee070fe..9bb71f9a311 100644
--- a/epochStart/bootstrap/errors.go
+++ b/epochStart/bootstrap/errors.go
@@ -34,3 +34,6 @@ var ErrNumTriesExceeded = errors.New("num of tries exceeded. try re-request")

 // ErrNilShardCoordinator signals that a nil shard coordinator has been provided
 var ErrNilShardCoordinator = errors.New("nil shard coordinator")
+
+// ErrShardDataNotFound signals that no shard header has been found for the calculated shard
+var ErrShardDataNotFound = errors.New("shard data not found")
diff --git a/integrationTests/multiShard/endOfEpoch/startInEpoch_test.go b/integrationTests/multiShard/endOfEpoch/startInEpoch_test.go
new file mode 100644
index 00000000000..fabf097ca7d
--- /dev/null
+++ b/integrationTests/multiShard/endOfEpoch/startInEpoch_test.go
@@ -0,0 +1,174 @@
+package epochStart
+
+import (
+	"context"
+	"encoding/hex"
+	"math/big"
+	"testing"
+	"time"
+
+	"github.com/ElrondNetwork/elrond-go/config"
+	"github.com/ElrondNetwork/elrond-go/dataRetriever"
+	"github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/factory"
+	"github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/nodesconfigprovider"
+	"github.com/ElrondNetwork/elrond-go/integrationTests"
+	"github.com/ElrondNetwork/elrond-go/integrationTests/mock"
+	"github.com/ElrondNetwork/elrond-go/sharding"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestStartInEpochForAShardNodeInMultiShardedEnvironment(t *testing.T) {
+	if testing.Short() {
+		t.Skip("this is not a short test")
+	}
+
+	numOfShards := 2
+	totalNodesPerShard := 4
+	numNodesPerShardOnline := totalNodesPerShard - 1
+	shardCnsSize := 2
+	metaCnsSize := 3
+	numMetachainNodes := 3
+
+	advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "")
+	_ = advertiser.Bootstrap()
+
+	nodesMap := integrationTests.CreateNodesWithNodesCoordinator(
+		numNodesPerShardOnline,
+		numMetachainNodes,
+		numOfShards,
+		shardCnsSize,
+		metaCnsSize,
+		integrationTests.GetConnectableAddress(advertiser),
+	)
+
+	nodes := convertToSlice(nodesMap)
+
+	nodeToJoinLate := nodes[numNodesPerShardOnline] // will return the last node in shard 0 which was not used in consensus
+	_ = nodeToJoinLate.Messenger.Close() // set node offline
+
+	nodes = append(nodes[:numNodesPerShardOnline], nodes[numNodesPerShardOnline+1:]...)
+	nodes = append(nodes[:2*numNodesPerShardOnline], nodes[2*numNodesPerShardOnline+1:]...)
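// each append(s[:i], s[i+1:]...) deletes the element at index i, so the two
// lines above are meant to drop the spare (now offline) node of each shard
// from the working set; only the online nodes keep proposing and syncing
// blocks in the rounds below.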
+ + roundsPerEpoch := uint64(10) + for _, node := range nodes { + node.EpochStartTrigger.SetRoundsPerEpoch(roundsPerEpoch) + } + + idxProposers := make([]int, numOfShards+1) + for i := 0; i < numOfShards; i++ { + idxProposers[i] = i * numNodesPerShardOnline + } + idxProposers[numOfShards] = numOfShards * numNodesPerShardOnline + + integrationTests.DisplayAndStartNodes(nodes) + + defer func() { + _ = advertiser.Close() + for _, n := range nodes { + _ = n.Node.Stop() + } + }() + + initialVal := big.NewInt(10000000) + sendValue := big.NewInt(5) + integrationTests.MintAllNodes(nodes, initialVal) + receiverAddress := []byte("12345678901234567890123456789012") + + round := uint64(0) + nonce := uint64(0) + round = integrationTests.IncrementAndPrintRound(round) + nonce++ + + time.Sleep(time.Second) + + /////////----- wait for epoch end period + epoch := uint32(2) + nrRoundsToPropagateMultiShard := uint64(5) + for i := uint64(0); i <= (uint64(epoch)*roundsPerEpoch)+nrRoundsToPropagateMultiShard; i++ { + integrationTests.UpdateRound(nodes, round) + integrationTests.ProposeBlock(nodes, idxProposers, round, nonce) + integrationTests.SyncBlock(t, nodes, idxProposers, round) + round = integrationTests.IncrementAndPrintRound(round) + nonce++ + + for _, node := range nodes { + integrationTests.CreateAndSendTransaction(node, sendValue, receiverAddress, "") + } + + time.Sleep(time.Second) + } + + time.Sleep(time.Second) + + verifyIfNodesHasCorrectEpoch(t, epoch, nodes) + verifyIfAddedShardHeadersAreWithNewEpoch(t, nodes) + + epochHandler := &mock.EpochStartTriggerStub{ + EpochCalled: func() uint32 { + return epoch + }, + } + for _, node := range nodes { + _ = dataRetriever.SetEpochHandlerToHdrResolver(node.ResolversContainer, epochHandler) + } + + nodesConfig := sharding.NodesSetup{ + RoundDuration: 4000, + InitialNodes: getInitialNodes(nodesMap), + } + nodesConfig.SetNumberOfShards(uint32(numOfShards)) + + epochStartProviderFactoryArgs := factory.EpochStartDataProviderFactoryArgs{ + PubKey: nodeToJoinLate.NodeKeys.Pk, + Messenger: advertiser, + Marshalizer: integrationTests.TestMarshalizer, + Hasher: integrationTests.TestHasher, + NodesConfigProvider: nodesconfigprovider.NewSimpleNodesConfigProvider(&nodesConfig), + StartTime: time.Time{}, + OriginalNodesConfig: &nodesConfig, + GeneralConfig: &config.Config{ + EpochStartConfig: config.EpochStartConfig{ + MinRoundsBetweenEpochs: 5, + RoundsPerEpoch: 10, + }, + }, + IsEpochFoundInStorage: false, + } + epochStartDataProviderFactory, _ := factory.NewEpochStartDataProviderFactory(epochStartProviderFactoryArgs) + epochStartDataProvider, _ := epochStartDataProviderFactory.Create() + + res, err := epochStartDataProvider.Bootstrap() + assert.NoError(t, err) + assert.NotNil(t, res) + // TODO: add more checks + assert.Equal(t, epoch, res.EpochStartMetaBlock.Epoch) +} + +func convertToSlice(originalMap map[uint32][]*integrationTests.TestProcessorNode) []*integrationTests.TestProcessorNode { + sliceToRet := make([]*integrationTests.TestProcessorNode, 0) + for _, nodesPerShard := range originalMap { + for _, node := range nodesPerShard { + sliceToRet = append(sliceToRet, node) + } + } + + return sliceToRet +} + +func getInitialNodes(nodesMap map[uint32][]*integrationTests.TestProcessorNode) []*sharding.InitialNode { + sliceToRet := make([]*sharding.InitialNode, 0) + for _, nodesPerShard := range nodesMap { + for _, node := range nodesPerShard { + pubKeyBytes, _ := node.NodeKeys.Pk.ToByteArray() + addressBytes := node.OwnAccount.Address.Bytes() + entry := 
&sharding.InitialNode{ + PubKey: hex.EncodeToString(pubKeyBytes), + Address: hex.EncodeToString(addressBytes), + NodeInfo: sharding.NodeInfo{}, + } + sliceToRet = append(sliceToRet, entry) + } + } + + return sliceToRet +} diff --git a/sharding/nodesSetup.go b/sharding/nodesSetup.go index 53e7ca13fef..d56e3e43d7c 100644 --- a/sharding/nodesSetup.go +++ b/sharding/nodesSetup.go @@ -253,6 +253,11 @@ func (ns *NodesSetup) InitialNodesInfoForShard(shardId uint32) ([]*NodeInfo, []* return ns.eligible[shardId], ns.waiting[shardId], nil } +// SetNumberOfShards will update the number of shards. Should be used only when testing +func (ns *NodesSetup) SetNumberOfShards(numShards uint32) { + ns.nrOfShards = numShards +} + // NumberOfShards returns the calculated number of shards func (ns *NodesSetup) NumberOfShards() uint32 { return ns.nrOfShards From b1898226a310d52cea721c98f3e8cee00ca824fd Mon Sep 17 00:00:00 2001 From: bogdan-rosianu Date: Fri, 13 Mar 2020 19:07:22 +0200 Subject: [PATCH 14/61] EN-5829: fix after review --- cmd/node/factory/structs.go | 5 +- cmd/node/main.go | 20 +++--- .../topicResolverSender.go | 4 +- .../bootstrap/epochStartDataProvider.go | 66 ++++++++++++------- epochStart/bootstrap/interface.go | 2 +- .../mock/shardHeaderInterceptorStub.go | 6 +- .../simpleEpochStartMetaBlockInterceptor.go | 4 +- .../bootstrap/simpleMetaBlockInterceptor.go | 4 +- .../bootstrap/simpleMiniBlockInterceptor.go | 8 ++- .../bootstrap/simpleShardHeaderInterceptor.go | 31 ++++++--- 10 files changed, 98 insertions(+), 52 deletions(-) diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index c5004a79bd9..e17b7c5eab1 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -1533,7 +1533,10 @@ func generateGenesisHeadersAndApplyInitialBalances(args *processComponentsFactor return nil, err } - cache, _ := storageUnit.NewCache(storageUnit.LRUCache, 10, 1) + cache, errNewCache := storageUnit.NewCache(storageUnit.LRUCache, 10, 1) + if errNewCache != nil { + return nil, errNewCache + } newBlkc, errNewMetachain := blockchain.NewMetaChain(cache) if errNewMetachain != nil { return nil, errNewMetachain diff --git a/cmd/node/main.go b/cmd/node/main.go index c3060371aa1..64cbc7e4339 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -66,13 +66,14 @@ import ( ) const ( - defaultStatsPath = "stats" - defaultLogsPath = "logs" - defaultDBPath = "db" - defaultEpochString = "Epoch" - defaultStaticDbString = "Static" - defaultShardString = "Shard" - metachainShardName = "metachain" + defaultStatsPath = "stats" + defaultLogsPath = "logs" + defaultDBPath = "db" + defaultEpochString = "Epoch" + defaultStaticDbString = "Static" + defaultShardString = "Shard" + metachainShardName = "metachain" + secondsToWaitForP2PBootstrap = 3 ) var ( @@ -545,6 +546,9 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { } var errNotCritical error + // TODO: add a component which opens headers storer and gets the last epoch start metablock + // in order to provide the last known epoch in storage. 
Right now, it won't work as expected + // if storage pruning is disabled currentEpoch, errNotCritical = storageFactory.FindLastEpochFromStorage( workingDir, nodesConfig.ChainID, @@ -566,7 +570,7 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { if err != nil { return err } - time.Sleep(3 * time.Second) + time.Sleep(secondsToWaitForP2PBootstrap * time.Second) marshalizer, err := factoryMarshal.NewMarshalizer(generalConfig.Marshalizer.Type) if err != nil { diff --git a/dataRetriever/resolvers/topicResolverSender/topicResolverSender.go b/dataRetriever/resolvers/topicResolverSender/topicResolverSender.go index cc6858dede0..235ba15b06f 100644 --- a/dataRetriever/resolvers/topicResolverSender/topicResolverSender.go +++ b/dataRetriever/resolvers/topicResolverSender/topicResolverSender.go @@ -9,6 +9,8 @@ import ( // topicRequestSuffix represents the topic name suffix const topicRequestSuffix = "_REQUEST" +const minNumPeersToQuery = 1 + type topicResolverSender struct { messenger dataRetriever.MessageHandler marshalizer marshal.Marshalizer @@ -42,7 +44,7 @@ func NewTopicResolverSender( if peerListCreator == nil || peerListCreator.IsInterfaceNil() { return nil, dataRetriever.ErrNilPeerListCreator } - if numPeersToQuery < 1 { + if numPeersToQuery < minNumPeersToQuery { return nil, dataRetriever.ErrInvalidNumberOfPeersToQuery } diff --git a/epochStart/bootstrap/epochStartDataProvider.go b/epochStart/bootstrap/epochStartDataProvider.go index f059931201f..7cdd4e4cfcf 100644 --- a/epochStart/bootstrap/epochStartDataProvider.go +++ b/epochStart/bootstrap/epochStartDataProvider.go @@ -10,10 +10,9 @@ import ( "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/core/partitioning" "github.com/ElrondNetwork/elrond-go/crypto" - "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/data/state" - factory2 "github.com/ElrondNetwork/elrond-go/data/state/factory" + "github.com/ElrondNetwork/elrond-go/data/trie" factory3 "github.com/ElrondNetwork/elrond-go/data/trie/factory" "github.com/ElrondNetwork/elrond-go/data/typeConverters/uint64ByteSlice" "github.com/ElrondNetwork/elrond-go/dataRetriever" @@ -22,7 +21,6 @@ import ( "github.com/ElrondNetwork/elrond-go/dataRetriever/requestHandlers" "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/disabled" "github.com/ElrondNetwork/elrond-go/hashing" - "github.com/ElrondNetwork/elrond-go/integrationTests" "github.com/ElrondNetwork/elrond-go/logger" "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/p2p" @@ -40,6 +38,7 @@ const delayBetweenRequests = 1 * time.Second const delayAfterRequesting = 1 * time.Second const thresholdForConsideringMetaBlockCorrect = 0.2 const numRequestsToSendOnce = 4 +const maxNumTimesToRetry = 100 // ComponentsNeededForBootstrap holds the components which need to be initialized from network type ComponentsNeededForBootstrap struct { @@ -61,7 +60,7 @@ type epochStartDataProvider struct { metaBlockInterceptor MetaBlockInterceptorHandler shardHeaderInterceptor ShardHeaderInterceptorHandler miniBlockInterceptor MiniBlockInterceptorHandler - requestHandlerMeta process.RequestHandler + requestHandler process.RequestHandler } // ArgsEpochStartDataProvider holds the arguments needed for creating an epoch start data provider component @@ -135,7 +134,7 @@ func (esdp *epochStartDataProvider) Bootstrap() (*ComponentsNeededForBootstrap, return nil, err } - esdp.requestHandlerMeta = 
requestHandlerMeta + esdp.requestHandler = requestHandlerMeta epochNumForRequestingTheLatestAvailable := uint32(math.MaxUint32) metaBlock, err := esdp.getEpochStartMetaBlock(epochNumForRequestingTheLatestAvailable) @@ -172,9 +171,9 @@ func (esdp *epochStartDataProvider) Bootstrap() (*ComponentsNeededForBootstrap, } for _, mb := range epochStartData.PendingMiniBlockHeaders { - receivedMb, err := esdp.getMiniBlock(&mb) - if err != nil { - return nil, err + receivedMb, errGetMb := esdp.getMiniBlock(&mb) + if errGetMb != nil { + return nil, errGetMb } log.Info("received miniblock", "type", receivedMb.Type) } @@ -191,7 +190,7 @@ func (esdp *epochStartDataProvider) Bootstrap() (*ComponentsNeededForBootstrap, } log.Info("received first pending meta block", "nonce", firstPendingMetaBlock.Nonce) - trie, err := esdp.getTrieFromRootHash(epochStartData.RootHash) + trieToReturn, err := esdp.getTrieFromRootHash(epochStartData.RootHash) if err != nil { return nil, err } @@ -201,7 +200,7 @@ func (esdp *epochStartDataProvider) Bootstrap() (*ComponentsNeededForBootstrap, NodesConfig: nodesConfig, ShardHeaders: shardHeaders, ShardCoordinator: shardCoordinator, - Tries: trie, + Tries: trieToReturn, }, nil } @@ -236,13 +235,26 @@ func (esdp *epochStartDataProvider) createRequestHandler() (process.RequestHandl cacher := disabled.NewDisabledPoolsHolder() triesHolder := state.NewDataTriesHolder() - var stateTrie data.Trie - // TODO: change from integrationsTests.CreateAccountsDB - _, stateTrie, _ = integrationTests.CreateAccountsDB(factory2.UserAccount) + + stateTrieStorageManager, err := trie.NewTrieStorageManagerWithoutPruning(disabled.NewDisabledStorer()) + if err != nil { + return nil, err + } + stateTrie, err := trie.NewTrie(stateTrieStorageManager, esdp.marshalizer, esdp.hasher) + if err != nil { + return nil, err + } triesHolder.Put([]byte(factory3.UserAccountTrie), stateTrie) - var peerTrie data.Trie - _, peerTrie, _ = integrationTests.CreateAccountsDB(factory2.ValidatorAccount) + peerTrieStorageManager, err := trie.NewTrieStorageManagerWithoutPruning(disabled.NewDisabledStorer()) + if err != nil { + return nil, err + } + + peerTrie, err := trie.NewTrie(peerTrieStorageManager, esdp.marshalizer, esdp.hasher) + if err != nil { + return nil, err + } triesHolder.Put([]byte(factory3.PeerAccountTrie), peerTrie) resolversContainerArgs := resolverscontainer.FactoryArgs{ @@ -297,7 +309,7 @@ func (esdp *epochStartDataProvider) getMiniBlock(miniBlockHeader *block.ShardMin } func (esdp *epochStartDataProvider) requestMiniBlock(miniBlockHeader *block.ShardMiniBlockHeader) { - esdp.requestHandlerMeta.RequestMiniBlock(miniBlockHeader.ReceiverShardID, miniBlockHeader.Hash) + esdp.requestHandler.RequestMiniBlock(miniBlockHeader.ReceiverShardID, miniBlockHeader.Hash) } func (esdp *epochStartDataProvider) getCurrentEpochStartData( @@ -390,8 +402,13 @@ func (esdp *epochStartDataProvider) getEpochStartMetaBlock(epoch uint32) (*block esdp.requestEpochStartMetaBlock(epoch) time.Sleep(delayAfterRequesting) + count := 0 for { + if count > maxNumTimesToRetry { + panic("can't sync with other peers") + } + count++ numConnectedPeers := len(esdp.messenger.Peers()) threshold := int(thresholdForConsideringMetaBlockCorrect * float64(numConnectedPeers)) mb, errConsensusNotReached := esdp.epochStartMetaBlockInterceptor.GetEpochStartMetaBlock(threshold, epoch) @@ -410,10 +427,6 @@ func (esdp *epochStartDataProvider) getShardCoordinator(metaBlock *block.MetaBlo } numOfShards := len(metaBlock.EpochStart.LastFinalizedHeaders) - if numOfShards 
== 1 { - return &sharding.OneShardCoordinator{}, nil - } - return sharding.NewMultiShardCoordinator(uint32(numOfShards), shardID) } @@ -468,10 +481,15 @@ func (esdp *epochStartDataProvider) getShardHeader( esdp.requestShardHeader(shardID, hash) time.Sleep(delayBetweenRequests) + count := 0 for { + if count > maxNumTimesToRetry { + panic("can't sync with the other peers") + } + count++ numConnectedPeers := len(esdp.messenger.Peers()) threshold := int(thresholdForConsideringMetaBlockCorrect * float64(numConnectedPeers)) - mb, errConsensusNotReached := esdp.shardHeaderInterceptor.GetShardHeader(threshold) + mb, errConsensusNotReached := esdp.shardHeaderInterceptor.GetShardHeader(hash, threshold) if errConsensusNotReached == nil { return mb, nil } @@ -485,7 +503,7 @@ func (esdp *epochStartDataProvider) requestMetaBlock(hash []byte) { log.Debug("requested meta block", "hash", hash) for i := 0; i < numRequestsToSendOnce; i++ { time.Sleep(delayBetweenRequests) - esdp.requestHandlerMeta.RequestMetaHeader(hash) + esdp.requestHandler.RequestMetaHeader(hash) } } @@ -494,7 +512,7 @@ func (esdp *epochStartDataProvider) requestShardHeader(shardID uint32, hash []by log.Debug("requested shard block", "shard ID", shardID, "hash", hash) for i := 0; i < numRequestsToSendOnce; i++ { time.Sleep(delayBetweenRequests) - esdp.requestHandlerMeta.RequestShardHeader(shardID, hash) + esdp.requestHandler.RequestShardHeader(shardID, hash) } } @@ -502,7 +520,7 @@ func (esdp *epochStartDataProvider) requestEpochStartMetaBlock(epoch uint32) { // send more requests for i := 0; i < numRequestsToSendOnce; i++ { time.Sleep(delayBetweenRequests) - esdp.requestHandlerMeta.RequestStartOfEpochMetaBlock(epoch) + esdp.requestHandler.RequestStartOfEpochMetaBlock(epoch) } } diff --git a/epochStart/bootstrap/interface.go b/epochStart/bootstrap/interface.go index 7bb3dd4b1f4..cb7c5f9dbfd 100644 --- a/epochStart/bootstrap/interface.go +++ b/epochStart/bootstrap/interface.go @@ -21,7 +21,7 @@ type MetaBlockInterceptorHandler interface { // ShardHeaderInterceptorHandler defines what a component which will handle receiving the the shard headers should do type ShardHeaderInterceptorHandler interface { process.Interceptor - GetShardHeader(target int) (*block.Header, error) + GetShardHeader(hash []byte, target int) (*block.Header, error) } // MiniBlockInterceptorHandler defines what a component which will handle receiving the mini blocks should do diff --git a/epochStart/bootstrap/mock/shardHeaderInterceptorStub.go b/epochStart/bootstrap/mock/shardHeaderInterceptorStub.go index 69c8f0ad6f3..3e9e8a3af8f 100644 --- a/epochStart/bootstrap/mock/shardHeaderInterceptorStub.go +++ b/epochStart/bootstrap/mock/shardHeaderInterceptorStub.go @@ -9,13 +9,13 @@ import ( type ShardHeaderInterceptorStub struct { ProcessReceivedMessageCalled func(message p2p.MessageP2P, broadcastHandler func(buffToSend []byte)) error GetAllReceivedShardHeadersCalled func() []block.ShardData - GetShardHeaderCalled func(target int) (*block.Header, error) + GetShardHeaderCalled func(hash []byte, target int) (*block.Header, error) } // GetShardHeader - -func (s *ShardHeaderInterceptorStub) GetShardHeader(target int) (*block.Header, error) { +func (s *ShardHeaderInterceptorStub) GetShardHeader(hash []byte, target int) (*block.Header, error) { if s.GetShardHeaderCalled != nil { - return s.GetShardHeaderCalled(target) + return s.GetShardHeaderCalled(hash, target) } return &block.Header{}, nil diff --git a/epochStart/bootstrap/simpleEpochStartMetaBlockInterceptor.go 
b/epochStart/bootstrap/simpleEpochStartMetaBlockInterceptor.go index 9a5a4527802..6a4fbf84cc3 100644 --- a/epochStart/bootstrap/simpleEpochStartMetaBlockInterceptor.go +++ b/epochStart/bootstrap/simpleEpochStartMetaBlockInterceptor.go @@ -49,12 +49,14 @@ func (s *simpleEpochStartMetaBlockInterceptor) ProcessReceivedMessage(message p2 if err != nil { return err } + s.mutReceivedMetaBlocks.Lock() mbHash, err := core.CalculateHash(s.marshalizer, s.hasher, &mb) if err != nil { s.mutReceivedMetaBlocks.Unlock() return err } + s.mapReceivedMetaBlocks[string(mbHash)] = &mb s.addToPeerList(string(mbHash), message.Peer()) s.mutReceivedMetaBlocks.Unlock() @@ -65,7 +67,6 @@ func (s *simpleEpochStartMetaBlockInterceptor) ProcessReceivedMessage(message p2 // this func should be called under mutex protection func (s *simpleEpochStartMetaBlockInterceptor) addToPeerList(hash string, id p2p.PeerID) { peersListForHash, ok := s.mapMetaBlocksFromPeers[hash] - if !ok { s.mapMetaBlocksFromPeers[hash] = append(s.mapMetaBlocksFromPeers[hash], id) return @@ -82,6 +83,7 @@ func (s *simpleEpochStartMetaBlockInterceptor) addToPeerList(hash string, id p2p // GetEpochStartMetaBlock will return the metablock after it is confirmed or an error if the number of tries was exceeded func (s *simpleEpochStartMetaBlockInterceptor) GetEpochStartMetaBlock(target int, epoch uint32) (*block.MetaBlock, error) { + // TODO : replace this with a channel which will be written in when data is ready for count := 0; count < numTriesUntilExit; count++ { time.Sleep(timeToWaitBeforeCheckingReceivedHeaders) s.mutReceivedMetaBlocks.RLock() diff --git a/epochStart/bootstrap/simpleMetaBlockInterceptor.go b/epochStart/bootstrap/simpleMetaBlockInterceptor.go index c7540cb7a65..8399b8db4e5 100644 --- a/epochStart/bootstrap/simpleMetaBlockInterceptor.go +++ b/epochStart/bootstrap/simpleMetaBlockInterceptor.go @@ -46,12 +46,14 @@ func (s *simpleMetaBlockInterceptor) ProcessReceivedMessage(message p2p.MessageP if err != nil { return err } + s.mutReceivedMetaBlocks.Lock() mbHash, err := core.CalculateHash(s.marshalizer, s.hasher, &mb) if err != nil { s.mutReceivedMetaBlocks.Unlock() return err } + s.mapReceivedMetaBlocks[string(mbHash)] = &mb s.addToPeerList(string(mbHash), message.Peer()) s.mutReceivedMetaBlocks.Unlock() @@ -62,7 +64,6 @@ func (s *simpleMetaBlockInterceptor) ProcessReceivedMessage(message p2p.MessageP // this func should be called under mutex protection func (s *simpleMetaBlockInterceptor) addToPeerList(hash string, id p2p.PeerID) { peersListForHash, ok := s.mapMetaBlocksFromPeers[hash] - if !ok { s.mapMetaBlocksFromPeers[hash] = append(s.mapMetaBlocksFromPeers[hash], id) return @@ -79,6 +80,7 @@ func (s *simpleMetaBlockInterceptor) addToPeerList(hash string, id p2p.PeerID) { // GetMetaBlock will return the metablock after it is confirmed or an error if the number of tries was exceeded func (s *simpleMetaBlockInterceptor) GetMetaBlock(hash []byte, target int) (*block.MetaBlock, error) { + // TODO : replace this with a channel which will be written in when data is ready for count := 0; count < numTriesUntilExit; count++ { time.Sleep(timeToWaitBeforeCheckingReceivedHeaders) s.mutReceivedMetaBlocks.RLock() diff --git a/epochStart/bootstrap/simpleMiniBlockInterceptor.go b/epochStart/bootstrap/simpleMiniBlockInterceptor.go index c9a631cbad9..307461d0136 100644 --- a/epochStart/bootstrap/simpleMiniBlockInterceptor.go +++ b/epochStart/bootstrap/simpleMiniBlockInterceptor.go @@ -46,12 +46,14 @@ func (s *simpleMiniBlockInterceptor) 
ProcessReceivedMessage(message p2p.MessageP if err != nil { return err } + s.mutReceivedMiniBlocks.Lock() mbHash, err := core.CalculateHash(s.marshalizer, s.hasher, &mb) if err != nil { s.mutReceivedMiniBlocks.Unlock() return err } + s.mapReceivedMiniBlocks[string(mbHash)] = &mb s.addToPeerList(string(mbHash), message.Peer()) s.mutReceivedMiniBlocks.Unlock() @@ -62,7 +64,6 @@ func (s *simpleMiniBlockInterceptor) ProcessReceivedMessage(message p2p.MessageP // this func should be called under mutex protection func (s *simpleMiniBlockInterceptor) addToPeerList(hash string, id p2p.PeerID) { peersListForHash, ok := s.mapMiniBlocksFromPeers[hash] - if !ok { s.mapMiniBlocksFromPeers[hash] = append(s.mapMiniBlocksFromPeers[hash], id) return @@ -77,8 +78,9 @@ func (s *simpleMiniBlockInterceptor) addToPeerList(hash string, id p2p.PeerID) { s.mapMiniBlocksFromPeers[hash] = append(s.mapMiniBlocksFromPeers[hash], id) } -// GetMiniBlock will return the metablock after it is confirmed or an error if the number of tries was exceeded +// GetMiniBlock will return the miniblock with the given hash func (s *simpleMiniBlockInterceptor) GetMiniBlock(hash []byte, target int) (*block.MiniBlock, error) { + // TODO : replace this with a channel which will be written in when data is ready for count := 0; count < numTriesUntilExit; count++ { time.Sleep(timeToWaitBeforeCheckingReceivedHeaders) s.mutReceivedMiniBlocks.RLock() @@ -111,7 +113,7 @@ func (s *simpleMiniBlockInterceptor) isMapEntryOk( return false } if bytes.Equal(expectedHash, mbHash) && len(peersList) >= target { - log.Info("got consensus for metablock", "len", len(peersList)) + log.Info("got consensus for mini block", "len", len(peersList)) return true } diff --git a/epochStart/bootstrap/simpleShardHeaderInterceptor.go b/epochStart/bootstrap/simpleShardHeaderInterceptor.go index f4fb9ff63d7..98ae355d561 100644 --- a/epochStart/bootstrap/simpleShardHeaderInterceptor.go +++ b/epochStart/bootstrap/simpleShardHeaderInterceptor.go @@ -1,6 +1,7 @@ package bootstrap import ( + "bytes" "sync" "time" @@ -45,12 +46,14 @@ func (s *simpleShardHeaderInterceptor) ProcessReceivedMessage(message p2p.Messag if err != nil { return err } + s.mutReceivedShardHeaders.Lock() mbHash, err := core.CalculateHash(s.marshalizer, s.hasher, &mb) if err != nil { s.mutReceivedShardHeaders.Unlock() return err } + s.mapReceivedShardHeaders[string(mbHash)] = &mb s.addToPeerList(string(mbHash), message.Peer()) s.mutReceivedShardHeaders.Unlock() @@ -61,7 +64,6 @@ func (s *simpleShardHeaderInterceptor) ProcessReceivedMessage(message p2p.Messag // this func should be called under mutex protection func (s *simpleShardHeaderInterceptor) addToPeerList(hash string, id p2p.PeerID) { peersListForHash, ok := s.mapShardHeadersFromPeers[hash] - if !ok { s.mapShardHeadersFromPeers[hash] = append(s.mapShardHeadersFromPeers[hash], id) return @@ -76,16 +78,17 @@ func (s *simpleShardHeaderInterceptor) addToPeerList(hash string, id p2p.PeerID) s.mapShardHeadersFromPeers[hash] = append(s.mapShardHeadersFromPeers[hash], id) } -// GetShardHeader will return the metablock after it is confirmed or an error if the number of tries was exceeded -func (s *simpleShardHeaderInterceptor) GetShardHeader(target int) (*block.Header, error) { +// GetShardHeader will return the shard header +func (s *simpleShardHeaderInterceptor) GetShardHeader(hash []byte, target int) (*block.Header, error) { + // TODO : replace this with a channel which will be written in when data is ready for count := 0; count < numTriesUntilExit; 
count++ { time.Sleep(timeToWaitBeforeCheckingReceivedHeaders) s.mutReceivedShardHeaders.RLock() - for hash, peersList := range s.mapShardHeadersFromPeers { - isOk := s.isMapEntryOk(peersList, hash, target) + for hashInMap, peersList := range s.mapShardHeadersFromPeers { + isOk := s.isMapEntryOk(hash, peersList, hashInMap, target) if isOk { s.mutReceivedShardHeaders.RUnlock() - return s.mapReceivedShardHeaders[hash], nil + return s.mapReceivedShardHeaders[hashInMap], nil } } s.mutReceivedShardHeaders.RUnlock() @@ -95,12 +98,22 @@ func (s *simpleShardHeaderInterceptor) GetShardHeader(target int) (*block.Header } func (s *simpleShardHeaderInterceptor) isMapEntryOk( + expectedHash []byte, peersList []p2p.PeerID, - hash string, + hashInMap string, target int, ) bool { - if len(peersList) >= target { - log.Info("got consensus for shard header", "len", len(peersList)) + mb, ok := s.mapReceivedShardHeaders[string(expectedHash)] + if !ok { + return false + } + + hdrHash, err := core.CalculateHash(s.marshalizer, s.hasher, mb) + if err != nil { + return false + } + if bytes.Equal(expectedHash, hdrHash) && len(peersList) >= target { + log.Info("got consensus for shard block", "len", len(peersList)) return true } From 8f49fea75e144a40d055bcffbe3e83a051477191 Mon Sep 17 00:00:00 2001 From: bogdan-rosianu Date: Tue, 17 Mar 2020 18:35:40 +0200 Subject: [PATCH 15/61] EN-5829: link with bootstrap from storage (WIP) --- cmd/node/main.go | 3 + .../disabled/disabledEpochStartNotifier.go | 31 +++ .../bootstrap/epochStartDataProvider.go | 81 +++++-- .../bootstrap/epochStartDataProvider_test.go | 12 ++ epochStart/bootstrap/errors.go | 3 + .../factory/disabledEpochStartDataProvider.go | 4 +- .../factory/epochStartDataProviderFactory.go | 11 + epochStart/bootstrap/interface.go | 16 +- epochStart/bootstrap/mock/pathManagerStub.go | 32 +++ .../storagehandler/baseStorageHandler.go | 69 ++++++ .../storagehandler/metaStorageHandler.go | 181 ++++++++++++++++ .../storagehandler/shardStorageHandler.go | 197 ++++++++++++++++++ epochStart/bootstrap/structs/components.go | 19 ++ .../metaStorageBootstrapper.go | 6 +- .../shardStorageBootstrapper.go | 8 +- 15 files changed, 646 insertions(+), 27 deletions(-) create mode 100644 epochStart/bootstrap/disabled/disabledEpochStartNotifier.go create mode 100644 epochStart/bootstrap/mock/pathManagerStub.go create mode 100644 epochStart/bootstrap/storagehandler/baseStorageHandler.go create mode 100644 epochStart/bootstrap/storagehandler/metaStorageHandler.go create mode 100644 epochStart/bootstrap/storagehandler/shardStorageHandler.go create mode 100644 epochStart/bootstrap/structs/components.go diff --git a/cmd/node/main.go b/cmd/node/main.go index 64cbc7e4339..1efa1e297f3 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -586,6 +586,7 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { Marshalizer: marshalizer, Hasher: hasher, NodesConfigProvider: nodesconfigprovider.NewSimpleNodesConfigProvider(nodesConfig), + PathManager: pathManager, StartTime: startTime, OriginalNodesConfig: nodesConfig, GeneralConfig: generalConfig, @@ -606,6 +607,8 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { isFreshStart := err != nil if !isFreshStart { nodesConfig = res.NodesConfig + currentEpoch = res.EpochStartMetaBlock.Epoch + bootstrapRoundIndex.Value = res.EpochStartMetaBlock.Round log.Info("received epoch start metablock from network", "nonce", res.EpochStartMetaBlock.GetNonce(), "epoch", res.EpochStartMetaBlock.GetEpoch()) diff --git 
a/epochStart/bootstrap/disabled/disabledEpochStartNotifier.go b/epochStart/bootstrap/disabled/disabledEpochStartNotifier.go
new file mode 100644
index 00000000000..7abbb4950b4
--- /dev/null
+++ b/epochStart/bootstrap/disabled/disabledEpochStartNotifier.go
@@ -0,0 +1,31 @@
+package disabled
+
+import (
+	"github.com/ElrondNetwork/elrond-go/data"
+	"github.com/ElrondNetwork/elrond-go/epochStart"
+)
+
+// EpochStartNotifier -
+type EpochStartNotifier struct {
+}
+
+// RegisterHandler -
+func (desn *EpochStartNotifier) RegisterHandler(handler epochStart.ActionHandler) {
+}
+
+// UnregisterHandler -
+func (desn *EpochStartNotifier) UnregisterHandler(handler epochStart.ActionHandler) {
+}
+
+// NotifyAllPrepare -
+func (desn *EpochStartNotifier) NotifyAllPrepare(metaHeader data.HeaderHandler) {
+}
+
+// NotifyAll -
+func (desn *EpochStartNotifier) NotifyAll(hdr data.HeaderHandler) {
+}
+
+// IsInterfaceNil -
+func (desn *EpochStartNotifier) IsInterfaceNil() bool {
+	return desn == nil
+}
diff --git a/epochStart/bootstrap/epochStartDataProvider.go b/epochStart/bootstrap/epochStartDataProvider.go
index 7cdd4e4cfcf..6c668eea2b4 100644
--- a/epochStart/bootstrap/epochStartDataProvider.go
+++ b/epochStart/bootstrap/epochStartDataProvider.go
@@ -6,6 +6,7 @@ import (
 	"math"
 	"time"
 
+	"github.com/ElrondNetwork/elrond-go/config"
 	"github.com/ElrondNetwork/elrond-go/core"
 	"github.com/ElrondNetwork/elrond-go/core/check"
 	"github.com/ElrondNetwork/elrond-go/core/partitioning"
@@ -20,6 +21,8 @@ import (
 	"github.com/ElrondNetwork/elrond-go/dataRetriever/factory/resolverscontainer"
 	"github.com/ElrondNetwork/elrond-go/dataRetriever/requestHandlers"
 	"github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/disabled"
+	"github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/storagehandler"
+	"github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/structs"
 	"github.com/ElrondNetwork/elrond-go/hashing"
 	"github.com/ElrondNetwork/elrond-go/logger"
 	"github.com/ElrondNetwork/elrond-go/marshal"
@@ -40,21 +43,14 @@ const thresholdForConsideringMetaBlockCorrect = 0.2
 const numRequestsToSendOnce = 4
 const maxNumTimesToRetry = 100
 
-// ComponentsNeededForBootstrap holds the components which need to be initialized from network
-type ComponentsNeededForBootstrap struct {
-	EpochStartMetaBlock *block.MetaBlock
-	NodesConfig         *sharding.NodesSetup
-	ShardHeaders        map[uint32]*block.Header
-	ShardCoordinator    sharding.Coordinator
-	Tries               state.TriesHolder
-}
-
 // epochStartDataProvider will handle requesting the needed data to start when joining the network late
 type epochStartDataProvider struct {
 	publicKey                      crypto.PublicKey
 	marshalizer                    marshal.Marshalizer
 	hasher                         hashing.Hasher
 	messenger                      p2p.Messenger
+	generalConfig                  config.Config
+	pathManager                    PathManagerHandler
 	nodesConfigProvider            NodesConfigProviderHandler
 	epochStartMetaBlockInterceptor EpochStartMetaBlockInterceptorHandler
 	metaBlockInterceptor           MetaBlockInterceptorHandler
@@ -69,6 +65,8 @@ type ArgsEpochStartDataProvider struct {
 	Messenger                      p2p.Messenger
 	Marshalizer                    marshal.Marshalizer
 	Hasher                         hashing.Hasher
+	GeneralConfig                  config.Config
+	PathManager                    PathManagerHandler
 	NodesConfigProvider            NodesConfigProviderHandler
 	EpochStartMetaBlockInterceptor EpochStartMetaBlockInterceptorHandler
 	MetaBlockInterceptor           MetaBlockInterceptorHandler
@@ -91,6 +89,9 @@ func NewEpochStartDataProvider(args ArgsEpochStartDataProvider) (*epochStartData
 	if check.IfNil(args.Hasher) {
 		return nil, ErrNilHasher
 	}
+	if check.IfNil(args.PathManager) {
+		return nil, ErrNilPathManager
+	}
 	if 
check.IfNil(args.NodesConfigProvider) { return nil, ErrNilNodesConfigProvider } @@ -111,6 +112,8 @@ func NewEpochStartDataProvider(args ArgsEpochStartDataProvider) (*epochStartData marshalizer: args.Marshalizer, hasher: args.Hasher, messenger: args.Messenger, + generalConfig: args.GeneralConfig, + pathManager: args.PathManager, nodesConfigProvider: args.NodesConfigProvider, epochStartMetaBlockInterceptor: args.EpochStartMetaBlockInterceptor, metaBlockInterceptor: args.MetaBlockInterceptor, @@ -120,7 +123,7 @@ func NewEpochStartDataProvider(args ArgsEpochStartDataProvider) (*epochStartData } // Bootstrap will handle requesting and receiving the needed information the node will bootstrap from -func (esdp *epochStartDataProvider) Bootstrap() (*ComponentsNeededForBootstrap, error) { +func (esdp *epochStartDataProvider) Bootstrap() (*structs.ComponentsNeededForBootstrap, error) { err := esdp.initTopicsAndInterceptors() if err != nil { return nil, err @@ -165,16 +168,23 @@ func (esdp *epochStartDataProvider) Bootstrap() (*ComponentsNeededForBootstrap, return nil, err } + var shardHeaderForShard *block.Header + if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { + shardHeaderForShard = shardHeaders[shardCoordinator.SelfId()] + } + epochStartData, err := esdp.getCurrentEpochStartData(shardCoordinator, metaBlock) if err != nil { return nil, err } + pendingMiniBlocks := make([]*block.MiniBlock, 0) for _, mb := range epochStartData.PendingMiniBlockHeaders { receivedMb, errGetMb := esdp.getMiniBlock(&mb) if errGetMb != nil { return nil, errGetMb } + pendingMiniBlocks = append(pendingMiniBlocks, receivedMb) log.Info("received miniblock", "type", receivedMb.Type) } @@ -195,13 +205,50 @@ func (esdp *epochStartDataProvider) Bootstrap() (*ComponentsNeededForBootstrap, return nil, err } - return &ComponentsNeededForBootstrap{ - EpochStartMetaBlock: metaBlock, - NodesConfig: nodesConfig, - ShardHeaders: shardHeaders, - ShardCoordinator: shardCoordinator, - Tries: trieToReturn, - }, nil + components := &structs.ComponentsNeededForBootstrap{ + EpochStartMetaBlock: metaBlock, + PreviousEpochStartMetaBlock: prevMetaBlock, + ShardHeader: shardHeaderForShard, + NodesConfig: nodesConfig, + ShardHeaders: shardHeaders, + ShardCoordinator: shardCoordinator, + Tries: trieToReturn, + PendingMiniBlocks: pendingMiniBlocks, + } + + var storageHandlerComponent StorageHandler + if shardCoordinator.SelfId() > shardCoordinator.NumberOfShards() { + storageHandlerComponent, err = storagehandler.NewMetaStorageHandler( + esdp.generalConfig, + shardCoordinator, + esdp.pathManager, + esdp.marshalizer, + esdp.hasher, + metaBlock.Epoch, + ) + if err != nil { + return nil, err + } + } else { + storageHandlerComponent, err = storagehandler.NewShardStorageHandler( + esdp.generalConfig, + shardCoordinator, + esdp.pathManager, + esdp.marshalizer, + esdp.hasher, + metaBlock.Epoch, + ) + if err != nil { + return nil, err + } + } + + errSavingToStorage := storageHandlerComponent.SaveDataToStorage(*components) + if errSavingToStorage != nil { + return nil, errSavingToStorage + } + + return components, nil } func (esdp *epochStartDataProvider) changeMessageProcessorsForMetaBlocks() { diff --git a/epochStart/bootstrap/epochStartDataProvider_test.go b/epochStart/bootstrap/epochStartDataProvider_test.go index 681e7a98b3b..32554e44a86 100644 --- a/epochStart/bootstrap/epochStartDataProvider_test.go +++ b/epochStart/bootstrap/epochStartDataProvider_test.go @@ -55,6 +55,17 @@ func TestNewEpochStartDataProvider_NilHasherShouldErr(t 
*testing.T) {
 	require.Equal(t, bootstrap.ErrNilHasher, err)
 }
 
+func TestNewEpochStartDataProvider_NilPathManagerShouldErr(t *testing.T) {
+	t.Parallel()
+
+	args := getArguments()
+	args.PathManager = nil
+	epStart, err := bootstrap.NewEpochStartDataProvider(args)
+
+	require.Nil(t, epStart)
+	require.Equal(t, bootstrap.ErrNilPathManager, err)
+}
+
 func TestNewEpochStartDataProvider_NilNodesConfigProviderShouldErr(t *testing.T) {
 	t.Parallel()
 
@@ -146,6 +157,7 @@ func getArguments() bootstrap.ArgsEpochStartDataProvider {
 		Hasher:                         mock2.HasherMock{},
 		NodesConfigProvider:            &mock.NodesConfigProviderStub{},
 		EpochStartMetaBlockInterceptor: &mock.EpochStartMetaBlockInterceptorStub{},
+		PathManager:                    &mock.PathManagerStub{},
 		MetaBlockInterceptor:           &mock.MetaBlockInterceptorStub{},
 		ShardHeaderInterceptor:         &mock.ShardHeaderInterceptorStub{},
 		MiniBlockInterceptor:           &mock.MiniBlockInterceptorStub{},
diff --git a/epochStart/bootstrap/errors.go b/epochStart/bootstrap/errors.go
index 0734ee070fe..d4c02a4f25c 100644
--- a/epochStart/bootstrap/errors.go
+++ b/epochStart/bootstrap/errors.go
@@ -11,6 +11,9 @@ var ErrNilMessenger = errors.New("nil messenger")
 // ErrNilMarshalizer signals that a nil marshalizer has been provided
 var ErrNilMarshalizer = errors.New("nil marshalizer")
 
+// ErrNilPathManager signals that a nil path manager has been provided
+var ErrNilPathManager = errors.New("nil path manager")
+
 // ErrNilHasher signals that a nil hasher has been provided
 var ErrNilHasher = errors.New("nil hasher")
 
diff --git a/epochStart/bootstrap/factory/disabledEpochStartDataProvider.go b/epochStart/bootstrap/factory/disabledEpochStartDataProvider.go
index f3085454b47..f15e932b743 100644
--- a/epochStart/bootstrap/factory/disabledEpochStartDataProvider.go
+++ b/epochStart/bootstrap/factory/disabledEpochStartDataProvider.go
@@ -3,13 +3,13 @@ package factory
 import (
 	"errors"
 
-	"github.com/ElrondNetwork/elrond-go/epochStart/bootstrap"
+	"github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/structs"
 )
 
 type disabledEpochStartDataProvider struct {
 }
 
 // Bootstrap will return an error indicating that the sync is not needed
-func (d *disabledEpochStartDataProvider) Bootstrap() (*bootstrap.ComponentsNeededForBootstrap, error) {
+func (d *disabledEpochStartDataProvider) Bootstrap() (*structs.ComponentsNeededForBootstrap, error) {
 	return nil, errors.New("sync not needed")
 }
 
diff --git a/epochStart/bootstrap/factory/epochStartDataProviderFactory.go b/epochStart/bootstrap/factory/epochStartDataProviderFactory.go
index 84f3626bff1..488f6d0d7d6 100644
--- a/epochStart/bootstrap/factory/epochStartDataProviderFactory.go
+++ b/epochStart/bootstrap/factory/epochStartDataProviderFactory.go
@@ -11,6 +11,7 @@ import (
 	"github.com/ElrondNetwork/elrond-go/marshal"
 	"github.com/ElrondNetwork/elrond-go/p2p"
 	"github.com/ElrondNetwork/elrond-go/sharding"
+	"github.com/ElrondNetwork/elrond-go/storage"
 )
 
 type epochStartDataProviderFactory struct {
@@ -18,7 +19,9 @@ type epochStartDataProviderFactory struct {
 	messenger           p2p.Messenger
 	marshalizer         marshal.Marshalizer
 	hasher              hashing.Hasher
+	pathManager         storage.PathManagerHandler
 	nodesConfigProvider bootstrap.NodesConfigProviderHandler
+	generalConfig       config.Config
 	shouldSync          bool
 }
 
@@ -30,6 +33,7 @@ type EpochStartDataProviderFactoryArgs struct {
 	Marshalizer         marshal.Marshalizer
 	Hasher              hashing.Hasher
 	NodesConfigProvider bootstrap.NodesConfigProviderHandler
+	PathManager         storage.PathManagerHandler
 	StartTime           time.Time
 	OriginalNodesConfig *sharding.NodesSetup
 	GeneralConfig       *config.Config
@@ -47,6 
+51,9 @@ func NewEpochStartDataProviderFactory(args EpochStartDataProviderFactoryArgs) (*
 	if check.IfNil(args.Marshalizer) {
 		return nil, bootstrap.ErrNilMarshalizer
 	}
+	if check.IfNil(args.PathManager) {
+		return nil, bootstrap.ErrNilPathManager
+	}
 	if check.IfNil(args.Hasher) {
 		return nil, bootstrap.ErrNilHasher
 	}
@@ -67,6 +74,8 @@ func NewEpochStartDataProviderFactory(args EpochStartDataProviderFactoryArgs) (*
 		messenger:           args.Messenger,
 		marshalizer:         args.Marshalizer,
 		hasher:              args.Hasher,
+		pathManager:         args.PathManager,
+		generalConfig:       *args.GeneralConfig,
 		nodesConfigProvider: args.NodesConfigProvider,
 		shouldSync:          shouldSync,
 	}, nil
@@ -101,6 +110,8 @@ func (esdpf *epochStartDataProviderFactory) Create() (bootstrap.EpochStartDataPr
 		Marshalizer:                    esdpf.marshalizer,
 		Hasher:                         esdpf.hasher,
 		NodesConfigProvider:            esdpf.nodesConfigProvider,
+		GeneralConfig:                  esdpf.generalConfig,
+		PathManager:                    esdpf.pathManager,
 		EpochStartMetaBlockInterceptor: epochStartMetaBlockInterceptor,
 		MetaBlockInterceptor:           metaBlockInterceptor,
 		ShardHeaderInterceptor:         shardHdrInterceptor,
diff --git a/epochStart/bootstrap/interface.go b/epochStart/bootstrap/interface.go
index cb7c5f9dbfd..314a90a7e28 100644
--- a/epochStart/bootstrap/interface.go
+++ b/epochStart/bootstrap/interface.go
@@ -2,6 +2,7 @@ package bootstrap
 
 import (
 	"github.com/ElrondNetwork/elrond-go/data/block"
+	"github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/structs"
 	"github.com/ElrondNetwork/elrond-go/process"
 	"github.com/ElrondNetwork/elrond-go/sharding"
 )
@@ -38,5 +39,18 @@ type NodesConfigProviderHandler interface {
 
 // EpochStartDataProviderHandler defines what a component which fetches the data needed for starting in an epoch should do
 type EpochStartDataProviderHandler interface {
-	Bootstrap() (*ComponentsNeededForBootstrap, error)
+	Bootstrap() (*structs.ComponentsNeededForBootstrap, error)
+}
+
+// PathManagerHandler defines which actions should be done for generating paths for databases directories
+type PathManagerHandler interface {
+	PathForEpoch(shardId string, epoch uint32, identifier string) string
+	PathForStatic(shardId string, identifier string) string
+	IsInterfaceNil() bool
+}
+
+// StorageHandler defines which actions should be done by a component which handles the storage of bootstrap data
+type StorageHandler interface {
+	SaveDataToStorage(components structs.ComponentsNeededForBootstrap) error
+	IsInterfaceNil() bool
 }
diff --git a/epochStart/bootstrap/mock/pathManagerStub.go b/epochStart/bootstrap/mock/pathManagerStub.go
new file mode 100644
index 00000000000..78aa45b6b67
--- /dev/null
+++ b/epochStart/bootstrap/mock/pathManagerStub.go
@@ -0,0 +1,32 @@
+package mock
+
+import "fmt"
+
+// PathManagerStub -
+type PathManagerStub struct {
+	PathForEpochCalled  func(shardId string, epoch uint32, identifier string) string
+	PathForStaticCalled func(shardId string, identifier string) string
+}
+
+// PathForEpoch -
+func (p *PathManagerStub) PathForEpoch(shardId string, epoch uint32, identifier string) string {
+	if p.PathForEpochCalled != nil {
+		return p.PathForEpochCalled(shardId, epoch, identifier)
+	}
+
+	return fmt.Sprintf("Epoch_%d/Shard_%s/%s", epoch, shardId, identifier)
+}
+
+// PathForStatic -
+func (p *PathManagerStub) PathForStatic(shardId string, identifier string) string {
+	if p.PathForStaticCalled != nil {
+		return p.PathForStaticCalled(shardId, identifier)
+	}
+
+	return fmt.Sprintf("Static/Shard_%s/%s", shardId, identifier)
+}
+
+// IsInterfaceNil -
+func (p *PathManagerStub) IsInterfaceNil() bool {
+	return p == nil
+}
diff --git a/epochStart/bootstrap/storagehandler/baseStorageHandler.go b/epochStart/bootstrap/storagehandler/baseStorageHandler.go
new file mode 100644
index 00000000000..d0f67a1f4b3
--- /dev/null
+++ b/epochStart/bootstrap/storagehandler/baseStorageHandler.go
@@ -0,0 +1,69 @@
+package storagehandler
+
+import (
+	"encoding/json"
+
+	"github.com/ElrondNetwork/elrond-go/data/block"
+	"github.com/ElrondNetwork/elrond-go/dataRetriever"
+	"github.com/ElrondNetwork/elrond-go/hashing"
+	"github.com/ElrondNetwork/elrond-go/logger"
+	"github.com/ElrondNetwork/elrond-go/marshal"
+	"github.com/ElrondNetwork/elrond-go/process/block/bootstrapStorage"
+	"github.com/ElrondNetwork/elrond-go/sharding"
+)
+
+// highestRoundFromBootStorage is the key for the highest round that is saved in storage
+const highestRoundFromBootStorage = "highestRoundFromBootStorage"
+
+const triggerRegistrykeyPrefix = "epochStartTrigger_"
+
+const nodesCoordinatorRegistrykeyPrefix = "indexHashed_"
+
+var log = logger.GetOrCreate("bootstrap/storagehandler")
+
+// baseStorageHandler handles the storage functions for saving bootstrap data
+type baseStorageHandler struct {
+	storageService   dataRetriever.StorageService
+	shardCoordinator sharding.Coordinator
+	marshalizer      marshal.Marshalizer
+	hasher           hashing.Hasher
+	currentEpoch     uint32
+}
+
+// getAndSavePendingMiniBlocks computes, for each sender shard, how many mini blocks are still pending
+func (bsh *baseStorageHandler) getAndSavePendingMiniBlocks(miniBlocks []*block.MiniBlock) ([]bootstrapStorage.PendingMiniBlockInfo, error) {
+	countersMap := make(map[uint32]int)
+	for _, miniBlock := range miniBlocks {
+		countersMap[miniBlock.SenderShardID]++
+	}
+
+	sliceToRet := make([]bootstrapStorage.PendingMiniBlockInfo, 0)
+	for shardID, count := range countersMap {
+		sliceToRet = append(sliceToRet, bootstrapStorage.PendingMiniBlockInfo{
+			ShardID:              shardID,
+			NumPendingMiniBlocks: uint32(count),
+		})
+	}
+
+	return sliceToRet, nil
+}
+
+// getAndSaveNodesCoordinatorKey stores the nodes coordinator registry and returns the key it was saved under
+func (bsh *baseStorageHandler) getAndSaveNodesCoordinatorKey(metaBlock *block.MetaBlock) ([]byte, error) {
+	key := append([]byte(nodesCoordinatorRegistrykeyPrefix), metaBlock.RandSeed...)
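+	// NOTE: the key is derived from the epoch start metablock's rand seed, so
+	// each epoch start metablock maps to a distinct nodes coordinator registry entry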
+ + registry := sharding.NodesCoordinatorRegistry{ + EpochsConfig: nil, // TODO : populate this field when nodes coordinator is done + CurrentEpoch: metaBlock.Epoch, + } + + registryBytes, err := json.Marshal(®istry) + if err != nil { + return nil, err + } + + err = bsh.storageService.GetStorer(dataRetriever.BootstrapUnit).Put(key, registryBytes) + if err != nil { + return nil, err + } + + return key, nil +} diff --git a/epochStart/bootstrap/storagehandler/metaStorageHandler.go b/epochStart/bootstrap/storagehandler/metaStorageHandler.go new file mode 100644 index 00000000000..42950154433 --- /dev/null +++ b/epochStart/bootstrap/storagehandler/metaStorageHandler.go @@ -0,0 +1,181 @@ +package storagehandler + +import ( + "encoding/json" + "fmt" + + "github.com/ElrondNetwork/elrond-go/config" + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/disabled" + "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/structs" + "github.com/ElrondNetwork/elrond-go/epochStart/metachain" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process/block/bootstrapStorage" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/ElrondNetwork/elrond-go/storage/factory" +) + +type metaStorageHandler struct { + *baseStorageHandler +} + +// NewMetaStorageHandler will return a new instance of metaStorageHandler +func NewMetaStorageHandler( + generalConfig config.Config, + shardCoordinator sharding.Coordinator, + pathManagerHandler storage.PathManagerHandler, + marshalizer marshal.Marshalizer, + hasher hashing.Hasher, + currentEpoch uint32, +) (*metaStorageHandler, error) { + epochStartNotifier := &disabled.EpochStartNotifier{} + storageFactory, err := factory.NewStorageServiceFactory( + &generalConfig, + shardCoordinator, + pathManagerHandler, + epochStartNotifier, + currentEpoch, + ) + if err != nil { + return nil, err + } + storageService, err := storageFactory.CreateForMeta() + if err != nil { + return nil, err + } + base := &baseStorageHandler{ + storageService: storageService, + shardCoordinator: shardCoordinator, + marshalizer: marshalizer, + hasher: hasher, + currentEpoch: currentEpoch, + } + + return &metaStorageHandler{baseStorageHandler: base}, nil +} + +// SaveDataToStorage will save the fetched data to storage so it will be used by the storage bootstrap component +func (msh *metaStorageHandler) SaveDataToStorage(components structs.ComponentsNeededForBootstrap) error { + // TODO: here we should save all needed data + + defer func() { + err := msh.storageService.CloseAll() + if err != nil { + log.Debug("error while closing storers", "error", err) + } + }() + + bootStorer := msh.storageService.GetStorer(dataRetriever.BootstrapUnit) + + lastHeader, err := msh.getAndSaveLastHeader(components.EpochStartMetaBlock) + if err != nil { + return err + } + + miniBlocks, err := msh.getAndSavePendingMiniBlocks(components.PendingMiniBlocks) + if err != nil { + return err + } + + triggerConfigKey, err := msh.getAndSaveTriggerRegistry(components) + if err != nil { + return err + } + + nodesCoordinatorConfigKey, err := msh.getAndSaveNodesCoordinatorKey(components.EpochStartMetaBlock) + if err != nil { + return err + } + + bootStrapData := bootstrapStorage.BootstrapData{ + LastHeader: lastHeader, // meta - epoch start metablock ; shard 
- shard header
+		LastCrossNotarizedHeaders:  nil,                                                // lastFinalizedMetaBlock + firstPendingMetaBlock
+		LastSelfNotarizedHeaders:   []bootstrapStorage.BootstrapHeaderInfo{lastHeader}, // meta - epoch start metablock , shard: shard header
+		ProcessedMiniBlocks:        nil,                                                // first pending metablock and pending miniblocks - difference between them
+		// (shard - only shard ; meta - possible not to fill at all)
+		PendingMiniBlocks:          miniBlocks,                // pending miniblocks
+		NodesCoordinatorConfigKey:  nodesCoordinatorConfigKey, // wait for Radu's component
+		EpochStartTriggerConfigKey: triggerConfigKey,          // metachain/shard trigger registry
+		HighestFinalBlockNonce:     lastHeader.Nonce,          //
+		LastRound:                  int64(components.EpochStartMetaBlock.Round),
+	}
+	bootStrapDataBytes, err := msh.marshalizer.Marshal(&bootStrapData)
+	if err != nil {
+		return err
+	}
+	err = bootStorer.Put([]byte(highestRoundFromBootStorage), bootStrapDataBytes)
+	if err != nil {
+		return err
+	}
+	log.Info("saved bootstrap data to storage")
+	return nil
+}
+
+func (msh *metaStorageHandler) getAndSaveLastHeader(metaBlock *block.MetaBlock) (bootstrapStorage.BootstrapHeaderInfo, error) {
+	lastHeaderHash, err := core.CalculateHash(msh.marshalizer, msh.hasher, metaBlock)
+	if err != nil {
+		return bootstrapStorage.BootstrapHeaderInfo{}, err
+	}
+
+	lastHeaderBytes, err := msh.marshalizer.Marshal(metaBlock)
+	if err != nil {
+		return bootstrapStorage.BootstrapHeaderInfo{}, err
+	}
+
+	err = msh.storageService.GetStorer(dataRetriever.MetaBlockUnit).Put(lastHeaderHash, lastHeaderBytes)
+	if err != nil {
+		return bootstrapStorage.BootstrapHeaderInfo{}, err
+	}
+
+	bootstrapHdrInfo := bootstrapStorage.BootstrapHeaderInfo{
+		ShardId: core.MetachainShardId,
+		Nonce:   metaBlock.Nonce,
+		Hash:    lastHeaderHash,
+	}
+
+	return bootstrapHdrInfo, nil
+}
+
+func (msh *metaStorageHandler) getAndSaveTriggerRegistry(components structs.ComponentsNeededForBootstrap) ([]byte, error) {
+	metaBlock := components.EpochStartMetaBlock
+	hash, err := core.CalculateHash(msh.marshalizer, msh.hasher, metaBlock)
+	if err != nil {
+		return nil, err
+	}
+
+	triggerReg := metachain.TriggerRegistry{
+		Epoch:                       metaBlock.Epoch,
+		CurrentRound:                metaBlock.Round,
+		EpochFinalityAttestingRound: metaBlock.Round,
+		CurrEpochStartRound:         metaBlock.Round,
+		PrevEpochStartRound:         components.PreviousEpochStartMetaBlock.Round,
+		EpochStartMetaHash:          hash,
+		EpochStartMeta:              metaBlock,
+	}
+
+	trigStateKey := fmt.Sprintf("initial_value_epoch%d", metaBlock.Epoch)
+	key := []byte(triggerRegistrykeyPrefix + trigStateKey)
+
+	triggerRegBytes, err := json.Marshal(&triggerReg)
+	if err != nil {
+		return nil, err
+	}
+
+	errPut := msh.storageService.GetStorer(dataRetriever.BootstrapUnit).Put(key, triggerRegBytes)
+	if errPut != nil {
+		return nil, errPut
+	}
+
+	return key, nil
+}
+
+// IsInterfaceNil returns true if there is no value under the interface
+func (msh *metaStorageHandler) IsInterfaceNil() bool {
+	return msh == nil
+}
diff --git a/epochStart/bootstrap/storagehandler/shardStorageHandler.go b/epochStart/bootstrap/storagehandler/shardStorageHandler.go
new file mode 100644
index 00000000000..309f1729c14
--- /dev/null
+++ b/epochStart/bootstrap/storagehandler/shardStorageHandler.go
@@ -0,0 +1,197 @@
+package storagehandler
+
+import (
+	"encoding/json"
+	"fmt"
+	"strconv"
+
+	"github.com/ElrondNetwork/elrond-go/config"
+	"github.com/ElrondNetwork/elrond-go/core"
+	"github.com/ElrondNetwork/elrond-go/data/block"
+	"github.com/ElrondNetwork/elrond-go/dataRetriever"
+	"github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/disabled"
+	"github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/structs"
+	"github.com/ElrondNetwork/elrond-go/epochStart/shardchain"
+	"github.com/ElrondNetwork/elrond-go/hashing"
+	"github.com/ElrondNetwork/elrond-go/marshal"
+	"github.com/ElrondNetwork/elrond-go/process/block/bootstrapStorage"
+	"github.com/ElrondNetwork/elrond-go/sharding"
+	"github.com/ElrondNetwork/elrond-go/storage"
+	"github.com/ElrondNetwork/elrond-go/storage/factory"
+)
+
+type shardStorageHandler struct {
+	*baseStorageHandler
+}
+
+// NewShardStorageHandler will return a new instance of shardStorageHandler
+func NewShardStorageHandler(
+	generalConfig config.Config,
+	shardCoordinator sharding.Coordinator,
+	pathManagerHandler storage.PathManagerHandler,
+	marshalizer marshal.Marshalizer,
+	hasher hashing.Hasher,
+	currentEpoch uint32,
+) (*shardStorageHandler, error) {
+	epochStartNotifier := &disabled.EpochStartNotifier{}
+	storageFactory, err := factory.NewStorageServiceFactory(
+		&generalConfig,
+		shardCoordinator,
+		pathManagerHandler,
+		epochStartNotifier,
+		currentEpoch,
+	)
+	if err != nil {
+		return nil, err
+	}
+	storageService, err := storageFactory.CreateForShard()
+	if err != nil {
+		return nil, err
+	}
+	base := &baseStorageHandler{
+		storageService:   storageService,
+		shardCoordinator: shardCoordinator,
+		marshalizer:      marshalizer,
+		hasher:           hasher,
+		currentEpoch:     currentEpoch,
+	}
+
+	return &shardStorageHandler{baseStorageHandler: base}, nil
+}
+
+// SaveDataToStorage will save the fetched data to storage so it will be used by the storage bootstrap component
+func (ssh *shardStorageHandler) SaveDataToStorage(components structs.ComponentsNeededForBootstrap) error {
+	// TODO: here we should save all needed data
+
+	defer func() {
+		err := ssh.storageService.CloseAll()
+		if err != nil {
+			log.Debug("error while closing storers", "error", err)
+		}
+	}()
+
+	bootStorer := ssh.storageService.GetStorer(dataRetriever.BootstrapUnit)
+
+	lastHeader, err := ssh.getAndSaveLastHeader(components.ShardHeader)
+	if err != nil {
+		return err
+	}
+
+	miniBlocks, err := ssh.getAndSavePendingMiniBlocks(components.PendingMiniBlocks)
+	if err != nil {
+		return err
+	}
+
+	triggerConfigKey, err := ssh.getAndSaveTriggerRegistry(components)
+	if err != nil {
+		return err
+	}
+
+	nodesCoordinatorConfigKey, err := ssh.getAndSaveNodesCoordinatorKey(components.EpochStartMetaBlock)
+	if err != nil {
+		return err
+	}
+
+	bootStrapData := bootstrapStorage.BootstrapData{
+		LastHeader:                 lastHeader,                                         // meta - epoch start metablock ; shard - shard header
+		LastCrossNotarizedHeaders:  nil,                                                // lastFinalizedMetaBlock + firstPendingMetaBlock
+		LastSelfNotarizedHeaders:   []bootstrapStorage.BootstrapHeaderInfo{lastHeader}, // meta - epoch start metablock , shard: shard header
+		ProcessedMiniBlocks:        nil,                                                // first pending metablock and pending miniblocks - difference between them
+		// (shard - only shard ; meta - possible not to fill at all)
+		PendingMiniBlocks:          miniBlocks,                // pending miniblocks
+		NodesCoordinatorConfigKey:  nodesCoordinatorConfigKey, // wait for Radu's component
+		EpochStartTriggerConfigKey: triggerConfigKey,          // metachain/shard trigger registry
+		HighestFinalBlockNonce:     0,                         //
+		LastRound:                  int64(components.ShardHeader.Round),
+	}
+	bootStrapDataBytes, err := ssh.marshalizer.Marshal(&bootStrapData)
+	if err != nil {
+		return err
+	}
+	roundToUseAsKey := int64(components.ShardHeader.Round + 2) // TODO: change this. added 2 in order to skip
+	// equality check between round and LastRound from bootstrap from storage component
+	roundNum := bootstrapStorage.RoundNum{Num: roundToUseAsKey}
+	roundNumBytes, err := ssh.marshalizer.Marshal(&roundNum)
+	if err != nil {
+		return err
+	}
+
+	err = bootStorer.Put([]byte(highestRoundFromBootStorage), roundNumBytes)
+	if err != nil {
+		return err
+	}
+
+	log.Info("saved bootstrap data to storage")
+	key := []byte(strconv.FormatInt(roundToUseAsKey, 10))
+	err = bootStorer.Put(key, bootStrapDataBytes)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (ssh *shardStorageHandler) getAndSaveLastHeader(shardHeader *block.Header) (bootstrapStorage.BootstrapHeaderInfo, error) {
+	lastHeaderHash, err := core.CalculateHash(ssh.marshalizer, ssh.hasher, shardHeader)
+	if err != nil {
+		return bootstrapStorage.BootstrapHeaderInfo{}, err
+	}
+
+	lastHeaderBytes, err := ssh.marshalizer.Marshal(shardHeader)
+	if err != nil {
+		return bootstrapStorage.BootstrapHeaderInfo{}, err
+	}
+
+	err = ssh.storageService.GetStorer(dataRetriever.BlockHeaderUnit).Put(lastHeaderHash, lastHeaderBytes)
+	if err != nil {
+		return bootstrapStorage.BootstrapHeaderInfo{}, err
+	}
+
+	bootstrapHdrInfo := bootstrapStorage.BootstrapHeaderInfo{
+		ShardId: shardHeader.ShardId,
+		Nonce:   shardHeader.Nonce,
+		Hash:    lastHeaderHash,
+	}
+
+	return bootstrapHdrInfo, nil
+}
+
+func (ssh *shardStorageHandler) getAndSaveTriggerRegistry(components structs.ComponentsNeededForBootstrap) ([]byte, error) {
+	shardHeader := components.ShardHeader
+
+	metaBlock := components.EpochStartMetaBlock
+	metaBlockHash, err := core.CalculateHash(ssh.marshalizer, ssh.hasher, metaBlock)
+	if err != nil {
+		return nil, err
+	}
+
+	triggerReg := shardchain.TriggerRegistry{
+		Epoch:                       shardHeader.Epoch,
+		CurrentRoundIndex:           int64(shardHeader.Round),
+		EpochStartRound:             shardHeader.Round,
+		EpochMetaBlockHash:          metaBlockHash,
+		IsEpochStart:                false,
+		NewEpochHeaderReceived:      false,
+		EpochFinalityAttestingRound: 0,
+	}
+
+	trigStateKey := fmt.Sprintf("initial_value_epoch%d", metaBlock.Epoch)
+	key := []byte(triggerRegistrykeyPrefix + trigStateKey)
+
+	triggerRegBytes, err := json.Marshal(&triggerReg)
+	if err != nil {
+		return nil, err
+	}
+
+	errPut := ssh.storageService.GetStorer(dataRetriever.BootstrapUnit).Put(key, triggerRegBytes)
+	if errPut != nil {
+		return nil, errPut
+	}
+
+	return key, nil
+}
+
+// IsInterfaceNil returns true if there is no value under the interface
+func (ssh *shardStorageHandler) IsInterfaceNil() bool {
+	return ssh == nil
+}
diff --git a/epochStart/bootstrap/structs/components.go b/epochStart/bootstrap/structs/components.go
new file mode 100644
index 00000000000..fb365ae61b6
--- /dev/null
+++ b/epochStart/bootstrap/structs/components.go
@@ -0,0 +1,19 @@
+package structs
+
+import (
+	"github.com/ElrondNetwork/elrond-go/data/block"
+	"github.com/ElrondNetwork/elrond-go/data/state"
+	"github.com/ElrondNetwork/elrond-go/sharding"
+)
+
+// ComponentsNeededForBootstrap holds the components which need to be initialized from network
+type ComponentsNeededForBootstrap struct {
+	EpochStartMetaBlock         *block.MetaBlock
+	PreviousEpochStartMetaBlock *block.MetaBlock
+	ShardHeader                 *block.Header // only for shards, nil for meta
+	NodesConfig                 *sharding.NodesSetup
+	ShardHeaders                map[uint32]*block.Header
+	ShardCoordinator            sharding.Coordinator
+	Tries                       state.TriesHolder
+	PendingMiniBlocks           []*block.MiniBlock
+}
diff --git a/process/sync/storageBootstrap/metaStorageBootstrapper.go
b/process/sync/storageBootstrap/metaStorageBootstrapper.go index 78c8f09e8d8..1e6be23d9c1 100644 --- a/process/sync/storageBootstrap/metaStorageBootstrapper.go +++ b/process/sync/storageBootstrap/metaStorageBootstrapper.go @@ -14,7 +14,7 @@ type metaStorageBootstrapper struct { pendingMiniBlocksHandler process.PendingMiniBlocksHandler } -// NewMetaStorageBootstrapper is method used to create a nes storage bootstrapper +// NewMetaStorageBootstrapper is method used to create a new storage bootstrapper func NewMetaStorageBootstrapper(arguments ArgsMetaStorageBootstrapper) (*metaStorageBootstrapper, error) { err := checkMetaStorageBootstrapperArgs(arguments) if err != nil { @@ -111,7 +111,7 @@ func (msb *metaStorageBootstrapper) cleanupNotarizedStorage(metaBlockHash []byte } log.Debug("removing shard header from ShardHdrNonceHashDataUnit storage", - "shradId", shardHeader.GetShardID(), + "shardId", shardHeader.GetShardID(), "nonce", shardHeader.GetNonce(), "hash", shardHeaderHash) @@ -121,7 +121,7 @@ func (msb *metaStorageBootstrapper) cleanupNotarizedStorage(metaBlockHash []byte err = storer.Remove(nonceToByteSlice) if err != nil { log.Debug("shard header was not removed from ShardHdrNonceHashDataUnit storage", - "shradId", shardHeader.GetShardID(), + "shardId", shardHeader.GetShardID(), "nonce", shardHeader.GetNonce(), "hash", shardHeaderHash, "error", err.Error()) diff --git a/process/sync/storageBootstrap/shardStorageBootstrapper.go b/process/sync/storageBootstrap/shardStorageBootstrapper.go index 2326031879c..710f38a0208 100644 --- a/process/sync/storageBootstrap/shardStorageBootstrapper.go +++ b/process/sync/storageBootstrap/shardStorageBootstrapper.go @@ -15,7 +15,7 @@ type shardStorageBootstrapper struct { miniBlocksResolver dataRetriever.MiniBlocksResolver } -// NewShardStorageBootstrapper is method used to create a nes storage bootstrapper +// NewShardStorageBootstrapper is method used to create a new storage bootstrapper func NewShardStorageBootstrapper(arguments ArgsShardStorageBootstrapper) (*shardStorageBootstrapper, error) { err := checkShardStorageBootstrapperArgs(arguments) if err != nil { @@ -137,7 +137,7 @@ func (ssb *shardStorageBootstrapper) cleanupNotarizedStorage(shardHeaderHash []b } log.Debug("removing meta block from storage", - "shradId", metaBlock.GetShardID(), + "shardId", metaBlock.GetShardID(), "nonce", metaBlock.GetNonce(), "hash", metaBlockHash) @@ -145,7 +145,7 @@ func (ssb *shardStorageBootstrapper) cleanupNotarizedStorage(shardHeaderHash []b err = ssb.store.GetStorer(dataRetriever.MetaHdrNonceHashDataUnit).Remove(nonceToByteSlice) if err != nil { log.Debug("meta block was not removed from MetaHdrNonceHashDataUnit storage", - "shradId", metaBlock.GetShardID(), + "shardId", metaBlock.GetShardID(), "nonce", metaBlock.GetNonce(), "hash", metaBlockHash, "error", err.Error()) @@ -154,7 +154,7 @@ func (ssb *shardStorageBootstrapper) cleanupNotarizedStorage(shardHeaderHash []b err = ssb.store.GetStorer(dataRetriever.MetaBlockUnit).Remove(metaBlockHash) if err != nil { log.Debug("meta block was not removed from MetaBlockUnit storage", - "shradId", metaBlock.GetShardID(), + "shardId", metaBlock.GetShardID(), "nonce", metaBlock.GetNonce(), "hash", metaBlockHash, "error", err.Error()) From 690f885cd1db40fcc3dfe33e127042286b55ac32 Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Wed, 18 Mar 2020 18:13:33 +0200 Subject: [PATCH 16/61] merges and fixes. 
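
The epoch start data provider now takes a mandatory WhiteListHandler and
requests data through the generic resolver request handler; the simple
interceptors and their stubs gain a no-op SetIsDataForCurrentShardVerifier
so they keep satisfying process.Interceptor. A rough caller-side sketch
(illustrative only, mirroring the checks added below):

    args := bootstrap.ArgsEpochStartDataProvider{
        // ... the existing arguments ...
        WhiteListHandler: whiteListHandler, // nil now fails fast with ErrNilWhiteListHandler
    }
    provider, err := bootstrap.NewEpochStartDataProvider(args)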
--- epochStart/bootstrap/epochStartDataProvider.go | 8 +++++++- epochStart/bootstrap/errors.go | 3 +++ .../bootstrap/mock/epochStartMetaBlockInterceptorStub.go | 6 ++++++ epochStart/bootstrap/mock/metaBlockInterceptorStub.go | 6 ++++++ epochStart/bootstrap/mock/miniBlockInterceptorStub.go | 6 ++++++ epochStart/bootstrap/mock/shardHeaderInterceptorStub.go | 6 ++++++ .../bootstrap/simpleEpochStartMetaBlockInterceptor.go | 6 ++++++ epochStart/bootstrap/simpleMetaBlockInterceptor.go | 6 ++++++ epochStart/bootstrap/simpleMiniBlockInterceptor.go | 6 ++++++ epochStart/bootstrap/simpleShardHeaderInterceptor.go | 6 ++++++ update/factory/fullSyncResolversContainerFactory.go | 3 +++ 11 files changed, 61 insertions(+), 1 deletion(-) diff --git a/epochStart/bootstrap/epochStartDataProvider.go b/epochStart/bootstrap/epochStartDataProvider.go index 6c668eea2b4..3a4642dd568 100644 --- a/epochStart/bootstrap/epochStartDataProvider.go +++ b/epochStart/bootstrap/epochStartDataProvider.go @@ -57,6 +57,7 @@ type epochStartDataProvider struct { shardHeaderInterceptor ShardHeaderInterceptorHandler miniBlockInterceptor MiniBlockInterceptorHandler requestHandler process.RequestHandler + whiteListHandler dataRetriever.WhiteListHandler } // ArgsEpochStartDataProvider holds the arguments needed for creating an epoch start data provider component @@ -72,6 +73,7 @@ type ArgsEpochStartDataProvider struct { MetaBlockInterceptor MetaBlockInterceptorHandler ShardHeaderInterceptor ShardHeaderInterceptorHandler MiniBlockInterceptor MiniBlockInterceptorHandler + WhiteListHandler dataRetriever.WhiteListHandler } // NewEpochStartDataProvider will return a new instance of epochStartDataProvider @@ -107,6 +109,9 @@ func NewEpochStartDataProvider(args ArgsEpochStartDataProvider) (*epochStartData if check.IfNil(args.MiniBlockInterceptor) { return nil, ErrNilMiniBlockInterceptor } + if check.IfNil(args.WhiteListHandler) { + return nil, ErrNilWhiteListHandler + } return &epochStartDataProvider{ publicKey: args.PublicKey, marshalizer: args.Marshalizer, @@ -119,6 +124,7 @@ func NewEpochStartDataProvider(args ArgsEpochStartDataProvider) (*epochStartData metaBlockInterceptor: args.MetaBlockInterceptor, shardHeaderInterceptor: args.ShardHeaderInterceptor, miniBlockInterceptor: args.MiniBlockInterceptor, + whiteListHandler: args.WhiteListHandler, }, nil } @@ -335,7 +341,7 @@ func (esdp *epochStartDataProvider) createRequestHandler() (process.RequestHandl maxToRequest := 100 - return requestHandlers.NewMetaResolverRequestHandler(finder, requestedItemsHandler, maxToRequest) + return requestHandlers.NewResolverRequestHandler(finder, requestedItemsHandler, esdp.whiteListHandler, maxToRequest, core.MetachainShardId) } func (esdp *epochStartDataProvider) getMiniBlock(miniBlockHeader *block.ShardMiniBlockHeader) (*block.MiniBlock, error) { diff --git a/epochStart/bootstrap/errors.go b/epochStart/bootstrap/errors.go index d4c02a4f25c..81e3c993ebc 100644 --- a/epochStart/bootstrap/errors.go +++ b/epochStart/bootstrap/errors.go @@ -37,3 +37,6 @@ var ErrNumTriesExceeded = errors.New("num of tries exceeded. 
try re-request") // ErrNilShardCoordinator signals that a nil shard coordinator has been provided var ErrNilShardCoordinator = errors.New("nil shard coordinator") + +// ErrNilWhiteListHandler +var ErrNilWhiteListHandler = errors.New("nil white list handler") diff --git a/epochStart/bootstrap/mock/epochStartMetaBlockInterceptorStub.go b/epochStart/bootstrap/mock/epochStartMetaBlockInterceptorStub.go index 9e64b75e25a..88dac0b5a99 100644 --- a/epochStart/bootstrap/mock/epochStartMetaBlockInterceptorStub.go +++ b/epochStart/bootstrap/mock/epochStartMetaBlockInterceptorStub.go @@ -3,6 +3,7 @@ package mock import ( "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/process" ) // EpochStartMetaBlockInterceptorStub - @@ -11,6 +12,11 @@ type EpochStartMetaBlockInterceptorStub struct { GetMetaBlockCalled func(target int, epoch uint32) (*block.MetaBlock, error) } +// SetIsDataForCurrentShardVerifier - +func (m *EpochStartMetaBlockInterceptorStub) SetIsDataForCurrentShardVerifier(_ process.InterceptedDataVerifier) error { + return nil +} + // ProcessReceivedMessage - func (m *EpochStartMetaBlockInterceptorStub) ProcessReceivedMessage(message p2p.MessageP2P, broadcastHandler func(buffToSend []byte)) error { if m.ProcessReceivedMessageCalled != nil { diff --git a/epochStart/bootstrap/mock/metaBlockInterceptorStub.go b/epochStart/bootstrap/mock/metaBlockInterceptorStub.go index b6697372f71..e2a82ab9413 100644 --- a/epochStart/bootstrap/mock/metaBlockInterceptorStub.go +++ b/epochStart/bootstrap/mock/metaBlockInterceptorStub.go @@ -3,6 +3,7 @@ package mock import ( "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/process" ) // MetaBlockInterceptorStub - @@ -11,6 +12,11 @@ type MetaBlockInterceptorStub struct { GetMetaBlockCalled func(hash []byte, target int) (*block.MetaBlock, error) } +// SetIsDataForCurrentShardVerifier - +func (m *MetaBlockInterceptorStub) SetIsDataForCurrentShardVerifier(_ process.InterceptedDataVerifier) error { + return nil +} + // ProcessReceivedMessage - func (m *MetaBlockInterceptorStub) ProcessReceivedMessage(message p2p.MessageP2P, broadcastHandler func(buffToSend []byte)) error { if m.ProcessReceivedMessageCalled != nil { diff --git a/epochStart/bootstrap/mock/miniBlockInterceptorStub.go b/epochStart/bootstrap/mock/miniBlockInterceptorStub.go index 4b6569f9435..c7de260c77a 100644 --- a/epochStart/bootstrap/mock/miniBlockInterceptorStub.go +++ b/epochStart/bootstrap/mock/miniBlockInterceptorStub.go @@ -3,6 +3,7 @@ package mock import ( "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/process" ) // MiniBlockInterceptorStub - @@ -11,6 +12,11 @@ type MiniBlockInterceptorStub struct { GetMiniBlockCalled func(hash []byte, target int) (*block.MiniBlock, error) } +// SetIsDataForCurrentShardVerifier - +func (m *MiniBlockInterceptorStub) SetIsDataForCurrentShardVerifier(_ process.InterceptedDataVerifier) error { + return nil +} + // ProcessReceivedMessage - func (m *MiniBlockInterceptorStub) ProcessReceivedMessage(message p2p.MessageP2P, broadcastHandler func(buffToSend []byte)) error { if m.ProcessReceivedMessageCalled != nil { diff --git a/epochStart/bootstrap/mock/shardHeaderInterceptorStub.go b/epochStart/bootstrap/mock/shardHeaderInterceptorStub.go index 3e9e8a3af8f..a2f10f6b98a 100644 --- a/epochStart/bootstrap/mock/shardHeaderInterceptorStub.go +++ 
b/epochStart/bootstrap/mock/shardHeaderInterceptorStub.go @@ -3,6 +3,7 @@ package mock import ( "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/process" ) // ShardHeaderInterceptorStub - @@ -12,6 +13,11 @@ type ShardHeaderInterceptorStub struct { GetShardHeaderCalled func(hash []byte, target int) (*block.Header, error) } +// SetIsDataForCurrentShardVerifier - +func (s *ShardHeaderInterceptorStub) SetIsDataForCurrentShardVerifier(_ process.InterceptedDataVerifier) error { + return nil +} + // GetShardHeader - func (s *ShardHeaderInterceptorStub) GetShardHeader(hash []byte, target int) (*block.Header, error) { if s.GetShardHeaderCalled != nil { diff --git a/epochStart/bootstrap/simpleEpochStartMetaBlockInterceptor.go b/epochStart/bootstrap/simpleEpochStartMetaBlockInterceptor.go index 6a4fbf84cc3..051e5bdc6db 100644 --- a/epochStart/bootstrap/simpleEpochStartMetaBlockInterceptor.go +++ b/epochStart/bootstrap/simpleEpochStartMetaBlockInterceptor.go @@ -11,6 +11,7 @@ import ( "github.com/ElrondNetwork/elrond-go/hashing" "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/process" ) const timeToWaitBeforeCheckingReceivedHeaders = 1 * time.Second @@ -42,6 +43,11 @@ func NewSimpleEpochStartMetaBlockInterceptor(marshalizer marshal.Marshalizer, ha }, nil } +// SetIsDataForCurrentShardVerifier - +func (s *simpleEpochStartMetaBlockInterceptor) SetIsDataForCurrentShardVerifier(_ process.InterceptedDataVerifier) error { + return nil +} + // ProcessReceivedMessage will receive the metablocks and will add them to the maps func (s *simpleEpochStartMetaBlockInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, _ func(buffToSend []byte)) error { var mb block.MetaBlock diff --git a/epochStart/bootstrap/simpleMetaBlockInterceptor.go b/epochStart/bootstrap/simpleMetaBlockInterceptor.go index 8399b8db4e5..6a8ca7a02d8 100644 --- a/epochStart/bootstrap/simpleMetaBlockInterceptor.go +++ b/epochStart/bootstrap/simpleMetaBlockInterceptor.go @@ -11,6 +11,7 @@ import ( "github.com/ElrondNetwork/elrond-go/hashing" "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/process" ) type simpleMetaBlockInterceptor struct { @@ -21,6 +22,11 @@ type simpleMetaBlockInterceptor struct { mapMetaBlocksFromPeers map[string][]p2p.PeerID } +// SetIsDataForCurrentShardVerifier - +func (s *simpleMetaBlockInterceptor) SetIsDataForCurrentShardVerifier(_ process.InterceptedDataVerifier) error { + return nil +} + // NewSimpleMetaBlockInterceptor will return a new instance of simpleMetaBlockInterceptor func NewSimpleMetaBlockInterceptor(marshalizer marshal.Marshalizer, hasher hashing.Hasher) (*simpleMetaBlockInterceptor, error) { if check.IfNil(marshalizer) { diff --git a/epochStart/bootstrap/simpleMiniBlockInterceptor.go b/epochStart/bootstrap/simpleMiniBlockInterceptor.go index 307461d0136..c4ecd963a7d 100644 --- a/epochStart/bootstrap/simpleMiniBlockInterceptor.go +++ b/epochStart/bootstrap/simpleMiniBlockInterceptor.go @@ -11,6 +11,7 @@ import ( "github.com/ElrondNetwork/elrond-go/hashing" "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/process" ) type simpleMiniBlockInterceptor struct { @@ -39,6 +40,11 @@ func NewSimpleMiniBlockInterceptor(marshalizer marshal.Marshalizer, hasher hashi }, nil } +// SetIsDataForCurrentShardVerifier - +func 
(s *simpleMiniBlockInterceptor) SetIsDataForCurrentShardVerifier(_ process.InterceptedDataVerifier) error { + return nil +} + // ProcessReceivedMessage will receive the metablocks and will add them to the maps func (s *simpleMiniBlockInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, _ func(buffToSend []byte)) error { var mb block.MiniBlock diff --git a/epochStart/bootstrap/simpleShardHeaderInterceptor.go b/epochStart/bootstrap/simpleShardHeaderInterceptor.go index 98ae355d561..ccad5c07874 100644 --- a/epochStart/bootstrap/simpleShardHeaderInterceptor.go +++ b/epochStart/bootstrap/simpleShardHeaderInterceptor.go @@ -11,6 +11,7 @@ import ( "github.com/ElrondNetwork/elrond-go/hashing" "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/process" ) type simpleShardHeaderInterceptor struct { @@ -39,6 +40,11 @@ func NewSimpleShardHeaderInterceptor(marshalizer marshal.Marshalizer, hasher has }, nil } +// SetIsDataForCurrentShardVerifier - +func (s *simpleShardHeaderInterceptor) SetIsDataForCurrentShardVerifier(_ process.InterceptedDataVerifier) error { + return nil +} + // ProcessReceivedMessage will receive the metablocks and will add them to the maps func (s *simpleShardHeaderInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, _ func(buffToSend []byte)) error { var mb block.Header diff --git a/update/factory/fullSyncResolversContainerFactory.go b/update/factory/fullSyncResolversContainerFactory.go index 151d9e3b889..490c403f5ab 100644 --- a/update/factory/fullSyncResolversContainerFactory.go +++ b/update/factory/fullSyncResolversContainerFactory.go @@ -15,6 +15,8 @@ import ( "github.com/ElrondNetwork/elrond-go/update/genesis" ) +const numPeersToQuery = 2 + type resolversContainerFactory struct { shardCoordinator sharding.Coordinator messenger dataRetriever.TopicMessageHandler @@ -139,6 +141,7 @@ func (rcf *resolversContainerFactory) createTrieNodesResolver(baseTopic string, peerListCreator, rcf.marshalizer, rcf.intRandomizer, + numPeersToQuery, rcf.shardCoordinator.SelfId(), ) if err != nil { From c330312a6445165b2246282e3417e61fba993c62 Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Wed, 18 Mar 2020 18:30:25 +0200 Subject: [PATCH 17/61] merges and fixes. 
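
Wires a white list data verifier into the epoch start provider factory,
backed by a cache sized from the WhiteListPool section of the general
config, and adds a WhiteListHandlerStub for the tests. A minimal sketch of
the expected config section (values are illustrative):

    WhiteListPool: config.CacheConfig{
        Size:   10000,
        Type:   "LRU",
        Shards: 1,
    },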
--- .../bootstrap/epochStartDataProvider_test.go | 1 + .../factory/epochStartDataProviderFactory.go | 13 ++++++++ .../bootstrap/mock/whiteListHandlerStub.go | 32 +++++++++++++++++++ 3 files changed, 46 insertions(+) create mode 100644 epochStart/bootstrap/mock/whiteListHandlerStub.go diff --git a/epochStart/bootstrap/epochStartDataProvider_test.go b/epochStart/bootstrap/epochStartDataProvider_test.go index 32554e44a86..c14b126f5c8 100644 --- a/epochStart/bootstrap/epochStartDataProvider_test.go +++ b/epochStart/bootstrap/epochStartDataProvider_test.go @@ -161,5 +161,6 @@ func getArguments() bootstrap.ArgsEpochStartDataProvider { MetaBlockInterceptor: &mock.MetaBlockInterceptorStub{}, ShardHeaderInterceptor: &mock.ShardHeaderInterceptorStub{}, MiniBlockInterceptor: &mock.MiniBlockInterceptorStub{}, + WhiteListHandler: &mock.WhiteListHandlerStub{}, } } diff --git a/epochStart/bootstrap/factory/epochStartDataProviderFactory.go b/epochStart/bootstrap/factory/epochStartDataProviderFactory.go index 488f6d0d7d6..5cdf30a9de9 100644 --- a/epochStart/bootstrap/factory/epochStartDataProviderFactory.go +++ b/epochStart/bootstrap/factory/epochStartDataProviderFactory.go @@ -10,8 +10,10 @@ import ( "github.com/ElrondNetwork/elrond-go/hashing" "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/process/interceptors" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/storage" + "github.com/ElrondNetwork/elrond-go/storage/storageUnit" ) type epochStartDataProviderFactory struct { @@ -104,6 +106,16 @@ func (esdpf *epochStartDataProviderFactory) Create() (bootstrap.EpochStartDataPr return nil, err } + whiteListCache, err := storageUnit.NewCache( + storageUnit.CacheType(esdpf.generalConfig.WhiteListPool.Type), + esdpf.generalConfig.WhiteListPool.Size, + esdpf.generalConfig.WhiteListPool.Shards, + ) + if err != nil { + return nil, err + } + whiteListHandler, err := interceptors.NewWhiteListDataVerifier(whiteListCache) + argsEpochStart := bootstrap.ArgsEpochStartDataProvider{ PublicKey: esdpf.pubKey, Messenger: esdpf.messenger, @@ -116,6 +128,7 @@ func (esdpf *epochStartDataProviderFactory) Create() (bootstrap.EpochStartDataPr MetaBlockInterceptor: metaBlockInterceptor, ShardHeaderInterceptor: shardHdrInterceptor, MiniBlockInterceptor: miniBlockInterceptor, + WhiteListHandler: whiteListHandler, } epochStartDataProvider, err := bootstrap.NewEpochStartDataProvider(argsEpochStart) diff --git a/epochStart/bootstrap/mock/whiteListHandlerStub.go b/epochStart/bootstrap/mock/whiteListHandlerStub.go new file mode 100644 index 00000000000..3fa020ecd84 --- /dev/null +++ b/epochStart/bootstrap/mock/whiteListHandlerStub.go @@ -0,0 +1,32 @@ +package mock + +import "github.com/ElrondNetwork/elrond-go/process" + +type WhiteListHandlerStub struct { + RemoveCalled func(keys [][]byte) + AddCalled func(keys [][]byte) + IsForCurrentShardCalled func(interceptedData process.InterceptedData) bool +} + +func (w *WhiteListHandlerStub) IsForCurrentShard(interceptedData process.InterceptedData) bool { + if w.IsForCurrentShardCalled != nil { + return w.IsForCurrentShardCalled(interceptedData) + } + return true +} + +func (w *WhiteListHandlerStub) Remove(keys [][]byte) { + if w.RemoveCalled != nil { + w.RemoveCalled(keys) + } +} + +func (w *WhiteListHandlerStub) Add(keys [][]byte) { + if w.AddCalled != nil { + w.AddCalled(keys) + } +} + +func (w *WhiteListHandlerStub) IsInterfaceNil() bool { + return w == nil +} From 
1f5cc4b85891b3670fdfb518794de36389242ec1 Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Thu, 19 Mar 2020 13:02:30 +0200 Subject: [PATCH 18/61] merges and fixes. --- integrationTests/mock/pathManagerStub.go | 32 +++++++++++++++++++ .../endOfEpoch/startInEpoch_test.go | 12 +++++++ 2 files changed, 44 insertions(+) create mode 100644 integrationTests/mock/pathManagerStub.go diff --git a/integrationTests/mock/pathManagerStub.go b/integrationTests/mock/pathManagerStub.go new file mode 100644 index 00000000000..78aa45b6b67 --- /dev/null +++ b/integrationTests/mock/pathManagerStub.go @@ -0,0 +1,32 @@ +package mock + +import "fmt" + +// PathManagerStub - +type PathManagerStub struct { + PathForEpochCalled func(shardId string, epoch uint32, identifier string) string + PathForStaticCalled func(shardId string, identifier string) string +} + +// PathForEpoch - +func (p *PathManagerStub) PathForEpoch(shardId string, epoch uint32, identifier string) string { + if p.PathForEpochCalled != nil { + return p.PathForEpochCalled(shardId, epoch, identifier) + } + + return fmt.Sprintf("Epoch_%d/Shard_%s/%s", epoch, shardId, identifier) +} + +// PathForStatic - +func (p *PathManagerStub) PathForStatic(shardId string, identifier string) string { + if p.PathForEpochCalled != nil { + return p.PathForStaticCalled(shardId, identifier) + } + + return fmt.Sprintf("Static/Shard_%s/%s", shardId, identifier) +} + +// IsInterfaceNil - +func (p *PathManagerStub) IsInterfaceNil() bool { + return p == nil +} diff --git a/integrationTests/multiShard/endOfEpoch/startInEpoch_test.go b/integrationTests/multiShard/endOfEpoch/startInEpoch_test.go index fabf097ca7d..795a615cf4c 100644 --- a/integrationTests/multiShard/endOfEpoch/startInEpoch_test.go +++ b/integrationTests/multiShard/endOfEpoch/startInEpoch_test.go @@ -126,11 +126,23 @@ func TestStartInEpochForAShardNodeInMultiShardedEnvironment(t *testing.T) { NodesConfigProvider: nodesconfigprovider.NewSimpleNodesConfigProvider(&nodesConfig), StartTime: time.Time{}, OriginalNodesConfig: &nodesConfig, + PathManager: &mock.PathManagerStub{}, GeneralConfig: &config.Config{ EpochStartConfig: config.EpochStartConfig{ MinRoundsBetweenEpochs: 5, RoundsPerEpoch: 10, }, + WhiteListPool: config.CacheConfig{ + Size: 10000, + Type: "LRU", + Shards: 1, + }, + StoragePruning: config.StoragePruningConfig{ + Enabled: false, + FullArchive: true, + NumEpochsToKeep: 3, + NumActivePersisters: 3, + }, }, IsEpochFoundInStorage: false, } From f22b91d56fb81bfe58b35df14f155da049f8e84d Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Thu, 19 Mar 2020 18:44:46 +0200 Subject: [PATCH 19/61] fixes, moving implementation to bootstrapper. --- cmd/node/main.go | 39 ++---- .../bootstrap/epochStartDataProvider.go | 118 ++++++++++++------ .../factory/epochStartDataProviderFactory.go | 12 ++ 3 files changed, 105 insertions(+), 64 deletions(-) diff --git a/cmd/node/main.go b/cmd/node/main.go index 3f22d6d587c..3a44e651a6e 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -548,23 +548,6 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { return err } - var errNotCritical error - // TODO: add a component which opens headers storer and gets the last epoch start metablock - // in order to provide the last known epoch in storage. 
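
One fix worth flagging in the PathManagerStub introduced by PATCH 18 above: PathForStatic guards on PathForEpochCalled instead of PathForStaticCalled, so a stub configured only with PathForStaticCalled never invokes it, while one configured only with PathForEpochCalled panics on a nil callback. The guard should presumably read:

// PathForStatic -
func (p *PathManagerStub) PathForStatic(shardId string, identifier string) string {
	if p.PathForStaticCalled != nil {
		return p.PathForStaticCalled(shardId, identifier)
	}

	return fmt.Sprintf("Static/Shard_%s/%s", shardId, identifier)
}
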
Right now, it won't work as expected - // if storage pruning is disabled - currentEpoch, errNotCritical = storageFactory.FindLastEpochFromStorage( - workingDir, - nodesConfig.ChainID, - defaultDBPath, - defaultEpochString, - ) - if errNotCritical != nil { - currentEpoch = 0 - log.Debug("no epoch db found in storage", "error", errNotCritical.Error()) - } - - epochFoundInStorage := errNotCritical == nil - networkComponents, err = factory.NetworkComponentsFactory(p2pConfig, log, &blake2b.Blake2b{}) if err != nil { return err @@ -584,16 +567,18 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { return err } epochStartComponentArgs := factoryEpochBootstrap.EpochStartDataProviderFactoryArgs{ - PubKey: pubKey, - Messenger: networkComponents.NetMessenger, - Marshalizer: marshalizer, - Hasher: hasher, - NodesConfigProvider: nodesconfigprovider.NewSimpleNodesConfigProvider(nodesConfig), - PathManager: pathManager, - StartTime: startTime, - OriginalNodesConfig: nodesConfig, - GeneralConfig: generalConfig, - IsEpochFoundInStorage: epochFoundInStorage, + PubKey: pubKey, + Messenger: networkComponents.NetMessenger, + Marshalizer: marshalizer, + Hasher: hasher, + NodesConfigProvider: nodesconfigprovider.NewSimpleNodesConfigProvider(nodesConfig), + PathManager: pathManager, + StartTime: startTime, + OriginalNodesConfig: nodesConfig, + GeneralConfig: generalConfig, + WorkingDir: workingDir, + DefaultDBPath: defaultDBPath, + DefaultEpochString: defaultEpochString, } epochStartComponentFactory, err := factoryEpochBootstrap.NewEpochStartDataProviderFactory(epochStartComponentArgs) diff --git a/epochStart/bootstrap/epochStartDataProvider.go b/epochStart/bootstrap/epochStartDataProvider.go index cca4c36187e..8e944421f0d 100644 --- a/epochStart/bootstrap/epochStartDataProvider.go +++ b/epochStart/bootstrap/epochStartDataProvider.go @@ -31,6 +31,7 @@ import ( "github.com/ElrondNetwork/elrond-go/process/factory" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/storage" + storageFactory "github.com/ElrondNetwork/elrond-go/storage/factory" "github.com/ElrondNetwork/elrond-go/storage/timecache" ) @@ -58,6 +59,11 @@ type epochStartDataProvider struct { miniBlockInterceptor MiniBlockInterceptorHandler requestHandler process.RequestHandler whiteListHandler dataRetriever.WhiteListHandler + shardCoordinator sharding.Coordinator + genesisNodesConfig *sharding.NodesSetup + workingDir string + defaultDBPath string + defaultEpochString string } // ArgsEpochStartDataProvider holds the arguments needed for creating an epoch start data provider component @@ -74,11 +80,14 @@ type ArgsEpochStartDataProvider struct { ShardHeaderInterceptor ShardHeaderInterceptorHandler MiniBlockInterceptor MiniBlockInterceptorHandler WhiteListHandler dataRetriever.WhiteListHandler + GenesisNodesConfig *sharding.NodesSetup + WorkingDir string + DefaultDBPath string + DefaultEpochString string } // NewEpochStartDataProvider will return a new instance of epochStartDataProvider func NewEpochStartDataProvider(args ArgsEpochStartDataProvider) (*epochStartDataProvider, error) { - // TODO: maybe remove these nil checks as all of them have been done in the factory if check.IfNil(args.PublicKey) { return nil, ErrNilPublicKey } @@ -112,7 +121,8 @@ func NewEpochStartDataProvider(args ArgsEpochStartDataProvider) (*epochStartData if check.IfNil(args.WhiteListHandler) { return nil, ErrNilWhiteListHandler } - return &epochStartDataProvider{ + + epochStartProvider := &epochStartDataProvider{ publicKey: 
args.PublicKey, marshalizer: args.Marshalizer, hasher: args.Hasher, @@ -125,25 +135,64 @@ func NewEpochStartDataProvider(args ArgsEpochStartDataProvider) (*epochStartData shardHeaderInterceptor: args.ShardHeaderInterceptor, miniBlockInterceptor: args.MiniBlockInterceptor, whiteListHandler: args.WhiteListHandler, - }, nil -} + genesisNodesConfig: args.GenesisNodesConfig, + workingDir: args.WorkingDir, + defaultEpochString: args.DefaultEpochString, + defaultDBPath: args.DefaultDBPath, } -// Bootstrap will handle requesting and receiving the needed information the node will bootstrap from -func (esdp *epochStartDataProvider) Bootstrap() (*structs.ComponentsNeededForBootstrap, error) { - err := esdp.initTopicsAndInterceptors() + err := epochStartProvider.initInternalComponents() if err != nil { return nil, err } + + return epochStartProvider, nil +} + +func (esdp *epochStartDataProvider) initInternalComponents() error { + var err error + esdp.shardCoordinator, err = sharding.NewMultiShardCoordinator(esdp.genesisNodesConfig.NumberOfShards(), core.MetachainShardId) + if err != nil { + return err + } + + err = esdp.initTopicsAndInterceptors() + if err != nil { + return err + } defer func() { esdp.resetTopicsAndInterceptors() }() - requestHandlerMeta, err := esdp.createRequestHandler() + err = esdp.createRequestHandler() if err != nil { - return nil, err + return err + } + + return nil +} + +func (esdp *epochStartDataProvider) searchDataInLocalStorage() { + var errNotCritical error + // TODO: add a component which opens headers storer and gets the last epoch start metablock + // in order to provide the last known epoch in storage. Right now, it won't work as expected + // if storage pruning is disabled + currentEpoch, errNotCritical := storageFactory.FindLastEpochFromStorage( + esdp.workingDir, + esdp.genesisNodesConfig.ChainID, + esdp.defaultDBPath, + esdp.defaultEpochString, + ) + if errNotCritical != nil { + log.Debug("no epoch db found in storage", "error", errNotCritical.Error()) } - esdp.requestHandler = requestHandlerMeta + log.Debug("current epoch from storage", "epoch", currentEpoch) +} + +// Bootstrap will handle requesting and receiving the needed information the node will bootstrap from +func (esdp *epochStartDataProvider) Bootstrap() (*structs.ComponentsNeededForBootstrap, error) { + // TODO: add searching for epoch start metablock and other data inside this component epochNumForRequestingTheLatestAvailable := uint32(math.MaxUint32) metaBlock, err := esdp.getEpochStartMetaBlock(epochNumForRequestingTheLatestAvailable) @@ -164,22 +213,22 @@ func (esdp *epochStartDataProvider) Bootstrap() (*structs.ComponentsNeededForBoo return nil, err } - shardCoordinator, err := esdp.getShardCoordinator(metaBlock, nodesConfig) + esdp.shardCoordinator, err = esdp.getShardCoordinator(metaBlock, nodesConfig) if err != nil { return nil, err } - shardHeaders, err := esdp.getShardHeaders(metaBlock, nodesConfig, shardCoordinator) + shardHeaders, err := esdp.getShardHeaders(metaBlock, esdp.shardCoordinator) if err != nil { log.Debug("shard headers not found", "error", err) } var shardHeaderForShard *block.Header - if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { - shardHeaderForShard = shardHeaders[shardCoordinator.SelfId()] + if esdp.shardCoordinator.SelfId() < esdp.shardCoordinator.NumberOfShards() { + shardHeaderForShard = shardHeaders[esdp.shardCoordinator.SelfId()] } - epochStartData, err := esdp.getCurrentEpochStartData(shardCoordinator, metaBlock) + epochStartData, err
:= esdp.getCurrentEpochStartData(esdp.shardCoordinator, metaBlock) if err != nil { return nil, err } @@ -217,16 +266,16 @@ func (esdp *epochStartDataProvider) Bootstrap() (*structs.ComponentsNeededForBoo ShardHeader: shardHeaderForShard, NodesConfig: nodesConfig, ShardHeaders: shardHeaders, - ShardCoordinator: shardCoordinator, + ShardCoordinator: esdp.shardCoordinator, Tries: trieToReturn, PendingMiniBlocks: pendingMiniBlocks, } var storageHandlerComponent StorageHandler - if shardCoordinator.SelfId() > shardCoordinator.NumberOfShards() { + if esdp.shardCoordinator.SelfId() > esdp.shardCoordinator.NumberOfShards() { storageHandlerComponent, err = storagehandler.NewMetaStorageHandler( esdp.generalConfig, - shardCoordinator, + esdp.shardCoordinator, esdp.pathManager, esdp.marshalizer, esdp.hasher, @@ -238,7 +287,7 @@ func (esdp *epochStartDataProvider) Bootstrap() (*structs.ComponentsNeededForBoo } else { storageHandlerComponent, err = storagehandler.NewShardStorageHandler( esdp.generalConfig, - shardCoordinator, + esdp.shardCoordinator, esdp.pathManager, esdp.marshalizer, esdp.hasher, @@ -269,15 +318,10 @@ func (esdp *epochStartDataProvider) changeMessageProcessorsForMetaBlocks() { } } -func (esdp *epochStartDataProvider) createRequestHandler() (process.RequestHandler, error) { +func (esdp *epochStartDataProvider) createRequestHandler() error { dataPacker, err := partitioning.NewSimpleDataPacker(esdp.marshalizer) if err != nil { - return nil, err - } - - shardC, err := sharding.NewMultiShardCoordinator(2, core.MetachainShardId) - if err != nil { - return nil, err + return err } storageService := &disabled.ChainStorer{ @@ -291,27 +335,27 @@ func (esdp *epochStartDataProvider) createRequestHandler() (process.RequestHandl stateTrieStorageManager, err := trie.NewTrieStorageManagerWithoutPruning(disabled.NewDisabledStorer()) if err != nil { - return nil, err + return err } stateTrie, err := trie.NewTrie(stateTrieStorageManager, esdp.marshalizer, esdp.hasher) if err != nil { - return nil, err + return err } triesHolder.Put([]byte(factory3.UserAccountTrie), stateTrie) peerTrieStorageManager, err := trie.NewTrieStorageManagerWithoutPruning(disabled.NewDisabledStorer()) if err != nil { - return nil, err + return err } peerTrie, err := trie.NewTrie(peerTrieStorageManager, esdp.marshalizer, esdp.hasher) if err != nil { - return nil, err + return err } triesHolder.Put([]byte(factory3.PeerAccountTrie), peerTrie) resolversContainerArgs := resolverscontainer.FactoryArgs{ - ShardCoordinator: shardC, + ShardCoordinator: esdp.shardCoordinator, Messenger: esdp.messenger, Store: storageService, Marshalizer: esdp.marshalizer, @@ -324,24 +368,25 @@ func (esdp *epochStartDataProvider) createRequestHandler() (process.RequestHandl resolverFactory, err := resolverscontainer.NewMetaResolversContainerFactory(resolversContainerArgs) if err != nil { - return nil, err + return err } container, err := resolverFactory.Create() if err != nil { - return nil, err + return err } - finder, err := containers.NewResolversFinder(container, shardC) + finder, err := containers.NewResolversFinder(container, esdp.shardCoordinator) if err != nil { - return nil, err + return err } requestedItemsHandler := timecache.NewTimeCache(100) maxToRequest := 100 - return requestHandlers.NewResolverRequestHandler(finder, requestedItemsHandler, esdp.whiteListHandler, maxToRequest, core.MetachainShardId) + esdp.requestHandler, err = requestHandlers.NewResolverRequestHandler(finder, requestedItemsHandler, esdp.whiteListHandler, maxToRequest, 
core.MetachainShardId) + return err } func (esdp *epochStartDataProvider) getMiniBlock(miniBlockHeader *block.ShardMiniBlockHeader) (*block.MiniBlock, error) { @@ -485,7 +530,6 @@ func (esdp *epochStartDataProvider) getShardCoordinator(metaBlock *block.MetaBlo func (esdp *epochStartDataProvider) getShardHeaders( metaBlock *block.MetaBlock, - nodesConfig *sharding.NodesSetup, shardCoordinator sharding.Coordinator, ) (map[uint32]*block.Header, error) { headersMap := make(map[uint32]*block.Header) diff --git a/epochStart/bootstrap/factory/epochStartDataProviderFactory.go b/epochStart/bootstrap/factory/epochStartDataProviderFactory.go index 5cdf30a9de9..b98dca2914e 100644 --- a/epochStart/bootstrap/factory/epochStartDataProviderFactory.go +++ b/epochStart/bootstrap/factory/epochStartDataProviderFactory.go @@ -25,6 +25,9 @@ type epochStartDataProviderFactory struct { nodesConfigProvider bootstrap.NodesConfigProviderHandler generalConfig config.Config shouldSync bool + workingDir string + defaultDBPath string + defaultEpochString string } // EpochStartDataProviderFactoryArgs holds the arguments needed for creating a factory for the epoch start data @@ -40,6 +43,9 @@ type EpochStartDataProviderFactoryArgs struct { OriginalNodesConfig *sharding.NodesSetup GeneralConfig *config.Config IsEpochFoundInStorage bool + WorkingDir string + DefaultDBPath string + DefaultEpochString string } // NewEpochStartDataProviderFactory returns a new instance of epochStartDataProviderFactory @@ -80,6 +86,9 @@ func NewEpochStartDataProviderFactory(args EpochStartDataProviderFactoryArgs) (* generalConfig: *args.GeneralConfig, nodesConfigProvider: args.NodesConfigProvider, shouldSync: shouldSync, + workingDir: args.WorkingDir, + defaultEpochString: args.DefaultEpochString, + defaultDBPath: args.DefaultDBPath, }, nil } @@ -129,6 +138,9 @@ func (esdpf *epochStartDataProviderFactory) Create() (bootstrap.EpochStartDataPr ShardHeaderInterceptor: shardHdrInterceptor, MiniBlockInterceptor: miniBlockInterceptor, WhiteListHandler: whiteListHandler, + WorkingDir: esdpf.workingDir, + DefaultEpochString: esdpf.defaultEpochString, + DefaultDBPath: esdpf.defaultDBPath, } epochStartDataProvider, err := bootstrap.NewEpochStartDataProvider(argsEpochStart) From f48bc1f3fcae589b866a5f39cd7a3094ef176f85 Mon Sep 17 00:00:00 2001 From: bogdan-rosianu Date: Thu, 19 Mar 2020 18:49:44 +0200 Subject: [PATCH 20/61] EN-5829 created interceptors container (WIP) --- cmd/node/factory/structs.go | 85 +-- cmd/node/main.go | 88 ++- dataRetriever/factory/dataPoolFactory.go | 98 +++ .../disabled/disabledAccountsAdapter.go | 98 +++ .../bootstrap/disabled/disabledCacher.go | 60 ++ .../disabled/disabledEpochStartTrigger.go | 64 ++ .../disabled/disabledHeaderSigVerifier.go | 25 + .../bootstrap/disabled/disabledMultiSigner.go | 53 ++ .../disabled/disabledNodesCoordinator.go | 91 +++ .../disabled/disabledValidityAttester.go | 25 + .../bootstrap/epochStartDataProvider.go | 156 ++++- epochStart/bootstrap/errors.go | 17 +- .../factory/epochStartDataProviderFactory.go | 100 ++- .../epochStartInterceptorsContainerFactory.go | 107 +++ .../storagehandler/baseStorageHandler.go | 2 +- epochStart/bootstrap/structs/components.go | 2 +- .../baseInterceptorsContainerFactory.go | 17 + .../metachain/interceptorsContainerFactory.go | 626 ------------------ update/interface.go | 1 + update/sync/syncHeadersByHash.go | 196 ++++++ update/sync/syncMiniBlocks.go | 12 + 21 files changed, 1112 insertions(+), 811 deletions(-) create mode 100644
dataRetriever/factory/dataPoolFactory.go create mode 100644 epochStart/bootstrap/disabled/disabledAccountsAdapter.go create mode 100644 epochStart/bootstrap/disabled/disabledCacher.go create mode 100644 epochStart/bootstrap/disabled/disabledEpochStartTrigger.go create mode 100644 epochStart/bootstrap/disabled/disabledHeaderSigVerifier.go create mode 100644 epochStart/bootstrap/disabled/disabledMultiSigner.go create mode 100644 epochStart/bootstrap/disabled/disabledNodesCoordinator.go create mode 100644 epochStart/bootstrap/disabled/disabledValidityAttester.go create mode 100644 epochStart/bootstrap/factory/interceptors/epochStartInterceptorsContainerFactory.go delete mode 100644 process/factory/metachain/interceptorsContainerFactory.go create mode 100644 update/sync/syncHeadersByHash.go diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index b9f5a8019d1..88c3af2ad6c 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -38,14 +38,10 @@ import ( "github.com/ElrondNetwork/elrond-go/data/typeConverters" "github.com/ElrondNetwork/elrond-go/data/typeConverters/uint64ByteSlice" "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool" - "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool/headersCache" + factory2 "github.com/ElrondNetwork/elrond-go/dataRetriever/factory" "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/containers" "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/resolverscontainer" - txpoolFactory "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/txpool" "github.com/ElrondNetwork/elrond-go/dataRetriever/requestHandlers" - "github.com/ElrondNetwork/elrond-go/dataRetriever/shardedData" - "github.com/ElrondNetwork/elrond-go/dataRetriever/txpool" "github.com/ElrondNetwork/elrond-go/epochStart" "github.com/ElrondNetwork/elrond-go/epochStart/genesis" metachainEpochStart "github.com/ElrondNetwork/elrond-go/epochStart/metachain" @@ -408,7 +404,12 @@ func DataComponentsFactory(args *dataComponentsFactoryArgs) (*Data, error) { return nil, errors.New("could not create local data store: " + err.Error()) } - datapool, err = createDataPoolFromConfig(args) + dataPoolArgs := factory2.ArgsDataPool{ + Config: args.config, + EconomicsData: args.economicsData, + ShardCoordinator: args.shardCoordinator, + } + datapool, err = factory2.NewDataPoolFromConfig(dataPoolArgs) if err != nil { return nil, errors.New("could not create data pools: ") } @@ -1013,78 +1014,6 @@ func createDataStoreFromConfig( return nil, errors.New("can not create data store") } -func createDataPoolFromConfig(args *dataComponentsFactoryArgs) (dataRetriever.PoolsHolder, error) { - log.Debug("creatingDataPool from config") - - mainConfig := args.config - - txPool, err := txpoolFactory.CreateTxPool(txpool.ArgShardedTxPool{ - Config: storageFactory.GetCacherFromConfig(mainConfig.TxDataPool), - MinGasPrice: args.economicsData.MinGasPrice(), - NumberOfShards: args.shardCoordinator.NumberOfShards(), - SelfShardID: args.shardCoordinator.SelfId(), - }) - if err != nil { - log.Error("error creating txpool") - return nil, err - } - - uTxPool, err := shardedData.NewShardedData(storageFactory.GetCacherFromConfig(mainConfig.UnsignedTransactionDataPool)) - if err != nil { - log.Error("error creating smart contract result pool") - return nil, err - } - - rewardTxPool, err := shardedData.NewShardedData(storageFactory.GetCacherFromConfig(mainConfig.RewardTransactionDataPool)) - if err != nil { - log.Error("error 
creating reward transaction pool") - return nil, err - } - - hdrPool, err := headersCache.NewHeadersPool(mainConfig.HeadersPoolConfig) - if err != nil { - log.Error("error creating headers pool") - return nil, err - } - - cacherCfg := storageFactory.GetCacherFromConfig(mainConfig.TxBlockBodyDataPool) - txBlockBody, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - if err != nil { - log.Error("error creating txBlockBody") - return nil, err - } - - cacherCfg = storageFactory.GetCacherFromConfig(mainConfig.PeerBlockBodyDataPool) - peerChangeBlockBody, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - if err != nil { - log.Error("error creating peerChangeBlockBody") - return nil, err - } - - cacherCfg = storageFactory.GetCacherFromConfig(mainConfig.TrieNodesDataPool) - trieNodes, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - if err != nil { - log.Info("error creating trieNodes") - return nil, err - } - - currBlockTxs, err := dataPool.NewCurrentBlockPool() - if err != nil { - return nil, err - } - - return dataPool.NewDataPool( - txPool, - uTxPool, - rewardTxPool, - hdrPool, - txBlockBody, - peerChangeBlockBody, - trieNodes, - currBlockTxs, - ) -} - func createSingleSigner(config *config.Config) (crypto.SingleSigner, error) { switch config.Consensus.Type { case BlsConsensusType: diff --git a/cmd/node/main.go b/cmd/node/main.go index 3f22d6d587c..005202d1e33 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -38,11 +38,9 @@ import ( "github.com/ElrondNetwork/elrond-go/facade" "github.com/ElrondNetwork/elrond-go/hashing" "github.com/ElrondNetwork/elrond-go/hashing/blake2b" - factoryHasher "github.com/ElrondNetwork/elrond-go/hashing/factory" "github.com/ElrondNetwork/elrond-go/logger" "github.com/ElrondNetwork/elrond-go/logger/redirects" "github.com/ElrondNetwork/elrond-go/marshal" - factoryMarshal "github.com/ElrondNetwork/elrond-go/marshal/factory" "github.com/ElrondNetwork/elrond-go/node" "github.com/ElrondNetwork/elrond-go/node/external" "github.com/ElrondNetwork/elrond-go/ntp" @@ -565,35 +563,62 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { epochFoundInStorage := errNotCritical == nil - networkComponents, err = factory.NetworkComponentsFactory(p2pConfig, log, &blake2b.Blake2b{}) + shardCoordinator, nodeType, err := createShardCoordinator(nodesConfig, pubKey, preferencesConfig.Preferences, log) if err != nil { return err } - err = networkComponents.NetMessenger.Bootstrap() + + var shardId = core.GetShardIdString(shardCoordinator.SelfId()) + + log.Trace("creating crypto components") + cryptoArgs := factory.NewCryptoComponentsFactoryArgs( + ctx, + generalConfig, + nodesConfig, + shardCoordinator, + keyGen, + privKey, + log, + ) + cryptoComponents, err := factory.CryptoComponentsFactory(cryptoArgs) + if err != nil { + return err + } + + log.Trace("creating core components") + coreArgs := factory.NewCoreComponentsFactoryArgs(generalConfig, pathManager, shardId, []byte(nodesConfig.ChainID)) + coreComponents, err := factory.CoreComponentsFactory(coreArgs) if err != nil { return err } - time.Sleep(secondsToWaitForP2PBootstrap * time.Second) - marshalizer, err := factoryMarshal.NewMarshalizer(generalConfig.Marshalizer.Type) + networkComponents, err = factory.NetworkComponentsFactory(p2pConfig, log, &blake2b.Blake2b{}) if err != nil { return err } - hasher, err := factoryHasher.NewHasher(generalConfig.Hasher.Type) + err = networkComponents.NetMessenger.Bootstrap() if err 
!= nil { return err } + time.Sleep(secondsToWaitForP2PBootstrap * time.Second) + epochStartComponentArgs := factoryEpochBootstrap.EpochStartDataProviderFactoryArgs{ - PubKey: pubKey, - Messenger: networkComponents.NetMessenger, - Marshalizer: marshalizer, - Hasher: hasher, - NodesConfigProvider: nodesconfigprovider.NewSimpleNodesConfigProvider(nodesConfig), - PathManager: pathManager, - StartTime: startTime, - OriginalNodesConfig: nodesConfig, - GeneralConfig: generalConfig, - IsEpochFoundInStorage: epochFoundInStorage, + PubKey: pubKey, + Messenger: networkComponents.NetMessenger, + Marshalizer: coreComponents.InternalMarshalizer, + Hasher: coreComponents.Hasher, + NodesConfigProvider: nodesconfigprovider.NewSimpleNodesConfigProvider(nodesConfig), + DefaultShardCoordinator: shardCoordinator, + PathManager: pathManager, + StartTime: startTime, + OriginalNodesConfig: nodesConfig, + EconomicsConfig: economicsConfig, + GeneralConfig: generalConfig, + KeyGen: cryptoComponents.TxSignKeyGen, + BlockKeyGen: cryptoComponents.BlockSignKeyGen, + SingleSigner: cryptoComponents.TxSingleSigner, + BlockSingleSigner: cryptoComponents.SingleSigner, + IsEpochFoundInStorage: epochFoundInStorage, } epochStartComponentFactory, err := factoryEpochBootstrap.NewEpochStartDataProviderFactory(epochStartComponentArgs) @@ -619,13 +644,6 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { log.Error("error bootstrapping", "error", err) } - shardCoordinator, nodeType, err := createShardCoordinator(nodesConfig, pubKey, preferencesConfig.Preferences, log) - if err != nil { - return err - } - - var shardId = core.GetShardIdString(shardCoordinator.SelfId()) - storageCleanupFlagValue := ctx.GlobalBool(storageCleanup.Name) if storageCleanupFlagValue { dbPath := filepath.Join( @@ -638,13 +656,6 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { } } - log.Trace("creating core components") - coreArgs := factory.NewCoreComponentsFactoryArgs(generalConfig, pathManager, shardId, []byte(nodesConfig.ChainID)) - coreComponents, err := factory.CoreComponentsFactory(coreArgs) - if err != nil { - return err - } - log.Trace("creating economics data components") economicsData, err := economics.NewEconomicsData(economicsConfig) if err != nil { @@ -728,21 +739,6 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { return err } - log.Trace("creating crypto components") - cryptoArgs := factory.NewCryptoComponentsFactoryArgs( - ctx, - generalConfig, - nodesConfig, - shardCoordinator, - keyGen, - privKey, - log, - ) - cryptoComponents, err := factory.CryptoComponentsFactory(cryptoArgs) - if err != nil { - return err - } - metrics.SaveStringMetric(coreComponents.StatusHandler, core.MetricNodeDisplayName, preferencesConfig.Preferences.NodeDisplayName) metrics.SaveStringMetric(coreComponents.StatusHandler, core.MetricChainId, nodesConfig.ChainID) metrics.SaveUint64Metric(coreComponents.StatusHandler, core.MetricMinGasPrice, economicsData.MinGasPrice()) diff --git a/dataRetriever/factory/dataPoolFactory.go b/dataRetriever/factory/dataPoolFactory.go new file mode 100644 index 00000000000..c53fc05ad3c --- /dev/null +++ b/dataRetriever/factory/dataPoolFactory.go @@ -0,0 +1,98 @@ +package factory + +import ( + "github.com/ElrondNetwork/elrond-go/config" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool" + "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool/headersCache" + txpool2 
"github.com/ElrondNetwork/elrond-go/dataRetriever/factory/txpool" + "github.com/ElrondNetwork/elrond-go/dataRetriever/shardedData" + "github.com/ElrondNetwork/elrond-go/dataRetriever/txpool" + "github.com/ElrondNetwork/elrond-go/logger" + "github.com/ElrondNetwork/elrond-go/process/economics" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/storage/factory" + "github.com/ElrondNetwork/elrond-go/storage/storageUnit" +) + +var log = logger.GetOrCreate("dataRetriever/factory") + +// ArgsDataPool holds the arguments needed for NewDataPoolFromConfig function +type ArgsDataPool struct { + Config *config.Config + EconomicsData *economics.EconomicsData + ShardCoordinator sharding.Coordinator +} + +// NewDataPoolFromConfig will return a new instance of a PoolsHolder +func NewDataPoolFromConfig(args ArgsDataPool) (dataRetriever.PoolsHolder, error) { + log.Debug("creatingDataPool from config") + + mainConfig := args.Config + + txPool, err := txpool2.CreateTxPool(txpool.ArgShardedTxPool{ + Config: factory.GetCacherFromConfig(mainConfig.TxDataPool), + MinGasPrice: args.EconomicsData.MinGasPrice(), + NumberOfShards: args.ShardCoordinator.NumberOfShards(), + SelfShardID: args.ShardCoordinator.SelfId(), + }) + if err != nil { + log.Error("error creating txpool") + return nil, err + } + + uTxPool, err := shardedData.NewShardedData(factory.GetCacherFromConfig(mainConfig.UnsignedTransactionDataPool)) + if err != nil { + log.Error("error creating smart contract result pool") + return nil, err + } + + rewardTxPool, err := shardedData.NewShardedData(factory.GetCacherFromConfig(mainConfig.RewardTransactionDataPool)) + if err != nil { + log.Error("error creating reward transaction pool") + return nil, err + } + + hdrPool, err := headersCache.NewHeadersPool(mainConfig.HeadersPoolConfig) + if err != nil { + log.Error("error creating headers pool") + return nil, err + } + + cacherCfg := factory.GetCacherFromConfig(mainConfig.TxBlockBodyDataPool) + txBlockBody, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + if err != nil { + log.Error("error creating txBlockBody") + return nil, err + } + + cacherCfg = factory.GetCacherFromConfig(mainConfig.PeerBlockBodyDataPool) + peerChangeBlockBody, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + if err != nil { + log.Error("error creating peerChangeBlockBody") + return nil, err + } + + cacherCfg = factory.GetCacherFromConfig(mainConfig.TrieNodesDataPool) + trieNodes, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + if err != nil { + log.Info("error creating trieNodes") + return nil, err + } + + currBlockTxs, err := dataPool.NewCurrentBlockPool() + if err != nil { + return nil, err + } + + return dataPool.NewDataPool( + txPool, + uTxPool, + rewardTxPool, + hdrPool, + txBlockBody, + peerChangeBlockBody, + trieNodes, + currBlockTxs, + ) +} diff --git a/epochStart/bootstrap/disabled/disabledAccountsAdapter.go b/epochStart/bootstrap/disabled/disabledAccountsAdapter.go new file mode 100644 index 00000000000..600a304d530 --- /dev/null +++ b/epochStart/bootstrap/disabled/disabledAccountsAdapter.go @@ -0,0 +1,98 @@ +package disabled + +import ( + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/state" +) + +type accountsAdapter struct { +} + +// NewAccountsAdapter returns a new instance of accountsAdapter +func NewAccountsAdapter() *accountsAdapter { + return &accountsAdapter{} +} + +func (a *accountsAdapter) 
GetAccountWithJournal(addressContainer state.AddressContainer) (state.AccountHandler, error) { + return nil, nil +} + +func (a *accountsAdapter) GetExistingAccount(addressContainer state.AddressContainer) (state.AccountHandler, error) { + return nil, nil +} + +func (a *accountsAdapter) HasAccount(addressContainer state.AddressContainer) (bool, error) { + return false, nil +} + +func (a *accountsAdapter) RemoveAccount(addressContainer state.AddressContainer) error { + return nil +} + +func (a *accountsAdapter) Commit() ([]byte, error) { + return nil, nil +} + +func (a *accountsAdapter) JournalLen() int { + return 0 +} + +func (a *accountsAdapter) RevertToSnapshot(snapshot int) error { + return nil +} + +func (a *accountsAdapter) RootHash() ([]byte, error) { + return nil, nil +} + +func (a *accountsAdapter) RecreateTrie(rootHash []byte) error { + return nil +} + +func (a *accountsAdapter) PutCode(accountHandler state.AccountHandler, code []byte) error { + return nil +} + +func (a *accountsAdapter) RemoveCode(codeHash []byte) error { + return nil +} + +func (a *accountsAdapter) SaveDataTrie(accountHandler state.AccountHandler) error { + return nil +} + +func (a *accountsAdapter) PruneTrie(rootHash []byte, identifier data.TriePruningIdentifier) error { + return nil +} + +func (a *accountsAdapter) CancelPrune(rootHash []byte, identifier data.TriePruningIdentifier) { + return +} + +func (a *accountsAdapter) SnapshotState(rootHash []byte) { + return +} + +func (a *accountsAdapter) SetStateCheckpoint(rootHash []byte) { + return +} + +func (a *accountsAdapter) IsPruningEnabled() bool { + return false +} + +func (a *accountsAdapter) ClosePersister() error { + return nil +} + +func (a *accountsAdapter) GetAllLeaves(rootHash []byte) (map[string][]byte, error) { + return nil, nil +} + +func (a *accountsAdapter) RecreateAllTries(rootHash []byte) (map[string]data.Trie, error) { + return nil, nil +} + +func (a *accountsAdapter) IsInterfaceNil() bool { + return a == nil +} diff --git a/epochStart/bootstrap/disabled/disabledCacher.go b/epochStart/bootstrap/disabled/disabledCacher.go new file mode 100644 index 00000000000..7423e77fa30 --- /dev/null +++ b/epochStart/bootstrap/disabled/disabledCacher.go @@ -0,0 +1,60 @@ +package disabled + +type cacher struct { +} + +// NewCacher returns a new instance of cacher +func NewCacher() *cacher { + return &cacher{} +} + +func (d *cacher) Clear() { +} + +func (d *cacher) Put(key []byte, value interface{}) bool { + return true +} + +func (d *cacher) Get(key []byte) (value interface{}, ok bool) { + return nil, false +} + +func (d *cacher) Has(key []byte) bool { + panic("implement me") +} + +func (d *cacher) Peek(key []byte) (value interface{}, ok bool) { + panic("implement me") +} + +func (d *cacher) HasOrAdd(key []byte, value interface{}) (ok, evicted bool) { + panic("implement me") +} + +func (d *cacher) Remove(key []byte) { + panic("implement me") +} + +func (d *cacher) RemoveOldest() { + panic("implement me") +} + +func (d *cacher) Keys() [][]byte { + panic("implement me") +} + +func (d *cacher) Len() int { + panic("implement me") +} + +func (d *cacher) MaxSize() int { + panic("implement me") +} + +func (d *cacher) RegisterHandler(func(key []byte)) { + panic("implement me") +} + +func (d *cacher) IsInterfaceNil() bool { + panic("implement me") +} diff --git a/epochStart/bootstrap/disabled/disabledEpochStartTrigger.go b/epochStart/bootstrap/disabled/disabledEpochStartTrigger.go new file mode 100644 index 00000000000..3ab662f4bff --- /dev/null +++ 
b/epochStart/bootstrap/disabled/disabledEpochStartTrigger.go @@ -0,0 +1,64 @@ +package disabled + +import ( + "github.com/ElrondNetwork/elrond-go/data" +) + +type epochStartTrigger struct { +} + +// NewEpochStartTrigger returns a new instance of epochStartTrigger +func NewEpochStartTrigger() *epochStartTrigger { + return &epochStartTrigger{} +} + +func (e *epochStartTrigger) Update(round uint64) { +} + +func (e *epochStartTrigger) ReceivedHeader(header data.HeaderHandler) { +} + +func (e *epochStartTrigger) IsEpochStart() bool { + return false +} + +func (e *epochStartTrigger) Epoch() uint32 { + return 0 +} + +func (e *epochStartTrigger) EpochStartRound() uint64 { + return 0 +} + +func (e *epochStartTrigger) SetProcessed(header data.HeaderHandler) { +} + +func (e *epochStartTrigger) RevertStateToBlock(header data.HeaderHandler) error { + return nil +} + +func (e *epochStartTrigger) EpochStartMetaHdrHash() []byte { + return nil +} + +func (e *epochStartTrigger) GetSavedStateKey() []byte { + return nil +} + +func (e *epochStartTrigger) LoadState(key []byte) error { + return nil +} + +func (e *epochStartTrigger) SetFinalityAttestingRound(round uint64) { +} + +func (e *epochStartTrigger) EpochFinalityAttestingRound() uint64 { + return 0 +} + +func (e *epochStartTrigger) RequestEpochStartIfNeeded(interceptedHeader data.HeaderHandler) { +} + +func (e *epochStartTrigger) IsInterfaceNil() bool { + return e == nil +} diff --git a/epochStart/bootstrap/disabled/disabledHeaderSigVerifier.go b/epochStart/bootstrap/disabled/disabledHeaderSigVerifier.go new file mode 100644 index 00000000000..e7d2009a05d --- /dev/null +++ b/epochStart/bootstrap/disabled/disabledHeaderSigVerifier.go @@ -0,0 +1,25 @@ +package disabled + +import ( + "github.com/ElrondNetwork/elrond-go/data" +) + +type headerSigVerifier struct { +} + +// NewHeaderSigVerifier returns a new instance of headerSigVerifier +func NewHeaderSigVerifier() *headerSigVerifier { + return &headerSigVerifier{} +} + +func (h *headerSigVerifier) VerifyRandSeedAndLeaderSignature(header data.HeaderHandler) error { + return nil +} + +func (h *headerSigVerifier) VerifySignature(header data.HeaderHandler) error { + return nil +} + +func (h *headerSigVerifier) IsInterfaceNil() bool { + return h == nil +} diff --git a/epochStart/bootstrap/disabled/disabledMultiSigner.go b/epochStart/bootstrap/disabled/disabledMultiSigner.go new file mode 100644 index 00000000000..a2011eab77e --- /dev/null +++ b/epochStart/bootstrap/disabled/disabledMultiSigner.go @@ -0,0 +1,53 @@ +package disabled + +import ( + "github.com/ElrondNetwork/elrond-go/crypto" +) + +type multiSigner struct { +} + +// NewMultiSigner returns a new instance of multiSigner +func NewMultiSigner() *multiSigner { + return &multiSigner{} +} + +func (m *multiSigner) Create(pubKeys []string, index uint16) (crypto.MultiSigner, error) { + return nil, nil +} + +func (m *multiSigner) SetAggregatedSig([]byte) error { + return nil +} + +func (m *multiSigner) Verify(msg []byte, bitmap []byte) error { + return nil +} + +func (m *multiSigner) Reset(pubKeys []string, index uint16) error { + return nil +} + +func (m *multiSigner) CreateSignatureShare(msg []byte, bitmap []byte) ([]byte, error) { + return nil, nil +} + +func (m *multiSigner) StoreSignatureShare(index uint16, sig []byte) error { + return nil +} + +func (m *multiSigner) SignatureShare(index uint16) ([]byte, error) { + return nil, nil +} + +func (m *multiSigner) VerifySignatureShare(index uint16, sig []byte, msg []byte, bitmap []byte) error { + return nil +} + 
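
All of these disabled placeholders end with the same IsInterfaceNil method, and callers test them with check.IfNil rather than comparing against nil directly. The reason is Go's typed-nil behaviour: an interface value holding a nil pointer is itself non-nil, so a plain nil comparison misses it. A self-contained sketch; ifNil approximates what a helper such as core/check.IfNil is assumed to do:

package main

import "fmt"

// NilChecker mirrors the IsInterfaceNil convention used by the disabled components.
type NilChecker interface {
	IsInterfaceNil() bool
}

type multiSigner struct{}

// IsInterfaceNil reports whether the receiver itself is nil.
func (m *multiSigner) IsInterfaceNil() bool { return m == nil }

// ifNil treats both a nil interface and an interface holding a typed nil as nil.
func ifNil(c NilChecker) bool {
	return c == nil || c.IsInterfaceNil()
}

func main() {
	var m *multiSigner   // typed nil pointer
	var i NilChecker = m // non-nil interface value holding a nil pointer
	fmt.Println(i == nil) // false: the interface itself is not nil
	fmt.Println(ifNil(i)) // true: IsInterfaceNil detects the nil receiver
}
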
+func (m *multiSigner) AggregateSigs(bitmap []byte) ([]byte, error) { + return nil, nil +} + +func (m *multiSigner) IsInterfaceNil() bool { + return m == nil +} diff --git a/epochStart/bootstrap/disabled/disabledNodesCoordinator.go b/epochStart/bootstrap/disabled/disabledNodesCoordinator.go new file mode 100644 index 00000000000..22a670812b3 --- /dev/null +++ b/epochStart/bootstrap/disabled/disabledNodesCoordinator.go @@ -0,0 +1,91 @@ +package disabled + +import ( + "github.com/ElrondNetwork/elrond-go/sharding" +) + +// nodesCoordinator - +type nodesCoordinator struct { +} + +// NewNodesCoordinator returns a new instance of nodesCoordinator +func NewNodesCoordinator() *nodesCoordinator { + return &nodesCoordinator{} +} + +func (n *nodesCoordinator) SetNodesPerShards( + eligible map[uint32][]sharding.Validator, + waiting map[uint32][]sharding.Validator, + epoch uint32, + updateList bool, +) error { + return nil +} + +func (n *nodesCoordinator) ComputeLeaving(allValidators []sharding.Validator) []sharding.Validator { + return nil +} + +func (n *nodesCoordinator) GetValidatorsIndexes(publicKeys []string, epoch uint32) ([]uint64, error) { + return nil, nil +} + +func (n *nodesCoordinator) GetAllEligibleValidatorsPublicKeys(epoch uint32) (map[uint32][][]byte, error) { + return nil, nil +} + +func (n *nodesCoordinator) GetAllWaitingValidatorsPublicKeys(epoch uint32) (map[uint32][][]byte, error) { + return nil, nil +} + +func (n *nodesCoordinator) GetConsensusValidatorsPublicKeys(randomness []byte, round uint64, shardId uint32, epoch uint32) ([]string, error) { + return nil, nil +} + +func (n *nodesCoordinator) GetConsensusValidatorsRewardsAddresses(randomness []byte, round uint64, shardId uint32, epoch uint32) ([]string, error) { + return nil, nil +} + +func (n *nodesCoordinator) GetOwnPublicKey() []byte { + return nil +} + +func (n *nodesCoordinator) ComputeConsensusGroup(randomness []byte, round uint64, shardId uint32, epoch uint32) (validatorsGroup []sharding.Validator, err error) { + return nil, nil +} + +func (n *nodesCoordinator) GetValidatorWithPublicKey(publicKey []byte, epoch uint32) (validator sharding.Validator, shardId uint32, err error) { + return nil, 0, nil +} + +func (n *nodesCoordinator) UpdatePeersListAndIndex() error { + return nil +} + +func (n *nodesCoordinator) LoadState(key []byte) error { + return nil +} + +func (n *nodesCoordinator) GetSavedStateKey() []byte { + return nil +} + +func (n *nodesCoordinator) ShardIdForEpoch(epoch uint32) (uint32, error) { + return 0, nil +} + +func (n *nodesCoordinator) GetConsensusWhitelistedNodes(epoch uint32) (map[string]struct{}, error) { + return nil, nil +} + +func (n *nodesCoordinator) ConsensusGroupSize(uint32) int { + return 0 +} + +func (n *nodesCoordinator) GetNumTotalEligible() uint64 { + return 0 +} + +func (n *nodesCoordinator) IsInterfaceNil() bool { + return n == nil +} diff --git a/epochStart/bootstrap/disabled/disabledValidityAttester.go b/epochStart/bootstrap/disabled/disabledValidityAttester.go new file mode 100644 index 00000000000..6e4b43d733e --- /dev/null +++ b/epochStart/bootstrap/disabled/disabledValidityAttester.go @@ -0,0 +1,25 @@ +package disabled + +import ( + "github.com/ElrondNetwork/elrond-go/data" +) + +type validityAttester struct { +} + +// NewValidityAttester returns a new instance of validityAttester +func NewValidityAttester() *validityAttester { + return &validityAttester{} +} + +func (v *validityAttester) CheckBlockAgainstFinal(headerHandler data.HeaderHandler) error { + return nil +} + +func (v 
*validityAttester) CheckBlockAgainstRounder(headerHandler data.HeaderHandler) error { + return nil +} + +func (v *validityAttester) IsInterfaceNil() bool { + return v == nil +} diff --git a/epochStart/bootstrap/epochStartDataProvider.go b/epochStart/bootstrap/epochStartDataProvider.go index cca4c36187e..d013def55d5 100644 --- a/epochStart/bootstrap/epochStartDataProvider.go +++ b/epochStart/bootstrap/epochStartDataProvider.go @@ -14,13 +14,15 @@ import ( "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/data/trie" - factory3 "github.com/ElrondNetwork/elrond-go/data/trie/factory" + trieFactory "github.com/ElrondNetwork/elrond-go/data/trie/factory" "github.com/ElrondNetwork/elrond-go/data/typeConverters/uint64ByteSlice" "github.com/ElrondNetwork/elrond-go/dataRetriever" + factory2 "github.com/ElrondNetwork/elrond-go/dataRetriever/factory" "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/containers" "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/resolverscontainer" "github.com/ElrondNetwork/elrond-go/dataRetriever/requestHandlers" "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/disabled" + "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/factory/interceptors" "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/storagehandler" "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/structs" "github.com/ElrondNetwork/elrond-go/hashing" @@ -28,10 +30,14 @@ import ( "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/p2p" "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/economics" "github.com/ElrondNetwork/elrond-go/process/factory" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/storage" + "github.com/ElrondNetwork/elrond-go/storage/lrucache" "github.com/ElrondNetwork/elrond-go/storage/timecache" + "github.com/ElrondNetwork/elrond-go/update" + "github.com/ElrondNetwork/elrond-go/update/sync" ) var log = logger.GetOrCreate("registration") @@ -50,14 +56,20 @@ type epochStartDataProvider struct { hasher hashing.Hasher messenger p2p.Messenger generalConfig config.Config + economicsConfig config.EconomicsConfig + defaultShardCoordinator sharding.Coordinator pathManager PathManagerHandler nodesConfigProvider NodesConfigProviderHandler epochStartMetaBlockInterceptor EpochStartMetaBlockInterceptorHandler metaBlockInterceptor MetaBlockInterceptorHandler shardHeaderInterceptor ShardHeaderInterceptorHandler miniBlockInterceptor MiniBlockInterceptorHandler + singleSigner crypto.SingleSigner + blockSingleSigner crypto.SingleSigner + keyGen crypto.KeyGenerator + blockKeyGen crypto.KeyGenerator requestHandler process.RequestHandler - whiteListHandler dataRetriever.WhiteListHandler + whiteListHandler update.WhiteListHandler } // ArgsEpochStartDataProvider holds the arguments needed for creating an epoch start data provider component @@ -67,13 +79,19 @@ type ArgsEpochStartDataProvider struct { Marshalizer marshal.Marshalizer Hasher hashing.Hasher GeneralConfig config.Config + EconomicsConfig config.EconomicsConfig + DefaultShardCoordinator sharding.Coordinator PathManager PathManagerHandler NodesConfigProvider NodesConfigProviderHandler EpochStartMetaBlockInterceptor EpochStartMetaBlockInterceptorHandler MetaBlockInterceptor MetaBlockInterceptorHandler ShardHeaderInterceptor ShardHeaderInterceptorHandler MiniBlockInterceptor MiniBlockInterceptorHandler - WhiteListHandler 
dataRetriever.WhiteListHandler + SingleSigner crypto.SingleSigner + BlockSingleSigner crypto.SingleSigner + KeyGen crypto.KeyGenerator + BlockKeyGen crypto.KeyGenerator + WhiteListHandler update.WhiteListHandler } // NewEpochStartDataProvider will return a new instance of epochStartDataProvider @@ -112,12 +130,29 @@ func NewEpochStartDataProvider(args ArgsEpochStartDataProvider) (*epochStartData if check.IfNil(args.WhiteListHandler) { return nil, ErrNilWhiteListHandler } + if check.IfNil(args.DefaultShardCoordinator) { + return nil, ErrNilDefaultShardCoordinator + } + if check.IfNil(args.BlockKeyGen) { + return nil, ErrNilBlockKeyGen + } + if check.IfNil(args.KeyGen) { + return nil, ErrNilKeyGen + } + if check.IfNil(args.SingleSigner) { + return nil, ErrNilSingleSigner + } + if check.IfNil(args.BlockSingleSigner) { + return nil, ErrNilBlockSingleSigner + } + return &epochStartDataProvider{ publicKey: args.PublicKey, marshalizer: args.Marshalizer, hasher: args.Hasher, messenger: args.Messenger, generalConfig: args.GeneralConfig, + economicsConfig: args.EconomicsConfig, pathManager: args.PathManager, nodesConfigProvider: args.NodesConfigProvider, epochStartMetaBlockInterceptor: args.EpochStartMetaBlockInterceptor, @@ -125,24 +160,39 @@ func NewEpochStartDataProvider(args ArgsEpochStartDataProvider) (*epochStartData shardHeaderInterceptor: args.ShardHeaderInterceptor, miniBlockInterceptor: args.MiniBlockInterceptor, whiteListHandler: args.WhiteListHandler, + defaultShardCoordinator: args.DefaultShardCoordinator, + keyGen: args.KeyGen, + blockKeyGen: args.BlockKeyGen, + singleSigner: args.SingleSigner, + blockSingleSigner: args.BlockSingleSigner, }, nil } // Bootstrap will handle requesting and receiving the needed information the node will bootstrap from func (esdp *epochStartDataProvider) Bootstrap() (*structs.ComponentsNeededForBootstrap, error) { - err := esdp.initTopicsAndInterceptors() + economicsData, err := economics.NewEconomicsData(&esdp.economicsConfig) if err != nil { return nil, err } - defer func() { - esdp.resetTopicsAndInterceptors() - }() + + commonDataPool, err := factory2.NewDataPoolFromConfig( + factory2.ArgsDataPool{ + Config: &esdp.generalConfig, + EconomicsData: economicsData, + ShardCoordinator: esdp.defaultShardCoordinator, + }, + ) requestHandlerMeta, err := esdp.createRequestHandler() if err != nil { return nil, err } + interceptorsContainer, err := esdp.createInterceptors(commonDataPool) + if err != nil || interceptorsContainer == nil { + return nil, err + } + esdp.requestHandler = requestHandlerMeta epochNumForRequestingTheLatestAvailable := uint32(math.MaxUint32) @@ -184,15 +234,19 @@ func (esdp *epochStartDataProvider) Bootstrap() (*structs.ComponentsNeededForBoo return nil, err } - pendingMiniBlocks := make([]*block.MiniBlock, 0) - for _, mb := range epochStartData.PendingMiniBlockHeaders { - receivedMb, errGetMb := esdp.getMiniBlock(&mb) - if errGetMb != nil { - return nil, errGetMb - } - pendingMiniBlocks = append(pendingMiniBlocks, receivedMb) - log.Info("received miniblock", "type", receivedMb.Type) + pendingMiniBlocks, err := esdp.getMiniBlocks(epochStartData.PendingMiniBlockHeaders, shardCoordinator.SelfId()) + if err != nil { + return nil, err } + //pendingMiniBlocks := make([]*block.MiniBlock, 0) + //for _, mb := range epochStartData.PendingMiniBlockHeaders { + // receivedMb, errGetMb := esdp.getMiniBlock(&mb) + // if errGetMb != nil { + // return nil, errGetMb + // } + // pendingMiniBlocks = append(pendingMiniBlocks, receivedMb) + // log.Info("received 
miniblock", "type", receivedMb.Type) + //} lastFinalizedMetaBlock, err := esdp.getMetaBlock(epochStartData.LastFinishedMetaBlock) if err != nil { @@ -257,6 +311,49 @@ func (esdp *epochStartDataProvider) Bootstrap() (*structs.ComponentsNeededForBoo return components, nil } +func (esdp *epochStartDataProvider) getMiniBlocks(pendingMiniBlocks []block.ShardMiniBlockHeader, shardID uint32) (map[string]*block.MiniBlock, error) { + cacher, err := lrucache.NewCache(100) + if err != nil { + return nil, err + } + syncMiniBlocksArgs := sync.ArgsNewPendingMiniBlocksSyncer{ + Storage: &disabled.Storer{}, + Cache: cacher, + Marshalizer: esdp.marshalizer, + RequestHandler: esdp.requestHandler, + } + pendingMiniBlocksSyncer, err := sync.NewPendingMiniBlocksSyncer(syncMiniBlocksArgs) + if err != nil { + return nil, err + } + + waitTime := 1 * time.Minute + err = pendingMiniBlocksSyncer.SyncPendingMiniBlocksForEpochStart(pendingMiniBlocks, waitTime) + if err != nil { + return nil, err + } + + return pendingMiniBlocksSyncer.GetMiniBlocks() +} + +func (esdp *epochStartDataProvider) createInterceptors(dataPool dataRetriever.PoolsHolder) (process.InterceptorsContainer, error) { + args := interceptors.ArgsEpochStartInterceptorContainer{ + Config: esdp.generalConfig, + ShardCoordinator: esdp.defaultShardCoordinator, + Marshalizer: esdp.marshalizer, + Hasher: esdp.hasher, + Messenger: esdp.messenger, + DataPool: dataPool, + SingleSigner: esdp.singleSigner, + BlockSingleSigner: esdp.blockSingleSigner, + KeyGen: esdp.keyGen, + BlockKeyGen: esdp.blockKeyGen, + WhiteListHandler: esdp.whiteListHandler, + } + + return interceptors.NewEpochStartInterceptorsContainer(args) +} + func (esdp *epochStartDataProvider) changeMessageProcessorsForMetaBlocks() { err := esdp.messenger.UnregisterMessageProcessor(factory.MetachainBlocksTopic) if err != nil { @@ -297,7 +394,7 @@ func (esdp *epochStartDataProvider) createRequestHandler() (process.RequestHandl if err != nil { return nil, err } - triesHolder.Put([]byte(factory3.UserAccountTrie), stateTrie) + triesHolder.Put([]byte(trieFactory.UserAccountTrie), stateTrie) peerTrieStorageManager, err := trie.NewTrieStorageManagerWithoutPruning(disabled.NewDisabledStorer()) if err != nil { @@ -308,7 +405,7 @@ func (esdp *epochStartDataProvider) createRequestHandler() (process.RequestHandl if err != nil { return nil, err } - triesHolder.Put([]byte(factory3.PeerAccountTrie), peerTrie) + triesHolder.Put([]byte(trieFactory.PeerAccountTrie), peerTrie) resolversContainerArgs := resolverscontainer.FactoryArgs{ ShardCoordinator: shardC, @@ -379,25 +476,20 @@ func (esdp *epochStartDataProvider) getCurrentEpochStartData( return nil, errors.New("not found") } -func (esdp *epochStartDataProvider) initTopicsAndInterceptors() error { - err := esdp.messenger.CreateTopic(factory.MetachainBlocksTopic, true) +func (esdp *epochStartDataProvider) initTopicForEpochStartMetaBlockInterceptor() error { + err := esdp.messenger.UnregisterMessageProcessor(factory.MetachainBlocksTopic) if err != nil { log.Info("error unregistering message processor", "error", err) return err } - err = esdp.messenger.RegisterMessageProcessor(factory.MetachainBlocksTopic, esdp.epochStartMetaBlockInterceptor) + err = esdp.messenger.CreateTopic(factory.MetachainBlocksTopic, true) if err != nil { + log.Info("error registering message processor", "error", err) return err } - err = esdp.messenger.CreateTopic(factory.ShardBlocksTopic+"_1_META", true) - if err != nil { - log.Info("error unregistering message processor", "error", err) - return 
err - } - - err = esdp.messenger.RegisterMessageProcessor(factory.ShardBlocksTopic+"_1_META", esdp.shardHeaderInterceptor) + err = esdp.messenger.RegisterMessageProcessor(factory.MetachainBlocksTopic, esdp.epochStartMetaBlockInterceptor) if err != nil { return err } @@ -428,7 +520,7 @@ func (esdp *epochStartDataProvider) getTrieFromRootHash(_ []byte) (state.TriesHo } func (esdp *epochStartDataProvider) resetTopicsAndInterceptors() { - err := esdp.messenger.UnregisterAllMessageProcessors() + err := esdp.messenger.UnregisterMessageProcessor(factory.MetachainBlocksTopic) if err != nil { log.Info("error unregistering message processors", "error", err) } @@ -452,6 +544,14 @@ func (esdp *epochStartDataProvider) getMetaBlock(hash []byte) (*block.MetaBlock, } func (esdp *epochStartDataProvider) getEpochStartMetaBlock(epoch uint32) (*block.MetaBlock, error) { + err := esdp.initTopicForEpochStartMetaBlockInterceptor() + if err != nil { + return nil, err + } + defer func() { + esdp.resetTopicsAndInterceptors() + }() + esdp.requestEpochStartMetaBlock(epoch) time.Sleep(delayAfterRequesting) diff --git a/epochStart/bootstrap/errors.go b/epochStart/bootstrap/errors.go index 3a7a27d2e8f..87b6706ca42 100644 --- a/epochStart/bootstrap/errors.go +++ b/epochStart/bootstrap/errors.go @@ -20,6 +20,9 @@ var ErrNilHasher = errors.New("nil hasher") // ErrNilNodesConfigProvider signals that a nil nodes config provider has been given var ErrNilNodesConfigProvider = errors.New("nil nodes config provider") +// ErrNilDefaultShardCoordinator signals that a nil default shard coordinator +var ErrNilDefaultShardCoordinator = errors.New("nil default shard coordinator") + // ErrNilEpochStartMetaBlockInterceptor signals that a epoch start metablock interceptor has been provided var ErrNilEpochStartMetaBlockInterceptor = errors.New("nil epoch start metablock interceptor") @@ -38,8 +41,20 @@ var ErrNumTriesExceeded = errors.New("num of tries exceeded. 
try re-request") // ErrNilShardCoordinator signals that a nil shard coordinator has been provided var ErrNilShardCoordinator = errors.New("nil shard coordinator") -// ErrNilWhiteListHandler +// ErrNilWhiteListHandler signals a that a nil white list handler has been provided var ErrNilWhiteListHandler = errors.New("nil white list handler") +// ErrNilSingleSigner signals a that a nil single signer has been provided +var ErrNilSingleSigner = errors.New("nil single signer") + +// ErrNilBlockSingleSigner signals a that a nil single signer has been provided +var ErrNilBlockSingleSigner = errors.New("nil block single signer") + +// ErrNilKeyGen signals a that a nil key gen has been provided +var ErrNilKeyGen = errors.New("nil key gen") + +// ErrNilBlockKeyGen signals a that a nil key gen has been provided +var ErrNilBlockKeyGen = errors.New("nil block key gen") + // ErrShardDataNotFound signals that no shard header has been found for the calculated shard var ErrShardDataNotFound = errors.New("shard data not found") diff --git a/epochStart/bootstrap/factory/epochStartDataProviderFactory.go b/epochStart/bootstrap/factory/epochStartDataProviderFactory.go index 5cdf30a9de9..b544896d410 100644 --- a/epochStart/bootstrap/factory/epochStartDataProviderFactory.go +++ b/epochStart/bootstrap/factory/epochStartDataProviderFactory.go @@ -17,29 +17,41 @@ import ( ) type epochStartDataProviderFactory struct { - pubKey crypto.PublicKey - messenger p2p.Messenger - marshalizer marshal.Marshalizer - hasher hashing.Hasher - pathManager storage.PathManagerHandler - nodesConfigProvider bootstrap.NodesConfigProviderHandler - generalConfig config.Config - shouldSync bool + pubKey crypto.PublicKey + messenger p2p.Messenger + marshalizer marshal.Marshalizer + hasher hashing.Hasher + pathManager storage.PathManagerHandler + nodesConfigProvider bootstrap.NodesConfigProviderHandler + generalConfig config.Config + economicsConfig config.EconomicsConfig + defaultShardCoordinator sharding.Coordinator + singleSigner crypto.SingleSigner + blockSingleSigner crypto.SingleSigner + keyGen crypto.KeyGenerator + blockKeyGen crypto.KeyGenerator + shouldSync bool } // EpochStartDataProviderFactoryArgs holds the arguments needed for creating aa factory for the epoch start data // provider component type EpochStartDataProviderFactoryArgs struct { - PubKey crypto.PublicKey - Messenger p2p.Messenger - Marshalizer marshal.Marshalizer - Hasher hashing.Hasher - NodesConfigProvider bootstrap.NodesConfigProviderHandler - PathManager storage.PathManagerHandler - StartTime time.Time - OriginalNodesConfig *sharding.NodesSetup - GeneralConfig *config.Config - IsEpochFoundInStorage bool + PubKey crypto.PublicKey + Messenger p2p.Messenger + Marshalizer marshal.Marshalizer + Hasher hashing.Hasher + NodesConfigProvider bootstrap.NodesConfigProviderHandler + PathManager storage.PathManagerHandler + DefaultShardCoordinator sharding.Coordinator + StartTime time.Time + OriginalNodesConfig *sharding.NodesSetup + GeneralConfig *config.Config + EconomicsConfig *config.EconomicsConfig + SingleSigner crypto.SingleSigner + BlockSingleSigner crypto.SingleSigner + KeyGen crypto.KeyGenerator + BlockKeyGen crypto.KeyGenerator + IsEpochFoundInStorage bool } // NewEpochStartDataProviderFactory returns a new instance of epochStartDataProviderFactory @@ -62,6 +74,21 @@ func NewEpochStartDataProviderFactory(args EpochStartDataProviderFactoryArgs) (* if check.IfNil(args.NodesConfigProvider) { return nil, bootstrap.ErrNilNodesConfigProvider } + if 
check.IfNil(args.DefaultShardCoordinator) { + return nil, bootstrap.ErrNilDefaultShardCoordinator + } + if check.IfNil(args.BlockKeyGen) { + return nil, bootstrap.ErrNilBlockKeyGen + } + if check.IfNil(args.KeyGen) { + return nil, bootstrap.ErrNilKeyGen + } + if check.IfNil(args.SingleSigner) { + return nil, bootstrap.ErrNilSingleSigner + } + if check.IfNil(args.BlockSingleSigner) { + return nil, bootstrap.ErrNilBlockSingleSigner + } shouldSync := bootstrap.ShouldSyncWithTheNetwork( args.StartTime, @@ -69,17 +96,23 @@ func NewEpochStartDataProviderFactory(args EpochStartDataProviderFactoryArgs) (* args.OriginalNodesConfig, args.GeneralConfig, ) - shouldSync = true // hardcoded so we can test we can sync + shouldSync = false // hardcoded so we can test we can sync return &epochStartDataProviderFactory{ - pubKey: args.PubKey, - messenger: args.Messenger, - marshalizer: args.Marshalizer, - hasher: args.Hasher, - pathManager: args.PathManager, - generalConfig: *args.GeneralConfig, - nodesConfigProvider: args.NodesConfigProvider, - shouldSync: shouldSync, + pubKey: args.PubKey, + messenger: args.Messenger, + marshalizer: args.Marshalizer, + hasher: args.Hasher, + pathManager: args.PathManager, + generalConfig: *args.GeneralConfig, + economicsConfig: *args.EconomicsConfig, + nodesConfigProvider: args.NodesConfigProvider, + defaultShardCoordinator: args.DefaultShardCoordinator, + keyGen: args.KeyGen, + blockKeyGen: args.BlockKeyGen, + singleSigner: args.SingleSigner, + blockSingleSigner: args.BlockSingleSigner, + shouldSync: shouldSync, }, nil } @@ -115,6 +148,9 @@ func (esdpf *epochStartDataProviderFactory) Create() (bootstrap.EpochStartDataPr return nil, err } whiteListHandler, err := interceptors.NewWhiteListDataVerifier(whiteListCache) + if err != nil { + return nil, err + } argsEpochStart := bootstrap.ArgsEpochStartDataProvider{ PublicKey: esdpf.pubKey, @@ -123,14 +159,18 @@ func (esdpf *epochStartDataProviderFactory) Create() (bootstrap.EpochStartDataPr Hasher: esdpf.hasher, NodesConfigProvider: esdpf.nodesConfigProvider, GeneralConfig: esdpf.generalConfig, + EconomicsConfig: esdpf.economicsConfig, PathManager: esdpf.pathManager, + SingleSigner: esdpf.singleSigner, + BlockSingleSigner: esdpf.blockSingleSigner, + KeyGen: esdpf.keyGen, + BlockKeyGen: esdpf.blockKeyGen, + DefaultShardCoordinator: esdpf.defaultShardCoordinator, EpochStartMetaBlockInterceptor: epochStartMetaBlockInterceptor, MetaBlockInterceptor: metaBlockInterceptor, ShardHeaderInterceptor: shardHdrInterceptor, MiniBlockInterceptor: miniBlockInterceptor, WhiteListHandler: whiteListHandler, } - epochStartDataProvider, err := bootstrap.NewEpochStartDataProvider(argsEpochStart) - - return epochStartDataProvider, nil + return bootstrap.NewEpochStartDataProvider(argsEpochStart) } diff --git a/epochStart/bootstrap/factory/interceptors/epochStartInterceptorsContainerFactory.go b/epochStart/bootstrap/factory/interceptors/epochStartInterceptorsContainerFactory.go new file mode 100644 index 00000000000..d802373503b --- /dev/null +++ b/epochStart/bootstrap/factory/interceptors/epochStartInterceptorsContainerFactory.go @@ -0,0 +1,107 @@ +package interceptors + +import ( + "time" + + "github.com/ElrondNetwork/elrond-go/config" + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/crypto" + "github.com/ElrondNetwork/elrond-go/data/state/addressConverters" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/disabled" + 
"github.com/ElrondNetwork/elrond-go/epochStart/genesis" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/factory/interceptorscontainer" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/ElrondNetwork/elrond-go/storage/timecache" + "github.com/ElrondNetwork/elrond-go/update" +) + +// ArgsEpochStartInterceptorContainer holds the arguments needed for creating a new epoch start interceptors +// container factory +type ArgsEpochStartInterceptorContainer struct { + Config config.Config + ShardCoordinator sharding.Coordinator + Marshalizer marshal.Marshalizer + Hasher hashing.Hasher + Messenger process.TopicHandler + DataPool dataRetriever.PoolsHolder + SingleSigner crypto.SingleSigner + BlockSingleSigner crypto.SingleSigner + KeyGen crypto.KeyGenerator + BlockKeyGen crypto.KeyGenerator + WhiteListHandler update.WhiteListHandler +} + +// NewEpochStartInterceptorsContainer will return a real interceptors container factory, but will many disabled +// components +func NewEpochStartInterceptorsContainer(args ArgsEpochStartInterceptorContainer) (process.InterceptorsContainer, error) { + nodesCoordinator := disabled.NewNodesCoordinator() + storer := disabled.ChainStorer{GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { + return disabled.NewDisabledStorer() + }} + txSignMarshalizer := marshal.JsonMarshalizer{} + multiSigner := disabled.NewMultiSigner() + accountsAdapter := disabled.NewAccountsAdapter() + addressConverter, err := addressConverters.NewPlainAddressConverter( + args.Config.Address.Length, + args.Config.Address.Prefix, + ) + if err != nil { + return nil, err + } + blackListHandler := timecache.NewTimeCache(1 * time.Minute) + feeHandler := genesis.NewGenesisFeeHandler() + headerSigVerifier := disabled.NewHeaderSigVerifier() + chainID := []byte("chain ID") + sizeCheckDelta := 0 + validityAttester := disabled.NewValidityAttester() + epochStartTrigger := disabled.NewEpochStartTrigger() + + argsIntCont := interceptorscontainer.MetaInterceptorsContainerFactoryArgs{ + ShardCoordinator: args.ShardCoordinator, + NodesCoordinator: nodesCoordinator, + Messenger: args.Messenger, + Store: &storer, + ProtoMarshalizer: args.Marshalizer, + TxSignMarshalizer: &txSignMarshalizer, + Hasher: args.Hasher, + MultiSigner: multiSigner, + DataPool: args.DataPool, + Accounts: accountsAdapter, + AddrConverter: addressConverter, + SingleSigner: args.SingleSigner, + BlockSingleSigner: args.BlockSingleSigner, + KeyGen: args.KeyGen, + BlockKeyGen: args.BlockKeyGen, + MaxTxNonceDeltaAllowed: core.MaxTxNonceDeltaAllowed, + TxFeeHandler: feeHandler, + BlackList: blackListHandler, + HeaderSigVerifier: headerSigVerifier, + ChainID: chainID, + SizeCheckDelta: uint32(sizeCheckDelta), + ValidityAttester: validityAttester, + EpochStartTrigger: epochStartTrigger, + WhiteListHandler: args.WhiteListHandler, + } + + interceptorsContainerFactory, err := interceptorscontainer.NewMetaInterceptorsContainerFactory(argsIntCont) + if err != nil { + return nil, err + } + + container, err := interceptorsContainerFactory.Create() + if err != nil { + return nil, err + } + + err = interceptorscontainer.SetWhiteListHandlerToInterceptors(container, args.WhiteListHandler) + if err != nil { + return nil, err + } + + return container, nil +} diff --git a/epochStart/bootstrap/storagehandler/baseStorageHandler.go 
b/epochStart/bootstrap/storagehandler/baseStorageHandler.go index d0f67a1f4b3..d397998bda5 100644 --- a/epochStart/bootstrap/storagehandler/baseStorageHandler.go +++ b/epochStart/bootstrap/storagehandler/baseStorageHandler.go @@ -30,7 +30,7 @@ type baseStorageHandler struct { currentEpoch uint32 } -func (bsh *baseStorageHandler) getAndSavePendingMiniBlocks(miniBlocks []*block.MiniBlock) ([]bootstrapStorage.PendingMiniBlockInfo, error) { +func (bsh *baseStorageHandler) getAndSavePendingMiniBlocks(miniBlocks map[string]*block.MiniBlock) ([]bootstrapStorage.PendingMiniBlockInfo, error) { countersMap := make(map[uint32]int) for _, miniBlock := range miniBlocks { countersMap[miniBlock.SenderShardID]++ diff --git a/epochStart/bootstrap/structs/components.go b/epochStart/bootstrap/structs/components.go index fb365ae61b6..8ca51a040f1 100644 --- a/epochStart/bootstrap/structs/components.go +++ b/epochStart/bootstrap/structs/components.go @@ -15,5 +15,5 @@ type ComponentsNeededForBootstrap struct { ShardHeaders map[uint32]*block.Header ShardCoordinator sharding.Coordinator Tries state.TriesHolder - PendingMiniBlocks []*block.MiniBlock + PendingMiniBlocks map[string]*block.MiniBlock } diff --git a/process/factory/interceptorscontainer/baseInterceptorsContainerFactory.go b/process/factory/interceptorscontainer/baseInterceptorsContainerFactory.go index 8194817a21b..2ea6c6c3517 100644 --- a/process/factory/interceptorscontainer/baseInterceptorsContainerFactory.go +++ b/process/factory/interceptorscontainer/baseInterceptorsContainerFactory.go @@ -16,6 +16,7 @@ import ( "github.com/ElrondNetwork/elrond-go/process/interceptors/processor" "github.com/ElrondNetwork/elrond-go/process/mock" "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/update" ) const numGoRoutines = 2000 @@ -442,3 +443,19 @@ func (bicf *baseInterceptorsContainerFactory) generateUnsignedTxsInterceptors() return bicf.container.AddMultiple(keys, interceptorsSlice) } + +// SetWhiteListHandlerToInterceptors will set the white list handler to all given interceptors +func SetWhiteListHandlerToInterceptors(container process.InterceptorsContainer, handler update.WhiteListHandler) error { + var err error + + container.Iterate(func(key string, interceptor process.Interceptor) bool { + errFound := interceptor.SetIsDataForCurrentShardVerifier(handler) + if errFound != nil { + err = errFound + return false + } + return true + }) + + return err +} diff --git a/process/factory/metachain/interceptorsContainerFactory.go b/process/factory/metachain/interceptorsContainerFactory.go deleted file mode 100644 index 8c65ad4a2d3..00000000000 --- a/process/factory/metachain/interceptorsContainerFactory.go +++ /dev/null @@ -1,626 +0,0 @@ -package metachain - -import ( - "github.com/ElrondNetwork/elrond-go/core" - "github.com/ElrondNetwork/elrond-go/core/check" - "github.com/ElrondNetwork/elrond-go/core/throttler" - "github.com/ElrondNetwork/elrond-go/crypto" - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/hashing" - "github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/process/dataValidators" - "github.com/ElrondNetwork/elrond-go/process/factory" - "github.com/ElrondNetwork/elrond-go/process/factory/containers" - "github.com/ElrondNetwork/elrond-go/process/interceptors" - processInterceptors "github.com/ElrondNetwork/elrond-go/process/interceptors" - 
interceptorFactory "github.com/ElrondNetwork/elrond-go/process/interceptors/factory" - "github.com/ElrondNetwork/elrond-go/process/interceptors/processor" - "github.com/ElrondNetwork/elrond-go/process/mock" - "github.com/ElrondNetwork/elrond-go/sharding" -) - -const numGoRoutines = 2000 - -type interceptorsContainerFactory struct { - accounts state.AccountsAdapter - maxTxNonceDeltaAllowed int - protoMarshalizer marshal.Marshalizer - txSignMarshalizer marshal.Marshalizer - hasher hashing.Hasher - store dataRetriever.StorageService - dataPool dataRetriever.PoolsHolder - shardCoordinator sharding.Coordinator - messenger process.TopicHandler - multiSigner crypto.MultiSigner - nodesCoordinator sharding.NodesCoordinator - blackList process.BlackListHandler - argInterceptorFactory *interceptorFactory.ArgInterceptedDataFactory - globalThrottler process.InterceptorThrottler -} - -// NewInterceptorsContainerFactory is responsible for creating a new interceptors factory object -func NewInterceptorsContainerFactory( - shardCoordinator sharding.Coordinator, - nodesCoordinator sharding.NodesCoordinator, - messenger process.TopicHandler, - store dataRetriever.StorageService, - protoMarshalizer marshal.Marshalizer, - txSignMarshalizer marshal.Marshalizer, - hasher hashing.Hasher, - multiSigner crypto.MultiSigner, - dataPool dataRetriever.PoolsHolder, - accounts state.AccountsAdapter, - addrConverter state.AddressConverter, - singleSigner crypto.SingleSigner, - blockSingleSigner crypto.SingleSigner, - keyGen crypto.KeyGenerator, - blockKeyGen crypto.KeyGenerator, - maxTxNonceDeltaAllowed int, - txFeeHandler process.FeeHandler, - blackList process.BlackListHandler, - headerSigVerifier process.InterceptedHeaderSigVerifier, - chainID []byte, - sizeCheckDelta uint32, - validityAttester process.ValidityAttester, - epochStartTrigger process.EpochStartTriggerHandler, -) (*interceptorsContainerFactory, error) { - - if check.IfNil(shardCoordinator) { - return nil, process.ErrNilShardCoordinator - } - if check.IfNil(messenger) { - return nil, process.ErrNilMessenger - } - if check.IfNil(store) { - return nil, process.ErrNilStore - } - if check.IfNil(protoMarshalizer) { - return nil, process.ErrNilMarshalizer - } - - if sizeCheckDelta > 0 { - protoMarshalizer = marshal.NewSizeCheckUnmarshalizer(protoMarshalizer, sizeCheckDelta) - } - - if check.IfNil(txSignMarshalizer) { - return nil, process.ErrNilMarshalizer - } - if check.IfNil(hasher) { - return nil, process.ErrNilHasher - } - if check.IfNil(multiSigner) { - return nil, process.ErrNilMultiSigVerifier - } - if check.IfNil(dataPool) { - return nil, process.ErrNilDataPoolHolder - } - if check.IfNil(nodesCoordinator) { - return nil, process.ErrNilNodesCoordinator - } - if check.IfNil(accounts) { - return nil, process.ErrNilAccountsAdapter - } - if check.IfNil(addrConverter) { - return nil, process.ErrNilAddressConverter - } - if check.IfNil(singleSigner) { - return nil, process.ErrNilSingleSigner - } - if check.IfNil(keyGen) { - return nil, process.ErrNilKeyGen - } - if check.IfNil(txFeeHandler) { - return nil, process.ErrNilEconomicsFeeHandler - } - if check.IfNil(blackList) { - return nil, process.ErrNilBlackListHandler - } - if check.IfNil(blockKeyGen) { - return nil, process.ErrNilKeyGen - } - if check.IfNil(blockSingleSigner) { - return nil, process.ErrNilSingleSigner - } - if check.IfNil(headerSigVerifier) { - return nil, process.ErrNilHeaderSigVerifier - } - if check.IfNil(epochStartTrigger) { - return nil, process.ErrNilEpochStartTrigger - } - if len(chainID) 
== 0 { - return nil, process.ErrInvalidChainID - } - if check.IfNil(validityAttester) { - return nil, process.ErrNilValidityAttester - } - - argInterceptorFactory := &interceptorFactory.ArgInterceptedDataFactory{ - ProtoMarshalizer: protoMarshalizer, - TxSignMarshalizer: txSignMarshalizer, - Hasher: hasher, - ShardCoordinator: shardCoordinator, - NodesCoordinator: nodesCoordinator, - MultiSigVerifier: multiSigner, - KeyGen: keyGen, - BlockKeyGen: blockKeyGen, - Signer: singleSigner, - BlockSigner: blockSingleSigner, - AddrConv: addrConverter, - FeeHandler: txFeeHandler, - HeaderSigVerifier: headerSigVerifier, - ChainID: chainID, - ValidityAttester: validityAttester, - EpochStartTrigger: epochStartTrigger, - } - - icf := &interceptorsContainerFactory{ - shardCoordinator: shardCoordinator, - messenger: messenger, - store: store, - protoMarshalizer: protoMarshalizer, - txSignMarshalizer: txSignMarshalizer, - hasher: hasher, - multiSigner: multiSigner, - dataPool: dataPool, - nodesCoordinator: nodesCoordinator, - blackList: blackList, - argInterceptorFactory: argInterceptorFactory, - maxTxNonceDeltaAllowed: maxTxNonceDeltaAllowed, - accounts: accounts, - } - - var err error - icf.globalThrottler, err = throttler.NewNumGoRoutineThrottler(numGoRoutines) - if err != nil { - return nil, err - } - - return icf, nil -} - -// Create returns an interceptor container that will hold all interceptors in the system -func (icf *interceptorsContainerFactory) Create() (process.InterceptorsContainer, error) { - container := containers.NewInterceptorsContainer() - - keys, interceptorSlice, err := icf.generateMetablockInterceptor() - if err != nil { - return nil, err - } - err = container.AddMultiple(keys, interceptorSlice) - if err != nil { - return nil, err - } - - keys, interceptorSlice, err = icf.generateShardHeaderInterceptors() - if err != nil { - return nil, err - } - err = container.AddMultiple(keys, interceptorSlice) - if err != nil { - return nil, err - } - - keys, interceptorSlice, err = icf.generateTxInterceptors() - if err != nil { - return nil, err - } - err = container.AddMultiple(keys, interceptorSlice) - if err != nil { - return nil, err - } - - keys, interceptorSlice, err = icf.generateUnsignedTxsInterceptors() - if err != nil { - return nil, err - } - - err = container.AddMultiple(keys, interceptorSlice) - if err != nil { - return nil, err - } - - keys, interceptorSlice, err = icf.generateMiniBlocksInterceptors() - if err != nil { - return nil, err - } - err = container.AddMultiple(keys, interceptorSlice) - if err != nil { - return nil, err - } - - keys, interceptorSlice, err = icf.generateTrieNodesInterceptors() - if err != nil { - return nil, err - } - - err = container.AddMultiple(keys, interceptorSlice) - if err != nil { - return nil, err - } - - return container, nil -} - -func (icf *interceptorsContainerFactory) createTopicAndAssignHandler( - topic string, - interceptor process.Interceptor, - createChannel bool, -) (process.Interceptor, error) { - - err := icf.messenger.CreateTopic(topic, createChannel) - if err != nil { - return nil, err - } - - return interceptor, icf.messenger.RegisterMessageProcessor(topic, interceptor) -} - -//------- Metablock interceptor - -func (icf *interceptorsContainerFactory) generateMetablockInterceptor() ([]string, []process.Interceptor, error) { - identifierHdr := factory.MetachainBlocksTopic - - //TODO implement other HeaderHandlerProcessValidator that will check the header's nonce - // against blockchain's latest nonce - k finality - hdrValidator, err 
:= dataValidators.NewNilHeaderValidator() - if err != nil { - return nil, nil, err - } - - hdrFactory, err := interceptorFactory.NewInterceptedMetaHeaderDataFactory(icf.argInterceptorFactory) - if err != nil { - return nil, nil, err - } - - argProcessor := &processor.ArgHdrInterceptorProcessor{ - Headers: icf.dataPool.Headers(), - HdrValidator: hdrValidator, - BlackList: icf.blackList, - } - hdrProcessor, err := processor.NewHdrInterceptorProcessor(argProcessor) - if err != nil { - return nil, nil, err - } - - //only one metachain header topic - interceptor, err := processInterceptors.NewSingleDataInterceptor( - hdrFactory, - hdrProcessor, - icf.globalThrottler, - ) - if err != nil { - return nil, nil, err - } - - _, err = icf.createTopicAndAssignHandler(identifierHdr, interceptor, true) - if err != nil { - return nil, nil, err - } - - return []string{identifierHdr}, []process.Interceptor{interceptor}, nil -} - -//------- Shard header interceptors - -func (icf *interceptorsContainerFactory) generateShardHeaderInterceptors() ([]string, []process.Interceptor, error) { - shardC := icf.shardCoordinator - noOfShards := shardC.NumberOfShards() - keys := make([]string, noOfShards) - interceptorSlice := make([]process.Interceptor, noOfShards) - - //wire up to topics: shardBlocks_0_META, shardBlocks_1_META ... - for idx := uint32(0); idx < noOfShards; idx++ { - identifierHeader := factory.ShardBlocksTopic + shardC.CommunicationIdentifier(idx) - interceptor, err := icf.createOneShardHeaderInterceptor(identifierHeader) - if err != nil { - return nil, nil, err - } - - keys[int(idx)] = identifierHeader - interceptorSlice[int(idx)] = interceptor - } - - return keys, interceptorSlice, nil -} - -func (icf *interceptorsContainerFactory) createOneShardHeaderInterceptor(topic string) (process.Interceptor, error) { - //TODO implement other HeaderHandlerProcessValidator that will check the header's nonce - // against blockchain's latest nonce - k finality - hdrValidator, err := dataValidators.NewNilHeaderValidator() - if err != nil { - return nil, err - } - - hdrFactory, err := interceptorFactory.NewInterceptedShardHeaderDataFactory(icf.argInterceptorFactory) - if err != nil { - return nil, err - } - - argProcessor := &processor.ArgHdrInterceptorProcessor{ - Headers: icf.dataPool.Headers(), - HdrValidator: hdrValidator, - BlackList: icf.blackList, - } - hdrProcessor, err := processor.NewHdrInterceptorProcessor(argProcessor) - if err != nil { - return nil, err - } - - interceptor, err := processInterceptors.NewSingleDataInterceptor( - hdrFactory, - hdrProcessor, - icf.globalThrottler, - ) - if err != nil { - return nil, err - } - - return icf.createTopicAndAssignHandler(topic, interceptor, true) -} - -//------- Tx interceptors - -func (icf *interceptorsContainerFactory) generateTxInterceptors() ([]string, []process.Interceptor, error) { - shardC := icf.shardCoordinator - - noOfShards := shardC.NumberOfShards() - - keys := make([]string, noOfShards) - interceptorSlice := make([]process.Interceptor, noOfShards) - - for idx := uint32(0); idx < noOfShards; idx++ { - identifierTx := factory.TransactionTopic + shardC.CommunicationIdentifier(idx) - - interceptor, err := icf.createOneTxInterceptor(identifierTx) - if err != nil { - return nil, nil, err - } - - keys[int(idx)] = identifierTx - interceptorSlice[int(idx)] = interceptor - } - - //tx interceptor for metachain topic - identifierTx := factory.TransactionTopic + shardC.CommunicationIdentifier(core.MetachainShardId) - - interceptor, err := 
icf.createOneTxInterceptor(identifierTx) - if err != nil { - return nil, nil, err - } - - keys = append(keys, identifierTx) - interceptorSlice = append(interceptorSlice, interceptor) - return keys, interceptorSlice, nil -} - -func (icf *interceptorsContainerFactory) createOneTxInterceptor(topic string) (process.Interceptor, error) { - txValidator, err := dataValidators.NewTxValidator(icf.accounts, icf.shardCoordinator, icf.maxTxNonceDeltaAllowed) - if err != nil { - return nil, err - } - - argProcessor := &processor.ArgTxInterceptorProcessor{ - ShardedDataCache: icf.dataPool.Transactions(), - TxValidator: txValidator, - } - txProcessor, err := processor.NewTxInterceptorProcessor(argProcessor) - if err != nil { - return nil, err - } - - txFactory, err := interceptorFactory.NewInterceptedTxDataFactory(icf.argInterceptorFactory) - if err != nil { - return nil, err - } - - interceptor, err := interceptors.NewMultiDataInterceptor( - icf.protoMarshalizer, - txFactory, - txProcessor, - icf.globalThrottler, - ) - if err != nil { - return nil, err - } - - return icf.createTopicAndAssignHandler(topic, interceptor, true) -} - -//------- Unsigned transactions interceptors - -func (icf *interceptorsContainerFactory) generateUnsignedTxsInterceptors() ([]string, []process.Interceptor, error) { - shardC := icf.shardCoordinator - - noOfShards := shardC.NumberOfShards() - - keys := make([]string, noOfShards) - interceptorSlice := make([]process.Interceptor, noOfShards) - - for idx := uint32(0); idx < noOfShards; idx++ { - identifierScr := factory.UnsignedTransactionTopic + shardC.CommunicationIdentifier(idx) - - interceptor, err := icf.createOneUnsignedTxInterceptor(identifierScr) - if err != nil { - return nil, nil, err - } - - keys[int(idx)] = identifierScr - interceptorSlice[int(idx)] = interceptor - } - - return keys, interceptorSlice, nil -} - -func (icf *interceptorsContainerFactory) createOneUnsignedTxInterceptor(topic string) (process.Interceptor, error) { - //TODO replace the nil tx validator with white list validator - txValidator, err := mock.NewNilTxValidator() - if err != nil { - return nil, err - } - - argProcessor := &processor.ArgTxInterceptorProcessor{ - ShardedDataCache: icf.dataPool.UnsignedTransactions(), - TxValidator: txValidator, - } - txProcessor, err := processor.NewTxInterceptorProcessor(argProcessor) - if err != nil { - return nil, err - } - - txFactory, err := interceptorFactory.NewInterceptedUnsignedTxDataFactory(icf.argInterceptorFactory) - if err != nil { - return nil, err - } - - interceptor, err := interceptors.NewMultiDataInterceptor( - icf.protoMarshalizer, - txFactory, - txProcessor, - icf.globalThrottler, - ) - if err != nil { - return nil, err - } - - return icf.createTopicAndAssignHandler(topic, interceptor, true) -} - -//------- MiniBlocks interceptors - -func (icf *interceptorsContainerFactory) generateMiniBlocksInterceptors() ([]string, []process.Interceptor, error) { - shardC := icf.shardCoordinator - noOfShards := shardC.NumberOfShards() - keys := make([]string, noOfShards+1) - interceptorSlice := make([]process.Interceptor, noOfShards+1) - - for idx := uint32(0); idx < noOfShards; idx++ { - identifierMiniBlocks := factory.MiniBlocksTopic + shardC.CommunicationIdentifier(idx) - - interceptor, err := icf.createOneMiniBlocksInterceptor(identifierMiniBlocks) - if err != nil { - return nil, nil, err - } - - keys[int(idx)] = identifierMiniBlocks - interceptorSlice[int(idx)] = interceptor - } - - identifierMiniBlocks := factory.MiniBlocksTopic + 
shardC.CommunicationIdentifier(core.MetachainShardId) - - interceptor, err := icf.createOneMiniBlocksInterceptor(identifierMiniBlocks) - if err != nil { - return nil, nil, err - } - - keys[noOfShards] = identifierMiniBlocks - interceptorSlice[noOfShards] = interceptor - - return keys, interceptorSlice, nil -} - -func (icf *interceptorsContainerFactory) createOneMiniBlocksInterceptor(topic string) (process.Interceptor, error) { - argProcessor := &processor.ArgTxBodyInterceptorProcessor{ - MiniblockCache: icf.dataPool.MiniBlocks(), - Marshalizer: icf.protoMarshalizer, - Hasher: icf.hasher, - ShardCoordinator: icf.shardCoordinator, - } - txBlockBodyProcessor, err := processor.NewTxBodyInterceptorProcessor(argProcessor) - if err != nil { - return nil, err - } - - txFactory, err := interceptorFactory.NewInterceptedTxBlockBodyDataFactory(icf.argInterceptorFactory) - if err != nil { - return nil, err - } - - interceptor, err := interceptors.NewSingleDataInterceptor( - txFactory, - txBlockBodyProcessor, - icf.globalThrottler, - ) - if err != nil { - return nil, err - } - - return icf.createTopicAndAssignHandler(topic, interceptor, true) -} - -func (icf *interceptorsContainerFactory) generateTrieNodesInterceptors() ([]string, []process.Interceptor, error) { - shardC := icf.shardCoordinator - - keys := make([]string, 0) - trieInterceptors := make([]process.Interceptor, 0) - - for i := uint32(0); i < shardC.NumberOfShards(); i++ { - identifierTrieNodes := factory.ValidatorTrieNodesTopic + shardC.CommunicationIdentifier(i) - interceptor, err := icf.createOneTrieNodesInterceptor(identifierTrieNodes) - if err != nil { - return nil, nil, err - } - - keys = append(keys, identifierTrieNodes) - trieInterceptors = append(trieInterceptors, interceptor) - - identifierTrieNodes = factory.AccountTrieNodesTopic + shardC.CommunicationIdentifier(i) - interceptor, err = icf.createOneTrieNodesInterceptor(identifierTrieNodes) - if err != nil { - return nil, nil, err - } - - keys = append(keys, identifierTrieNodes) - trieInterceptors = append(trieInterceptors, interceptor) - } - - identifierTrieNodes := factory.ValidatorTrieNodesTopic + shardC.CommunicationIdentifier(core.MetachainShardId) - interceptor, err := icf.createOneTrieNodesInterceptor(identifierTrieNodes) - if err != nil { - return nil, nil, err - } - - keys = append(keys, identifierTrieNodes) - trieInterceptors = append(trieInterceptors, interceptor) - - identifierTrieNodes = factory.AccountTrieNodesTopic + shardC.CommunicationIdentifier(core.MetachainShardId) - interceptor, err = icf.createOneTrieNodesInterceptor(identifierTrieNodes) - if err != nil { - return nil, nil, err - } - - keys = append(keys, identifierTrieNodes) - trieInterceptors = append(trieInterceptors, interceptor) - - return keys, trieInterceptors, nil -} - -func (icf *interceptorsContainerFactory) createOneTrieNodesInterceptor(topic string) (process.Interceptor, error) { - trieNodesProcessor, err := processor.NewTrieNodesInterceptorProcessor(icf.dataPool.TrieNodes()) - if err != nil { - return nil, err - } - - trieNodesFactory, err := interceptorFactory.NewInterceptedTrieNodeDataFactory(icf.argInterceptorFactory) - if err != nil { - return nil, err - } - - interceptor, err := interceptors.NewMultiDataInterceptor( - icf.protoMarshalizer, - trieNodesFactory, - trieNodesProcessor, - icf.globalThrottler, - ) - if err != nil { - return nil, err - } - - return icf.createTopicAndAssignHandler(topic, interceptor, true) -} - -// IsInterfaceNil returns true if there is no value under the interface 
-func (icf *interceptorsContainerFactory) IsInterfaceNil() bool { - return icf == nil -} diff --git a/update/interface.go b/update/interface.go index 96f8eaca138..0b13d20caa0 100644 --- a/update/interface.go +++ b/update/interface.go @@ -123,6 +123,7 @@ type EpochStartTriesSyncHandler interface { // EpochStartPendingMiniBlocksSyncHandler defines the methods to sync all pending miniblocks type EpochStartPendingMiniBlocksSyncHandler interface { SyncPendingMiniBlocksFromMeta(epochStart *block.MetaBlock, unFinished map[string]*block.MetaBlock, waitTime time.Duration) error + SyncPendingMiniBlocksForEpochStart(miniBlockHeaders []block.ShardMiniBlockHeader, waitTime time.Duration) error GetMiniBlocks() (map[string]*block.MiniBlock, error) IsInterfaceNil() bool } diff --git a/update/sync/syncHeadersByHash.go b/update/sync/syncHeadersByHash.go new file mode 100644 index 00000000000..b1171653a04 --- /dev/null +++ b/update/sync/syncHeadersByHash.go @@ -0,0 +1,196 @@ +package sync + +import ( + "sync" + "time" + + "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/ElrondNetwork/elrond-go/update" +) + +type missingHeadersByHash struct { + mutMissingHdrs sync.Mutex + mapHeaders map[string]*block.Header + mapHashes map[string]struct{} + pool storage.Cacher + storage update.HistoryStorer + chReceivedAll chan bool + marshalizer marshal.Marshalizer + stopSyncing bool + epochToSync uint32 + syncedAll bool + requestHandler process.RequestHandler +} + +// ArgsNewMissingHeadersByHashSyncer defines the arguments needed for the syncer +type ArgsNewMissingHeadersByHashSyncer struct { + Storage storage.Storer + Cache storage.Cacher + Marshalizer marshal.Marshalizer + RequestHandler process.RequestHandler +} + +// NewMissingheadersByHashSyncer creates a syncer for all missing headers +func NewMissingheadersByHashSyncer(args ArgsNewMissingHeadersByHashSyncer) (*missingHeadersByHash, error) { + if check.IfNil(args.Storage) { + return nil, dataRetriever.ErrNilHeadersStorage + } + if check.IfNil(args.Cache) { + return nil, dataRetriever.ErrNilCacher + } + if check.IfNil(args.Marshalizer) { + return nil, dataRetriever.ErrNilMarshalizer + } + if check.IfNil(args.RequestHandler) { + return nil, process.ErrNilRequestHandler + } + + p := &missingHeadersByHash{ + mutMissingHdrs: sync.Mutex{}, + mapHeaders: make(map[string]*block.Header), + mapHashes: make(map[string]struct{}), + pool: args.Cache, + storage: args.Storage, + chReceivedAll: make(chan bool), + requestHandler: args.RequestHandler, + stopSyncing: true, + syncedAll: false, + marshalizer: args.Marshalizer, + } + + p.pool.RegisterHandler(p.receivedHeader) + + return p, nil +} + +// SyncMissingHeadersByHash syncs the missing headers +func (m *missingHeadersByHash) SyncMissingHeadersByHash( + shardID uint32, + headersHashes [][]byte, + waitTime time.Duration, +) error { + _ = process.EmptyChannel(m.chReceivedAll) + + requestedMBs := 0 + m.mutMissingHdrs.Lock() + m.stopSyncing = false + for _, hash := range headersHashes { + m.mapHashes[string(hash)] = struct{}{} + header, ok := m.getHeaderFromPoolOrStorage(hash) + if ok { + m.mapHeaders[string(hash)] = header + continue + } + + requestedMBs++ + m.requestHandler.RequestShardHeader(shardID, hash) + } + m.mutMissingHdrs.Unlock() + + var err error + defer func() { + 
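// deferred: under the mutex, mark syncing as stopped and flag syncedAll only when every requested header has been received +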
m.mutMissingHdrs.Lock() + m.stopSyncing = true + if err == nil { + m.syncedAll = true + } + m.mutMissingHdrs.Unlock() + }() + + if requestedMBs > 0 { + err = WaitFor(m.chReceivedAll, waitTime) + if err != nil { + return err + } + } + + return nil +} + +// receivedHeader is a callback function that is called when a new header is received +// it stores the header and signals when all the requested headers have been received +func (m *missingHeadersByHash) receivedHeader(hdrHash []byte) { + m.mutMissingHdrs.Lock() + if m.stopSyncing { + m.mutMissingHdrs.Unlock() + return + } + + if _, ok := m.mapHashes[string(hdrHash)]; !ok { + m.mutMissingHdrs.Unlock() + return + } + + if _, ok := m.mapHeaders[string(hdrHash)]; ok { + m.mutMissingHdrs.Unlock() + return + } + + header, ok := m.getHeaderFromPool(hdrHash) + if !ok { + m.mutMissingHdrs.Unlock() + return + } + + m.mapHeaders[string(hdrHash)] = header + receivedAll := len(m.mapHashes) == len(m.mapHeaders) + m.mutMissingHdrs.Unlock() + if receivedAll { + m.chReceivedAll <- true + } +} + +func (m *missingHeadersByHash) getHeaderFromPoolOrStorage(hash []byte) (*block.Header, bool) { + header, ok := m.getHeaderFromPool(hash) + if ok { + return header, true + } + + hdrData, err := GetDataFromStorage(hash, m.storage, m.epochToSync) + if err != nil { + return nil, false + } + + var hdr block.Header + err = m.marshalizer.Unmarshal(&hdr, hdrData) + if err != nil { + return nil, false + } + + return &hdr, true +} + +func (m *missingHeadersByHash) getHeaderFromPool(hash []byte) (*block.Header, bool) { + val, ok := m.pool.Peek(hash) + if !ok { + return nil, false + } + + header, ok := val.(*block.Header) + if !ok { + return nil, false + } + + return header, true +} + +// GetHeader returns the synced headers +func (m *missingHeadersByHash) GetHeader() (map[string]*block.Header, error) { + m.mutMissingHdrs.Lock() + defer m.mutMissingHdrs.Unlock() + if !m.syncedAll { + return nil, update.ErrNotSynced + } + + return m.mapHeaders, nil +} + +// IsInterfaceNil returns true if the underlying object is nil +func (m *missingHeadersByHash) IsInterfaceNil() bool { + return m == nil +} diff --git a/update/sync/syncMiniBlocks.go b/update/sync/syncMiniBlocks.go index 1b664013591..aea48bbadf1 100644 --- a/update/sync/syncMiniBlocks.go +++ b/update/sync/syncMiniBlocks.go @@ -93,6 +93,18 @@ func (p *pendingMiniBlocks) SyncPendingMiniBlocksFromMeta( listPendingMiniBlocks = append(listPendingMiniBlocks, computedPending...) } + return p.syncMiniBlocks(listPendingMiniBlocks, waitTime) +} + +// SyncPendingMiniBlocksForEpochStart will sync the miniblocks for the given epoch start shard miniblock headers +func (p *pendingMiniBlocks) SyncPendingMiniBlocksForEpochStart( + miniBlockHeaders []block.ShardMiniBlockHeader, + waitTime time.Duration, +) error { + return p.syncMiniBlocks(miniBlockHeaders, waitTime) +} + +func (p *pendingMiniBlocks) syncMiniBlocks(listPendingMiniBlocks []block.ShardMiniBlockHeader, waitTime time.Duration) error { _ = process.EmptyChannel(p.chReceivedAll) requestedMBs := 0 From 47e9410ea7443d12b4e94419a4a22dfceff8c659 Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Thu, 19 Mar 2020 18:49:49 +0200 Subject: [PATCH 21/61] fixes, moving implementation to bootstrapper.
--- cmd/node/main.go | 1 - 1 file changed, 1 deletion(-) diff --git a/cmd/node/main.go b/cmd/node/main.go index 3a44e651a6e..13ebcc6f52e 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -58,7 +58,6 @@ import ( "github.com/ElrondNetwork/elrond-go/process/transaction" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/storage" - storageFactory "github.com/ElrondNetwork/elrond-go/storage/factory" "github.com/ElrondNetwork/elrond-go/storage/lrucache" "github.com/ElrondNetwork/elrond-go/storage/pathmanager" "github.com/ElrondNetwork/elrond-go/storage/storageUnit" From 607e5c25cf25d9d337767d05fb9d770f78628100 Mon Sep 17 00:00:00 2001 From: bogdan-rosianu Date: Fri, 20 Mar 2020 11:38:16 +0200 Subject: [PATCH 22/61] EN-5829 added sync headers by hash + integration (WIP) --- .../bootstrap/epochStartDataProvider.go | 147 ++++++++++++------ .../factory/epochStartDataProviderFactory.go | 2 +- update/interface.go | 8 + update/sync/syncHeadersByHash.go | 38 +++-- 4 files changed, 129 insertions(+), 66 deletions(-) diff --git a/epochStart/bootstrap/epochStartDataProvider.go b/epochStart/bootstrap/epochStartDataProvider.go index d013def55d5..e9c7f7b169f 100644 --- a/epochStart/bootstrap/epochStartDataProvider.go +++ b/epochStart/bootstrap/epochStartDataProvider.go @@ -34,7 +34,6 @@ import ( "github.com/ElrondNetwork/elrond-go/process/factory" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/storage" - "github.com/ElrondNetwork/elrond-go/storage/lrucache" "github.com/ElrondNetwork/elrond-go/storage/timecache" "github.com/ElrondNetwork/elrond-go/update" "github.com/ElrondNetwork/elrond-go/update/sync" @@ -188,12 +187,22 @@ func (esdp *epochStartDataProvider) Bootstrap() (*structs.ComponentsNeededForBoo return nil, err } + esdp.requestHandler = requestHandlerMeta + interceptorsContainer, err := esdp.createInterceptors(commonDataPool) if err != nil || interceptorsContainer == nil { return nil, err } - esdp.requestHandler = requestHandlerMeta + miniBlocksSyncer, err := esdp.getMiniBlockSyncer(commonDataPool.MiniBlocks()) + if err != nil { + return nil, err + } + + missingHeadersSyncer, err := esdp.getHeaderHandlerSyncer(commonDataPool.Headers()) + if err != nil { + return nil, err + } epochNumForRequestingTheLatestAvailable := uint32(math.MaxUint32) metaBlock, err := esdp.getEpochStartMetaBlock(epochNumForRequestingTheLatestAvailable) @@ -201,7 +210,7 @@ func (esdp *epochStartDataProvider) Bootstrap() (*structs.ComponentsNeededForBoo return nil, err } - prevMetaBlock, err := esdp.getEpochStartMetaBlock(metaBlock.Epoch - 1) + prevMetaBlock, err := esdp.getMetaBlock(missingHeadersSyncer, metaBlock.EpochStart.Economics.PrevEpochStartHash) if err != nil { return nil, err } @@ -219,7 +228,7 @@ func (esdp *epochStartDataProvider) Bootstrap() (*structs.ComponentsNeededForBoo return nil, err } - shardHeaders, err := esdp.getShardHeaders(metaBlock, nodesConfig, shardCoordinator) + shardHeaders, err := esdp.getShardHeaders(missingHeadersSyncer, metaBlock, nodesConfig, shardCoordinator) if err != nil { log.Debug("shard headers not found", "error", err) } @@ -234,7 +243,7 @@ func (esdp *epochStartDataProvider) Bootstrap() (*structs.ComponentsNeededForBoo return nil, err } - pendingMiniBlocks, err := esdp.getMiniBlocks(epochStartData.PendingMiniBlockHeaders, shardCoordinator.SelfId()) + pendingMiniBlocks, err := esdp.getMiniBlocks(miniBlocksSyncer, epochStartData.PendingMiniBlockHeaders, shardCoordinator.SelfId()) if err != nil { return nil, err } @@ 
-248,13 +257,13 @@ func (esdp *epochStartDataProvider) Bootstrap() (*structs.ComponentsNeededForBoo // log.Info("received miniblock", "type", receivedMb.Type) //} - lastFinalizedMetaBlock, err := esdp.getMetaBlock(epochStartData.LastFinishedMetaBlock) + lastFinalizedMetaBlock, err := esdp.getMetaBlock(missingHeadersSyncer, epochStartData.LastFinishedMetaBlock) if err != nil { return nil, err } log.Info("received last finalized meta block", "nonce", lastFinalizedMetaBlock.Nonce) - firstPendingMetaBlock, err := esdp.getMetaBlock(epochStartData.FirstPendingMetaBlock) + firstPendingMetaBlock, err := esdp.getMetaBlock(missingHeadersSyncer, epochStartData.FirstPendingMetaBlock) if err != nil { return nil, err } @@ -311,29 +320,39 @@ func (esdp *epochStartDataProvider) Bootstrap() (*structs.ComponentsNeededForBoo return components, nil } -func (esdp *epochStartDataProvider) getMiniBlocks(pendingMiniBlocks []block.ShardMiniBlockHeader, shardID uint32) (map[string]*block.MiniBlock, error) { - cacher, err := lrucache.NewCache(100) - if err != nil { - return nil, err - } +func (esdp *epochStartDataProvider) getMiniBlockSyncer(dataPool storage.Cacher) (update.EpochStartPendingMiniBlocksSyncHandler, error) { syncMiniBlocksArgs := sync.ArgsNewPendingMiniBlocksSyncer{ Storage: &disabled.Storer{}, - Cache: cacher, + Cache: dataPool, Marshalizer: esdp.marshalizer, RequestHandler: esdp.requestHandler, } - pendingMiniBlocksSyncer, err := sync.NewPendingMiniBlocksSyncer(syncMiniBlocksArgs) - if err != nil { - return nil, err + return sync.NewPendingMiniBlocksSyncer(syncMiniBlocksArgs) +} + +func (esdp *epochStartDataProvider) getHeaderHandlerSyncer(pool dataRetriever.HeadersPool) (update.MissingHeadersByHashSyncer, error) { + syncMissingHeadersArgs := sync.ArgsNewMissingHeadersByHashSyncer{ + Storage: &disabled.Storer{}, + Cache: pool, + Marshalizer: esdp.marshalizer, + RequestHandler: esdp.requestHandler, } + return sync.NewMissingheadersByHashSyncer(syncMissingHeadersArgs) +} + +func (esdp *epochStartDataProvider) getMiniBlocks( + handler update.EpochStartPendingMiniBlocksSyncHandler, + pendingMiniBlocks []block.ShardMiniBlockHeader, + shardID uint32, +) (map[string]*block.MiniBlock, error) { waitTime := 1 * time.Minute - err = pendingMiniBlocksSyncer.SyncPendingMiniBlocksForEpochStart(pendingMiniBlocks, waitTime) + err := handler.SyncPendingMiniBlocksForEpochStart(pendingMiniBlocks, waitTime) if err != nil { return nil, err } - return pendingMiniBlocksSyncer.GetMiniBlocks() + return handler.GetMiniBlocks() } func (esdp *epochStartDataProvider) createInterceptors(dataPool dataRetriever.PoolsHolder) (process.InterceptorsContainer, error) { @@ -526,21 +545,35 @@ func (esdp *epochStartDataProvider) resetTopicsAndInterceptors() { } } -func (esdp *epochStartDataProvider) getMetaBlock(hash []byte) (*block.MetaBlock, error) { - esdp.requestMetaBlock(hash) - - time.Sleep(delayAfterRequesting) +func (esdp *epochStartDataProvider) getMetaBlock(syncer update.MissingHeadersByHashSyncer, hash []byte) (*block.MetaBlock, error) { + //esdp.requestMetaBlock(hash) + // + //time.Sleep(delayAfterRequesting) + // + //for { + // numConnectedPeers := len(esdp.messenger.Peers()) + // threshold := int(thresholdForConsideringMetaBlockCorrect * float64(numConnectedPeers)) + // mb, errConsensusNotReached := esdp.metaBlockInterceptor.GetMetaBlock(hash, threshold) + // if errConsensusNotReached == nil { + // return mb, nil + // } + // log.Info("consensus not reached for meta block. 
re-requesting and trying again...") + // esdp.requestMetaBlock(hash) + //} + waitTime := 1 * time.Minute + err := syncer.SyncMissingHeadersByHash(esdp.defaultShardCoordinator.SelfId(), [][]byte{hash}, waitTime) + if err != nil { + return nil, err + } - for { - numConnectedPeers := len(esdp.messenger.Peers()) - threshold := int(thresholdForConsideringMetaBlockCorrect * float64(numConnectedPeers)) - mb, errConsensusNotReached := esdp.metaBlockInterceptor.GetMetaBlock(hash, threshold) - if errConsensusNotReached == nil { - return mb, nil - } - log.Info("consensus not reached for meta block. re-requesting and trying again...") - esdp.requestMetaBlock(hash) + hdrs, err := syncer.GetHeaders() + if err != nil { + return nil, err } + + syncer.ClearFields() + + return hdrs[string(hash)].(*block.MetaBlock), nil } func (esdp *epochStartDataProvider) getEpochStartMetaBlock(epoch uint32) (*block.MetaBlock, error) { @@ -584,6 +617,7 @@ func (esdp *epochStartDataProvider) getShardCoordinator(metaBlock *block.MetaBlo } func (esdp *epochStartDataProvider) getShardHeaders( + syncer update.MissingHeadersByHashSyncer, metaBlock *block.MetaBlock, nodesConfig *sharding.NodesSetup, shardCoordinator sharding.Coordinator, @@ -594,7 +628,7 @@ func (esdp *epochStartDataProvider) getShardHeaders( if shardID == core.MetachainShardId { for _, entry := range metaBlock.EpochStart.LastFinalizedHeaders { var hdr *block.Header - hdr, err := esdp.getShardHeader(entry.HeaderHash, entry.ShardID) + hdr, err := esdp.getShardHeader(syncer, entry.HeaderHash, entry.ShardID) if err != nil { return nil, err } @@ -616,6 +650,7 @@ func (esdp *epochStartDataProvider) getShardHeaders( } hdr, err := esdp.getShardHeader( + syncer, entryForShard.HeaderHash, entryForShard.ShardID, ) @@ -628,27 +663,43 @@ func (esdp *epochStartDataProvider) getShardHeaders( } func (esdp *epochStartDataProvider) getShardHeader( + syncer update.MissingHeadersByHashSyncer, hash []byte, shardID uint32, ) (*block.Header, error) { - esdp.requestShardHeader(shardID, hash) - time.Sleep(delayBetweenRequests) + waitTime := 1 * time.Minute + err := syncer.SyncMissingHeadersByHash(esdp.defaultShardCoordinator.SelfId(), [][]byte{hash}, waitTime) + if err != nil { + return nil, err + } - count := 0 - for { - if count > maxNumTimesToRetry { - panic("can't sync with the other peers") - } - count++ - numConnectedPeers := len(esdp.messenger.Peers()) - threshold := int(thresholdForConsideringMetaBlockCorrect * float64(numConnectedPeers)) - mb, errConsensusNotReached := esdp.shardHeaderInterceptor.GetShardHeader(hash, threshold) - if errConsensusNotReached == nil { - return mb, nil - } - log.Info("consensus not reached for shard header. re-requesting and trying again...") - esdp.requestShardHeader(shardID, hash) + hdrs, err := syncer.GetHeaders() + if err != nil { + return nil, err } + + syncer.ClearFields() + + return hdrs[string(hash)].(*block.Header), nil + + //esdp.requestShardHeader(shardID, hash) + //time.Sleep(delayBetweenRequests) + // + //count := 0 + //for { + // if count > maxNumTimesToRetry { + // panic("can't sync with the other peers") + // } + // count++ + // numConnectedPeers := len(esdp.messenger.Peers()) + // threshold := int(thresholdForConsideringMetaBlockCorrect * float64(numConnectedPeers)) + // mb, errConsensusNotReached := esdp.shardHeaderInterceptor.GetShardHeader(hash, threshold) + // if errConsensusNotReached == nil { + // return mb, nil + // } + // log.Info("consensus not reached for shard header. 
re-requesting and trying again...") + // esdp.requestShardHeader(shardID, hash) //} } func (esdp *epochStartDataProvider) requestMetaBlock(hash []byte) { diff --git a/epochStart/bootstrap/factory/epochStartDataProviderFactory.go b/epochStart/bootstrap/factory/epochStartDataProviderFactory.go index b544896d410..4c2a05b663b 100644 --- a/epochStart/bootstrap/factory/epochStartDataProviderFactory.go +++ b/epochStart/bootstrap/factory/epochStartDataProviderFactory.go @@ -96,7 +96,7 @@ func NewEpochStartDataProviderFactory(args EpochStartDataProviderFactoryArgs) (* args.OriginalNodesConfig, args.GeneralConfig, ) - shouldSync = false // hardcoded so we can test we can sync + shouldSync = true // hardcoded so we can test we can sync return &epochStartDataProviderFactory{ pubKey: args.PubKey, diff --git a/update/interface.go b/update/interface.go index 0b13d20caa0..85c1d2adba1 100644 --- a/update/interface.go +++ b/update/interface.go @@ -135,6 +135,14 @@ type PendingTransactionsSyncHandler interface { IsInterfaceNil() bool } +// MissingHeadersByHashSyncer defines the methods to sync all missing headers by hash +type MissingHeadersByHashSyncer interface { + SyncMissingHeadersByHash(shardID uint32, headersHashes [][]byte, waitTime time.Duration) error + GetHeaders() (map[string]data.HeaderHandler, error) + ClearFields() + IsInterfaceNil() bool +} + // DataWriter defines the methods to write data type DataWriter interface { WriteString(s string) (int, error) diff --git a/update/sync/syncHeadersByHash.go b/update/sync/syncHeadersByHash.go index b1171653a04..d88c7d16c09 100644 --- a/update/sync/syncHeadersByHash.go +++ b/update/sync/syncHeadersByHash.go @@ -5,6 +5,7 @@ import ( "time" "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/marshal" @@ -15,9 +16,9 @@ import ( type missingHeadersByHash struct { mutMissingHdrs sync.Mutex - mapHeaders map[string]*block.Header + mapHeaders map[string]data.HeaderHandler mapHashes map[string]struct{} - pool storage.Cacher + pool dataRetriever.HeadersPool storage update.HistoryStorer chReceivedAll chan bool marshalizer marshal.Marshalizer @@ -30,7 +31,7 @@ type missingHeadersByHash struct { // ArgsNewMissingHeadersByHashSyncer defines the arguments needed for the syncer type ArgsNewMissingHeadersByHashSyncer struct { Storage storage.Storer - Cache storage.Cacher + Cache dataRetriever.HeadersPool Marshalizer marshal.Marshalizer RequestHandler process.RequestHandler } @@ -52,7 +53,7 @@ func NewMissingheadersByHashSyncer(args ArgsNewMissingHeadersByHashSyncer) (*mis p := &missingHeadersByHash{ mutMissingHdrs: sync.Mutex{}, - mapHeaders: make(map[string]*block.Header), + mapHeaders: make(map[string]data.HeaderHandler), mapHashes: make(map[string]struct{}), pool: args.Cache, storage: args.Storage, @@ -114,7 +115,7 @@ // receivedHeader is a callback function that is called when a new header is received // it stores the header and signals when all the requested headers have been received -func (m *missingHeadersByHash) receivedHeader(hdrHash []byte) { +func (m *missingHeadersByHash) receivedHeader(hdrHandler data.HeaderHandler, hdrHash []byte) { m.mutMissingHdrs.Lock() if m.stopSyncing { m.mutMissingHdrs.Unlock() @@ -145,7 +146,7 @@ } } -func (m *missingHeadersByHash) getHeaderFromPoolOrStorage(hash []byte) (*block.Header, bool)
{ +func (m *missingHeadersByHash) getHeaderFromPoolOrStorage(hash []byte) (data.HeaderHandler, bool) { header, ok := m.getHeaderFromPool(hash) if ok { return header, true @@ -165,22 +166,17 @@ func (m *missingHeadersByHash) getHeaderFromPool... return &hdr, true } -func (m *missingHeadersByHash) getHeaderFromPool(hash []byte) (*block.Header, bool) { - val, ok := m.pool.Peek(hash) - if !ok { - return nil, false - } - - header, ok := val.(*block.Header) - if !ok { +func (m *missingHeadersByHash) getHeaderFromPool(hash []byte) (data.HeaderHandler, bool) { + val, err := m.pool.GetHeaderByHash(hash) + if err != nil { return nil, false } - return header, true + return val, true } -// GetHeader returns the synced headers -func (m *missingHeadersByHash) GetHeader() (map[string]*block.Header, error) { +// GetHeaders returns the synced headers +func (m *missingHeadersByHash) GetHeaders() (map[string]data.HeaderHandler, error) { m.mutMissingHdrs.Lock() defer m.mutMissingHdrs.Unlock() if !m.syncedAll { @@ -190,6 +186,14 @@ func (m *missingHeadersByHash) GetHeader() (map[string]*block.Header, error) { return m.mapHeaders, nil } +// ClearFields will clear all the maps +func (m *missingHeadersByHash) ClearFields() { + m.mutMissingHdrs.Lock() + m.mapHashes = make(map[string]struct{}) + m.mapHeaders = make(map[string]data.HeaderHandler) + m.mutMissingHdrs.Unlock() +} + // IsInterfaceNil returns true if the underlying object is nil func (m *missingHeadersByHash) IsInterfaceNil() bool { return m == nil From fdd2c5ca1b9086878e25e30bf0a0e01e2c52d8ea Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Fri, 20 Mar 2020 11:44:15 +0200 Subject: [PATCH 23/61] fixes, moving implementation to bootstrapper. --- epochStart/bootstrap/epochStartDataProvider.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/epochStart/bootstrap/epochStartDataProvider.go b/epochStart/bootstrap/epochStartDataProvider.go index 8e944421f0d..098f5d17de3 100644 --- a/epochStart/bootstrap/epochStartDataProvider.go +++ b/epochStart/bootstrap/epochStartDataProvider.go @@ -177,6 +177,7 @@ func (esdp *epochStartDataProvider) searchDataInLocalStorage() { // TODO: add a component which opens headers storer and gets the last epoch start metablock // in order to provide the last known epoch in storage. Right now, it won't work as expected // if storage pruning is disabled + isEpochFoundInStorage := true currentEpoch, errNotCritical := storageFactory.FindLastEpochFromStorage( esdp.workingDir, esdp.genesisNodesConfig.ChainID, @@ -185,9 +186,19 @@ ) if errNotCritical != nil { log.Debug("no epoch db found in storage", "error", errNotCritical.Error()) + isEpochFoundInStorage = false } log.Debug("current epoch from the storage", "epoch", currentEpoch) + + shouldSync := ShouldSyncWithTheNetwork( + args.StartTime, + isEpochFoundInStorage, + args.OriginalNodesConfig, + args.GeneralConfig, + ) + + log.Debug("shouldSync epochStartData", "shouldSync", shouldSync) } // Bootstrap will handle requesting and receiving the needed information the node will bootstrap from From 47116cbcad63e471adf35b41b1a0c3061a739796 Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Fri, 20 Mar 2020 12:50:16 +0200 Subject: [PATCH 24/61] fixes, moving implementation to bootstrapper.
--- cmd/node/main.go | 56 +++--- .../bootstrap/epochStartDataProvider.go | 9 - .../epochStartMetaBlockInterceptorStub.go | 41 ---- epochStart/bootstrap/mock/messengerStub.go | 186 ------------------ .../mock/metaBlockInterceptorStub.go | 41 ---- .../bootstrap/mock/metaBlockResolverStub.go | 20 -- .../mock/miniBlockInterceptorStub.go | 41 ---- .../bootstrap/mock/nodesConfigProviderStub.go | 25 --- epochStart/bootstrap/mock/p2pMessageMock.go | 54 ----- epochStart/bootstrap/mock/pathManagerStub.go | 32 --- epochStart/bootstrap/mock/publicKeyMock.go | 30 --- .../bootstrap/mock/shardCoordinatorMock.go | 76 ------- .../mock/shardHeaderInterceptorStub.go | 51 ----- .../bootstrap/mock/whiteListHandlerStub.go | 32 --- .../bootstrap/simpleMetaBlockInterceptor.go | 132 ------------- .../bootstrap/simpleMiniBlockInterceptor.go | 132 ------------- .../bootstrap/simpleShardHeaderInterceptor.go | 132 ------------- 17 files changed, 30 insertions(+), 1060 deletions(-) delete mode 100644 epochStart/bootstrap/mock/epochStartMetaBlockInterceptorStub.go delete mode 100644 epochStart/bootstrap/mock/messengerStub.go delete mode 100644 epochStart/bootstrap/mock/metaBlockInterceptorStub.go delete mode 100644 epochStart/bootstrap/mock/metaBlockResolverStub.go delete mode 100644 epochStart/bootstrap/mock/miniBlockInterceptorStub.go delete mode 100644 epochStart/bootstrap/mock/nodesConfigProviderStub.go delete mode 100644 epochStart/bootstrap/mock/p2pMessageMock.go delete mode 100644 epochStart/bootstrap/mock/pathManagerStub.go delete mode 100644 epochStart/bootstrap/mock/publicKeyMock.go delete mode 100644 epochStart/bootstrap/mock/shardCoordinatorMock.go delete mode 100644 epochStart/bootstrap/mock/shardHeaderInterceptorStub.go delete mode 100644 epochStart/bootstrap/mock/whiteListHandlerStub.go delete mode 100644 epochStart/bootstrap/simpleMetaBlockInterceptor.go delete mode 100644 epochStart/bootstrap/simpleMiniBlockInterceptor.go delete mode 100644 epochStart/bootstrap/simpleShardHeaderInterceptor.go diff --git a/cmd/node/main.go b/cmd/node/main.go index f65ae59eca2..ff337927127 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -523,6 +523,18 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { } log.Trace("working directory", "path", workingDir) + storageCleanupFlagValue := ctx.GlobalBool(storageCleanup.Name) + if storageCleanupFlagValue { + dbPath := filepath.Join( + workingDir, + defaultDBPath) + log.Trace("cleaning storage", "path", dbPath) + err = os.RemoveAll(dbPath) + if err != nil { + return err + } + } + pathTemplateForPruningStorer := filepath.Join( workingDir, defaultDBPath, @@ -550,14 +562,18 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { return err } - var shardId = core.GetShardIdString(shardCoordinator.SelfId()) + genesisShardCoordinator, nodeType, err := createShardCoordinator(nodesConfig, pubKey, preferencesConfig.Preferences, log) + if err != nil { + return err + } + var shardId = core.GetShardIdString(genesisShardCoordinator.SelfId()) log.Trace("creating crypto components") cryptoArgs := factory.NewCryptoComponentsFactoryArgs( ctx, generalConfig, nodesConfig, - shardCoordinator, + genesisShardCoordinator, keyGen, privKey, log, @@ -585,18 +601,18 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { time.Sleep(secondsToWaitForP2PBootstrap * time.Second) epochStartComponentArgs := factoryEpochBootstrap.EpochStartDataProviderFactoryArgs{ - PubKey: pubKey, - Messenger: networkComponents.NetMessenger, - 
Marshalizer: marshalizer, - Hasher: hasher, - NodesConfigProvider: nodesconfigprovider.NewSimpleNodesConfigProvider(nodesConfig), - PathManager: pathManager, - StartTime: startTime, - OriginalNodesConfig: nodesConfig, - GeneralConfig: generalConfig, - WorkingDir: workingDir, - DefaultDBPath: defaultDBPath, - DefaultEpochString: defaultEpochString, + PubKey: pubKey, + Messenger: networkComponents.NetMessenger, + Marshalizer: marshalizer, + Hasher: hasher, + NodesConfigProvider: nodesconfigprovider.NewSimpleNodesConfigProvider(nodesConfig), + PathManager: pathManager, + StartTime: startTime, + OriginalNodesConfig: nodesConfig, + GeneralConfig: generalConfig, + WorkingDir: workingDir, + DefaultDBPath: defaultDBPath, + DefaultEpochString: defaultEpochString, PubKey: pubKey, Messenger: networkComponents.NetMessenger, Marshalizer: coreComponents.InternalMarshalizer, @@ -638,18 +654,6 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { log.Error("error bootstrapping", "error", err) } - storageCleanupFlagValue := ctx.GlobalBool(storageCleanup.Name) - if storageCleanupFlagValue { - dbPath := filepath.Join( - workingDir, - defaultDBPath) - log.Trace("cleaning storage", "path", dbPath) - err = os.RemoveAll(dbPath) - if err != nil { - return err - } - } - log.Trace("creating economics data components") economicsData, err := economics.NewEconomicsData(economicsConfig) if err != nil { diff --git a/epochStart/bootstrap/epochStartDataProvider.go b/epochStart/bootstrap/epochStartDataProvider.go index dda01b478e6..684be1b717c 100644 --- a/epochStart/bootstrap/epochStartDataProvider.go +++ b/epochStart/bootstrap/epochStartDataProvider.go @@ -314,15 +314,6 @@ func (esdp *epochStartDataProvider) Bootstrap() (*structs.ComponentsNeededForBoo if err != nil { return nil, err } - //pendingMiniBlocks := make([]*block.MiniBlock, 0) - //for _, mb := range epochStartData.PendingMiniBlockHeaders { - // receivedMb, errGetMb := esdp.getMiniBlock(&mb) - // if errGetMb != nil { - // return nil, errGetMb - // } - // pendingMiniBlocks = append(pendingMiniBlocks, receivedMb) - // log.Info("received miniblock", "type", receivedMb.Type) - //} lastFinalizedMetaBlock, err := esdp.getMetaBlock(missingHeadersSyncer, epochStartData.LastFinishedMetaBlock) if err != nil { diff --git a/epochStart/bootstrap/mock/epochStartMetaBlockInterceptorStub.go b/epochStart/bootstrap/mock/epochStartMetaBlockInterceptorStub.go deleted file mode 100644 index 88dac0b5a99..00000000000 --- a/epochStart/bootstrap/mock/epochStartMetaBlockInterceptorStub.go +++ /dev/null @@ -1,41 +0,0 @@ -package mock - -import ( - "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/p2p" - "github.com/ElrondNetwork/elrond-go/process" -) - -// EpochStartMetaBlockInterceptorStub - -type EpochStartMetaBlockInterceptorStub struct { - ProcessReceivedMessageCalled func(message p2p.MessageP2P, broadcastHandler func(buffToSend []byte)) error - GetMetaBlockCalled func(target int, epoch uint32) (*block.MetaBlock, error) -} - -// SetIsDataForCurrentShardVerifier - -func (m *EpochStartMetaBlockInterceptorStub) SetIsDataForCurrentShardVerifier(_ process.InterceptedDataVerifier) error { - return nil -} - -// ProcessReceivedMessage - -func (m *EpochStartMetaBlockInterceptorStub) ProcessReceivedMessage(message p2p.MessageP2P, broadcastHandler func(buffToSend []byte)) error { - if m.ProcessReceivedMessageCalled != nil { - return m.ProcessReceivedMessageCalled(message, broadcastHandler) - } - - return nil -} - -// 
GetEpochStartMetaBlock - -func (m *EpochStartMetaBlockInterceptorStub) GetEpochStartMetaBlock(target int, epoch uint32) (*block.MetaBlock, error) { - if m.GetMetaBlockCalled != nil { - return m.GetMetaBlockCalled(target, epoch) - } - - return &block.MetaBlock{}, nil -} - -// IsInterfaceNil - -func (m *EpochStartMetaBlockInterceptorStub) IsInterfaceNil() bool { - return m == nil -} diff --git a/epochStart/bootstrap/mock/messengerStub.go b/epochStart/bootstrap/mock/messengerStub.go deleted file mode 100644 index b796fcc5066..00000000000 --- a/epochStart/bootstrap/mock/messengerStub.go +++ /dev/null @@ -1,186 +0,0 @@ -package mock - -import "github.com/ElrondNetwork/elrond-go/p2p" - -// MessengerStub - -type MessengerStub struct { - CloseCalled func() error - IDCalled func() p2p.PeerID - PeersCalled func() []p2p.PeerID - AddressesCalled func() []string - ConnectToPeerCalled func(address string) error - ConnectedPeersOnTopicCalled func(topic string) []p2p.PeerID - TrimConnectionsCalled func() - IsConnectedCalled func(peerID p2p.PeerID) bool - ConnectedPeersCalled func() []p2p.PeerID - CreateTopicCalled func(name string, createChannelForTopic bool) error - HasTopicCalled func(name string) bool - HasTopicValidatorCalled func(name string) bool - BroadcastOnChannelCalled func(channel string, topic string, buff []byte) - BroadcastCalled func(topic string, buff []byte) - RegisterMessageProcessorCalled func(topic string, handler p2p.MessageProcessor) error - UnregisterAllMessageProcessorsCalled func() error - UnregisterMessageProcessorCalled func(topic string) error - SendToConnectedPeerCalled func(topic string, buff []byte, peerID p2p.PeerID) error - OutgoingChannelLoadBalancerCalled func() p2p.ChannelLoadBalancer - BootstrapCalled func() error -} - -// UnregisterAllMessageProcessors - -func (ms *MessengerStub) UnregisterAllMessageProcessors() error { - if ms.UnregisterAllMessageProcessorsCalled != nil { - return ms.UnregisterAllMessageProcessorsCalled() - } - - return nil -} - -// ConnectedAddresses - -func (ms *MessengerStub) ConnectedAddresses() []string { - panic("implement me") -} - -// PeerAddress - -func (ms *MessengerStub) PeerAddress(pid p2p.PeerID) string { - panic("implement me") -} - -// ConnectedPeersOnTopic - -func (ms *MessengerStub) ConnectedPeersOnTopic(topic string) []p2p.PeerID { - if ms.ConnectedPeersOnTopicCalled != nil { - return ms.ConnectedPeersOnTopicCalled(topic) - } - - return nil -} - -// BroadcastOnChannelBlocking - -func (ms *MessengerStub) BroadcastOnChannelBlocking(channel string, topic string, buff []byte) error { - panic("implement me") -} - -// IsConnectedToTheNetwork - -func (ms *MessengerStub) IsConnectedToTheNetwork() bool { - panic("implement me") -} - -// ThresholdMinConnectedPeers - -func (ms *MessengerStub) ThresholdMinConnectedPeers() int { - panic("implement me") -} - -// SetThresholdMinConnectedPeers - -func (ms *MessengerStub) SetThresholdMinConnectedPeers(minConnectedPeers int) error { - panic("implement me") -} - -// RegisterMessageProcessor - -func (ms *MessengerStub) RegisterMessageProcessor(topic string, handler p2p.MessageProcessor) error { - if ms.RegisterMessageProcessorCalled != nil { - return ms.RegisterMessageProcessorCalled(topic, handler) - } - - return nil -} - -// UnregisterMessageProcessor - -func (ms *MessengerStub) UnregisterMessageProcessor(topic string) error { - if ms.UnregisterMessageProcessorCalled != nil { - return ms.UnregisterMessageProcessorCalled(topic) - } - - return nil -} - -// Broadcast - -func (ms *MessengerStub) 
Broadcast(topic string, buff []byte) { - ms.BroadcastCalled(topic, buff) -} - -// OutgoingChannelLoadBalancer - -func (ms *MessengerStub) OutgoingChannelLoadBalancer() p2p.ChannelLoadBalancer { - return ms.OutgoingChannelLoadBalancerCalled() -} - -// Close - -func (ms *MessengerStub) Close() error { - return ms.CloseCalled() -} - -// ID - -func (ms *MessengerStub) ID() p2p.PeerID { - return ms.IDCalled() -} - -// Peers - -func (ms *MessengerStub) Peers() []p2p.PeerID { - if ms.PeersCalled != nil { - return ms.PeersCalled() - } - - return []p2p.PeerID{"peer1", "peer2", "peer3", "peer4", "peer5", "peer6"} -} - -// Addresses - -func (ms *MessengerStub) Addresses() []string { - return ms.AddressesCalled() -} - -// ConnectToPeer - -func (ms *MessengerStub) ConnectToPeer(address string) error { - return ms.ConnectToPeerCalled(address) -} - -// TrimConnections - -func (ms *MessengerStub) TrimConnections() { - ms.TrimConnectionsCalled() -} - -// IsConnected - -func (ms *MessengerStub) IsConnected(peerID p2p.PeerID) bool { - return ms.IsConnectedCalled(peerID) -} - -// ConnectedPeers - -func (ms *MessengerStub) ConnectedPeers() []p2p.PeerID { - return ms.ConnectedPeersCalled() -} - -// CreateTopic - -func (ms *MessengerStub) CreateTopic(name string, createChannelForTopic bool) error { - if ms.CreateTopicCalled != nil { - return ms.CreateTopicCalled(name, createChannelForTopic) - } - - return nil -} - -// HasTopic - -func (ms *MessengerStub) HasTopic(name string) bool { - return ms.HasTopicCalled(name) -} - -// HasTopicValidator - -func (ms *MessengerStub) HasTopicValidator(name string) bool { - return ms.HasTopicValidatorCalled(name) -} - -// BroadcastOnChannel - -func (ms *MessengerStub) BroadcastOnChannel(channel string, topic string, buff []byte) { - ms.BroadcastOnChannelCalled(channel, topic, buff) -} - -// SendToConnectedPeer - -func (ms *MessengerStub) SendToConnectedPeer(topic string, buff []byte, peerID p2p.PeerID) error { - return ms.SendToConnectedPeerCalled(topic, buff, peerID) -} - -// Bootstrap - -func (ms *MessengerStub) Bootstrap() error { - return ms.BootstrapCalled() -} - -// IsInterfaceNil returns true if there is no value under the interface -func (ms *MessengerStub) IsInterfaceNil() bool { - return ms == nil -} diff --git a/epochStart/bootstrap/mock/metaBlockInterceptorStub.go b/epochStart/bootstrap/mock/metaBlockInterceptorStub.go deleted file mode 100644 index e2a82ab9413..00000000000 --- a/epochStart/bootstrap/mock/metaBlockInterceptorStub.go +++ /dev/null @@ -1,41 +0,0 @@ -package mock - -import ( - "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/p2p" - "github.com/ElrondNetwork/elrond-go/process" -) - -// MetaBlockInterceptorStub - -type MetaBlockInterceptorStub struct { - ProcessReceivedMessageCalled func(message p2p.MessageP2P, broadcastHandler func(buffToSend []byte)) error - GetMetaBlockCalled func(hash []byte, target int) (*block.MetaBlock, error) -} - -// SetIsDataForCurrentShardVerifier - -func (m *MetaBlockInterceptorStub) SetIsDataForCurrentShardVerifier(_ process.InterceptedDataVerifier) error { - return nil -} - -// ProcessReceivedMessage - -func (m *MetaBlockInterceptorStub) ProcessReceivedMessage(message p2p.MessageP2P, broadcastHandler func(buffToSend []byte)) error { - if m.ProcessReceivedMessageCalled != nil { - return m.ProcessReceivedMessageCalled(message, broadcastHandler) - } - - return nil -} - -// GetMetaBlock - -func (m *MetaBlockInterceptorStub) GetMetaBlock(hash []byte, target int) (*block.MetaBlock, error) { - 
if m.GetMetaBlockCalled != nil { - return m.GetMetaBlockCalled(hash, target) - } - - return &block.MetaBlock{}, nil -} - -// IsInterfaceNil - -func (m *MetaBlockInterceptorStub) IsInterfaceNil() bool { - return m == nil -} diff --git a/epochStart/bootstrap/mock/metaBlockResolverStub.go b/epochStart/bootstrap/mock/metaBlockResolverStub.go deleted file mode 100644 index ed868592f40..00000000000 --- a/epochStart/bootstrap/mock/metaBlockResolverStub.go +++ /dev/null @@ -1,20 +0,0 @@ -package mock - -// MetaBlockResolverStub - -type MetaBlockResolverStub struct { - RequestEpochStartMetaBlockCalled func(epoch uint32) error -} - -// RequestEpochStartMetaBlock - -func (m *MetaBlockResolverStub) RequestEpochStartMetaBlock(epoch uint32) error { - if m.RequestEpochStartMetaBlockCalled != nil { - return m.RequestEpochStartMetaBlockCalled(epoch) - } - - return nil -} - -// IsInterfaceNil - -func (m *MetaBlockResolverStub) IsInterfaceNil() bool { - return m == nil -} diff --git a/epochStart/bootstrap/mock/miniBlockInterceptorStub.go b/epochStart/bootstrap/mock/miniBlockInterceptorStub.go deleted file mode 100644 index c7de260c77a..00000000000 --- a/epochStart/bootstrap/mock/miniBlockInterceptorStub.go +++ /dev/null @@ -1,41 +0,0 @@ -package mock - -import ( - "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/p2p" - "github.com/ElrondNetwork/elrond-go/process" -) - -// MiniBlockInterceptorStub - -type MiniBlockInterceptorStub struct { - ProcessReceivedMessageCalled func(message p2p.MessageP2P, broadcastHandler func(buffToSend []byte)) error - GetMiniBlockCalled func(hash []byte, target int) (*block.MiniBlock, error) -} - -// SetIsDataForCurrentShardVerifier - -func (m *MiniBlockInterceptorStub) SetIsDataForCurrentShardVerifier(_ process.InterceptedDataVerifier) error { - return nil -} - -// ProcessReceivedMessage - -func (m *MiniBlockInterceptorStub) ProcessReceivedMessage(message p2p.MessageP2P, broadcastHandler func(buffToSend []byte)) error { - if m.ProcessReceivedMessageCalled != nil { - return m.ProcessReceivedMessageCalled(message, broadcastHandler) - } - - return nil -} - -// GetMiniBlock - -func (m *MiniBlockInterceptorStub) GetMiniBlock(hash []byte, target int) (*block.MiniBlock, error) { - if m.GetMiniBlockCalled != nil { - return m.GetMiniBlockCalled(hash, target) - } - - return &block.MiniBlock{}, nil -} - -// IsInterfaceNil - -func (m *MiniBlockInterceptorStub) IsInterfaceNil() bool { - return m == nil -} diff --git a/epochStart/bootstrap/mock/nodesConfigProviderStub.go b/epochStart/bootstrap/mock/nodesConfigProviderStub.go deleted file mode 100644 index fd8966549d9..00000000000 --- a/epochStart/bootstrap/mock/nodesConfigProviderStub.go +++ /dev/null @@ -1,25 +0,0 @@ -package mock - -import ( - "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/sharding" -) - -// NodesConfigProviderStub - -type NodesConfigProviderStub struct { - GetNodesConfigForMetaBlockCalled func(metaBlock *block.MetaBlock) (*sharding.NodesSetup, error) -} - -// GetNodesConfigForMetaBlock - -func (n *NodesConfigProviderStub) GetNodesConfigForMetaBlock(metaBlock *block.MetaBlock) (*sharding.NodesSetup, error) { - if n.GetNodesConfigForMetaBlockCalled != nil { - return n.GetNodesConfigForMetaBlockCalled(metaBlock) - } - - return &sharding.NodesSetup{}, nil -} - -// IsInterfaceNil - -func (n *NodesConfigProviderStub) IsInterfaceNil() bool { - return n == nil -} diff --git a/epochStart/bootstrap/mock/p2pMessageMock.go 
b/epochStart/bootstrap/mock/p2pMessageMock.go deleted file mode 100644 index 99bd2f862c2..00000000000 --- a/epochStart/bootstrap/mock/p2pMessageMock.go +++ /dev/null @@ -1,54 +0,0 @@ -package mock - -import "github.com/ElrondNetwork/elrond-go/p2p" - -// P2PMessageMock - -type P2PMessageMock struct { - FromField []byte - DataField []byte - SeqNoField []byte - TopicIDsField []string - SignatureField []byte - KeyField []byte - PeerField p2p.PeerID -} - -// From - -func (msg *P2PMessageMock) From() []byte { - return msg.FromField -} - -// Data - -func (msg *P2PMessageMock) Data() []byte { - return msg.DataField -} - -// SeqNo - -func (msg *P2PMessageMock) SeqNo() []byte { - return msg.SeqNoField -} - -// TopicIDs - -func (msg *P2PMessageMock) TopicIDs() []string { - return msg.TopicIDsField -} - -// Signature - -func (msg *P2PMessageMock) Signature() []byte { - return msg.SignatureField -} - -// Key - -func (msg *P2PMessageMock) Key() []byte { - return msg.KeyField -} - -// Peer - -func (msg *P2PMessageMock) Peer() p2p.PeerID { - return msg.PeerField -} - -// IsInterfaceNil returns true if there is no value under the interface -func (msg *P2PMessageMock) IsInterfaceNil() bool { - return msg == nil -} diff --git a/epochStart/bootstrap/mock/pathManagerStub.go b/epochStart/bootstrap/mock/pathManagerStub.go deleted file mode 100644 index 78aa45b6b67..00000000000 --- a/epochStart/bootstrap/mock/pathManagerStub.go +++ /dev/null @@ -1,32 +0,0 @@ -package mock - -import "fmt" - -// PathManagerStub - -type PathManagerStub struct { - PathForEpochCalled func(shardId string, epoch uint32, identifier string) string - PathForStaticCalled func(shardId string, identifier string) string -} - -// PathForEpoch - -func (p *PathManagerStub) PathForEpoch(shardId string, epoch uint32, identifier string) string { - if p.PathForEpochCalled != nil { - return p.PathForEpochCalled(shardId, epoch, identifier) - } - - return fmt.Sprintf("Epoch_%d/Shard_%s/%s", epoch, shardId, identifier) -} - -// PathForStatic - -func (p *PathManagerStub) PathForStatic(shardId string, identifier string) string { - if p.PathForEpochCalled != nil { - return p.PathForStaticCalled(shardId, identifier) - } - - return fmt.Sprintf("Static/Shard_%s/%s", shardId, identifier) -} - -// IsInterfaceNil - -func (p *PathManagerStub) IsInterfaceNil() bool { - return p == nil -} diff --git a/epochStart/bootstrap/mock/publicKeyMock.go b/epochStart/bootstrap/mock/publicKeyMock.go deleted file mode 100644 index e018a183715..00000000000 --- a/epochStart/bootstrap/mock/publicKeyMock.go +++ /dev/null @@ -1,30 +0,0 @@ -package mock - -import "github.com/ElrondNetwork/elrond-go/crypto" - -// PublicKeyMock mocks a public key implementation -type PublicKeyMock struct { - ToByteArrayMock func() ([]byte, error) - SuiteMock func() crypto.Suite - PointMock func() crypto.Point -} - -// ToByteArray mocks converting a public key to a byte array -func (pubKey *PublicKeyMock) ToByteArray() ([]byte, error) { - return []byte("publicKeyMock"), nil -} - -// Suite - -func (pubKey *PublicKeyMock) Suite() crypto.Suite { - return pubKey.SuiteMock() -} - -// Point - -func (pubKey *PublicKeyMock) Point() crypto.Point { - return pubKey.PointMock() -} - -// IsInterfaceNil returns true if there is no value under the interface -func (pubKey *PublicKeyMock) IsInterfaceNil() bool { - return pubKey == nil -} diff --git a/epochStart/bootstrap/mock/shardCoordinatorMock.go b/epochStart/bootstrap/mock/shardCoordinatorMock.go deleted file mode 100644 index 12dee4aad79..00000000000 --- 
a/epochStart/bootstrap/mock/shardCoordinatorMock.go +++ /dev/null @@ -1,76 +0,0 @@ -package mock - -import ( - "fmt" - - "github.com/ElrondNetwork/elrond-go/data/state" -) - -// MultipleShardsCoordinatorMock - -type MultipleShardsCoordinatorMock struct { - NoShards uint32 - ComputeIdCalled func(address state.AddressContainer) uint32 - SelfIDCalled func() uint32 - CurrentShard uint32 -} - -// NewMultiShardsCoordinatorMock - -func NewMultiShardsCoordinatorMock(nrShard uint32) *MultipleShardsCoordinatorMock { - return &MultipleShardsCoordinatorMock{NoShards: nrShard} -} - -// NumberOfShards - -func (scm *MultipleShardsCoordinatorMock) NumberOfShards() uint32 { - return scm.NoShards -} - -// ComputeId - -func (scm *MultipleShardsCoordinatorMock) ComputeId(address state.AddressContainer) uint32 { - if scm.ComputeIdCalled == nil { - return scm.SelfId() - } - return scm.ComputeIdCalled(address) -} - -// SelfId - -func (scm *MultipleShardsCoordinatorMock) SelfId() uint32 { - if scm.SelfIDCalled != nil { - return scm.SelfIDCalled() - } - - return scm.CurrentShard -} - -// SetSelfId - -func (scm *MultipleShardsCoordinatorMock) SetSelfId(shardId uint32) error { - return nil -} - -// SameShard - -func (scm *MultipleShardsCoordinatorMock) SameShard(firstAddress, secondAddress state.AddressContainer) bool { - return true -} - -// SetNoShards - -func (scm *MultipleShardsCoordinatorMock) SetNoShards(noShards uint32) { - scm.NoShards = noShards -} - -// CommunicationIdentifier returns the identifier between current shard ID and destination shard ID -// identifier is generated such as the first shard from identifier is always smaller than the last -func (scm *MultipleShardsCoordinatorMock) CommunicationIdentifier(destShardID uint32) string { - if destShardID == scm.CurrentShard { - return fmt.Sprintf("_%d", scm.CurrentShard) - } - - if destShardID < scm.CurrentShard { - return fmt.Sprintf("_%d_%d", destShardID, scm.CurrentShard) - } - - return fmt.Sprintf("_%d_%d", scm.CurrentShard, destShardID) -} - -// IsInterfaceNil returns true if there is no value under the interface -func (scm *MultipleShardsCoordinatorMock) IsInterfaceNil() bool { - return scm == nil -} diff --git a/epochStart/bootstrap/mock/shardHeaderInterceptorStub.go b/epochStart/bootstrap/mock/shardHeaderInterceptorStub.go deleted file mode 100644 index a2f10f6b98a..00000000000 --- a/epochStart/bootstrap/mock/shardHeaderInterceptorStub.go +++ /dev/null @@ -1,51 +0,0 @@ -package mock - -import ( - "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/p2p" - "github.com/ElrondNetwork/elrond-go/process" -) - -// ShardHeaderInterceptorStub - -type ShardHeaderInterceptorStub struct { - ProcessReceivedMessageCalled func(message p2p.MessageP2P, broadcastHandler func(buffToSend []byte)) error - GetAllReceivedShardHeadersCalled func() []block.ShardData - GetShardHeaderCalled func(hash []byte, target int) (*block.Header, error) -} - -// SetIsDataForCurrentShardVerifier - -func (s *ShardHeaderInterceptorStub) SetIsDataForCurrentShardVerifier(_ process.InterceptedDataVerifier) error { - return nil -} - -// GetShardHeader - -func (s *ShardHeaderInterceptorStub) GetShardHeader(hash []byte, target int) (*block.Header, error) { - if s.GetShardHeaderCalled != nil { - return s.GetShardHeaderCalled(hash, target) - } - - return &block.Header{}, nil -} - -// ProcessReceivedMessage - -func (s *ShardHeaderInterceptorStub) ProcessReceivedMessage(message p2p.MessageP2P, broadcastHandler func(buffToSend []byte)) error { - if 
s.ProcessReceivedMessageCalled != nil { - return s.ProcessReceivedMessageCalled(message, broadcastHandler) - } - - return nil -} - -// GetAllReceivedShardHeaders - -func (s *ShardHeaderInterceptorStub) GetAllReceivedShardHeaders() []block.ShardData { - if s.GetAllReceivedShardHeadersCalled != nil { - return s.GetAllReceivedShardHeadersCalled() - } - - return nil -} - -// IsInterfaceNil - -func (s *ShardHeaderInterceptorStub) IsInterfaceNil() bool { - return s == nil -} diff --git a/epochStart/bootstrap/mock/whiteListHandlerStub.go b/epochStart/bootstrap/mock/whiteListHandlerStub.go deleted file mode 100644 index 3fa020ecd84..00000000000 --- a/epochStart/bootstrap/mock/whiteListHandlerStub.go +++ /dev/null @@ -1,32 +0,0 @@ -package mock - -import "github.com/ElrondNetwork/elrond-go/process" - -type WhiteListHandlerStub struct { - RemoveCalled func(keys [][]byte) - AddCalled func(keys [][]byte) - IsForCurrentShardCalled func(interceptedData process.InterceptedData) bool -} - -func (w *WhiteListHandlerStub) IsForCurrentShard(interceptedData process.InterceptedData) bool { - if w.IsForCurrentShardCalled != nil { - return w.IsForCurrentShardCalled(interceptedData) - } - return true -} - -func (w *WhiteListHandlerStub) Remove(keys [][]byte) { - if w.RemoveCalled != nil { - w.RemoveCalled(keys) - } -} - -func (w *WhiteListHandlerStub) Add(keys [][]byte) { - if w.AddCalled != nil { - w.AddCalled(keys) - } -} - -func (w *WhiteListHandlerStub) IsInterfaceNil() bool { - return w == nil -} diff --git a/epochStart/bootstrap/simpleMetaBlockInterceptor.go b/epochStart/bootstrap/simpleMetaBlockInterceptor.go deleted file mode 100644 index 6a8ca7a02d8..00000000000 --- a/epochStart/bootstrap/simpleMetaBlockInterceptor.go +++ /dev/null @@ -1,132 +0,0 @@ -package bootstrap - -import ( - "bytes" - "sync" - "time" - - "github.com/ElrondNetwork/elrond-go/core" - "github.com/ElrondNetwork/elrond-go/core/check" - "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/hashing" - "github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/p2p" - "github.com/ElrondNetwork/elrond-go/process" -) - -type simpleMetaBlockInterceptor struct { - marshalizer marshal.Marshalizer - hasher hashing.Hasher - mutReceivedMetaBlocks sync.RWMutex - mapReceivedMetaBlocks map[string]*block.MetaBlock - mapMetaBlocksFromPeers map[string][]p2p.PeerID -} - -// SetIsDataForCurrentShardVerifier - -func (s *simpleMetaBlockInterceptor) SetIsDataForCurrentShardVerifier(_ process.InterceptedDataVerifier) error { - return nil -} - -// NewSimpleMetaBlockInterceptor will return a new instance of simpleMetaBlockInterceptor -func NewSimpleMetaBlockInterceptor(marshalizer marshal.Marshalizer, hasher hashing.Hasher) (*simpleMetaBlockInterceptor, error) { - if check.IfNil(marshalizer) { - return nil, ErrNilMarshalizer - } - if check.IfNil(hasher) { - return nil, ErrNilHasher - } - - return &simpleMetaBlockInterceptor{ - marshalizer: marshalizer, - hasher: hasher, - mutReceivedMetaBlocks: sync.RWMutex{}, - mapReceivedMetaBlocks: make(map[string]*block.MetaBlock), - mapMetaBlocksFromPeers: make(map[string][]p2p.PeerID), - }, nil -} - -// ProcessReceivedMessage will receive the metablocks and will add them to the maps -func (s *simpleMetaBlockInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, _ func(buffToSend []byte)) error { - var mb block.MetaBlock - err := s.marshalizer.Unmarshal(&mb, message.Data()) - if err != nil { - return err - } - - s.mutReceivedMetaBlocks.Lock() - mbHash, err := 
core.CalculateHash(s.marshalizer, s.hasher, &mb) - if err != nil { - s.mutReceivedMetaBlocks.Unlock() - return err - } - - s.mapReceivedMetaBlocks[string(mbHash)] = &mb - s.addToPeerList(string(mbHash), message.Peer()) - s.mutReceivedMetaBlocks.Unlock() - - return nil -} - -// this func should be called under mutex protection -func (s *simpleMetaBlockInterceptor) addToPeerList(hash string, id p2p.PeerID) { - peersListForHash, ok := s.mapMetaBlocksFromPeers[hash] - if !ok { - s.mapMetaBlocksFromPeers[hash] = append(s.mapMetaBlocksFromPeers[hash], id) - return - } - - for _, peer := range peersListForHash { - if peer == id { - return - } - } - - s.mapMetaBlocksFromPeers[hash] = append(s.mapMetaBlocksFromPeers[hash], id) -} - -// GetMetaBlock will return the metablock after it is confirmed or an error if the number of tries was exceeded -func (s *simpleMetaBlockInterceptor) GetMetaBlock(hash []byte, target int) (*block.MetaBlock, error) { - // TODO : replace this with a channel which will be written in when data is ready - for count := 0; count < numTriesUntilExit; count++ { - time.Sleep(timeToWaitBeforeCheckingReceivedHeaders) - s.mutReceivedMetaBlocks.RLock() - for hashInMap, peersList := range s.mapMetaBlocksFromPeers { - isOk := s.isMapEntryOk(hash, peersList, hashInMap, target) - if isOk { - s.mutReceivedMetaBlocks.RUnlock() - return s.mapReceivedMetaBlocks[hashInMap], nil - } - } - s.mutReceivedMetaBlocks.RUnlock() - } - - return nil, ErrNumTriesExceeded -} - -func (s *simpleMetaBlockInterceptor) isMapEntryOk( - expectedHash []byte, - peersList []p2p.PeerID, - hash string, - target int, -) bool { - mb, ok := s.mapReceivedMetaBlocks[string(expectedHash)] - if !ok { - return false - } - - mbHash, err := core.CalculateHash(s.marshalizer, s.hasher, mb) - if err != nil { - return false - } - if bytes.Equal(expectedHash, mbHash) && len(peersList) >= target { - log.Info("got consensus for metablock", "len", len(peersList)) - return true - } - - return false -} - -// IsInterfaceNil returns true if there is no value under the interface -func (s *simpleMetaBlockInterceptor) IsInterfaceNil() bool { - return s == nil -} diff --git a/epochStart/bootstrap/simpleMiniBlockInterceptor.go b/epochStart/bootstrap/simpleMiniBlockInterceptor.go deleted file mode 100644 index c4ecd963a7d..00000000000 --- a/epochStart/bootstrap/simpleMiniBlockInterceptor.go +++ /dev/null @@ -1,132 +0,0 @@ -package bootstrap - -import ( - "bytes" - "sync" - "time" - - "github.com/ElrondNetwork/elrond-go/core" - "github.com/ElrondNetwork/elrond-go/core/check" - "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/hashing" - "github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/p2p" - "github.com/ElrondNetwork/elrond-go/process" -) - -type simpleMiniBlockInterceptor struct { - marshalizer marshal.Marshalizer - hasher hashing.Hasher - mutReceivedMiniBlocks sync.RWMutex - mapReceivedMiniBlocks map[string]*block.MiniBlock - mapMiniBlocksFromPeers map[string][]p2p.PeerID -} - -// NewSimpleMiniBlockInterceptor will return a new instance of simpleShardHeaderInterceptor -func NewSimpleMiniBlockInterceptor(marshalizer marshal.Marshalizer, hasher hashing.Hasher) (*simpleMiniBlockInterceptor, error) { - if check.IfNil(marshalizer) { - return nil, ErrNilMarshalizer - } - if check.IfNil(hasher) { - return nil, ErrNilHasher - } - - return &simpleMiniBlockInterceptor{ - marshalizer: marshalizer, - hasher: hasher, - mutReceivedMiniBlocks: sync.RWMutex{}, - mapReceivedMiniBlocks: 
make(map[string]*block.MiniBlock), - mapMiniBlocksFromPeers: make(map[string][]p2p.PeerID), - }, nil -} - -// SetIsDataForCurrentShardVerifier - -func (s *simpleMiniBlockInterceptor) SetIsDataForCurrentShardVerifier(_ process.InterceptedDataVerifier) error { - return nil -} - -// ProcessReceivedMessage will receive the metablocks and will add them to the maps -func (s *simpleMiniBlockInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, _ func(buffToSend []byte)) error { - var mb block.MiniBlock - err := s.marshalizer.Unmarshal(&mb, message.Data()) - if err != nil { - return err - } - - s.mutReceivedMiniBlocks.Lock() - mbHash, err := core.CalculateHash(s.marshalizer, s.hasher, &mb) - if err != nil { - s.mutReceivedMiniBlocks.Unlock() - return err - } - - s.mapReceivedMiniBlocks[string(mbHash)] = &mb - s.addToPeerList(string(mbHash), message.Peer()) - s.mutReceivedMiniBlocks.Unlock() - - return nil -} - -// this func should be called under mutex protection -func (s *simpleMiniBlockInterceptor) addToPeerList(hash string, id p2p.PeerID) { - peersListForHash, ok := s.mapMiniBlocksFromPeers[hash] - if !ok { - s.mapMiniBlocksFromPeers[hash] = append(s.mapMiniBlocksFromPeers[hash], id) - return - } - - for _, peer := range peersListForHash { - if peer == id { - return - } - } - - s.mapMiniBlocksFromPeers[hash] = append(s.mapMiniBlocksFromPeers[hash], id) -} - -// GetMiniBlock will return the miniblock with the given hash -func (s *simpleMiniBlockInterceptor) GetMiniBlock(hash []byte, target int) (*block.MiniBlock, error) { - // TODO : replace this with a channel which will be written in when data is ready - for count := 0; count < numTriesUntilExit; count++ { - time.Sleep(timeToWaitBeforeCheckingReceivedHeaders) - s.mutReceivedMiniBlocks.RLock() - for hashInMap, peersList := range s.mapMiniBlocksFromPeers { - isOk := s.isMapEntryOk(hash, peersList, hashInMap, target) - if isOk { - s.mutReceivedMiniBlocks.RUnlock() - return s.mapReceivedMiniBlocks[hashInMap], nil - } - } - s.mutReceivedMiniBlocks.RUnlock() - } - - return nil, ErrNumTriesExceeded -} - -func (s *simpleMiniBlockInterceptor) isMapEntryOk( - expectedHash []byte, - peersList []p2p.PeerID, - hash string, - target int, -) bool { - mb, ok := s.mapReceivedMiniBlocks[string(expectedHash)] - if !ok { - return false - } - - mbHash, err := core.CalculateHash(s.marshalizer, s.hasher, mb) - if err != nil { - return false - } - if bytes.Equal(expectedHash, mbHash) && len(peersList) >= target { - log.Info("got consensus for mini block", "len", len(peersList)) - return true - } - - return false -} - -// IsInterfaceNil returns true if there is no value under the interface -func (s *simpleMiniBlockInterceptor) IsInterfaceNil() bool { - return s == nil -} diff --git a/epochStart/bootstrap/simpleShardHeaderInterceptor.go b/epochStart/bootstrap/simpleShardHeaderInterceptor.go deleted file mode 100644 index ccad5c07874..00000000000 --- a/epochStart/bootstrap/simpleShardHeaderInterceptor.go +++ /dev/null @@ -1,132 +0,0 @@ -package bootstrap - -import ( - "bytes" - "sync" - "time" - - "github.com/ElrondNetwork/elrond-go/core" - "github.com/ElrondNetwork/elrond-go/core/check" - "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/hashing" - "github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/p2p" - "github.com/ElrondNetwork/elrond-go/process" -) - -type simpleShardHeaderInterceptor struct { - marshalizer marshal.Marshalizer - hasher hashing.Hasher - mutReceivedShardHeaders sync.RWMutex - 
mapReceivedShardHeaders map[string]*block.Header - mapShardHeadersFromPeers map[string][]p2p.PeerID -} - -// NewSimpleShardHeaderInterceptor will return a new instance of simpleShardHeaderInterceptor -func NewSimpleShardHeaderInterceptor(marshalizer marshal.Marshalizer, hasher hashing.Hasher) (*simpleShardHeaderInterceptor, error) { - if check.IfNil(marshalizer) { - return nil, ErrNilMarshalizer - } - if check.IfNil(hasher) { - return nil, ErrNilHasher - } - - return &simpleShardHeaderInterceptor{ - marshalizer: marshalizer, - hasher: hasher, - mutReceivedShardHeaders: sync.RWMutex{}, - mapReceivedShardHeaders: make(map[string]*block.Header), - mapShardHeadersFromPeers: make(map[string][]p2p.PeerID), - }, nil -} - -// SetIsDataForCurrentShardVerifier - -func (s *simpleShardHeaderInterceptor) SetIsDataForCurrentShardVerifier(_ process.InterceptedDataVerifier) error { - return nil -} - -// ProcessReceivedMessage will receive the metablocks and will add them to the maps -func (s *simpleShardHeaderInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, _ func(buffToSend []byte)) error { - var mb block.Header - err := s.marshalizer.Unmarshal(&mb, message.Data()) - if err != nil { - return err - } - - s.mutReceivedShardHeaders.Lock() - mbHash, err := core.CalculateHash(s.marshalizer, s.hasher, &mb) - if err != nil { - s.mutReceivedShardHeaders.Unlock() - return err - } - - s.mapReceivedShardHeaders[string(mbHash)] = &mb - s.addToPeerList(string(mbHash), message.Peer()) - s.mutReceivedShardHeaders.Unlock() - - return nil -} - -// this func should be called under mutex protection -func (s *simpleShardHeaderInterceptor) addToPeerList(hash string, id p2p.PeerID) { - peersListForHash, ok := s.mapShardHeadersFromPeers[hash] - if !ok { - s.mapShardHeadersFromPeers[hash] = append(s.mapShardHeadersFromPeers[hash], id) - return - } - - for _, peer := range peersListForHash { - if peer == id { - return - } - } - - s.mapShardHeadersFromPeers[hash] = append(s.mapShardHeadersFromPeers[hash], id) -} - -// GetShardHeader will return the shard header -func (s *simpleShardHeaderInterceptor) GetShardHeader(hash []byte, target int) (*block.Header, error) { - // TODO : replace this with a channel which will be written in when data is ready - for count := 0; count < numTriesUntilExit; count++ { - time.Sleep(timeToWaitBeforeCheckingReceivedHeaders) - s.mutReceivedShardHeaders.RLock() - for hashInMap, peersList := range s.mapShardHeadersFromPeers { - isOk := s.isMapEntryOk(hash, peersList, hashInMap, target) - if isOk { - s.mutReceivedShardHeaders.RUnlock() - return s.mapReceivedShardHeaders[hashInMap], nil - } - } - s.mutReceivedShardHeaders.RUnlock() - } - - return nil, ErrNumTriesExceeded -} - -func (s *simpleShardHeaderInterceptor) isMapEntryOk( - expectedHash []byte, - peersList []p2p.PeerID, - hashInMap string, - target int, -) bool { - mb, ok := s.mapReceivedShardHeaders[string(expectedHash)] - if !ok { - return false - } - - hdrHash, err := core.CalculateHash(s.marshalizer, s.hasher, mb) - if err != nil { - return false - } - if bytes.Equal(expectedHash, hdrHash) && len(peersList) >= target { - log.Info("got consensus for shard block", "len", len(peersList)) - return true - } - - return false -} - -// IsInterfaceNil returns true if there is no value under the interface -func (s *simpleShardHeaderInterceptor) IsInterfaceNil() bool { - return s == nil -} From 1100e61050e751dcae219213dfcb0277b4620d3e Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Fri, 20 Mar 2020 13:43:11 +0200 Subject: [PATCH 25/61] fixes, 
moving implementation to bootstrapper. --- cmd/node/main.go | 50 +++++++++++++------------------ epochStart/bootstrap/interface.go | 3 +- 2 files changed, 23 insertions(+), 30 deletions(-) diff --git a/cmd/node/main.go b/cmd/node/main.go index ff337927127..dd863124033 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -462,7 +462,7 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { } log.Debug("config", "file", ctx.GlobalString(genesisFile.Name)) - nodesConfig, err := sharding.NewNodesSetup(ctx.GlobalString(nodesFile.Name)) + genesisNodesConfig, err := sharding.NewNodesSetup(ctx.GlobalString(nodesFile.Name)) if err != nil { return err } @@ -474,13 +474,13 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { log.Debug("NTP average clock offset", "value", syncer.ClockOffset()) //TODO: The next 5 lines should be deleted when we are done testing from a precalculated (not hard coded) timestamp - if nodesConfig.StartTime == 0 { + if genesisNodesConfig.StartTime == 0 { time.Sleep(1000 * time.Millisecond) ntpTime := syncer.CurrentTime() - nodesConfig.StartTime = (ntpTime.Unix()/60 + 1) * 60 + genesisNodesConfig.StartTime = (ntpTime.Unix()/60 + 1) * 60 } - startTime := time.Unix(nodesConfig.StartTime, 0) + startTime := time.Unix(genesisNodesConfig.StartTime, 0) log.Info("start time", "formatted", startTime.Format("Mon Jan 2 15:04:05 MST 2006"), @@ -538,7 +538,7 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { pathTemplateForPruningStorer := filepath.Join( workingDir, defaultDBPath, - nodesConfig.ChainID, + genesisNodesConfig.ChainID, fmt.Sprintf("%s_%s", defaultEpochString, core.PathEpochPlaceholder), fmt.Sprintf("%s_%s", defaultShardString, core.PathShardPlaceholder), core.PathIdentifierPlaceholder) @@ -546,7 +546,7 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { pathTemplateForStaticStorer := filepath.Join( workingDir, defaultDBPath, - nodesConfig.ChainID, + genesisNodesConfig.ChainID, defaultStaticDbString, fmt.Sprintf("%s_%s", defaultShardString, core.PathShardPlaceholder), core.PathIdentifierPlaceholder) @@ -562,7 +562,7 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { return err } - genesisShardCoordinator, nodeType, err := createShardCoordinator(nodesConfig, pubKey, preferencesConfig.Preferences, log) + genesisShardCoordinator, nodeType, err := createShardCoordinator(genesisNodesConfig, pubKey, preferencesConfig.Preferences, log) if err != nil { return err } @@ -572,7 +572,7 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { cryptoArgs := factory.NewCryptoComponentsFactoryArgs( ctx, generalConfig, - nodesConfig, + genesisNodesConfig, genesisShardCoordinator, keyGen, privKey, @@ -584,7 +584,7 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { } log.Trace("creating core components") - coreArgs := factory.NewCoreComponentsFactoryArgs(generalConfig, pathManager, shardId, []byte(nodesConfig.ChainID)) + coreArgs := factory.NewCoreComponentsFactoryArgs(generalConfig, pathManager, shardId, []byte(genesisNodesConfig.ChainID)) coreComponents, err := factory.CoreComponentsFactory(coreArgs) if err != nil { return err @@ -605,10 +605,10 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { Messenger: networkComponents.NetMessenger, Marshalizer: marshalizer, Hasher: hasher, - NodesConfigProvider: nodesconfigprovider.NewSimpleNodesConfigProvider(nodesConfig), + 
NodesConfigProvider: nodesconfigprovider.NewSimpleNodesConfigProvider(genesisNodesConfig), PathManager: pathManager, StartTime: startTime, - OriginalNodesConfig: nodesConfig, + OriginalNodesConfig: genesisNodesConfig, GeneralConfig: generalConfig, WorkingDir: workingDir, DefaultDBPath: defaultDBPath, @@ -617,11 +617,11 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { Messenger: networkComponents.NetMessenger, Marshalizer: coreComponents.InternalMarshalizer, Hasher: coreComponents.Hasher, - NodesConfigProvider: nodesconfigprovider.NewSimpleNodesConfigProvider(nodesConfig), + NodesConfigProvider: nodesconfigprovider.NewSimpleNodesConfigProvider(genesisNodesConfig), DefaultShardCoordinator: shardCoordinator, PathManager: pathManager, StartTime: startTime, - OriginalNodesConfig: nodesConfig, + OriginalNodesConfig: genesisNodesConfig, EconomicsConfig: economicsConfig, GeneralConfig: generalConfig, KeyGen: cryptoComponents.TxSignKeyGen, @@ -642,15 +642,7 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { } res, err := epochStartDataProvider.Bootstrap() - isFreshStart := err != nil - if !isFreshStart { - nodesConfig = res.NodesConfig - currentEpoch = res.EpochStartMetaBlock.Epoch - bootstrapRoundIndex.Value = res.EpochStartMetaBlock.Round - log.Info("received epoch start metablock from network", - "nonce", res.EpochStartMetaBlock.GetNonce(), - "epoch", res.EpochStartMetaBlock.GetEpoch()) - } else { + if err != nil { log.Error("error bootstrapping", "error", err) } @@ -688,7 +680,7 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { } log.Trace("initializing metrics") - metrics.InitMetrics(coreComponents.StatusHandler, pubKey, nodeType, shardCoordinator, nodesConfig, version, economicsConfig) + metrics.InitMetrics(coreComponents.StatusHandler, pubKey, nodeType, shardCoordinator, genesisNodesConfig, version, economicsConfig) err = statusHandlersInfo.UpdateStorerAndMetricsForPersistentHandler(dataComponents.Store.GetStorer(dataRetriever.StatusMetricsUnit)) if err != nil { @@ -707,7 +699,7 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { } nodesCoordinator, err := createNodesCoordinator( - nodesConfig, + genesisNodesConfig, preferencesConfig.Preferences, epochStartNotifier, pubKey, @@ -738,7 +730,7 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { } metrics.SaveStringMetric(coreComponents.StatusHandler, core.MetricNodeDisplayName, preferencesConfig.Preferences.NodeDisplayName) - metrics.SaveStringMetric(coreComponents.StatusHandler, core.MetricChainId, nodesConfig.ChainID) + metrics.SaveStringMetric(coreComponents.StatusHandler, core.MetricChainId, genesisNodesConfig.ChainID) metrics.SaveUint64Metric(coreComponents.StatusHandler, core.MetricMinGasPrice, economicsData.MinGasPrice()) sessionInfoFileOutput := fmt.Sprintf("%s:%s\n%s:%s\n%s:%v\n%s:%s\n%s:%v\n", @@ -775,7 +767,7 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { log.LogIfError(err) log.Trace("creating tps benchmark components") - tpsBenchmark, err := statistics.NewTPSBenchmark(shardCoordinator.NumberOfShards(), nodesConfig.RoundDuration/1000) + tpsBenchmark, err := statistics.NewTPSBenchmark(shardCoordinator.NumberOfShards(), genesisNodesConfig.RoundDuration/1000) if err != nil { return err } @@ -807,7 +799,7 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { } log.Trace("creating time cache for requested items components") - 
requestedItemsHandler := timecache.NewTimeCache(time.Duration(uint64(time.Millisecond) * nodesConfig.RoundDuration)) + requestedItemsHandler := timecache.NewTimeCache(time.Duration(uint64(time.Millisecond) * genesisNodesConfig.RoundDuration)) whiteListCache, err := storageUnit.NewCache( storageUnit.CacheType(generalConfig.WhiteListPool.Type), @@ -827,7 +819,7 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { coreArgs, genesisConfig, economicsData, - nodesConfig, + genesisNodesConfig, gasSchedule, syncer, shardCoordinator, @@ -864,7 +856,7 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { currentNode, err := createNode( generalConfig, preferencesConfig, - nodesConfig, + genesisNodesConfig, economicsData, syncer, keyGen, diff --git a/epochStart/bootstrap/interface.go b/epochStart/bootstrap/interface.go index 314a90a7e28..67f38547823 100644 --- a/epochStart/bootstrap/interface.go +++ b/epochStart/bootstrap/interface.go @@ -39,7 +39,8 @@ type NodesConfigProviderHandler interface { // EpochStartDataProviderHandler defines what a component which fetches the data needed for starting in an epoch should do type EpochStartDataProviderHandler interface { - Bootstrap() (*structs.ComponentsNeededForBootstrap, error) + Bootstrap() (uint32, uint32, uint32, error) + IsInterfaceNil() bool } // PathManagerHandler defines which actions should be done for generating paths for databases directories From e6f52bdb810e9ccff8552068be429b1ebf4eb8ee Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Fri, 20 Mar 2020 14:45:30 +0200 Subject: [PATCH 26/61] fixes --- cmd/node/main.go | 69 +++---- .../bootstrap/epochStartDataProvider.go | 24 ++- epochStart/bootstrap/errors.go | 60 ------ .../factory/disabledEpochStartDataProvider.go | 15 -- .../factory/epochStartDataProviderFactory.go | 188 ------------------ .../epochStartInterceptorsContainerFactory.go | 2 +- epochStart/bootstrap/interface.go | 57 ------ .../simpleNodesConfigProvider.go | 27 --- epochStart/bootstrap/structs/components.go | 19 -- 9 files changed, 50 insertions(+), 411 deletions(-) delete mode 100644 epochStart/bootstrap/errors.go delete mode 100644 epochStart/bootstrap/factory/disabledEpochStartDataProvider.go delete mode 100644 epochStart/bootstrap/factory/epochStartDataProviderFactory.go rename epochStart/bootstrap/factory/{interceptors => }/epochStartInterceptorsContainerFactory.go (99%) delete mode 100644 epochStart/bootstrap/interface.go delete mode 100644 epochStart/bootstrap/nodesconfigprovider/simpleNodesConfigProvider.go delete mode 100644 epochStart/bootstrap/structs/components.go diff --git a/cmd/node/main.go b/cmd/node/main.go index dd863124033..c381d55874d 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -32,6 +32,7 @@ import ( "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/display" "github.com/ElrondNetwork/elrond-go/epochStart" + "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap" factoryEpochBootstrap "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/factory" "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/nodesconfigprovider" "github.com/ElrondNetwork/elrond-go/epochStart/notifier" @@ -600,50 +601,44 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { } time.Sleep(secondsToWaitForP2PBootstrap * time.Second) - epochStartComponentArgs := factoryEpochBootstrap.EpochStartDataProviderFactoryArgs{ - PubKey: pubKey, - Messenger: networkComponents.NetMessenger, - Marshalizer: marshalizer, - 
Hasher: hasher, - NodesConfigProvider: nodesconfigprovider.NewSimpleNodesConfigProvider(genesisNodesConfig), - PathManager: pathManager, - StartTime: startTime, - OriginalNodesConfig: genesisNodesConfig, - GeneralConfig: generalConfig, - WorkingDir: workingDir, - DefaultDBPath: defaultDBPath, - DefaultEpochString: defaultEpochString, - PubKey: pubKey, - Messenger: networkComponents.NetMessenger, - Marshalizer: coreComponents.InternalMarshalizer, - Hasher: coreComponents.Hasher, - NodesConfigProvider: nodesconfigprovider.NewSimpleNodesConfigProvider(genesisNodesConfig), - DefaultShardCoordinator: shardCoordinator, - PathManager: pathManager, - StartTime: startTime, - OriginalNodesConfig: genesisNodesConfig, - EconomicsConfig: economicsConfig, - GeneralConfig: generalConfig, - KeyGen: cryptoComponents.TxSignKeyGen, - BlockKeyGen: cryptoComponents.BlockSignKeyGen, - SingleSigner: cryptoComponents.TxSingleSigner, - BlockSingleSigner: cryptoComponents.SingleSigner, - IsEpochFoundInStorage: epochFoundInStorage, - } - - epochStartComponentFactory, err := factoryEpochBootstrap.NewEpochStartDataProviderFactory(epochStartComponentArgs) - if err != nil { + epochStartBootstrapArgs := bootstrap.ArgsEpochStartDataProvider{ + PublicKey: nil, + Messenger: nil, + Marshalizer: nil, + Hasher: nil, + GeneralConfig: config.Config{}, + EconomicsConfig: config.EconomicsConfig{}, + DefaultShardCoordinator: nil, + PathManager: nil, + NodesConfigProvider: nil, + EpochStartMetaBlockInterceptor: nil, + MetaBlockInterceptor: nil, + ShardHeaderInterceptor: nil, + MiniBlockInterceptor: nil, + SingleSigner: nil, + BlockSingleSigner: nil, + KeyGen: nil, + BlockKeyGen: nil, + WhiteListHandler: nil, + GenesisNodesConfig: nil, + WorkingDir: "", + DefaultDBPath: "", + DefaultEpochString: "", + } + bootstrapper, err := bootstrap.NewEpochStartDataProvider(epochStartBootstrapArgs) + if err != nil { + log.Error("could not create bootstrapper", "err", err) return err } - - epochStartDataProvider, err := epochStartComponentFactory.Create() + currentEpoch, currentShardId, shardNumber, err := bootstrapper.Bootstrap() if err != nil { + log.Error("bootstrap returned error", "error", err) return err } - res, err := epochStartDataProvider.Bootstrap() + shardCoordinator, err := sharding.NewMultiShardCoordinator(shardNumber, currentShardId) if err != nil { - log.Error("error bootstrapping", "error", err) + return err } log.Trace("creating economics data components") diff --git a/epochStart/bootstrap/epochStartDataProvider.go b/epochStart/bootstrap/epochStartDataProvider.go index 684be1b717c..6e35fb20ee9 100644 --- a/epochStart/bootstrap/epochStartDataProvider.go +++ b/epochStart/bootstrap/epochStartDataProvider.go @@ -22,9 +22,8 @@ import ( "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/resolverscontainer" "github.com/ElrondNetwork/elrond-go/dataRetriever/requestHandlers" "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/disabled" - "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/factory/interceptors" + factory3 "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/factory" "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/storagehandler" - "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/structs" "github.com/ElrondNetwork/elrond-go/hashing" "github.com/ElrondNetwork/elrond-go/logger" "github.com/ElrondNetwork/elrond-go/marshal" @@ -40,8 +39,7 @@ import ( "github.com/ElrondNetwork/elrond-go/update/sync" ) -var log = logger.GetOrCreate("registration") -var _ process.Interceptor = 
(*simpleMetaBlockInterceptor)(nil) +var log = logger.GetOrCreate("epochStart/bootstrap") const delayBetweenRequests = 1 * time.Second const delayAfterRequesting = 1 * time.Second @@ -49,6 +47,18 @@ const thresholdForConsideringMetaBlockCorrect = 0.2 const numRequestsToSendOnce = 4 const maxNumTimesToRetry = 100 +// ComponentsNeededForBootstrap holds the components which need to be initialized from network +type ComponentsNeededForBootstrap struct { + EpochStartMetaBlock *block.MetaBlock + PreviousEpochStartMetaBlock *block.MetaBlock + ShardHeader *block.Header //only for shards, nil for meta + NodesConfig *sharding.NodesSetup + ShardHeaders map[uint32]*block.Header + ShardCoordinator sharding.Coordinator + Tries state.TriesHolder + PendingMiniBlocks map[string]*block.MiniBlock +} + // epochStartDataProvider will handle requesting the needed data to start when joining late the network type epochStartDataProvider struct { publicKey crypto.PublicKey @@ -253,7 +263,7 @@ func (esdp *epochStartDataProvider) searchDataInLocalStorage() { } // Bootstrap will handle requesting and receiving the needed information the node will bootstrap from -func (esdp *epochStartDataProvider) Bootstrap() (*structs.ComponentsNeededForBootstrap, error) { +func (esdp *epochStartDataProvider) Bootstrap() (uint32, uint32, uint32, error) { // TODO: add searching for epoch start metablock and other data inside this component interceptorsContainer, err := esdp.createInterceptors(commonDataPool) @@ -414,7 +424,7 @@ func (esdp *epochStartDataProvider) getMiniBlocks( } func (esdp *epochStartDataProvider) createInterceptors(dataPool dataRetriever.PoolsHolder) (process.InterceptorsContainer, error) { - args := interceptors.ArgsEpochStartInterceptorContainer{ + args := factory3.ArgsEpochStartInterceptorContainer{ Config: esdp.generalConfig, ShardCoordinator: esdp.defaultShardCoordinator, Marshalizer: esdp.marshalizer, @@ -428,7 +438,7 @@ func (esdp *epochStartDataProvider) createInterceptors(dataPool dataRetriever.Po WhiteListHandler: esdp.whiteListHandler, } - return interceptors.NewEpochStartInterceptorsContainer(args) + return factory3.NewEpochStartInterceptorsContainer(args) } func (esdp *epochStartDataProvider) changeMessageProcessorsForMetaBlocks() { diff --git a/epochStart/bootstrap/errors.go b/epochStart/bootstrap/errors.go deleted file mode 100644 index 87b6706ca42..00000000000 --- a/epochStart/bootstrap/errors.go +++ /dev/null @@ -1,60 +0,0 @@ -package bootstrap - -import "errors" - -// ErrNilPublicKey signals that a nil public key has been provided -var ErrNilPublicKey = errors.New("nil public key") - -// ErrNilMessenger signals that a nil messenger has been provided -var ErrNilMessenger = errors.New("nil messenger") - -// ErrNilMarshalizer signals that a nil marshalizer has been provided -var ErrNilMarshalizer = errors.New("nil marshalizer") - -// ErrNilPathManager signals that a nil path manager has been provided -var ErrNilPathManager = errors.New("nil path manager") - -// ErrNilHasher signals that a nil hasher has been provider -var ErrNilHasher = errors.New("nil hasher") - -// ErrNilNodesConfigProvider signals that a nil nodes config provider has been given -var ErrNilNodesConfigProvider = errors.New("nil nodes config provider") - -// ErrNilDefaultShardCoordinator signals that a nil default shard coordinator -var ErrNilDefaultShardCoordinator = errors.New("nil default shard coordinator") - -// ErrNilEpochStartMetaBlockInterceptor signals that a epoch start metablock interceptor has been provided -var 
ErrNilEpochStartMetaBlockInterceptor = errors.New("nil epoch start metablock interceptor") - -// ErrNilMetaBlockInterceptor signals that a metablock interceptor has been provided -var ErrNilMetaBlockInterceptor = errors.New("nil metablock interceptor") - -// ErrNilShardHeaderInterceptor signals that a nil shard header interceptor has been provided -var ErrNilShardHeaderInterceptor = errors.New("nil shard header interceptor") - -// ErrNilMiniBlockInterceptor signals that a nil mini block interceptor has been provided -var ErrNilMiniBlockInterceptor = errors.New("nil mini block interceptor") - -// ErrNumTriesExceeded signals that there were too many tries for fetching a metablock -var ErrNumTriesExceeded = errors.New("num of tries exceeded. try re-request") - -// ErrNilShardCoordinator signals that a nil shard coordinator has been provided -var ErrNilShardCoordinator = errors.New("nil shard coordinator") - -// ErrNilWhiteListHandler signals a that a nil white list handler has been provided -var ErrNilWhiteListHandler = errors.New("nil white list handler") - -// ErrNilSingleSigner signals a that a nil single signer has been provided -var ErrNilSingleSigner = errors.New("nil single signer") - -// ErrNilBlockSingleSigner signals a that a nil single signer has been provided -var ErrNilBlockSingleSigner = errors.New("nil block single signer") - -// ErrNilKeyGen signals a that a nil key gen has been provided -var ErrNilKeyGen = errors.New("nil key gen") - -// ErrNilBlockKeyGen signals a that a nil key gen has been provided -var ErrNilBlockKeyGen = errors.New("nil block key gen") - -// ErrShardDataNotFound signals that no shard header has been found for the calculated shard -var ErrShardDataNotFound = errors.New("shard data not found") diff --git a/epochStart/bootstrap/factory/disabledEpochStartDataProvider.go b/epochStart/bootstrap/factory/disabledEpochStartDataProvider.go deleted file mode 100644 index f15e932b743..00000000000 --- a/epochStart/bootstrap/factory/disabledEpochStartDataProvider.go +++ /dev/null @@ -1,15 +0,0 @@ -package factory - -import ( - "errors" - - "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/structs" -) - -type disabledEpochStartDataProvider struct { -} - -// Bootstrap will return an error indicating that the sync is not needed -func (d *disabledEpochStartDataProvider) Bootstrap() (*structs.ComponentsNeededForBootstrap, error) { - return nil, errors.New("sync not needed") -} diff --git a/epochStart/bootstrap/factory/epochStartDataProviderFactory.go b/epochStart/bootstrap/factory/epochStartDataProviderFactory.go deleted file mode 100644 index e03085b866b..00000000000 --- a/epochStart/bootstrap/factory/epochStartDataProviderFactory.go +++ /dev/null @@ -1,188 +0,0 @@ -package factory - -import ( - "time" - - "github.com/ElrondNetwork/elrond-go/config" - "github.com/ElrondNetwork/elrond-go/core/check" - "github.com/ElrondNetwork/elrond-go/crypto" - "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap" - "github.com/ElrondNetwork/elrond-go/hashing" - "github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/p2p" - "github.com/ElrondNetwork/elrond-go/process/interceptors" - "github.com/ElrondNetwork/elrond-go/sharding" - "github.com/ElrondNetwork/elrond-go/storage" - "github.com/ElrondNetwork/elrond-go/storage/storageUnit" -) - -type epochStartDataProviderFactory struct { - pubKey crypto.PublicKey - messenger p2p.Messenger - marshalizer marshal.Marshalizer - hasher hashing.Hasher - pathManager storage.PathManagerHandler - nodesConfigProvider 
bootstrap.NodesConfigProviderHandler - generalConfig config.Config - economicsConfig config.EconomicsConfig - defaultShardCoordinator sharding.Coordinator - singleSigner crypto.SingleSigner - blockSingleSigner crypto.SingleSigner - keyGen crypto.KeyGenerator - blockKeyGen crypto.KeyGenerator - shouldSync bool - workingDir string - defaultDBPath string - defaultEpochString string -} - -// EpochStartDataProviderFactoryArgs holds the arguments needed for creating aa factory for the epoch start data -// provider component -type EpochStartDataProviderFactoryArgs struct { - PubKey crypto.PublicKey - Messenger p2p.Messenger - Marshalizer marshal.Marshalizer - Hasher hashing.Hasher - NodesConfigProvider bootstrap.NodesConfigProviderHandler - PathManager storage.PathManagerHandler - DefaultShardCoordinator sharding.Coordinator - StartTime time.Time - OriginalNodesConfig *sharding.NodesSetup - GeneralConfig *config.Config - EconomicsConfig *config.EconomicsConfig - SingleSigner crypto.SingleSigner - BlockSingleSigner crypto.SingleSigner - KeyGen crypto.KeyGenerator - BlockKeyGen crypto.KeyGenerator - IsEpochFoundInStorage bool - WorkingDir string - DefaultDBPath string - DefaultEpochString string -} - -// NewEpochStartDataProviderFactory returns a new instance of epochStartDataProviderFactory -func NewEpochStartDataProviderFactory(args EpochStartDataProviderFactoryArgs) (*epochStartDataProviderFactory, error) { - if check.IfNil(args.PubKey) { - return nil, bootstrap.ErrNilPublicKey - } - if check.IfNil(args.Messenger) { - return nil, bootstrap.ErrNilMessenger - } - if check.IfNil(args.Marshalizer) { - return nil, bootstrap.ErrNilMarshalizer - } - if check.IfNil(args.PathManager) { - return nil, bootstrap.ErrNilPathManager - } - if check.IfNil(args.Hasher) { - return nil, bootstrap.ErrNilHasher - } - if check.IfNil(args.NodesConfigProvider) { - return nil, bootstrap.ErrNilNodesConfigProvider - } - if check.IfNil(args.DefaultShardCoordinator) { - return nil, bootstrap.ErrNilDefaultShardCoordinator - } - if check.IfNil(args.BlockKeyGen) { - return nil, bootstrap.ErrNilBlockKeyGen - } - if check.IfNil(args.KeyGen) { - return nil, bootstrap.ErrNilKeyGen - } - if check.IfNil(args.SingleSigner) { - return nil, bootstrap.ErrNilSingleSigner - } - if check.IfNil(args.BlockSingleSigner) { - return nil, bootstrap.ErrNilBlockSingleSigner - } - - shouldSync := bootstrap.ShouldSyncWithTheNetwork( - args.StartTime, - args.IsEpochFoundInStorage, - args.OriginalNodesConfig, - args.GeneralConfig, - ) - shouldSync = true // hardcoded so we can test we can sync - - return &epochStartDataProviderFactory{ - pubKey: args.PubKey, - messenger: args.Messenger, - marshalizer: args.Marshalizer, - hasher: args.Hasher, - pathManager: args.PathManager, - generalConfig: *args.GeneralConfig, - economicsConfig: *args.EconomicsConfig, - nodesConfigProvider: args.NodesConfigProvider, - defaultShardCoordinator: args.DefaultShardCoordinator, - keyGen: args.KeyGen, - blockKeyGen: args.BlockKeyGen, - singleSigner: args.SingleSigner, - blockSingleSigner: args.BlockSingleSigner, - shouldSync: shouldSync, - workingDir: args.WorkingDir, - defaultEpochString: args.DefaultEpochString, - defaultDBPath: args.DefaultEpochString, - }, nil -} - -// Create will init and return an instance of an epoch start data provider -func (esdpf *epochStartDataProviderFactory) Create() (bootstrap.EpochStartDataProviderHandler, error) { - if !esdpf.shouldSync { - return &disabledEpochStartDataProvider{}, nil - } - - epochStartMetaBlockInterceptor, err := 
bootstrap.NewSimpleEpochStartMetaBlockInterceptor(esdpf.marshalizer, esdpf.hasher) - if err != nil { - return nil, err - } - metaBlockInterceptor, err := bootstrap.NewSimpleMetaBlockInterceptor(esdpf.marshalizer, esdpf.hasher) - if err != nil { - return nil, err - } - shardHdrInterceptor, err := bootstrap.NewSimpleShardHeaderInterceptor(esdpf.marshalizer, esdpf.hasher) - if err != nil { - return nil, err - } - miniBlockInterceptor, err := bootstrap.NewSimpleMiniBlockInterceptor(esdpf.marshalizer, esdpf.hasher) - if err != nil { - return nil, err - } - - whiteListCache, err := storageUnit.NewCache( - storageUnit.CacheType(esdpf.generalConfig.WhiteListPool.Type), - esdpf.generalConfig.WhiteListPool.Size, - esdpf.generalConfig.WhiteListPool.Shards, - ) - if err != nil { - return nil, err - } - whiteListHandler, err := interceptors.NewWhiteListDataVerifier(whiteListCache) - if err != nil { - return nil, err - } - - argsEpochStart := bootstrap.ArgsEpochStartDataProvider{ - PublicKey: esdpf.pubKey, - Messenger: esdpf.messenger, - Marshalizer: esdpf.marshalizer, - Hasher: esdpf.hasher, - NodesConfigProvider: esdpf.nodesConfigProvider, - GeneralConfig: esdpf.generalConfig, - EconomicsConfig: esdpf.economicsConfig, - PathManager: esdpf.pathManager, - SingleSigner: esdpf.singleSigner, - BlockSingleSigner: esdpf.blockSingleSigner, - KeyGen: esdpf.keyGen, - BlockKeyGen: esdpf.blockKeyGen, - DefaultShardCoordinator: esdpf.defaultShardCoordinator, - EpochStartMetaBlockInterceptor: epochStartMetaBlockInterceptor, - MetaBlockInterceptor: metaBlockInterceptor, - ShardHeaderInterceptor: shardHdrInterceptor, - MiniBlockInterceptor: miniBlockInterceptor, - WhiteListHandler: whiteListHandler, - WorkingDir: esdpf.workingDir, - DefaultEpochString: esdpf.defaultEpochString, - DefaultDBPath: esdpf.defaultDBPath, - } - return bootstrap.NewEpochStartDataProvider(argsEpochStart) -} diff --git a/epochStart/bootstrap/factory/interceptors/epochStartInterceptorsContainerFactory.go b/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go similarity index 99% rename from epochStart/bootstrap/factory/interceptors/epochStartInterceptorsContainerFactory.go rename to epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go index d802373503b..03f72411d3c 100644 --- a/epochStart/bootstrap/factory/interceptors/epochStartInterceptorsContainerFactory.go +++ b/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go @@ -1,4 +1,4 @@ -package interceptors +package factory import ( "time" diff --git a/epochStart/bootstrap/interface.go b/epochStart/bootstrap/interface.go deleted file mode 100644 index 67f38547823..00000000000 --- a/epochStart/bootstrap/interface.go +++ /dev/null @@ -1,57 +0,0 @@ -package bootstrap - -import ( - "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/structs" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/sharding" -) - -// EpochStartMetaBlockInterceptorHandler defines what a component which will handle receiving the epoch start meta blocks should do -type EpochStartMetaBlockInterceptorHandler interface { - process.Interceptor - GetEpochStartMetaBlock(target int, epoch uint32) (*block.MetaBlock, error) -} - -// MetaBlockInterceptorHandler defines what a component which will handle receiving the meta blocks should do -type MetaBlockInterceptorHandler interface { - process.Interceptor - GetMetaBlock(hash []byte, target int) (*block.MetaBlock, error) -} - -// 
ShardHeaderInterceptorHandler defines what a component which will handle receiving the the shard headers should do -type ShardHeaderInterceptorHandler interface { - process.Interceptor - GetShardHeader(hash []byte, target int) (*block.Header, error) -} - -// MiniBlockInterceptorHandler defines what a component which will handle receiving the mini blocks should do -type MiniBlockInterceptorHandler interface { - process.Interceptor - GetMiniBlock(hash []byte, target int) (*block.MiniBlock, error) -} - -// NodesConfigProviderHandler defines what a component which will handle the nodes config should be able to do -type NodesConfigProviderHandler interface { - GetNodesConfigForMetaBlock(metaBlock *block.MetaBlock) (*sharding.NodesSetup, error) - IsInterfaceNil() bool -} - -// EpochStartDataProviderHandler defines what a component which fetches the data needed for starting in an epoch should do -type EpochStartDataProviderHandler interface { - Bootstrap() (uint32, uint32, uint32, error) - IsInterfaceNil() bool -} - -// PathManagerHandler defines which actions should be done for generating paths for databases directories -type PathManagerHandler interface { - PathForEpoch(shardId string, epoch uint32, identifier string) string - PathForStatic(shardId string, identifier string) string - IsInterfaceNil() bool -} - -// StorageHandler defines which actions should be done by a component which handles the storage of bootstrap data -type StorageHandler interface { - SaveDataToStorage(components structs.ComponentsNeededForBootstrap) error - IsInterfaceNil() bool -} diff --git a/epochStart/bootstrap/nodesconfigprovider/simpleNodesConfigProvider.go b/epochStart/bootstrap/nodesconfigprovider/simpleNodesConfigProvider.go deleted file mode 100644 index 710ddff5d30..00000000000 --- a/epochStart/bootstrap/nodesconfigprovider/simpleNodesConfigProvider.go +++ /dev/null @@ -1,27 +0,0 @@ -package nodesconfigprovider - -import ( - "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/sharding" -) - -type simpleNodesConfigProvider struct { - originalNodesConfig *sharding.NodesSetup -} - -// NewSimpleNodesConfigProvider returns a new instance of simpleNodesConfigProvider -func NewSimpleNodesConfigProvider(originalNodesConfig *sharding.NodesSetup) *simpleNodesConfigProvider { - return &simpleNodesConfigProvider{ - originalNodesConfig: originalNodesConfig, - } -} - -// GetNodesConfigForMetaBlock will return the original nodes setup -func (sncp *simpleNodesConfigProvider) GetNodesConfigForMetaBlock(_ *block.MetaBlock) (*sharding.NodesSetup, error) { - return sncp.originalNodesConfig, nil -} - -// IsInterfaceNil returns true if there is no value under the interface -func (sncp *simpleNodesConfigProvider) IsInterfaceNil() bool { - return sncp == nil -} diff --git a/epochStart/bootstrap/structs/components.go b/epochStart/bootstrap/structs/components.go deleted file mode 100644 index 8ca51a040f1..00000000000 --- a/epochStart/bootstrap/structs/components.go +++ /dev/null @@ -1,19 +0,0 @@ -package structs - -import ( - "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/sharding" -) - -// ComponentsNeededForBootstrap holds the components which need to be initialized from network -type ComponentsNeededForBootstrap struct { - EpochStartMetaBlock *block.MetaBlock - PreviousEpochStartMetaBlock *block.MetaBlock - ShardHeader *block.Header //only for shards, nil for meta - NodesConfig *sharding.NodesSetup - 
ShardHeaders map[uint32]*block.Header - ShardCoordinator sharding.Coordinator - Tries state.TriesHolder - PendingMiniBlocks map[string]*block.MiniBlock -} From e9714b84c2874b358461ccc7688316813f499f17 Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Fri, 20 Mar 2020 16:01:00 +0200 Subject: [PATCH 27/61] fix after review --- cmd/node/main.go | 2 +- .../bootstrap/epochStartDataProvider.go | 31 ++++++------------- 2 files changed, 11 insertions(+), 22 deletions(-) diff --git a/cmd/node/main.go b/cmd/node/main.go index c381d55874d..f2feaf17335 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -829,7 +829,7 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { whiteListHandler, epochStartNotifier, &generalConfig.EpochStartConfig, - 0, + currentEpoch, rater, generalConfig.Marshalizer.SizeCheckDelta, generalConfig.StateTriesConfig.CheckpointRoundsModulus, diff --git a/epochStart/bootstrap/epochStartDataProvider.go b/epochStart/bootstrap/epochStartDataProvider.go index 6e35fb20ee9..94cd252763f 100644 --- a/epochStart/bootstrap/epochStartDataProvider.go +++ b/epochStart/bootstrap/epochStartDataProvider.go @@ -67,7 +67,7 @@ type epochStartDataProvider struct { messenger p2p.Messenger generalConfig config.Config economicsConfig config.EconomicsConfig - defaultShardCoordinator sharding.Coordinator + genesisShardCoordinator sharding.Coordinator pathManager PathManagerHandler nodesConfigProvider NodesConfigProviderHandler epochStartMetaBlockInterceptor EpochStartMetaBlockInterceptorHandler @@ -85,6 +85,8 @@ type epochStartDataProvider struct { workingDir string defaultDBPath string defaultEpochString string + + dataPool dataRetriever.PoolsHolder } // ArgsEpochStartDataProvider holds the arguments needed for creating an epoch start data provider component @@ -99,9 +101,6 @@ type ArgsEpochStartDataProvider struct { PathManager PathManagerHandler NodesConfigProvider NodesConfigProviderHandler EpochStartMetaBlockInterceptor EpochStartMetaBlockInterceptorHandler - MetaBlockInterceptor MetaBlockInterceptorHandler - ShardHeaderInterceptor ShardHeaderInterceptorHandler - MiniBlockInterceptor MiniBlockInterceptorHandler SingleSigner crypto.SingleSigner BlockSingleSigner crypto.SingleSigner KeyGen crypto.KeyGenerator @@ -136,15 +135,6 @@ func NewEpochStartDataProvider(args ArgsEpochStartDataProvider) (*epochStartData if check.IfNil(args.EpochStartMetaBlockInterceptor) { return nil, ErrNilEpochStartMetaBlockInterceptor } - if check.IfNil(args.MetaBlockInterceptor) { - return nil, ErrNilMetaBlockInterceptor - } - if check.IfNil(args.ShardHeaderInterceptor) { - return nil, ErrNilShardHeaderInterceptor - } - if check.IfNil(args.MiniBlockInterceptor) { - return nil, ErrNilMiniBlockInterceptor - } if check.IfNil(args.WhiteListHandler) { return nil, ErrNilWhiteListHandler } @@ -182,18 +172,12 @@ func NewEpochStartDataProvider(args ArgsEpochStartDataProvider) (*epochStartData workingDir: args.WorkingDir, defaultEpochString: args.DefaultEpochString, defaultDBPath: args.DefaultEpochString, - defaultShardCoordinator: args.DefaultShardCoordinator, keyGen: args.KeyGen, blockKeyGen: args.BlockKeyGen, singleSigner: args.SingleSigner, blockSingleSigner: args.BlockSingleSigner, } - err := epochStartProvider.initInternalComponents() - if err != nil { - return nil, err - } - return epochStartProvider, nil } @@ -222,11 +206,11 @@ func (esdp *epochStartDataProvider) initInternalComponents() error { return nil, err } - commonDataPool, err := factory2.NewDataPoolFromConfig( + esdp.dataPool, err = 
factory2.NewDataPoolFromConfig( factory2.ArgsDataPool{ Config: &esdp.generalConfig, EconomicsData: economicsData, - ShardCoordinator: esdp.defaultShardCoordinator, + ShardCoordinator: esdp.shardCoordinator, }, ) @@ -266,6 +250,11 @@ func (esdp *epochStartDataProvider) searchDataInLocalStorage() { func (esdp *epochStartDataProvider) Bootstrap() (uint32, uint32, uint32, error) { // TODO: add searching for epoch start metablock and other data inside this component + err := esdp.initInternalComponents() + if err != nil { + return nil, err + } + interceptorsContainer, err := esdp.createInterceptors(commonDataPool) if err != nil || interceptorsContainer == nil { return nil, err From 87e05aa1c41944b35e1acd69e8a363292160419d Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Fri, 20 Mar 2020 18:32:42 +0200 Subject: [PATCH 28/61] epoch start processing --- epochStart/bootstrap/common.go | 25 -- .../bootstrap/epochStartDataProvider_test.go | 166 -------- .../{epochStartDataProvider.go => process.go} | 366 +++++++++--------- 3 files changed, 175 insertions(+), 382 deletions(-) delete mode 100644 epochStart/bootstrap/common.go delete mode 100644 epochStart/bootstrap/epochStartDataProvider_test.go rename epochStart/bootstrap/{epochStartDataProvider.go => process.go} (60%) diff --git a/epochStart/bootstrap/common.go b/epochStart/bootstrap/common.go deleted file mode 100644 index 1877e0d5a81..00000000000 --- a/epochStart/bootstrap/common.go +++ /dev/null @@ -1,25 +0,0 @@ -package bootstrap - -import ( - "time" - - "github.com/ElrondNetwork/elrond-go/config" - "github.com/ElrondNetwork/elrond-go/sharding" -) - -// ShouldSyncWithTheNetwork returns true if a peer is not synced with the latest epoch (especially used when a peer -// wants to join the network after the genesis) -func ShouldSyncWithTheNetwork( - startTime time.Time, - epochFoundInStorage bool, - nodesConfig *sharding.NodesSetup, - config *config.Config, -) bool { - isCurrentTimeBeforeGenesis := time.Now().Sub(startTime) < 0 - timeInFirstEpochAtMinRoundsPerEpoch := startTime.Add(time.Duration(nodesConfig.RoundDuration * - uint64(config.EpochStartConfig.MinRoundsBetweenEpochs))) - isEpochZero := time.Now().Sub(timeInFirstEpochAtMinRoundsPerEpoch) < 0 - shouldSyncWithTheNetwork := !isCurrentTimeBeforeGenesis && !isEpochZero && !epochFoundInStorage - - return shouldSyncWithTheNetwork -} diff --git a/epochStart/bootstrap/epochStartDataProvider_test.go b/epochStart/bootstrap/epochStartDataProvider_test.go deleted file mode 100644 index c14b126f5c8..00000000000 --- a/epochStart/bootstrap/epochStartDataProvider_test.go +++ /dev/null @@ -1,166 +0,0 @@ -package bootstrap_test - -import ( - "errors" - "testing" - - "github.com/ElrondNetwork/elrond-go/core/check" - "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap" - "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/mock" - mock2 "github.com/ElrondNetwork/elrond-go/epochStart/mock" - "github.com/stretchr/testify/require" -) - -func TestNewEpochStartDataProvider_NilPublicKeyShouldErr(t *testing.T) { - t.Parallel() - - args := getArguments() - args.PublicKey = nil - epStart, err := bootstrap.NewEpochStartDataProvider(args) - - require.Nil(t, epStart) - require.Equal(t, bootstrap.ErrNilPublicKey, err) -} - -func TestNewEpochStartDataProvider_NilMessengerShouldErr(t *testing.T) { - t.Parallel() - - args := getArguments() - args.Messenger = nil - epStart, err := bootstrap.NewEpochStartDataProvider(args) - - require.Nil(t, epStart) - require.Equal(t, bootstrap.ErrNilMessenger, err) -} - -func 
TestNewEpochStartDataProvider_NilMarshalizerShouldErr(t *testing.T) { - t.Parallel() - - args := getArguments() - args.Marshalizer = nil - epStart, err := bootstrap.NewEpochStartDataProvider(args) - - require.Nil(t, epStart) - require.Equal(t, bootstrap.ErrNilMarshalizer, err) -} - -func TestNewEpochStartDataProvider_NilHasherShouldErr(t *testing.T) { - t.Parallel() - - args := getArguments() - args.Hasher = nil - epStart, err := bootstrap.NewEpochStartDataProvider(args) - - require.Nil(t, epStart) - require.Equal(t, bootstrap.ErrNilHasher, err) -} - -func TestNewEpochStartDataProvider_NilPathManagerShouldErr(t *testing.T) { - t.Parallel() - - args := getArguments() - args.PathManager = nil - epStart, err := bootstrap.NewEpochStartDataProvider(args) - - require.Nil(t, epStart) - require.Equal(t, bootstrap.ErrNilPathManager, err) -} - -func TestNewEpochStartDataProvider_NilNodesConfigProviderShouldErr(t *testing.T) { - t.Parallel() - - args := getArguments() - args.NodesConfigProvider = nil - epStart, err := bootstrap.NewEpochStartDataProvider(args) - - require.Nil(t, epStart) - require.Equal(t, bootstrap.ErrNilNodesConfigProvider, err) -} - -func TestNewEpochStartDataProvider_NilMetablockInterceptorShouldErr(t *testing.T) { - t.Parallel() - - args := getArguments() - args.MetaBlockInterceptor = nil - epStart, err := bootstrap.NewEpochStartDataProvider(args) - - require.Nil(t, epStart) - require.Equal(t, bootstrap.ErrNilMetaBlockInterceptor, err) -} - -func TestNewEpochStartDataProvider_NilShardHeaderInterceptorShouldErr(t *testing.T) { - t.Parallel() - - args := getArguments() - args.ShardHeaderInterceptor = nil - epStart, err := bootstrap.NewEpochStartDataProvider(args) - - require.Nil(t, epStart) - require.Equal(t, bootstrap.ErrNilShardHeaderInterceptor, err) -} - -func TestNewEpochStartDataProvider_NilMetaBlockInterceptorShouldErr(t *testing.T) { - t.Parallel() - - args := getArguments() - args.MetaBlockInterceptor = nil - epStart, err := bootstrap.NewEpochStartDataProvider(args) - - require.Nil(t, epStart) - require.Equal(t, bootstrap.ErrNilMetaBlockInterceptor, err) -} - -func TestNewEpochStartDataProvider_NilMiniBlockInterceptorShouldErr(t *testing.T) { - t.Parallel() - - args := getArguments() - args.MiniBlockInterceptor = nil - epStart, err := bootstrap.NewEpochStartDataProvider(args) - - require.Nil(t, epStart) - require.Equal(t, bootstrap.ErrNilMiniBlockInterceptor, err) -} - -func TestNewEpochStartDataProvider_OkValsShouldWork(t *testing.T) { - t.Parallel() - - args := getArguments() - epStart, err := bootstrap.NewEpochStartDataProvider(args) - - require.Nil(t, err) - require.False(t, check.IfNil(epStart)) -} - -func TestEpochStartDataProvider_Bootstrap_TopicCreationFailsShouldErr(t *testing.T) { - t.Parallel() - - expectedErr := errors.New("error while creating topic") - args := getArguments() - args.Messenger = &mock.MessengerStub{ - CreateTopicCalled: func(_ string, _ bool) error { - return expectedErr - }, - } - epStart, _ := bootstrap.NewEpochStartDataProvider(args) - - res, err := epStart.Bootstrap() - - require.Nil(t, res) - require.Equal(t, expectedErr, err) -} - -func getArguments() bootstrap.ArgsEpochStartDataProvider { - return bootstrap.ArgsEpochStartDataProvider{ - PublicKey: &mock.PublicKeyMock{}, - Messenger: &mock.MessengerStub{}, - Marshalizer: &mock2.MarshalizerMock{}, - Hasher: mock2.HasherMock{}, - NodesConfigProvider: &mock.NodesConfigProviderStub{}, - EpochStartMetaBlockInterceptor: &mock.EpochStartMetaBlockInterceptorStub{}, - PathManager: 
&mock.PathManagerStub{}, - MetaBlockInterceptor: &mock.MetaBlockInterceptorStub{}, - ShardHeaderInterceptor: &mock.ShardHeaderInterceptorStub{}, - MiniBlockInterceptor: &mock.MiniBlockInterceptorStub{}, - WhiteListHandler: &mock.WhiteListHandlerStub{}, - } -} diff --git a/epochStart/bootstrap/epochStartDataProvider.go b/epochStart/bootstrap/process.go similarity index 60% rename from epochStart/bootstrap/epochStartDataProvider.go rename to epochStart/bootstrap/process.go index 94cd252763f..6a2f4b6f039 100644 --- a/epochStart/bootstrap/epochStartDataProvider.go +++ b/epochStart/bootstrap/process.go @@ -29,7 +29,6 @@ import ( "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/p2p" "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/process/economics" "github.com/ElrondNetwork/elrond-go/process/factory" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/storage" @@ -41,10 +40,8 @@ import ( var log = logger.GetOrCreate("epochStart/bootstrap") -const delayBetweenRequests = 1 * time.Second const delayAfterRequesting = 1 * time.Second const thresholdForConsideringMetaBlockCorrect = 0.2 -const numRequestsToSendOnce = 4 const maxNumTimesToRetry = 100 // ComponentsNeededForBootstrap holds the components which need to be initialized from network @@ -59,15 +56,14 @@ type ComponentsNeededForBootstrap struct { PendingMiniBlocks map[string]*block.MiniBlock } -// epochStartDataProvider will handle requesting the needed data to start when joining late the network -type epochStartDataProvider struct { +// epochStartBootstrap will handle requesting the needed data to start when joining late the network +type epochStartBootstrap struct { publicKey crypto.PublicKey marshalizer marshal.Marshalizer hasher hashing.Hasher messenger p2p.Messenger generalConfig config.Config economicsConfig config.EconomicsConfig - genesisShardCoordinator sharding.Coordinator pathManager PathManagerHandler nodesConfigProvider NodesConfigProviderHandler epochStartMetaBlockInterceptor EpochStartMetaBlockInterceptorHandler @@ -86,18 +82,19 @@ type epochStartDataProvider struct { defaultDBPath string defaultEpochString string - dataPool dataRetriever.PoolsHolder + dataPool dataRetriever.PoolsHolder + computedEpoch uint32 } -// ArgsEpochStartDataProvider holds the arguments needed for creating an epoch start data provider component -type ArgsEpochStartDataProvider struct { +// ArgsEpochStartBootstrap holds the arguments needed for creating an epoch start data provider component +type ArgsEpochStartBootstrap struct { PublicKey crypto.PublicKey Messenger p2p.Messenger Marshalizer marshal.Marshalizer Hasher hashing.Hasher GeneralConfig config.Config EconomicsConfig config.EconomicsConfig - DefaultShardCoordinator sharding.Coordinator + GenesisShardCoordinator sharding.Coordinator PathManager PathManagerHandler NodesConfigProvider NodesConfigProviderHandler EpochStartMetaBlockInterceptor EpochStartMetaBlockInterceptorHandler @@ -112,8 +109,8 @@ type ArgsEpochStartDataProvider struct { DefaultEpochString string } -// NewEpochStartDataProvider will return a new instance of epochStartDataProvider -func NewEpochStartDataProvider(args ArgsEpochStartDataProvider) (*epochStartDataProvider, error) { +// NewEpochStartBootstrap will return a new instance of epochStartBootstrap +func NewEpochStartBootstrapHandler(args ArgsEpochStartBootstrap) (*epochStartBootstrap, error) { if check.IfNil(args.PublicKey) { return nil, ErrNilPublicKey } @@ -154,7 +151,7 @@ func 
NewEpochStartDataProvider(args ArgsEpochStartDataProvider) (*epochStartData return nil, ErrNilBlockSingleSigner } - epochStartProvider := &epochStartDataProvider{ + epochStartProvider := &epochStartBootstrap{ publicKey: args.PublicKey, marshalizer: args.Marshalizer, hasher: args.Hasher, @@ -181,152 +178,184 @@ func NewEpochStartDataProvider(args ArgsEpochStartDataProvider) (*epochStartData return epochStartProvider, nil } -func (esdp *epochStartDataProvider) initInternalComponents() error { +func (e *epochStartBootstrap) initInternalComponents() error { var err error - esdp.shardCoordinator, err = sharding.NewMultiShardCoordinator(esdp.genesisNodesConfig.NumberOfShards(), core.MetachainShardId) + e.shardCoordinator, err = sharding.NewMultiShardCoordinator(e.genesisNodesConfig.NumberOfShards(), core.MetachainShardId) if err != nil { return err } - err = esdp.initTopicsAndInterceptors() + err = e.initTopicsAndInterceptors() if err != nil { return err } defer func() { - esdp.resetTopicsAndInterceptors() + e.resetTopicsAndInterceptors() }() - err = esdp.createRequestHandler() + err = e.createRequestHandler() if err != nil { return err } - economicsData, err := economics.NewEconomicsData(&esdp.economicsConfig) - if err != nil { - return nil, err - } - - esdp.dataPool, err = factory2.NewDataPoolFromConfig( + e.dataPool, err = factory2.NewDataPoolFromConfig( factory2.ArgsDataPool{ - Config: &esdp.generalConfig, - EconomicsData: economicsData, - ShardCoordinator: esdp.shardCoordinator, + Config: &e.generalConfig, + EconomicsData: e.economicsData, + ShardCoordinator: e.shardCoordinator, }, ) return nil } -func (esdp *epochStartDataProvider) searchDataInLocalStorage() { - var errNotCritical error - // TODO: add a component which opens headers storer and gets the last epoch start metablock - // in order to provide the last known epoch in storage. 
Right now, it won't work as expected - // if storage pruning is disabled - isEpochFoundInStorage := true +func (e *epochStartBootstrap) searchDataInLocalStorage() { currentEpoch, errNotCritical := storageFactory.FindLastEpochFromStorage( - esdp.workingDir, - esdp.genesisNodesConfig.ChainID, - esdp.defaultDBPath, - esdp.defaultEpochString, + e.workingDir, + e.genesisNodesConfig.ChainID, + e.defaultDBPath, + e.defaultEpochString, ) if errNotCritical != nil { log.Debug("no epoch db found in storage", "error", errNotCritical.Error()) - isEpochFoundInStorage = false } log.Debug("current epoch from the storage : ", "epoch", currentEpoch) +} - shouldSync := ShouldSyncWithTheNetwork( - args.StartTime, - isEpochFoundInStorage, - args.OriginalNodesConfig, - args.GeneralConfig, - ) +func (e *epochStartBootstrap) isStartInEpochZero() bool { + startTime := time.Unix(e.genesisNodesConfig.StartTime, 0) + isCurrentTimeBeforeGenesis := time.Now().Sub(startTime) < 0 + if isCurrentTimeBeforeGenesis { + return true + } + + timeInFirstEpochAtRoundsPerEpoch := startTime.Add(time.Duration(e.genesisNodesConfig.RoundDuration * + uint64(e.generalConfig.EpochStartConfig.RoundsPerEpoch))) + isEpochZero := time.Now().Sub(timeInFirstEpochAtRoundsPerEpoch) < 0 + + return isEpochZero +} + +func (e *epochStartBootstrap) prepareEpochZero() (uint32, uint32, uint32, error) { + currentEpoch := uint32(0) + return currentEpoch, e.shardCoordinator.SelfId(), e.shardCoordinator.NumberOfShards(), nil +} + +func (e *epochStartBootstrap) isCurrentEpochSavedInStorage() bool { + // TODO: implement + return true +} + +func (e *epochStartBootstrap) requestDataFromNetwork() { + +} + +func (e *epochStartBootstrap) saveGatheredDataToStorage() { + +} + +func (e *epochStartBootstrap) computeMostProbableEpoch() { + startTime := time.Unix(e.genesisNodesConfig.StartTime, 0) + elapsedTime := time.Since(startTime) + + timeForOneEpoch := time.Duration(e.genesisNodesConfig.RoundDuration * + uint64(e.generalConfig.EpochStartConfig.RoundsPerEpoch)) + + elaspedTimeInSeconds := uint64(elapsedTime.Seconds()) + timeForOneEpochInSeconds := uint64(timeForOneEpoch.Seconds()) + + e.computedEpoch = uint32(elaspedTimeInSeconds / timeForOneEpochInSeconds) +} + +func (e *epochStartBootstrap) Bootstrap() (uint32, uint32, uint32, error) { + if e.isStartInEpochZero() { + return e.prepareEpochZero() + } - log.Debug("shouldSync epochStartData", "shouldSync", shouldSync) } // Bootstrap will handle requesting and receiving the needed information the node will bootstrap from -func (esdp *epochStartDataProvider) Bootstrap() (uint32, uint32, uint32, error) { +func (e *epochStartBootstrap) requestAndProcessing() (uint32, uint32, uint32, error) { // TODO: add searching for epoch start metablock and other data inside this component - err := esdp.initInternalComponents() + err := e.initInternalComponents() if err != nil { return nil, err } - interceptorsContainer, err := esdp.createInterceptors(commonDataPool) + interceptorsContainer, err := e.createInterceptors(commonDataPool) if err != nil || interceptorsContainer == nil { return nil, err } - miniBlocksSyncer, err := esdp.getMiniBlockSyncer(commonDataPool.MiniBlocks()) + miniBlocksSyncer, err := e.getMiniBlockSyncer(commonDataPool.MiniBlocks()) if err != nil { return nil, err } - missingHeadersSyncer, err := esdp.getHeaderHandlerSyncer(commonDataPool.Headers()) + missingHeadersSyncer, err := e.getHeaderHandlerSyncer(commonDataPool.Headers()) if err != nil { return nil, err } epochNumForRequestingTheLatestAvailable := 
uint32(math.MaxUint32) - metaBlock, err := esdp.getEpochStartMetaBlock(epochNumForRequestingTheLatestAvailable) + metaBlock, err := e.getEpochStartMetaBlock(epochNumForRequestingTheLatestAvailable) if err != nil { return nil, err } - prevMetaBlock, err := esdp.getMetaBlock(missingHeadersSyncer, metaBlock.EpochStart.Economics.PrevEpochStartHash) + prevMetaBlock, err := e.getMetaBlock(missingHeadersSyncer, metaBlock.EpochStart.Economics.PrevEpochStartHash) if err != nil { return nil, err } - esdp.changeMessageProcessorsForMetaBlocks() + e.changeMessageProcessorsForMetaBlocks() log.Info("previous meta block", "epoch", prevMetaBlock.Epoch) - nodesConfig, err := esdp.nodesConfigProvider.GetNodesConfigForMetaBlock(metaBlock) + nodesConfig, err := e.nodesConfigProvider.GetNodesConfigForMetaBlock(metaBlock) if err != nil { return nil, err } - esdp.shardCoordinator, err = esdp.getShardCoordinator(metaBlock, nodesConfig) + e.shardCoordinator, err = e.getShardCoordinator(metaBlock, nodesConfig) if err != nil { return nil, err } - shardHeaders, err := esdp.getShardHeaders(missingHeadersSyncer, metaBlock, nodesConfig, shardCoordinator) + shardHeaders, err := e.getShardHeaders(missingHeadersSyncer, metaBlock, nodesConfig, shardCoordinator) if err != nil { log.Debug("shard headers not found", "error", err) } var shardHeaderForShard *block.Header - if esdp.shardCoordinator.SelfId() < esdp.shardCoordinator.NumberOfShards() { - shardHeaderForShard = shardHeaders[esdp.shardCoordinator.SelfId()] + if e.shardCoordinator.SelfId() < e.shardCoordinator.NumberOfShards() { + shardHeaderForShard = shardHeaders[e.shardCoordinator.SelfId()] } - epochStartData, err := esdp.getCurrentEpochStartData(esdp.shardCoordinator, metaBlock) + epochStartData, err := e.getCurrentEpochStartData(e.shardCoordinator, metaBlock) if err != nil { return nil, err } - pendingMiniBlocks, err := esdp.getMiniBlocks(miniBlocksSyncer, epochStartData.PendingMiniBlockHeaders, shardCoordinator.SelfId()) + pendingMiniBlocks, err := e.getMiniBlocks(miniBlocksSyncer, epochStartData.PendingMiniBlockHeaders, shardCoordinator.SelfId()) if err != nil { return nil, err } - lastFinalizedMetaBlock, err := esdp.getMetaBlock(missingHeadersSyncer, epochStartData.LastFinishedMetaBlock) + lastFinalizedMetaBlock, err := e.getMetaBlock(missingHeadersSyncer, epochStartData.LastFinishedMetaBlock) if err != nil { return nil, err } log.Info("received last finalized meta block", "nonce", lastFinalizedMetaBlock.Nonce) - firstPendingMetaBlock, err := esdp.getMetaBlock(missingHeadersSyncer, epochStartData.FirstPendingMetaBlock) + firstPendingMetaBlock, err := e.getMetaBlock(missingHeadersSyncer, epochStartData.FirstPendingMetaBlock) if err != nil { return nil, err } log.Info("received first pending meta block", "nonce", firstPendingMetaBlock.Nonce) - trieToReturn, err := esdp.getTrieFromRootHash(epochStartData.RootHash) + trieToReturn, err := e.getTrieFromRootHash(epochStartData.RootHash) if err != nil { return nil, err } @@ -337,19 +366,19 @@ func (esdp *epochStartDataProvider) Bootstrap() (uint32, uint32, uint32, error) ShardHeader: shardHeaderForShard, NodesConfig: nodesConfig, ShardHeaders: shardHeaders, - ShardCoordinator: esdp.shardCoordinator, + ShardCoordinator: e.shardCoordinator, Tries: trieToReturn, PendingMiniBlocks: pendingMiniBlocks, } var storageHandlerComponent StorageHandler - if esdp.shardCoordinator.SelfId() > esdp.shardCoordinator.NumberOfShards() { + if e.shardCoordinator.SelfId() > e.shardCoordinator.NumberOfShards() { storageHandlerComponent, err = 
storagehandler.NewMetaStorageHandler( - esdp.generalConfig, - esdp.shardCoordinator, - esdp.pathManager, - esdp.marshalizer, - esdp.hasher, + e.generalConfig, + e.shardCoordinator, + e.pathManager, + e.marshalizer, + e.hasher, metaBlock.Epoch, ) if err != nil { @@ -357,11 +386,11 @@ func (esdp *epochStartDataProvider) Bootstrap() (uint32, uint32, uint32, error) } } else { storageHandlerComponent, err = storagehandler.NewShardStorageHandler( - esdp.generalConfig, - esdp.shardCoordinator, - esdp.pathManager, - esdp.marshalizer, - esdp.hasher, + e.generalConfig, + e.shardCoordinator, + e.pathManager, + e.marshalizer, + e.hasher, metaBlock.Epoch, ) if err != nil { @@ -377,27 +406,27 @@ func (esdp *epochStartDataProvider) Bootstrap() (uint32, uint32, uint32, error) return components, nil } -func (esdp *epochStartDataProvider) getMiniBlockSyncer(dataPool storage.Cacher) (update.EpochStartPendingMiniBlocksSyncHandler, error) { +func (e *epochStartBootstrap) getMiniBlockSyncer(dataPool storage.Cacher) (update.EpochStartPendingMiniBlocksSyncHandler, error) { syncMiniBlocksArgs := sync.ArgsNewPendingMiniBlocksSyncer{ Storage: &disabled.Storer{}, Cache: dataPool, - Marshalizer: esdp.marshalizer, - RequestHandler: esdp.requestHandler, + Marshalizer: e.marshalizer, + RequestHandler: e.requestHandler, } return sync.NewPendingMiniBlocksSyncer(syncMiniBlocksArgs) } -func (esdp *epochStartDataProvider) getHeaderHandlerSyncer(pool dataRetriever.HeadersPool) (update.MissingHeadersByHashSyncer, error) { +func (e *epochStartBootstrap) getHeaderHandlerSyncer(pool dataRetriever.HeadersPool) (update.MissingHeadersByHashSyncer, error) { syncMissingHeadersArgs := sync.ArgsNewMissingHeadersByHashSyncer{ Storage: &disabled.Storer{}, Cache: pool, - Marshalizer: esdp.marshalizer, - RequestHandler: esdp.requestHandler, + Marshalizer: e.marshalizer, + RequestHandler: e.requestHandler, } return sync.NewMissingheadersByHashSyncer(syncMissingHeadersArgs) } -func (esdp *epochStartDataProvider) getMiniBlocks( +func (e *epochStartBootstrap) getMiniBlocks( handler update.EpochStartPendingMiniBlocksSyncHandler, pendingMiniBlocks []block.ShardMiniBlockHeader, shardID uint32, @@ -412,38 +441,38 @@ func (esdp *epochStartDataProvider) getMiniBlocks( return handler.GetMiniBlocks() } -func (esdp *epochStartDataProvider) createInterceptors(dataPool dataRetriever.PoolsHolder) (process.InterceptorsContainer, error) { +func (e *epochStartBootstrap) createInterceptors(dataPool dataRetriever.PoolsHolder) (process.InterceptorsContainer, error) { args := factory3.ArgsEpochStartInterceptorContainer{ - Config: esdp.generalConfig, - ShardCoordinator: esdp.defaultShardCoordinator, - Marshalizer: esdp.marshalizer, - Hasher: esdp.hasher, - Messenger: esdp.messenger, + Config: e.generalConfig, + ShardCoordinator: e.defaultShardCoordinator, + Marshalizer: e.marshalizer, + Hasher: e.hasher, + Messenger: e.messenger, DataPool: dataPool, - SingleSigner: esdp.singleSigner, - BlockSingleSigner: esdp.blockSingleSigner, - KeyGen: esdp.keyGen, - BlockKeyGen: esdp.blockKeyGen, - WhiteListHandler: esdp.whiteListHandler, + SingleSigner: e.singleSigner, + BlockSingleSigner: e.blockSingleSigner, + KeyGen: e.keyGen, + BlockKeyGen: e.blockKeyGen, + WhiteListHandler: e.whiteListHandler, } return factory3.NewEpochStartInterceptorsContainer(args) } -func (esdp *epochStartDataProvider) changeMessageProcessorsForMetaBlocks() { - err := esdp.messenger.UnregisterMessageProcessor(factory.MetachainBlocksTopic) +func (e *epochStartBootstrap) 
changeMessageProcessorsForMetaBlocks() { + err := e.messenger.UnregisterMessageProcessor(factory.MetachainBlocksTopic) if err != nil { log.Info("error unregistering message processor", "error", err) } - err = esdp.messenger.RegisterMessageProcessor(factory.MetachainBlocksTopic, esdp.metaBlockInterceptor) + err = e.messenger.RegisterMessageProcessor(factory.MetachainBlocksTopic, e.metaBlockInterceptor) if err != nil { log.Info("error unregistering message processor", "error", err) } } -func (esdp *epochStartDataProvider) createRequestHandler() error { - dataPacker, err := partitioning.NewSimpleDataPacker(esdp.marshalizer) +func (e *epochStartBootstrap) createRequestHandler() error { + dataPacker, err := partitioning.NewSimpleDataPacker(e.marshalizer) if err != nil { return err } @@ -461,7 +490,7 @@ func (esdp *epochStartDataProvider) createRequestHandler() error { if err != nil { return err } - stateTrie, err := trie.NewTrie(stateTrieStorageManager, esdp.marshalizer, esdp.hasher) + stateTrie, err := trie.NewTrie(stateTrieStorageManager, e.marshalizer, e.hasher) if err != nil { return err } @@ -472,17 +501,17 @@ func (esdp *epochStartDataProvider) createRequestHandler() error { return err } - peerTrie, err := trie.NewTrie(peerTrieStorageManager, esdp.marshalizer, esdp.hasher) + peerTrie, err := trie.NewTrie(peerTrieStorageManager, e.marshalizer, e.hasher) if err != nil { return err } triesHolder.Put([]byte(trieFactory.PeerAccountTrie), peerTrie) resolversContainerArgs := resolverscontainer.FactoryArgs{ - ShardCoordinator: esdp.shardCoordinator, - Messenger: esdp.messenger, + ShardCoordinator: e.shardCoordinator, + Messenger: e.messenger, Store: storageService, - Marshalizer: esdp.marshalizer, + Marshalizer: e.marshalizer, DataPools: cacher, Uint64ByteSliceConverter: uint64ByteSlice.NewBigEndianConverter(), DataPacker: dataPacker, @@ -500,7 +529,7 @@ func (esdp *epochStartDataProvider) createRequestHandler() error { return err } - finder, err := containers.NewResolversFinder(container, esdp.shardCoordinator) + finder, err := containers.NewResolversFinder(container, e.shardCoordinator) if err != nil { return err } @@ -509,33 +538,33 @@ func (esdp *epochStartDataProvider) createRequestHandler() error { maxToRequest := 100 - esdp.requestHandler, err = requestHandlers.NewResolverRequestHandler(finder, requestedItemsHandler, esdp.whiteListHandler, maxToRequest, core.MetachainShardId) + e.requestHandler, err = requestHandlers.NewResolverRequestHandler(finder, requestedItemsHandler, e.whiteListHandler, maxToRequest, core.MetachainShardId) return err } -func (esdp *epochStartDataProvider) getMiniBlock(miniBlockHeader *block.ShardMiniBlockHeader) (*block.MiniBlock, error) { - esdp.requestMiniBlock(miniBlockHeader) +func (e *epochStartBootstrap) getMiniBlock(miniBlockHeader *block.ShardMiniBlockHeader) (*block.MiniBlock, error) { + e.requestMiniBlock(miniBlockHeader) time.Sleep(delayAfterRequesting) for { - numConnectedPeers := len(esdp.messenger.Peers()) + numConnectedPeers := len(e.messenger.Peers()) threshold := int(thresholdForConsideringMetaBlockCorrect * float64(numConnectedPeers)) - mb, errConsensusNotReached := esdp.miniBlockInterceptor.GetMiniBlock(miniBlockHeader.Hash, threshold) + mb, errConsensusNotReached := e.miniBlockInterceptor.GetMiniBlock(miniBlockHeader.Hash, threshold) if errConsensusNotReached == nil { return mb, nil } log.Info("consensus not reached for epoch start meta block. 
re-requesting and trying again...") - esdp.requestMiniBlock(miniBlockHeader) + e.requestMiniBlock(miniBlockHeader) } } -func (esdp *epochStartDataProvider) requestMiniBlock(miniBlockHeader *block.ShardMiniBlockHeader) { - esdp.requestHandler.RequestMiniBlock(miniBlockHeader.ReceiverShardID, miniBlockHeader.Hash) +func (e *epochStartBootstrap) requestMiniBlock(miniBlockHeader *block.ShardMiniBlockHeader) { + e.requestHandler.RequestMiniBlock(miniBlockHeader.ReceiverShardID, miniBlockHeader.Hash) } -func (esdp *epochStartDataProvider) getCurrentEpochStartData( - shardCoordinator sharding.Coordinator, +func (e *epochStartBootstrap) getCurrentEpochStartData( + shardCoordinator sharding.epochStartBootstrap, metaBlock *block.MetaBlock, ) (*block.EpochStartShardData, error) { shardID := shardCoordinator.SelfId() @@ -548,20 +577,20 @@ func (esdp *epochStartDataProvider) getCurrentEpochStartData( return nil, errors.New("not found") } -func (esdp *epochStartDataProvider) initTopicForEpochStartMetaBlockInterceptor() error { - err := esdp.messenger.UnregisterMessageProcessor(factory.MetachainBlocksTopic) +func (e *epochStartBootstrap) initTopicForEpochStartMetaBlockInterceptor() error { + err := e.messenger.UnregisterMessageProcessor(factory.MetachainBlocksTopic) if err != nil { log.Info("error unregistering message processor", "error", err) return err } - err = esdp.messenger.CreateTopic(factory.MetachainBlocksTopic, true) + err = e.messenger.CreateTopic(factory.MetachainBlocksTopic, true) if err != nil { log.Info("error registering message processor", "error", err) return err } - err = esdp.messenger.RegisterMessageProcessor(factory.MetachainBlocksTopic, esdp.epochStartMetaBlockInterceptor) + err = e.messenger.RegisterMessageProcessor(factory.MetachainBlocksTopic, e.epochStartMetaBlockInterceptor) if err != nil { return err } @@ -569,8 +598,8 @@ func (esdp *epochStartDataProvider) initTopicForEpochStartMetaBlockInterceptor() return nil } -func (esdp *epochStartDataProvider) getShardID(nodesConfig *sharding.NodesSetup) (uint32, error) { - pubKeyBytes, err := esdp.publicKey.ToByteArray() +func (e *epochStartBootstrap) getShardID(nodesConfig *sharding.NodesSetup) (uint32, error) { + pubKeyBytes, err := e.publicKey.ToByteArray() if err != nil { return 0, err } @@ -586,35 +615,35 @@ func (esdp *epochStartDataProvider) getShardID(nodesConfig *sharding.NodesSetup) return 0, nil } -func (esdp *epochStartDataProvider) getTrieFromRootHash(_ []byte) (state.TriesHolder, error) { +func (e *epochStartBootstrap) getTrieFromRootHash(_ []byte) (state.TriesHolder, error) { // TODO: get trie from trie syncer return state.NewDataTriesHolder(), nil } -func (esdp *epochStartDataProvider) resetTopicsAndInterceptors() { - err := esdp.messenger.UnregisterMessageProcessor(factory.MetachainBlocksTopic) +func (e *epochStartBootstrap) resetTopicsAndInterceptors() { + err := e.messenger.UnregisterMessageProcessor(factory.MetachainBlocksTopic) if err != nil { log.Info("error unregistering message processors", "error", err) } } -func (esdp *epochStartDataProvider) getMetaBlock(syncer update.MissingHeadersByHashSyncer, hash []byte) (*block.MetaBlock, error) { - //esdp.requestMetaBlock(hash) +func (e *epochStartBootstrap) getMetaBlock(syncer update.MissingHeadersByHashSyncer, hash []byte) (*block.MetaBlock, error) { + //e.requestMetaBlock(hash) // //time.Sleep(delayAfterRequesting) // //for { - // numConnectedPeers := len(esdp.messenger.Peers()) + // numConnectedPeers := len(e.messenger.Peers()) // threshold := 
int(thresholdForConsideringMetaBlockCorrect * float64(numConnectedPeers)) - // mb, errConsensusNotReached := esdp.metaBlockInterceptor.GetMetaBlock(hash, threshold) + // mb, errConsensusNotReached := e.metaBlockInterceptor.GetMetaBlock(hash, threshold) // if errConsensusNotReached == nil { // return mb, nil // } // log.Info("consensus not reached for meta block. re-requesting and trying again...") - // esdp.requestMetaBlock(hash) + // e.requestMetaBlock(hash) //} waitTime := 1 * time.Minute - err := syncer.SyncMissingHeadersByHash(esdp.defaultShardCoordinator.SelfId(), [][]byte{hash}, waitTime) + err := syncer.SyncMissingHeadersByHash(e.defaultShardCoordinator.SelfId(), [][]byte{hash}, waitTime) if err != nil { return nil, err } @@ -629,16 +658,16 @@ func (esdp *epochStartDataProvider) getMetaBlock(syncer update.MissingHeadersByH return hdrs[string(hash)].(*block.MetaBlock), nil } -func (esdp *epochStartDataProvider) getEpochStartMetaBlock(epoch uint32) (*block.MetaBlock, error) { - err := esdp.initTopicForEpochStartMetaBlockInterceptor() +func (e *epochStartBootstrap) getEpochStartMetaBlock(epoch uint32) (*block.MetaBlock, error) { + err := e.initTopicForEpochStartMetaBlockInterceptor() if err != nil { return nil, err } defer func() { - esdp.resetTopicsAndInterceptors() + e.resetTopicsAndInterceptors() }() - esdp.requestEpochStartMetaBlock(epoch) + e.requestEpochStartMetaBlock(epoch) time.Sleep(delayAfterRequesting) count := 0 @@ -648,19 +677,19 @@ func (esdp *epochStartDataProvider) getEpochStartMetaBlock(epoch uint32) (*block panic("can't sync with other peers") } count++ - numConnectedPeers := len(esdp.messenger.Peers()) + numConnectedPeers := len(e.messenger.Peers()) threshold := int(thresholdForConsideringMetaBlockCorrect * float64(numConnectedPeers)) - mb, errConsensusNotReached := esdp.epochStartMetaBlockInterceptor.GetEpochStartMetaBlock(threshold, epoch) + mb, errConsensusNotReached := e.epochStartMetaBlockInterceptor.GetEpochStartMetaBlock(threshold, epoch) if errConsensusNotReached == nil { return mb, nil } log.Info("consensus not reached for meta block. 
re-requesting and trying again...") - esdp.requestEpochStartMetaBlock(epoch) + e.requestEpochStartMetaBlock(epoch) } } -func (esdp *epochStartDataProvider) getShardCoordinator(metaBlock *block.MetaBlock, nodesConfig *sharding.NodesSetup) (sharding.Coordinator, error) { - shardID, err := esdp.getShardID(nodesConfig) +func (e *epochStartBootstrap) getShardCoordinator(metaBlock *block.MetaBlock, nodesConfig *sharding.NodesSetup) (sharding.epochStartBootstrap, error) { + shardID, err := e.getShardID(nodesConfig) if err != nil { return nil, err } @@ -669,10 +698,10 @@ func (esdp *epochStartDataProvider) getShardCoordinator(metaBlock *block.MetaBlo return sharding.NewMultiShardCoordinator(uint32(numOfShards), shardID) } -func (esdp *epochStartDataProvider) getShardHeaders( +func (e *epochStartBootstrap) getShardHeaders( syncer update.MissingHeadersByHashSyncer, metaBlock *block.MetaBlock, - shardCoordinator sharding.Coordinator, + shardCoordinator sharding.epochStartBootstrap, ) (map[uint32]*block.Header, error) { headersMap := make(map[uint32]*block.Header) @@ -680,7 +709,7 @@ func (esdp *epochStartDataProvider) getShardHeaders( if shardID == core.MetachainShardId { for _, entry := range metaBlock.EpochStart.LastFinalizedHeaders { var hdr *block.Header - hdr, err := esdp.getShardHeader(syncer, entry.HeaderHash, entry.ShardID) + hdr, err := e.getShardHeader(syncer, entry.HeaderHash, entry.ShardID) if err != nil { return nil, err } @@ -701,7 +730,7 @@ func (esdp *epochStartDataProvider) getShardHeaders( return nil, ErrShardDataNotFound } - hdr, err := esdp.getShardHeader( + hdr, err := e.getShardHeader( syncer, entryForShard.HeaderHash, entryForShard.ShardID, @@ -714,13 +743,13 @@ func (esdp *epochStartDataProvider) getShardHeaders( return headersMap, nil } -func (esdp *epochStartDataProvider) getShardHeader( +func (e *epochStartBootstrap) getShardHeader( syncer update.MissingHeadersByHashSyncer, hash []byte, shardID uint32, ) (*block.Header, error) { waitTime := 1 * time.Minute - err := syncer.SyncMissingHeadersByHash(esdp.defaultShardCoordinator.SelfId(), [][]byte{hash}, waitTime) + err := syncer.SyncMissingHeadersByHash(e.defaultShardCoordinator.SelfId(), [][]byte{hash}, waitTime) if err != nil { return nil, err } @@ -733,54 +762,9 @@ func (esdp *epochStartDataProvider) getShardHeader( syncer.ClearFields() return hdrs[string(hash)].(*block.Header), nil - - //esdp.requestShardHeader(shardID, hash) - //time.Sleep(delayBetweenRequests) - // - //count := 0 - //for { - // if count > maxNumTimesToRetry { - // panic("can't sync with the other peers") - // } - // count++ - // numConnectedPeers := len(esdp.messenger.Peers()) - // threshold := int(thresholdForConsideringMetaBlockCorrect * float64(numConnectedPeers)) - // mb, errConsensusNotReached := esdp.shardHeaderInterceptor.GetShardHeader(hash, threshold) - // if errConsensusNotReached == nil { - // return mb, nil - // } - // log.Info("consensus not reached for shard header. 
re-requesting and trying again...") - // esdp.requestShardHeader(shardID, hash) - //} -} - -func (esdp *epochStartDataProvider) requestMetaBlock(hash []byte) { - // send more requests - log.Debug("requested meta block", "hash", hash) - for i := 0; i < numRequestsToSendOnce; i++ { - time.Sleep(delayBetweenRequests) - esdp.requestHandler.RequestMetaHeader(hash) - } -} - -func (esdp *epochStartDataProvider) requestShardHeader(shardID uint32, hash []byte) { - // send more requests - log.Debug("requested shard block", "shard ID", shardID, "hash", hash) - for i := 0; i < numRequestsToSendOnce; i++ { - time.Sleep(delayBetweenRequests) - esdp.requestHandler.RequestShardHeader(shardID, hash) - } -} - -func (esdp *epochStartDataProvider) requestEpochStartMetaBlock(epoch uint32) { - // send more requests - for i := 0; i < numRequestsToSendOnce; i++ { - time.Sleep(delayBetweenRequests) - esdp.requestHandler.RequestStartOfEpochMetaBlock(epoch) - } } // IsInterfaceNil returns true if there is no value under the interface -func (esdp *epochStartDataProvider) IsInterfaceNil() bool { - return esdp == nil +func (e *epochStartBootstrap) IsInterfaceNil() bool { + return e == nil } From 88ab16df84f0e2a1e782eedff0a4b83540a16efa Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Mon, 23 Mar 2020 13:59:25 +0200 Subject: [PATCH 29/61] epoch start processing and refactor. --- .../bootstrap/disabled/disabledPoolsHolder.go | 106 ---- epochStart/bootstrap/process.go | 544 ++++++------------ .../simpleEpochStartMetaBlockInterceptor.go | 7 +- epochStart/bootstrap/syncEpochStartMeta.go | 73 +++ epochStart/interface.go | 29 + update/interface.go | 2 +- update/sync/syncHeadersByHash.go | 6 +- update/sync/syncMiniBlocks.go | 2 +- 8 files changed, 274 insertions(+), 495 deletions(-) delete mode 100644 epochStart/bootstrap/disabled/disabledPoolsHolder.go create mode 100644 epochStart/bootstrap/syncEpochStartMeta.go diff --git a/epochStart/bootstrap/disabled/disabledPoolsHolder.go b/epochStart/bootstrap/disabled/disabledPoolsHolder.go deleted file mode 100644 index 878d89e525a..00000000000 --- a/epochStart/bootstrap/disabled/disabledPoolsHolder.go +++ /dev/null @@ -1,106 +0,0 @@ -package disabled - -import ( - "github.com/ElrondNetwork/elrond-go/config" - "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool" - "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool/headersCache" - "github.com/ElrondNetwork/elrond-go/dataRetriever/shardedData" - "github.com/ElrondNetwork/elrond-go/dataRetriever/txpool" - "github.com/ElrondNetwork/elrond-go/storage" - "github.com/ElrondNetwork/elrond-go/storage/storageUnit" -) - -// PoolsHolder - -type PoolsHolder struct { - transactions dataRetriever.ShardedDataCacherNotifier - unsignedTransactions dataRetriever.ShardedDataCacherNotifier - rewardTransactions dataRetriever.ShardedDataCacherNotifier - headers dataRetriever.HeadersPool - miniBlocks storage.Cacher - peerChangesBlocks storage.Cacher - trieNodes storage.Cacher - currBlockTxs dataRetriever.TransactionCacher -} - -// NewDisabledPoolsHolder - -func NewDisabledPoolsHolder() *PoolsHolder { - phf := &PoolsHolder{} - - phf.transactions, _ = txpool.NewShardedTxPool( - txpool.ArgShardedTxPool{ - Config: storageUnit.CacheConfig{ - Size: 10000, - SizeInBytes: 1000000000, - Shards: 16, - }, - MinGasPrice: 100000000000000, - NumberOfShards: 1, - }, - ) - - phf.unsignedTransactions, _ = shardedData.NewShardedData(storageUnit.CacheConfig{Size: 10000, Type: storageUnit.LRUCache}) - 
phf.rewardTransactions, _ = shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache}) - phf.headers, _ = headersCache.NewHeadersPool(config.HeadersPoolConfig{MaxHeadersPerShard: 1000, NumElementsToRemoveOnEviction: 100}) - phf.miniBlocks, _ = storageUnit.NewCache(storageUnit.LRUCache, 10000, 1) - phf.peerChangesBlocks, _ = storageUnit.NewCache(storageUnit.LRUCache, 10000, 1) - phf.currBlockTxs, _ = dataPool.NewCurrentBlockPool() - phf.trieNodes, _ = storageUnit.NewCache(storageUnit.LRUCache, 10000, 1) - - return phf -} - -// CurrentBlockTxs - -func (phm *PoolsHolder) CurrentBlockTxs() dataRetriever.TransactionCacher { - return phm.currBlockTxs -} - -// Transactions - -func (phm *PoolsHolder) Transactions() dataRetriever.ShardedDataCacherNotifier { - return phm.transactions -} - -// UnsignedTransactions - -func (phm *PoolsHolder) UnsignedTransactions() dataRetriever.ShardedDataCacherNotifier { - return phm.unsignedTransactions -} - -// RewardTransactions - -func (phm *PoolsHolder) RewardTransactions() dataRetriever.ShardedDataCacherNotifier { - return phm.rewardTransactions -} - -// Headers - -func (phm *PoolsHolder) Headers() dataRetriever.HeadersPool { - return phm.headers -} - -// MiniBlocks - -func (phm *PoolsHolder) MiniBlocks() storage.Cacher { - return phm.miniBlocks -} - -// PeerChangesBlocks - -func (phm *PoolsHolder) PeerChangesBlocks() storage.Cacher { - return phm.peerChangesBlocks -} - -// SetTransactions - -func (phm *PoolsHolder) SetTransactions(transactions dataRetriever.ShardedDataCacherNotifier) { - phm.transactions = transactions -} - -// SetUnsignedTransactions - -func (phm *PoolsHolder) SetUnsignedTransactions(scrs dataRetriever.ShardedDataCacherNotifier) { - phm.unsignedTransactions = scrs -} - -// TrieNodes - -func (phm *PoolsHolder) TrieNodes() storage.Cacher { - return phm.trieNodes -} - -// IsInterfaceNil returns true if there is no value under the interface -func (phm *PoolsHolder) IsInterfaceNil() bool { - return phm == nil -} diff --git a/epochStart/bootstrap/process.go b/epochStart/bootstrap/process.go index 6a2f4b6f039..6741797797d 100644 --- a/epochStart/bootstrap/process.go +++ b/epochStart/bootstrap/process.go @@ -3,32 +3,33 @@ package bootstrap import ( "encoding/hex" "errors" - "math" "time" "github.com/ElrondNetwork/elrond-go/config" "github.com/ElrondNetwork/elrond-go/core" - "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/core/partitioning" "github.com/ElrondNetwork/elrond-go/crypto" + "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/data/trie" trieFactory "github.com/ElrondNetwork/elrond-go/data/trie/factory" "github.com/ElrondNetwork/elrond-go/data/typeConverters/uint64ByteSlice" "github.com/ElrondNetwork/elrond-go/dataRetriever" - factory2 "github.com/ElrondNetwork/elrond-go/dataRetriever/factory" + factoryDataPool "github.com/ElrondNetwork/elrond-go/dataRetriever/factory" "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/containers" "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/resolverscontainer" "github.com/ElrondNetwork/elrond-go/dataRetriever/requestHandlers" + "github.com/ElrondNetwork/elrond-go/epochStart" "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/disabled" - factory3 "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/factory" + factoryInterceptors 
"github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/factory" "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/storagehandler" "github.com/ElrondNetwork/elrond-go/hashing" "github.com/ElrondNetwork/elrond-go/logger" "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/p2p" "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/economics" "github.com/ElrondNetwork/elrond-go/process/factory" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/storage" @@ -40,9 +41,7 @@ import ( var log = logger.GetOrCreate("epochStart/bootstrap") -const delayAfterRequesting = 1 * time.Second -const thresholdForConsideringMetaBlockCorrect = 0.2 -const maxNumTimesToRetry = 100 +const timeToWait = 5 * time.Second // ComponentsNeededForBootstrap holds the components which need to be initialized from network type ComponentsNeededForBootstrap struct { @@ -58,157 +57,83 @@ type ComponentsNeededForBootstrap struct { // epochStartBootstrap will handle requesting the needed data to start when joining late the network type epochStartBootstrap struct { - publicKey crypto.PublicKey - marshalizer marshal.Marshalizer - hasher hashing.Hasher - messenger p2p.Messenger - generalConfig config.Config - economicsConfig config.EconomicsConfig - pathManager PathManagerHandler - nodesConfigProvider NodesConfigProviderHandler - epochStartMetaBlockInterceptor EpochStartMetaBlockInterceptorHandler - metaBlockInterceptor MetaBlockInterceptorHandler - shardHeaderInterceptor ShardHeaderInterceptorHandler - miniBlockInterceptor MiniBlockInterceptorHandler - singleSigner crypto.SingleSigner - blockSingleSigner crypto.SingleSigner - keyGen crypto.KeyGenerator - blockKeyGen crypto.KeyGenerator - requestHandler process.RequestHandler - whiteListHandler dataRetriever.WhiteListHandler - shardCoordinator sharding.Coordinator - genesisNodesConfig *sharding.NodesSetup - workingDir string - defaultDBPath string - defaultEpochString string - - dataPool dataRetriever.PoolsHolder - computedEpoch uint32 + publicKey crypto.PublicKey + marshalizer marshal.Marshalizer + hasher hashing.Hasher + messenger p2p.Messenger + generalConfig config.Config + economicsData *economics.EconomicsData + singleSigner crypto.SingleSigner + blockSingleSigner crypto.SingleSigner + keyGen crypto.KeyGenerator + blockKeyGen crypto.KeyGenerator + requestHandler process.RequestHandler + whiteListHandler update.WhiteListHandler + shardCoordinator sharding.Coordinator + genesisNodesConfig *sharding.NodesSetup + workingDir string + defaultDBPath string + defaultEpochString string + + interceptorContainer process.InterceptorsContainer + dataPool dataRetriever.PoolsHolder + computedEpoch uint32 + + miniBlocksSyncer epochStart.PendingMiniBlocksSyncHandler + headersSyncer epochStart.HeadersByHashSyncer + epochStartMetaBlockSyncer epochStart.StartOfEpochMetaSyncer + + baseData baseDataInStorage +} + +type baseDataInStorage struct { + shardId uint32 + lastRound uint64 + lastEpoch uint32 } // ArgsEpochStartBootstrap holds the arguments needed for creating an epoch start data provider component type ArgsEpochStartBootstrap struct { - PublicKey crypto.PublicKey - Messenger p2p.Messenger - Marshalizer marshal.Marshalizer - Hasher hashing.Hasher - GeneralConfig config.Config - EconomicsConfig config.EconomicsConfig - GenesisShardCoordinator sharding.Coordinator - PathManager PathManagerHandler - NodesConfigProvider NodesConfigProviderHandler - EpochStartMetaBlockInterceptor 
EpochStartMetaBlockInterceptorHandler
-	SingleSigner                   crypto.SingleSigner
-	BlockSingleSigner              crypto.SingleSigner
-	KeyGen                         crypto.KeyGenerator
-	BlockKeyGen                    crypto.KeyGenerator
-	WhiteListHandler               dataRetriever.WhiteListHandler
-	GenesisNodesConfig             *sharding.NodesSetup
-	WorkingDir                     string
-	DefaultDBPath                  string
-	DefaultEpochString             string
+	PublicKey               crypto.PublicKey
+	Messenger               p2p.Messenger
+	Marshalizer             marshal.Marshalizer
+	Hasher                  hashing.Hasher
+	GeneralConfig           config.Config
+	EconomicsConfig         config.EconomicsConfig
+	GenesisShardCoordinator sharding.Coordinator
+	SingleSigner            crypto.SingleSigner
+	BlockSingleSigner       crypto.SingleSigner
+	KeyGen                  crypto.KeyGenerator
+	BlockKeyGen             crypto.KeyGenerator
+	WhiteListHandler        update.WhiteListHandler
+	GenesisNodesConfig      *sharding.NodesSetup
+	WorkingDir              string
+	DefaultDBPath           string
+	DefaultEpochString      string
 }
 
 // NewEpochStartBootstrap will return a new instance of epochStartBootstrap
 func NewEpochStartBootstrapHandler(args ArgsEpochStartBootstrap) (*epochStartBootstrap, error) {
-	if check.IfNil(args.PublicKey) {
-		return nil, ErrNilPublicKey
-	}
-	if check.IfNil(args.Messenger) {
-		return nil, ErrNilMessenger
-	}
-	if check.IfNil(args.Marshalizer) {
-		return nil, ErrNilMarshalizer
-	}
-	if check.IfNil(args.Hasher) {
-		return nil, ErrNilHasher
-	}
-	if check.IfNil(args.PathManager) {
-		return nil, ErrNilPathManager
-	}
-	if check.IfNil(args.NodesConfigProvider) {
-		return nil, ErrNilNodesConfigProvider
-	}
-	if check.IfNil(args.EpochStartMetaBlockInterceptor) {
-		return nil, ErrNilEpochStartMetaBlockInterceptor
-	}
-	if check.IfNil(args.WhiteListHandler) {
-		return nil, ErrNilWhiteListHandler
-	}
-	if check.IfNil(args.DefaultShardCoordinator) {
-		return nil, ErrNilDefaultShardCoordinator
-	}
-	if check.IfNil(args.BlockKeyGen) {
-		return nil, ErrNilBlockKeyGen
-	}
-	if check.IfNil(args.KeyGen) {
-		return nil, ErrNilKeyGen
-	}
-	if check.IfNil(args.SingleSigner) {
-		return nil, ErrNilSingleSigner
-	}
-	if check.IfNil(args.BlockSingleSigner) {
-		return nil, ErrNilBlockSingleSigner
-	}
-
 	epochStartProvider := &epochStartBootstrap{
-		publicKey:                      args.PublicKey,
-		marshalizer:                    args.Marshalizer,
-		hasher:                         args.Hasher,
-		messenger:                      args.Messenger,
-		generalConfig:                  args.GeneralConfig,
-		economicsConfig:                args.EconomicsConfig,
-		pathManager:                    args.PathManager,
-		nodesConfigProvider:            args.NodesConfigProvider,
-		epochStartMetaBlockInterceptor: args.EpochStartMetaBlockInterceptor,
-		metaBlockInterceptor:           args.MetaBlockInterceptor,
-		shardHeaderInterceptor:         args.ShardHeaderInterceptor,
-		miniBlockInterceptor:           args.MiniBlockInterceptor,
-		whiteListHandler:               args.WhiteListHandler,
-		genesisNodesConfig:             args.GenesisNodesConfig,
-		workingDir:                     args.WorkingDir,
-		defaultEpochString:             args.DefaultEpochString,
-		defaultDBPath:                  args.DefaultEpochString,
-		keyGen:                         args.KeyGen,
-		blockKeyGen:                    args.BlockKeyGen,
-		singleSigner:                   args.SingleSigner,
-		blockSingleSigner:              args.BlockSingleSigner,
+		publicKey:            args.PublicKey,
+		marshalizer:          args.Marshalizer,
+		hasher:               args.Hasher,
+		messenger:            args.Messenger,
+		generalConfig:        args.GeneralConfig,
+		whiteListHandler:     args.WhiteListHandler,
+		genesisNodesConfig:   args.GenesisNodesConfig,
+		workingDir:           args.WorkingDir,
+		defaultEpochString:   args.DefaultEpochString,
+		defaultDBPath:        args.DefaultDBPath,
+		keyGen:               args.KeyGen,
+		blockKeyGen:          args.BlockKeyGen,
+		singleSigner:         args.SingleSigner,
+		blockSingleSigner:    args.BlockSingleSigner,
 	}
 
 	return epochStartProvider, nil
 }
 
-func (e *epochStartBootstrap) initInternalComponents() error {
-	
var err error - e.shardCoordinator, err = sharding.NewMultiShardCoordinator(e.genesisNodesConfig.NumberOfShards(), core.MetachainShardId) - if err != nil { - return err - } - - err = e.initTopicsAndInterceptors() - if err != nil { - return err - } - defer func() { - e.resetTopicsAndInterceptors() - }() - - err = e.createRequestHandler() - if err != nil { - return err - } - - e.dataPool, err = factory2.NewDataPoolFromConfig( - factory2.ArgsDataPool{ - Config: &e.generalConfig, - EconomicsData: e.economicsData, - ShardCoordinator: e.shardCoordinator, - }, - ) - - return nil -} - func (e *epochStartBootstrap) searchDataInLocalStorage() { currentEpoch, errNotCritical := storageFactory.FindLastEpochFromStorage( e.workingDir, @@ -221,6 +146,7 @@ func (e *epochStartBootstrap) searchDataInLocalStorage() { } log.Debug("current epoch from the storage : ", "epoch", currentEpoch) + // TODO: write gathered data in baseDataInStorage } func (e *epochStartBootstrap) isStartInEpochZero() bool { @@ -242,11 +168,6 @@ func (e *epochStartBootstrap) prepareEpochZero() (uint32, uint32, uint32, error) return currentEpoch, e.shardCoordinator.SelfId(), e.shardCoordinator.NumberOfShards(), nil } -func (e *epochStartBootstrap) isCurrentEpochSavedInStorage() bool { - // TODO: implement - return true -} - func (e *epochStartBootstrap) requestDataFromNetwork() { } @@ -273,46 +194,119 @@ func (e *epochStartBootstrap) Bootstrap() (uint32, uint32, uint32, error) { return e.prepareEpochZero() } -} + e.computeMostProbableEpoch() + e.searchDataInLocalStorage() -// Bootstrap will handle requesting and receiving the needed information the node will bootstrap from -func (e *epochStartBootstrap) requestAndProcessing() (uint32, uint32, uint32, error) { - // TODO: add searching for epoch start metablock and other data inside this component + isCurrentEpochSaved := e.baseData.lastEpoch+1 >= e.computedEpoch + if isCurrentEpochSaved { + return e.prepareEpochFromStorage() + } - err := e.initInternalComponents() + err := e.prepareComponentsToSyncFromNetwork() if err != nil { - return nil, err + return 0, 0, 0, err } - interceptorsContainer, err := e.createInterceptors(commonDataPool) - if err != nil || interceptorsContainer == nil { - return nil, err + return e.requestAndProcessing() +} + +func (e *epochStartBootstrap) prepareEpochFromStorage() (uint32, uint32, uint32, error) { + return 0, 0, 0, nil +} + +func (e *epochStartBootstrap) prepareComponentsToSyncFromNetwork() error { + var err error + e.shardCoordinator, err = sharding.NewMultiShardCoordinator(e.genesisNodesConfig.NumberOfShards(), core.MetachainShardId) + if err != nil { + return err } - miniBlocksSyncer, err := e.getMiniBlockSyncer(commonDataPool.MiniBlocks()) + err = e.createRequestHandler() if err != nil { - return nil, err + return err } - missingHeadersSyncer, err := e.getHeaderHandlerSyncer(commonDataPool.Headers()) + e.dataPool, err = factoryDataPool.NewDataPoolFromConfig( + factoryDataPool.ArgsDataPool{ + Config: &e.generalConfig, + EconomicsData: e.economicsData, + ShardCoordinator: e.shardCoordinator, + }, + ) + + args := factoryInterceptors.ArgsEpochStartInterceptorContainer{ + Config: e.generalConfig, + ShardCoordinator: e.shardCoordinator, + Marshalizer: e.marshalizer, + Hasher: e.hasher, + Messenger: e.messenger, + DataPool: e.dataPool, + SingleSigner: e.singleSigner, + BlockSingleSigner: e.blockSingleSigner, + KeyGen: e.keyGen, + BlockKeyGen: e.blockKeyGen, + WhiteListHandler: e.whiteListHandler, + } + + e.interceptorContainer, err = 
factoryInterceptors.NewEpochStartInterceptorsContainer(args) if err != nil { - return nil, err + return err } - epochNumForRequestingTheLatestAvailable := uint32(math.MaxUint32) - metaBlock, err := e.getEpochStartMetaBlock(epochNumForRequestingTheLatestAvailable) + // TODO epochStart meta syncer + e.epochStartMetaBlockSyncer = NewEpochStartmetaBlockSyncer() + + syncMiniBlocksArgs := sync.ArgsNewPendingMiniBlocksSyncer{ + Storage: &disabled.Storer{}, + Cache: e.dataPool.MiniBlocks(), + Marshalizer: e.marshalizer, + RequestHandler: e.requestHandler, + } + e.miniBlocksSyncer, err = sync.NewPendingMiniBlocksSyncer(syncMiniBlocksArgs) + + syncMissingHeadersArgs := sync.ArgsNewMissingHeadersByHashSyncer{ + Storage: &disabled.Storer{}, + Cache: e.dataPool.Headers(), + Marshalizer: e.marshalizer, + RequestHandler: e.requestHandler, + } + e.headersSyncer, err = sync.NewMissingheadersByHashSyncer(syncMissingHeadersArgs) + + return nil +} + +func (e *epochStartBootstrap) syncHeadersFrom(meta *block.MetaBlock) (map[string]data.HeaderHandler, error) { + hashesToRequest := make([][]byte, 0, len(meta.EpochStart.LastFinalizedHeaders)+1) + shardIds := make([]uint32, 0, len(meta.EpochStart.LastFinalizedHeaders)+1) + + for _, epochStartData := range meta.EpochStart.LastFinalizedHeaders { + hashesToRequest = append(hashesToRequest, epochStartData.HeaderHash) + shardIds = append(shardIds, epochStartData.ShardID) + } + + hashesToRequest = append(hashesToRequest, meta.EpochStart.Economics.PrevEpochStartHash) + shardIds = append(shardIds, core.MetachainShardId) + + err := e.headersSyncer.SyncMissingHeadersByHash(shardIds, hashesToRequest, timeToWait) if err != nil { return nil, err } - prevMetaBlock, err := e.getMetaBlock(missingHeadersSyncer, metaBlock.EpochStart.Economics.PrevEpochStartHash) + return e.headersSyncer.GetHeaders() +} + +// Bootstrap will handle requesting and receiving the needed information the node will bootstrap from +func (e *epochStartBootstrap) requestAndProcessing() (uint32, uint32, uint32, error) { + metaBlock, err := e.epochStartMetaBlockSyncer.SyncEpochStartMeta(timeToWait) if err != nil { - return nil, err + return 0, 0, 0, err } - e.changeMessageProcessorsForMetaBlocks() + headers, err := e.syncHeadersFrom(metaBlock) + if err != nil { + return 0, 0, 0, err + } - log.Info("previous meta block", "epoch", prevMetaBlock.Epoch) nodesConfig, err := e.nodesConfigProvider.GetNodesConfigForMetaBlock(metaBlock) if err != nil { return nil, err @@ -406,71 +400,6 @@ func (e *epochStartBootstrap) requestAndProcessing() (uint32, uint32, uint32, er return components, nil } -func (e *epochStartBootstrap) getMiniBlockSyncer(dataPool storage.Cacher) (update.EpochStartPendingMiniBlocksSyncHandler, error) { - syncMiniBlocksArgs := sync.ArgsNewPendingMiniBlocksSyncer{ - Storage: &disabled.Storer{}, - Cache: dataPool, - Marshalizer: e.marshalizer, - RequestHandler: e.requestHandler, - } - return sync.NewPendingMiniBlocksSyncer(syncMiniBlocksArgs) -} - -func (e *epochStartBootstrap) getHeaderHandlerSyncer(pool dataRetriever.HeadersPool) (update.MissingHeadersByHashSyncer, error) { - syncMissingHeadersArgs := sync.ArgsNewMissingHeadersByHashSyncer{ - Storage: &disabled.Storer{}, - Cache: pool, - Marshalizer: e.marshalizer, - RequestHandler: e.requestHandler, - } - return sync.NewMissingheadersByHashSyncer(syncMissingHeadersArgs) -} - -func (e *epochStartBootstrap) getMiniBlocks( - handler update.EpochStartPendingMiniBlocksSyncHandler, - pendingMiniBlocks []block.ShardMiniBlockHeader, - shardID uint32, -) 
(map[string]*block.MiniBlock, error) { - - waitTime := 1 * time.Minute - err := handler.SyncPendingMiniBlocksForEpochStart(pendingMiniBlocks, waitTime) - if err != nil { - return nil, err - } - - return handler.GetMiniBlocks() -} - -func (e *epochStartBootstrap) createInterceptors(dataPool dataRetriever.PoolsHolder) (process.InterceptorsContainer, error) { - args := factory3.ArgsEpochStartInterceptorContainer{ - Config: e.generalConfig, - ShardCoordinator: e.defaultShardCoordinator, - Marshalizer: e.marshalizer, - Hasher: e.hasher, - Messenger: e.messenger, - DataPool: dataPool, - SingleSigner: e.singleSigner, - BlockSingleSigner: e.blockSingleSigner, - KeyGen: e.keyGen, - BlockKeyGen: e.blockKeyGen, - WhiteListHandler: e.whiteListHandler, - } - - return factory3.NewEpochStartInterceptorsContainer(args) -} - -func (e *epochStartBootstrap) changeMessageProcessorsForMetaBlocks() { - err := e.messenger.UnregisterMessageProcessor(factory.MetachainBlocksTopic) - if err != nil { - log.Info("error unregistering message processor", "error", err) - } - - err = e.messenger.RegisterMessageProcessor(factory.MetachainBlocksTopic, e.metaBlockInterceptor) - if err != nil { - log.Info("error unregistering message processor", "error", err) - } -} - func (e *epochStartBootstrap) createRequestHandler() error { dataPacker, err := partitioning.NewSimpleDataPacker(e.marshalizer) if err != nil { @@ -483,7 +412,6 @@ func (e *epochStartBootstrap) createRequestHandler() error { }, } - cacher := disabled.NewDisabledPoolsHolder() triesHolder := state.NewDataTriesHolder() stateTrieStorageManager, err := trie.NewTrieStorageManagerWithoutPruning(disabled.NewDisabledStorer()) @@ -512,7 +440,7 @@ func (e *epochStartBootstrap) createRequestHandler() error { Messenger: e.messenger, Store: storageService, Marshalizer: e.marshalizer, - DataPools: cacher, + DataPools: e.dataPool, Uint64ByteSliceConverter: uint64ByteSlice.NewBigEndianConverter(), DataPacker: dataPacker, TriesContainer: triesHolder, @@ -542,27 +470,6 @@ func (e *epochStartBootstrap) createRequestHandler() error { return err } -func (e *epochStartBootstrap) getMiniBlock(miniBlockHeader *block.ShardMiniBlockHeader) (*block.MiniBlock, error) { - e.requestMiniBlock(miniBlockHeader) - - time.Sleep(delayAfterRequesting) - - for { - numConnectedPeers := len(e.messenger.Peers()) - threshold := int(thresholdForConsideringMetaBlockCorrect * float64(numConnectedPeers)) - mb, errConsensusNotReached := e.miniBlockInterceptor.GetMiniBlock(miniBlockHeader.Hash, threshold) - if errConsensusNotReached == nil { - return mb, nil - } - log.Info("consensus not reached for epoch start meta block. 
re-requesting and trying again...") - e.requestMiniBlock(miniBlockHeader) - } -} - -func (e *epochStartBootstrap) requestMiniBlock(miniBlockHeader *block.ShardMiniBlockHeader) { - e.requestHandler.RequestMiniBlock(miniBlockHeader.ReceiverShardID, miniBlockHeader.Hash) -} - func (e *epochStartBootstrap) getCurrentEpochStartData( shardCoordinator sharding.epochStartBootstrap, metaBlock *block.MetaBlock, @@ -577,27 +484,6 @@ func (e *epochStartBootstrap) getCurrentEpochStartData( return nil, errors.New("not found") } -func (e *epochStartBootstrap) initTopicForEpochStartMetaBlockInterceptor() error { - err := e.messenger.UnregisterMessageProcessor(factory.MetachainBlocksTopic) - if err != nil { - log.Info("error unregistering message processor", "error", err) - return err - } - - err = e.messenger.CreateTopic(factory.MetachainBlocksTopic, true) - if err != nil { - log.Info("error registering message processor", "error", err) - return err - } - - err = e.messenger.RegisterMessageProcessor(factory.MetachainBlocksTopic, e.epochStartMetaBlockInterceptor) - if err != nil { - return err - } - - return nil -} - func (e *epochStartBootstrap) getShardID(nodesConfig *sharding.NodesSetup) (uint32, error) { pubKeyBytes, err := e.publicKey.ToByteArray() if err != nil { @@ -615,89 +501,6 @@ func (e *epochStartBootstrap) getShardID(nodesConfig *sharding.NodesSetup) (uint return 0, nil } -func (e *epochStartBootstrap) getTrieFromRootHash(_ []byte) (state.TriesHolder, error) { - // TODO: get trie from trie syncer - return state.NewDataTriesHolder(), nil -} - -func (e *epochStartBootstrap) resetTopicsAndInterceptors() { - err := e.messenger.UnregisterMessageProcessor(factory.MetachainBlocksTopic) - if err != nil { - log.Info("error unregistering message processors", "error", err) - } -} - -func (e *epochStartBootstrap) getMetaBlock(syncer update.MissingHeadersByHashSyncer, hash []byte) (*block.MetaBlock, error) { - //e.requestMetaBlock(hash) - // - //time.Sleep(delayAfterRequesting) - // - //for { - // numConnectedPeers := len(e.messenger.Peers()) - // threshold := int(thresholdForConsideringMetaBlockCorrect * float64(numConnectedPeers)) - // mb, errConsensusNotReached := e.metaBlockInterceptor.GetMetaBlock(hash, threshold) - // if errConsensusNotReached == nil { - // return mb, nil - // } - // log.Info("consensus not reached for meta block. re-requesting and trying again...") - // e.requestMetaBlock(hash) - //} - waitTime := 1 * time.Minute - err := syncer.SyncMissingHeadersByHash(e.defaultShardCoordinator.SelfId(), [][]byte{hash}, waitTime) - if err != nil { - return nil, err - } - - hdrs, err := syncer.GetHeaders() - if err != nil { - return nil, err - } - - syncer.ClearFields() - - return hdrs[string(hash)].(*block.MetaBlock), nil -} - -func (e *epochStartBootstrap) getEpochStartMetaBlock(epoch uint32) (*block.MetaBlock, error) { - err := e.initTopicForEpochStartMetaBlockInterceptor() - if err != nil { - return nil, err - } - defer func() { - e.resetTopicsAndInterceptors() - }() - - e.requestEpochStartMetaBlock(epoch) - - time.Sleep(delayAfterRequesting) - count := 0 - - for { - if count > maxNumTimesToRetry { - panic("can't sync with other peers") - } - count++ - numConnectedPeers := len(e.messenger.Peers()) - threshold := int(thresholdForConsideringMetaBlockCorrect * float64(numConnectedPeers)) - mb, errConsensusNotReached := e.epochStartMetaBlockInterceptor.GetEpochStartMetaBlock(threshold, epoch) - if errConsensusNotReached == nil { - return mb, nil - } - log.Info("consensus not reached for meta block. 
re-requesting and trying again...") - e.requestEpochStartMetaBlock(epoch) - } -} - -func (e *epochStartBootstrap) getShardCoordinator(metaBlock *block.MetaBlock, nodesConfig *sharding.NodesSetup) (sharding.epochStartBootstrap, error) { - shardID, err := e.getShardID(nodesConfig) - if err != nil { - return nil, err - } - - numOfShards := len(metaBlock.EpochStart.LastFinalizedHeaders) - return sharding.NewMultiShardCoordinator(uint32(numOfShards), shardID) -} - func (e *epochStartBootstrap) getShardHeaders( syncer update.MissingHeadersByHashSyncer, metaBlock *block.MetaBlock, @@ -743,27 +546,6 @@ func (e *epochStartBootstrap) getShardHeaders( return headersMap, nil } -func (e *epochStartBootstrap) getShardHeader( - syncer update.MissingHeadersByHashSyncer, - hash []byte, - shardID uint32, -) (*block.Header, error) { - waitTime := 1 * time.Minute - err := syncer.SyncMissingHeadersByHash(e.defaultShardCoordinator.SelfId(), [][]byte{hash}, waitTime) - if err != nil { - return nil, err - } - - hdrs, err := syncer.GetHeaders() - if err != nil { - return nil, err - } - - syncer.ClearFields() - - return hdrs[string(hash)].(*block.Header), nil -} - // IsInterfaceNil returns true if there is no value under the interface func (e *epochStartBootstrap) IsInterfaceNil() bool { return e == nil diff --git a/epochStart/bootstrap/simpleEpochStartMetaBlockInterceptor.go b/epochStart/bootstrap/simpleEpochStartMetaBlockInterceptor.go index 051e5bdc6db..f53478c05f7 100644 --- a/epochStart/bootstrap/simpleEpochStartMetaBlockInterceptor.go +++ b/epochStart/bootstrap/simpleEpochStartMetaBlockInterceptor.go @@ -8,6 +8,7 @@ import ( "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/epochStart" "github.com/ElrondNetwork/elrond-go/hashing" "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/p2p" @@ -28,10 +29,10 @@ type simpleEpochStartMetaBlockInterceptor struct { // NewSimpleEpochStartMetaBlockInterceptor will return a new instance of simpleEpochStartMetaBlockInterceptor func NewSimpleEpochStartMetaBlockInterceptor(marshalizer marshal.Marshalizer, hasher hashing.Hasher) (*simpleEpochStartMetaBlockInterceptor, error) { if check.IfNil(marshalizer) { - return nil, ErrNilMarshalizer + return nil, epochStart.ErrNilMarshalizer } if check.IfNil(hasher) { - return nil, ErrNilHasher + return nil, epochStart.ErrNilHasher } return &simpleEpochStartMetaBlockInterceptor{ @@ -106,7 +107,7 @@ func (s *simpleEpochStartMetaBlockInterceptor) GetEpochStartMetaBlock(target int s.mutReceivedMetaBlocks.RUnlock() } - return nil, ErrNumTriesExceeded + return nil, epochStart.ErrNumTriesExceeded } func (s *simpleEpochStartMetaBlockInterceptor) isMapEntryOk( diff --git a/epochStart/bootstrap/syncEpochStartMeta.go b/epochStart/bootstrap/syncEpochStartMeta.go new file mode 100644 index 00000000000..199245d0a66 --- /dev/null +++ b/epochStart/bootstrap/syncEpochStartMeta.go @@ -0,0 +1,73 @@ +package bootstrap + +import ( + "math" + "time" + + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/epochStart" + "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/process/factory" + "github.com/ElrondNetwork/elrond-go/storage" +) + +type epochStartMetaSyncer struct { + requestHandler epochStart.RequestHandler + messenger p2p.Messenger + metaBlockPool storage.Cacher +} + +func NewEpochStartMetaSyncer() (*epochStartMetaSyncer, error) { + 
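// NOTE: this constructor is still a stub: it returns an empty struct, so the
// requestHandler, messenger and metaBlockPool fields are nil and
// SyncEpochStartMeta would dereference nil on first use. A sketch of the
// wiring it will presumably need (the argument list is an assumption, not
// part of this patch):
//
//	func NewEpochStartMetaSyncer(
//		requestHandler epochStart.RequestHandler,
//		messenger p2p.Messenger,
//		metaBlockPool storage.Cacher,
//	) (*epochStartMetaSyncer, error) {
//		if check.IfNil(messenger) {
//			return nil, epochStart.ErrNilMessenger
//		}
//		return &epochStartMetaSyncer{
//			requestHandler: requestHandler,
//			messenger:      messenger,
//			metaBlockPool:  metaBlockPool,
//		}, nil
//	}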
return &epochStartMetaSyncer{}, nil +} + +// SyncEpochStartMeta syncs the latest epoch start metablock +func (e *epochStartMetaSyncer) SyncEpochStartMeta(waitTime time.Duration) (*block.MetaBlock, error) { + err := e.initTopicForEpochStartMetaBlockInterceptor() + if err != nil { + return nil, err + } + defer func() { + e.resetTopicsAndInterceptors() + }() + + unknownEpoch := uint32(math.MaxUint32) + e.requestHandler.RequestStartOfEpochMetaBlock(unknownEpoch) + + // TODO: implement waitTime and consensus + + return nil, nil +} + +func (e *epochStartMetaSyncer) resetTopicsAndInterceptors() { + err := e.messenger.UnregisterMessageProcessor(factory.MetachainBlocksTopic) + if err != nil { + log.Info("error unregistering message processors", "error", err) + } +} + +func (e *epochStartMetaSyncer) initTopicForEpochStartMetaBlockInterceptor() error { + err := e.messenger.UnregisterMessageProcessor(factory.MetachainBlocksTopic) + if err != nil { + log.Info("error unregistering message processor", "error", err) + return err + } + + err = e.messenger.CreateTopic(factory.MetachainBlocksTopic, true) + if err != nil { + log.Info("error registering message processor", "error", err) + return err + } + + err = e.messenger.RegisterMessageProcessor(factory.MetachainBlocksTopic, e.epochStartMetaBlockInterceptor) + if err != nil { + return err + } + + return nil +} + +// IsInterfaceNil returns true if underlying object is nil +func (e *epochStartMetaSyncer) IsInterfaceNil() bool { + return e == nil +} diff --git a/epochStart/interface.go b/epochStart/interface.go index 268d3508a86..72e48a26952 100644 --- a/epochStart/interface.go +++ b/epochStart/interface.go @@ -4,6 +4,7 @@ import ( "time" "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" ) // TriggerHandler defines the functionalities for an start of epoch trigger @@ -78,3 +79,31 @@ type ValidatorStatisticsProcessorHandler interface { Process(info data.ValidatorInfoHandler) error IsInterfaceNil() bool } + +// HeadersByHashSyncer defines the methods to sync all missing headers by hash +type HeadersByHashSyncer interface { + SyncMissingHeadersByHash(shardIDs []uint32, headersHashes [][]byte, waitTime time.Duration) error + GetHeaders() (map[string]data.HeaderHandler, error) + ClearFields() + IsInterfaceNil() bool +} + +// PendingMiniBlocksSyncHandler defines the methods to sync all pending miniblocks +type PendingMiniBlocksSyncHandler interface { + SyncPendingMiniBlocks(miniBlockHeaders []block.ShardMiniBlockHeader, waitTime time.Duration) error + GetMiniBlocks() (map[string]*block.MiniBlock, error) + IsInterfaceNil() bool +} + +// AccountsDBSyncer defines the methods for the accounts db syncer +type AccountsDBSyncer interface { + GetSyncedTries() map[string]data.Trie + SyncAccounts(rootHash []byte) error + IsInterfaceNil() bool +} + +// StartOfEpochMetaSyncer defines the methods to synchronize epoch start meta block from the network when nothing is known +type StartOfEpochMetaSyncer interface { + SyncEpochStartMeta(waitTime time.Duration) (*block.MetaBlock, error) + IsInterfaceNil() bool +} diff --git a/update/interface.go b/update/interface.go index 85c1d2adba1..6973207121a 100644 --- a/update/interface.go +++ b/update/interface.go @@ -137,7 +137,7 @@ type PendingTransactionsSyncHandler interface { // MissingHeadersByHashSyncer defines the methods to sync all missing headers by hash type MissingHeadersByHashSyncer interface { - SyncMissingHeadersByHash(shardID uint32, headersHashes [][]byte, waitTime time.Duration) 
error + SyncMissingHeadersByHash(shardIDs []uint32, headersHashes [][]byte, waitTime time.Duration) error GetHeaders() (map[string]data.HeaderHandler, error) ClearFields() IsInterfaceNil() bool diff --git a/update/sync/syncHeadersByHash.go b/update/sync/syncHeadersByHash.go index d88c7d16c09..a505d2419c6 100644 --- a/update/sync/syncHeadersByHash.go +++ b/update/sync/syncHeadersByHash.go @@ -71,7 +71,7 @@ func NewMissingheadersByHashSyncer(args ArgsNewMissingHeadersByHashSyncer) (*mis // SyncMissingHeadersByHash syncs the missing headers func (m *missingHeadersByHash) SyncMissingHeadersByHash( - shardID uint32, + shardIDs []uint32, headersHashes [][]byte, waitTime time.Duration, ) error { @@ -80,7 +80,7 @@ func (m *missingHeadersByHash) SyncMissingHeadersByHash( requestedMBs := 0 m.mutMissingHdrs.Lock() m.stopSyncing = false - for _, hash := range headersHashes { + for index, hash := range headersHashes { m.mapHashes[string(hash)] = struct{}{} header, ok := m.getHeaderFromPoolOrStorage(hash) if ok { @@ -89,7 +89,7 @@ func (m *missingHeadersByHash) SyncMissingHeadersByHash( } requestedMBs++ - m.requestHandler.RequestShardHeader(shardID, hash) + m.requestHandler.RequestShardHeader(shardIDs[index], hash) } m.mutMissingHdrs.Unlock() diff --git a/update/sync/syncMiniBlocks.go b/update/sync/syncMiniBlocks.go index aea48bbadf1..a8436dd1b17 100644 --- a/update/sync/syncMiniBlocks.go +++ b/update/sync/syncMiniBlocks.go @@ -97,7 +97,7 @@ func (p *pendingMiniBlocks) SyncPendingMiniBlocksFromMeta( } // SyncPendingMiniBlocksForEpochStart will sync the miniblocks for the given epoch start meta block -func (p *pendingMiniBlocks) SyncPendingMiniBlocksForEpochStart( +func (p *pendingMiniBlocks) SyncPendingMiniBlocks( miniBlockHeaders []block.ShardMiniBlockHeader, waitTime time.Duration, ) error { From d3b75822a341d2b13c9ce9ebc93bb3f2195db5c4 Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Mon, 23 Mar 2020 15:49:27 +0200 Subject: [PATCH 30/61] processing changes. 
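The widened SyncMissingHeadersByHash signature above (a []uint32 of shard IDs
instead of a single shardID) lets one call batch headers that live on
different shards; syncHeadersByHash pairs shardIDs[index] with each requested
hash. A minimal usage sketch, assuming a wired syncer and placeholder hashes
that are not part of the patch:

	shardIDs := []uint32{0, core.MetachainShardId}
	hashes := [][]byte{shardHeaderHash, epochStartMetaHash} // placeholder hashes
	if err := headersSyncer.SyncMissingHeadersByHash(shardIDs, hashes, 5*time.Second); err != nil {
		return err
	}
	headers, err := headersSyncer.GetHeaders() // map[string]data.HeaderHandler keyed by header hash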
--- epochStart/bootstrap/process.go | 215 ++++++++---------- .../storagehandler/metaStorageHandler.go | 10 +- .../storagehandler/shardStorageHandler.go | 8 +- 3 files changed, 113 insertions(+), 120 deletions(-) diff --git a/epochStart/bootstrap/process.go b/epochStart/bootstrap/process.go index 6741797797d..43d6e191bd4 100644 --- a/epochStart/bootstrap/process.go +++ b/epochStart/bootstrap/process.go @@ -30,7 +30,6 @@ import ( "github.com/ElrondNetwork/elrond-go/p2p" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/economics" - "github.com/ElrondNetwork/elrond-go/process/factory" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/storage" storageFactory "github.com/ElrondNetwork/elrond-go/storage/factory" @@ -47,7 +46,7 @@ const timeToWait = 5 * time.Second type ComponentsNeededForBootstrap struct { EpochStartMetaBlock *block.MetaBlock PreviousEpochStartMetaBlock *block.MetaBlock - ShardHeader *block.Header //only for shards, nil for meta + ShardHeader *block.Header NodesConfig *sharding.NodesSetup ShardHeaders map[uint32]*block.Header ShardCoordinator sharding.Coordinator @@ -168,10 +167,6 @@ func (e *epochStartBootstrap) prepareEpochZero() (uint32, uint32, uint32, error) return currentEpoch, e.shardCoordinator.SelfId(), e.shardCoordinator.NumberOfShards(), nil } -func (e *epochStartBootstrap) requestDataFromNetwork() { - -} - func (e *epochStartBootstrap) saveGatheredDataToStorage() { } @@ -211,6 +206,7 @@ func (e *epochStartBootstrap) Bootstrap() (uint32, uint32, uint32, error) { } func (e *epochStartBootstrap) prepareEpochFromStorage() (uint32, uint32, uint32, error) { + // TODO: compute self shard ID for current epoch return 0, 0, 0, nil } @@ -302,61 +298,125 @@ func (e *epochStartBootstrap) requestAndProcessing() (uint32, uint32, uint32, er return 0, 0, 0, err } + numShards := uint32(len(metaBlock.EpochStart.LastFinalizedHeaders)) + headers, err := e.syncHeadersFrom(metaBlock) if err != nil { return 0, 0, 0, err } - nodesConfig, err := e.nodesConfigProvider.GetNodesConfigForMetaBlock(metaBlock) + prevEpochStartMetaHash := metaBlock.EpochStart.Economics.PrevEpochStartHash + prevEpochStartMeta, ok := headers[string(prevEpochStartMetaHash)].(*block.MetaBlock) + if !ok { + return 0, 0, 0, epochStart.ErrWrongTypeAssertion + } + + nodesConfigForCurrEpoch, err := e.nodesConfigProvider.GetNodesConfigForMetaBlock(metaBlock) if err != nil { - return nil, err + return 0, 0, 0, err } - e.shardCoordinator, err = e.getShardCoordinator(metaBlock, nodesConfig) + nodesConfigForPrevEpoch, err := e.nodesConfigProvider.GetNodesConfigForMetaBlock(prevEpochStartMeta) if err != nil { - return nil, err + return 0, 0, 0, err } - shardHeaders, err := e.getShardHeaders(missingHeadersSyncer, metaBlock, nodesConfig, shardCoordinator) + selfShardId, err := e.getShardID(nodesConfigForCurrEpoch) if err != nil { - log.Debug("shard headers not found", "error", err) + return 0, 0, 0, err + } + + e.shardCoordinator, err = sharding.NewMultiShardCoordinator(numShards, selfShardId) + if err != nil { + return 0, 0, 0, err } - var shardHeaderForShard *block.Header - if e.shardCoordinator.SelfId() < e.shardCoordinator.NumberOfShards() { - shardHeaderForShard = shardHeaders[e.shardCoordinator.SelfId()] + if e.shardCoordinator.SelfId() == core.MetachainShardId { + return e.requestAndProcessForShard(e.shardCoordinator.SelfId()) } - epochStartData, err := e.getCurrentEpochStartData(e.shardCoordinator, metaBlock) + return e.requestAndProcessForMeta() +} + 
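// NOTE: two issues in the routing above: (1) the branch appears inverted --
// the node whose SelfId() equals core.MetachainShardId is sent into
// requestAndProcessForShard while shard nodes fall through to
// requestAndProcessForMeta -- and (2) the call passes a single argument while
// requestAndProcessForShard is declared with (shardId uint32, metaBlock
// *block.MetaBlock). The presumably intended shape:
//
//	if e.shardCoordinator.SelfId() == core.MetachainShardId {
//		return e.requestAndProcessForMeta()
//	}
//	return e.requestAndProcessForShard(e.shardCoordinator.SelfId(), metaBlock)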
+func (e *epochStartBootstrap) requestAndProcessForMeta() error { + // accounts and peer accounts syncer + + components := &ComponentsNeededForBootstrap{ + EpochStartMetaBlock: metaBlock, + PreviousEpochStartMetaBlock: prevEpochStartMeta, + ShardHeader: shardHeaderForShard, + NodesConfig: nodesConfig, + ShardHeaders: shardHeaders, + ShardCoordinator: e.shardCoordinator, + Tries: trieToReturn, + PendingMiniBlocks: pendingMiniBlocks, + } + + storageHandlerComponent, err = storagehandler.NewMetaStorageHandler( + e.generalConfig, + e.shardCoordinator, + e.pathManager, + e.marshalizer, + e.hasher, + metaBlock.Epoch, + ) if err != nil { return nil, err } - pendingMiniBlocks, err := e.getMiniBlocks(miniBlocksSyncer, epochStartData.PendingMiniBlockHeaders, shardCoordinator.SelfId()) + errSavingToStorage := storageHandlerComponent.SaveDataToStorage(*components) + if errSavingToStorage != nil { + return errSavingToStorage + } + + return nil +} + +func (e *epochStartBootstrap) requestAndProcessForShard(shardId uint32, metaBlock *block.MetaBlock) error { + var epochStartData block.EpochStartShardData + found := false + for _, shardData := range metaBlock.EpochStart.LastFinalizedHeaders { + if shardData.ShardID == shardId { + epochStartData = shardData + found = true + break + } + } + if !found { + return epochStart.ErrEpochStartDataForShardNotFound + } + + err := e.miniBlocksSyncer.SyncPendingMiniBlocks(epochStartData.PendingMiniBlockHeaders, timeToWait) if err != nil { - return nil, err + return err } - lastFinalizedMetaBlock, err := e.getMetaBlock(missingHeadersSyncer, epochStartData.LastFinishedMetaBlock) + pendingMiniBlocks, err := e.miniBlocksSyncer.GetMiniBlocks() if err != nil { - return nil, err + return err } - log.Info("received last finalized meta block", "nonce", lastFinalizedMetaBlock.Nonce) - firstPendingMetaBlock, err := e.getMetaBlock(missingHeadersSyncer, epochStartData.FirstPendingMetaBlock) + shardIds := make([]uint32, 0, 2) + hashesToRequest := make([][]byte, 0, 2) + hashesToRequest = append(hashesToRequest, epochStartData.LastFinishedMetaBlock) + hashesToRequest = append(hashesToRequest, epochStartData.FirstPendingMetaBlock) + shardIds = append(shardIds, shardId) + shardIds = append(shardIds, shardId) + + e.headersSyncer.ClearFields() + err = e.headersSyncer.SyncMissingHeadersByHash(shardIds, hashesToRequest, timeToWait) if err != nil { - return nil, err + return err } - log.Info("received first pending meta block", "nonce", firstPendingMetaBlock.Nonce) - trieToReturn, err := e.getTrieFromRootHash(epochStartData.RootHash) + neededHeaders, err := e.headersSyncer.GetHeaders() if err != nil { - return nil, err + return err } - components := &structs.ComponentsNeededForBootstrap{ + components := &ComponentsNeededForBootstrap{ EpochStartMetaBlock: metaBlock, - PreviousEpochStartMetaBlock: prevMetaBlock, + PreviousEpochStartMetaBlock: prevEpochStartMeta, ShardHeader: shardHeaderForShard, NodesConfig: nodesConfig, ShardHeaders: shardHeaders, @@ -365,39 +425,24 @@ func (e *epochStartBootstrap) requestAndProcessing() (uint32, uint32, uint32, er PendingMiniBlocks: pendingMiniBlocks, } - var storageHandlerComponent StorageHandler - if e.shardCoordinator.SelfId() > e.shardCoordinator.NumberOfShards() { - storageHandlerComponent, err = storagehandler.NewMetaStorageHandler( - e.generalConfig, - e.shardCoordinator, - e.pathManager, - e.marshalizer, - e.hasher, - metaBlock.Epoch, - ) - if err != nil { - return nil, err - } - } else { - storageHandlerComponent, err = 
storagehandler.NewShardStorageHandler( - e.generalConfig, - e.shardCoordinator, - e.pathManager, - e.marshalizer, - e.hasher, - metaBlock.Epoch, - ) - if err != nil { - return nil, err - } + storageHandlerComponent, err := storagehandler.NewShardStorageHandler( + e.generalConfig, + e.shardCoordinator, + e.pathManager, + e.marshalizer, + e.hasher, + metaBlock.Epoch, + ) + if err != nil { + return nil, err } errSavingToStorage := storageHandlerComponent.SaveDataToStorage(*components) if errSavingToStorage != nil { - return nil, errSavingToStorage + return errSavingToStorage } - return components, nil + return nil } func (e *epochStartBootstrap) createRequestHandler() error { @@ -470,25 +515,12 @@ func (e *epochStartBootstrap) createRequestHandler() error { return err } -func (e *epochStartBootstrap) getCurrentEpochStartData( - shardCoordinator sharding.epochStartBootstrap, - metaBlock *block.MetaBlock, -) (*block.EpochStartShardData, error) { - shardID := shardCoordinator.SelfId() - for _, epochStartData := range metaBlock.EpochStart.LastFinalizedHeaders { - if epochStartData.ShardID == shardID { - return &epochStartData, nil - } - } - - return nil, errors.New("not found") -} - func (e *epochStartBootstrap) getShardID(nodesConfig *sharding.NodesSetup) (uint32, error) { pubKeyBytes, err := e.publicKey.ToByteArray() if err != nil { return 0, err } + pubKeyStr := hex.EncodeToString(pubKeyBytes) for shardID, nodesPerShard := range nodesConfig.InitialNodesPubKeys() { for _, nodePubKey := range nodesPerShard { @@ -501,51 +533,6 @@ func (e *epochStartBootstrap) getShardID(nodesConfig *sharding.NodesSetup) (uint return 0, nil } -func (e *epochStartBootstrap) getShardHeaders( - syncer update.MissingHeadersByHashSyncer, - metaBlock *block.MetaBlock, - shardCoordinator sharding.epochStartBootstrap, -) (map[uint32]*block.Header, error) { - headersMap := make(map[uint32]*block.Header) - - shardID := shardCoordinator.SelfId() - if shardID == core.MetachainShardId { - for _, entry := range metaBlock.EpochStart.LastFinalizedHeaders { - var hdr *block.Header - hdr, err := e.getShardHeader(syncer, entry.HeaderHash, entry.ShardID) - if err != nil { - return nil, err - } - headersMap[entry.ShardID] = hdr - } - - return headersMap, nil - } - - var entryForShard *block.EpochStartShardData - for _, entry := range metaBlock.EpochStart.LastFinalizedHeaders { - if entry.ShardID == shardID { - entryForShard = &entry - } - } - - if entryForShard == nil { - return nil, ErrShardDataNotFound - } - - hdr, err := e.getShardHeader( - syncer, - entryForShard.HeaderHash, - entryForShard.ShardID, - ) - if err != nil { - return nil, err - } - - headersMap[shardID] = hdr - return headersMap, nil -} - // IsInterfaceNil returns true if there is no value under the interface func (e *epochStartBootstrap) IsInterfaceNil() bool { return e == nil diff --git a/epochStart/bootstrap/storagehandler/metaStorageHandler.go b/epochStart/bootstrap/storagehandler/metaStorageHandler.go index 42950154433..505559bebdf 100644 --- a/epochStart/bootstrap/storagehandler/metaStorageHandler.go +++ b/epochStart/bootstrap/storagehandler/metaStorageHandler.go @@ -8,8 +8,8 @@ import ( "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap" "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/disabled" - "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/structs" 
"github.com/ElrondNetwork/elrond-go/epochStart/metachain" "github.com/ElrondNetwork/elrond-go/hashing" "github.com/ElrondNetwork/elrond-go/marshal" @@ -43,10 +43,12 @@ func NewMetaStorageHandler( if err != nil { return nil, err } + storageService, err := storageFactory.CreateForMeta() if err != nil { return nil, err } + base := &baseStorageHandler{ storageService: storageService, shardCoordinator: shardCoordinator, @@ -59,7 +61,7 @@ func NewMetaStorageHandler( } // SaveDataToStorage will save the fetched data to storage so it will be used by the storage bootstrap component -func (msh *metaStorageHandler) SaveDataToStorage(components structs.ComponentsNeededForBootstrap) error { +func (msh *metaStorageHandler) SaveDataToStorage(components *bootstrap.ComponentsNeededForBootstrap) error { // TODO: here we should save all needed data defer func() { @@ -107,10 +109,12 @@ func (msh *metaStorageHandler) SaveDataToStorage(components structs.ComponentsNe if err != nil { return err } + err = bootStorer.Put([]byte(highestRoundFromBootStorage), bootStrapDataBytes) if err != nil { return err } + log.Info("saved bootstrap data to storage") return nil } @@ -142,7 +146,7 @@ func (msh *metaStorageHandler) getAndSaveLastHeader(metaBlock *block.MetaBlock) return bootstrapHdrInfo, nil } -func (msh *metaStorageHandler) getAndSaveTriggerRegistry(components structs.ComponentsNeededForBootstrap) ([]byte, error) { +func (msh *metaStorageHandler) getAndSaveTriggerRegistry(components *bootstrap.ComponentsNeededForBootstrap) ([]byte, error) { metaBlock := components.EpochStartMetaBlock hash, err := core.CalculateHash(msh.marshalizer, msh.hasher, metaBlock) if err != nil { diff --git a/epochStart/bootstrap/storagehandler/shardStorageHandler.go b/epochStart/bootstrap/storagehandler/shardStorageHandler.go index 309f1729c14..966c157f1ca 100644 --- a/epochStart/bootstrap/storagehandler/shardStorageHandler.go +++ b/epochStart/bootstrap/storagehandler/shardStorageHandler.go @@ -9,8 +9,8 @@ import ( "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap" "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/disabled" - "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/structs" "github.com/ElrondNetwork/elrond-go/epochStart/shardchain" "github.com/ElrondNetwork/elrond-go/hashing" "github.com/ElrondNetwork/elrond-go/marshal" @@ -44,10 +44,12 @@ func NewShardStorageHandler( if err != nil { return nil, err } + storageService, err := storageFactory.CreateForShard() if err != nil { return nil, err } + base := &baseStorageHandler{ storageService: storageService, shardCoordinator: shardCoordinator, @@ -60,7 +62,7 @@ func NewShardStorageHandler( } // SaveDataToStorage will save the fetched data to storage so it will be used by the storage bootstrap component -func (ssh *shardStorageHandler) SaveDataToStorage(components structs.ComponentsNeededForBootstrap) error { +func (ssh *shardStorageHandler) SaveDataToStorage(components *bootstrap.ComponentsNeededForBootstrap) error { // TODO: here we should save all needed data defer func() { @@ -156,7 +158,7 @@ func (ssh *shardStorageHandler) getAndSaveLastHeader(shardHeader *block.Header) return bootstrapHdrInfo, nil } -func (ssh *shardStorageHandler) getAndSaveTriggerRegistry(components structs.ComponentsNeededForBootstrap) ([]byte, error) { +func (ssh *shardStorageHandler) getAndSaveTriggerRegistry(components 
*bootstrap.ComponentsNeededForBootstrap) ([]byte, error) { shardHeader := components.ShardHeader metaBlock := components.EpochStartMetaBlock From 09fbb7eeca054c817e5fae782a6368c8ec273e38 Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Mon, 23 Mar 2020 21:30:33 +0200 Subject: [PATCH 31/61] processing. --- cmd/node/main.go | 8 +- consensus/mock/nodesCoordinatorMock.go | 24 -- consensus/spos/bls/subroundStartRound.go | 2 +- consensus/spos/consensusState.go | 8 +- data/mock/nodesCoordinatorMock.go | 24 -- .../disabled/disabledNodesCoordinator.go | 4 - epochStart/bootstrap/process.go | 269 ++++++++++++------ epochStart/bootstrap/syncValidatorStatus.go | 148 ++++++++++ epochStart/errors.go | 12 +- epochStart/interface.go | 12 + epochStart/mock/nodesCoordinatorStub.go | 14 - integrationTests/mock/nodesCoordinatorMock.go | 24 -- node/mock/nodesCoordinatorMock.go | 24 -- process/mock/nodesCoordinatorMock.go | 24 -- sharding/indexHashedNodesCoordinator.go | 21 -- .../indexHashedNodesCoordinatorRegistry.go | 2 +- sharding/indexHashedNodesCoordinator_test.go | 27 -- sharding/interface.go | 1 - update/interface.go | 1 - update/sync/syncHeadersByHash.go | 8 +- update/sync/syncMiniBlocks.go | 8 + 21 files changed, 364 insertions(+), 301 deletions(-) create mode 100644 epochStart/bootstrap/syncValidatorStatus.go diff --git a/cmd/node/main.go b/cmd/node/main.go index f2feaf17335..7f3725aa551 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -33,8 +33,6 @@ import ( "github.com/ElrondNetwork/elrond-go/display" "github.com/ElrondNetwork/elrond-go/epochStart" "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap" - factoryEpochBootstrap "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/factory" - "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/nodesconfigprovider" "github.com/ElrondNetwork/elrond-go/epochStart/notifier" "github.com/ElrondNetwork/elrond-go/facade" "github.com/ElrondNetwork/elrond-go/hashing" @@ -625,18 +623,18 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { DefaultDBPath: "", DefaultEpochString: "", } - bootsrapper, err := bootstrap.NewEpochStartDataProvider(epochStartBootsrapArgs) + bootsrapper, err := bootstrap.NewEpochStartBootstrapHandler(epochStartBootsrapArgs) if err != nil { log.Error("could not create bootsrapper", "err", err) return err } - currentEpoch, currentShardId, shardNumber, err := bootsrapper.Bootstrap() + currentEpoch, currentShardId, numOfShards, err := bootsrapper.Bootstrap() if err != nil { log.Error("boostrap return error", "error", err) return err } - shardCoordinator, err := sharding.NewMultiShardCoordinator(shardNumber, currentShardId) + shardCoordinator, err := sharding.NewMultiShardCoordinator(numOfShards, currentShardId) if err != nil { return err } diff --git a/consensus/mock/nodesCoordinatorMock.go b/consensus/mock/nodesCoordinatorMock.go index be2fe1f28f8..80f7721ef7a 100644 --- a/consensus/mock/nodesCoordinatorMock.go +++ b/consensus/mock/nodesCoordinatorMock.go @@ -93,30 +93,6 @@ func (ncm *NodesCoordinatorMock) GetConsensusValidatorsPublicKeys(randomness []b return pubKeys, nil } -// GetConsensusValidatorsRewardsAddresses - -func (ncm *NodesCoordinatorMock) GetConsensusValidatorsRewardsAddresses( - randomness []byte, - round uint64, - shardId uint32, - epoch uint32, -) ([]string, error) { - if ncm.GetValidatorsPublicKeysCalled != nil { - return ncm.GetValidatorsRewardsAddressesCalled(randomness, round, shardId, epoch) - } - - validators, err := ncm.ComputeConsensusGroup(randomness, round, shardId, epoch) 
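// The GetConsensusValidatorsRewardsAddresses variant is being deleted from
// every NodesCoordinator mock in this commit, and
// ConsensusState.GetNextConsensusGroup below shrinks from
// ([]string, []string, error) to ([]string, error): consensus apparently only
// needs the validators' public keys, so callers drop the second return value.
// Updated call shape, matching the new signature (variable names assumed):
//
//	newConsensusGroup, err := cns.GetNextConsensusGroup(randomSource, round, shardId, nodesCoordinator, epoch)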
- if err != nil { - return nil, err - } - - addresses := make([]string, 0) - for _, v := range validators { - addresses = append(addresses, string(v.Address())) - } - - return addresses, nil -} - // LoadState - func (ncm *NodesCoordinatorMock) LoadState(_ []byte) error { return nil diff --git a/consensus/spos/bls/subroundStartRound.go b/consensus/spos/bls/subroundStartRound.go index b02458efbf2..f951b441d39 100644 --- a/consensus/spos/bls/subroundStartRound.go +++ b/consensus/spos/bls/subroundStartRound.go @@ -222,7 +222,7 @@ func (sr *subroundStartRound) generateNextConsensusGroup(roundIndex int64) error shardId := sr.ShardCoordinator().SelfId() - nextConsensusGroup, _, err := sr.GetNextConsensusGroup( + nextConsensusGroup, err := sr.GetNextConsensusGroup( randomSeed, uint64(sr.RoundIndex), shardId, diff --git a/consensus/spos/consensusState.go b/consensus/spos/consensusState.go index 36444c61d42..936109c2a26 100644 --- a/consensus/spos/consensusState.go +++ b/consensus/spos/consensusState.go @@ -127,7 +127,7 @@ func (cns *ConsensusState) GetNextConsensusGroup( shardId uint32, nodesCoordinator sharding.NodesCoordinator, epoch uint32, -) ([]string, []string, error) { +) ([]string, error) { validatorsGroup, err := nodesCoordinator.ComputeConsensusGroup(randomSource, round, shardId, epoch) if err != nil { log.Debug( @@ -138,19 +138,17 @@ func (cns *ConsensusState) GetNextConsensusGroup( "shardId", shardId, "epoch", epoch, ) - return nil, nil, err + return nil, err } consensusSize := len(validatorsGroup) newConsensusGroup := make([]string, consensusSize) - consensusRewardAddresses := make([]string, consensusSize) for i := 0; i < consensusSize; i++ { newConsensusGroup[i] = string(validatorsGroup[i].PubKey()) - consensusRewardAddresses[i] = string(validatorsGroup[i].Address()) } - return newConsensusGroup, consensusRewardAddresses, nil + return newConsensusGroup, nil } // IsConsensusDataSet method returns true if the consensus data for the current round is set and false otherwise diff --git a/data/mock/nodesCoordinatorMock.go b/data/mock/nodesCoordinatorMock.go index 56b10c7af69..690686fe486 100644 --- a/data/mock/nodesCoordinatorMock.go +++ b/data/mock/nodesCoordinatorMock.go @@ -90,30 +90,6 @@ func (ncm *NodesCoordinatorMock) GetConsensusValidatorsPublicKeys( return valGrStr, nil } -// GetConsensusValidatorsRewardsAddresses - -func (ncm *NodesCoordinatorMock) GetConsensusValidatorsRewardsAddresses( - randomness []byte, - round uint64, - shardId uint32, - epoch uint32, -) ([]string, error) { - if ncm.GetValidatorsPublicKeysCalled != nil { - return ncm.GetValidatorsRewardsAddressesCalled(randomness, round, shardId, epoch) - } - - validators, err := ncm.ComputeConsensusGroup(randomness, round, shardId, epoch) - if err != nil { - return nil, err - } - - addresses := make([]string, 0) - for _, v := range validators { - addresses = append(addresses, string(v.Address())) - } - - return addresses, nil -} - // SetNodesPerShards - func (ncm *NodesCoordinatorMock) SetNodesPerShards( eligible map[uint32][]sharding.Validator, diff --git a/epochStart/bootstrap/disabled/disabledNodesCoordinator.go b/epochStart/bootstrap/disabled/disabledNodesCoordinator.go index 22a670812b3..622f89cb190 100644 --- a/epochStart/bootstrap/disabled/disabledNodesCoordinator.go +++ b/epochStart/bootstrap/disabled/disabledNodesCoordinator.go @@ -42,10 +42,6 @@ func (n *nodesCoordinator) GetConsensusValidatorsPublicKeys(randomness []byte, r return nil, nil } -func (n *nodesCoordinator) 
GetConsensusValidatorsRewardsAddresses(randomness []byte, round uint64, shardId uint32, epoch uint32) ([]string, error) { - return nil, nil -} - func (n *nodesCoordinator) GetOwnPublicKey() []byte { return nil } diff --git a/epochStart/bootstrap/process.go b/epochStart/bootstrap/process.go index 43d6e191bd4..6c87d7d40f6 100644 --- a/epochStart/bootstrap/process.go +++ b/epochStart/bootstrap/process.go @@ -1,8 +1,6 @@ package bootstrap import ( - "encoding/hex" - "errors" "time" "github.com/ElrondNetwork/elrond-go/config" @@ -12,6 +10,7 @@ import ( "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/data/syncer" "github.com/ElrondNetwork/elrond-go/data/trie" trieFactory "github.com/ElrondNetwork/elrond-go/data/trie/factory" "github.com/ElrondNetwork/elrond-go/data/typeConverters/uint64ByteSlice" @@ -33,6 +32,7 @@ import ( "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/storage" storageFactory "github.com/ElrondNetwork/elrond-go/storage/factory" + "github.com/ElrondNetwork/elrond-go/storage/storageUnit" "github.com/ElrondNetwork/elrond-go/storage/timecache" "github.com/ElrondNetwork/elrond-go/update" "github.com/ElrondNetwork/elrond-go/update/sync" @@ -47,10 +47,11 @@ type ComponentsNeededForBootstrap struct { EpochStartMetaBlock *block.MetaBlock PreviousEpochStartMetaBlock *block.MetaBlock ShardHeader *block.Header - NodesConfig *sharding.NodesSetup - ShardHeaders map[uint32]*block.Header + NodesConfig *sharding.NodesCoordinatorRegistry + ShardHeaders map[string]data.HeaderHandler ShardCoordinator sharding.Coordinator - Tries state.TriesHolder + UserAccountTries map[string]data.Trie + PeerAccountTries map[string]data.Trie PendingMiniBlocks map[string]*block.MiniBlock } @@ -70,6 +71,7 @@ type epochStartBootstrap struct { whiteListHandler update.WhiteListHandler shardCoordinator sharding.Coordinator genesisNodesConfig *sharding.NodesSetup + pathManager storage.PathManagerHandler workingDir string defaultDBPath string defaultEpochString string @@ -81,14 +83,25 @@ type epochStartBootstrap struct { miniBlocksSyncer epochStart.PendingMiniBlocksSyncHandler headersSyncer epochStart.HeadersByHashSyncer epochStartMetaBlockSyncer epochStart.StartOfEpochMetaSyncer - - baseData baseDataInStorage + nodesConfigHandler epochStart.StartOfEpochNodesConfigHandler + baseData baseDataInStorage + + epochStartMeta *block.MetaBlock + prevEpochStartMeta *block.MetaBlock + syncedHeaders map[string]data.HeaderHandler + nodesConfig *sharding.NodesCoordinatorRegistry + userAccountTries map[string]data.Trie + peerAccountTries map[string]data.Trie + + userTrieStorageManager data.StorageManager + peerTrieStorageManager data.StorageManager } type baseDataInStorage struct { - shardId uint32 - lastRound uint64 - lastEpoch uint32 + shardId uint32 + numberOfShards uint32 + lastRound uint64 + lastEpoch uint32 } // ArgsEpochStartBootstrap holds the arguments needed for creating an epoch start data provider component @@ -145,7 +158,7 @@ func (e *epochStartBootstrap) searchDataInLocalStorage() { } log.Debug("current epoch from the storage : ", "epoch", currentEpoch) - // TODO: write gathered data in baseDataInStorage + e.baseData.lastEpoch = currentEpoch } func (e *epochStartBootstrap) isStartInEpochZero() bool { @@ -167,10 +180,6 @@ func (e *epochStartBootstrap) prepareEpochZero() (uint32, uint32, uint32, error) return currentEpoch, e.shardCoordinator.SelfId(), 
e.shardCoordinator.NumberOfShards(), nil } -func (e *epochStartBootstrap) saveGatheredDataToStorage() { - -} - func (e *epochStartBootstrap) computeMostProbableEpoch() { startTime := time.Unix(e.genesisNodesConfig.StartTime, 0) elapsedTime := time.Since(startTime) @@ -249,8 +258,10 @@ func (e *epochStartBootstrap) prepareComponentsToSyncFromNetwork() error { return err } - // TODO epochStart meta syncer - e.epochStartMetaBlockSyncer = NewEpochStartmetaBlockSyncer() + e.epochStartMetaBlockSyncer, err = NewEpochStartMetaSyncer() + if err != nil { + return err + } syncMiniBlocksArgs := sync.ArgsNewPendingMiniBlocksSyncer{ Storage: &disabled.Storer{}, @@ -293,78 +304,91 @@ func (e *epochStartBootstrap) syncHeadersFrom(meta *block.MetaBlock) (map[string // Bootstrap will handle requesting and receiving the needed information the node will bootstrap from func (e *epochStartBootstrap) requestAndProcessing() (uint32, uint32, uint32, error) { - metaBlock, err := e.epochStartMetaBlockSyncer.SyncEpochStartMeta(timeToWait) + var err error + e.epochStartMeta, err = e.epochStartMetaBlockSyncer.SyncEpochStartMeta(timeToWait) if err != nil { return 0, 0, 0, err } - numShards := uint32(len(metaBlock.EpochStart.LastFinalizedHeaders)) + e.baseData.numberOfShards = uint32(len(e.epochStartMeta.EpochStart.LastFinalizedHeaders)) + e.baseData.lastEpoch = e.epochStartMeta.Epoch - headers, err := e.syncHeadersFrom(metaBlock) + e.syncedHeaders, err = e.syncHeadersFrom(e.epochStartMeta) if err != nil { return 0, 0, 0, err } - prevEpochStartMetaHash := metaBlock.EpochStart.Economics.PrevEpochStartHash - prevEpochStartMeta, ok := headers[string(prevEpochStartMetaHash)].(*block.MetaBlock) + prevEpochStartMetaHash := e.epochStartMeta.EpochStart.Economics.PrevEpochStartHash + prevEpochStartMeta, ok := e.syncedHeaders[string(prevEpochStartMetaHash)].(*block.MetaBlock) if !ok { return 0, 0, 0, epochStart.ErrWrongTypeAssertion } + e.prevEpochStartMeta = prevEpochStartMeta - nodesConfigForCurrEpoch, err := e.nodesConfigProvider.GetNodesConfigForMetaBlock(metaBlock) + pubKeyBytes, err := e.publicKey.ToByteArray() if err != nil { return 0, 0, 0, err } - nodesConfigForPrevEpoch, err := e.nodesConfigProvider.GetNodesConfigForMetaBlock(prevEpochStartMeta) + e.nodesConfig, e.baseData.shardId, err = e.nodesConfigHandler.NodesConfigFromMetaBlock(e.epochStartMeta, e.prevEpochStartMeta, pubKeyBytes) if err != nil { return 0, 0, 0, err } - selfShardId, err := e.getShardID(nodesConfigForCurrEpoch) + e.shardCoordinator, err = sharding.NewMultiShardCoordinator(e.baseData.numberOfShards, e.baseData.shardId) if err != nil { return 0, 0, 0, err } - e.shardCoordinator, err = sharding.NewMultiShardCoordinator(numShards, selfShardId) - if err != nil { - return 0, 0, 0, err + if e.shardCoordinator.SelfId() == core.MetachainShardId { + err = e.requestAndProcessForShard() + if err != nil { + return 0, 0, 0, err + } } - if e.shardCoordinator.SelfId() == core.MetachainShardId { - return e.requestAndProcessForShard(e.shardCoordinator.SelfId()) + err = e.requestAndProcessForMeta() + if err != nil { + return 0, 0, 0, err } - return e.requestAndProcessForMeta() + return e.baseData.shardId, e.baseData.numberOfShards, e.baseData.lastEpoch, nil } func (e *epochStartBootstrap) requestAndProcessForMeta() error { - // accounts and peer accounts syncer + err := e.syncUserAccountsState(e.epochStartMeta.RootHash) + if err != nil { + return err + } + + err = e.syncPeerAccountsState(e.epochStartMeta.ValidatorStatsRootHash) + if err != nil { + return err + } 
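	// At this point the metachain node has synced both tries: the user
	// accounts trie from the epoch start metablock's RootHash and the peer
	// (validator) accounts trie from its ValidatorStatsRootHash. The shard
	// path below syncs only the user accounts trie, using the RootHash of
	// the node's own shard header.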
components := &ComponentsNeededForBootstrap{ - EpochStartMetaBlock: metaBlock, - PreviousEpochStartMetaBlock: prevEpochStartMeta, - ShardHeader: shardHeaderForShard, - NodesConfig: nodesConfig, - ShardHeaders: shardHeaders, + EpochStartMetaBlock: e.epochStartMeta, + PreviousEpochStartMetaBlock: e.prevEpochStartMeta, + NodesConfig: e.nodesConfig, + ShardHeaders: e.syncedHeaders, ShardCoordinator: e.shardCoordinator, - Tries: trieToReturn, - PendingMiniBlocks: pendingMiniBlocks, + UserAccountTries: e.userAccountTries, + PeerAccountTries: e.peerAccountTries, } - storageHandlerComponent, err = storagehandler.NewMetaStorageHandler( + storageHandlerComponent, err := storagehandler.NewMetaStorageHandler( e.generalConfig, e.shardCoordinator, e.pathManager, e.marshalizer, e.hasher, - metaBlock.Epoch, + e.epochStartMeta.Epoch, ) if err != nil { - return nil, err + return err } - errSavingToStorage := storageHandlerComponent.SaveDataToStorage(*components) + errSavingToStorage := storageHandlerComponent.SaveDataToStorage(components) if errSavingToStorage != nil { return errSavingToStorage } @@ -372,11 +396,11 @@ func (e *epochStartBootstrap) requestAndProcessForMeta() error { return nil } -func (e *epochStartBootstrap) requestAndProcessForShard(shardId uint32, metaBlock *block.MetaBlock) error { +func (e *epochStartBootstrap) requestAndProcessForShard() error { var epochStartData block.EpochStartShardData found := false - for _, shardData := range metaBlock.EpochStart.LastFinalizedHeaders { - if shardData.ShardID == shardId { + for _, shardData := range e.epochStartMeta.EpochStart.LastFinalizedHeaders { + if shardData.ShardID == e.shardCoordinator.SelfId() { epochStartData = shardData found = true break @@ -400,8 +424,8 @@ func (e *epochStartBootstrap) requestAndProcessForShard(shardId uint32, metaBloc hashesToRequest := make([][]byte, 0, 2) hashesToRequest = append(hashesToRequest, epochStartData.LastFinishedMetaBlock) hashesToRequest = append(hashesToRequest, epochStartData.FirstPendingMetaBlock) - shardIds = append(shardIds, shardId) - shardIds = append(shardIds, shardId) + shardIds = append(shardIds, e.shardCoordinator.SelfId()) + shardIds = append(shardIds, e.shardCoordinator.SelfId()) e.headersSyncer.ClearFields() err = e.headersSyncer.SyncMissingHeadersByHash(shardIds, hashesToRequest, timeToWait) @@ -414,14 +438,28 @@ func (e *epochStartBootstrap) requestAndProcessForShard(shardId uint32, metaBloc return err } + for hash, hdr := range neededHeaders { + e.syncedHeaders[hash] = hdr + } + + ownShardHdr, ok := e.syncedHeaders[string(epochStartData.HeaderHash)].(*block.Header) + if !ok { + return epochStart.ErrWrongTypeAssertion + } + + err = e.syncUserAccountsState(ownShardHdr.RootHash) + if err != nil { + return err + } + components := &ComponentsNeededForBootstrap{ - EpochStartMetaBlock: metaBlock, - PreviousEpochStartMetaBlock: prevEpochStartMeta, - ShardHeader: shardHeaderForShard, - NodesConfig: nodesConfig, - ShardHeaders: shardHeaders, + EpochStartMetaBlock: e.epochStartMeta, + PreviousEpochStartMetaBlock: e.prevEpochStartMeta, + ShardHeader: ownShardHdr, + NodesConfig: e.nodesConfig, + ShardHeaders: e.syncedHeaders, ShardCoordinator: e.shardCoordinator, - Tries: trieToReturn, + UserAccountTries: e.userAccountTries, PendingMiniBlocks: pendingMiniBlocks, } @@ -431,13 +469,13 @@ func (e *epochStartBootstrap) requestAndProcessForShard(shardId uint32, metaBloc e.pathManager, e.marshalizer, e.hasher, - metaBlock.Epoch, + e.baseData.lastEpoch, ) if err != nil { - return nil, err + return err } - 
errSavingToStorage := storageHandlerComponent.SaveDataToStorage(*components) + errSavingToStorage := storageHandlerComponent.SaveDataToStorage(components) if errSavingToStorage != nil { return errSavingToStorage } @@ -445,6 +483,91 @@ func (e *epochStartBootstrap) requestAndProcessForShard(shardId uint32, metaBloc return nil } +func (e *epochStartBootstrap) syncUserAccountsState(rootHash []byte) error { + argsUserAccountsSyncer := syncer.ArgsNewUserAccountsSyncer{ + ArgsNewBaseAccountsSyncer: syncer.ArgsNewBaseAccountsSyncer{ + Hasher: e.hasher, + Marshalizer: e.marshalizer, + TrieStorageManager: e.userTrieStorageManager, + RequestHandler: e.requestHandler, + WaitTime: timeToWait, + Cacher: e.dataPool.TrieNodes(), + }, + ShardId: e.shardCoordinator.SelfId(), + } + accountsDBSyncer, err := syncer.NewUserAccountsSyncer(argsUserAccountsSyncer) + if err != nil { + return err + } + + err = accountsDBSyncer.SyncAccounts(rootHash) + if err != nil { + return err + } + + e.userAccountTries = accountsDBSyncer.GetSyncedTries() + return nil +} + +func (e *epochStartBootstrap) createTrieStorageManagers() error { + dbConfig := storageFactory.GetDBFromConfig(e.generalConfig.AccountsTrieStorage.DB) + trieStorage, err := storageUnit.NewStorageUnitFromConf( + storageFactory.GetCacherFromConfig(e.generalConfig.AccountsTrieStorage.Cache), + dbConfig, + storageFactory.GetBloomFromConfig(e.generalConfig.AccountsTrieStorage.Bloom), + ) + if err != nil { + return err + } + + e.userTrieStorageManager, err = trie.NewTrieStorageManagerWithoutPruning(trieStorage) + if err != nil { + return err + } + + dbConfig = storageFactory.GetDBFromConfig(e.generalConfig.PeerAccountsTrieStorage.DB) + trieStorage, err = storageUnit.NewStorageUnitFromConf( + storageFactory.GetCacherFromConfig(e.generalConfig.PeerAccountsTrieStorage.Cache), + dbConfig, + storageFactory.GetBloomFromConfig(e.generalConfig.PeerAccountsTrieStorage.Bloom), + ) + if err != nil { + return err + } + + e.peerTrieStorageManager, err = trie.NewTrieStorageManagerWithoutPruning(trieStorage) + if err != nil { + return err + } + + return nil +} + +func (e *epochStartBootstrap) syncPeerAccountsState(rootHash []byte) error { + argsValidatorAccountsSyncer := syncer.ArgsNewValidatorAccountsSyncer{ + ArgsNewBaseAccountsSyncer: syncer.ArgsNewBaseAccountsSyncer{ + Hasher: e.hasher, + Marshalizer: e.marshalizer, + TrieStorageManager: e.peerTrieStorageManager, + RequestHandler: e.requestHandler, + WaitTime: timeToWait, + Cacher: e.dataPool.TrieNodes(), + }, + } + accountsDBSyncer, err := syncer.NewValidatorAccountsSyncer(argsValidatorAccountsSyncer) + if err != nil { + return err + } + + err = accountsDBSyncer.SyncAccounts(rootHash) + if err != nil { + return err + } + + e.peerAccountTries = accountsDBSyncer.GetSyncedTries() + return nil +} + func (e *epochStartBootstrap) createRequestHandler() error { dataPacker, err := partitioning.NewSimpleDataPacker(e.marshalizer) if err != nil { @@ -458,23 +581,18 @@ func (e *epochStartBootstrap) createRequestHandler() error { } triesHolder := state.NewDataTriesHolder() - - stateTrieStorageManager, err := trie.NewTrieStorageManagerWithoutPruning(disabled.NewDisabledStorer()) - if err != nil { - return err - } - stateTrie, err := trie.NewTrie(stateTrieStorageManager, e.marshalizer, e.hasher) + err = e.createTrieStorageManagers() if err != nil { return err } - triesHolder.Put([]byte(trieFactory.UserAccountTrie), stateTrie) - peerTrieStorageManager, err := trie.NewTrieStorageManagerWithoutPruning(disabled.NewDisabledStorer()) + 
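	// The tries handed to the resolver container are now backed by the
	// storage managers built in createTrieStorageManagers (real storage
	// units created from the AccountsTrieStorage and PeerAccountsTrieStorage
	// configs, without pruning), replacing the disabled storers used before,
	// so trie nodes synced during bootstrap land in persistent storage.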
stateTrie, err := trie.NewTrie(e.userTrieStorageManager, e.marshalizer, e.hasher) if err != nil { return err } + triesHolder.Put([]byte(trieFactory.UserAccountTrie), stateTrie) - peerTrie, err := trie.NewTrie(peerTrieStorageManager, e.marshalizer, e.hasher) + peerTrie, err := trie.NewTrie(e.peerTrieStorageManager, e.marshalizer, e.hasher) if err != nil { return err } @@ -491,7 +609,6 @@ func (e *epochStartBootstrap) createRequestHandler() error { TriesContainer: triesHolder, SizeCheckDelta: 0, } - resolverFactory, err := resolverscontainer.NewMetaResolversContainerFactory(resolversContainerArgs) if err != nil { return err } @@ -515,24 +632,6 @@ func (e *epochStartBootstrap) createRequestHandler() error { return err } -func (e *epochStartBootstrap) getShardID(nodesConfig *sharding.NodesSetup) (uint32, error) { - pubKeyBytes, err := e.publicKey.ToByteArray() - if err != nil { - return 0, err - } - - pubKeyStr := hex.EncodeToString(pubKeyBytes) - for shardID, nodesPerShard := range nodesConfig.InitialNodesPubKeys() { - for _, nodePubKey := range nodesPerShard { - if nodePubKey == pubKeyStr { - return shardID, nil - } - } - } - - return 0, nil -} - // IsInterfaceNil returns true if there is no value under the interface func (e *epochStartBootstrap) IsInterfaceNil() bool { return e == nil diff --git a/epochStart/bootstrap/syncValidatorStatus.go b/epochStart/bootstrap/syncValidatorStatus.go new file mode 100644 index 00000000000..5a432c19f6f --- /dev/null +++ b/epochStart/bootstrap/syncValidatorStatus.go @@ -0,0 +1,148 @@ +package bootstrap + +import ( + "bytes" + "fmt" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/epochStart" + "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/disabled" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/update/sync" +) + +type syncValidatorStatus struct { + miniBlocksSyncer epochStart.PendingMiniBlocksSyncHandler + dataPool dataRetriever.PoolsHolder + marshalizer marshal.Marshalizer + requestHandler process.RequestHandler +} + +// NewSyncValidatorStatus creates a new validator status process component +func NewSyncValidatorStatus() (*syncValidatorStatus, error) { + + s := &syncValidatorStatus{} + syncMiniBlocksArgs := sync.ArgsNewPendingMiniBlocksSyncer{ + Storage: &disabled.Storer{}, + Cache: s.dataPool.MiniBlocks(), + Marshalizer: s.marshalizer, + RequestHandler: s.requestHandler, + } + var err error + s.miniBlocksSyncer, err = sync.NewPendingMiniBlocksSyncer(syncMiniBlocksArgs) + if err != nil { + return nil, err + } + + return s, nil +} + +// NodesConfigFromMetaBlock syncs and creates the registry from the epoch start metablock +func (s *syncValidatorStatus) NodesConfigFromMetaBlock( + currMetaBlock *block.MetaBlock, + prevMetaBlock *block.MetaBlock, + publicKey []byte, +) (*sharding.NodesCoordinatorRegistry, uint32, error) { + if !currMetaBlock.IsStartOfEpochBlock() { + return nil, 0, epochStart.ErrNotEpochStartBlock + } + if !prevMetaBlock.IsStartOfEpochBlock() { + return nil, 0, epochStart.ErrNotEpochStartBlock + } + + nodesConfig := &sharding.NodesCoordinatorRegistry{ + EpochsConfig: make(map[string]*sharding.EpochValidators), + CurrentEpoch: currMetaBlock.Epoch, + } + + epochValidators, selfShardId, err := 
s.processNodesConfigFor(currMetaBlock, publicKey) + if err != nil { + return nil, 0, err + } + configId := fmt.Sprint(currMetaBlock.Epoch) + nodesConfig.EpochsConfig[configId] = epochValidators + + epochValidators, _, err = s.processNodesConfigFor(prevMetaBlock, nil) + if err != nil { + return nil, 0, err + } + configId = fmt.Sprint(prevMetaBlock.Epoch) + nodesConfig.EpochsConfig[configId] = epochValidators + + return nodesConfig, selfShardId, nil +} + +func (s *syncValidatorStatus) processNodesConfigFor( + metaBlock *block.MetaBlock, + publicKey []byte, +) (*sharding.EpochValidators, uint32, error) { + shardMBHeaders := make([]block.ShardMiniBlockHeader, 0) + for _, mbHeader := range metaBlock.MiniBlockHeaders { + if mbHeader.Type != block.PeerBlock { + continue + } + + shardMBHdr := block.ShardMiniBlockHeader{ + Hash: mbHeader.Hash, + ReceiverShardID: mbHeader.ReceiverShardID, + SenderShardID: core.MetachainShardId, + TxCount: mbHeader.TxCount, + } + shardMBHeaders = append(shardMBHeaders, shardMBHdr) + } + + s.miniBlocksSyncer.ClearFields() + err := s.miniBlocksSyncer.SyncPendingMiniBlocks(shardMBHeaders, timeToWait) + if err != nil { + return nil, 0, err + } + + peerMiniBlocks, err := s.miniBlocksSyncer.GetMiniBlocks() + if err != nil { + return nil, 0, err + } + + epochValidators := &sharding.EpochValidators{ + EligibleValidators: make(map[string][]*sharding.SerializableValidator), + WaitingValidators: make(map[string][]*sharding.SerializableValidator), + } + + selfShardId := core.AllShardId + found := false + shouldSearchSelfId := len(publicKey) == 0 + for _, mb := range peerMiniBlocks { + for _, txHash := range mb.TxHashes { + vid := &state.ValidatorInfo{} + err := s.marshalizer.Unmarshal(vid, txHash) + if err != nil { + return nil, 0, err + } + + serializableValidator := &sharding.SerializableValidator{ + PubKey: vid.PublicKey, + Address: vid.RewardAddress, // TODO - take out - need to refactor validator.go and its usage across the project + } + + shardId := fmt.Sprint(vid.ShardId) + // TODO - make decision according to validatorInfo.List after it is implemented + epochValidators.EligibleValidators[shardId] = append(epochValidators.EligibleValidators[shardId], serializableValidator) + + if shouldSearchSelfId && !found && bytes.Equal(vid.PublicKey, publicKey) { + selfShardId = vid.ShardId + found = true + } + } + } + + return epochValidators, selfShardId, nil +} + +// IsInterfaceNil returns true if underlying object is nil +func (s *syncValidatorStatus) IsInterfaceNil() bool { + return s == nil +} diff --git a/epochStart/errors.go b/epochStart/errors.go index 47382d2f37a..80ce9c7b724 100644 --- a/epochStart/errors.go +++ b/epochStart/errors.go @@ -41,9 +41,6 @@ var ErrNilStorage = errors.New("nil storage") // ErrNilHeaderHandler signals that a nil header handler has been provided var ErrNilHeaderHandler = errors.New("nil header handler") -// ErrNilArgsPendingMiniblocks signals that nil argument was passed -var ErrNilArgsPendingMiniblocks = errors.New("nil arguments for pending miniblock object") - // ErrNilMiniblocks signals that nil argument was passed var ErrNilMiniblocks = errors.New("nil arguments for miniblocks object") @@ -86,15 +83,9 @@ var ErrNilTriggerStorage = errors.New("nil trigger storage") // ErrNilMetaNonceHashStorage signals that nil meta header nonce hash storage has been provided var ErrNilMetaNonceHashStorage = errors.New("nil meta nonce hash storage") -// ErrNilMiniblocksStorage signals that nil miniblocks storage has been provided -var ErrNilMiniblocksStorage = 
errors.New("nil miniblocks storage") - // ErrValidatorMiniBlockHashDoesNotMatch signals that created and received validatorInfo miniblock hash does not match var ErrValidatorMiniBlockHashDoesNotMatch = errors.New("validatorInfo miniblock hash does not match") -// ErrTxHashDoesNotMatch signals that created and received tx hash does not match -var ErrTxHashDoesNotMatch = errors.New("validatorInfo miniblock tx hash does not match") - // ErrRewardMiniBlockHashDoesNotMatch signals that created and received rewards miniblock hash does not match var ErrRewardMiniBlockHashDoesNotMatch = errors.New("reward miniblock hash does not match") @@ -133,3 +124,6 @@ var ErrValidatorInfoMiniBlocksNumDoesNotMatch = errors.New("number of created an // ErrNilValidatorInfo signals that a nil value for the validatorInfo has been provided var ErrNilValidatorInfo = errors.New("validator info is nil") + +// ErrEpochStartDataForShardNotFound signals that epoch start shard data was not found for current shard id +var ErrEpochStartDataForShardNotFound = errors.New("epoch start data for current shard not found") diff --git a/epochStart/interface.go b/epochStart/interface.go index 72e48a26952..f3f839101e5 100644 --- a/epochStart/interface.go +++ b/epochStart/interface.go @@ -5,6 +5,7 @@ import ( "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/sharding" ) // TriggerHandler defines the functionalities for an start of epoch trigger @@ -92,6 +93,7 @@ type HeadersByHashSyncer interface { type PendingMiniBlocksSyncHandler interface { SyncPendingMiniBlocks(miniBlockHeaders []block.ShardMiniBlockHeader, waitTime time.Duration) error GetMiniBlocks() (map[string]*block.MiniBlock, error) + ClearFields() IsInterfaceNil() bool } @@ -107,3 +109,13 @@ type StartOfEpochMetaSyncer interface { SyncEpochStartMeta(waitTime time.Duration) (*block.MetaBlock, error) IsInterfaceNil() bool } + +// StartOfEpochNodesConfigHandler defines the methods to process nodesConfig from epoch start metablocks +type StartOfEpochNodesConfigHandler interface { + NodesConfigFromMetaBlock( + currMetaBlock *block.MetaBlock, + prevMetaBlock *block.MetaBlock, + publicKey []byte, + ) (*sharding.NodesCoordinatorRegistry, uint32, error) + IsInterfaceNil() bool +} diff --git a/epochStart/mock/nodesCoordinatorStub.go b/epochStart/mock/nodesCoordinatorStub.go index d34d99f443e..0e0de400a0a 100644 --- a/epochStart/mock/nodesCoordinatorStub.go +++ b/epochStart/mock/nodesCoordinatorStub.go @@ -88,20 +88,6 @@ func (ncm *NodesCoordinatorStub) GetConsensusValidatorsPublicKeys( return nil, nil } -// GetConsensusValidatorsRewardsAddresses - -func (ncm *NodesCoordinatorStub) GetConsensusValidatorsRewardsAddresses( - randomness []byte, - round uint64, - shardId uint32, - epoch uint32, -) ([]string, error) { - if ncm.GetValidatorsPublicKeysCalled != nil { - return ncm.GetValidatorsRewardsAddressesCalled(randomness, round, shardId, epoch) - } - - return nil, nil -} - // SetNodesPerShards - func (ncm *NodesCoordinatorStub) SetNodesPerShards(_ map[uint32][]sharding.Validator, _ map[uint32][]sharding.Validator, _ uint32, _ bool) error { return nil diff --git a/integrationTests/mock/nodesCoordinatorMock.go b/integrationTests/mock/nodesCoordinatorMock.go index 08fe526217b..517a537d148 100644 --- a/integrationTests/mock/nodesCoordinatorMock.go +++ b/integrationTests/mock/nodesCoordinatorMock.go @@ -93,30 +93,6 @@ func (ncm *NodesCoordinatorMock) GetConsensusValidatorsPublicKeys( return pubKeys, nil } -// 
GetConsensusValidatorsRewardsAddresses - -func (ncm *NodesCoordinatorMock) GetConsensusValidatorsRewardsAddresses( - randomness []byte, - round uint64, - shardId uint32, - epoch uint32, -) ([]string, error) { - if ncm.GetValidatorsPublicKeysCalled != nil { - return ncm.GetValidatorsRewardsAddressesCalled(randomness, round, shardId, epoch) - } - - validators, err := ncm.ComputeConsensusGroup(randomness, round, shardId, epoch) - if err != nil { - return nil, err - } - - addresses := make([]string, 0) - for _, v := range validators { - addresses = append(addresses, string(v.Address())) - } - - return addresses, nil -} - // SetNodesPerShards - func (ncm *NodesCoordinatorMock) SetNodesPerShards(_ map[uint32][]sharding.Validator, _ map[uint32][]sharding.Validator, _ uint32, _ bool) error { return nil diff --git a/node/mock/nodesCoordinatorMock.go b/node/mock/nodesCoordinatorMock.go index 169c290fe14..f02cff9ccaf 100644 --- a/node/mock/nodesCoordinatorMock.go +++ b/node/mock/nodesCoordinatorMock.go @@ -92,30 +92,6 @@ func (ncm *NodesCoordinatorMock) GetConsensusValidatorsPublicKeys( return pubKeys, nil } -// GetConsensusValidatorsRewardsAddresses - -func (ncm *NodesCoordinatorMock) GetConsensusValidatorsRewardsAddresses( - randomness []byte, - round uint64, - shardId uint32, - epoch uint32, -) ([]string, error) { - if ncm.GetValidatorsPublicKeysCalled != nil { - return ncm.GetValidatorsRewardsAddressesCalled(randomness, round, shardId, epoch) - } - - validators, err := ncm.ComputeConsensusGroup(randomness, round, shardId, epoch) - if err != nil { - return nil, err - } - - addresses := make([]string, 0) - for _, v := range validators { - addresses = append(addresses, string(v.Address())) - } - - return addresses, nil -} - // SetNodesPerShards - func (ncm *NodesCoordinatorMock) SetNodesPerShards( _ map[uint32][]sharding.Validator, diff --git a/process/mock/nodesCoordinatorMock.go b/process/mock/nodesCoordinatorMock.go index e9b1a2e38a4..5bc9434ddcd 100644 --- a/process/mock/nodesCoordinatorMock.go +++ b/process/mock/nodesCoordinatorMock.go @@ -133,30 +133,6 @@ func (ncm *NodesCoordinatorMock) GetConsensusValidatorsPublicKeys( return valGrStr, nil } -// GetConsensusValidatorsRewardsAddresses - -func (ncm *NodesCoordinatorMock) GetConsensusValidatorsRewardsAddresses( - randomness []byte, - round uint64, - shardId uint32, - epoch uint32, -) ([]string, error) { - if ncm.GetValidatorsPublicKeysCalled != nil { - return ncm.GetValidatorsRewardsAddressesCalled(randomness, round, shardId, epoch) - } - - validators, err := ncm.ComputeConsensusGroup(randomness, round, shardId, epoch) - if err != nil { - return nil, err - } - - addresses := make([]string, 0) - for _, v := range validators { - addresses = append(addresses, string(v.Address())) - } - - return addresses, nil -} - // SetNodesPerShards - func (ncm *NodesCoordinatorMock) SetNodesPerShards( eligible map[uint32][]sharding.Validator, diff --git a/sharding/indexHashedNodesCoordinator.go b/sharding/indexHashedNodesCoordinator.go index 4e98892c0e0..235df58c8c6 100644 --- a/sharding/indexHashedNodesCoordinator.go +++ b/sharding/indexHashedNodesCoordinator.go @@ -351,27 +351,6 @@ func (ihgs *indexHashedNodesCoordinator) GetConsensusValidatorsPublicKeys( return pubKeys, nil } -// GetConsensusValidatorsRewardsAddresses calculates the validator consensus group for a specific shard, randomness and round -// number, returning their staking/rewards addresses -func (ihgs *indexHashedNodesCoordinator) GetConsensusValidatorsRewardsAddresses( - randomness []byte, - 
round uint64, - shardId uint32, - epoch uint32, -) ([]string, error) { - consensusNodes, err := ihgs.ComputeConsensusGroup(randomness, round, shardId, epoch) - if err != nil { - return nil, err - } - - addresses := make([]string, len(consensusNodes)) - for i, v := range consensusNodes { - addresses[i] = string(v.Address()) - } - - return addresses, nil -} - // GetAllEligibleValidatorsPublicKeys will return all validators public keys for all shards func (ihgs *indexHashedNodesCoordinator) GetAllEligibleValidatorsPublicKeys(epoch uint32) (map[uint32][][]byte, error) { validatorsPubKeys := make(map[uint32][][]byte) diff --git a/sharding/indexHashedNodesCoordinatorRegistry.go b/sharding/indexHashedNodesCoordinatorRegistry.go index 43569f66ecc..852947e2b06 100644 --- a/sharding/indexHashedNodesCoordinatorRegistry.go +++ b/sharding/indexHashedNodesCoordinatorRegistry.go @@ -11,7 +11,7 @@ const keyPrefix = "indexHashed_" // SerializableValidator holds the minimal data required for marshalling and un-marshalling a validator type SerializableValidator struct { PubKey []byte `json:"pubKey"` - Address []byte `json:"address"` + Address []byte `json:"address"` //TODO: address is not needed here - delete on refactor } // EpochValidators holds one epoch configuration for a nodes coordinator diff --git a/sharding/indexHashedNodesCoordinator_test.go b/sharding/indexHashedNodesCoordinator_test.go index 61bad59f9ca..19f65f5015c 100644 --- a/sharding/indexHashedNodesCoordinator_test.go +++ b/sharding/indexHashedNodesCoordinator_test.go @@ -1151,33 +1151,6 @@ func TestIndexHashedNodesCoordinator_GetConsensusValidatorsPublicKeysExistingEpo require.True(t, isStringSubgroup(pKeys, shard0PubKeys)) } -func TestIndexHashedNodesCoordinator_GetConsensusValidatorsRewardsAddressesInvalidRandomness(t *testing.T) { - t.Parallel() - - args := createArguments() - ihgs, err := NewIndexHashedNodesCoordinator(args) - require.Nil(t, err) - - var addresses []string - addresses, err = ihgs.GetConsensusValidatorsRewardsAddresses(nil, 0, 0, 0) - require.Equal(t, ErrNilRandomness, err) - require.Nil(t, addresses) -} - -func TestIndexHashedNodesCoordinator_GetConsensusValidatorsRewardsAddressesOK(t *testing.T) { - t.Parallel() - - args := createArguments() - ihgs, err := NewIndexHashedNodesCoordinator(args) - require.Nil(t, err) - - var addresses []string - randomness := []byte("randomness") - addresses, err = ihgs.GetConsensusValidatorsRewardsAddresses(randomness, 0, 0, 0) - require.Nil(t, err) - require.True(t, len(addresses) > 0) -} - func TestIndexHashedNodesCoordinator_GetValidatorsIndexes(t *testing.T) { t.Parallel() diff --git a/sharding/interface.go b/sharding/interface.go index e9357976473..c227fc36846 100644 --- a/sharding/interface.go +++ b/sharding/interface.go @@ -43,7 +43,6 @@ type PublicKeysSelector interface { GetAllEligibleValidatorsPublicKeys(epoch uint32) (map[uint32][][]byte, error) GetAllWaitingValidatorsPublicKeys(epoch uint32) (map[uint32][][]byte, error) GetConsensusValidatorsPublicKeys(randomness []byte, round uint64, shardId uint32, epoch uint32) ([]string, error) - GetConsensusValidatorsRewardsAddresses(randomness []byte, round uint64, shardId uint32, epoch uint32) ([]string, error) GetOwnPublicKey() []byte } diff --git a/update/interface.go b/update/interface.go index 6973207121a..aed5c2ea876 100644 --- a/update/interface.go +++ b/update/interface.go @@ -123,7 +123,6 @@ type EpochStartTriesSyncHandler interface { // EpochStartPendingMiniBlocksSyncHandler defines the methods to sync all pending miniblocks 
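// using an epoch start metablock together with the map of not yet finalized metablocks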
type EpochStartPendingMiniBlocksSyncHandler interface { SyncPendingMiniBlocksFromMeta(epochStart *block.MetaBlock, unFinished map[string]*block.MetaBlock, waitTime time.Duration) error - SyncPendingMiniBlocksForEpochStart(miniBlockHeaders []block.ShardMiniBlockHeader, waitTime time.Duration) error GetMiniBlocks() (map[string]*block.MiniBlock, error) IsInterfaceNil() bool } diff --git a/update/sync/syncHeadersByHash.go b/update/sync/syncHeadersByHash.go index a505d2419c6..ebefed9582a 100644 --- a/update/sync/syncHeadersByHash.go +++ b/update/sync/syncHeadersByHash.go @@ -132,13 +132,7 @@ func (m *missingHeadersByHash) receivedHeader(hdrHandler data.HeaderHandler, hdr return } - header, ok := m.getHeaderFromPool(hdrHash) - if !ok { - m.mutMissingHdrs.Unlock() - return - } - - m.mapHeaders[string(hdrHash)] = header + m.mapHeaders[string(hdrHash)] = hdrHandler receivedAll := len(m.mapHashes) == len(m.mapHeaders) m.mutMissingHdrs.Unlock() if receivedAll { diff --git a/update/sync/syncMiniBlocks.go b/update/sync/syncMiniBlocks.go index a8436dd1b17..cad0203a216 100644 --- a/update/sync/syncMiniBlocks.go +++ b/update/sync/syncMiniBlocks.go @@ -295,6 +295,14 @@ func (p *pendingMiniBlocks) GetMiniBlocks() (map[string]*block.MiniBlock, error) return p.mapMiniBlocks, nil } +// ClearFields will clear all the maps +func (p *pendingMiniBlocks) ClearFields() { + p.mutPendingMb.Lock() + p.mapHashes = make(map[string]struct{}) + p.mapMiniBlocks = make(map[string]*block.MiniBlock) + p.mutPendingMb.Unlock() +} + // IsInterfaceNil returns true if underlying object is nil func (p *pendingMiniBlocks) IsInterfaceNil() bool { return p == nil From 45a49afea156683132c98ab0c973245857eb733d Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Mon, 23 Mar 2020 23:26:49 +0200 Subject: [PATCH 32/61] finished implementation to save in bootstrap storage unit. 
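
The shard and meta storage handlers now assemble the full bootstrapStorage.BootstrapData
record (last header, last cross-notarized headers, processed and pending miniblocks, the
nodes coordinator registry key and the epoch start trigger key) and commit the synced user
and peer account tries as part of the same save. A minimal sketch of the save path
(saveBootstrapData and the round-based key derivation below are illustrative only, not the
exact implementation):

	package bootstrap

	import (
		"fmt"

		"github.com/ElrondNetwork/elrond-go/marshal"
		"github.com/ElrondNetwork/elrond-go/process/block/bootstrapStorage"
		"github.com/ElrondNetwork/elrond-go/storage"
	)

	// saveBootstrapData marshals the assembled record and stores it under a
	// round-derived key so the storage bootstrapper can locate it on restart
	// (hypothetical helper, shown only to document the flow)
	func saveBootstrapData(
		bootstrapUnit storage.Storer,
		marshalizer marshal.Marshalizer,
		bootStrapData bootstrapStorage.BootstrapData,
		round int64,
	) error {
		bootStrapDataBytes, err := marshalizer.Marshal(&bootStrapData)
		if err != nil {
			return err
		}
		key := []byte(fmt.Sprint(round)) // illustrative keying scheme
		return bootstrapUnit.Put(key, bootStrapDataBytes)
	}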
--- consensus/spos/consensusState_test.go | 5 +- .../baseStorageHandler.go | 35 ++-- epochStart/bootstrap/interface.go | 16 ++ .../metaStorageHandler.go | 33 ++-- epochStart/bootstrap/process.go | 111 ++++++++---- .../shardStorageHandler.go | 153 ++++++++++++++-- ...mpleEpochStartMetaBlockInterceptor_test.go | 171 ------------------ epochStart/bootstrap/syncEpochStartMeta.go | 8 +- epochStart/bootstrap/syncValidatorStatus.go | 19 +- epochStart/errors.go | 6 + epochStart/interface.go | 11 -- .../endOfEpoch/startInEpoch_test.go | 41 ----- process/peer/process.go | 5 +- sharding/indexHashedNodesCoordinator.go | 2 - 14 files changed, 296 insertions(+), 320 deletions(-) rename epochStart/bootstrap/{storagehandler => }/baseStorageHandler.go (75%) create mode 100644 epochStart/bootstrap/interface.go rename epochStart/bootstrap/{storagehandler => }/metaStorageHandler.go (78%) rename epochStart/bootstrap/{storagehandler => }/shardStorageHandler.go (51%) delete mode 100644 epochStart/bootstrap/simpleEpochStartMetaBlockInterceptor_test.go diff --git a/consensus/spos/consensusState_test.go b/consensus/spos/consensusState_test.go index c40d3f8f7e4..bdd161b5f90 100644 --- a/consensus/spos/consensusState_test.go +++ b/consensus/spos/consensusState_test.go @@ -151,7 +151,7 @@ func TestConsensusState_GetNextConsensusGroupShouldFailWhenComputeValidatorsGrou return nil, err } - _, _, err2 := cns.GetNextConsensusGroup([]byte(""), 0, 0, nodesCoordinator, 0) + _, err2 := cns.GetNextConsensusGroup([]byte(""), 0, 0, nodesCoordinator, 0) assert.Equal(t, err, err2) } @@ -162,10 +162,9 @@ func TestConsensusState_GetNextConsensusGroupShouldWork(t *testing.T) { nodesCoordinator := &mock.NodesCoordinatorMock{} - nextConsensusGroup, rewardAddresses, err := cns.GetNextConsensusGroup(nil, 0, 0, nodesCoordinator, 0) + nextConsensusGroup, err := cns.GetNextConsensusGroup(nil, 0, 0, nodesCoordinator, 0) assert.Nil(t, err) assert.NotNil(t, nextConsensusGroup) - assert.NotNil(t, rewardAddresses) } func TestConsensusState_IsConsensusDataSetShouldReturnTrue(t *testing.T) { diff --git a/epochStart/bootstrap/storagehandler/baseStorageHandler.go b/epochStart/bootstrap/baseStorageHandler.go similarity index 75% rename from epochStart/bootstrap/storagehandler/baseStorageHandler.go rename to epochStart/bootstrap/baseStorageHandler.go index d397998bda5..79f4ae8accb 100644 --- a/epochStart/bootstrap/storagehandler/baseStorageHandler.go +++ b/epochStart/bootstrap/baseStorageHandler.go @@ -1,4 +1,4 @@ -package storagehandler +package bootstrap import ( "encoding/json" @@ -6,7 +6,6 @@ import ( "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/hashing" - "github.com/ElrondNetwork/elrond-go/logger" "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/process/block/bootstrapStorage" "github.com/ElrondNetwork/elrond-go/sharding" @@ -19,8 +18,6 @@ const triggerRegistrykeyPrefix = "epochStartTrigger_" const nodesCoordinatorRegistrykeyPrefix = "indexHashed_" -var log = logger.GetOrCreate("boostrap/storagehandler") - // baseStorageHandler handles the storage functions for saving bootstrap data type baseStorageHandler struct { storageService dataRetriever.StorageService @@ -47,15 +44,13 @@ func (bsh *baseStorageHandler) getAndSavePendingMiniBlocks(miniBlocks map[string return sliceToRet, nil } -func (bsh *baseStorageHandler) getAndSaveNodesCoordinatorKey(metaBlock *block.MetaBlock) ([]byte, error) { +func (bsh *baseStorageHandler) 
getAndSaveNodesCoordinatorKey( + metaBlock *block.MetaBlock, + nodesConfig *sharding.NodesCoordinatorRegistry, +) ([]byte, error) { key := append([]byte(nodesCoordinatorRegistrykeyPrefix), metaBlock.RandSeed...) - registry := sharding.NodesCoordinatorRegistry{ - EpochsConfig: nil, // TODO : populate this field when nodes coordinator is done - CurrentEpoch: metaBlock.Epoch, - } - - registryBytes, err := json.Marshal(®istry) + registryBytes, err := json.Marshal(nodesConfig) if err != nil { return nil, err } @@ -67,3 +62,21 @@ func (bsh *baseStorageHandler) getAndSaveNodesCoordinatorKey(metaBlock *block.Me return key, nil } + +func (bsh *baseStorageHandler) saveTries(components *ComponentsNeededForBootstrap) error { + for _, trie := range components.UserAccountTries { + err := trie.Commit() + if err != nil { + return err + } + } + + for _, trie := range components.PeerAccountTries { + err := trie.Commit() + if err != nil { + return err + } + } + + return nil +} diff --git a/epochStart/bootstrap/interface.go b/epochStart/bootstrap/interface.go new file mode 100644 index 00000000000..cf03d1d0360 --- /dev/null +++ b/epochStart/bootstrap/interface.go @@ -0,0 +1,16 @@ +package bootstrap + +import ( + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/sharding" +) + +// StartOfEpochNodesConfigHandler defines the methods to process nodesConfig from epoch start metablocks +type StartOfEpochNodesConfigHandler interface { + NodesConfigFromMetaBlock( + currMetaBlock *block.MetaBlock, + prevMetaBlock *block.MetaBlock, + publicKey []byte, + ) (*sharding.NodesCoordinatorRegistry, uint32, error) + IsInterfaceNil() bool +} diff --git a/epochStart/bootstrap/storagehandler/metaStorageHandler.go b/epochStart/bootstrap/metaStorageHandler.go similarity index 78% rename from epochStart/bootstrap/storagehandler/metaStorageHandler.go rename to epochStart/bootstrap/metaStorageHandler.go index 505559bebdf..49ec43cd938 100644 --- a/epochStart/bootstrap/storagehandler/metaStorageHandler.go +++ b/epochStart/bootstrap/metaStorageHandler.go @@ -1,4 +1,4 @@ -package storagehandler +package bootstrap import ( "encoding/json" @@ -8,7 +8,6 @@ import ( "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap" "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/disabled" "github.com/ElrondNetwork/elrond-go/epochStart/metachain" "github.com/ElrondNetwork/elrond-go/hashing" @@ -61,9 +60,7 @@ func NewMetaStorageHandler( } // SaveDataToStorage will save the fetched data to storage so it will be used by the storage bootstrap component -func (msh *metaStorageHandler) SaveDataToStorage(components *bootstrap.ComponentsNeededForBootstrap) error { - // TODO: here we should save all needed data - +func (msh *metaStorageHandler) SaveDataToStorage(components *ComponentsNeededForBootstrap) error { defer func() { err := msh.storageService.CloseAll() if err != nil { @@ -88,21 +85,20 @@ func (msh *metaStorageHandler) SaveDataToStorage(components *bootstrap.Component return err } - nodesCoordinatorConfigKey, err := msh.getAndSaveNodesCoordinatorKey(components.EpochStartMetaBlock) + nodesCoordinatorConfigKey, err := msh.getAndSaveNodesCoordinatorKey(components.EpochStartMetaBlock, components.NodesConfig) if err != nil { return err } bootStrapData := bootstrapStorage.BootstrapData{ - LastHeader: lastHeader, // meta - epoch start metablock ; shard - shard header - 
LastCrossNotarizedHeaders: nil, // lastFinalizedMetaBlock + firstPendingMetaBlock - LastSelfNotarizedHeaders: []bootstrapStorage.BootstrapHeaderInfo{lastHeader}, // meta - epoch start metablock , shard: shard header - ProcessedMiniBlocks: nil, // first pending metablock and pending miniblocks - difference between them - // (shard - only shard ; meta - possible not to fill at all) - PendingMiniBlocks: miniBlocks, // pending miniblocks - NodesCoordinatorConfigKey: nodesCoordinatorConfigKey, // wait for radu's component - EpochStartTriggerConfigKey: triggerConfigKey, // metachain/shard trigger registery - HighestFinalBlockNonce: lastHeader.Nonce, // + LastHeader: lastHeader, // meta - epoch start metablock ; shard - shard header + LastCrossNotarizedHeaders: nil, // lastFinalizedMetaBlock + firstPendingMetaBlock + LastSelfNotarizedHeaders: []bootstrapStorage.BootstrapHeaderInfo{lastHeader}, + ProcessedMiniBlocks: nil, + PendingMiniBlocks: miniBlocks, + NodesCoordinatorConfigKey: nodesCoordinatorConfigKey, + EpochStartTriggerConfigKey: triggerConfigKey, + HighestFinalBlockNonce: lastHeader.Nonce, LastRound: int64(components.EpochStartMetaBlock.Round), } bootStrapDataBytes, err := msh.marshalizer.Marshal(&bootStrapData) @@ -115,6 +111,11 @@ func (msh *metaStorageHandler) SaveDataToStorage(components *bootstrap.Component return err } + err = msh.saveTries(components) + if err != nil { + return err + } + log.Info("saved bootstrap data to storage") return nil } @@ -146,7 +147,7 @@ func (msh *metaStorageHandler) getAndSaveLastHeader(metaBlock *block.MetaBlock) return bootstrapHdrInfo, nil } -func (msh *metaStorageHandler) getAndSaveTriggerRegistry(components *bootstrap.ComponentsNeededForBootstrap) ([]byte, error) { +func (msh *metaStorageHandler) getAndSaveTriggerRegistry(components *ComponentsNeededForBootstrap) ([]byte, error) { metaBlock := components.EpochStartMetaBlock hash, err := core.CalculateHash(msh.marshalizer, msh.hasher, metaBlock) if err != nil { diff --git a/epochStart/bootstrap/process.go b/epochStart/bootstrap/process.go index 6c87d7d40f6..ded7e557504 100644 --- a/epochStart/bootstrap/process.go +++ b/epochStart/bootstrap/process.go @@ -22,13 +22,13 @@ import ( "github.com/ElrondNetwork/elrond-go/epochStart" "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/disabled" factoryInterceptors "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/factory" - "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/storagehandler" "github.com/ElrondNetwork/elrond-go/hashing" "github.com/ElrondNetwork/elrond-go/logger" "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/p2p" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/economics" + "github.com/ElrondNetwork/elrond-go/process/interceptors" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/storage" storageFactory "github.com/ElrondNetwork/elrond-go/storage/factory" @@ -48,7 +48,7 @@ type ComponentsNeededForBootstrap struct { PreviousEpochStartMetaBlock *block.MetaBlock ShardHeader *block.Header NodesConfig *sharding.NodesCoordinatorRegistry - ShardHeaders map[string]data.HeaderHandler + Headers map[string]data.HeaderHandler ShardCoordinator sharding.Coordinator UserAccountTries map[string]data.Trie PeerAccountTries map[string]data.Trie @@ -57,6 +57,7 @@ type ComponentsNeededForBootstrap struct { // epochStartBootstrap will handle requesting the needed data to start when joining late the network type epochStartBootstrap struct { + 
// should come via arguments publicKey crypto.PublicKey marshalizer marshal.Marshalizer hasher hashing.Hasher @@ -67,8 +68,6 @@ type epochStartBootstrap struct { blockSingleSigner crypto.SingleSigner keyGen crypto.KeyGenerator blockKeyGen crypto.KeyGenerator - requestHandler process.RequestHandler - whiteListHandler update.WhiteListHandler shardCoordinator sharding.Coordinator genesisNodesConfig *sharding.NodesSetup pathManager storage.PathManagerHandler @@ -76,25 +75,27 @@ type epochStartBootstrap struct { defaultDBPath string defaultEpochString string - interceptorContainer process.InterceptorsContainer - dataPool dataRetriever.PoolsHolder - computedEpoch uint32 - + // created components + requestHandler process.RequestHandler + interceptorContainer process.InterceptorsContainer + dataPool dataRetriever.PoolsHolder miniBlocksSyncer epochStart.PendingMiniBlocksSyncHandler headersSyncer epochStart.HeadersByHashSyncer epochStartMetaBlockSyncer epochStart.StartOfEpochMetaSyncer - nodesConfigHandler epochStart.StartOfEpochNodesConfigHandler - baseData baseDataInStorage + nodesConfigHandler StartOfEpochNodesConfigHandler + userTrieStorageManager data.StorageManager + peerTrieStorageManager data.StorageManager + whiteListHandler update.WhiteListHandler + // gathered data epochStartMeta *block.MetaBlock prevEpochStartMeta *block.MetaBlock syncedHeaders map[string]data.HeaderHandler nodesConfig *sharding.NodesCoordinatorRegistry userAccountTries map[string]data.Trie peerAccountTries map[string]data.Trie - - userTrieStorageManager data.StorageManager - peerTrieStorageManager data.StorageManager + baseData baseDataInStorage + computedEpoch uint32 } type baseDataInStorage struct { @@ -106,22 +107,21 @@ type baseDataInStorage struct { // ArgsEpochStartBootstrap holds the arguments needed for creating an epoch start data provider component type ArgsEpochStartBootstrap struct { - PublicKey crypto.PublicKey - Messenger p2p.Messenger - Marshalizer marshal.Marshalizer - Hasher hashing.Hasher - GeneralConfig config.Config - EconomicsConfig config.EconomicsConfig - GenesisShardCoordinator sharding.Coordinator - SingleSigner crypto.SingleSigner - BlockSingleSigner crypto.SingleSigner - KeyGen crypto.KeyGenerator - BlockKeyGen crypto.KeyGenerator - WhiteListHandler update.WhiteListHandler - GenesisNodesConfig *sharding.NodesSetup - WorkingDir string - DefaultDBPath string - DefaultEpochString string + PublicKey crypto.PublicKey + Marshalizer marshal.Marshalizer + Hasher hashing.Hasher + Messenger p2p.Messenger + GeneralConfig config.Config + EconomicsData *economics.EconomicsData + SingleSigner crypto.SingleSigner + BlockSingleSigner crypto.SingleSigner + KeyGen crypto.KeyGenerator + BlockKeyGen crypto.KeyGenerator + GenesisNodesConfig *sharding.NodesSetup + PathManager storage.PathManagerHandler + WorkingDir string + DefaultDBPath string + DefaultEpochString string } // NewEpochStartBootstrap will return a new instance of epochStartBootstrap @@ -132,7 +132,6 @@ func NewEpochStartBootstrapHandler(args ArgsEpochStartBootstrap) (*epochStartBoo hasher: args.Hasher, messenger: args.Messenger, generalConfig: args.GeneralConfig, - whiteListHandler: args.WhiteListHandler, genesisNodesConfig: args.GenesisNodesConfig, workingDir: args.WorkingDir, defaultEpochString: args.DefaultEpochString, @@ -226,6 +225,20 @@ func (e *epochStartBootstrap) prepareComponentsToSyncFromNetwork() error { return err } + whiteListCache, err := storageUnit.NewCache( + storageUnit.CacheType(e.generalConfig.WhiteListPool.Type), + 
e.generalConfig.WhiteListPool.Size, + e.generalConfig.WhiteListPool.Shards, + ) + if err != nil { + return err + } + + e.whiteListHandler, err = interceptors.NewWhiteListDataVerifier(whiteListCache) + if err != nil { + return err + } + err = e.createRequestHandler() if err != nil { return err @@ -279,6 +292,16 @@ func (e *epochStartBootstrap) prepareComponentsToSyncFromNetwork() error { } e.headersSyncer, err = sync.NewMissingheadersByHashSyncer(syncMissingHeadersArgs) + argsNewValidatorStatusSyncers := ArgsNewSyncValidatorStatus{ + DataPool: e.dataPool, + Marshalizer: e.marshalizer, + RequestHandler: e.requestHandler, + } + e.nodesConfigHandler, err = NewSyncValidatorStatus(argsNewValidatorStatusSyncers) + if err != nil { + return err + } + return nil } @@ -356,7 +379,12 @@ func (e *epochStartBootstrap) requestAndProcessing() (uint32, uint32, uint32, er } func (e *epochStartBootstrap) requestAndProcessForMeta() error { - err := e.syncUserAccountsState(e.epochStartMeta.RootHash) + err := e.createTrieStorageManagers() + if err != nil { + return err + } + + err = e.syncUserAccountsState(e.epochStartMeta.RootHash) if err != nil { return err } @@ -370,13 +398,13 @@ func (e *epochStartBootstrap) requestAndProcessForMeta() error { EpochStartMetaBlock: e.epochStartMeta, PreviousEpochStartMetaBlock: e.prevEpochStartMeta, NodesConfig: e.nodesConfig, - ShardHeaders: e.syncedHeaders, + Headers: e.syncedHeaders, ShardCoordinator: e.shardCoordinator, UserAccountTries: e.userAccountTries, PeerAccountTries: e.peerAccountTries, } - storageHandlerComponent, err := storagehandler.NewMetaStorageHandler( + storageHandlerComponent, err := NewMetaStorageHandler( e.generalConfig, e.shardCoordinator, e.pathManager, @@ -447,23 +475,34 @@ func (e *epochStartBootstrap) requestAndProcessForShard() error { return epochStart.ErrWrongTypeAssertion } + err = e.createTrieStorageManagers() + if err != nil { + return err + } + err = e.syncUserAccountsState(ownShardHdr.RootHash) if err != nil { return err } + err = e.syncPeerAccountsState(e.epochStartMeta.ValidatorStatsRootHash) + if err != nil { + return err + } + components := &ComponentsNeededForBootstrap{ EpochStartMetaBlock: e.epochStartMeta, PreviousEpochStartMetaBlock: e.prevEpochStartMeta, ShardHeader: ownShardHdr, NodesConfig: e.nodesConfig, - ShardHeaders: e.syncedHeaders, + Headers: e.syncedHeaders, ShardCoordinator: e.shardCoordinator, UserAccountTries: e.userAccountTries, + PeerAccountTries: e.peerAccountTries, PendingMiniBlocks: pendingMiniBlocks, } - storageHandlerComponent, err := storagehandler.NewShardStorageHandler( + storageHandlerComponent, err := NewShardStorageHandler( e.generalConfig, e.shardCoordinator, e.pathManager, @@ -625,9 +664,7 @@ func (e *epochStartBootstrap) createRequestHandler() error { } requestedItemsHandler := timecache.NewTimeCache(100) - maxToRequest := 100 - e.requestHandler, err = requestHandlers.NewResolverRequestHandler(finder, requestedItemsHandler, e.whiteListHandler, maxToRequest, core.MetachainShardId) return err } diff --git a/epochStart/bootstrap/storagehandler/shardStorageHandler.go b/epochStart/bootstrap/shardStorageHandler.go similarity index 51% rename from epochStart/bootstrap/storagehandler/shardStorageHandler.go rename to epochStart/bootstrap/shardStorageHandler.go index 966c157f1ca..16a25c651a4 100644 --- a/epochStart/bootstrap/storagehandler/shardStorageHandler.go +++ b/epochStart/bootstrap/shardStorageHandler.go @@ -1,4 +1,4 @@ -package storagehandler +package bootstrap import ( "encoding/json" @@ -7,9 +7,10 @@ 
import ( "github.com/ElrondNetwork/elrond-go/config" "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap" + "github.com/ElrondNetwork/elrond-go/epochStart" "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/disabled" "github.com/ElrondNetwork/elrond-go/epochStart/shardchain" "github.com/ElrondNetwork/elrond-go/hashing" @@ -62,13 +63,11 @@ func NewShardStorageHandler( } // SaveDataToStorage will save the fetched data to storage so it will be used by the storage bootstrap component -func (ssh *shardStorageHandler) SaveDataToStorage(components *bootstrap.ComponentsNeededForBootstrap) error { - // TODO: here we should save all needed data - +func (ssh *shardStorageHandler) SaveDataToStorage(components *ComponentsNeededForBootstrap) error { defer func() { err := ssh.storageService.CloseAll() if err != nil { - log.Debug("error while closing storers", "error", err) + log.Warn("error while closing storers", "error", err) } }() @@ -79,7 +78,12 @@ func (ssh *shardStorageHandler) SaveDataToStorage(components *bootstrap.Componen return err } - miniBlocks, err := ssh.getAndSavePendingMiniBlocks(components.PendingMiniBlocks) + processedMiniBlocks, err := ssh.getProcessedMiniBlocks(components.PendingMiniBlocks, components.EpochStartMetaBlock, components.Headers) + if err != nil { + return err + } + + pendingMiniBlocks, err := ssh.getAndSavePendingMiniBlocks(components.PendingMiniBlocks) if err != nil { return err } @@ -89,28 +93,34 @@ func (ssh *shardStorageHandler) SaveDataToStorage(components *bootstrap.Componen return err } - nodesCoordinatorConfigKey, err := ssh.getAndSaveNodesCoordinatorKey(components.EpochStartMetaBlock) + nodesCoordinatorConfigKey, err := ssh.getAndSaveNodesCoordinatorKey(components.EpochStartMetaBlock, components.NodesConfig) + if err != nil { + return err + } + + lastCrossNotarizedHdrs, err := ssh.getLastCrossNotarizedHeaders(components.EpochStartMetaBlock, components.Headers) if err != nil { return err } bootStrapData := bootstrapStorage.BootstrapData{ - LastHeader: lastHeader, // meta - epoch start metablock ; shard - shard header - LastCrossNotarizedHeaders: nil, // lastFinalizedMetaBlock + firstPendingMetaBlock - LastSelfNotarizedHeaders: []bootstrapStorage.BootstrapHeaderInfo{lastHeader}, // meta - epoch start metablock , shard: shard header - ProcessedMiniBlocks: nil, // first pending metablock and pending miniblocks - difference between them - // (shard - only shard ; meta - possible not to fill at all) - PendingMiniBlocks: miniBlocks, // pending miniblocks - NodesCoordinatorConfigKey: nodesCoordinatorConfigKey, // wait for radu's component - EpochStartTriggerConfigKey: triggerConfigKey, // metachain/shard trigger registry - HighestFinalBlockNonce: 0, // + LastHeader: lastHeader, + LastCrossNotarizedHeaders: lastCrossNotarizedHdrs, + LastSelfNotarizedHeaders: []bootstrapStorage.BootstrapHeaderInfo{lastHeader}, + ProcessedMiniBlocks: processedMiniBlocks, + PendingMiniBlocks: pendingMiniBlocks, + NodesCoordinatorConfigKey: nodesCoordinatorConfigKey, + EpochStartTriggerConfigKey: triggerConfigKey, + HighestFinalBlockNonce: lastHeader.Nonce, LastRound: int64(components.ShardHeader.Round), } bootStrapDataBytes, err := ssh.marshalizer.Marshal(&bootStrapData) if err != nil { return err } - roundToUseAsKey := int64(components.ShardHeader.Round + 2) // TODO: change this. 
added 2 in order to skip + + roundToUseAsKey := int64(components.ShardHeader.Round + 2) + // TODO: change this. added 2 in order to skip // equality check between round and LastRound from bootstrap from storage component roundNum := bootstrapStorage.RoundNum{Num: roundToUseAsKey} roundNumBytes, err := ssh.marshalizer.Marshal(&roundNum) @@ -130,9 +140,85 @@ func (ssh *shardStorageHandler) SaveDataToStorage(components *bootstrap.Componen return err } + err = ssh.saveTries(components) + if err != nil { + return err + } + return nil } +func (ssh *shardStorageHandler) getProcessedMiniBlocks( + pendingMiniBlocks map[string]*block.MiniBlock, + meta *block.MetaBlock, + headers map[string]data.HeaderHandler, +) ([]bootstrapStorage.MiniBlocksInMeta, error) { + processedMiniBlocks := make([]bootstrapStorage.MiniBlocksInMeta, 0) + for _, epochStartShardData := range meta.EpochStart.LastFinalizedHeaders { + if epochStartShardData.ShardID != ssh.shardCoordinator.SelfId() { + continue + } + + neededMeta, ok := headers[string(epochStartShardData.FirstPendingMetaBlock)].(*block.MetaBlock) + if !ok { + return nil, epochStart.ErrMissingHeader + } + + processedMbHashes := make([][]byte, 0) + miniBlocksDstMe := getAllMiniBlocksWithDst(neededMeta, ssh.shardCoordinator.SelfId()) + for hash, mb := range miniBlocksDstMe { + if _, ok := pendingMiniBlocks[hash]; ok { + continue + } + + processedMbHashes = append(processedMbHashes, mb.Hash) + } + + processedMiniBlocks = append(processedMiniBlocks, bootstrapStorage.MiniBlocksInMeta{ + MetaHash: epochStartShardData.FirstPendingMetaBlock, + MiniBlocksHashes: processedMbHashes, + }) + return processedMiniBlocks, nil + } + + return nil, epochStart.ErrEpochStartDataForShardNotFound +} + +func (ssh *shardStorageHandler) getLastCrossNotarizedHeaders(meta *block.MetaBlock, headers map[string]data.HeaderHandler) ([]bootstrapStorage.BootstrapHeaderInfo, error) { + crossNotarizedHdrs := make([]bootstrapStorage.BootstrapHeaderInfo, 0) + for _, epochStartShardData := range meta.EpochStart.LastFinalizedHeaders { + if epochStartShardData.ShardID != ssh.shardCoordinator.SelfId() { + continue + } + + neededMeta, ok := headers[string(epochStartShardData.LastFinishedMetaBlock)] + if !ok { + return nil, epochStart.ErrMissingHeader + } + + crossNotarizedHdrs = append(crossNotarizedHdrs, bootstrapStorage.BootstrapHeaderInfo{ + ShardId: ssh.shardCoordinator.SelfId(), + Nonce: neededMeta.GetNonce(), + Hash: epochStartShardData.LastFinishedMetaBlock, + }) + + neededMeta, ok = headers[string(epochStartShardData.FirstPendingMetaBlock)] + if !ok { + return nil, epochStart.ErrMissingHeader + } + + crossNotarizedHdrs = append(crossNotarizedHdrs, bootstrapStorage.BootstrapHeaderInfo{ + ShardId: ssh.shardCoordinator.SelfId(), + Nonce: neededMeta.GetNonce(), + Hash: epochStartShardData.FirstPendingMetaBlock, + }) + + return crossNotarizedHdrs, nil + } + + return nil, epochStart.ErrEpochStartDataForShardNotFound +} + func (ssh *shardStorageHandler) getAndSaveLastHeader(shardHeader *block.Header) (bootstrapStorage.BootstrapHeaderInfo, error) { lastHeaderHash, err := core.CalculateHash(ssh.marshalizer, ssh.hasher, shardHeader) if err != nil { @@ -158,7 +244,7 @@ func (ssh *shardStorageHandler) getAndSaveLastHeader(shardHeader *block.Header) return bootstrapHdrInfo, nil } -func (ssh *shardStorageHandler) getAndSaveTriggerRegistry(components *bootstrap.ComponentsNeededForBootstrap) ([]byte, error) { +func (ssh *shardStorageHandler) getAndSaveTriggerRegistry(components *ComponentsNeededForBootstrap) ([]byte, 
error) { shardHeader := components.ShardHeader metaBlock := components.EpochStartMetaBlock @@ -193,6 +279,35 @@ func (ssh *shardStorageHandler) getAndSaveTriggerRegistry(components *bootstrap. return key, nil } +func getAllMiniBlocksWithDst(m *block.MetaBlock, destId uint32) map[string]block.ShardMiniBlockHeader { + hashDst := make(map[string]block.ShardMiniBlockHeader) + for i := 0; i < len(m.ShardInfo); i++ { + if m.ShardInfo[i].ShardID == destId { + continue + } + + for _, val := range m.ShardInfo[i].ShardMiniBlockHeaders { + if val.ReceiverShardID == destId && val.SenderShardID != destId { + hashDst[string(val.Hash)] = val + } + } + } + + for _, val := range m.MiniBlockHeaders { + if val.ReceiverShardID == destId && val.SenderShardID != destId { + shardMiniBlockHdr := block.ShardMiniBlockHeader{ + Hash: val.Hash, + ReceiverShardID: val.ReceiverShardID, + SenderShardID: val.SenderShardID, + TxCount: val.TxCount, + } + hashDst[string(val.Hash)] = shardMiniBlockHdr + } + } + + return hashDst +} + // IsInterfaceNil returns true if there is no value under the interface func (ssh *shardStorageHandler) IsInterfaceNil() bool { return ssh == nil diff --git a/epochStart/bootstrap/simpleEpochStartMetaBlockInterceptor_test.go b/epochStart/bootstrap/simpleEpochStartMetaBlockInterceptor_test.go deleted file mode 100644 index 675f14571ac..00000000000 --- a/epochStart/bootstrap/simpleEpochStartMetaBlockInterceptor_test.go +++ /dev/null @@ -1,171 +0,0 @@ -package bootstrap_test - -import ( - "testing" - - "github.com/ElrondNetwork/elrond-go/core/check" - "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap" - mock2 "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/mock" - "github.com/ElrondNetwork/elrond-go/epochStart/mock" - "github.com/stretchr/testify/require" -) - -func TestNewSimpleMetaBlockInterceptor_NilMarshalizerShouldErr(t *testing.T) { - t.Parallel() - - smbi, err := bootstrap.NewSimpleEpochStartMetaBlockInterceptor(nil, &mock.HasherMock{}) - require.Nil(t, smbi) - require.Equal(t, bootstrap.ErrNilMarshalizer, err) -} - -func TestNewSimpleMetaBlockInterceptor_NilHasherShouldErr(t *testing.T) { - t.Parallel() - - smbi, err := bootstrap.NewSimpleEpochStartMetaBlockInterceptor(&mock.MarshalizerMock{}, nil) - require.Nil(t, smbi) - require.Equal(t, bootstrap.ErrNilHasher, err) -} - -func TestNewSimpleMetaBlockInterceptor_OkValsShouldWork(t *testing.T) { - t.Parallel() - - smbi, err := bootstrap.NewSimpleEpochStartMetaBlockInterceptor(&mock.MarshalizerMock{}, &mock.HasherMock{}) - require.Nil(t, err) - require.False(t, check.IfNil(smbi)) -} - -func TestSimpleMetaBlockInterceptor_ProcessReceivedMessage_ReceivedMessageIsNotAMetaBlockShouldNotAdd(t *testing.T) { - t.Parallel() - - smbi, _ := bootstrap.NewSimpleEpochStartMetaBlockInterceptor(&mock.MarshalizerMock{}, &mock.HasherMock{}) - - message := mock2.P2PMessageMock{ - DataField: []byte("not a metablock"), - } - - _ = smbi.ProcessReceivedMessage(&message, nil) - - require.Zero(t, len(smbi.GetReceivedMetablocks())) -} - -func TestSimpleMetaBlockInterceptor_ProcessReceivedMessage_UnmarshalFailsShouldErr(t *testing.T) { - t.Parallel() - - marshalizer := &mock.MarshalizerMock{Fail: true} - smbi, _ := bootstrap.NewSimpleEpochStartMetaBlockInterceptor(marshalizer, &mock.HasherMock{}) - - mb := &block.MetaBlock{Epoch: 5} - mbBytes, _ := marshalizer.Marshal(mb) - message := mock2.P2PMessageMock{ - DataField: mbBytes, - } - - _ = smbi.ProcessReceivedMessage(&message, nil) - - require.Zero(t, 
len(smbi.GetReceivedMetablocks())) -} - -func TestSimpleMetaBlockInterceptor_ProcessReceivedMessage_ReceivedMessageIsAMetaBlockShouldAdd(t *testing.T) { - t.Parallel() - - marshalizer := &mock.MarshalizerMock{} - smbi, _ := bootstrap.NewSimpleEpochStartMetaBlockInterceptor(marshalizer, &mock.HasherMock{}) - - mb := &block.MetaBlock{Epoch: 5} - mbBytes, _ := marshalizer.Marshal(mb) - message := mock2.P2PMessageMock{ - DataField: mbBytes, - } - - _ = smbi.ProcessReceivedMessage(&message, nil) - - require.Equal(t, 1, len(smbi.GetReceivedMetablocks())) -} - -func TestSimpleMetaBlockInterceptor_ProcessReceivedMessage_ShouldAddForMorePeers(t *testing.T) { - t.Parallel() - - marshalizer := &mock.MarshalizerMock{} - smbi, _ := bootstrap.NewSimpleEpochStartMetaBlockInterceptor(marshalizer, &mock.HasherMock{}) - - mb := &block.MetaBlock{Epoch: 5} - mbBytes, _ := marshalizer.Marshal(mb) - message1 := &mock2.P2PMessageMock{ - DataField: mbBytes, - PeerField: "peer1", - } - message2 := &mock2.P2PMessageMock{ - DataField: mbBytes, - PeerField: "peer2", - } - - _ = smbi.ProcessReceivedMessage(message1, nil) - _ = smbi.ProcessReceivedMessage(message2, nil) - - for _, res := range smbi.GetPeersSliceForMetablocks() { - require.Equal(t, 2, len(res)) - } -} - -func TestSimpleMetaBlockInterceptor_ProcessReceivedMessage_ShouldNotAddTwiceForTheSamePeer(t *testing.T) { - t.Parallel() - - marshalizer := &mock.MarshalizerMock{} - smbi, _ := bootstrap.NewSimpleEpochStartMetaBlockInterceptor(marshalizer, &mock.HasherMock{}) - - mb := &block.MetaBlock{Epoch: 5} - mbBytes, _ := marshalizer.Marshal(mb) - message1 := &mock2.P2PMessageMock{ - DataField: mbBytes, - PeerField: "peer1", - } - message2 := &mock2.P2PMessageMock{ - DataField: mbBytes, - PeerField: "peer1", - } - - _ = smbi.ProcessReceivedMessage(message1, nil) - _ = smbi.ProcessReceivedMessage(message2, nil) - - for _, res := range smbi.GetPeersSliceForMetablocks() { - require.Equal(t, 1, len(res)) - } -} - -func TestSimpleMetaBlockInterceptor_GetMetaBlock_NumTriesExceededShouldErr(t *testing.T) { - t.Parallel() - - marshalizer := &mock.MarshalizerMock{} - smbi, _ := bootstrap.NewSimpleEpochStartMetaBlockInterceptor(marshalizer, &mock.HasherMock{}) - - // no message received, so should exit with err - mb, err := smbi.GetEpochStartMetaBlock(2, 5) - require.Zero(t, mb) - require.Equal(t, bootstrap.ErrNumTriesExceeded, err) -} - -func TestSimpleMetaBlockInterceptor_GetMetaBlockShouldWork(t *testing.T) { - t.Parallel() - - marshalizer := &mock.MarshalizerMock{} - smbi, _ := bootstrap.NewSimpleEpochStartMetaBlockInterceptor(marshalizer, &mock.HasherMock{}) - - mb := &block.MetaBlock{Epoch: 5} - mbBytes, _ := marshalizer.Marshal(mb) - message1 := &mock2.P2PMessageMock{ - DataField: mbBytes, - PeerField: "peer1", - } - message2 := &mock2.P2PMessageMock{ - DataField: mbBytes, - PeerField: "peer2", - } - - _ = smbi.ProcessReceivedMessage(message1, nil) - _ = smbi.ProcessReceivedMessage(message2, nil) - - mb, err := smbi.GetEpochStartMetaBlock(2, 5) - require.Nil(t, err) - require.NotNil(t, mb) -} diff --git a/epochStart/bootstrap/syncEpochStartMeta.go b/epochStart/bootstrap/syncEpochStartMeta.go index 199245d0a66..649f27c1f61 100644 --- a/epochStart/bootstrap/syncEpochStartMeta.go +++ b/epochStart/bootstrap/syncEpochStartMeta.go @@ -7,14 +7,16 @@ import ( "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/epochStart" "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/process" 
"github.com/ElrondNetwork/elrond-go/process/factory" "github.com/ElrondNetwork/elrond-go/storage" ) type epochStartMetaSyncer struct { - requestHandler epochStart.RequestHandler - messenger p2p.Messenger - metaBlockPool storage.Cacher + requestHandler epochStart.RequestHandler + messenger p2p.Messenger + metaBlockPool storage.Cacher + epochStartMetaBlockInterceptor process.Interceptor } func NewEpochStartMetaSyncer() (*epochStartMetaSyncer, error) { diff --git a/epochStart/bootstrap/syncValidatorStatus.go b/epochStart/bootstrap/syncValidatorStatus.go index 5a432c19f6f..f5ab9005d6c 100644 --- a/epochStart/bootstrap/syncValidatorStatus.go +++ b/epochStart/bootstrap/syncValidatorStatus.go @@ -23,10 +23,20 @@ type syncValidatorStatus struct { requestHandler process.RequestHandler } -// NewSyncValidatorStatus creates a new validator status process component -func NewSyncValidatorStatus() (*syncValidatorStatus, error) { +// ArgsNewSyncValidatorStatus +type ArgsNewSyncValidatorStatus struct { + DataPool dataRetriever.PoolsHolder + Marshalizer marshal.Marshalizer + RequestHandler process.RequestHandler +} - s := &syncValidatorStatus{} +// NewSyncValidatorStatus creates a new validator status process component +func NewSyncValidatorStatus(args ArgsNewSyncValidatorStatus) (*syncValidatorStatus, error) { + s := &syncValidatorStatus{ + dataPool: args.DataPool, + marshalizer: args.Marshalizer, + requestHandler: args.RequestHandler, + } syncMiniBlocksArgs := sync.ArgsNewPendingMiniBlocksSyncer{ Storage: &disabled.Storer{}, Cache: s.dataPool.MiniBlocks(), @@ -130,6 +140,9 @@ func (s *syncValidatorStatus) processNodesConfigFor( shardId := fmt.Sprint(vid.ShardId) // TODO - make decision according to validatorInfo.List after it is implemented + // most probably there is a need to create an indexed hashed nodescoordinator - set the data manually + // call the shuffling and save the result - this is still problematic as you always need the -1 config + // recursively going to genesis - data has to be updated in the peer trie. 
epochValidators.EligibleValidators[shardId] = append(epochValidators.EligibleValidators[shardId], serializableValidator) if shouldSearchSelfId && !found && bytes.Equal(vid.PublicKey, publicKey) { diff --git a/epochStart/errors.go b/epochStart/errors.go index 80ce9c7b724..ab646aeaab2 100644 --- a/epochStart/errors.go +++ b/epochStart/errors.go @@ -127,3 +127,9 @@ var ErrNilValidatorInfo = errors.New("validator info is nil") // ErrEpochStartDataForShardNotFound signals that epoch start shard data was not found for current shard id var ErrEpochStartDataForShardNotFound = errors.New("epoch start data for current shard not found") + +// ErrNumTriesExceeded signals that number of tries has exceeded +var ErrNumTriesExceeded = errors.New("number of tries exceeded") + +// ErrMissingHeader signals that searched header is missing +var ErrMissingHeader = errors.New("missing header") diff --git a/epochStart/interface.go b/epochStart/interface.go index f3f839101e5..e4c034f955e 100644 --- a/epochStart/interface.go +++ b/epochStart/interface.go @@ -5,7 +5,6 @@ import ( "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/sharding" ) // TriggerHandler defines the functionalities for an start of epoch trigger @@ -109,13 +108,3 @@ type StartOfEpochMetaSyncer interface { SyncEpochStartMeta(waitTime time.Duration) (*block.MetaBlock, error) IsInterfaceNil() bool } - -// StartOfEpochNodesConfigHandler defines the methods to process nodesConfig from epoch start metablocks -type StartOfEpochNodesConfigHandler interface { - NodesConfigFromMetaBlock( - currMetaBlock *block.MetaBlock, - prevMetaBlock *block.MetaBlock, - publicKey []byte, - ) (*sharding.NodesCoordinatorRegistry, uint32, error) - IsInterfaceNil() bool -} diff --git a/integrationTests/multiShard/endOfEpoch/startInEpoch_test.go b/integrationTests/multiShard/endOfEpoch/startInEpoch_test.go index 795a615cf4c..3c43be753f4 100644 --- a/integrationTests/multiShard/endOfEpoch/startInEpoch_test.go +++ b/integrationTests/multiShard/endOfEpoch/startInEpoch_test.go @@ -7,14 +7,10 @@ import ( "testing" "time" - "github.com/ElrondNetwork/elrond-go/config" "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/factory" - "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/nodesconfigprovider" "github.com/ElrondNetwork/elrond-go/integrationTests" "github.com/ElrondNetwork/elrond-go/integrationTests/mock" "github.com/ElrondNetwork/elrond-go/sharding" - "github.com/stretchr/testify/assert" ) func TestStartInEpochForAShardNodeInMultiShardedEnvironment(t *testing.T) { @@ -117,43 +113,6 @@ func TestStartInEpochForAShardNodeInMultiShardedEnvironment(t *testing.T) { InitialNodes: getInitialNodes(nodesMap), } nodesConfig.SetNumberOfShards(uint32(numOfShards)) - - epochStartProviderFactoryArgs := factory.EpochStartDataProviderFactoryArgs{ - PubKey: nodeToJoinLate.NodeKeys.Pk, - Messenger: advertiser, - Marshalizer: integrationTests.TestMarshalizer, - Hasher: integrationTests.TestHasher, - NodesConfigProvider: nodesconfigprovider.NewSimpleNodesConfigProvider(&nodesConfig), - StartTime: time.Time{}, - OriginalNodesConfig: &nodesConfig, - PathManager: &mock.PathManagerStub{}, - GeneralConfig: &config.Config{ - EpochStartConfig: config.EpochStartConfig{ - MinRoundsBetweenEpochs: 5, - RoundsPerEpoch: 10, - }, - WhiteListPool: config.CacheConfig{ - Size: 10000, - Type: "LRU", - Shards: 1, - }, - StoragePruning: config.StoragePruningConfig{ - Enabled: 
false, - FullArchive: true, - NumEpochsToKeep: 3, - NumActivePersisters: 3, - }, - }, - IsEpochFoundInStorage: false, - } - epochStartDataProviderFactory, _ := factory.NewEpochStartDataProviderFactory(epochStartProviderFactoryArgs) - epochStartDataProvider, _ := epochStartDataProviderFactory.Create() - - res, err := epochStartDataProvider.Bootstrap() - assert.NoError(t, err) - assert.NotNil(t, res) - // TODO: add more checks - assert.Equal(t, epoch, res.EpochStartMetaBlock.Epoch) } func convertToSlice(originalMap map[uint32][]*integrationTests.TestProcessorNode) []*integrationTests.TestProcessorNode { diff --git a/process/peer/process.go b/process/peer/process.go index d69d501bb75..999a810b695 100644 --- a/process/peer/process.go +++ b/process/peer/process.go @@ -340,9 +340,8 @@ func (vs *validatorStatistics) getValidatorDataFromLeaves( func (vs *validatorStatistics) peerAccountToValidatorInfo(peerAccount *state.PeerAccount) *state.ValidatorInfo { return &state.ValidatorInfo{ PublicKey: peerAccount.BLSPublicKey, - ShardId: peerAccount.CurrentShardId, - List: "list", - Index: 0, + ShardId: peerAccount.NextShardId, + List: peerAccount.List, TempRating: peerAccount.TempRating, Rating: peerAccount.Rating, RewardAddress: peerAccount.RewardAddress, diff --git a/sharding/indexHashedNodesCoordinator.go b/sharding/indexHashedNodesCoordinator.go index 235df58c8c6..df01f77698b 100644 --- a/sharding/indexHashedNodesCoordinator.go +++ b/sharding/indexHashedNodesCoordinator.go @@ -98,8 +98,6 @@ func NewIndexHashedNodesCoordinator(arguments ArgNodesCoordinator) (*indexHashed ihgs.nodesPerShardSetter = ihgs err = ihgs.nodesPerShardSetter.SetNodesPerShards(arguments.EligibleNodes, arguments.WaitingNodes, arguments.Epoch, false) - - err = ihgs.SetNodesPerShards(arguments.EligibleNodes, arguments.WaitingNodes, arguments.Epoch, false) if err != nil { return nil, err } From 9e9b23be86e1e5322d07834e22aa24b06422a94d Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Tue, 24 Mar 2020 10:48:03 +0200 Subject: [PATCH 33/61] fix after merge --- cmd/node/main.go | 47 +++++++------------ epochStart/bootstrap/baseStorageHandler.go | 21 +++++---- .../{ => startInEpoch}/startInEpoch_test.go | 7 +-- .../networkSharding/networkSharding_test.go | 2 +- update/sync/syncHeadersByHash.go | 2 +- 5 files changed, 35 insertions(+), 44 deletions(-) rename integrationTests/multiShard/endOfEpoch/{ => startInEpoch}/startInEpoch_test.go (94%) diff --git a/cmd/node/main.go b/cmd/node/main.go index c1ef5df8824..5f9d4451e35 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -37,7 +37,6 @@ import ( "github.com/ElrondNetwork/elrond-go/epochStart/notifier" "github.com/ElrondNetwork/elrond-go/facade" "github.com/ElrondNetwork/elrond-go/hashing" - "github.com/ElrondNetwork/elrond-go/hashing/blake2b" "github.com/ElrondNetwork/elrond-go/logger" "github.com/ElrondNetwork/elrond-go/logger/redirects" "github.com/ElrondNetwork/elrond-go/marshal" @@ -578,11 +577,6 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { return err } - networkComponents, err = factory.NetworkComponentsFactory(p2pConfig, log, &blake2b.Blake2b{}) - if err != nil { - return err - } - genesisShardCoordinator, nodeType, err := createShardCoordinator(genesisNodesConfig, pubKey, preferencesConfig.Preferences, log) if err != nil { return err @@ -612,7 +606,7 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { } log.Trace("creating network components") - networkComponents, err := factory.NetworkComponentsFactory(*p2pConfig, 
*generalConfig, coreComponents.StatusHandler) + networkComponents, err = factory.NetworkComponentsFactory(*p2pConfig, *generalConfig, coreComponents.StatusHandler) if err != nil { return err } @@ -622,29 +616,22 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { } time.Sleep(secondsToWaitForP2PBootstrap * time.Second) - epochStartBootsrapArgs := bootstrap.ArgsEpochStartDataProvider{ - PublicKey: nil, - Messenger: nil, - Marshalizer: nil, - Hasher: nil, - GeneralConfig: config.Config{}, - EconomicsConfig: config.EconomicsConfig{}, - DefaultShardCoordinator: nil, - PathManager: nil, - NodesConfigProvider: nil, - EpochStartMetaBlockInterceptor: nil, - MetaBlockInterceptor: nil, - ShardHeaderInterceptor: nil, - MiniBlockInterceptor: nil, - SingleSigner: nil, - BlockSingleSigner: nil, - KeyGen: nil, - BlockKeyGen: nil, - WhiteListHandler: nil, - GenesisNodesConfig: nil, - WorkingDir: "", - DefaultDBPath: "", - DefaultEpochString: "", + epochStartBootsrapArgs := bootstrap.ArgsEpochStartBootstrap{ + PublicKey: nil, + Marshalizer: nil, + Hasher: nil, + Messenger: nil, + GeneralConfig: config.Config{}, + EconomicsData: nil, + SingleSigner: nil, + BlockSingleSigner: nil, + KeyGen: nil, + BlockKeyGen: nil, + GenesisNodesConfig: nil, + PathManager: nil, + WorkingDir: "", + DefaultDBPath: "", + DefaultEpochString: "", } bootsrapper, err := bootstrap.NewEpochStartBootstrapHandler(epochStartBootsrapArgs) if err != nil { diff --git a/epochStart/bootstrap/baseStorageHandler.go b/epochStart/bootstrap/baseStorageHandler.go index 79f4ae8accb..24a95b53a70 100644 --- a/epochStart/bootstrap/baseStorageHandler.go +++ b/epochStart/bootstrap/baseStorageHandler.go @@ -27,17 +27,20 @@ type baseStorageHandler struct { currentEpoch uint32 } -func (bsh *baseStorageHandler) getAndSavePendingMiniBlocks(miniBlocks map[string]*block.MiniBlock) ([]bootstrapStorage.PendingMiniBlockInfo, error) { - countersMap := make(map[uint32]int) - for _, miniBlock := range miniBlocks { - countersMap[miniBlock.SenderShardID]++ +func (bsh *baseStorageHandler) getAndSavePendingMiniBlocks(miniBlocks map[string]*block.MiniBlock) ([]bootstrapStorage.PendingMiniBlocksInfo, error) { + pendingMBsMap := make(map[uint32][][]byte) + for hash, miniBlock := range miniBlocks { + if _, ok := pendingMBsMap[miniBlock.SenderShardID]; !ok { + pendingMBsMap[miniBlock.SenderShardID] = make([][]byte, 0) + } + pendingMBsMap[miniBlock.SenderShardID] = append(pendingMBsMap[miniBlock.SenderShardID], []byte(hash)) } - sliceToRet := make([]bootstrapStorage.PendingMiniBlockInfo, 0) - for shardID, count := range countersMap { - sliceToRet = append(sliceToRet, bootstrapStorage.PendingMiniBlockInfo{ - ShardID: shardID, - NumPendingMiniBlocks: uint32(count), + sliceToRet := make([]bootstrapStorage.PendingMiniBlocksInfo, 0) + for shardID, hashes := range pendingMBsMap { + sliceToRet = append(sliceToRet, bootstrapStorage.PendingMiniBlocksInfo{ + ShardID: shardID, + MiniBlocksHashes: hashes, }) } diff --git a/integrationTests/multiShard/endOfEpoch/startInEpoch_test.go b/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go similarity index 94% rename from integrationTests/multiShard/endOfEpoch/startInEpoch_test.go rename to integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go index 3c43be753f4..baf11333fca 100644 --- a/integrationTests/multiShard/endOfEpoch/startInEpoch_test.go +++ b/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go @@ -1,4 +1,4 @@ -package epochStart +package 
startInEpoch import ( "context" @@ -10,6 +10,7 @@ import ( "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/integrationTests" "github.com/ElrondNetwork/elrond-go/integrationTests/mock" + "github.com/ElrondNetwork/elrond-go/integrationTests/multiShard/endOfEpoch" "github.com/ElrondNetwork/elrond-go/sharding" ) @@ -96,8 +97,8 @@ func TestStartInEpochForAShardNodeInMultiShardedEnvironment(t *testing.T) { time.Sleep(time.Second) - verifyIfNodesHasCorrectEpoch(t, epoch, nodes) - verifyIfAddedShardHeadersAreWithNewEpoch(t, nodes) + endOfEpoch.VerifyThatNodesHaveCorrectEpoch(t, epoch, nodes) + endOfEpoch.VerifyIfAddedShardHeadersAreWithNewEpoch(t, nodes) epochHandler := &mock.EpochStartTriggerStub{ EpochCalled: func() uint32 { diff --git a/integrationTests/p2p/networkSharding/networkSharding_test.go b/integrationTests/p2p/networkSharding/networkSharding_test.go index 1b9162a5e5e..2fd2ec0d0e6 100644 --- a/integrationTests/p2p/networkSharding/networkSharding_test.go +++ b/integrationTests/p2p/networkSharding/networkSharding_test.go @@ -124,7 +124,7 @@ func stopNodes(advertiser p2p.Messenger, nodesMap map[uint32][]*integrationTests func startNodes(nodesMap map[uint32][]*integrationTests.TestP2PNode) { for _, nodes := range nodesMap { for _, n := range nodes { - _ = n.Node.Start() + n.Node.Start() } } } diff --git a/update/sync/syncHeadersByHash.go b/update/sync/syncHeadersByHash.go index ebefed9582a..5748deff27e 100644 --- a/update/sync/syncHeadersByHash.go +++ b/update/sync/syncHeadersByHash.go @@ -42,7 +42,7 @@ func NewMissingheadersByHashSyncer(args ArgsNewMissingHeadersByHashSyncer) (*mis return nil, dataRetriever.ErrNilHeadersStorage } if check.IfNil(args.Cache) { - return nil, dataRetriever.ErrNilCacher + return nil, update.ErrNilCacher } if check.IfNil(args.Marshalizer) { return nil, dataRetriever.ErrNilMarshalizer From 933801226633b6da85ee55b4dc2aed355f2d8557 Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Tue, 24 Mar 2020 13:02:13 +0200 Subject: [PATCH 34/61] processing finishes --- cmd/node/main.go | 44 ++++++------ epochStart/bootstrap/interface.go | 7 ++ epochStart/bootstrap/process.go | 8 ++- .../simpleEpochStartMetaBlockInterceptor.go | 2 +- epochStart/bootstrap/syncEpochStartMeta.go | 68 ++++++++++++++++--- 5 files changed, 95 insertions(+), 34 deletions(-) diff --git a/cmd/node/main.go b/cmd/node/main.go index 5f9d4451e35..e55055e37b0 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -321,8 +321,6 @@ var coreServiceContainer serviceContainer.Core // go build -i -v -ldflags="-X main.appVersion=%VERS%" var appVersion = core.UnVersionedAppString -var currentEpoch = uint32(0) - func main() { _ = display.SetDisplayByteSlice(display.ToHexShort) log := logger.GetOrCreate("main") @@ -616,22 +614,28 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { } time.Sleep(secondsToWaitForP2PBootstrap * time.Second) + log.Trace("creating economics data components") + economicsData, err := economics.NewEconomicsData(economicsConfig) + if err != nil { + return err + } + epochStartBootsrapArgs := bootstrap.ArgsEpochStartBootstrap{ - PublicKey: nil, - Marshalizer: nil, - Hasher: nil, - Messenger: nil, - GeneralConfig: config.Config{}, - EconomicsData: nil, - SingleSigner: nil, - BlockSingleSigner: nil, - KeyGen: nil, - BlockKeyGen: nil, - GenesisNodesConfig: nil, - PathManager: nil, - WorkingDir: "", - DefaultDBPath: "", - DefaultEpochString: "", + PublicKey: pubKey, + Marshalizer: coreComponents.InternalMarshalizer, + Hasher: 
coreComponents.Hasher, + Messenger: networkComponents.NetMessenger, + GeneralConfig: *generalConfig, + EconomicsData: economicsData, + SingleSigner: cryptoComponents.TxSingleSigner, + BlockSingleSigner: cryptoComponents.SingleSigner, + KeyGen: cryptoComponents.TxSignKeyGen, + BlockKeyGen: cryptoComponents.BlockSignKeyGen, + GenesisNodesConfig: genesisNodesConfig, + PathManager: pathManager, + WorkingDir: workingDir, + DefaultDBPath: defaultDBPath, + DefaultEpochString: defaultEpochString, } bootsrapper, err := bootstrap.NewEpochStartBootstrapHandler(epochStartBootsrapArgs) if err != nil { @@ -649,12 +653,6 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { return err } - log.Trace("creating economics data components") - economicsData, err := economics.NewEconomicsData(economicsConfig) - if err != nil { - return err - } - rater, err := rating.NewBlockSigningRaterAndListIndexer(economicsData.RatingsData()) if err != nil { return err diff --git a/epochStart/bootstrap/interface.go b/epochStart/bootstrap/interface.go index cf03d1d0360..2f8bf651323 100644 --- a/epochStart/bootstrap/interface.go +++ b/epochStart/bootstrap/interface.go @@ -2,6 +2,7 @@ package bootstrap import ( "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/sharding" ) @@ -14,3 +15,9 @@ type StartOfEpochNodesConfigHandler interface { ) (*sharding.NodesCoordinatorRegistry, uint32, error) IsInterfaceNil() bool } + +// EpochStartInterceptor +type EpochStartInterceptor interface { + process.Interceptor + GetEpochStartMetaBlock(target int, epoch uint32) (*block.MetaBlock, error) +} diff --git a/epochStart/bootstrap/process.go b/epochStart/bootstrap/process.go index ded7e557504..464e32fd77c 100644 --- a/epochStart/bootstrap/process.go +++ b/epochStart/bootstrap/process.go @@ -271,7 +271,13 @@ func (e *epochStartBootstrap) prepareComponentsToSyncFromNetwork() error { return err } - e.epochStartMetaBlockSyncer, err = NewEpochStartMetaSyncer() + argsEpochStartSyncer := ArgsNewEpochStartMetaSyncer{ + RequestHandler: e.requestHandler, + Messenger: e.messenger, + Marshalizer: e.marshalizer, + Hasher: e.hasher, + } + e.epochStartMetaBlockSyncer, err = NewEpochStartMetaSyncer(argsEpochStartSyncer) if err != nil { return err } diff --git a/epochStart/bootstrap/simpleEpochStartMetaBlockInterceptor.go b/epochStart/bootstrap/simpleEpochStartMetaBlockInterceptor.go index f53478c05f7..4ad388eca50 100644 --- a/epochStart/bootstrap/simpleEpochStartMetaBlockInterceptor.go +++ b/epochStart/bootstrap/simpleEpochStartMetaBlockInterceptor.go @@ -50,7 +50,7 @@ func (s *simpleEpochStartMetaBlockInterceptor) SetIsDataForCurrentShardVerifier( } // ProcessReceivedMessage will receive the metablocks and will add them to the maps -func (s *simpleEpochStartMetaBlockInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, _ func(buffToSend []byte)) error { +func (s *simpleEpochStartMetaBlockInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, _ p2p.PeerID) error { var mb block.MetaBlock err := s.marshalizer.Unmarshal(&mb, message.Data()) if err != nil { diff --git a/epochStart/bootstrap/syncEpochStartMeta.go b/epochStart/bootstrap/syncEpochStartMeta.go index 649f27c1f61..e6b8b4fc0f9 100644 --- a/epochStart/bootstrap/syncEpochStartMeta.go +++ b/epochStart/bootstrap/syncEpochStartMeta.go @@ -6,21 +6,48 @@ import ( "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/epochStart" + 
"github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/p2p" - "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/factory" - "github.com/ElrondNetwork/elrond-go/storage" ) type epochStartMetaSyncer struct { requestHandler epochStart.RequestHandler messenger p2p.Messenger - metaBlockPool storage.Cacher - epochStartMetaBlockInterceptor process.Interceptor + epochStartMetaBlockInterceptor EpochStartInterceptor + marshalizer marshal.Marshalizer + hasher hashing.Hasher } -func NewEpochStartMetaSyncer() (*epochStartMetaSyncer, error) { - return &epochStartMetaSyncer{}, nil +// ArgsNewEpochStartMetaSyncer - +type ArgsNewEpochStartMetaSyncer struct { + RequestHandler epochStart.RequestHandler + Messenger p2p.Messenger + Marshalizer marshal.Marshalizer + Hasher hashing.Hasher +} + +const delayBetweenRequests = 1 * time.Second +const thresholdForConsideringMetaBlockCorrect = 0.2 +const numRequestsToSendOnce = 4 +const maxNumTimesToRetry = 100 + +func NewEpochStartMetaSyncer(args ArgsNewEpochStartMetaSyncer) (*epochStartMetaSyncer, error) { + e := &epochStartMetaSyncer{ + requestHandler: args.RequestHandler, + messenger: args.Messenger, + marshalizer: args.Marshalizer, + hasher: args.Hasher, + } + + var err error + e.epochStartMetaBlockInterceptor, err = NewSimpleEpochStartMetaBlockInterceptor(e.marshalizer, e.hasher) + if err != nil { + return nil, err + } + + return e, nil } // SyncEpochStartMeta syncs the latest epoch start metablock @@ -33,12 +60,35 @@ func (e *epochStartMetaSyncer) SyncEpochStartMeta(waitTime time.Duration) (*bloc e.resetTopicsAndInterceptors() }() + e.requestEpochStartMetaBlock() + unknownEpoch := uint32(math.MaxUint32) - e.requestHandler.RequestStartOfEpochMetaBlock(unknownEpoch) + count := 0 + for { + if count > maxNumTimesToRetry { + panic("can't sync with other peers") + } + count++ + numConnectedPeers := len(e.messenger.Peers()) + threshold := int(thresholdForConsideringMetaBlockCorrect * float64(numConnectedPeers)) + mb, errConsensusNotReached := e.epochStartMetaBlockInterceptor.GetEpochStartMetaBlock(threshold, unknownEpoch) + if errConsensusNotReached == nil { + return mb, nil + } + log.Info("consensus not reached for meta block. 
re-requesting and trying again...") + e.requestEpochStartMetaBlock() + } - // TODO: implement waitTime and consensus + return nil, epochStart.ErrNumTriesExceeded +} - return nil, nil +func (e *epochStartMetaSyncer) requestEpochStartMetaBlock() { + // send more requests + unknownEpoch := uint32(math.MaxUint32) + for i := 0; i < numRequestsToSendOnce; i++ { + time.Sleep(delayBetweenRequests) + e.requestHandler.RequestStartOfEpochMetaBlock(unknownEpoch) + } } func (e *epochStartMetaSyncer) resetTopicsAndInterceptors() { From e2e9d7a4422d8dc60faeae2b8935756a9b4d3273 Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Tue, 24 Mar 2020 14:50:21 +0200 Subject: [PATCH 35/61] deleted unused files --- .../bootstrap/disabled/disabledCacher.go | 60 ------------------- .../disabled/disabledShardCoordinator.go | 60 ------------------- 2 files changed, 120 deletions(-) delete mode 100644 epochStart/bootstrap/disabled/disabledCacher.go delete mode 100644 epochStart/bootstrap/disabled/disabledShardCoordinator.go diff --git a/epochStart/bootstrap/disabled/disabledCacher.go b/epochStart/bootstrap/disabled/disabledCacher.go deleted file mode 100644 index 7423e77fa30..00000000000 --- a/epochStart/bootstrap/disabled/disabledCacher.go +++ /dev/null @@ -1,60 +0,0 @@ -package disabled - -type cacher struct { -} - -// NewCacher returns a new instance of cacher -func NewCacher() *cacher { - return &cacher{} -} - -func (d *cacher) Clear() { -} - -func (d *cacher) Put(key []byte, value interface{}) bool { - return true -} - -func (d *cacher) Get(key []byte) (value interface{}, ok bool) { - return nil, false -} - -func (d *cacher) Has(key []byte) bool { - panic("implement me") -} - -func (d *cacher) Peek(key []byte) (value interface{}, ok bool) { - panic("implement me") -} - -func (d *cacher) HasOrAdd(key []byte, value interface{}) (ok, evicted bool) { - panic("implement me") -} - -func (d *cacher) Remove(key []byte) { - panic("implement me") -} - -func (d *cacher) RemoveOldest() { - panic("implement me") -} - -func (d *cacher) Keys() [][]byte { - panic("implement me") -} - -func (d *cacher) Len() int { - panic("implement me") -} - -func (d *cacher) MaxSize() int { - panic("implement me") -} - -func (d *cacher) RegisterHandler(func(key []byte)) { - panic("implement me") -} - -func (d *cacher) IsInterfaceNil() bool { - panic("implement me") -} diff --git a/epochStart/bootstrap/disabled/disabledShardCoordinator.go b/epochStart/bootstrap/disabled/disabledShardCoordinator.go deleted file mode 100644 index 8e97dd44541..00000000000 --- a/epochStart/bootstrap/disabled/disabledShardCoordinator.go +++ /dev/null @@ -1,60 +0,0 @@ -package disabled - -import ( - "github.com/ElrondNetwork/elrond-go/core" - "github.com/ElrondNetwork/elrond-go/data/state" -) - -type shardCoordinator struct { - numShards uint32 -} - -// NewShardCoordinator - -func NewShardCoordinator() *shardCoordinator { - return &shardCoordinator{numShards: 1} -} - -// NumberOfShards - -func (scm *shardCoordinator) NumberOfShards() uint32 { - return scm.numShards -} - -// SetNoShards - -func (scm *shardCoordinator) SetNoShards(shards uint32) { - scm.numShards = shards -} - -// ComputeId - -func (scm *shardCoordinator) ComputeId(address state.AddressContainer) uint32 { - - return uint32(0) -} - -// SelfId - -func (scm *shardCoordinator) SelfId() uint32 { - return 0 -} - -// SetSelfId - -func (scm *shardCoordinator) SetSelfId(shardId uint32) error { - return nil -} - -// SameShard - -func (scm *shardCoordinator) SameShard(firstAddress, secondAddress 
state.AddressContainer) bool { - return true -} - -// CommunicationIdentifier - -func (scm *shardCoordinator) CommunicationIdentifier(destShardID uint32) string { - if destShardID == core.MetachainShardId { - return "_0_META" - } - - return "_0" -} - -// IsInterfaceNil returns true if there is no value under the interface -func (scm *shardCoordinator) IsInterfaceNil() bool { - return scm == nil -} From 6a12b31ae01abe9bac58fc32ac6fb60a9ed6d43c Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Tue, 24 Mar 2020 15:09:52 +0200 Subject: [PATCH 36/61] fix after merge --- cmd/node/main.go | 7 +++++++ epochStart/bootstrap/process.go | 19 ++++++++++++++----- 2 files changed, 21 insertions(+), 5 deletions(-) diff --git a/cmd/node/main.go b/cmd/node/main.go index e55055e37b0..884c5e50b81 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -636,6 +636,7 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { WorkingDir: workingDir, DefaultDBPath: defaultDBPath, DefaultEpochString: defaultEpochString, + DefaultShardString: defaultShardString, } bootsrapper, err := bootstrap.NewEpochStartBootstrapHandler(epochStartBootsrapArgs) if err != nil { @@ -648,6 +649,12 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { return err } + if !generalConfig.StoragePruning.Enabled { + // TODO: refactor this as when the pruning storer is disabled, the default directory path is Epoch_0 + // and it should be Epoch_ALL or something similar + currentEpoch = 0 + } + shardCoordinator, err := sharding.NewMultiShardCoordinator(numOfShards, currentShardId) if err != nil { return err diff --git a/epochStart/bootstrap/process.go b/epochStart/bootstrap/process.go index 464e32fd77c..d65171c0559 100644 --- a/epochStart/bootstrap/process.go +++ b/epochStart/bootstrap/process.go @@ -74,6 +74,7 @@ type epochStartBootstrap struct { workingDir string defaultDBPath string defaultEpochString string + defaultShardString string // created components requestHandler process.RequestHandler @@ -101,7 +102,7 @@ type epochStartBootstrap struct { type baseDataInStorage struct { shardId uint32 numberOfShards uint32 - lastRound uint64 + lastRound int64 lastEpoch uint32 } @@ -122,6 +123,7 @@ type ArgsEpochStartBootstrap struct { WorkingDir string DefaultDBPath string DefaultEpochString string + DefaultShardString string } // NewEpochStartBootstrap will return a new instance of epochStartBootstrap @@ -136,6 +138,7 @@ func NewEpochStartBootstrapHandler(args ArgsEpochStartBootstrap) (*epochStartBoo workingDir: args.WorkingDir, defaultEpochString: args.DefaultEpochString, defaultDBPath: args.DefaultEpochString, + defaultShardString: args.DefaultShardString, keyGen: args.KeyGen, blockKeyGen: args.BlockKeyGen, singleSigner: args.SingleSigner, @@ -146,18 +149,24 @@ func NewEpochStartBootstrapHandler(args ArgsEpochStartBootstrap) (*epochStartBoo } func (e *epochStartBootstrap) searchDataInLocalStorage() { - currentEpoch, errNotCritical := storageFactory.FindLastEpochFromStorage( + var errNotCritical error + e.baseData.lastEpoch, e.baseData.shardId, e.baseData.lastRound, errNotCritical = storageFactory.FindLatestDataFromStorage( // TODO: use last round and shard ID + e.generalConfig, + e.marshalizer, // TODO: remove hardcoded marshalizer when start in epoch is merged. 
e.workingDir, e.genesisNodesConfig.ChainID, e.defaultDBPath, e.defaultEpochString, + e.defaultShardString, ) if errNotCritical != nil { log.Debug("no epoch db found in storage", "error", errNotCritical.Error()) + } else { + log.Debug("got last data from storage", + "epoch", e.baseData.lastEpoch, + "last round", e.baseData.lastRound, + "last shard ID", e.baseData.lastRound) } - - log.Debug("current epoch from the storage : ", "epoch", currentEpoch) - e.baseData.lastEpoch = currentEpoch } func (e *epochStartBootstrap) isStartInEpochZero() bool { From d0b389eb5ec7a2f3aef38603f92db9e4e1e897eb Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Tue, 24 Mar 2020 15:23:27 +0200 Subject: [PATCH 37/61] fix after merge --- epochStart/bootstrap/process.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/epochStart/bootstrap/process.go b/epochStart/bootstrap/process.go index d65171c0559..68b77dafb80 100644 --- a/epochStart/bootstrap/process.go +++ b/epochStart/bootstrap/process.go @@ -150,9 +150,9 @@ func NewEpochStartBootstrapHandler(args ArgsEpochStartBootstrap) (*epochStartBoo func (e *epochStartBootstrap) searchDataInLocalStorage() { var errNotCritical error - e.baseData.lastEpoch, e.baseData.shardId, e.baseData.lastRound, errNotCritical = storageFactory.FindLatestDataFromStorage( // TODO: use last round and shard ID + e.baseData.lastEpoch, e.baseData.shardId, e.baseData.lastRound, errNotCritical = storageFactory.FindLatestDataFromStorage( e.generalConfig, - e.marshalizer, // TODO: remove hardcoded marshalizer when start in epoch is merged. + e.marshalizer, e.workingDir, e.genesisNodesConfig.ChainID, e.defaultDBPath, From 5c90a256c8efb605b7e3843c73aeecb25a84e737 Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Wed, 25 Mar 2020 00:17:21 +0200 Subject: [PATCH 38/61] implemented starting in epoch from storage. 
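The core decision this patch introduces: at startup the node compares the last epoch found in local storage against the epoch the chain has most probably reached, and only re-syncs from the network when the local data is too old. Below is a minimal, self-contained Go sketch of that check, with hypothetical helper names (the real computeMostProbableEpoch derives the current epoch the same way, from genesis time, round duration and the number of rounds per epoch):

package main

import (
	"fmt"
	"time"
)

// mostProbableEpoch estimates the epoch the chain should currently be in,
// given only the genesis time, the round duration and the epoch length in
// rounds. Hypothetical stand-in for computeMostProbableEpoch.
func mostProbableEpoch(genesis time.Time, roundDuration time.Duration, roundsPerEpoch uint64) uint32 {
	elapsed := time.Since(genesis)
	if elapsed <= 0 {
		return 0
	}
	currentRound := uint64(elapsed / roundDuration)
	return uint32(currentRound / roundsPerEpoch)
}

// canStartFromStorage reports whether the data saved on disk is fresh enough
// to resume from; anything older than the previous epoch forces a re-sync.
func canStartFromStorage(lastEpochInStorage, computedEpoch uint32) bool {
	return lastEpochInStorage+1 >= computedEpoch
}

func main() {
	genesis := time.Now().Add(-48 * time.Hour)
	computed := mostProbableEpoch(genesis, 6*time.Second, 14400)
	fmt.Println("most probable epoch:", computed)
	fmt.Println("start from storage:", canStartFromStorage(0, computed))
}

The +1 tolerance matches the isCurrentEpochSaved comparison in Bootstrap: a node that holds the previous epoch's data can still rebuild the current epoch from storage, while older data sends it through the network sync path.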
--- consensus/mock/nodesCoordinatorMock.go | 7 +- .../disabled/disabledNodesCoordinator.go | 33 +- epochStart/bootstrap/fromLocalStorage.go | 187 +++++++++ epochStart/bootstrap/process.go | 44 +-- .../simpleEpochStartMetaBlockInterceptor.go | 4 + epochStart/bootstrap/syncValidatorStatus.go | 14 +- epochStart/mock/nodesCoordinatorStub.go | 5 + integrationTests/mock/nodesCoordinatorMock.go | 7 +- node/mock/nodesCoordinatorMock.go | 7 +- process/mock/nodesCoordinatorMock.go | 7 +- .../indexHashedNodesCoordinatorRegistry.go | 10 + sharding/interface.go | 1 + sharding/networksharding/mock_test.go | 5 + storage/factory/common.go | 200 ---------- storage/factory/openStorage.go | 355 ++++++++++++++++++ 15 files changed, 631 insertions(+), 255 deletions(-) create mode 100644 epochStart/bootstrap/fromLocalStorage.go create mode 100644 storage/factory/openStorage.go diff --git a/consensus/mock/nodesCoordinatorMock.go b/consensus/mock/nodesCoordinatorMock.go index 80f7721ef7a..63f78da50f0 100644 --- a/consensus/mock/nodesCoordinatorMock.go +++ b/consensus/mock/nodesCoordinatorMock.go @@ -11,8 +11,13 @@ type NodesCoordinatorMock struct { GetValidatorsRewardsAddressesCalled func(randomness []byte, round uint64, shardId uint32, epoch uint32) ([]string, error) } +// SaveNodesCoordinatorRegistry - +func (ncm *NodesCoordinatorMock) SaveNodesCoordinatorRegistry(_ *sharding.NodesCoordinatorRegistry) error { + return nil +} + // GetWaitingPublicKeysPerShard - -func (ncm *NodesCoordinatorMock) GetWaitingPublicKeysPerShard(epoch uint32) (map[uint32][][]byte, error) { +func (ncm *NodesCoordinatorMock) GetWaitingPublicKeysPerShard(_ uint32) (map[uint32][][]byte, error) { return nil, nil } diff --git a/epochStart/bootstrap/disabled/disabledNodesCoordinator.go b/epochStart/bootstrap/disabled/disabledNodesCoordinator.go index 622f89cb190..4e04375d2cc 100644 --- a/epochStart/bootstrap/disabled/disabledNodesCoordinator.go +++ b/epochStart/bootstrap/disabled/disabledNodesCoordinator.go @@ -14,31 +14,36 @@ func NewNodesCoordinator() *nodesCoordinator { } func (n *nodesCoordinator) SetNodesPerShards( - eligible map[uint32][]sharding.Validator, - waiting map[uint32][]sharding.Validator, - epoch uint32, - updateList bool, + _ map[uint32][]sharding.Validator, + _ map[uint32][]sharding.Validator, + _ uint32, + _ bool, ) error { return nil } -func (n *nodesCoordinator) ComputeLeaving(allValidators []sharding.Validator) []sharding.Validator { +// SaveNodesCoordinatorRegistry - +func (n *nodesCoordinator) SaveNodesCoordinatorRegistry(_ *sharding.NodesCoordinatorRegistry) error { return nil } -func (n *nodesCoordinator) GetValidatorsIndexes(publicKeys []string, epoch uint32) ([]uint64, error) { +func (n *nodesCoordinator) ComputeLeaving(_ []sharding.Validator) []sharding.Validator { + return nil +} + +func (n *nodesCoordinator) GetValidatorsIndexes(_ []string, _ uint32) ([]uint64, error) { return nil, nil } -func (n *nodesCoordinator) GetAllEligibleValidatorsPublicKeys(epoch uint32) (map[uint32][][]byte, error) { +func (n *nodesCoordinator) GetAllEligibleValidatorsPublicKeys(_ uint32) (map[uint32][][]byte, error) { return nil, nil } -func (n *nodesCoordinator) GetAllWaitingValidatorsPublicKeys(epoch uint32) (map[uint32][][]byte, error) { +func (n *nodesCoordinator) GetAllWaitingValidatorsPublicKeys(_ uint32) (map[uint32][][]byte, error) { return nil, nil } -func (n *nodesCoordinator) GetConsensusValidatorsPublicKeys(randomness []byte, round uint64, shardId uint32, epoch uint32) ([]string, error) { +func (n *nodesCoordinator) 
GetConsensusValidatorsPublicKeys(_ []byte, _ uint64, _ uint32, _ uint32) ([]string, error) {
 	return nil, nil
 }
 
@@ -46,11 +51,11 @@ func (n *nodesCoordinator) GetOwnPublicKey() []byte {
 	return nil
 }
 
-func (n *nodesCoordinator) ComputeConsensusGroup(randomness []byte, round uint64, shardId uint32, epoch uint32) (validatorsGroup []sharding.Validator, err error) {
+func (n *nodesCoordinator) ComputeConsensusGroup(_ []byte, _ uint64, _ uint32, _ uint32) (validatorsGroup []sharding.Validator, err error) {
 	return nil, nil
 }
 
-func (n *nodesCoordinator) GetValidatorWithPublicKey(publicKey []byte, epoch uint32) (validator sharding.Validator, shardId uint32, err error) {
+func (n *nodesCoordinator) GetValidatorWithPublicKey(_ []byte, _ uint32) (validator sharding.Validator, shardId uint32, err error) {
 	return nil, 0, nil
 }
 
@@ -58,7 +63,7 @@ func (n *nodesCoordinator) UpdatePeersListAndIndex() error {
 	return nil
 }
 
-func (n *nodesCoordinator) LoadState(key []byte) error {
+func (n *nodesCoordinator) LoadState(_ []byte) error {
 	return nil
 }
 
@@ -66,11 +71,11 @@ func (n *nodesCoordinator) GetSavedStateKey() []byte {
 	return nil
 }
 
-func (n *nodesCoordinator) ShardIdForEpoch(epoch uint32) (uint32, error) {
+func (n *nodesCoordinator) ShardIdForEpoch(_ uint32) (uint32, error) {
 	return 0, nil
 }
 
-func (n *nodesCoordinator) GetConsensusWhitelistedNodes(epoch uint32) (map[string]struct{}, error) {
+func (n *nodesCoordinator) GetConsensusWhitelistedNodes(_ uint32) (map[string]struct{}, error) {
 	return nil, nil
 }
 
diff --git a/epochStart/bootstrap/fromLocalStorage.go b/epochStart/bootstrap/fromLocalStorage.go
new file mode 100644
index 00000000000..3acac4d12ac
--- /dev/null
+++ b/epochStart/bootstrap/fromLocalStorage.go
@@ -0,0 +1,187 @@
+package bootstrap
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+
+	"github.com/ElrondNetwork/elrond-go/core"
+	"github.com/ElrondNetwork/elrond-go/data/block"
+	"github.com/ElrondNetwork/elrond-go/epochStart"
+	"github.com/ElrondNetwork/elrond-go/process/block/bootstrapStorage"
+	"github.com/ElrondNetwork/elrond-go/sharding"
+	"github.com/ElrondNetwork/elrond-go/storage"
+	storageFactory "github.com/ElrondNetwork/elrond-go/storage/factory"
+)
+
+func (e *epochStartBootstrap) searchDataInLocalStorage() {
+	var errNotCritical error
+	e.baseData.lastEpoch, e.baseData.shardId, e.baseData.lastRound, errNotCritical = storageFactory.FindLatestDataFromStorage(
+		e.generalConfig,
+		e.marshalizer,
+		e.workingDir,
+		e.genesisNodesConfig.ChainID,
+		e.defaultDBPath,
+		e.defaultEpochString,
+		e.defaultShardString,
+	)
+	if errNotCritical != nil {
+		log.Debug("no epoch db found in storage", "error", errNotCritical.Error())
+	} else {
+		log.Debug("got last data from storage",
+			"epoch", e.baseData.lastEpoch,
+			"last round", e.baseData.lastRound,
+			"last shard ID", e.baseData.shardId)
+	}
+}
+
+func (e *epochStartBootstrap) prepareEpochFromStorage() (uint32, uint32, uint32, error) {
+	args := storageFactory.ArgsNewOpenStorageUnits{
+		GeneralConfig:      e.generalConfig,
+		Marshalizer:        e.marshalizer,
+		WorkingDir:         e.workingDir,
+		ChainID:            e.genesisNodesConfig.ChainID,
+		DefaultDBPath:      e.defaultDBPath,
+		DefaultEpochString: e.defaultEpochString,
+		DefaultShardString: e.defaultShardString,
+	}
+	openStorageHandler, err := storageFactory.NewStorageUnitOpenHandler(args)
+	if err != nil {
+		return 0, 0, 0, err
+	}
+
+	unitsToOpen := make([]string, 0)
+	unitsToOpen = append(unitsToOpen, e.generalConfig.BootstrapStorage.DB.FilePath)
+	unitsToOpen = append(unitsToOpen, 
e.generalConfig.MetaBlockStorage.DB.FilePath) + + storageUnits, err := openStorageHandler.OpenStorageUnits(unitsToOpen) + defer func() { + for _, storer := range storageUnits { + errClose := storer.Close() + log.LogIfError(errClose) + } + }() + + if err != nil || len(storageUnits) != len(unitsToOpen) { + return 0, 0, 0, err + } + + _, e.nodesConfig, err = e.getLastBootstrapData(storageUnits[0]) + if err != nil { + return 0, 0, 0, err + } + + pubKey, err := e.publicKey.ToByteArray() + if err != nil { + return 0, 0, 0, err + } + + if !e.checkIfShuffledOut(pubKey, e.nodesConfig) { + return e.baseData.lastEpoch, e.baseData.shardId, e.baseData.numberOfShards, nil + } + + e.epochStartMeta, err = e.getEpochStartMetaFromStorage(storageUnits[1]) + if err != nil { + return 0, 0, 0, err + } + + err = e.prepareComponentsToSyncFromNetwork() + if err != nil { + return 0, 0, 0, err + } + + e.syncedHeaders, err = e.syncHeadersFrom(e.epochStartMeta) + if err != nil { + return 0, 0, 0, err + } + + prevEpochStartMetaHash := e.epochStartMeta.EpochStart.Economics.PrevEpochStartHash + prevEpochStartMeta, ok := e.syncedHeaders[string(prevEpochStartMetaHash)].(*block.MetaBlock) + if !ok { + return 0, 0, 0, epochStart.ErrWrongTypeAssertion + } + e.prevEpochStartMeta = prevEpochStartMeta + + e.shardCoordinator, err = sharding.NewMultiShardCoordinator(e.baseData.numberOfShards, e.baseData.shardId) + if err != nil { + return 0, 0, 0, err + } + + if e.shardCoordinator.SelfId() == core.MetachainShardId { + err = e.requestAndProcessForShard() + if err != nil { + return 0, 0, 0, err + } + } + + err = e.requestAndProcessForMeta() + if err != nil { + return 0, 0, 0, err + } + + return e.baseData.lastEpoch, e.shardCoordinator.SelfId(), e.shardCoordinator.NumberOfShards(), nil +} + +func (e *epochStartBootstrap) checkIfShuffledOut( + pubKey []byte, + nodesConfig *sharding.NodesCoordinatorRegistry, +) bool { + epochConfig := nodesConfig.EpochsConfig[fmt.Sprint(e.baseData.lastEpoch)] + shardIdForConfig := fmt.Sprint(e.baseData.shardId) + + for _, validator := range epochConfig.WaitingValidators[shardIdForConfig] { + if bytes.Equal(pubKey, validator.PubKey) { + return false + } + } + + for _, validator := range epochConfig.EligibleValidators[shardIdForConfig] { + if bytes.Equal(pubKey, validator.PubKey) { + return false + } + } + + return true +} + +func (e *epochStartBootstrap) getLastBootstrapData(storer storage.Storer) (*bootstrapStorage.BootstrapData, *sharding.NodesCoordinatorRegistry, error) { + bootStorer, err := bootstrapStorage.NewBootstrapStorer(e.marshalizer, storer) + if err != nil { + return nil, nil, err + } + + highestRound := bootStorer.GetHighestRound() + bootstrapData, err := bootStorer.Get(highestRound) + if err != nil { + return nil, nil, err + } + + data, err := storer.Get(bootstrapData.NodesCoordinatorConfigKey) + if err != nil { + return nil, nil, err + } + + config := &sharding.NodesCoordinatorRegistry{} + err = json.Unmarshal(data, config) + if err != nil { + return nil, nil, err + } + + return &bootstrapData, config, nil +} + +func (e *epochStartBootstrap) getEpochStartMetaFromStorage(storer storage.Storer) (*block.MetaBlock, error) { + epochIdentifier := core.EpochStartIdentifier(e.baseData.lastEpoch) + data, err := storer.Get([]byte(epochIdentifier)) + if err != nil { + return nil, err + } + + metaBlock := &block.MetaBlock{} + err = e.marshalizer.Unmarshal(metaBlock, data) + if err != nil { + return nil, err + } + + return metaBlock, nil +} diff --git a/epochStart/bootstrap/process.go 
b/epochStart/bootstrap/process.go index 68b77dafb80..812bc936fab 100644 --- a/epochStart/bootstrap/process.go +++ b/epochStart/bootstrap/process.go @@ -148,27 +148,6 @@ func NewEpochStartBootstrapHandler(args ArgsEpochStartBootstrap) (*epochStartBoo return epochStartProvider, nil } -func (e *epochStartBootstrap) searchDataInLocalStorage() { - var errNotCritical error - e.baseData.lastEpoch, e.baseData.shardId, e.baseData.lastRound, errNotCritical = storageFactory.FindLatestDataFromStorage( - e.generalConfig, - e.marshalizer, - e.workingDir, - e.genesisNodesConfig.ChainID, - e.defaultDBPath, - e.defaultEpochString, - e.defaultShardString, - ) - if errNotCritical != nil { - log.Debug("no epoch db found in storage", "error", errNotCritical.Error()) - } else { - log.Debug("got last data from storage", - "epoch", e.baseData.lastEpoch, - "last round", e.baseData.lastRound, - "last shard ID", e.baseData.lastRound) - } -} - func (e *epochStartBootstrap) isStartInEpochZero() bool { startTime := time.Unix(e.genesisNodesConfig.StartTime, 0) isCurrentTimeBeforeGenesis := time.Now().Sub(startTime) < 0 @@ -209,30 +188,31 @@ func (e *epochStartBootstrap) Bootstrap() (uint32, uint32, uint32, error) { e.computeMostProbableEpoch() e.searchDataInLocalStorage() + // TODO: make a better decision according to lastRound, lastEpoch isCurrentEpochSaved := e.baseData.lastEpoch+1 >= e.computedEpoch if isCurrentEpochSaved { - return e.prepareEpochFromStorage() + epoch, shardId, numOfShards, err := e.prepareEpochFromStorage() + if err == nil { + return epoch, shardId, numOfShards, nil + } } - err := e.prepareComponentsToSyncFromNetwork() + var err error + e.shardCoordinator, err = sharding.NewMultiShardCoordinator(e.genesisNodesConfig.NumberOfShards(), core.MetachainShardId) if err != nil { return 0, 0, 0, err } - return e.requestAndProcessing() -} + err = e.prepareComponentsToSyncFromNetwork() + if err != nil { + return 0, 0, 0, err + } -func (e *epochStartBootstrap) prepareEpochFromStorage() (uint32, uint32, uint32, error) { - // TODO: compute self shard ID for current epoch - return 0, 0, 0, nil + return e.requestAndProcessing() } func (e *epochStartBootstrap) prepareComponentsToSyncFromNetwork() error { var err error - e.shardCoordinator, err = sharding.NewMultiShardCoordinator(e.genesisNodesConfig.NumberOfShards(), core.MetachainShardId) - if err != nil { - return err - } whiteListCache, err := storageUnit.NewCache( storageUnit.CacheType(e.generalConfig.WhiteListPool.Type), diff --git a/epochStart/bootstrap/simpleEpochStartMetaBlockInterceptor.go b/epochStart/bootstrap/simpleEpochStartMetaBlockInterceptor.go index 4ad388eca50..2b7c7911c2c 100644 --- a/epochStart/bootstrap/simpleEpochStartMetaBlockInterceptor.go +++ b/epochStart/bootstrap/simpleEpochStartMetaBlockInterceptor.go @@ -57,6 +57,10 @@ func (s *simpleEpochStartMetaBlockInterceptor) ProcessReceivedMessage(message p2 return err } + if !mb.IsStartOfEpochBlock() { + return epochStart.ErrNotEpochStartBlock + } + s.mutReceivedMetaBlocks.Lock() mbHash, err := core.CalculateHash(s.marshalizer, s.hasher, &mb) if err != nil { diff --git a/epochStart/bootstrap/syncValidatorStatus.go b/epochStart/bootstrap/syncValidatorStatus.go index f5ab9005d6c..10204572ce5 100644 --- a/epochStart/bootstrap/syncValidatorStatus.go +++ b/epochStart/bootstrap/syncValidatorStatus.go @@ -21,6 +21,7 @@ type syncValidatorStatus struct { dataPool dataRetriever.PoolsHolder marshalizer marshal.Marshalizer requestHandler process.RequestHandler + nodeCoordinator sharding.NodesCoordinator } 
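The last hunk of this file's diff switches on vid.List to place each validator in either the eligible or the waiting set of its shard. A standalone sketch of that bucketing, with simplified types in place of the sharding/state ones and the list tags assumed to be the strings behind core.EligibleList and core.WaitingList:

package main

import "fmt"

// validator is a simplified stand-in for the PR's SerializableValidator plus
// the List tag carried by state.ValidatorInfo.
type validator struct {
	PubKey  []byte
	ShardID uint32
	List    string
}

// bucketByList groups validators per shard into eligible and waiting sets,
// the same classification processNodesConfigFor performs below.
func bucketByList(all []validator) (eligible, waiting map[uint32][]validator) {
	eligible = make(map[uint32][]validator)
	waiting = make(map[uint32][]validator)
	for _, v := range all {
		switch v.List {
		case "eligible":
			eligible[v.ShardID] = append(eligible[v.ShardID], v)
		case "waiting":
			waiting[v.ShardID] = append(waiting[v.ShardID], v)
		}
	}
	return eligible, waiting
}

func main() {
	all := []validator{
		{PubKey: []byte("v0"), ShardID: 0, List: "eligible"},
		{PubKey: []byte("v1"), ShardID: 0, List: "waiting"},
		{PubKey: []byte("v2"), ShardID: 1, List: "eligible"},
	}
	eligible, waiting := bucketByList(all)
	fmt.Println(len(eligible[0]), len(waiting[0]), len(eligible[1])) // 1 1 1
}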
// ArgsNewSyncValidatorStatus
@@ -84,6 +85,8 @@ func (s *syncValidatorStatus) NodesConfigFromMetaBlock(
 	configId = fmt.Sprint(prevMetaBlock.Epoch)
 	nodesConfig.EpochsConfig[configId] = epochValidators
 
+	// TODO: use shuffling process from nodesCoordinator to create the final data
+
 	return nodesConfig, selfShardId, nil
 }
 
@@ -139,11 +142,12 @@ func (s *syncValidatorStatus) processNodesConfigFor(
 	}
 
 	shardId := fmt.Sprint(vid.ShardId)
-	// TODO - make decision according to validatorInfo.List after it is implemented
-	// most probably there is a need to create an indexed hashed nodescoordinator - set the data manually
-	// call the shuffling and save the result - this is still problematic as you always need the -1 config
-	// recursively going to genesis - data has to be updated in the peer trie.
-	epochValidators.EligibleValidators[shardId] = append(epochValidators.EligibleValidators[shardId], serializableValidator)
+	switch vid.List {
+	case string(core.EligibleList):
+		epochValidators.EligibleValidators[shardId] = append(epochValidators.EligibleValidators[shardId], serializableValidator)
+	case string(core.WaitingList):
+		epochValidators.WaitingValidators[shardId] = append(epochValidators.WaitingValidators[shardId], serializableValidator)
+	}
 
 	if shouldSearchSelfId && !found && bytes.Equal(vid.PublicKey, publicKey) {
 		selfShardId = vid.ShardId
diff --git a/epochStart/mock/nodesCoordinatorStub.go b/epochStart/mock/nodesCoordinatorStub.go
index 0e0de400a0a..b7c3ba77cb9 100644
--- a/epochStart/mock/nodesCoordinatorStub.go
+++ b/epochStart/mock/nodesCoordinatorStub.go
@@ -13,6 +13,11 @@ type NodesCoordinatorStub struct {
 	GetAllValidatorsPublicKeysCalled func() (map[uint32][][]byte, error)
 }
 
+// SaveNodesCoordinatorRegistry -
+func (ncm *NodesCoordinatorStub) SaveNodesCoordinatorRegistry(_ *sharding.NodesCoordinatorRegistry) error {
+	return nil
+}
+
 // ComputeLeaving -
 func (ncm *NodesCoordinatorStub) ComputeLeaving(_ []sharding.Validator) []sharding.Validator {
 	return nil
diff --git a/integrationTests/mock/nodesCoordinatorMock.go b/integrationTests/mock/nodesCoordinatorMock.go
index 517a537d148..1c933f32feb 100644
--- a/integrationTests/mock/nodesCoordinatorMock.go
+++ b/integrationTests/mock/nodesCoordinatorMock.go
@@ -14,6 +14,11 @@ type NodesCoordinatorMock struct {
 	GetAllWaitingValidatorsPublicKeysCalled func() (map[uint32][][]byte, error)
 }
 
+// SaveNodesCoordinatorRegistry -
+func (ncm *NodesCoordinatorMock) SaveNodesCoordinatorRegistry(_ *sharding.NodesCoordinatorRegistry) error {
+	return nil
+}
+
 // GetNumTotalEligible -
 func (ncm *NodesCoordinatorMock) GetNumTotalEligible() uint64 {
 	return 1
@@ -145,7 +150,7 @@ func (ncm *NodesCoordinatorMock) GetOwnPublicKey() []byte {
 }
 
 // GetNodesPerShard -
-func (ncm *NodesCoordinatorMock) GetNodesPerShard(epoch uint32) (map[uint32][]sharding.Validator, error) {
+func (ncm *NodesCoordinatorMock) GetNodesPerShard(_ uint32) (map[uint32][]sharding.Validator, error) {
 	return nil, nil
 }
 
diff --git a/node/mock/nodesCoordinatorMock.go b/node/mock/nodesCoordinatorMock.go
index f02cff9ccaf..a043acb54f9 100644
--- a/node/mock/nodesCoordinatorMock.go
+++ b/node/mock/nodesCoordinatorMock.go
@@ -12,6 +12,11 @@ type NodesCoordinatorMock struct {
 	GetAllEligibleValidatorsPublicKeysCalled func() (map[uint32][][]byte, error)
 }
 
+// SaveNodesCoordinatorRegistry -
+func (ncm *NodesCoordinatorMock) SaveNodesCoordinatorRegistry(_ *sharding.NodesCoordinatorRegistry) error {
+	return nil
+}
+
 // UpdatePeersListAndIndex -
 func (ncm *NodesCoordinatorMock) 
UpdatePeersListAndIndex() error { return nil @@ -156,7 +161,7 @@ func (ncm *NodesCoordinatorMock) GetOwnPublicKey() []byte { } // GetNodesPerShard - -func (ncm *NodesCoordinatorMock) GetNodesPerShard(epoch uint32) (map[uint32][]sharding.Validator, error) { +func (ncm *NodesCoordinatorMock) GetNodesPerShard(_ uint32) (map[uint32][]sharding.Validator, error) { return nil, nil } diff --git a/process/mock/nodesCoordinatorMock.go b/process/mock/nodesCoordinatorMock.go index 5bc9434ddcd..593aaeb3cc3 100644 --- a/process/mock/nodesCoordinatorMock.go +++ b/process/mock/nodesCoordinatorMock.go @@ -63,6 +63,11 @@ func NewNodesCoordinatorMock() *NodesCoordinatorMock { } } +// SaveNodesCoordinatorRegistry - +func (ncm *NodesCoordinatorMock) SaveNodesCoordinatorRegistry(_ *sharding.NodesCoordinatorRegistry) error { + return nil +} + // GetNumTotalEligible - func (ncm *NodesCoordinatorMock) GetNumTotalEligible() uint64 { return 1 @@ -248,7 +253,7 @@ func (ncm *NodesCoordinatorMock) GetConsensusWhitelistedNodes( } // GetNodesPerShard - -func (ncm *NodesCoordinatorMock) GetNodesPerShard(epoch uint32) (map[uint32][]sharding.Validator, error) { +func (ncm *NodesCoordinatorMock) GetNodesPerShard(_ uint32) (map[uint32][]sharding.Validator, error) { return nil, nil } diff --git a/sharding/indexHashedNodesCoordinatorRegistry.go b/sharding/indexHashedNodesCoordinatorRegistry.go index e389e13174e..5b2e4b1b5c7 100644 --- a/sharding/indexHashedNodesCoordinatorRegistry.go +++ b/sharding/indexHashedNodesCoordinatorRegistry.go @@ -47,6 +47,16 @@ func (ihgs *indexHashedNodesCoordinator) LoadState(key []byte) error { ihgs.savedStateKey = key ihgs.mutSavedStateKey.Unlock() + err = ihgs.SaveNodesCoordinatorRegistry(config) + if err != nil { + return err + } + + return nil +} + +// SaveNodesCoordinatorRegistry saves a nodesCoordinator registry +func (ihgs *indexHashedNodesCoordinator) SaveNodesCoordinatorRegistry(config *NodesCoordinatorRegistry) error { ihgs.currentEpoch = config.CurrentEpoch log.Debug("loaded nodes config", "current epoch", config.CurrentEpoch) diff --git a/sharding/interface.go b/sharding/interface.go index c227fc36846..2ccc08a5731 100644 --- a/sharding/interface.go +++ b/sharding/interface.go @@ -29,6 +29,7 @@ type NodesCoordinator interface { GetValidatorWithPublicKey(publicKey []byte, epoch uint32) (validator Validator, shardId uint32, err error) UpdatePeersListAndIndex() error LoadState(key []byte) error + SaveNodesCoordinatorRegistry(config *NodesCoordinatorRegistry) error GetSavedStateKey() []byte ShardIdForEpoch(epoch uint32) (uint32, error) GetConsensusWhitelistedNodes(epoch uint32) (map[string]struct{}, error) diff --git a/sharding/networksharding/mock_test.go b/sharding/networksharding/mock_test.go index 47ab51d2de0..bb0fceec566 100644 --- a/sharding/networksharding/mock_test.go +++ b/sharding/networksharding/mock_test.go @@ -8,6 +8,11 @@ type nodesCoordinatorStub struct { GetValidatorWithPublicKeyCalled func(publicKey []byte, epoch uint32) (validator sharding.Validator, shardId uint32, err error) } +// SaveNodesCoordinatorRegistry - +func (ncs *nodesCoordinatorStub) SaveNodesCoordinatorRegistry(_ *sharding.NodesCoordinatorRegistry) error { + return nil +} + // UpdatePeersListAndIndex - func (ncs *nodesCoordinatorStub) UpdatePeersListAndIndex() error { panic("implement me") diff --git a/storage/factory/common.go b/storage/factory/common.go index 9a07822592b..520dbef27d0 100644 --- a/storage/factory/common.go +++ b/storage/factory/common.go @@ -1,20 +1,10 @@ package factory import ( - "fmt" 
"math" - "os" - "path/filepath" - "regexp" - "sort" "strconv" - "strings" "github.com/ElrondNetwork/elrond-go/config" - "github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/process/block/bootstrapStorage" - "github.com/ElrondNetwork/elrond-go/storage" - "github.com/ElrondNetwork/elrond-go/storage/lrucache" "github.com/ElrondNetwork/elrond-go/storage/storageUnit" ) @@ -58,196 +48,6 @@ func GetBloomFromConfig(cfg config.BloomFilterConfig) storageUnit.BloomConfig { } } -// FindLatestDataFromStorage finds the last data (such as last epoch, shard ID or round) by searching over the -// storage folders and opening older databases -func FindLatestDataFromStorage( - generalConfig config.Config, - marshalizer marshal.Marshalizer, - workingDir string, - chainID string, - defaultDBPath string, - defaultEpochString string, - defaultShardString string, -) (uint32, uint32, int64, error) { - parentDir := filepath.Join( - workingDir, - defaultDBPath, - chainID) - - f, err := os.Open(parentDir) - if err != nil { - return 0, 0, 0, err - } - - files, err := f.Readdir(allFiles) - _ = f.Close() - - if err != nil { - return 0, 0, 0, err - } - - epochDirs := make([]string, 0, len(files)) - for _, file := range files { - if !file.IsDir() { - continue - } - - isEpochDir := strings.HasPrefix(file.Name(), defaultEpochString) - if !isEpochDir { - continue - } - - epochDirs = append(epochDirs, file.Name()) - } - - lastEpoch, err := getLastEpochFromDirNames(epochDirs) - if err != nil { - return 0, 0, 0, err - } - - return getLastEpochAndRoundFromStorage(generalConfig, marshalizer, parentDir, defaultEpochString, defaultShardString, lastEpoch) -} - -func getLastEpochFromDirNames(epochDirs []string) (uint32, error) { - if len(epochDirs) == 0 { - return 0, nil - } - - re := regexp.MustCompile("[0-9]+") - epochsInDirName := make([]uint32, 0, len(epochDirs)) - - for _, dirname := range epochDirs { - epochStr := re.FindString(dirname) - epoch, err := strconv.ParseInt(epochStr, 10, 64) - if err != nil { - return 0, err - } - - epochsInDirName = append(epochsInDirName, uint32(epoch)) - } - - sort.Slice(epochsInDirName, func(i, j int) bool { - return epochsInDirName[i] > epochsInDirName[j] - }) - - return epochsInDirName[0], nil -} - -func getLastEpochAndRoundFromStorage( - config config.Config, - marshalizer marshal.Marshalizer, - parentDir string, - defaultEpochString string, - defaultShardString string, - epoch uint32, -) (uint32, uint32, int64, error) { - persisterFactory := NewPersisterFactory(config.BootstrapStorage.DB) - pathWithoutShard := filepath.Join( - parentDir, - fmt.Sprintf("%s_%d", defaultEpochString, epoch), - ) - shardIdsStr, err := getShardsFromDirectory(pathWithoutShard, defaultShardString) - if err != nil { - return 0, 0, 0, err - } - - var mostRecentBootstrapData *bootstrapStorage.BootstrapData - var mostRecentShard string - highestRoundInStoredShards := int64(0) - - for _, shardIdStr := range shardIdsStr { - persisterPath := filepath.Join( - pathWithoutShard, - fmt.Sprintf("%s_%s", defaultShardString, shardIdStr), - config.BootstrapStorage.DB.FilePath, - ) - - bootstrapData, errGet := getBootstrapDataForPersisterPath(persisterFactory, persisterPath, marshalizer) - if errGet != nil { - continue - } - - if bootstrapData.LastRound > highestRoundInStoredShards { - highestRoundInStoredShards = bootstrapData.LastRound - mostRecentBootstrapData = bootstrapData - mostRecentShard = shardIdStr - } - } - - if mostRecentBootstrapData == nil { - return 0, 0, 0, 
storage.ErrBootstrapDataNotFoundInStorage - } - shardIDAsUint32, err := convertShardIDToUint32(mostRecentShard) - if err != nil { - return 0, 0, 0, err - } - - return mostRecentBootstrapData.LastHeader.Epoch, shardIDAsUint32, mostRecentBootstrapData.LastRound, nil -} - -func getBootstrapDataForPersisterPath( - persisterFactory *PersisterFactory, - persisterPath string, - marshalizer marshal.Marshalizer, -) (*bootstrapStorage.BootstrapData, error) { - persister, err := persisterFactory.Create(persisterPath) - if err != nil { - return nil, err - } - - defer func() { - errClose := persister.Close() - log.LogIfError(errClose) - }() - - cacher, err := lrucache.NewCache(10) - if err != nil { - return nil, err - } - - storer, err := storageUnit.NewStorageUnit(cacher, persister) - if err != nil { - return nil, err - } - - bootStorer, err := bootstrapStorage.NewBootstrapStorer(marshalizer, storer) - if err != nil { - return nil, err - } - - highestRound := bootStorer.GetHighestRound() - bootstrapData, err := bootStorer.Get(highestRound) - if err != nil { - return nil, err - } - - return &bootstrapData, nil -} - -func getShardsFromDirectory(path string, defaultShardString string) ([]string, error) { - shardIDs := make([]string, 0) - f, err := os.Open(path) - if err != nil { - return nil, err - } - - files, err := f.Readdir(allFiles) - _ = f.Close() - - for _, file := range files { - fileName := file.Name() - stringToSplitBy := defaultShardString + "_" - splitSlice := strings.Split(fileName, stringToSplitBy) - if len(splitSlice) < 2 { - continue - } - - shardIDs = append(shardIDs, splitSlice[1]) - } - - return shardIDs, nil -} - func convertShardIDToUint32(shardIDStr string) (uint32, error) { if shardIDStr == "metachain" { return math.MaxUint32, nil diff --git a/storage/factory/openStorage.go b/storage/factory/openStorage.go new file mode 100644 index 00000000000..c4e435f8f20 --- /dev/null +++ b/storage/factory/openStorage.go @@ -0,0 +1,355 @@ +package factory + +import ( + "fmt" + "os" + "path/filepath" + "regexp" + "sort" + "strconv" + "strings" + + "github.com/ElrondNetwork/elrond-go/config" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process/block/bootstrapStorage" + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/ElrondNetwork/elrond-go/storage/lrucache" + "github.com/ElrondNetwork/elrond-go/storage/storageUnit" +) + +// ArgsNewOpenStorageUnits - +type ArgsNewOpenStorageUnits struct { + GeneralConfig config.Config + Marshalizer marshal.Marshalizer + WorkingDir string + ChainID string + DefaultDBPath string + DefaultEpochString string + DefaultShardString string +} + +type openStorageUnits struct { + generalConfig config.Config + marshalizer marshal.Marshalizer + workingDir string + chainID string + defaultDBPath string + defaultEpochString string + defaultShardString string +} + +// NewStorageUnitOpenHandler - +func NewStorageUnitOpenHandler(args ArgsNewOpenStorageUnits) (*openStorageUnits, error) { + o := &openStorageUnits{ + generalConfig: args.GeneralConfig, + marshalizer: args.Marshalizer, + workingDir: args.WorkingDir, + chainID: args.ChainID, + defaultDBPath: args.DefaultDBPath, + defaultEpochString: args.DefaultEpochString, + defaultShardString: args.DefaultShardString, + } + + return o, nil +} + +// OpenStorageUnits - +func (o *openStorageUnits) OpenStorageUnits( + storageUnits []string, +) ([]storage.Storer, error) { + parentDir, lastEpoch, err := getParentDirAndLastEpoch( + o.workingDir, + o.chainID, + o.defaultDBPath, + 
o.defaultEpochString) + if err != nil { + return nil, err + } + + persisterFactory := NewPersisterFactory(o.generalConfig.BootstrapStorage.DB) + pathWithoutShard := filepath.Join( + parentDir, + fmt.Sprintf("%s_%d", o.defaultEpochString, lastEpoch), + ) + shardIdsStr, err := getShardsFromDirectory(pathWithoutShard, o.defaultShardString) + if err != nil { + return nil, err + } + + mostRecentShard, err := o.getMostUpToDateDirectory(pathWithoutShard, shardIdsStr, persisterFactory) + if err != nil { + return nil, err + } + + openedStorers := make([]storage.Storer, 0) + for _, filePath := range storageUnits { + persisterPath := filepath.Join( + pathWithoutShard, + fmt.Sprintf("%s_%s", o.defaultShardString, mostRecentShard), + filePath, + ) + + persister, err := persisterFactory.Create(persisterPath) + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + errClose := persister.Close() + log.LogIfError(errClose) + } + }() + + cacher, err := lrucache.NewCache(10) + if err != nil { + return nil, err + } + + storer, err := storageUnit.NewStorageUnit(cacher, persister) + if err != nil { + return nil, err + } + + openedStorers = append(openedStorers, storer) + } + + return openedStorers, nil +} + +func (o *openStorageUnits) getMostUpToDateDirectory( + pathWithoutShard string, + shardIdsStr []string, + persisterFactory *PersisterFactory, +) (string, error) { + var mostRecentShard string + highestRoundInStoredShards := int64(0) + + for _, shardIdStr := range shardIdsStr { + persisterPath := filepath.Join( + pathWithoutShard, + fmt.Sprintf("%s_%s", o.defaultShardString, shardIdStr), + o.generalConfig.BootstrapStorage.DB.FilePath, + ) + + bootstrapData, errGet := getBootstrapDataForPersisterPath(persisterFactory, persisterPath, o.marshalizer) + if errGet != nil { + continue + } + + if bootstrapData.LastRound > highestRoundInStoredShards { + highestRoundInStoredShards = bootstrapData.LastRound + mostRecentShard = shardIdStr + } + } + + if len(mostRecentShard) == 0 { + return "", storage.ErrBootstrapDataNotFoundInStorage + } + + return mostRecentShard, nil +} + +// FindLatestDataFromStorage finds the last data (such as last epoch, shard ID or round) by searching over the +// storage folders and opening older databases +func FindLatestDataFromStorage( + generalConfig config.Config, + marshalizer marshal.Marshalizer, + workingDir string, + chainID string, + defaultDBPath string, + defaultEpochString string, + defaultShardString string, +) (uint32, uint32, int64, error) { + parentDir, lastEpoch, err := getParentDirAndLastEpoch(workingDir, chainID, defaultDBPath, defaultEpochString) + if err != nil { + return 0, 0, 0, err + } + + return getLastEpochAndRoundFromStorage(generalConfig, marshalizer, parentDir, defaultEpochString, defaultShardString, lastEpoch) +} + +func getParentDirAndLastEpoch( + workingDir string, + chainID string, + defaultDBPath string, + defaultEpochString string, +) (string, uint32, error) { + parentDir := filepath.Join( + workingDir, + defaultDBPath, + chainID) + + f, err := os.Open(parentDir) + if err != nil { + return "", 0, err + } + + files, err := f.Readdir(allFiles) + _ = f.Close() + + if err != nil { + return "", 0, err + } + + epochDirs := make([]string, 0, len(files)) + for _, file := range files { + if !file.IsDir() { + continue + } + + isEpochDir := strings.HasPrefix(file.Name(), defaultEpochString) + if !isEpochDir { + continue + } + + epochDirs = append(epochDirs, file.Name()) + } + + lastEpoch, err := getLastEpochFromDirNames(epochDirs) + if err != nil { 
+ return "", 0, err + } + + return parentDir, lastEpoch, nil +} + +func getLastEpochFromDirNames(epochDirs []string) (uint32, error) { + if len(epochDirs) == 0 { + return 0, nil + } + + re := regexp.MustCompile("[0-9]+") + epochsInDirName := make([]uint32, 0, len(epochDirs)) + + for _, dirname := range epochDirs { + epochStr := re.FindString(dirname) + epoch, err := strconv.ParseInt(epochStr, 10, 64) + if err != nil { + return 0, err + } + + epochsInDirName = append(epochsInDirName, uint32(epoch)) + } + + sort.Slice(epochsInDirName, func(i, j int) bool { + return epochsInDirName[i] > epochsInDirName[j] + }) + + return epochsInDirName[0], nil +} + +func getLastEpochAndRoundFromStorage( + config config.Config, + marshalizer marshal.Marshalizer, + parentDir string, + defaultEpochString string, + defaultShardString string, + epoch uint32, +) (uint32, uint32, int64, error) { + persisterFactory := NewPersisterFactory(config.BootstrapStorage.DB) + pathWithoutShard := filepath.Join( + parentDir, + fmt.Sprintf("%s_%d", defaultEpochString, epoch), + ) + shardIdsStr, err := getShardsFromDirectory(pathWithoutShard, defaultShardString) + if err != nil { + return 0, 0, 0, err + } + + var mostRecentBootstrapData *bootstrapStorage.BootstrapData + var mostRecentShard string + highestRoundInStoredShards := int64(0) + + for _, shardIdStr := range shardIdsStr { + persisterPath := filepath.Join( + pathWithoutShard, + fmt.Sprintf("%s_%s", defaultShardString, shardIdStr), + config.BootstrapStorage.DB.FilePath, + ) + + bootstrapData, errGet := getBootstrapDataForPersisterPath(persisterFactory, persisterPath, marshalizer) + if errGet != nil { + continue + } + + if bootstrapData.LastRound > highestRoundInStoredShards { + highestRoundInStoredShards = bootstrapData.LastRound + mostRecentBootstrapData = bootstrapData + mostRecentShard = shardIdStr + } + } + + if mostRecentBootstrapData == nil { + return 0, 0, 0, storage.ErrBootstrapDataNotFoundInStorage + } + shardIDAsUint32, err := convertShardIDToUint32(mostRecentShard) + if err != nil { + return 0, 0, 0, err + } + + return mostRecentBootstrapData.LastHeader.Epoch, shardIDAsUint32, mostRecentBootstrapData.LastRound, nil +} + +func getBootstrapDataForPersisterPath( + persisterFactory *PersisterFactory, + persisterPath string, + marshalizer marshal.Marshalizer, +) (*bootstrapStorage.BootstrapData, error) { + persister, err := persisterFactory.Create(persisterPath) + if err != nil { + return nil, err + } + + defer func() { + errClose := persister.Close() + log.LogIfError(errClose) + }() + + cacher, err := lrucache.NewCache(10) + if err != nil { + return nil, err + } + + storer, err := storageUnit.NewStorageUnit(cacher, persister) + if err != nil { + return nil, err + } + + bootStorer, err := bootstrapStorage.NewBootstrapStorer(marshalizer, storer) + if err != nil { + return nil, err + } + + highestRound := bootStorer.GetHighestRound() + bootstrapData, err := bootStorer.Get(highestRound) + if err != nil { + return nil, err + } + + return &bootstrapData, nil +} + +func getShardsFromDirectory(path string, defaultShardString string) ([]string, error) { + shardIDs := make([]string, 0) + f, err := os.Open(path) + if err != nil { + return nil, err + } + + files, err := f.Readdir(allFiles) + _ = f.Close() + + for _, file := range files { + fileName := file.Name() + stringToSplitBy := defaultShardString + "_" + splitSlice := strings.Split(fileName, stringToSplitBy) + if len(splitSlice) < 2 { + continue + } + + shardIDs = append(shardIDs, splitSlice[1]) + } + + return shardIDs, 
nil +} From 9cac5fcf930a540320197891fff461474f440c3f Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Wed, 25 Mar 2020 15:14:08 +0200 Subject: [PATCH 39/61] implementation of creating nodes coordinator data from peer changes. --- epochStart/bootstrap/nodesCoordinator.go | 277 ++++++++++++++++++ epochStart/bootstrap/syncValidatorStatus.go | 68 ++--- epochStart/errors.go | 6 + sharding/indexHashedNodesCoordinator.go | 7 +- .../indexHashedNodesCoordinatorRegistry.go | 10 +- ...ndexHashedNodesCoordinatorRegistry_test.go | 6 +- .../indexHashedNodesCoordinatorWithRater.go | 36 +++ 7 files changed, 359 insertions(+), 51 deletions(-) create mode 100644 epochStart/bootstrap/nodesCoordinator.go diff --git a/epochStart/bootstrap/nodesCoordinator.go b/epochStart/bootstrap/nodesCoordinator.go new file mode 100644 index 00000000000..b3720d9f048 --- /dev/null +++ b/epochStart/bootstrap/nodesCoordinator.go @@ -0,0 +1,277 @@ +package bootstrap + +import ( + "bytes" + "fmt" + "sort" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/epochStart" + "github.com/ElrondNetwork/elrond-go/sharding" +) + +type nodesCoordinator struct { + shuffler sharding.NodesShuffler + chance sharding.ChanceComputer + shardCoordinator sharding.Coordinator + nodesConfig map[uint32]*epochNodesConfig + + shardConsensusGroupSize uint32 + metaConsensusGroupSize uint32 +} + +type validatorWithShardID struct { + validator sharding.Validator + shardID uint32 +} + +type epochNodesConfig struct { + nbShards uint32 + shardID uint32 + eligibleMap map[uint32][]sharding.Validator + waitingMap map[uint32][]sharding.Validator + expandedEligibleMap map[uint32][]sharding.Validator +} + +// NewStartInEpochNodesCoordinator creates an epoch start nodes coordinator +func NewStartInEpochNodesCoordinator() (*nodesCoordinator, error) { + return nil, nil +} + +// ComputeNodesConfigFor computes the actual nodes config for the set epoch from the validator info +func (n *nodesCoordinator) ComputeNodesConfigFor( + metaBlock *block.MetaBlock, + validatorInfos []*state.ValidatorInfo, + updateListInfo bool, +) (*sharding.EpochValidators, error) { + if check.IfNil(metaBlock) { + return nil, epochStart.ErrNilHeaderHandler + } + if len(validatorInfos) == 0 { + return nil, epochStart.ErrNilValidatorInfo + } + + randomness := metaBlock.GetPrevRandSeed() + newEpoch := metaBlock.GetEpoch() + + sort.Slice(validatorInfos, func(i, j int) bool { + return bytes.Compare(validatorInfos[i].PublicKey, validatorInfos[j].PublicKey) < 0 + }) + + leaving, err := n.computeLeaving(validatorInfos) + if err != nil { + return nil, err + } + + eligibleMap := make(map[uint32][]sharding.Validator) + waitingMap := make(map[uint32][]sharding.Validator) + newNodesMap := make([]sharding.Validator, 0) + for i := uint32(0); i < n.shardCoordinator.NumberOfShards(); i++ { + eligibleMap[i] = make([]sharding.Validator, 0) + waitingMap[i] = make([]sharding.Validator, 0) + } + eligibleMap[core.MetachainShardId] = make([]sharding.Validator, 0) + waitingMap[core.MetachainShardId] = make([]sharding.Validator, 0) + + mapValidatorInfo := make(map[string]*state.ValidatorInfo, len(validatorInfos)) + for _, validatorInfo := range validatorInfos { + validator, err := sharding.NewValidator(validatorInfo.PublicKey, validatorInfo.RewardAddress) + if err != nil { + return nil, err + } + 
mapValidatorInfo[string(validatorInfo.PublicKey)] = validatorInfo + + switch validatorInfo.List { + case string(core.WaitingList): + waitingMap[validatorInfo.ShardId] = append(waitingMap[validatorInfo.ShardId], validator) + case string(core.EligibleList): + eligibleMap[validatorInfo.ShardId] = append(eligibleMap[validatorInfo.ShardId], validator) + case string(core.NewList): + newNodesMap = append(newNodesMap, validator) + } + } + + shufflerArgs := sharding.ArgsUpdateNodes{ + Eligible: eligibleMap, + Waiting: waitingMap, + NewNodes: newNodesMap, + Leaving: leaving, + Rand: randomness, + NbShards: n.shardCoordinator.NumberOfShards(), + } + + newEligibleMap, newWaitingMap, _ := n.shuffler.UpdateNodeLists(shufflerArgs) + + err = n.setNodesPerShards(newEligibleMap, newWaitingMap, newEpoch) + if err != nil { + log.Error("set nodes per shard failed", "error", err) + return nil, err + } + + err = n.expandSavedNodes(mapValidatorInfo, newEpoch) + if err != nil { + return nil, err + } + + epochValidators := epochNodesConfigToEpochValidators(n.nodesConfig[newEpoch]) + if updateListInfo { + err = n.updateListInfoToTrie() + if err != nil { + return nil, err + } + } + + return epochValidators, nil +} + +func (n *nodesCoordinator) updateListInfoToTrie() error { + // TODO: write shuffled data to the trie + return nil +} + +func (n *nodesCoordinator) computeLeaving(allValidators []*state.ValidatorInfo) ([]sharding.Validator, error) { + leavingValidators := make([]sharding.Validator, 0) + minChances := n.chance.GetChance(0) + for _, validator := range allValidators { + + chances := n.chance.GetChance(validator.TempRating) + if chances < minChances { + val, err := sharding.NewValidator(validator.PublicKey, validator.RewardAddress) + if err != nil { + return nil, err + } + leavingValidators = append(leavingValidators, val) + } + } + + return leavingValidators, nil +} + +func (n *nodesCoordinator) setNodesPerShards( + eligible map[uint32][]sharding.Validator, + waiting map[uint32][]sharding.Validator, + epoch uint32, +) error { + nodesConfig, ok := n.nodesConfig[epoch] + if !ok { + nodesConfig = &epochNodesConfig{} + } + + nodesList, ok := eligible[core.MetachainShardId] + if !ok || uint32(len(nodesList)) < n.metaConsensusGroupSize { + return epochStart.ErrSmallMetachainEligibleListSize + } + + for shardId := uint32(0); shardId < uint32(len(eligible)-1); shardId++ { + nbNodesShard := uint32(len(eligible[shardId])) + if nbNodesShard < n.shardConsensusGroupSize { + return epochStart.ErrSmallShardEligibleListSize + } + } + + // nbShards holds number of shards without meta + nodesConfig.nbShards = uint32(len(eligible) - 1) + nodesConfig.eligibleMap = eligible + nodesConfig.waitingMap = waiting + + n.nodesConfig[epoch] = nodesConfig + return nil +} + +// ComputeShardForSelfPublicKey - +func (n *nodesCoordinator) ComputeShardForSelfPublicKey(epoch uint32, pubKey []byte) (uint32, bool) { + for shard, validators := range n.nodesConfig[epoch].eligibleMap { + for _, v := range validators { + if bytes.Equal(v.PubKey(), pubKey) { + return shard, true + } + } + } + + for shard, validators := range n.nodesConfig[epoch].waitingMap { + for _, v := range validators { + if bytes.Equal(v.PubKey(), pubKey) { + return shard, true + } + } + } + + return 0, false +} + +func (n *nodesCoordinator) expandSavedNodes( + mapValidatorInfo map[string]*state.ValidatorInfo, + epoch uint32, +) error { + nodesConfig := n.nodesConfig[epoch] + nodesConfig.expandedEligibleMap = make(map[uint32][]sharding.Validator) + + nrShards := 
len(nodesConfig.eligibleMap) + var err error + nodesConfig.expandedEligibleMap[core.MetachainShardId], err = n.expandEligibleList(nodesConfig.eligibleMap[core.MetachainShardId], mapValidatorInfo) + if err != nil { + return err + } + + for shardId := uint32(0); shardId < uint32(nrShards-1); shardId++ { + nodesConfig.expandedEligibleMap[shardId], err = n.expandEligibleList(nodesConfig.eligibleMap[shardId], mapValidatorInfo) + if err != nil { + return err + } + } + + return nil +} + +func (n *nodesCoordinator) expandEligibleList( + validators []sharding.Validator, + mapValidatorInfo map[string]*state.ValidatorInfo, +) ([]sharding.Validator, error) { + minChance := n.chance.GetChance(0) + minSize := len(validators) * int(minChance) + validatorList := make([]sharding.Validator, 0, minSize) + + for _, validatorInShard := range validators { + pk := validatorInShard.PubKey() + validatorInfo, ok := mapValidatorInfo[string(pk)] + if !ok { + return nil, epochStart.ErrNilValidatorInfo + } + + chances := n.chance.GetChance(validatorInfo.TempRating) + if chances < minChance { + chances = minChance + } + + for i := uint32(0); i < chances; i++ { + validatorList = append(validatorList, validatorInShard) + } + } + + return validatorList, nil +} + +func epochNodesConfigToEpochValidators(config *epochNodesConfig) *sharding.EpochValidators { + result := &sharding.EpochValidators{ + EligibleValidators: make(map[string][]*sharding.SerializableValidator, len(config.eligibleMap)), + WaitingValidators: make(map[string][]*sharding.SerializableValidator, len(config.waitingMap)), + } + + for k, v := range config.eligibleMap { + result.EligibleValidators[fmt.Sprint(k)] = sharding.ValidatorArrayToSerializableValidatorArray(v) + } + + for k, v := range config.waitingMap { + result.WaitingValidators[fmt.Sprint(k)] = sharding.ValidatorArrayToSerializableValidatorArray(v) + } + + return result +} + +// IsInterfaceNil returns true if underlying object is nil +func (n *nodesCoordinator) IsInterfaceNil() bool { + return n == nil +} diff --git a/epochStart/bootstrap/syncValidatorStatus.go b/epochStart/bootstrap/syncValidatorStatus.go index 10204572ce5..2854b95cf96 100644 --- a/epochStart/bootstrap/syncValidatorStatus.go +++ b/epochStart/bootstrap/syncValidatorStatus.go @@ -66,34 +66,43 @@ func (s *syncValidatorStatus) NodesConfigFromMetaBlock( return nil, 0, epochStart.ErrNotEpochStartBlock } - nodesConfig := &sharding.NodesCoordinatorRegistry{ - EpochsConfig: make(map[string]*sharding.EpochValidators), - CurrentEpoch: currMetaBlock.Epoch, - } + validatorInfos := make(map[uint32][]*state.ValidatorInfo) - epochValidators, selfShardId, err := s.processNodesConfigFor(currMetaBlock, publicKey) + epochValidators, err := s.processNodesConfigFor(currMetaBlock, publicKey) if err != nil { return nil, 0, err } - configId := fmt.Sprint(currMetaBlock.Epoch) - nodesConfig.EpochsConfig[configId] = epochValidators + validatorInfos[currMetaBlock.Epoch] = epochValidators - epochValidators, _, err = s.processNodesConfigFor(prevMetaBlock, nil) + prevEpochValidators, err := s.processNodesConfigFor(prevMetaBlock, nil) if err != nil { return nil, 0, err } - configId = fmt.Sprint(prevMetaBlock.Epoch) - nodesConfig.EpochsConfig[configId] = epochValidators + validatorInfos[prevMetaBlock.Epoch] = prevEpochValidators - // TODO: use shuffling process from nodesCoordinator to create the final data + s.createNodesConfig() + s.doShuffling() + s.exportFinalNodeConfig() return nodesConfig, selfShardId, nil } +func (s *syncValidatorStatus) createNodesConfig() { 
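+	// placeholder hook; the intended flow (an assumption sketched from the
+	// nodesCoordinator added in this patch, not the final implementation)
+	// is roughly:
+	//
+	//   validatorInfos, err := s.processNodesConfigFor(metaBlock, publicKey)
+	//   epochValidators, err := coordinator.ComputeNodesConfigFor(metaBlock, validatorInfos, true)
+	//
+	// i.e. decode the synced peer mini blocks into []*state.ValidatorInfo
+	// entries, then shuffle them into per-shard eligible/waiting maps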
+
+}
+
+func (s *syncValidatorStatus) doShuffling() {
+
+}
+
+func (s *syncValidatorStatus) exportFinalNodeConfig() {
+
+}
+
 func (s *syncValidatorStatus) processNodesConfigFor(
 	metaBlock *block.MetaBlock,
 	publicKey []byte,
-) (*sharding.EpochValidators, uint32, error) {
+) ([]*state.ValidatorInfo, error) {
 	shardMBHeaders := make([]block.ShardMiniBlockHeader, 0)
 	for _, mbHeader := range metaBlock.MiniBlockHeaders {
 		if mbHeader.Type != block.PeerBlock {
@@ -112,51 +121,28 @@ func (s *syncValidatorStatus) processNodesConfigFor(
 	s.miniBlocksSyncer.ClearFields()
 	err := s.miniBlocksSyncer.SyncPendingMiniBlocks(shardMBHeaders, timeToWait)
 	if err != nil {
-		return nil, 0, err
+		return nil, err
 	}
 
 	peerMiniBlocks, err := s.miniBlocksSyncer.GetMiniBlocks()
 	if err != nil {
-		return nil, 0, err
-	}
-
-	epochValidators := &sharding.EpochValidators{
-		EligibleValidators: make(map[string][]*sharding.SerializableValidator),
-		WaitingValidators:  make(map[string][]*sharding.SerializableValidator),
+		return nil, err
 	}
 
-	selfShardId := core.AllShardId
-	found := false
-	shouldSearchSelfId := len(publicKey) == 0
+	validatorInfos := make([]*state.ValidatorInfo, 0)
 	for _, mb := range peerMiniBlocks {
 		for _, txHash := range mb.TxHashes {
 			vid := &state.ValidatorInfo{}
 			err := s.marshalizer.Unmarshal(vid, txHash)
 			if err != nil {
-				return nil, 0, err
+				return nil, err
 			}
 
-			serializableValidator := &sharding.SerializableValidator{
-				PubKey:  vid.PublicKey,
-				Address: vid.RewardAddress, // TODO - take out - need to refactor validator.go and its usage across the project
-			}
-
-			shardId := fmt.Sprint(vid.ShardId)
-			switch vid.List {
-			case string(core.EligibleList):
-				epochValidators.EligibleValidators[shardId] = append(epochValidators.EligibleValidators[shardId], serializableValidator)
-			case string(core.WaitingList):
-				epochValidators.WaitingValidators[shardId] = append(epochValidators.EligibleValidators[shardId], serializableValidator)
-			}
-
-			if shouldSearchSelfId && !found && bytes.Equal(vid.PublicKey, publicKey) {
-				selfShardId = vid.ShardId
-				found = true
-			}
+			validatorInfos = append(validatorInfos, vid)
 		}
 	}
 
-	return epochValidators, selfShardId, nil
+	return validatorInfos, nil
 }
 
 // IsInterfaceNil returns true if underlying object is nil
diff --git a/epochStart/errors.go b/epochStart/errors.go
index ab646aeaab2..c909ad86533 100644
--- a/epochStart/errors.go
+++ b/epochStart/errors.go
@@ -133,3 +133,9 @@ var ErrNumTriesExceeded = errors.New("number of tries exceeded")
 
 // ErrMissingHeader signals that searched header is missing
 var ErrMissingHeader = errors.New("missing header")
+
+// ErrSmallShardEligibleListSize signals that the eligible validators list's size is less than the consensus size
+var ErrSmallShardEligibleListSize = errors.New("small shard eligible list size")
+
+// ErrSmallMetachainEligibleListSize signals that the eligible validators list's size is less than the consensus size
+var ErrSmallMetachainEligibleListSize = errors.New("small metachain eligible list size")
diff --git a/sharding/indexHashedNodesCoordinator.go b/sharding/indexHashedNodesCoordinator.go
index 0c1924ec177..cef0e425a2e 100644
--- a/sharding/indexHashedNodesCoordinator.go
+++ b/sharding/indexHashedNodesCoordinator.go
@@ -61,7 +61,7 @@ type indexHashedNodesCoordinator struct {
 	metaConsensusGroupSize  int
 	nodesPerShardSetter     NodesPerShardSetter
 	consensusGroupCacher    Cacher
-	shardIDAsObserver        uint32
+	shardIDAsObserver       uint32
 }
 
 // NewIndexHashedNodesCoordinator creates a new index hashed group selector
@@ -96,7 +96,7 @@ func
NewIndexHashedNodesCoordinator(arguments ArgNodesCoordinator) (*indexHashed shardConsensusGroupSize: arguments.ShardConsensusGroupSize, metaConsensusGroupSize: arguments.MetaConsensusGroupSize, consensusGroupCacher: arguments.ConsensusGroupCache, - shardIDAsObserver: arguments.ShardIDAsObserver, + shardIDAsObserver: arguments.ShardIDAsObserver, } ihgs.nodesPerShardSetter = ihgs @@ -187,6 +187,7 @@ func (ihgs *indexHashedNodesCoordinator) SetNodesPerShards( // nbShards holds number of shards without meta nodesConfig.nbShards = uint32(len(eligible) - 1) nodesConfig.eligibleMap = eligible + nodesConfig.expandedEligibleMap = eligible nodesConfig.waitingMap = waiting nodesConfig.publicKeyToValidatorMap = make(map[string]*validatorWithShardID) for shardId, shardEligible := range nodesConfig.eligibleMap { @@ -258,7 +259,7 @@ func (ihgs *indexHashedNodesCoordinator) ComputeConsensusGroup( if shardID >= nodesConfig.nbShards && shardID != core.MetachainShardId { return nil, ErrInvalidShardId } - expandedList = nodesConfig.eligibleMap[shardID] + expandedList = nodesConfig.expandedEligibleMap[shardID] } ihgs.mutNodesConfig.RUnlock() diff --git a/sharding/indexHashedNodesCoordinatorRegistry.go b/sharding/indexHashedNodesCoordinatorRegistry.go index 5b2e4b1b5c7..279406d90fd 100644 --- a/sharding/indexHashedNodesCoordinatorRegistry.go +++ b/sharding/indexHashedNodesCoordinatorRegistry.go @@ -127,7 +127,6 @@ func (ihgs *indexHashedNodesCoordinator) registryToNodesCoordinator( } var nodesConfig *epochNodesConfig - nodesConfig, err = epochValidatorsToEpochNodesConfig(epochValidators) if err != nil { return nil, err @@ -138,6 +137,8 @@ func (ihgs *indexHashedNodesCoordinator) registryToNodesCoordinator( return nil, ErrInvalidNumberOfShards } + nodesConfig.expandedEligibleMap = nodesConfig.eligibleMap + // shards without metachain shard nodesConfig.nbShards = nbShards - 1 nodesConfig.shardID = ihgs.computeShardForSelfPublicKey(nodesConfig) @@ -155,11 +156,11 @@ func epochNodesConfigToEpochValidators(config *epochNodesConfig) *EpochValidator } for k, v := range config.eligibleMap { - result.EligibleValidators[fmt.Sprint(k)] = validatorArrayToSerializableValidatorArray(v) + result.EligibleValidators[fmt.Sprint(k)] = ValidatorArrayToSerializableValidatorArray(v) } for k, v := range config.waitingMap { - result.WaitingValidators[fmt.Sprint(k)] = validatorArrayToSerializableValidatorArray(v) + result.WaitingValidators[fmt.Sprint(k)] = ValidatorArrayToSerializableValidatorArray(v) } return result @@ -203,7 +204,8 @@ func serializableValidatorsMapToValidatorsMap( return result, nil } -func validatorArrayToSerializableValidatorArray(validators []Validator) []*SerializableValidator { +// ValidatorArrayToSerializableValidatorArray - +func ValidatorArrayToSerializableValidatorArray(validators []Validator) []*SerializableValidator { result := make([]*SerializableValidator, len(validators)) for i, v := range validators { diff --git a/sharding/indexHashedNodesCoordinatorRegistry_test.go b/sharding/indexHashedNodesCoordinatorRegistry_test.go index 3f2e4d06f83..0a4d5f5cc74 100644 --- a/sharding/indexHashedNodesCoordinatorRegistry_test.go +++ b/sharding/indexHashedNodesCoordinatorRegistry_test.go @@ -154,7 +154,7 @@ func TestIndexHashedNodesCoordinator_validatorArrayToSerializableValidatorArray( validatorsMap := createDummyNodesMap(5, 2, "dummy") for _, validatorsArray := range validatorsMap { - sValidators := validatorArrayToSerializableValidatorArray(validatorsArray) + sValidators := 
ValidatorArrayToSerializableValidatorArray(validatorsArray) assert.True(t, validatorsEqualSerializableValidators(validatorsArray, sValidators)) } } @@ -164,7 +164,7 @@ func TestIndexHashedNodesCoordinator_serializableValidatorsMapToValidatorsMap(t sValidatorsMap := make(map[string][]*SerializableValidator) for k, validatorsArray := range validatorsMap { - sValidators := validatorArrayToSerializableValidatorArray(validatorsArray) + sValidators := ValidatorArrayToSerializableValidatorArray(validatorsArray) sValidatorsMap[fmt.Sprint(k)] = sValidators } @@ -175,7 +175,7 @@ func TestIndexHashedNodesCoordinator_serializableValidatorArrayToValidatorArray( validatorsMap := createDummyNodesMap(5, 2, "dummy") for _, validatorsArray := range validatorsMap { - sValidators := validatorArrayToSerializableValidatorArray(validatorsArray) + sValidators := ValidatorArrayToSerializableValidatorArray(validatorsArray) valArray, err := serializableValidatorArrayToValidatorArray(sValidators) assert.Nil(t, err) assert.True(t, sameValidators(validatorsArray, valArray)) diff --git a/sharding/indexHashedNodesCoordinatorWithRater.go b/sharding/indexHashedNodesCoordinatorWithRater.go index d4a216fe1f6..7b693392bb8 100644 --- a/sharding/indexHashedNodesCoordinatorWithRater.go +++ b/sharding/indexHashedNodesCoordinatorWithRater.go @@ -1,6 +1,8 @@ package sharding import ( + "encoding/json" + "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/core/check" ) @@ -150,3 +152,37 @@ func (ihgs *indexHashedNodesCoordinatorWithRater) expandEligibleList(validators return validatorList, nil } + +// LoadState loads the nodes coordinator state from the used boot storage +func (ihgs *indexHashedNodesCoordinatorWithRater) LoadState(key []byte) error { + ncInternalkey := append([]byte(keyPrefix), key...) 
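+	// the registry is looked up under keyPrefix + key; in this series the
+	// key is expected to be the epoch start meta block's rand seed, the
+	// same value the bootstrap storage handler uses when saving it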
+ + log.Debug("getting nodes coordinator config", "key", ncInternalkey) + + data, err := ihgs.bootStorer.Get(ncInternalkey) + if err != nil { + return err + } + + config := &NodesCoordinatorRegistry{} + err = json.Unmarshal(data, config) + if err != nil { + return err + } + + ihgs.mutSavedStateKey.Lock() + ihgs.savedStateKey = key + ihgs.mutSavedStateKey.Unlock() + + err = ihgs.SaveNodesCoordinatorRegistry(config) + if err != nil { + return err + } + + err = ihgs.expandAllLists(config.CurrentEpoch) + if err != nil { + return err + } + + return nil +} From 64281ca570221beb1a37f65353d129ced3052bcf Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Wed, 25 Mar 2020 19:14:45 +0200 Subject: [PATCH 40/61] finished the remaining implementation --- cmd/node/main.go | 11 +- epochStart/bootstrap/interface.go | 14 ++- epochStart/bootstrap/nodesCoordinator.go | 117 ++++++++++++++++---- epochStart/bootstrap/process.go | 70 +++++++----- epochStart/bootstrap/syncValidatorStatus.go | 53 ++++----- 5 files changed, 189 insertions(+), 76 deletions(-) diff --git a/cmd/node/main.go b/cmd/node/main.go index 884c5e50b81..d4cfc61de8e 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -620,6 +620,11 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { return err } + rater, err := rating.NewBlockSigningRaterAndListIndexer(economicsData.RatingsData()) + if err != nil { + return err + } + epochStartBootsrapArgs := bootstrap.ArgsEpochStartBootstrap{ PublicKey: pubKey, Marshalizer: coreComponents.InternalMarshalizer, @@ -637,6 +642,7 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { DefaultDBPath: defaultDBPath, DefaultEpochString: defaultEpochString, DefaultShardString: defaultShardString, + Rater: rater, } bootsrapper, err := bootstrap.NewEpochStartBootstrapHandler(epochStartBootsrapArgs) if err != nil { @@ -660,11 +666,6 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { return err } - rater, err := rating.NewBlockSigningRaterAndListIndexer(economicsData.RatingsData()) - if err != nil { - return err - } - log.Trace("initializing stats file") err = initStatsFileMonitor(generalConfig, pubKey, log, workingDir, pathManager, shardId) if err != nil { diff --git a/epochStart/bootstrap/interface.go b/epochStart/bootstrap/interface.go index 2f8bf651323..cec932a3c9d 100644 --- a/epochStart/bootstrap/interface.go +++ b/epochStart/bootstrap/interface.go @@ -2,6 +2,7 @@ package bootstrap import ( "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/sharding" ) @@ -16,8 +17,19 @@ type StartOfEpochNodesConfigHandler interface { IsInterfaceNil() bool } -// EpochStartInterceptor +// EpochStartInterceptor - type EpochStartInterceptor interface { process.Interceptor GetEpochStartMetaBlock(target int, epoch uint32) (*block.MetaBlock, error) } + +// EpochStartNodesCoordinator - +type EpochStartNodesCoordinator interface { + ComputeNodesConfigFor( + metaBlock *block.MetaBlock, + validatorInfos []*state.ValidatorInfo, + updateListInfo bool, + ) (*sharding.EpochValidators, error) + ComputeShardForSelfPublicKey(epoch uint32, pubKey []byte) (uint32, bool) + IsInterfaceNil() bool +} diff --git a/epochStart/bootstrap/nodesCoordinator.go b/epochStart/bootstrap/nodesCoordinator.go index b3720d9f048..ee19d86ecfc 100644 --- a/epochStart/bootstrap/nodesCoordinator.go +++ b/epochStart/bootstrap/nodesCoordinator.go @@ -10,22 +10,20 @@ import 
( "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/epochStart" + "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/sharding" ) type nodesCoordinator struct { - shuffler sharding.NodesShuffler - chance sharding.ChanceComputer - shardCoordinator sharding.Coordinator - nodesConfig map[uint32]*epochNodesConfig - + shuffler sharding.NodesShuffler + chance sharding.ChanceComputer + numShards map[uint32]uint32 shardConsensusGroupSize uint32 metaConsensusGroupSize uint32 -} + validatorAccountsDB state.AccountsAdapter + adrConv state.AddressConverter -type validatorWithShardID struct { - validator sharding.Validator - shardID uint32 + nodesConfig map[uint32]*epochNodesConfig } type epochNodesConfig struct { @@ -36,9 +34,29 @@ type epochNodesConfig struct { expandedEligibleMap map[uint32][]sharding.Validator } +// ArgsNewStartInEpochNodesCoordinator - +type ArgsNewStartInEpochNodesCoordinator struct { + Shuffler sharding.NodesShuffler + Chance sharding.ChanceComputer + ValidatorAccountsDB state.AccountsAdapter + AdrConv state.AddressConverter + ShardConsensusGroupSize uint32 + MetaConsensusGroupSize uint32 +} + // NewStartInEpochNodesCoordinator creates an epoch start nodes coordinator -func NewStartInEpochNodesCoordinator() (*nodesCoordinator, error) { - return nil, nil +func NewStartInEpochNodesCoordinator(args ArgsNewStartInEpochNodesCoordinator) (*nodesCoordinator, error) { + n := &nodesCoordinator{ + shuffler: args.Shuffler, + chance: args.Chance, + shardConsensusGroupSize: args.ShardConsensusGroupSize, + metaConsensusGroupSize: args.MetaConsensusGroupSize, + nodesConfig: make(map[uint32]*epochNodesConfig), + validatorAccountsDB: args.ValidatorAccountsDB, + adrConv: args.AdrConv, + } + + return n, nil } // ComputeNodesConfigFor computes the actual nodes config for the set epoch from the validator info @@ -56,6 +74,7 @@ func (n *nodesCoordinator) ComputeNodesConfigFor( randomness := metaBlock.GetPrevRandSeed() newEpoch := metaBlock.GetEpoch() + n.numShards[newEpoch] = uint32(len(metaBlock.EpochStart.LastFinalizedHeaders)) sort.Slice(validatorInfos, func(i, j int) bool { return bytes.Compare(validatorInfos[i].PublicKey, validatorInfos[j].PublicKey) < 0 @@ -69,7 +88,7 @@ func (n *nodesCoordinator) ComputeNodesConfigFor( eligibleMap := make(map[uint32][]sharding.Validator) waitingMap := make(map[uint32][]sharding.Validator) newNodesMap := make([]sharding.Validator, 0) - for i := uint32(0); i < n.shardCoordinator.NumberOfShards(); i++ { + for i := uint32(0); i < n.numShards[newEpoch]; i++ { eligibleMap[i] = make([]sharding.Validator, 0) waitingMap[i] = make([]sharding.Validator, 0) } @@ -100,7 +119,7 @@ func (n *nodesCoordinator) ComputeNodesConfigFor( NewNodes: newNodesMap, Leaving: leaving, Rand: randomness, - NbShards: n.shardCoordinator.NumberOfShards(), + NbShards: n.numShards[newEpoch], } newEligibleMap, newWaitingMap, _ := n.shuffler.UpdateNodeLists(shufflerArgs) @@ -118,7 +137,7 @@ func (n *nodesCoordinator) ComputeNodesConfigFor( epochValidators := epochNodesConfigToEpochValidators(n.nodesConfig[newEpoch]) if updateListInfo { - err = n.updateListInfoToTrie() + err = n.updateAccountListAndIndex(newEpoch) if err != nil { return nil, err } @@ -127,11 +146,6 @@ func (n *nodesCoordinator) ComputeNodesConfigFor( return epochValidators, nil } -func (n *nodesCoordinator) updateListInfoToTrie() error { - // TODO: write shuffled data to the trie - return nil -} - func (n 
*nodesCoordinator) computeLeaving(allValidators []*state.ValidatorInfo) ([]sharding.Validator, error) { leavingValidators := make([]sharding.Validator, 0) minChances := n.chance.GetChance(0) @@ -271,6 +285,71 @@ func epochNodesConfigToEpochValidators(config *epochNodesConfig) *sharding.Epoch return result } +func (n *nodesCoordinator) updateAccountListAndIndex(epoch uint32) error { + err := n.updateAccountsForGivenMap(n.nodesConfig[epoch].eligibleMap, core.EligibleList) + if err != nil { + return err + } + + err = n.updateAccountsForGivenMap(n.nodesConfig[epoch].waitingMap, core.WaitingList) + if err != nil { + return err + } + + return nil +} + +func (n *nodesCoordinator) updateAccountsForGivenMap( + validators map[uint32][]sharding.Validator, + list core.PeerType, +) error { + for shardId, accountsPerShard := range validators { + for index, account := range accountsPerShard { + err := n.updateListAndIndex( + string(account.PubKey()), + shardId, + string(list), + int32(index)) + if err != nil { + log.Warn("error while updating list and index for peer", + "error", err, + "public key", account.PubKey()) + } + } + } + + return nil +} + +func (n *nodesCoordinator) updateListAndIndex(pubKey string, shardID uint32, list string, index int32) error { + peer, err := n.getPeerAccount([]byte(pubKey)) + if err != nil { + log.Debug("error getting peer account", "error", err, "key", pubKey) + return err + } + + return peer.SetListAndIndexWithJournal(shardID, list, index) +} + +func (n *nodesCoordinator) getPeerAccount(address []byte) (state.PeerAccountHandler, error) { + addressContainer, err := n.adrConv.CreateAddressFromPublicKeyBytes(address) + if err != nil { + return nil, err + } + + account, err := n.validatorAccountsDB.GetAccountWithJournal(addressContainer) + if err != nil { + return nil, err + } + + peerAccount, ok := account.(state.PeerAccountHandler) + if !ok { + return nil, process.ErrInvalidPeerAccount + } + + return peerAccount, nil +} + // IsInterfaceNil returns true if underlying object is nil func (n *nodesCoordinator) IsInterfaceNil() bool { return n == nil diff --git a/epochStart/bootstrap/process.go b/epochStart/bootstrap/process.go index 812bc936fab..36af46a89f5 100644 --- a/epochStart/bootstrap/process.go +++ b/epochStart/bootstrap/process.go @@ -10,6 +10,8 @@ import ( "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/data/state/addressConverters" + "github.com/ElrondNetwork/elrond-go/data/state/factory" "github.com/ElrondNetwork/elrond-go/data/syncer" "github.com/ElrondNetwork/elrond-go/data/trie" trieFactory "github.com/ElrondNetwork/elrond-go/data/trie/factory" @@ -75,6 +77,7 @@ type epochStartBootstrap struct { defaultDBPath string defaultEpochString string defaultShardString string + rater sharding.ChanceComputer // created components requestHandler process.RequestHandler @@ -124,6 +127,7 @@ type ArgsEpochStartBootstrap struct { DefaultDBPath string DefaultEpochString string DefaultShardString string + Rater sharding.ChanceComputer } // NewEpochStartBootstrap will return a new instance of epochStartBootstrap @@ -143,6 +147,7 @@ func NewEpochStartBootstrapHandler(args ArgsEpochStartBootstrap) (*epochStartBoo blockKeyGen: args.BlockKeyGen, singleSigner: args.SingleSigner, blockSingleSigner: args.BlockSingleSigner, + rater: args.Rater, } return epochStartProvider, nil @@ -287,16 +292,6 @@ func (e *epochStartBootstrap) prepareComponentsToSyncFromNetwork() 
error { } e.headersSyncer, err = sync.NewMissingheadersByHashSyncer(syncMissingHeadersArgs) - argsNewValidatorStatusSyncers := ArgsNewSyncValidatorStatus{ - DataPool: e.dataPool, - Marshalizer: e.marshalizer, - RequestHandler: e.requestHandler, - } - e.nodesConfigHandler, err = NewSyncValidatorStatus(argsNewValidatorStatusSyncers) - if err != nil { - return err - } - return nil } @@ -348,7 +343,17 @@ func (e *epochStartBootstrap) requestAndProcessing() (uint32, uint32, uint32, er return 0, 0, 0, err } - e.nodesConfig, e.baseData.shardId, err = e.nodesConfigHandler.NodesConfigFromMetaBlock(e.epochStartMeta, e.prevEpochStartMeta, pubKeyBytes) + err = e.createTrieStorageManagers() + if err != nil { + return 0, 0, 0, err + } + + err = e.syncPeerAccountsState(e.epochStartMeta.ValidatorStatsRootHash) + if err != nil { + return 0, 0, 0, err + } + + err = e.processNodesConfig(pubKeyBytes, e.epochStartMeta.ValidatorStatsRootHash) if err != nil { return 0, 0, 0, err } @@ -373,18 +378,43 @@ func (e *epochStartBootstrap) requestAndProcessing() (uint32, uint32, uint32, er return e.baseData.shardId, e.baseData.numberOfShards, e.baseData.lastEpoch, nil } -func (e *epochStartBootstrap) requestAndProcessForMeta() error { - err := e.createTrieStorageManagers() +func (e *epochStartBootstrap) processNodesConfig(pubKey []byte, rootHash []byte) error { + accountFactory, err := factory.NewAccountFactoryCreator(state.ValidatorAccount) + if err != nil { + return err + } + peerAccountsDB, err := state.NewPeerAccountsDB(e.peerAccountTries[string(rootHash)], e.hasher, e.marshalizer, accountFactory) if err != nil { return err } - err = e.syncUserAccountsState(e.epochStartMeta.RootHash) + blsAddressConverter, err := addressConverters.NewPlainAddressConverter( + e.generalConfig.BLSPublicKey.Length, + e.generalConfig.BLSPublicKey.Prefix, + ) + if err != nil { + return err + } + argsNewValidatorStatusSyncers := ArgsNewSyncValidatorStatus{ + DataPool: e.dataPool, + Marshalizer: e.marshalizer, + RequestHandler: e.requestHandler, + Rater: e.rater, + GenesisNodesConfig: e.genesisNodesConfig, + ValidatorAccountsDB: peerAccountsDB, + AdrConv: blsAddressConverter, + } + e.nodesConfigHandler, err = NewSyncValidatorStatus(argsNewValidatorStatusSyncers) if err != nil { return err } - err = e.syncPeerAccountsState(e.epochStartMeta.ValidatorStatsRootHash) + e.nodesConfig, e.baseData.shardId, err = e.nodesConfigHandler.NodesConfigFromMetaBlock(e.epochStartMeta, e.prevEpochStartMeta, pubKey) + return err +} + +func (e *epochStartBootstrap) requestAndProcessForMeta() error { + err := e.syncUserAccountsState(e.epochStartMeta.RootHash) if err != nil { return err } @@ -470,21 +500,11 @@ func (e *epochStartBootstrap) requestAndProcessForShard() error { return epochStart.ErrWrongTypeAssertion } - err = e.createTrieStorageManagers() - if err != nil { - return err - } - err = e.syncUserAccountsState(ownShardHdr.RootHash) if err != nil { return err } - err = e.syncPeerAccountsState(e.epochStartMeta.ValidatorStatsRootHash) - if err != nil { - return err - } - components := &ComponentsNeededForBootstrap{ EpochStartMetaBlock: e.epochStartMeta, PreviousEpochStartMetaBlock: e.prevEpochStartMeta, diff --git a/epochStart/bootstrap/syncValidatorStatus.go b/epochStart/bootstrap/syncValidatorStatus.go index 2854b95cf96..ffc2891ebc9 100644 --- a/epochStart/bootstrap/syncValidatorStatus.go +++ b/epochStart/bootstrap/syncValidatorStatus.go @@ -1,9 +1,6 @@ package bootstrap import ( - "bytes" - "fmt" - "github.com/ElrondNetwork/elrond-go/core" 
"github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/data/state" @@ -21,14 +18,18 @@ type syncValidatorStatus struct { dataPool dataRetriever.PoolsHolder marshalizer marshal.Marshalizer requestHandler process.RequestHandler - nodeCoordinator sharding.NodesCoordinator + nodeCoordinator EpochStartNodesCoordinator } // ArgsNewSyncValidatorStatus type ArgsNewSyncValidatorStatus struct { - DataPool dataRetriever.PoolsHolder - Marshalizer marshal.Marshalizer - RequestHandler process.RequestHandler + DataPool dataRetriever.PoolsHolder + Marshalizer marshal.Marshalizer + RequestHandler process.RequestHandler + Rater sharding.ChanceComputer + GenesisNodesConfig *sharding.NodesSetup + ValidatorAccountsDB state.AccountsAdapter + AdrConv state.AddressConverter } // NewSyncValidatorStatus creates a new validator status process component @@ -50,6 +51,23 @@ func NewSyncValidatorStatus(args ArgsNewSyncValidatorStatus) (*syncValidatorStat return nil, err } + nodeShuffler := sharding.NewXorValidatorsShuffler( + args.GenesisNodesConfig.MinNodesPerShard, + args.GenesisNodesConfig.MetaChainMinNodes, + args.GenesisNodesConfig.Hysteresis, + args.GenesisNodesConfig.Adaptivity, + ) + + argsNodesCoordinator := ArgsNewStartInEpochNodesCoordinator{ + Shuffler: nodeShuffler, + Chance: args.Rater, + ShardConsensusGroupSize: args.GenesisNodesConfig.ConsensusGroupSize, + MetaConsensusGroupSize: args.GenesisNodesConfig.MetaChainConsensusGroupSize, + AdrConv: args.AdrConv, + ValidatorAccountsDB: args.ValidatorAccountsDB, + } + s.nodeCoordinator, err = NewStartInEpochNodesCoordinator(argsNodesCoordinator) + return s, nil } @@ -68,40 +86,23 @@ func (s *syncValidatorStatus) NodesConfigFromMetaBlock( validatorInfos := make(map[uint32][]*state.ValidatorInfo) - epochValidators, err := s.processNodesConfigFor(currMetaBlock, publicKey) + epochValidators, err := s.processNodesConfigFor(currMetaBlock) if err != nil { return nil, 0, err } validatorInfos[currMetaBlock.Epoch] = epochValidators - prevEpochValidators, err := s.processNodesConfigFor(prevMetaBlock, nil) + prevEpochValidators, err := s.processNodesConfigFor(prevMetaBlock) if err != nil { return nil, 0, err } validatorInfos[prevMetaBlock.Epoch] = prevEpochValidators - s.createNodesConfig() - s.doShuffling() - s.exportFinalNodeConfig() - return nodesConfig, selfShardId, nil } -func (s *syncValidatorStatus) createNodesConfig() { - -} - -func (s *syncValidatorStatus) doShuffling() { - -} - -func (s *syncValidatorStatus) exportFinalNodeConfig() { - -} - func (s *syncValidatorStatus) processNodesConfigFor( metaBlock *block.MetaBlock, - publicKey []byte, ) ([]*state.ValidatorInfo, error) { shardMBHeaders := make([]block.ShardMiniBlockHeader, 0) for _, mbHeader := range metaBlock.MiniBlockHeaders { From 85d8f4aa5ffa7e921b9d0479e12211e6ac06a3ef Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Wed, 25 Mar 2020 20:23:18 +0200 Subject: [PATCH 41/61] finished the remaining implementation --- epochStart/bootstrap/interface.go | 2 +- epochStart/bootstrap/nodesCoordinator.go | 8 +++--- epochStart/bootstrap/syncValidatorStatus.go | 30 +++++++++++++++++---- 3 files changed, 30 insertions(+), 10 deletions(-) diff --git a/epochStart/bootstrap/interface.go b/epochStart/bootstrap/interface.go index cec932a3c9d..d8859bb8c1d 100644 --- a/epochStart/bootstrap/interface.go +++ b/epochStart/bootstrap/interface.go @@ -30,6 +30,6 @@ type EpochStartNodesCoordinator interface { validatorInfos []*state.ValidatorInfo, updateListInfo bool, ) (*sharding.EpochValidators, error) 
- ComputeShardForSelfPublicKey(epoch uint32, pubKey []byte) (uint32, bool) + ComputeShardForSelfPublicKey(epoch uint32, pubKey []byte) uint32 IsInterfaceNil() bool } diff --git a/epochStart/bootstrap/nodesCoordinator.go b/epochStart/bootstrap/nodesCoordinator.go index ee19d86ecfc..7de1696ae94 100644 --- a/epochStart/bootstrap/nodesCoordinator.go +++ b/epochStart/bootstrap/nodesCoordinator.go @@ -196,11 +196,11 @@ func (n *nodesCoordinator) setNodesPerShards( } // ComputeShardForSelfPublicKey - -func (n *nodesCoordinator) ComputeShardForSelfPublicKey(epoch uint32, pubKey []byte) (uint32, bool) { +func (n *nodesCoordinator) ComputeShardForSelfPublicKey(epoch uint32, pubKey []byte) uint32 { for shard, validators := range n.nodesConfig[epoch].eligibleMap { for _, v := range validators { if bytes.Equal(v.PubKey(), pubKey) { - return shard, true + return shard } } } @@ -208,12 +208,12 @@ func (n *nodesCoordinator) ComputeShardForSelfPublicKey(epoch uint32, pubKey []b for shard, validators := range n.nodesConfig[epoch].waitingMap { for _, v := range validators { if bytes.Equal(v.PubKey(), pubKey) { - return shard, true + return shard } } } - return 0, false + return core.AllShardId } func (n *nodesCoordinator) expandSavedNodes( diff --git a/epochStart/bootstrap/syncValidatorStatus.go b/epochStart/bootstrap/syncValidatorStatus.go index ffc2891ebc9..e7caca39b7a 100644 --- a/epochStart/bootstrap/syncValidatorStatus.go +++ b/epochStart/bootstrap/syncValidatorStatus.go @@ -1,6 +1,8 @@ package bootstrap import ( + "fmt" + "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/data/state" @@ -84,19 +86,37 @@ func (s *syncValidatorStatus) NodesConfigFromMetaBlock( return nil, 0, epochStart.ErrNotEpochStartBlock } - validatorInfos := make(map[uint32][]*state.ValidatorInfo) + prevEpochValidatorsInfo, err := s.processNodesConfigFor(prevMetaBlock) + if err != nil { + return nil, 0, err + } + + prevEpochsValidators, err := s.nodeCoordinator.ComputeNodesConfigFor(prevMetaBlock, prevEpochValidatorsInfo, false) + if err != nil { + return nil, 0, err + } - epochValidators, err := s.processNodesConfigFor(currMetaBlock) + currEpochValidatorsInfo, err := s.processNodesConfigFor(currMetaBlock) if err != nil { return nil, 0, err } - validatorInfos[currMetaBlock.Epoch] = epochValidators - prevEpochValidators, err := s.processNodesConfigFor(prevMetaBlock) + currEpochsValidators, err := s.nodeCoordinator.ComputeNodesConfigFor(currMetaBlock, currEpochValidatorsInfo, true) if err != nil { return nil, 0, err } - validatorInfos[prevMetaBlock.Epoch] = prevEpochValidators + + selfShardId := s.nodeCoordinator.ComputeShardForSelfPublicKey(currMetaBlock.Epoch, publicKey) + + nodesConfig := &sharding.NodesCoordinatorRegistry{ + EpochsConfig: make(map[string]*sharding.EpochValidators, 2), + CurrentEpoch: currMetaBlock.Epoch, + } + + epochConfigId := fmt.Sprint(prevMetaBlock.Epoch) + nodesConfig.EpochsConfig[epochConfigId] = prevEpochsValidators + epochConfigId = fmt.Sprint(currMetaBlock.Epoch) + nodesConfig.EpochsConfig[epochConfigId] = currEpochsValidators return nodesConfig, selfShardId, nil } From 3db82f5f961627e80f716a50e0409a668b7ce172 Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Thu, 26 Mar 2020 13:06:25 +0200 Subject: [PATCH 42/61] fixes after review --- cmd/node/factory/structs.go | 102 ++++++++--------- cmd/node/main.go | 51 ++++----- dataRetriever/factory/dataPoolFactory.go | 4 +- .../baseResolversContainerFactory.go | 3 - 
.../metaResolversContainerFactory.go | 8 +- epochStart/bootstrap/baseStorageHandler.go | 18 ++- .../epochStartInterceptorsContainerFactory.go | 4 +- epochStart/bootstrap/fromLocalStorage.go | 40 ++++--- epochStart/bootstrap/metaStorageHandler.go | 37 ++++-- epochStart/bootstrap/process.go | 107 +++++++++++------- epochStart/bootstrap/shardStorageHandler.go | 24 ++-- integrationTests/testProcessorNode.go | 2 +- process/block/shardblock.go | 2 +- process/common.go | 2 +- process/track/shardBlockTrack.go | 2 +- update/sync/syncHeadersByHash.go | 6 + update/sync/syncMiniBlocks.go | 2 +- 17 files changed, 227 insertions(+), 187 deletions(-) diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index 307413686ff..6cd47f54c6d 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -35,7 +35,7 @@ import ( "github.com/ElrondNetwork/elrond-go/data/typeConverters" "github.com/ElrondNetwork/elrond-go/data/typeConverters/uint64ByteSlice" "github.com/ElrondNetwork/elrond-go/dataRetriever" - factory2 "github.com/ElrondNetwork/elrond-go/dataRetriever/factory" + dataRetrieverFactory "github.com/ElrondNetwork/elrond-go/dataRetriever/factory" "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/containers" "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/resolverscontainer" "github.com/ElrondNetwork/elrond-go/dataRetriever/requestHandlers" @@ -402,12 +402,12 @@ func DataComponentsFactory(args *dataComponentsFactoryArgs) (*Data, error) { return nil, errors.New("could not create local data store: " + err.Error()) } - dataPoolArgs := factory2.ArgsDataPool{ + dataPoolArgs := dataRetrieverFactory.ArgsDataPool{ Config: args.config, EconomicsData: args.economicsData, ShardCoordinator: args.shardCoordinator, } - datapool, err = factory2.NewDataPoolFromConfig(dataPoolArgs) + datapool, err = dataRetrieverFactory.NewDataPoolFromConfig(dataPoolArgs) if err != nil { return nil, errors.New("could not create data pools: ") } @@ -543,29 +543,29 @@ func NetworkComponentsFactory( } type processComponentsFactoryArgs struct { - coreComponents *coreComponentsFactoryArgs - genesisConfig *sharding.Genesis - economicsData *economics.EconomicsData - nodesConfig *sharding.NodesSetup - gasSchedule map[string]map[string]uint64 - syncer ntp.SyncTimer - shardCoordinator sharding.Coordinator - nodesCoordinator sharding.NodesCoordinator - data *Data - coreData *Core - crypto *Crypto - state *State - network *Network - coreServiceContainer serviceContainer.Core - requestedItemsHandler dataRetriever.RequestedItemsHandler - whiteListHandler process.InterceptedDataWhiteList - epochStartNotifier EpochStartNotifier - epochStart *config.EpochStartConfig - rater sharding.PeerAccountListAndRatingHandler - startEpochNum uint32 - sizeCheckDelta uint32 - stateCheckpointModulus uint - maxComputableRounds uint64 + coreComponents *coreComponentsFactoryArgs + genesisConfig *sharding.Genesis + economicsData *economics.EconomicsData + nodesConfig *sharding.NodesSetup + gasSchedule map[string]map[string]uint64 + syncer ntp.SyncTimer + shardCoordinator sharding.Coordinator + nodesCoordinator sharding.NodesCoordinator + data *Data + coreData *Core + crypto *Crypto + state *State + network *Network + coreServiceContainer serviceContainer.Core + requestedItemsHandler dataRetriever.RequestedItemsHandler + whiteListHandler process.InterceptedDataWhiteList + epochStartNotifier EpochStartNotifier + epochStart *config.EpochStartConfig + rater sharding.PeerAccountListAndRatingHandler + startEpochNum uint32 + 
sizeCheckDelta uint32 + stateCheckpointModulus uint + maxComputableRounds uint64 numConcurrentResolverJobs int32 minSizeInBytes uint32 maxSizeInBytes uint32 @@ -601,29 +601,29 @@ func NewProcessComponentsFactoryArgs( maxSizeInBytes uint32, ) *processComponentsFactoryArgs { return &processComponentsFactoryArgs{ - coreComponents: coreComponents, - genesisConfig: genesisConfig, - economicsData: economicsData, - nodesConfig: nodesConfig, - gasSchedule: gasSchedule, - syncer: syncer, - shardCoordinator: shardCoordinator, - nodesCoordinator: nodesCoordinator, - data: data, - coreData: coreData, - crypto: crypto, - state: state, - network: network, - coreServiceContainer: coreServiceContainer, - requestedItemsHandler: requestedItemsHandler, - whiteListHandler: whiteListHandler, - epochStartNotifier: epochStartNotifier, - epochStart: epochStart, - startEpochNum: startEpochNum, - rater: rater, - sizeCheckDelta: sizeCheckDelta, - stateCheckpointModulus: stateCheckpointModulus, - maxComputableRounds: maxComputableRounds, + coreComponents: coreComponents, + genesisConfig: genesisConfig, + economicsData: economicsData, + nodesConfig: nodesConfig, + gasSchedule: gasSchedule, + syncer: syncer, + shardCoordinator: shardCoordinator, + nodesCoordinator: nodesCoordinator, + data: data, + coreData: coreData, + crypto: crypto, + state: state, + network: network, + coreServiceContainer: coreServiceContainer, + requestedItemsHandler: requestedItemsHandler, + whiteListHandler: whiteListHandler, + epochStartNotifier: epochStartNotifier, + epochStart: epochStart, + startEpochNum: startEpochNum, + rater: rater, + sizeCheckDelta: sizeCheckDelta, + stateCheckpointModulus: stateCheckpointModulus, + maxComputableRounds: maxComputableRounds, numConcurrentResolverJobs: numConcurrentResolverJobs, minSizeInBytes: minSizeInBytes, maxSizeInBytes: maxSizeInBytes, @@ -1434,14 +1434,14 @@ func generateGenesisHeadersAndApplyInitialBalances(args *processComponentsFactor if errNewCache != nil { return nil, errNewCache } - newBlkc, errNewMetachain := blockchain.NewMetaChain(cache) + newBlockChain, errNewMetachain := blockchain.NewMetaChain(cache) if errNewMetachain != nil { return nil, errNewMetachain } argsMetaGenesis.ShardCoordinator = newShardCoordinator argsMetaGenesis.Accounts = newAccounts - argsMetaGenesis.Blkc = newBlkc + argsMetaGenesis.Blkc = newBlockChain } genesisBlock, err := genesis.CreateMetaGenesisBlock( diff --git a/cmd/node/main.go b/cmd/node/main.go index d4cfc61de8e..395570d2fa0 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -391,7 +391,6 @@ func getSuite(config *config.Config) (crypto.Suite, error) { func startNode(ctx *cli.Context, log logger.Logger, version string) error { log.Trace("startNode called") workingDir := getWorkingDir(ctx, log) - var networkComponents *factory.Network var err error withLogFile := ctx.GlobalBool(logSaveFile.Name) @@ -530,27 +529,9 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { preferencesConfig.Preferences.NodeDisplayName = ctx.GlobalString(nodeDisplayName.Name) } - if ctx.IsSet(workingDirectory.Name) { - workingDir = ctx.GlobalString(workingDirectory.Name) - } else { - workingDir, err = os.Getwd() - if err != nil { - log.LogIfError(err) - workingDir = "" - } - } - log.Trace("working directory", "path", workingDir) - - storageCleanupFlagValue := ctx.GlobalBool(storageCleanup.Name) - if storageCleanupFlagValue { - dbPath := filepath.Join( - workingDir, - defaultDBPath) - log.Trace("cleaning storage", "path", dbPath) - err = 
os.RemoveAll(dbPath) - if err != nil { - return err - } + err = cleanupStorageIfNecessary(workingDir, ctx, log) + if err != nil { + return err } pathTemplateForPruningStorer := filepath.Join( @@ -604,7 +585,7 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { } log.Trace("creating network components") - networkComponents, err = factory.NetworkComponentsFactory(*p2pConfig, *generalConfig, coreComponents.StatusHandler) + networkComponents, err := factory.NetworkComponentsFactory(*p2pConfig, *generalConfig, coreComponents.StatusHandler) if err != nil { return err } @@ -625,7 +606,7 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { return err } - epochStartBootsrapArgs := bootstrap.ArgsEpochStartBootstrap{ + epochStartBootstrapArgs := bootstrap.ArgsEpochStartBootstrap{ PublicKey: pubKey, Marshalizer: coreComponents.InternalMarshalizer, Hasher: coreComponents.Hasher, @@ -644,24 +625,25 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { DefaultShardString: defaultShardString, Rater: rater, } - bootsrapper, err := bootstrap.NewEpochStartBootstrapHandler(epochStartBootsrapArgs) + bootstrapper, err := bootstrap.NewEpochStartBootstrap(epochStartBootstrapArgs) if err != nil { log.Error("could not create bootsrapper", "err", err) return err } - currentEpoch, currentShardId, numOfShards, err := bootsrapper.Bootstrap() + bootstrapParameters, err := bootstrapper.Bootstrap() if err != nil { log.Error("boostrap return error", "error", err) return err } + currentEpoch := bootstrapParameters.Epoch if !generalConfig.StoragePruning.Enabled { // TODO: refactor this as when the pruning storer is disabled, the default directory path is Epoch_0 // and it should be Epoch_ALL or something similar currentEpoch = 0 } - shardCoordinator, err := sharding.NewMultiShardCoordinator(numOfShards, currentShardId) + shardCoordinator, err := sharding.NewMultiShardCoordinator(bootstrapParameters.NumOfShards, bootstrapParameters.SelfShardId) if err != nil { return err } @@ -1000,6 +982,21 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { return nil } +func cleanupStorageIfNecessary(workingDir string, ctx *cli.Context, log logger.Logger) error { + storageCleanupFlagValue := ctx.GlobalBool(storageCleanup.Name) + if storageCleanupFlagValue { + dbPath := filepath.Join( + workingDir, + defaultDBPath) + log.Trace("cleaning storage", "path", dbPath) + err := os.RemoveAll(dbPath) + if err != nil { + return err + } + } + return nil +} + func copyConfigToStatsFolder(statsFolder string, configs []string) { for _, configFile := range configs { copySingleFile(statsFolder, configFile) diff --git a/dataRetriever/factory/dataPoolFactory.go b/dataRetriever/factory/dataPoolFactory.go index c53fc05ad3c..cc6e02e80fb 100644 --- a/dataRetriever/factory/dataPoolFactory.go +++ b/dataRetriever/factory/dataPoolFactory.go @@ -5,7 +5,7 @@ import ( "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool" "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool/headersCache" - txpool2 "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/txpool" + txPoolFactory "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/txpool" "github.com/ElrondNetwork/elrond-go/dataRetriever/shardedData" "github.com/ElrondNetwork/elrond-go/dataRetriever/txpool" "github.com/ElrondNetwork/elrond-go/logger" @@ -30,7 +30,7 @@ func NewDataPoolFromConfig(args ArgsDataPool) (dataRetriever.PoolsHolder, error) 
mainConfig := args.Config - txPool, err := txpool2.CreateTxPool(txpool.ArgShardedTxPool{ + txPool, err := txPoolFactory.CreateTxPool(txpool.ArgShardedTxPool{ Config: factory.GetCacherFromConfig(mainConfig.TxDataPool), MinGasPrice: args.EconomicsData.MinGasPrice(), NumberOfShards: args.ShardCoordinator.NumberOfShards(), diff --git a/dataRetriever/factory/resolverscontainer/baseResolversContainerFactory.go b/dataRetriever/factory/resolverscontainer/baseResolversContainerFactory.go index 6ff7dd5381f..5e8951f82f6 100644 --- a/dataRetriever/factory/resolverscontainer/baseResolversContainerFactory.go +++ b/dataRetriever/factory/resolverscontainer/baseResolversContainerFactory.go @@ -15,9 +15,6 @@ import ( "github.com/ElrondNetwork/elrond-go/sharding" ) -// numPeersToQuery number of peers to send the message -const numPeersToQuery = 2 - const emptyExcludePeersOnTopic = "" const defaultTargetShardID = uint32(0) const numCrossShardPeers = 2 diff --git a/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory.go b/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory.go index 7c19e9d216c..e3c1ff1d309 100644 --- a/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory.go +++ b/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory.go @@ -126,7 +126,7 @@ func (mrcf *metaResolversContainerFactory) generateShardHeaderResolvers() error identifierHeader := factory.ShardBlocksTopic + shardC.CommunicationIdentifier(idx) excludePeersFromTopic := emptyExcludePeersOnTopic - resolver, err := mrcf.createShardHeaderResolver(identifierHeader, excludePeersFromTopic, idx, numPeersToQuery) + resolver, err := mrcf.createShardHeaderResolver(identifierHeader, excludePeersFromTopic, idx) if err != nil { return err } @@ -138,12 +138,10 @@ func (mrcf *metaResolversContainerFactory) generateShardHeaderResolvers() error return mrcf.container.AddMultiple(keys, resolversSlice) } -// createShardHeaderResolver will return a shard header resolver for the given shard ID func (mrcf *metaResolversContainerFactory) createShardHeaderResolver( topic string, excludedTopic string, shardID uint32, - numPeersToQuery int, ) (dataRetriever.Resolver, error) { hdrStorer := mrcf.store.GetStorer(dataRetriever.BlockHeaderUnit) @@ -183,7 +181,7 @@ func (mrcf *metaResolversContainerFactory) createShardHeaderResolver( func (mrcf *metaResolversContainerFactory) generateMetaChainHeaderResolvers() error { identifierHeader := factory.MetachainBlocksTopic - resolver, err := mrcf.createMetaChainHeaderResolver(identifierHeader, numPeersToQuery, core.MetachainShardId) + resolver, err := mrcf.createMetaChainHeaderResolver(identifierHeader, core.MetachainShardId) if err != nil { return err } @@ -191,10 +189,8 @@ func (mrcf *metaResolversContainerFactory) generateMetaChainHeaderResolvers() er return mrcf.container.Add(identifierHeader, resolver) } -// createMetaChainHeaderResolver will return a resolver for metachain headers func (mrcf *metaResolversContainerFactory) createMetaChainHeaderResolver( identifier string, - numPeersToQuery int, shardId uint32, ) (dataRetriever.Resolver, error) { hdrStorer := mrcf.store.GetStorer(dataRetriever.MetaBlockUnit) diff --git a/epochStart/bootstrap/baseStorageHandler.go b/epochStart/bootstrap/baseStorageHandler.go index 24a95b53a70..4e26b17c016 100644 --- a/epochStart/bootstrap/baseStorageHandler.go +++ b/epochStart/bootstrap/baseStorageHandler.go @@ -14,9 +14,9 @@ import ( // HighestRoundFromBootStorage is the key for the highest round that is saved in storage 
const highestRoundFromBootStorage = "highestRoundFromBootStorage" -const triggerRegistrykeyPrefix = "epochStartTrigger_" +const triggerRegistryKeyPrefix = "epochStartTrigger_" -const nodesCoordinatorRegistrykeyPrefix = "indexHashed_" +const nodesCoordinatorRegistryKeyPrefix = "indexHashed_" // baseStorageHandler handles the storage functions for saving bootstrap data type baseStorageHandler struct { @@ -27,13 +27,11 @@ type baseStorageHandler struct { currentEpoch uint32 } -func (bsh *baseStorageHandler) getAndSavePendingMiniBlocks(miniBlocks map[string]*block.MiniBlock) ([]bootstrapStorage.PendingMiniBlocksInfo, error) { +func (bsh *baseStorageHandler) groupMiniBlocksByShard(miniBlocks map[string]*block.MiniBlock) ([]bootstrapStorage.PendingMiniBlocksInfo, error) { pendingMBsMap := make(map[uint32][][]byte) for hash, miniBlock := range miniBlocks { - if _, ok := pendingMBsMap[miniBlock.SenderShardID]; !ok { - pendingMBsMap[miniBlock.SenderShardID] = make([][]byte, 0) - } - pendingMBsMap[miniBlock.SenderShardID] = append(pendingMBsMap[miniBlock.SenderShardID], []byte(hash)) + senderShId := miniBlock.SenderShardID + pendingMBsMap[senderShId] = append(pendingMBsMap[senderShId], []byte(hash)) } sliceToRet := make([]bootstrapStorage.PendingMiniBlocksInfo, 0) @@ -47,11 +45,11 @@ func (bsh *baseStorageHandler) getAndSavePendingMiniBlocks(miniBlocks map[string return sliceToRet, nil } -func (bsh *baseStorageHandler) getAndSaveNodesCoordinatorKey( +func (bsh *baseStorageHandler) saveNodesCoordinatorRegistry( metaBlock *block.MetaBlock, nodesConfig *sharding.NodesCoordinatorRegistry, ) ([]byte, error) { - key := append([]byte(nodesCoordinatorRegistrykeyPrefix), metaBlock.RandSeed...) + key := append([]byte(nodesCoordinatorRegistryKeyPrefix), metaBlock.RandSeed...) 
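A quick aside on the groupMiniBlocksByShard simplification above: appending to a missing map entry is safe in Go because the zero value of a slice is nil and append allocates on demand, which is what makes the removed make([][]byte, 0) pre-allocation redundant. A minimal, self-contained sketch of the idiom (the hash values are invented for illustration):

package main

import "fmt"

// groupByShard mirrors the pattern used by groupMiniBlocksByShard:
// appending directly to grouped[shard] works even for a key that was
// never initialized, since append on a nil slice allocates a new one.
func groupByShard(senderShardByHash map[string]uint32) map[uint32][][]byte {
	grouped := make(map[uint32][][]byte)
	for hash, senderShardID := range senderShardByHash {
		grouped[senderShardID] = append(grouped[senderShardID], []byte(hash))
	}
	return grouped
}

func main() {
	// hypothetical miniblock hash -> sender shard pairs
	senderShardByHash := map[string]uint32{"mbHashA": 0, "mbHashB": 1, "mbHashC": 0}
	for shard, hashes := range groupByShard(senderShardByHash) {
		fmt.Printf("shard %d: %d pending miniblock hashes\n", shard, len(hashes))
	}
}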
registryBytes, err := json.Marshal(nodesConfig) if err != nil { @@ -66,7 +64,7 @@ func (bsh *baseStorageHandler) getAndSaveNodesCoordinatorKey( return key, nil } -func (bsh *baseStorageHandler) saveTries(components *ComponentsNeededForBootstrap) error { +func (bsh *baseStorageHandler) commitTries(components *ComponentsNeededForBootstrap) error { for _, trie := range components.UserAccountTries { err := trie.Commit() if err != nil { diff --git a/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go b/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go index 03f72411d3c..9635a111a61 100644 --- a/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go +++ b/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go @@ -61,7 +61,7 @@ func NewEpochStartInterceptorsContainer(args ArgsEpochStartInterceptorContainer) validityAttester := disabled.NewValidityAttester() epochStartTrigger := disabled.NewEpochStartTrigger() - argsIntCont := interceptorscontainer.MetaInterceptorsContainerFactoryArgs{ + containerFactoryArgs := interceptorscontainer.MetaInterceptorsContainerFactoryArgs{ ShardCoordinator: args.ShardCoordinator, NodesCoordinator: nodesCoordinator, Messenger: args.Messenger, @@ -88,7 +88,7 @@ func NewEpochStartInterceptorsContainer(args ArgsEpochStartInterceptorContainer) WhiteListHandler: args.WhiteListHandler, } - interceptorsContainerFactory, err := interceptorscontainer.NewMetaInterceptorsContainerFactory(argsIntCont) + interceptorsContainerFactory, err := interceptorscontainer.NewMetaInterceptorsContainerFactory(containerFactoryArgs) if err != nil { return nil, err } diff --git a/epochStart/bootstrap/fromLocalStorage.go b/epochStart/bootstrap/fromLocalStorage.go index 3acac4d12ac..e98de285258 100644 --- a/epochStart/bootstrap/fromLocalStorage.go +++ b/epochStart/bootstrap/fromLocalStorage.go @@ -14,7 +14,7 @@ import ( storageFactory "github.com/ElrondNetwork/elrond-go/storage/factory" ) -func (e *epochStartBootstrap) searchDataInLocalStorage() { +func (e *epochStartBootstrap) initializeFromLocalStorage() { var errNotCritical error e.baseData.lastEpoch, e.baseData.shardId, e.baseData.lastRound, errNotCritical = storageFactory.FindLatestDataFromStorage( e.generalConfig, @@ -35,7 +35,7 @@ func (e *epochStartBootstrap) searchDataInLocalStorage() { } } -func (e *epochStartBootstrap) prepareEpochFromStorage() (uint32, uint32, uint32, error) { +func (e *epochStartBootstrap) prepareEpochFromStorage() (Parameters, error) { args := storageFactory.ArgsNewOpenStorageUnits{ GeneralConfig: e.generalConfig, Marshalizer: e.marshalizer, @@ -47,7 +47,7 @@ func (e *epochStartBootstrap) prepareEpochFromStorage() (uint32, uint32, uint32, } openStorageHandler, err := storageFactory.NewStorageUnitOpenHandler(args) if err != nil { - return 0, 0, 0, err + return Parameters{}, err } unitsToOpen := make([]string, 0) @@ -63,63 +63,73 @@ func (e *epochStartBootstrap) prepareEpochFromStorage() (uint32, uint32, uint32, }() if err != nil || len(storageUnits) != len(unitsToOpen) { - return 0, 0, 0, err + return Parameters{}, err } _, e.nodesConfig, err = e.getLastBootstrapData(storageUnits[0]) if err != nil { - return 0, 0, 0, err + return Parameters{}, err } pubKey, err := e.publicKey.ToByteArray() if err != nil { - return 0, 0, 0, err + return Parameters{}, err } if !e.checkIfShuffledOut(pubKey, e.nodesConfig) { - return e.baseData.lastEpoch, e.baseData.shardId, e.baseData.numberOfShards, nil + parameters := Parameters{ + Epoch: e.baseData.lastEpoch, + 
SelfShardId: e.baseData.shardId, + NumOfShards: e.baseData.numberOfShards, + } + return parameters, nil } e.epochStartMeta, err = e.getEpochStartMetaFromStorage(storageUnits[1]) if err != nil { - return 0, 0, 0, err + return Parameters{}, err } err = e.prepareComponentsToSyncFromNetwork() if err != nil { - return 0, 0, 0, err + return Parameters{}, err } e.syncedHeaders, err = e.syncHeadersFrom(e.epochStartMeta) if err != nil { - return 0, 0, 0, err + return Parameters{}, err } prevEpochStartMetaHash := e.epochStartMeta.EpochStart.Economics.PrevEpochStartHash prevEpochStartMeta, ok := e.syncedHeaders[string(prevEpochStartMetaHash)].(*block.MetaBlock) if !ok { - return 0, 0, 0, epochStart.ErrWrongTypeAssertion + return Parameters{}, epochStart.ErrWrongTypeAssertion } e.prevEpochStartMeta = prevEpochStartMeta e.shardCoordinator, err = sharding.NewMultiShardCoordinator(e.baseData.numberOfShards, e.baseData.shardId) if err != nil { - return 0, 0, 0, err + return Parameters{}, err } if e.shardCoordinator.SelfId() != core.MetachainShardId { err = e.requestAndProcessForShard() if err != nil { - return 0, 0, 0, err + return Parameters{}, err } } err = e.requestAndProcessForMeta() if err != nil { - return 0, 0, 0, err + return Parameters{}, err } - return e.baseData.lastEpoch, e.shardCoordinator.SelfId(), e.shardCoordinator.NumberOfShards(), nil + parameters := Parameters{ + Epoch: e.baseData.lastEpoch, + SelfShardId: e.shardCoordinator.SelfId(), + NumOfShards: e.shardCoordinator.NumberOfShards(), + } + return parameters, nil } func (e *epochStartBootstrap) checkIfShuffledOut( diff --git a/epochStart/bootstrap/metaStorageHandler.go b/epochStart/bootstrap/metaStorageHandler.go index 49ec43cd938..91d201e6cc8 100644 --- a/epochStart/bootstrap/metaStorageHandler.go +++ b/epochStart/bootstrap/metaStorageHandler.go @@ -70,29 +70,31 @@ func (msh *metaStorageHandler) SaveDataToStorage(components *ComponentsNeededFor bootStorer := msh.storageService.GetStorer(dataRetriever.BootstrapUnit) - lastHeader, err := msh.getAndSaveLastHeader(components.EpochStartMetaBlock) + lastHeader, err := msh.saveLastHeader(components.EpochStartMetaBlock) if err != nil { return err } - miniBlocks, err := msh.getAndSavePendingMiniBlocks(components.PendingMiniBlocks) + miniBlocks, err := msh.groupMiniBlocksByShard(components.PendingMiniBlocks) if err != nil { return err } - triggerConfigKey, err := msh.getAndSaveTriggerRegistry(components) + triggerConfigKey, err := msh.saveTriggerRegistry(components) if err != nil { return err } - nodesCoordinatorConfigKey, err := msh.getAndSaveNodesCoordinatorKey(components.EpochStartMetaBlock, components.NodesConfig) + nodesCoordinatorConfigKey, err := msh.saveNodesCoordinatorRegistry(components.EpochStartMetaBlock, components.NodesConfig) if err != nil { return err } + lastCrossNotarizedHeader := msh.getLastCrossNotarizedHeaders(components.EpochStartMetaBlock) + bootStrapData := bootstrapStorage.BootstrapData{ - LastHeader: lastHeader, // meta - epoch start metablock ; shard - shard header - LastCrossNotarizedHeaders: nil, // lastFinalizedMetaBlock + firstPendingMetaBlock + LastHeader: lastHeader, + LastCrossNotarizedHeaders: lastCrossNotarizedHeader, LastSelfNotarizedHeaders: []bootstrapStorage.BootstrapHeaderInfo{lastHeader}, ProcessedMiniBlocks: nil, PendingMiniBlocks: miniBlocks, @@ -111,7 +113,7 @@ func (msh *metaStorageHandler) SaveDataToStorage(components *ComponentsNeededFor return err } - err = msh.saveTries(components) + err = msh.commitTries(components) if err != nil { return err }
@@ -120,14 +122,25 @@ func (msh *metaStorageHandler) SaveDataToStorage(components *ComponentsNeededFor return nil } -func (msh *metaStorageHandler) getAndSaveLastHeader(metaBlock *block.MetaBlock) (bootstrapStorage.BootstrapHeaderInfo, error) { +func (msh *metaStorageHandler) getLastCrossNotarizedHeaders(meta *block.MetaBlock) []bootstrapStorage.BootstrapHeaderInfo { + crossNotarizedHdrs := make([]bootstrapStorage.BootstrapHeaderInfo, 0) + for _, epochStartShardData := range meta.EpochStart.LastFinalizedHeaders { + crossNotarizedHdrs = append(crossNotarizedHdrs, bootstrapStorage.BootstrapHeaderInfo{ + ShardId: epochStartShardData.ShardID, + Nonce: epochStartShardData.Nonce, + Hash: epochStartShardData.HeaderHash, + }) + } + + return crossNotarizedHdrs +} + +func (msh *metaStorageHandler) saveLastHeader(metaBlock *block.MetaBlock) (bootstrapStorage.BootstrapHeaderInfo, error) { lastHeaderHash, err := core.CalculateHash(msh.marshalizer, msh.hasher, metaBlock) if err != nil { return bootstrapStorage.BootstrapHeaderInfo{}, err } - //metaBlock. - lastHeaderBytes, err := msh.marshalizer.Marshal(metaBlock) if err != nil { return bootstrapStorage.BootstrapHeaderInfo{}, err @@ -147,7 +160,7 @@ func (msh *metaStorageHandler) getAndSaveLastHeader(metaBlock *block.MetaBlock) return bootstrapHdrInfo, nil } -func (msh *metaStorageHandler) getAndSaveTriggerRegistry(components *ComponentsNeededForBootstrap) ([]byte, error) { +func (msh *metaStorageHandler) saveTriggerRegistry(components *ComponentsNeededForBootstrap) ([]byte, error) { metaBlock := components.EpochStartMetaBlock hash, err := core.CalculateHash(msh.marshalizer, msh.hasher, metaBlock) if err != nil { @@ -165,7 +178,7 @@ func (msh *metaStorageHandler) getAndSaveTriggerRegistry(components *ComponentsN } trigStateKey := fmt.Sprintf("initial_value_epoch%d", metaBlock.Epoch) - key := []byte(triggerRegistrykeyPrefix + trigStateKey) + key := []byte(triggerRegistryKeyPrefix + trigStateKey) triggerRegBytes, err := json.Marshal(&triggerReg) if err != nil { diff --git a/epochStart/bootstrap/process.go b/epochStart/bootstrap/process.go index 36af46a89f5..f8ec2cdaacd 100644 --- a/epochStart/bootstrap/process.go +++ b/epochStart/bootstrap/process.go @@ -44,6 +44,13 @@ var log = logger.GetOrCreate("epochStart/bootstrap") const timeToWait = 5 * time.Second +// Parameters holds the values resulted from the epoch start bootstrap: the epoch to start in, the self shard ID and the number of shards +type Parameters struct { + Epoch uint32 + SelfShardId uint32 + NumOfShards uint32 +} + // ComponentsNeededForBootstrap holds the components which need to be initialized from network type ComponentsNeededForBootstrap struct { EpochStartMetaBlock *block.MetaBlock @@ -131,7 +138,7 @@ type ArgsEpochStartBootstrap struct { } // NewEpochStartBootstrap will return a new instance of epochStartBootstrap -func NewEpochStartBootstrapHandler(args ArgsEpochStartBootstrap) (*epochStartBootstrap, error) { +func NewEpochStartBootstrap(args ArgsEpochStartBootstrap) (*epochStartBootstrap, error) { epochStartProvider := &epochStartBootstrap{ publicKey: args.PublicKey, marshalizer: args.Marshalizer, @@ -153,6 +160,11 @@ func NewEpochStartBootstrapHandler(args ArgsEpochStartBoo return epochStartProvider, nil } +func (e *epochStartBootstrap) computedDurationOfEpoch() time.Duration { + return time.Duration(e.genesisNodesConfig.RoundDuration * + uint64(e.generalConfig.EpochStartConfig.RoundsPerEpoch)) +} + func (e *epochStartBootstrap) isStartInEpochZero() bool { startTime := time.Unix(e.genesisNodesConfig.StartTime, 0) isCurrentTimeBeforeGenesis := time.Now().Sub(startTime) < 0 @@
-160,24 +172,26 @@ func (e *epochStartBootstrap) isStartInEpochZero() bool { return true } - timeInFirstEpochAtRoundsPerEpoch := startTime.Add(time.Duration(e.genesisNodesConfig.RoundDuration * - uint64(e.generalConfig.EpochStartConfig.RoundsPerEpoch))) - isEpochZero := time.Now().Sub(timeInFirstEpochAtRoundsPerEpoch) < 0 + configuredDurationOfEpoch := startTime.Add(e.computedDurationOfEpoch()) + isEpochZero := time.Now().Sub(configuredDurationOfEpoch) < 0 return isEpochZero } -func (e *epochStartBootstrap) prepareEpochZero() (uint32, uint32, uint32, error) { - currentEpoch := uint32(0) - return currentEpoch, e.shardCoordinator.SelfId(), e.shardCoordinator.NumberOfShards(), nil +func (e *epochStartBootstrap) prepareEpochZero() (Parameters, error) { + parameters := Parameters{ + Epoch: 0, + SelfShardId: e.shardCoordinator.SelfId(), + NumOfShards: e.shardCoordinator.NumberOfShards(), + } + return parameters, nil } func (e *epochStartBootstrap) computeMostProbableEpoch() { startTime := time.Unix(e.genesisNodesConfig.StartTime, 0) elapsedTime := time.Since(startTime) - timeForOneEpoch := time.Duration(e.genesisNodesConfig.RoundDuration * - uint64(e.generalConfig.EpochStartConfig.RoundsPerEpoch)) + timeForOneEpoch := e.computedDurationOfEpoch() elapsedTimeInSeconds := uint64(elapsedTime.Seconds()) timeForOneEpochInSeconds := uint64(timeForOneEpoch.Seconds()) @@ -185,32 +199,32 @@ func (e *epochStartBootstrap) computeMostProbableEpoch() { e.computedEpoch = uint32(elapsedTimeInSeconds / timeForOneEpochInSeconds) } -func (e *epochStartBootstrap) Bootstrap() (uint32, uint32, uint32, error) { +func (e *epochStartBootstrap) Bootstrap() (Parameters, error) { if e.isStartInEpochZero() { return e.prepareEpochZero() } e.computeMostProbableEpoch() - e.searchDataInLocalStorage() + e.initializeFromLocalStorage() // TODO: make a better decision according to lastRound, lastEpoch isCurrentEpochSaved := e.baseData.lastEpoch+1 >= e.computedEpoch if isCurrentEpochSaved { - epoch, shardId, numOfShards, err := e.prepareEpochFromStorage() + parameters, err := e.prepareEpochFromStorage() if err == nil { - return epoch, shardId, numOfShards, nil + return parameters, nil } } var err error e.shardCoordinator, err = sharding.NewMultiShardCoordinator(e.genesisNodesConfig.NumberOfShards(), core.MetachainShardId) if err != nil { - return 0, 0, 0, err + return Parameters{}, err } err = e.prepareComponentsToSyncFromNetwork() if err != nil { - return 0, 0, 0, err + return Parameters{}, err } return e.requestAndProcessing() @@ -316,11 +330,11 @@ func (e *epochStartBootstrap) syncHeadersFrom(meta *block.MetaBlock) (map[string } // requestAndProcessing will handle requesting and receiving the needed information the node will bootstrap from -func (e *epochStartBootstrap) requestAndProcessing() (uint32, uint32, uint32, error) { +func (e *epochStartBootstrap) requestAndProcessing() (Parameters, error) { var err error e.epochStartMeta, err = e.epochStartMetaBlockSyncer.SyncEpochStartMeta(timeToWait) if err != nil { - return 0, 0, 0, err + return Parameters{}, err } e.baseData.numberOfShards = uint32(len(e.epochStartMeta.EpochStart.LastFinalizedHeaders)) @@ -328,54 +342,59 @@ func (e *epochStartBootstrap) requestAndProcessing() (uint32, uint32, uint32, er e.syncedHeaders, err = e.syncHeadersFrom(e.epochStartMeta) if err != nil { - return 0, 0, 0, err + return Parameters{}, err } prevEpochStartMetaHash := e.epochStartMeta.EpochStart.Economics.PrevEpochStartHash prevEpochStartMeta, ok :=
e.syncedHeaders[string(prevEpochStartMetaHash)].(*block.MetaBlock) if !ok { - return 0, 0, 0, epochStart.ErrWrongTypeAssertion + return Parameters{}, epochStart.ErrWrongTypeAssertion } e.prevEpochStartMeta = prevEpochStartMeta pubKeyBytes, err := e.publicKey.ToByteArray() if err != nil { - return 0, 0, 0, err + return Parameters{}, err } err = e.createTrieStorageManagers() if err != nil { - return 0, 0, 0, err + return Parameters{}, err } err = e.syncPeerAccountsState(e.epochStartMeta.ValidatorStatsRootHash) if err != nil { - return 0, 0, 0, err + return Parameters{}, err } err = e.processNodesConfig(pubKeyBytes, e.epochStartMeta.ValidatorStatsRootHash) if err != nil { - return 0, 0, 0, err + return Parameters{}, err } e.shardCoordinator, err = sharding.NewMultiShardCoordinator(e.baseData.numberOfShards, e.baseData.shardId) if err != nil { - return 0, 0, 0, err + return Parameters{}, err } - if e.shardCoordinator.SelfId() == core.MetachainShardId { + if e.shardCoordinator.SelfId() != core.MetachainShardId { err = e.requestAndProcessForShard() if err != nil { - return 0, 0, 0, err + return Parameters{}, err } } err = e.requestAndProcessForMeta() if err != nil { - return 0, 0, 0, err + return Parameters{}, err } - return e.baseData.shardId, e.baseData.numberOfShards, e.baseData.lastEpoch, nil + parameters := Parameters{ + Epoch: e.baseData.lastEpoch, + SelfShardId: e.baseData.shardId, + NumOfShards: e.baseData.numberOfShards, + } + return parameters, nil } func (e *epochStartBootstrap) processNodesConfig(pubKey []byte, rootHash []byte) error { @@ -449,21 +468,23 @@ func (e *epochStartBootstrap) requestAndProcessForMeta() error { return nil } -func (e *epochStartBootstrap) requestAndProcessForShard() error { +func (e *epochStartBootstrap) findSelfShardEpochStartData() (block.EpochStartShardData, error) { var epochStartData block.EpochStartShardData - found := false for _, shardData := range e.epochStartMeta.EpochStart.LastFinalizedHeaders { if shardData.ShardID == e.shardCoordinator.SelfId() { - epochStartData = shardData - found = true - break + return shardData, nil } } - if !found { - return epochStart.ErrEpochStartDataForShardNotFound + return epochStartData, epochStart.ErrEpochStartDataForShardNotFound +} + +func (e *epochStartBootstrap) requestAndProcessForShard() error { + epochStartData, err := e.findSelfShardEpochStartData() + if err != nil { + return err } - err := e.miniBlocksSyncer.SyncPendingMiniBlocks(epochStartData.PendingMiniBlockHeaders, timeToWait) + err = e.miniBlocksSyncer.SyncPendingMiniBlocks(epochStartData.PendingMiniBlockHeaders, timeToWait) if err != nil { return err } @@ -473,12 +494,14 @@ func (e *epochStartBootstrap) requestAndProcessForShard() error { return err } - shardIds := make([]uint32, 0, 2) - hashesToRequest := make([][]byte, 0, 2) - hashesToRequest = append(hashesToRequest, epochStartData.LastFinishedMetaBlock) - hashesToRequest = append(hashesToRequest, epochStartData.FirstPendingMetaBlock) - shardIds = append(shardIds, e.shardCoordinator.SelfId()) - shardIds = append(shardIds, e.shardCoordinator.SelfId()) + shardIds := []uint32{ + core.MetachainShardId, + core.MetachainShardId, + } + hashesToRequest := [][]byte{ + epochStartData.LastFinishedMetaBlock, + epochStartData.FirstPendingMetaBlock, + } e.headersSyncer.ClearFields() err = e.headersSyncer.SyncMissingHeadersByHash(shardIds, hashesToRequest, timeToWait) diff --git a/epochStart/bootstrap/shardStorageHandler.go b/epochStart/bootstrap/shardStorageHandler.go index 16a25c651a4..78ef6e3fcb5 100644 ---
a/epochStart/bootstrap/shardStorageHandler.go +++ b/epochStart/bootstrap/shardStorageHandler.go @@ -73,7 +73,7 @@ func (ssh *shardStorageHandler) SaveDataToStorage(components *ComponentsNeededFo bootStorer := ssh.storageService.GetStorer(dataRetriever.BootstrapUnit) - lastHeader, err := ssh.getAndSaveLastHeader(components.ShardHeader) + lastHeader, err := ssh.saveLastHeader(components.ShardHeader) if err != nil { return err } @@ -83,22 +83,22 @@ func (ssh *shardStorageHandler) SaveDataToStorage(components *ComponentsNeededFo return err } - pendingMiniBlocks, err := ssh.getAndSavePendingMiniBlocks(components.PendingMiniBlocks) + pendingMiniBlocks, err := ssh.groupMiniBlocksByShard(components.PendingMiniBlocks) if err != nil { return err } - triggerConfigKey, err := ssh.getAndSaveTriggerRegistry(components) + triggerConfigKey, err := ssh.saveTriggerRegistry(components) if err != nil { return err } - nodesCoordinatorConfigKey, err := ssh.getAndSaveNodesCoordinatorKey(components.EpochStartMetaBlock, components.NodesConfig) + nodesCoordinatorConfigKey, err := ssh.saveNodesCoordinatorRegistry(components.EpochStartMetaBlock, components.NodesConfig) if err != nil { return err } - lastCrossNotarizedHdrs, err := ssh.getLastCrossNotarzierHeaders(components.EpochStartMetaBlock, components.Headers) + lastCrossNotarizedHdrs, err := ssh.getLastCrossNotarizedHeaders(components.EpochStartMetaBlock, components.Headers) if err != nil { return err } @@ -140,7 +140,7 @@ func (ssh *shardStorageHandler) SaveDataToStorage(components *ComponentsNeededFo return err } - err = ssh.saveTries(components) + err = ssh.commitTries(components) if err != nil { return err } @@ -184,7 +184,7 @@ func (ssh *shardStorageHandler) getProcessMiniBlocks( return nil, epochStart.ErrEpochStartDataForShardNotFound } -func (ssh *shardStorageHandler) getLastCrossNotarzierHeaders(meta *block.MetaBlock, headers map[string]data.HeaderHandler) ([]bootstrapStorage.BootstrapHeaderInfo, error) { +func (ssh *shardStorageHandler) getLastCrossNotarizedHeaders(meta *block.MetaBlock, headers map[string]data.HeaderHandler) ([]bootstrapStorage.BootstrapHeaderInfo, error) { crossNotarizedHdrs := make([]bootstrapStorage.BootstrapHeaderInfo, 0) for _, epochStartShardData := range meta.EpochStart.LastFinalizedHeaders { if epochStartShardData.ShardID != ssh.shardCoordinator.SelfId() { @@ -197,7 +197,7 @@ func (ssh *shardStorageHandler) getLastCrossNotarzierHeaders(meta *block.MetaBlo } crossNotarizedHdrs = append(crossNotarizedHdrs, bootstrapStorage.BootstrapHeaderInfo{ - ShardId: ssh.shardCoordinator.SelfId(), + ShardId: core.MetachainShardId, Nonce: neededMeta.GetNonce(), Hash: epochStartShardData.LastFinishedMetaBlock, }) @@ -208,7 +208,7 @@ func (ssh *shardStorageHandler) getLastCrossNotarzierHeaders(meta *block.MetaBlo } crossNotarizedHdrs = append(crossNotarizedHdrs, bootstrapStorage.BootstrapHeaderInfo{ - ShardId: ssh.shardCoordinator.SelfId(), + ShardId: core.MetachainShardId, Nonce: neededMeta.GetNonce(), Hash: epochStartShardData.FirstPendingMetaBlock, }) @@ -219,7 +219,7 @@ func (ssh *shardStorageHandler) getLastCrossNotarzierHeaders(meta *block.MetaBlo return nil, epochStart.ErrEpochStartDataForShardNotFound } -func (ssh *shardStorageHandler) getAndSaveLastHeader(shardHeader *block.Header) (bootstrapStorage.BootstrapHeaderInfo, error) { +func (ssh *shardStorageHandler) saveLastHeader(shardHeader *block.Header) (bootstrapStorage.BootstrapHeaderInfo, error) { lastHeaderHash, err := core.CalculateHash(ssh.marshalizer, ssh.hasher, shardHeader) if 
err != nil { return bootstrapStorage.BootstrapHeaderInfo{}, err @@ -244,7 +244,7 @@ func (ssh *shardStorageHandler) getAndSaveLastHeader(shardHeader *block.Header) return bootstrapHdrInfo, nil } -func (ssh *shardStorageHandler) getAndSaveTriggerRegistry(components *ComponentsNeededForBootstrap) ([]byte, error) { +func (ssh *shardStorageHandler) saveTriggerRegistry(components *ComponentsNeededForBootstrap) ([]byte, error) { shardHeader := components.ShardHeader metaBlock := components.EpochStartMetaBlock @@ -264,7 +264,7 @@ func (ssh *shardStorageHandler) getAndSaveTriggerRegistry(components *Components } trigStateKey := fmt.Sprintf("initial_value_epoch%d", metaBlock.Epoch) - key := []byte(triggerRegistrykeyPrefix + trigStateKey) + key := []byte(triggerRegistryKeyPrefix + trigStateKey) triggerRegBytes, err := json.Marshal(&triggerReg) if err != nil { diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 74f77c8c23e..7e466214844 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -1292,7 +1292,7 @@ func (tpn *TestProcessorNode) CommitBlock(body data.BodyHandler, header data.Hea _ = tpn.BlockProcessor.CommitBlock(header, body) } -// GetMiniBlock returns the first *dataBlock.Header stored in datapools having the nonce provided as parameter +// GetShardHeader returns the first *dataBlock.Header stored in datapools having the nonce provided as parameter func (tpn *TestProcessorNode) GetShardHeader(nonce uint64) (*dataBlock.Header, error) { invalidCachers := tpn.DataPool == nil || tpn.DataPool.Headers() == nil if invalidCachers { diff --git a/process/block/shardblock.go b/process/block/shardblock.go index eb168c0c69e..c7d08255bb7 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -1015,7 +1015,7 @@ func (sp *shardProcessor) getLastSelfNotarizedHeaderByMetachain() (data.HeaderHa hash := sp.forkDetector.GetHighestFinalBlockHash() header, err := process.GetShardHeader(hash, sp.dataPool.Headers(), sp.marshalizer, sp.store) if err != nil { - log.Warn("getLastSelfNotarizedHeaderByMetachain.GetMiniBlock", "error", err.Error(), "hash", hash, "nonce", sp.forkDetector.GetHighestFinalBlockNonce()) + log.Warn("getLastSelfNotarizedHeaderByMetachain.GetShardHeader", "error", err.Error(), "hash", hash, "nonce", sp.forkDetector.GetHighestFinalBlockNonce()) return nil, nil } diff --git a/process/common.go b/process/common.go index 71d301a2de2..2e30c4bb698 100644 --- a/process/common.go +++ b/process/common.go @@ -32,7 +32,7 @@ func EmptyChannel(ch chan bool) int { } } -// GetMiniBlock gets the header, which is associated with the given hash, from pool or storage +// GetShardHeader gets the header, which is associated with the given hash, from pool or storage func GetShardHeader( hash []byte, headersCacher dataRetriever.HeadersPool, diff --git a/process/track/shardBlockTrack.go b/process/track/shardBlockTrack.go index 7847e92a034..7191630bf4e 100644 --- a/process/track/shardBlockTrack.go +++ b/process/track/shardBlockTrack.go @@ -120,7 +120,7 @@ func (sbt *shardBlockTrack) GetSelfHeaders(headerHandler data.HeaderHandler) []* header, err := process.GetShardHeader(shardInfo.HeaderHash, sbt.headersPool, sbt.marshalizer, sbt.store) if err != nil { - log.Trace("GetSelfHeaders.GetMiniBlock", "error", err.Error()) + log.Trace("GetSelfHeaders.GetShardHeader", "error", err.Error()) continue } diff --git a/update/sync/syncHeadersByHash.go b/update/sync/syncHeadersByHash.go index 5748deff27e..8aecad5fd51 100644 
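The comment renames in this patch (GetMiniBlock to GetShardHeader, plus the matching log message updates) all serve the same golint/godoc convention: a doc comment must begin with the exact name of the identifier it documents, otherwise tooling flags it as stale. A minimal sketch of the convention, with a placeholder signature and body rather than the real ones:

package process

// GetShardHeader gets the header associated with the given hash, from
// pool or storage. Because the comment starts with "GetShardHeader",
// godoc attaches it to the function and linters stay quiet; the old
// copy-pasted "GetMiniBlock ..." text would have been reported.
func GetShardHeader(hash []byte) ([]byte, error) {
	// placeholder body, for illustration only
	return hash, nil
}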
--- a/update/sync/syncHeadersByHash.go +++ b/update/sync/syncHeadersByHash.go @@ -4,6 +4,7 @@ import ( "sync" "time" + "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" @@ -89,6 +90,11 @@ func (m *missingHeadersByHash) SyncMissingHeadersByHash( } requestedMBs++ + if shardIDs[index] == core.MetachainShardId { + m.requestHandler.RequestMetaHeader(hash) + continue + } + m.requestHandler.RequestShardHeader(shardIDs[index], hash) } m.mutMissingHdrs.Unlock() diff --git a/update/sync/syncMiniBlocks.go b/update/sync/syncMiniBlocks.go index 6a8773f8a44..8a93b6cae74 100644 --- a/update/sync/syncMiniBlocks.go +++ b/update/sync/syncMiniBlocks.go @@ -96,7 +96,7 @@ func (p *pendingMiniBlocks) SyncPendingMiniBlocksFromMeta( return p.syncMiniBlocks(listPendingMiniBlocks, waitTime) } -// SyncPendingMiniBlocksForEpochStart will sync the miniblocks for the given epoch start meta block +// SyncPendingMiniBlocks will sync the miniblocks for the given epoch start meta block func (p *pendingMiniBlocks) SyncPendingMiniBlocks( miniBlockHeaders []block.ShardMiniBlockHeader, waitTime time.Duration, From 2f4e0391c039d2f5665667dc7553982609807166 Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Thu, 26 Mar 2020 14:25:29 +0200 Subject: [PATCH 43/61] fixes after review --- core/constants.go | 9 ++ epochStart/bootstrap/baseStorageHandler.go | 10 +- epochStart/bootstrap/metaStorageHandler.go | 7 +- epochStart/bootstrap/shardStorageHandler.go | 136 ++++++++++-------- .../simpleEpochStartMetaBlockInterceptor.go | 10 +- epochStart/metachain/trigger.go | 4 +- epochStart/metachain/triggerRegistry.go | 7 +- epochStart/shardchain/trigger.go | 4 +- epochStart/shardchain/triggerRegistry.go | 7 +- .../block/bootstrapStorage/bootstrapStorer.go | 10 +- .../indexHashedNodesCoordinatorRegistry.go | 8 +- 11 files changed, 111 insertions(+), 101 deletions(-) diff --git a/core/constants.go b/core/constants.go index e8b4df1a5d8..58c2e3a3668 100644 --- a/core/constants.go +++ b/core/constants.go @@ -296,3 +296,12 @@ const MetricP2PUnknownPeers = "erd_p2p_unknown_shard_peers" // MetricP2PNumConnectedPeersClassification is the metric for monitoring the number of connected peers split on the connection type const MetricP2PNumConnectedPeersClassification = "erd_p2p_num_connected_peers_classification" + +// HighestRoundFromBootStorage is the key for the highest round that is saved in storage +const HighestRoundFromBootStorage = "highestRoundFromBootStorage" + +// TriggerRegistryKeyPrefix is the key prefix to save the epoch start trigger registry to storage +const TriggerRegistryKeyPrefix = "epochStartTrigger_" + +// NodesCoordinatorRegistryKeyPrefix is the key prefix to save the nodes coordinator registry to storage +const NodesCoordinatorRegistryKeyPrefix = "indexHashed_" diff --git a/epochStart/bootstrap/baseStorageHandler.go b/epochStart/bootstrap/baseStorageHandler.go index 4e26b17c016..d297785d491 100644 --- a/epochStart/bootstrap/baseStorageHandler.go +++ b/epochStart/bootstrap/baseStorageHandler.go @@ -3,6 +3,7 @@ package bootstrap import ( "encoding/json" + "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/hashing" @@ -11,13 +12,6 @@ import ( "github.com/ElrondNetwork/elrond-go/sharding" ) -// HighestRoundFromBootStorage is the key for the highest round that is saved in storage -const
highestRoundFromBootStorage = "highestRoundFromBootStorage" - -const triggerRegistryKeyPrefix = "epochStartTrigger_" - -const nodesCoordinatorRegistryKeyPrefix = "indexHashed_" - // baseStorageHandler handles the storage functions for saving bootstrap data type baseStorageHandler struct { storageService dataRetriever.StorageService @@ -49,7 +43,7 @@ func (bsh *baseStorageHandler) saveNodesCoordinatorRegistry( metaBlock *block.MetaBlock, nodesConfig *sharding.NodesCoordinatorRegistry, ) ([]byte, error) { - key := append([]byte(nodesCoordinatorRegistryKeyPrefix), metaBlock.RandSeed...) + key := append([]byte(core.NodesCoordinatorRegistryKeyPrefix), metaBlock.RandSeed...) registryBytes, err := json.Marshal(nodesConfig) if err != nil { diff --git a/epochStart/bootstrap/metaStorageHandler.go b/epochStart/bootstrap/metaStorageHandler.go index 91d201e6cc8..c9b8f920642 100644 --- a/epochStart/bootstrap/metaStorageHandler.go +++ b/epochStart/bootstrap/metaStorageHandler.go @@ -108,7 +108,7 @@ func (msh *metaStorageHandler) SaveDataToStorage(components *ComponentsNeededFor return err } - err = bootStorer.Put([]byte(highestRoundFromBootStorage), bootStrapDataBytes) + err = bootStorer.Put([]byte(core.HighestRoundFromBootStorage), bootStrapDataBytes) if err != nil { return err } @@ -177,15 +177,14 @@ func (msh *metaStorageHandler) saveTriggerRegistry(components *ComponentsNeededF EpochStartMeta: metaBlock, } - trigStateKey := fmt.Sprintf("initial_value_epoch%d", metaBlock.Epoch) - key := []byte(triggerRegistryKeyPrefix + trigStateKey) + trigInternalKey := append([]byte(core.TriggerRegistryKeyPrefix), []byte(fmt.Sprint(metaBlock.Round))...) triggerRegBytes, err := json.Marshal(&triggerReg) if err != nil { return nil, err } - errPut := msh.storageService.GetStorer(dataRetriever.BootstrapUnit).Put(key, triggerRegBytes) + errPut := msh.storageService.GetStorer(dataRetriever.BootstrapUnit).Put(trigInternalKey, triggerRegBytes) if errPut != nil { return nil, errPut } diff --git a/epochStart/bootstrap/shardStorageHandler.go b/epochStart/bootstrap/shardStorageHandler.go index 78ef6e3fcb5..976e6b60b52 100644 --- a/epochStart/bootstrap/shardStorageHandler.go +++ b/epochStart/bootstrap/shardStorageHandler.go @@ -7,6 +7,7 @@ import ( "github.com/ElrondNetwork/elrond-go/config" "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/dataRetriever" @@ -78,7 +79,7 @@ func (ssh *shardStorageHandler) SaveDataToStorage(components *ComponentsNeededFo return err } - processedMiniBlocks, err := ssh.getProcessMiniBlocks(components.PendingMiniBlocks, components.EpochStartMetaBlock, components.Headers) + processedMiniBlocks, err := ssh.getProcessedMiniBlocks(components.PendingMiniBlocks, components.EpochStartMetaBlock, components.Headers) if err != nil { return err } @@ -128,7 +129,7 @@ func (ssh *shardStorageHandler) SaveDataToStorage(components *ComponentsNeededFo return err } - err = bootStorer.Put([]byte(highestRoundFromBootStorage), roundNumBytes) + err = bootStorer.Put([]byte(core.HighestRoundFromBootStorage), roundNumBytes) if err != nil { return err } @@ -148,75 +149,86 @@ func (ssh *shardStorageHandler) SaveDataToStorage(components *ComponentsNeededFo return nil } -func (ssh *shardStorageHandler) getProcessMiniBlocks( +func getEpochStartShardData(metaBlock *block.MetaBlock, shardId uint32) (block.EpochStartShardData, error) { + for _, epochStartShardData := 
range metaBlock.EpochStart.LastFinalizedHeaders { + if epochStartShardData.ShardID != shardId { + continue + } + + return epochStartShardData, nil + } + + return block.EpochStartShardData{}, epochStart.ErrEpochStartDataForShardNotFound +} + +func (ssh *shardStorageHandler) getProcessedMiniBlocks( pendingMiniBlocks map[string]*block.MiniBlock, meta *block.MetaBlock, headers map[string]data.HeaderHandler, ) ([]bootstrapStorage.MiniBlocksInMeta, error) { - processedMiniBlocks := make([]bootstrapStorage.MiniBlocksInMeta, 0) - for _, epochStartShardData := range meta.EpochStart.LastFinalizedHeaders { - if epochStartShardData.ShardID != ssh.shardCoordinator.SelfId() { - continue - } + shardData, err := getEpochStartShardData(meta, ssh.shardCoordinator.SelfId()) + if err != nil { + return nil, err + } - neededMeta, ok := headers[string(epochStartShardData.FirstPendingMetaBlock)].(*block.MetaBlock) - if !ok { - return nil, epochStart.ErrMissingHeader - } + neededMeta, ok := headers[string(shardData.FirstPendingMetaBlock)].(*block.MetaBlock) + if !ok { + return nil, epochStart.ErrMissingHeader + } - processedMbHashes := make([][]byte, 0) - miniBlocksDstMe := getAllMiniBlocksWithDst(neededMeta, ssh.shardCoordinator.SelfId()) - for hash, mb := range miniBlocksDstMe { - if _, ok := pendingMiniBlocks[hash]; ok { - continue - } + if check.IfNil(neededMeta) { + return nil, epochStart.ErrEpochStartDataForShardNotFound + } - processedMbHashes = append(processedMbHashes, mb.Hash) + processedMbHashes := make([][]byte, 0) + miniBlocksDstMe := getAllMiniBlocksWithDst(neededMeta, ssh.shardCoordinator.SelfId()) + for hash, mb := range miniBlocksDstMe { + if _, ok := pendingMiniBlocks[hash]; ok { + continue } - processedMiniBlocks = append(processedMiniBlocks, bootstrapStorage.MiniBlocksInMeta{ - MetaHash: epochStartShardData.FirstPendingMetaBlock, - MiniBlocksHashes: processedMbHashes, - }) - return processedMiniBlocks, nil + processedMbHashes = append(processedMbHashes, mb.Hash) } - return nil, epochStart.ErrEpochStartDataForShardNotFound + processedMiniBlocks := make([]bootstrapStorage.MiniBlocksInMeta, 0) + processedMiniBlocks = append(processedMiniBlocks, bootstrapStorage.MiniBlocksInMeta{ + MetaHash: shardData.FirstPendingMetaBlock, + MiniBlocksHashes: processedMbHashes, + }) + + return processedMiniBlocks, nil } func (ssh *shardStorageHandler) getLastCrossNotarizedHeaders(meta *block.MetaBlock, headers map[string]data.HeaderHandler) ([]bootstrapStorage.BootstrapHeaderInfo, error) { - crossNotarizedHdrs := make([]bootstrapStorage.BootstrapHeaderInfo, 0) - for _, epochStartShardData := range meta.EpochStart.LastFinalizedHeaders { - if epochStartShardData.ShardID != ssh.shardCoordinator.SelfId() { - continue - } - - neededMeta, ok := headers[string(epochStartShardData.LastFinishedMetaBlock)] - if !ok { - return nil, epochStart.ErrMissingHeader - } - - crossNotarizedHdrs = append(crossNotarizedHdrs, bootstrapStorage.BootstrapHeaderInfo{ - ShardId: core.MetachainShardId, - Nonce: neededMeta.GetNonce(), - Hash: epochStartShardData.LastFinishedMetaBlock, - }) + shardData, err := getEpochStartShardData(meta, ssh.shardCoordinator.SelfId()) + if err != nil { + return nil, err + } - neededMeta, ok = headers[string(epochStartShardData.LastFinishedMetaBlock)] - if !ok { - return nil, epochStart.ErrMissingHeader - } + neededMeta, ok := headers[string(shardData.LastFinishedMetaBlock)] + if !ok { + return nil, epochStart.ErrMissingHeader + } - crossNotarizedHdrs = append(crossNotarizedHdrs, 
bootstrapStorage.BootstrapHeaderInfo{ - ShardId: core.MetachainShardId, - Nonce: neededMeta.GetNonce(), - Hash: epochStartShardData.FirstPendingMetaBlock, - }) + crossNotarizedHdrs := make([]bootstrapStorage.BootstrapHeaderInfo, 0) + crossNotarizedHdrs = append(crossNotarizedHdrs, bootstrapStorage.BootstrapHeaderInfo{ + ShardId: core.MetachainShardId, + Nonce: neededMeta.GetNonce(), + Hash: shardData.LastFinishedMetaBlock, + }) - return crossNotarizedHdrs, nil + neededMeta, ok = headers[string(shardData.FirstPendingMetaBlock)] + if !ok { + return nil, epochStart.ErrMissingHeader } - return nil, epochStart.ErrEpochStartDataForShardNotFound + crossNotarizedHdrs = append(crossNotarizedHdrs, bootstrapStorage.BootstrapHeaderInfo{ + ShardId: core.MetachainShardId, + Nonce: neededMeta.GetNonce(), + Hash: shardData.FirstPendingMetaBlock, + }) + + return crossNotarizedHdrs, nil } func (ssh *shardStorageHandler) saveLastHeader(shardHeader *block.Header) (bootstrapStorage.BootstrapHeaderInfo, error) { @@ -263,38 +275,38 @@ func (ssh *shardStorageHandler) saveTriggerRegistry(components *ComponentsNeeded EpochFinalityAttestingRound: 0, } - trigStateKey := fmt.Sprintf("initial_value_epoch%d", metaBlock.Epoch) - key := []byte(triggerRegistryKeyPrefix + trigStateKey) + trigInternalKey := append([]byte(core.TriggerRegistryKeyPrefix), []byte(fmt.Sprint(shardHeader.Round))...) triggerRegBytes, err := json.Marshal(&triggerReg) if err != nil { return nil, err } - errPut := ssh.storageService.GetStorer(dataRetriever.BootstrapUnit).Put(key, triggerRegBytes) + errPut := ssh.storageService.GetStorer(dataRetriever.BootstrapUnit).Put(trigInternalKey, triggerRegBytes) if errPut != nil { return nil, errPut } - return key, nil + return trigInternalKey, nil } -func getAllMiniBlocksWithDst(m *block.MetaBlock, destId uint32) map[string]block.ShardMiniBlockHeader { +func getAllMiniBlocksWithDst(metaBlock *block.MetaBlock, destId uint32) map[string]block.ShardMiniBlockHeader { hashDst := make(map[string]block.ShardMiniBlockHeader) - for i := 0; i < len(m.ShardInfo); i++ { - if m.ShardInfo[i].ShardID == destId { + for i := 0; i < len(metaBlock.ShardInfo); i++ { + if metaBlock.ShardInfo[i].ShardID == destId { continue } - for _, val := range m.ShardInfo[i].ShardMiniBlockHeaders { + for _, val := range metaBlock.ShardInfo[i].ShardMiniBlockHeaders { if val.ReceiverShardID == destId && val.SenderShardID != destId { hashDst[string(val.Hash)] = val } } } - for _, val := range m.MiniBlockHeaders { - if val.ReceiverShardID == destId && val.SenderShardID != destId { + for _, val := range metaBlock.MiniBlockHeaders { + isCrossShardDestMe := val.ReceiverShardID == destId && val.SenderShardID != destId + if isCrossShardDestMe { shardMiniBlockHdr := block.ShardMiniBlockHeader{ Hash: val.Hash, ReceiverShardID: val.ReceiverShardID, diff --git a/epochStart/bootstrap/simpleEpochStartMetaBlockInterceptor.go b/epochStart/bootstrap/simpleEpochStartMetaBlockInterceptor.go index 2b7c7911c2c..ff051e9f13d 100644 --- a/epochStart/bootstrap/simpleEpochStartMetaBlockInterceptor.go +++ b/epochStart/bootstrap/simpleEpochStartMetaBlockInterceptor.go @@ -51,24 +51,24 @@ func (s *simpleEpochStartMetaBlockInterceptor) SetIsDataForCurrentShardVerifier( // ProcessReceivedMessage will receive the metablocks and will add them to the maps func (s *simpleEpochStartMetaBlockInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, _ p2p.PeerID) error { - var mb block.MetaBlock - err := s.marshalizer.Unmarshal(&mb, message.Data()) + metaBlock := 
&block.MetaBlock{} + err := s.marshalizer.Unmarshal(metaBlock, message.Data()) if err != nil { return err } - if !mb.IsStartOfEpochBlock() { + if !metaBlock.IsStartOfEpochBlock() { return epochStart.ErrNotEpochStartBlock } s.mutReceivedMetaBlocks.Lock() - mbHash, err := core.CalculateHash(s.marshalizer, s.hasher, &mb) + mbHash, err := core.CalculateHash(s.marshalizer, s.hasher, metaBlock) if err != nil { s.mutReceivedMetaBlocks.Unlock() return err } - s.mapReceivedMetaBlocks[string(mbHash)] = &mb + s.mapReceivedMetaBlocks[string(mbHash)] = metaBlock s.addToPeerList(string(mbHash), message.Peer()) s.mutReceivedMetaBlocks.Unlock() diff --git a/epochStart/metachain/trigger.go b/epochStart/metachain/trigger.go index ffd42284e89..2c43c35188d 100644 --- a/epochStart/metachain/trigger.go +++ b/epochStart/metachain/trigger.go @@ -95,10 +95,10 @@ func NewEpochStartTrigger(args *ArgsNewMetaEpochStartTrigger) (*trigger, error) return nil, epochStart.ErrNilMetaBlockStorage } - trigStateKey := fmt.Sprintf("initial_value_epoch%d", args.Epoch) + triggerStateKey := fmt.Sprintf("initial_value_epoch%d", args.Epoch) trigger := &trigger{ - triggerStateKey: []byte(trigStateKey), + triggerStateKey: []byte(triggerStateKey), roundsPerEpoch: uint64(args.Settings.RoundsPerEpoch), epochStartTime: args.GenesisTime, currEpochStartRound: args.EpochStartRound, diff --git a/epochStart/metachain/triggerRegistry.go b/epochStart/metachain/triggerRegistry.go index 5f64d384be1..a0b84c05c61 100644 --- a/epochStart/metachain/triggerRegistry.go +++ b/epochStart/metachain/triggerRegistry.go @@ -3,11 +3,10 @@ package metachain import ( "encoding/json" + "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/data/block" ) -const keyPrefix = "epochStartTrigger_" - // TriggerRegistry holds the data required to correctly initialize the trigger when booting from saved state type TriggerRegistry struct { Epoch uint32 @@ -21,7 +20,7 @@ type TriggerRegistry struct { // LoadState loads into trigger the saved state func (t *trigger) LoadState(key []byte) error { - trigInternalKey := append([]byte(keyPrefix), key...) + trigInternalKey := append([]byte(core.TriggerRegistryKeyPrefix), key...) log.Debug("getting start of epoch trigger state", "key", trigInternalKey) data, err := t.triggerStorage.Get(trigInternalKey) @@ -64,7 +63,7 @@ func (t *trigger) saveState(key []byte) error { return err } - trigInternalKey := append([]byte(keyPrefix), key...) + trigInternalKey := append([]byte(core.TriggerRegistryKeyPrefix), key...)
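The keyPrefix consolidation above preserves one invariant worth spelling out: saveState and LoadState must build the storage key the same way, prefix plus caller-supplied suffix, or a saved trigger state can never be found again. A small sketch of that symmetry (the literal prefix stands in for core.TriggerRegistryKeyPrefix):

package main

import "fmt"

// buildTriggerKey mirrors the append-based key construction used by both
// saveState and LoadState; []byte(prefix) allocates a fresh slice, so the
// append never aliases or mutates shared memory.
func buildTriggerKey(prefix string, suffix []byte) []byte {
	return append([]byte(prefix), suffix...)
}

func main() {
	saveKey := buildTriggerKey("epochStartTrigger_", []byte("initial_value_epoch0"))
	loadKey := buildTriggerKey("epochStartTrigger_", []byte("initial_value_epoch0"))
	fmt.Println(string(saveKey) == string(loadKey)) // true: save and load agree
}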
log.Debug("saving start of epoch trigger state", "key", trigInternalKey) return t.triggerStorage.Put(trigInternalKey, data) diff --git a/epochStart/shardchain/trigger.go b/epochStart/shardchain/trigger.go index 0221caff9a3..f69d98ae55b 100644 --- a/epochStart/shardchain/trigger.go +++ b/epochStart/shardchain/trigger.go @@ -144,10 +144,10 @@ func NewEpochStartTrigger(args *ArgsShardEpochStartTrigger) (*trigger, error) { return nil, epochStart.ErrNilShardHeaderStorage } - trigStateKey := fmt.Sprintf("initial_value_epoch%d", args.Epoch) + trigggerStateKey := fmt.Sprintf("initial_value_epoch%d", args.Epoch) newTrigger := &trigger{ - triggerStateKey: []byte(trigStateKey), + triggerStateKey: []byte(trigggerStateKey), epoch: args.Epoch, currentRoundIndex: 0, epochStartRound: 0, diff --git a/epochStart/shardchain/triggerRegistry.go b/epochStart/shardchain/triggerRegistry.go index 2bc0a6f3b3f..0ff3479a0f0 100644 --- a/epochStart/shardchain/triggerRegistry.go +++ b/epochStart/shardchain/triggerRegistry.go @@ -3,11 +3,10 @@ package shardchain import ( "encoding/json" + "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/data/block" ) -const keyPrefix = "epochStartTrigger_" - // TriggerRegistry holds the data required to correctly initialize the trigger when booting from saved state type TriggerRegistry struct { Epoch uint32 @@ -22,7 +21,7 @@ type TriggerRegistry struct { // LoadState loads into trigger the saved state func (t *trigger) LoadState(key []byte) error { - trigInternalKey := append([]byte(keyPrefix), key...) + trigInternalKey := append([]byte(core.TriggerRegistryKeyPrefix), key...) log.Debug("getting start of epoch trigger state", "key", trigInternalKey) data, err := t.triggerStorage.Get(trigInternalKey) @@ -69,7 +68,7 @@ func (t *trigger) saveState(key []byte) error { return err } - trigInternalKey := append([]byte(keyPrefix), key...) + trigInternalKey := append([]byte(core.TriggerRegistryKeyPrefix), key...) 
log.Debug("saving start of epoch trigger state", "key", trigInternalKey) return t.triggerStorage.Put(trigInternalKey, data) diff --git a/process/block/bootstrapStorage/bootstrapStorer.go b/process/block/bootstrapStorage/bootstrapStorer.go index 635cf49adbc..9170094d676 100644 --- a/process/block/bootstrapStorage/bootstrapStorer.go +++ b/process/block/bootstrapStorage/bootstrapStorer.go @@ -6,14 +6,12 @@ import ( "strconv" "sync/atomic" + "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/storage" ) -// HighestRoundFromBootStorage is the key for the highest round that is saved in storage -const highestRoundFromBootStorage = "highestRoundFromBootStorage" - // ErrNilMarshalizer signals that an operation has been attempted to or with a nil Marshalizer implementation var ErrNilMarshalizer = errors.New("nil Marshalizer") @@ -69,7 +67,7 @@ func (bs *bootstrapStorer) Put(round int64, bootData BootstrapData) error { return err } - err = bs.store.Put([]byte(highestRoundFromBootStorage), roundBytes) + err = bs.store.Put([]byte(core.HighestRoundFromBootStorage), roundBytes) if err != nil { return err } @@ -98,7 +96,7 @@ func (bs *bootstrapStorer) Get(round int64) (BootstrapData, error) { // GetHighestRound will return highest round saved in storage func (bs *bootstrapStorer) GetHighestRound() int64 { - roundBytes, err := bs.store.Get([]byte(highestRoundFromBootStorage)) + roundBytes, err := bs.store.Get([]byte(core.HighestRoundFromBootStorage)) if err != nil { return 0 } @@ -122,7 +120,7 @@ func (bs *bootstrapStorer) SaveLastRound(round int64) error { return err } - err = bs.store.Put([]byte(highestRoundFromBootStorage), roundBytes) + err = bs.store.Put([]byte(core.HighestRoundFromBootStorage), roundBytes) if err != nil { return err } diff --git a/sharding/indexHashedNodesCoordinatorRegistry.go b/sharding/indexHashedNodesCoordinatorRegistry.go index 279406d90fd..d2235fdabe0 100644 --- a/sharding/indexHashedNodesCoordinatorRegistry.go +++ b/sharding/indexHashedNodesCoordinatorRegistry.go @@ -4,9 +4,9 @@ import ( "encoding/json" "fmt" "strconv" -) -const keyPrefix = "indexHashed_" + "github.com/ElrondNetwork/elrond-go/core" +) // SerializableValidator holds the minimal data required for marshalling and un-marshalling a validator type SerializableValidator struct { @@ -28,7 +28,7 @@ type NodesCoordinatorRegistry struct { // LoadState loads the nodes coordinator state from the used boot storage func (ihgs *indexHashedNodesCoordinator) LoadState(key []byte) error { - ncInternalkey := append([]byte(keyPrefix), key...) + ncInternalkey := append([]byte(core.NodesCoordinatorRegistryKeyPrefix), key...) log.Debug("getting nodes coordinator config", "key", ncInternalkey) @@ -90,7 +90,7 @@ func (ihgs *indexHashedNodesCoordinator) saveState(key []byte) error { return err } - ncInternalkey := append([]byte(keyPrefix), key...) + ncInternalkey := append([]byte(core.NodesCoordinatorRegistryKeyPrefix), key...) 
log.Debug("saving nodes coordinator config", "key", ncInternalkey) From 3002baf9dc5195116105e84559fd1cc84605cad5 Mon Sep 17 00:00:00 2001 From: bogdan-rosianu Date: Thu, 26 Mar 2020 14:32:20 +0200 Subject: [PATCH 44/61] EN-6013: updated integration test + fixes --- .../disabled/disabledAntiFloodHandler.go | 28 +++++ .../epochStartInterceptorsContainerFactory.go | 2 + epochStart/bootstrap/fromLocalStorage.go | 1 + epochStart/bootstrap/metaStorageHandler.go | 2 +- epochStart/bootstrap/process.go | 54 +++++---- .../startInEpoch/startInEpoch_test.go | 103 +++++++++++++++++- .../indexHashedNodesCoordinatorWithRater.go | 2 +- 7 files changed, 166 insertions(+), 26 deletions(-) create mode 100644 epochStart/bootstrap/disabled/disabledAntiFloodHandler.go diff --git a/epochStart/bootstrap/disabled/disabledAntiFloodHandler.go b/epochStart/bootstrap/disabled/disabledAntiFloodHandler.go new file mode 100644 index 00000000000..6ca7b1183ed --- /dev/null +++ b/epochStart/bootstrap/disabled/disabledAntiFloodHandler.go @@ -0,0 +1,28 @@ +package disabled + +import ( + "github.com/ElrondNetwork/elrond-go/p2p" +) + +type antiFloodHandler struct { +} + +// NewAntiFloodHandler returns a new instance of antiFloodHandler +func NewAntiFloodHandler() *antiFloodHandler { + return &antiFloodHandler{} +} + +// CanProcessMessage return nil regardless of the input +func (a *antiFloodHandler) CanProcessMessage(_ p2p.MessageP2P, _ p2p.PeerID) error { + return nil +} + +// CanProcessMessageOnTopic return nil regardless of the input +func (a *antiFloodHandler) CanProcessMessageOnTopic(_ p2p.PeerID, _ string) error { + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (a *antiFloodHandler) IsInterfaceNil() bool { + return a == nil +} diff --git a/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go b/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go index 9635a111a61..0fdadddc9f2 100644 --- a/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go +++ b/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go @@ -44,6 +44,7 @@ func NewEpochStartInterceptorsContainer(args ArgsEpochStartInterceptorContainer) return disabled.NewDisabledStorer() }} txSignMarshalizer := marshal.JsonMarshalizer{} + antiFloodHandler := disabled.NewAntiFloodHandler() multiSigner := disabled.NewMultiSigner() accountsAdapter := disabled.NewAccountsAdapter() addressConverter, err := addressConverters.NewPlainAddressConverter( @@ -86,6 +87,7 @@ func NewEpochStartInterceptorsContainer(args ArgsEpochStartInterceptorContainer) ValidityAttester: validityAttester, EpochStartTrigger: epochStartTrigger, WhiteListHandler: args.WhiteListHandler, + AntifloodHandler: antiFloodHandler, } interceptorsContainerFactory, err := interceptorscontainer.NewMetaInterceptorsContainerFactory(containerFactoryArgs) diff --git a/epochStart/bootstrap/fromLocalStorage.go b/epochStart/bootstrap/fromLocalStorage.go index e98de285258..f6ea402cf6f 100644 --- a/epochStart/bootstrap/fromLocalStorage.go +++ b/epochStart/bootstrap/fromLocalStorage.go @@ -28,6 +28,7 @@ func (e *epochStartBootstrap) initializeFromLocalStorage() { if errNotCritical != nil { log.Debug("no epoch db found in storage", "error", errNotCritical.Error()) } else { + e.baseData.storageExists = true log.Debug("got last data from storage", "epoch", e.baseData.lastEpoch, "last round", e.baseData.lastRound, diff --git a/epochStart/bootstrap/metaStorageHandler.go b/epochStart/bootstrap/metaStorageHandler.go 
index c9b8f920642..1a041a1281f 100644 --- a/epochStart/bootstrap/metaStorageHandler.go +++ b/epochStart/bootstrap/metaStorageHandler.go @@ -189,7 +189,7 @@ func (msh *metaStorageHandler) saveTriggerRegistry(components *ComponentsNeededF return nil, errPut } - return key, nil + return []byte(core.TriggerRegistryKeyPrefix), nil } // IsInterfaceNil returns true if there is no value under the interface diff --git a/epochStart/bootstrap/process.go b/epochStart/bootstrap/process.go index f8ec2cdaacd..4fd2fc75dda 100644 --- a/epochStart/bootstrap/process.go +++ b/epochStart/bootstrap/process.go @@ -114,6 +114,7 @@ type baseDataInStorage struct { numberOfShards uint32 lastRound int64 lastEpoch uint32 + storageExists bool } // ArgsEpochStartBootstrap holds the arguments needed for creating an epoch start data provider component @@ -145,6 +146,7 @@ func NewEpochStartBootstrap(args ArgsEpochStartBootstrap) (*epochStartBootstrap, hasher: args.Hasher, messenger: args.Messenger, generalConfig: args.GeneralConfig, + economicsData: args.EconomicsData, genesisNodesConfig: args.GenesisNodesConfig, workingDir: args.WorkingDir, defaultEpochString: args.DefaultEpochString, @@ -161,8 +163,8 @@ func NewEpochStartBootstrap(args ArgsEpochStartBootstrap) (*epochStartBootstrap, } func (e *epochStartBootstrap) computedDurationOfEpoch() time.Duration { - return time.Duration(e.genesisNodesConfig.RoundDuration * - uint64(e.generalConfig.EpochStartConfig.RoundsPerEpoch)) + return time.Duration(e.genesisNodesConfig.RoundDuration* + uint64(e.generalConfig.EpochStartConfig.RoundsPerEpoch)) * time.Millisecond } func (e *epochStartBootstrap) isStartInEpochZero() bool { @@ -200,6 +202,12 @@ func (e *epochStartBootstrap) computeMostProbableEpoch() { } func (e *epochStartBootstrap) Bootstrap() (Parameters, error) { + var err error + e.shardCoordinator, err = sharding.NewMultiShardCoordinator(e.genesisNodesConfig.NumberOfShards(), core.MetachainShardId) + if err != nil { + return Parameters{}, err + } + if e.isStartInEpochZero() { return e.prepareEpochZero() } @@ -208,7 +216,7 @@ func (e *epochStartBootstrap) Bootstrap() (Parameters, error) { e.initializeFromLocalStorage() // TODO: make a better decision according to lastRound, lastEpoch - isCurrentEpochSaved := e.baseData.lastEpoch+1 >= e.computedEpoch + isCurrentEpochSaved := (e.baseData.lastEpoch+1 >= e.computedEpoch) && e.baseData.storageExists if isCurrentEpochSaved { parameters, err := e.prepareEpochFromStorage() if err == nil { @@ -216,12 +224,6 @@ func (e *epochStartBootstrap) Bootstrap() (Parameters, error) { } } - var err error - e.shardCoordinator, err = sharding.NewMultiShardCoordinator(e.genesisNodesConfig.NumberOfShards(), core.MetachainShardId) - if err != nil { - return Parameters{}, err - } - err = e.prepareComponentsToSyncFromNetwork() if err != nil { return Parameters{}, err @@ -247,11 +249,6 @@ func (e *epochStartBootstrap) prepareComponentsToSyncFromNetwork() error { return err } - err = e.createRequestHandler() - if err != nil { - return err - } - e.dataPool, err = factoryDataPool.NewDataPoolFromConfig( factoryDataPool.ArgsDataPool{ Config: &e.generalConfig, @@ -259,6 +256,14 @@ func (e *epochStartBootstrap) prepareComponentsToSyncFromNetwork() error { ShardCoordinator: e.shardCoordinator, }, ) + if err != nil { + return err + } + + err = e.createRequestHandler() + if err != nil { + return err + } args := factoryInterceptors.ArgsEpochStartInterceptorContainer{ Config: e.generalConfig, @@ -676,15 +681,18 @@ func (e *epochStartBootstrap) 
createRequestHandler() error { triesHolder.Put([]byte(trieFactory.PeerAccountTrie), peerTrie) resolversContainerArgs := resolverscontainer.FactoryArgs{ - ShardCoordinator: e.shardCoordinator, - Messenger: e.messenger, - Store: storageService, - Marshalizer: e.marshalizer, - DataPools: e.dataPool, - Uint64ByteSliceConverter: uint64ByteSlice.NewBigEndianConverter(), - DataPacker: dataPacker, - TriesContainer: triesHolder, - SizeCheckDelta: 0, + ShardCoordinator: e.shardCoordinator, + Messenger: e.messenger, + Store: storageService, + Marshalizer: e.marshalizer, + DataPools: e.dataPool, + Uint64ByteSliceConverter: uint64ByteSlice.NewBigEndianConverter(), + NumConcurrentResolvingJobs: 10, + DataPacker: dataPacker, + TriesContainer: triesHolder, + SizeCheckDelta: 0, + InputAntifloodHandler: disabled.NewAntiFloodHandler(), + OutputAntifloodHandler: disabled.NewAntiFloodHandler(), } resolverFactory, err := resolverscontainer.NewMetaResolversContainerFactory(resolversContainerArgs) if err != nil { diff --git a/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go b/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go index baf11333fca..dd89f5f6856 100644 --- a/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go +++ b/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go @@ -7,11 +7,14 @@ import ( "testing" "time" + "github.com/ElrondNetwork/elrond-go/config" "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap" "github.com/ElrondNetwork/elrond-go/integrationTests" "github.com/ElrondNetwork/elrond-go/integrationTests/mock" "github.com/ElrondNetwork/elrond-go/integrationTests/multiShard/endOfEpoch" "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/stretchr/testify/assert" ) func TestStartInEpochForAShardNodeInMultiShardedEnvironment(t *testing.T) { @@ -79,7 +82,7 @@ func TestStartInEpochForAShardNodeInMultiShardedEnvironment(t *testing.T) { time.Sleep(time.Second) /////////----- wait for epoch end period - epoch := uint32(2) + epoch := uint32(1) nrRoundsToPropagateMultiShard := uint64(5) for i := uint64(0); i <= (uint64(epoch)*roundsPerEpoch)+nrRoundsToPropagateMultiShard; i++ { integrationTests.UpdateRound(nodes, round) @@ -110,10 +113,42 @@ func TestStartInEpochForAShardNodeInMultiShardedEnvironment(t *testing.T) { } nodesConfig := sharding.NodesSetup{ + StartTime: time.Now().Unix(), RoundDuration: 4000, InitialNodes: getInitialNodes(nodesMap), } nodesConfig.SetNumberOfShards(uint32(numOfShards)) + + messenger := integrationTests.CreateMessengerWithKadDht(context.Background(), integrationTests.GetConnectableAddress(advertiser)) + _ = messenger.Bootstrap() + time.Sleep(integrationTests.P2pBootstrapDelay) + argsBootstrapHandler := bootstrap.ArgsEpochStartBootstrap{ + PublicKey: nodeToJoinLate.NodeKeys.Pk, + Marshalizer: integrationTests.TestMarshalizer, + Hasher: integrationTests.TestHasher, + Messenger: messenger, + GeneralConfig: getGeneralConfig(), + EconomicsData: integrationTests.CreateEconomicsData(), + SingleSigner: &mock.SignerMock{}, + BlockSingleSigner: &mock.SignerMock{}, + KeyGen: &mock.KeyGenMock{}, + BlockKeyGen: &mock.KeyGenMock{}, + GenesisNodesConfig: &nodesConfig, + PathManager: &mock.PathManagerStub{}, + WorkingDir: "test_directory", + DefaultDBPath: "test_db", + DefaultEpochString: "test_epoch", + DefaultShardString: "test_shard", + Rater: &mock.RaterMock{}, + } + epochStartBootstrap, err := 
bootstrap.NewEpochStartBootstrap(argsBootstrapHandler) + assert.Nil(t, err) + + params, err := epochStartBootstrap.Bootstrap() + assert.NoError(t, err) + assert.Equal(t, epoch, params.Epoch) + assert.Equal(t, uint32(0), params.SelfShardId) + assert.Equal(t, uint32(2), params.NumOfShards) } func convertToSlice(originalMap map[uint32][]*integrationTests.TestProcessorNode) []*integrationTests.TestProcessorNode { @@ -144,3 +179,69 @@ func getInitialNodes(nodesMap map[uint32][]*integrationTests.TestProcessorNode) return sliceToRet } + +func getGeneralConfig() config.Config { + return config.Config{ + EpochStartConfig: config.EpochStartConfig{ + MinRoundsBetweenEpochs: 5, + RoundsPerEpoch: 10, + }, + WhiteListPool: config.CacheConfig{ + Size: 10000, + Type: "LRU", + Shards: 1, + }, + StoragePruning: config.StoragePruningConfig{ + Enabled: false, + FullArchive: true, + NumEpochsToKeep: 3, + NumActivePersisters: 3, + }, + AccountsTrieStorage: config.StorageConfig{ + Cache: config.CacheConfig{ + Size: 10000, Type: "LRU", Shards: 1, + }, + DB: config.DBConfig{ + FilePath: "AccountsDB", + Type: "MemoryDB", + BatchDelaySeconds: 30, + MaxBatchSize: 6, + MaxOpenFiles: 10, + }, + }, + PeerAccountsTrieStorage: config.StorageConfig{ + Cache: config.CacheConfig{ + Size: 10000, Type: "LRU", Shards: 1, + }, + DB: config.DBConfig{ + FilePath: "AccountsDB", + Type: "MemoryDB", + BatchDelaySeconds: 30, + MaxBatchSize: 6, + MaxOpenFiles: 10, + }, + }, + TxDataPool: config.CacheConfig{ + Size: 10000, Type: "LRU", Shards: 1, + }, + UnsignedTransactionDataPool: config.CacheConfig{ + Size: 10000, Type: "LRU", Shards: 1, + }, + RewardTransactionDataPool: config.CacheConfig{ + Size: 10000, Type: "LRU", Shards: 1, + }, + HeadersPoolConfig: config.HeadersPoolConfig{ + MaxHeadersPerShard: 10, + NumElementsToRemoveOnEviction: 1, + }, + TxBlockBodyDataPool: config.CacheConfig{ + Size: 10000, Type: "LRU", Shards: 1, + }, + PeerBlockBodyDataPool: config.CacheConfig{ + Size: 10000, Type: "LRU", Shards: 1, + }, + TrieNodesDataPool: config.CacheConfig{ + Size: 10000, Type: "LRU", Shards: 1, + }, + } +} diff --git a/sharding/indexHashedNodesCoordinatorWithRater.go b/sharding/indexHashedNodesCoordinatorWithRater.go index 7b693392bb8..de4ac30cf0b 100644 --- a/sharding/indexHashedNodesCoordinatorWithRater.go +++ b/sharding/indexHashedNodesCoordinatorWithRater.go @@ -155,7 +155,7 @@ func (ihgs *indexHashedNodesCoordinatorWithRater) expandEligibleList(validators // LoadState loads the nodes coordinator state from the used boot storage func (ihgs *indexHashedNodesCoordinatorWithRater) LoadState(key []byte) error { - ncInternalkey := append([]byte(keyPrefix), key...) + ncInternalkey := append([]byte(core.NodesCoordinatorRegistryKeyPrefix), key...) 
log.Debug("getting nodes coordinator config", "key", ncInternalkey) From d96a683d6534fc6fbe8171ff57094bd954e5aa0c Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Thu, 26 Mar 2020 15:50:40 +0200 Subject: [PATCH 45/61] fixes after review --- cmd/node/main.go | 2 +- consensus/mock/nodesCoordinatorMock.go | 4 +- dataRetriever/factory/dataPoolFactory.go | 1 + .../disabled/disabledAccountsAdapter.go | 51 +++++--- .../bootstrap/disabled/disabledChainStorer.go | 117 +++++++++++------- .../disabled/disabledEpochStartTrigger.go | 28 +++-- .../disabled/disabledHeaderSigVerifier.go | 7 +- .../bootstrap/disabled/disabledMultiSigner.go | 26 ++-- .../disabled/disabledNodesCoordinator.go | 21 +++- .../bootstrap/disabled/disabledStorer.go | 90 +++----------- .../disabled/disabledValidityAttester.go | 7 +- .../epochStartInterceptorsContainerFactory.go | 7 +- epochStart/bootstrap/process.go | 10 +- .../simpleEpochStartMetaBlockInterceptor.go | 21 ++-- epochStart/bootstrap/syncEpochStartMeta.go | 11 +- epochStart/bootstrap/syncValidatorStatus.go | 13 +- epochStart/mock/nodesCoordinatorStub.go | 4 +- integrationTests/mock/nodesCoordinatorMock.go | 4 +- node/mock/nodesCoordinatorMock.go | 4 +- .../baseInterceptorsContainerFactory.go | 3 +- process/interface.go | 8 ++ process/mock/nodesCoordinatorMock.go | 4 +- .../indexHashedNodesCoordinatorRegistry.go | 7 +- .../indexHashedNodesCoordinatorWithRater.go | 2 +- sharding/interface.go | 2 +- sharding/networksharding/mock_test.go | 4 +- update/sync/syncHeadersByHash.go | 41 +++--- 27 files changed, 261 insertions(+), 238 deletions(-) diff --git a/cmd/node/main.go b/cmd/node/main.go index 395570d2fa0..f33101c5a98 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -72,7 +72,7 @@ const ( defaultStaticDbString = "Static" defaultShardString = "Shard" metachainShardName = "metachain" - secondsToWaitForP2PBootstrap = 3 + secondsToWaitForP2PBootstrap = 20 ) var ( diff --git a/consensus/mock/nodesCoordinatorMock.go b/consensus/mock/nodesCoordinatorMock.go index 63f78da50f0..1b5dc6e1b3e 100644 --- a/consensus/mock/nodesCoordinatorMock.go +++ b/consensus/mock/nodesCoordinatorMock.go @@ -11,8 +11,8 @@ type NodesCoordinatorMock struct { GetValidatorsRewardsAddressesCalled func(randomness []byte, round uint64, shardId uint32, epoch uint32) ([]string, error) } -// SaveNodesCoordinatorRegistry - -func (ncm *NodesCoordinatorMock) SaveNodesCoordinatorRegistry(_ *sharding.NodesCoordinatorRegistry) error { +// SetConfig - +func (ncm *NodesCoordinatorMock) SetConfig(_ *sharding.NodesCoordinatorRegistry) error { return nil } diff --git a/dataRetriever/factory/dataPoolFactory.go b/dataRetriever/factory/dataPoolFactory.go index cc6e02e80fb..94db829412a 100644 --- a/dataRetriever/factory/dataPoolFactory.go +++ b/dataRetriever/factory/dataPoolFactory.go @@ -24,6 +24,7 @@ type ArgsDataPool struct { ShardCoordinator sharding.Coordinator } +// TODO: unit tests // NewDataPoolFromConfig will return a new instance of a PoolsHolder func NewDataPoolFromConfig(args ArgsDataPool) (dataRetriever.PoolsHolder, error) { log.Debug("creatingDataPool from config") diff --git a/epochStart/bootstrap/disabled/disabledAccountsAdapter.go b/epochStart/bootstrap/disabled/disabledAccountsAdapter.go index 600a304d530..180f18f4d4e 100644 --- a/epochStart/bootstrap/disabled/disabledAccountsAdapter.go +++ b/epochStart/bootstrap/disabled/disabledAccountsAdapter.go @@ -13,86 +13,107 @@ func NewAccountsAdapter() *accountsAdapter { return &accountsAdapter{} } -func (a *accountsAdapter) 
GetAccountWithJournal(addressContainer state.AddressContainer) (state.AccountHandler, error) { +// GetAccountWithJournal - +func (a *accountsAdapter) GetAccountWithJournal(_ state.AddressContainer) (state.AccountHandler, error) { return nil, nil } -func (a *accountsAdapter) GetExistingAccount(addressContainer state.AddressContainer) (state.AccountHandler, error) { +// GetExistingAccount - +func (a *accountsAdapter) GetExistingAccount(_ state.AddressContainer) (state.AccountHandler, error) { return nil, nil } -func (a *accountsAdapter) HasAccount(addressContainer state.AddressContainer) (bool, error) { +// HasAccount - +func (a *accountsAdapter) HasAccount(_ state.AddressContainer) (bool, error) { return false, nil } -func (a *accountsAdapter) RemoveAccount(addressContainer state.AddressContainer) error { +// RemoveAccount - +func (a *accountsAdapter) RemoveAccount(_ state.AddressContainer) error { return nil } +// Commit - func (a *accountsAdapter) Commit() ([]byte, error) { return nil, nil } +// JournalLen - func (a *accountsAdapter) JournalLen() int { return 0 } -func (a *accountsAdapter) RevertToSnapshot(snapshot int) error { +// RevertToSnapshot - +func (a *accountsAdapter) RevertToSnapshot(_ int) error { return nil } +// RootHash - func (a *accountsAdapter) RootHash() ([]byte, error) { return nil, nil } -func (a *accountsAdapter) RecreateTrie(rootHash []byte) error { +// RecreateTrie - +func (a *accountsAdapter) RecreateTrie(_ []byte) error { return nil } -func (a *accountsAdapter) PutCode(accountHandler state.AccountHandler, code []byte) error { +// PutCode - +func (a *accountsAdapter) PutCode(_ state.AccountHandler, _ []byte) error { return nil } -func (a *accountsAdapter) RemoveCode(codeHash []byte) error { +// RemoveCode - +func (a *accountsAdapter) RemoveCode(_ []byte) error { return nil } -func (a *accountsAdapter) SaveDataTrie(accountHandler state.AccountHandler) error { +// SaveDataTrie - +func (a *accountsAdapter) SaveDataTrie(_ state.AccountHandler) error { return nil } -func (a *accountsAdapter) PruneTrie(rootHash []byte, identifier data.TriePruningIdentifier) error { +// PruneTrie - +func (a *accountsAdapter) PruneTrie(_ []byte, _ data.TriePruningIdentifier) error { return nil } -func (a *accountsAdapter) CancelPrune(rootHash []byte, identifier data.TriePruningIdentifier) { +// CancelPrune - +func (a *accountsAdapter) CancelPrune(_ []byte, _ data.TriePruningIdentifier) { return } -func (a *accountsAdapter) SnapshotState(rootHash []byte) { +// SnapshotState - +func (a *accountsAdapter) SnapshotState(_ []byte) { return } -func (a *accountsAdapter) SetStateCheckpoint(rootHash []byte) { +// SetStateCheckpoint - +func (a *accountsAdapter) SetStateCheckpoint(_ []byte) { return } +// IsPruningEnabled - func (a *accountsAdapter) IsPruningEnabled() bool { return false } +// ClosePersister - func (a *accountsAdapter) ClosePersister() error { return nil } -func (a *accountsAdapter) GetAllLeaves(rootHash []byte) (map[string][]byte, error) { +// GetAllLeaves - +func (a *accountsAdapter) GetAllLeaves(_ []byte) (map[string][]byte, error) { return nil, nil } -func (a *accountsAdapter) RecreateAllTries(rootHash []byte) (map[string]data.Trie, error) { +// RecreateAllTries - +func (a *accountsAdapter) RecreateAllTries(_ []byte) (map[string]data.Trie, error) { return nil, nil } +// IsInterfaceNil - func (a *accountsAdapter) IsInterfaceNil() bool { return a == nil } diff --git a/epochStart/bootstrap/disabled/disabledChainStorer.go b/epochStart/bootstrap/disabled/disabledChainStorer.go index 
c0b6a06328e..6e4f1a522c3 100644 --- a/epochStart/bootstrap/disabledChainStorer.go +++ b/epochStart/bootstrap/disabledChainStorer.go @@ -1,100 +1,121 @@ package disabled import ( - "errors" + "sync" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/storage" ) // ChainStorer is a mock implementation of the ChainStorer interface -type ChainStorer struct { - AddStorerCalled func(key dataRetriever.UnitType, s storage.Storer) - GetStorerCalled func(unitType dataRetriever.UnitType) storage.Storer - HasCalled func(unitType dataRetriever.UnitType, key []byte) error - GetCalled func(unitType dataRetriever.UnitType, key []byte) ([]byte, error) - PutCalled func(unitType dataRetriever.UnitType, key []byte, value []byte) error - GetAllCalled func(unitType dataRetriever.UnitType, keys [][]byte) (map[string][]byte, error) - DestroyCalled func() error - CloseAllCalled func() error +type chainStorer struct { + mapStorages map[dataRetriever.UnitType]storage.Storer + mutex sync.Mutex +} + +// NewChainStorer - +func NewChainStorer() *chainStorer { + return &chainStorer{ + mapStorages: make(map[dataRetriever.UnitType]storage.Storer), + } } // CloseAll - -func (bc *ChainStorer) CloseAll() error { - if bc.CloseAllCalled != nil { - return bc.CloseAllCalled() +func (c *chainStorer) CloseAll() error { + c.mutex.Lock() + defer c.mutex.Unlock() + + for _, store := range c.mapStorages { + err := store.Close() + if err != nil { + return err + } } return nil } // AddStorer will add a new storer to the chain map -func (bc *ChainStorer) AddStorer(key dataRetriever.UnitType, s storage.Storer) { - if bc.AddStorerCalled != nil { - bc.AddStorerCalled(key, s) - } +func (c *chainStorer) AddStorer(key dataRetriever.UnitType, s storage.Storer) { + c.mutex.Lock() + defer c.mutex.Unlock() + + c.mapStorages[key] = s } // GetStorer returns the storer from the chain map or nil if the storer was not found -func (bc *ChainStorer) GetStorer(unitType dataRetriever.UnitType) storage.Storer { - if bc.GetStorerCalled != nil { - return bc.GetStorerCalled(unitType) +func (c *chainStorer) GetStorer(unitType dataRetriever.UnitType) storage.Storer { + c.mutex.Lock() + defer c.mutex.Unlock() + + _, ok := c.mapStorages[unitType] + if !ok { + c.mapStorages[unitType] = CreateMemUnit() } - return nil + + store := c.mapStorages[unitType] + return store } // Has returns true if the key is found in the selected Unit or false otherwise // It can return an error if the provided unit type is not supported or if the // underlying implementation of the storage unit reports an error. -func (bc *ChainStorer) Has(unitType dataRetriever.UnitType, key []byte) error { - if bc.HasCalled != nil { - return bc.HasCalled(unitType, key) - } - return errors.New("key not found") +func (c *chainStorer) Has(unitType dataRetriever.UnitType, key []byte) error { + store := c.GetStorer(unitType) + return store.Has(key) } // Get returns the value for the given key if found in the selected storage unit, // nil otherwise.
It can return an error if the provided unit type is not supported // or if the storage unit underlying implementation reports an error -func (bc *ChainStorer) Get(unitType dataRetriever.UnitType, key []byte) ([]byte, error) { - if bc.GetCalled != nil { - return bc.GetCalled(unitType, key) - } - return nil, nil +func (c *chainStorer) Get(unitType dataRetriever.UnitType, key []byte) ([]byte, error) { + store := c.GetStorer(unitType) + return store.Get(key) } // Put stores the key, value pair in the selected storage unit // It can return an error if the provided unit type is not supported // or if the storage unit underlying implementation reports an error -func (bc *ChainStorer) Put(unitType dataRetriever.UnitType, key []byte, value []byte) error { - if bc.PutCalled != nil { - return bc.PutCalled(unitType, key, value) - } - return nil +func (c *chainStorer) Put(unitType dataRetriever.UnitType, key []byte, value []byte) error { + store := c.GetStorer(unitType) + return store.Put(key, value) } // GetAll gets all the elements with keys in the keys array, from the selected storage unit // It can report an error if the provided unit type is not supported, if there is a missing // key in the unit, or if the underlying implementation of the storage unit reports an error. -func (bc *ChainStorer) GetAll(unitType dataRetriever.UnitType, keys [][]byte) (map[string][]byte, error) { - if bc.GetAllCalled != nil { - return bc.GetAllCalled(unitType, keys) +func (c *chainStorer) GetAll(unitType dataRetriever.UnitType, keys [][]byte) (map[string][]byte, error) { + store := c.GetStorer(unitType) + allValues := make(map[string][]byte, len(keys)) + + for _, key := range keys { + value, err := store.Get(key) + if err != nil { + return nil, err + } + + allValues[string(key)] = value } - return nil, nil + + return allValues, nil } // Destroy removes the underlying files/resources used by the storage service -func (bc *ChainStorer) Destroy() error { - if bc.DestroyCalled != nil { - return bc.DestroyCalled() +func (c *chainStorer) Destroy() error { + c.mutex.Lock() + defer c.mutex.Unlock() + + for _, store := range c.mapStorages { + err := store.DestroyUnit() + if err != nil { + return err + } } + return nil } // IsInterfaceNil returns true if there is no value under the interface -func (bc *ChainStorer) IsInterfaceNil() bool { - if bc == nil { - return true - } - return false +func (c *chainStorer) IsInterfaceNil() bool { + return c == nil } diff --git a/epochStart/bootstrap/disabled/disabledEpochStartTrigger.go b/epochStart/bootstrap/disabled/disabledEpochStartTrigger.go index 3ab662f4bff..bc1f941b2be 100644 --- a/epochStart/bootstrap/disabled/disabledEpochStartTrigger.go +++ b/epochStart/bootstrap/disabled/disabledEpochStartTrigger.go @@ -12,53 +12,67 @@ func NewEpochStartTrigger() *epochStartTrigger { return &epochStartTrigger{} } -func (e *epochStartTrigger) Update(round uint64) { +// Update - +func (e *epochStartTrigger) Update(_ uint64) { } -func (e *epochStartTrigger) ReceivedHeader(header data.HeaderHandler) { +// ReceivedHeader - +func (e *epochStartTrigger) ReceivedHeader(_ data.HeaderHandler) { } +// IsEpochStart - func (e *epochStartTrigger) IsEpochStart() bool { return false } +// Epoch - func (e *epochStartTrigger) Epoch() uint32 { return 0 } +// EpochStartRound - func (e *epochStartTrigger) EpochStartRound() uint64 { return 0 } -func (e *epochStartTrigger) SetProcessed(header data.HeaderHandler) { +// SetProcessed - +func (e *epochStartTrigger) SetProcessed(_ data.HeaderHandler) { } -func (e 
*epochStartTrigger) RevertStateToBlock(header data.HeaderHandler) error { +// RevertStateToBlock - +func (e *epochStartTrigger) RevertStateToBlock(_ data.HeaderHandler) error { return nil } +// EpochStartMetaHdrHash - func (e *epochStartTrigger) EpochStartMetaHdrHash() []byte { return nil } +// GetSavedStateKey - func (e *epochStartTrigger) GetSavedStateKey() []byte { return nil } -func (e *epochStartTrigger) LoadState(key []byte) error { +// LoadState - +func (e *epochStartTrigger) LoadState(_ []byte) error { return nil } -func (e *epochStartTrigger) SetFinalityAttestingRound(round uint64) { +// SetFinalityAttestingRound - +func (e *epochStartTrigger) SetFinalityAttestingRound(_ uint64) { } +// EpochFinalityAttestingRound - func (e *epochStartTrigger) EpochFinalityAttestingRound() uint64 { return 0 } -func (e *epochStartTrigger) RequestEpochStartIfNeeded(interceptedHeader data.HeaderHandler) { +// RequestEpochStartIfNeeded - +func (e *epochStartTrigger) RequestEpochStartIfNeeded(_ data.HeaderHandler) { } +// IsInterfaceNil - func (e *epochStartTrigger) IsInterfaceNil() bool { return e == nil } diff --git a/epochStart/bootstrap/disabled/disabledHeaderSigVerifier.go b/epochStart/bootstrap/disabled/disabledHeaderSigVerifier.go index e7d2009a05d..317c3552efc 100644 --- a/epochStart/bootstrap/disabled/disabledHeaderSigVerifier.go +++ b/epochStart/bootstrap/disabled/disabledHeaderSigVerifier.go @@ -12,14 +12,17 @@ func NewHeaderSigVerifier() *headerSigVerifier { return &headerSigVerifier{} } -func (h *headerSigVerifier) VerifyRandSeedAndLeaderSignature(header data.HeaderHandler) error { +// VerifyRandSeedAndLeaderSignature - +func (h *headerSigVerifier) VerifyRandSeedAndLeaderSignature(_ data.HeaderHandler) error { return nil } -func (h *headerSigVerifier) VerifySignature(header data.HeaderHandler) error { +// VerifySignature - +func (h *headerSigVerifier) VerifySignature(_ data.HeaderHandler) error { return nil } +// IsInterfaceNil - func (h *headerSigVerifier) IsInterfaceNil() bool { return h == nil } diff --git a/epochStart/bootstrap/disabled/disabledMultiSigner.go b/epochStart/bootstrap/disabled/disabledMultiSigner.go index a2011eab77e..36e63c70b69 100644 --- a/epochStart/bootstrap/disabled/disabledMultiSigner.go +++ b/epochStart/bootstrap/disabled/disabledMultiSigner.go @@ -12,42 +12,52 @@ func NewMultiSigner() *multiSigner { return &multiSigner{} } -func (m *multiSigner) Create(pubKeys []string, index uint16) (crypto.MultiSigner, error) { +// Create - +func (m *multiSigner) Create(_ []string, _ uint16) (crypto.MultiSigner, error) { return nil, nil } +// SetAggregatedSig - func (m *multiSigner) SetAggregatedSig([]byte) error { return nil } -func (m *multiSigner) Verify(msg []byte, bitmap []byte) error { +// Verify - +func (m *multiSigner) Verify(_ []byte, _ []byte) error { return nil } -func (m *multiSigner) Reset(pubKeys []string, index uint16) error { +// Reset - +func (m *multiSigner) Reset(_ []string, _ uint16) error { return nil } -func (m *multiSigner) CreateSignatureShare(msg []byte, bitmap []byte) ([]byte, error) { +// CreateSignatureShare - +func (m *multiSigner) CreateSignatureShare(_ []byte, _ []byte) ([]byte, error) { return nil, nil } -func (m *multiSigner) StoreSignatureShare(index uint16, sig []byte) error { +// StoreSignatureShare - +func (m *multiSigner) StoreSignatureShare(_ uint16, _ []byte) error { return nil } -func (m *multiSigner) SignatureShare(index uint16) ([]byte, error) { +// SignatureShare - +func (m *multiSigner) SignatureShare(_ uint16) ([]byte, error) { 
return nil, nil } -func (m *multiSigner) VerifySignatureShare(index uint16, sig []byte, msg []byte, bitmap []byte) error { +// VerifySignatureShare - +func (m *multiSigner) VerifySignatureShare(_ uint16, _ []byte, _ []byte, _ []byte) error { return nil } -func (m *multiSigner) AggregateSigs(bitmap []byte) ([]byte, error) { +// AggregateSigs - +func (m *multiSigner) AggregateSigs(_ []byte) ([]byte, error) { return nil, nil } +// IsInterfaceNil - func (m *multiSigner) IsInterfaceNil() bool { return m == nil } diff --git a/epochStart/bootstrap/disabled/disabledNodesCoordinator.go b/epochStart/bootstrap/disabled/disabledNodesCoordinator.go index 4e04375d2cc..e1c0e531de6 100644 --- a/epochStart/bootstrap/disabled/disabledNodesCoordinator.go +++ b/epochStart/bootstrap/disabled/disabledNodesCoordinator.go @@ -13,6 +13,7 @@ func NewNodesCoordinator() *nodesCoordinator { return &nodesCoordinator{} } +// SetNodesPerShards - func (n *nodesCoordinator) SetNodesPerShards( _ map[uint32][]sharding.Validator, _ map[uint32][]sharding.Validator, @@ -22,71 +23,87 @@ func (n *nodesCoordinator) SetNodesPerShards( return nil } -// SaveNodesCoordinatorRegistry - -func (n *nodesCoordinator) SaveNodesCoordinatorRegistry(_ *sharding.NodesCoordinatorRegistry) error { +// SetConfig - +func (n *nodesCoordinator) SetConfig(_ *sharding.NodesCoordinatorRegistry) error { return nil } +// ComputeLeaving - func (n *nodesCoordinator) ComputeLeaving(_ []sharding.Validator) []sharding.Validator { return nil } +// GetValidatorsIndexes - func (n *nodesCoordinator) GetValidatorsIndexes(_ []string, _ uint32) ([]uint64, error) { return nil, nil } +// GetAllEligibleValidatorsPublicKeys - func (n *nodesCoordinator) GetAllEligibleValidatorsPublicKeys(_ uint32) (map[uint32][][]byte, error) { return nil, nil } +// GetAllWaitingValidatorsPublicKeys - func (n *nodesCoordinator) GetAllWaitingValidatorsPublicKeys(_ uint32) (map[uint32][][]byte, error) { return nil, nil } +// GetConsensusValidatorsPublicKeys - func (n *nodesCoordinator) GetConsensusValidatorsPublicKeys(_ []byte, _ uint64, _ uint32, _ uint32) ([]string, error) { return nil, nil } +// GetOwnPublicKey - func (n *nodesCoordinator) GetOwnPublicKey() []byte { return nil } +// ComputeConsensusGroup - func (n *nodesCoordinator) ComputeConsensusGroup(_ []byte, _ uint64, _ uint32, _ uint32) (validatorsGroup []sharding.Validator, err error) { return nil, nil } +// GetValidatorWithPublicKey - func (n *nodesCoordinator) GetValidatorWithPublicKey(_ []byte, _ uint32) (validator sharding.Validator, shardId uint32, err error) { return nil, 0, nil } +// UpdatePeersListAndIndex - func (n *nodesCoordinator) UpdatePeersListAndIndex() error { return nil } +// LoadState - func (n *nodesCoordinator) LoadState(_ []byte) error { return nil } +// GetSavedStateKey - func (n *nodesCoordinator) GetSavedStateKey() []byte { return nil } +// ShardIdForEpoch - func (n *nodesCoordinator) ShardIdForEpoch(_ uint32) (uint32, error) { return 0, nil } +// GetConsensusWhitelistedNodes - func (n *nodesCoordinator) GetConsensusWhitelistedNodes(_ uint32) (map[string]struct{}, error) { return nil, nil } +// ConsensusGroupSize - func (n *nodesCoordinator) ConsensusGroupSize(uint32) int { return 0 } +// GetNumTotalEligible - func (n *nodesCoordinator) GetNumTotalEligible() uint64 { return 0 } +// IsInterfaceNil - func (n *nodesCoordinator) IsInterfaceNil() bool { return n == nil } diff --git a/epochStart/bootstrap/disabled/disabledStorer.go b/epochStart/bootstrap/disabled/disabledStorer.go index 61d7b8d9ce0..6cc75e4c653 
100644 --- a/epochStart/bootstrap/disabled/disabledStorer.go +++ b/epochStart/bootstrap/disabled/disabledStorer.go @@ -1,87 +1,25 @@ package disabled import ( - "encoding/base64" - "errors" - "fmt" - "sync" + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/ElrondNetwork/elrond-go/storage/memorydb" + "github.com/ElrondNetwork/elrond-go/storage/storageUnit" ) -// Storer - -type Storer struct { - mut sync.Mutex - data map[string][]byte -} +const defaultMemDBSize = 1000 +const defaultNumShards = 1 -// NewDisabledStorer - -func NewDisabledStorer() *Storer { - return &Storer{ - data: make(map[string][]byte), +// CreateMemUnit creates an in-memory storer unit using maps +func CreateMemUnit() storage.Storer { + cache, err := storageUnit.NewCache(storageUnit.LRUCache, defaultMemDBSize, defaultNumShards) + if err != nil { + return nil } -} - -// Put - -func (sm *Storer) Put(key, data []byte) error { - sm.mut.Lock() - defer sm.mut.Unlock() - sm.data[string(key)] = data - - return nil -} - -// Get - -func (sm *Storer) Get(key []byte) ([]byte, error) { - sm.mut.Lock() - defer sm.mut.Unlock() - val, ok := sm.data[string(key)] - if !ok { - return nil, fmt.Errorf("key: %s not found", base64.StdEncoding.EncodeToString(key)) + unit, err := storageUnit.NewStorageUnit(cache, memorydb.New()) + if err != nil { + return nil } - return val, nil -} - -// GetFromEpoch - -func (sm *Storer) GetFromEpoch(key []byte, _ uint32) ([]byte, error) { - return sm.Get(key) -} - -// HasInEpoch - -func (sm *Storer) HasInEpoch(key []byte, epoch uint32) error { - return errors.New("not implemented") -} - -// SearchFirst - -func (sm *Storer) SearchFirst(key []byte) ([]byte, error) { - return nil, errors.New("not implemented") -} - -// Close - -func (sm *Storer) Close() error { - return nil -} - -// Has - -func (sm *Storer) Has(key []byte) error { - return errors.New("not implemented") -} - -// Remove - -func (sm *Storer) Remove(key []byte) error { - return errors.New("not implemented") -} - -// ClearCache - -func (sm *Storer) ClearCache() { -} - -// DestroyUnit - -func (sm *Storer) DestroyUnit() error { - return nil -} - -// IsInterfaceNil returns true if there is no value under the interface -func (sm *Storer) IsInterfaceNil() bool { - return sm == nil + return unit } diff --git a/epochStart/bootstrap/disabled/disabledValidityAttester.go b/epochStart/bootstrap/disabled/disabledValidityAttester.go index 6e4b43d733e..ca315b29fa5 100644 --- a/epochStart/bootstrap/disabled/disabledValidityAttester.go +++ b/epochStart/bootstrap/disabled/disabledValidityAttester.go @@ -12,14 +12,17 @@ func NewValidityAttester() *validityAttester { return &validityAttester{} } -func (v *validityAttester) CheckBlockAgainstFinal(headerHandler data.HeaderHandler) error { +// CheckBlockAgainstFinal - +func (v *validityAttester) CheckBlockAgainstFinal(_ data.HeaderHandler) error { return nil } -func (v *validityAttester) CheckBlockAgainstRounder(headerHandler data.HeaderHandler) error { +// CheckBlockAgainstRounder - +func (v *validityAttester) CheckBlockAgainstRounder(_ data.HeaderHandler) error { return nil } +// IsInterfaceNil - func (v *validityAttester) IsInterfaceNil() bool { return v == nil } diff --git a/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go b/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go index 0fdadddc9f2..fa4d31426fa 100644 --- a/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go +++ b/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go @@ 
-15,7 +15,6 @@ import ( "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/factory/interceptorscontainer" "github.com/ElrondNetwork/elrond-go/sharding" - "github.com/ElrondNetwork/elrond-go/storage" "github.com/ElrondNetwork/elrond-go/storage/timecache" "github.com/ElrondNetwork/elrond-go/update" ) @@ -40,9 +39,7 @@ type ArgsEpochStartInterceptorContainer struct { // components func NewEpochStartInterceptorsContainer(args ArgsEpochStartInterceptorContainer) (process.InterceptorsContainer, error) { nodesCoordinator := disabled.NewNodesCoordinator() - storer := disabled.ChainStorer{GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { - return disabled.NewDisabledStorer() - }} + storer := disabled.NewChainStorer() txSignMarshalizer := marshal.JsonMarshalizer{} antiFloodHandler := disabled.NewAntiFloodHandler() multiSigner := disabled.NewMultiSigner() @@ -66,7 +63,7 @@ func NewEpochStartInterceptorsContainer(args ArgsEpochStartInterceptorContainer) ShardCoordinator: args.ShardCoordinator, NodesCoordinator: nodesCoordinator, Messenger: args.Messenger, - Store: &storer, + Store: storer, ProtoMarshalizer: args.Marshalizer, TxSignMarshalizer: &txSignMarshalizer, Hasher: args.Hasher, diff --git a/epochStart/bootstrap/process.go b/epochStart/bootstrap/process.go index 4fd2fc75dda..c8e1cf08e1f 100644 --- a/epochStart/bootstrap/process.go +++ b/epochStart/bootstrap/process.go @@ -296,7 +296,7 @@ func (e *epochStartBootstrap) prepareComponentsToSyncFromNetwork() error { } syncMiniBlocksArgs := sync.ArgsNewPendingMiniBlocksSyncer{ - Storage: &disabled.Storer{}, + Storage: disabled.CreateMemUnit(), Cache: e.dataPool.MiniBlocks(), Marshalizer: e.marshalizer, RequestHandler: e.requestHandler, @@ -304,7 +304,7 @@ func (e *epochStartBootstrap) prepareComponentsToSyncFromNetwork() error { e.miniBlocksSyncer, err = sync.NewPendingMiniBlocksSyncer(syncMiniBlocksArgs) syncMissingHeadersArgs := sync.ArgsNewMissingHeadersByHashSyncer{ - Storage: &disabled.Storer{}, + Storage: disabled.CreateMemUnit(), Cache: e.dataPool.Headers(), Marshalizer: e.marshalizer, RequestHandler: e.requestHandler, @@ -656,11 +656,7 @@ func (e *epochStartBootstrap) createRequestHandler() error { return err } - storageService := &disabled.ChainStorer{ - GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { - return disabled.NewDisabledStorer() - }, - } + storageService := disabled.NewChainStorer() triesHolder := state.NewDataTriesHolder() err = e.createTrieStorageManagers() diff --git a/epochStart/bootstrap/simpleEpochStartMetaBlockInterceptor.go b/epochStart/bootstrap/simpleEpochStartMetaBlockInterceptor.go index ff051e9f13d..ea7212a4622 100644 --- a/epochStart/bootstrap/simpleEpochStartMetaBlockInterceptor.go +++ b/epochStart/bootstrap/simpleEpochStartMetaBlockInterceptor.go @@ -61,13 +61,12 @@ func (s *simpleEpochStartMetaBlockInterceptor) ProcessReceivedMessage(message p2 return epochStart.ErrNotEpochStartBlock } - s.mutReceivedMetaBlocks.Lock() mbHash, err := core.CalculateHash(s.marshalizer, s.hasher, metaBlock) if err != nil { - s.mutReceivedMetaBlocks.Unlock() return err } + s.mutReceivedMetaBlocks.Lock() s.mapReceivedMetaBlocks[string(mbHash)] = metaBlock s.addToPeerList(string(mbHash), message.Peer()) s.mutReceivedMetaBlocks.Unlock() @@ -76,20 +75,14 @@ func (s *simpleEpochStartMetaBlockInterceptor) ProcessReceivedMessage(message p2 } // this func should be called under mutex protection -func (s *simpleEpochStartMetaBlockInterceptor) addToPeerList(hash 
string, id p2p.PeerID) { - peersListForHash, ok := s.mapMetaBlocksFromPeers[hash] - if !ok { - s.mapMetaBlocksFromPeers[hash] = append(s.mapMetaBlocksFromPeers[hash], id) - return - } - - for _, peer := range peersListForHash { - if peer == id { +func (s *simpleEpochStartMetaBlockInterceptor) addToPeerList(hash string, peer p2p.PeerID) { + peersListForHash := s.mapMetaBlocksFromPeers[hash] + for _, pid := range peersListForHash { + if pid == peer { return } } - - s.mapMetaBlocksFromPeers[hash] = append(s.mapMetaBlocksFromPeers[hash], id) + s.mapMetaBlocksFromPeers[hash] = append(s.mapMetaBlocksFromPeers[hash], peer) } // GetEpochStartMetaBlock will return the metablock after it is confirmed or an error if the number of tries was exceeded @@ -102,8 +95,8 @@ func (s *simpleEpochStartMetaBlockInterceptor) GetEpochStartMetaBlock(target int log.Debug("metablock from peers", "num peers", len(peersList), "target", target, "hash", []byte(hash)) isOk := s.isMapEntryOk(peersList, hash, target, epoch) if isOk { - s.mutReceivedMetaBlocks.RUnlock() metaBlockToReturn := s.mapReceivedMetaBlocks[hash] + s.mutReceivedMetaBlocks.RUnlock() s.clearFields() return metaBlockToReturn, nil } diff --git a/epochStart/bootstrap/syncEpochStartMeta.go b/epochStart/bootstrap/syncEpochStartMeta.go index e6b8b4fc0f9..b86d29f405e 100644 --- a/epochStart/bootstrap/syncEpochStartMeta.go +++ b/epochStart/bootstrap/syncEpochStartMeta.go @@ -51,7 +51,7 @@ func NewEpochStartMetaSyncer(args ArgsNewEpochStartMetaSyncer) (*epochStartMetaS } // SyncEpochStartMeta syncs the latest epoch start metablock -func (e *epochStartMetaSyncer) SyncEpochStartMeta(waitTime time.Duration) (*block.MetaBlock, error) { +func (e *epochStartMetaSyncer) SyncEpochStartMeta(_ time.Duration) (*block.MetaBlock, error) { err := e.initTopicForEpochStartMetaBlockInterceptor() if err != nil { return nil, err @@ -66,20 +66,21 @@ func (e *epochStartMetaSyncer) SyncEpochStartMeta(waitTime time.Duration) (*bloc count := 0 for { if count > maxNumTimesToRetry { - panic("can't sync with other peers") + return nil, epochStart.ErrNumTriesExceeded } + count++ - numConnectedPeers := len(e.messenger.Peers()) + numConnectedPeers := len(e.messenger.ConnectedPeers()) threshold := int(thresholdForConsideringMetaBlockCorrect * float64(numConnectedPeers)) + mb, errConsensusNotReached := e.epochStartMetaBlockInterceptor.GetEpochStartMetaBlock(threshold, unknownEpoch) if errConsensusNotReached == nil { return mb, nil } + log.Info("consensus not reached for meta block. 
re-requesting and trying again...") e.requestEpochStartMetaBlock() } - - return nil, epochStart.ErrNumTriesExceeded } func (e *epochStartMetaSyncer) requestEpochStartMetaBlock() { diff --git a/epochStart/bootstrap/syncValidatorStatus.go b/epochStart/bootstrap/syncValidatorStatus.go index e7caca39b7a..80a7f0bf9e8 100644 --- a/epochStart/bootstrap/syncValidatorStatus.go +++ b/epochStart/bootstrap/syncValidatorStatus.go @@ -42,7 +42,7 @@ func NewSyncValidatorStatus(args ArgsNewSyncValidatorStatus) (*syncValidatorStat requestHandler: args.RequestHandler, } syncMiniBlocksArgs := sync.ArgsNewPendingMiniBlocksSyncer{ - Storage: &disabled.Storer{}, + Storage: disabled.CreateMemUnit(), Cache: s.dataPool.MiniBlocks(), Marshalizer: s.marshalizer, RequestHandler: s.requestHandler, @@ -121,9 +121,7 @@ func (s *syncValidatorStatus) NodesConfigFromMetaBlock( return nodesConfig, selfShardId, nil } -func (s *syncValidatorStatus) processNodesConfigFor( - metaBlock *block.MetaBlock, -) ([]*state.ValidatorInfo, error) { +func findPeerMiniBlockHeaders(metaBlock *block.MetaBlock) []block.ShardMiniBlockHeader { shardMBHeaders := make([]block.ShardMiniBlockHeader, 0) for _, mbHeader := range metaBlock.MiniBlockHeaders { if mbHeader.Type != block.PeerBlock { @@ -138,6 +136,13 @@ func (s *syncValidatorStatus) processNodesConfigFor( } shardMBHeaders = append(shardMBHeaders, shardMBHdr) } + return shardMBHeaders +} + +func (s *syncValidatorStatus) processNodesConfigFor( + metaBlock *block.MetaBlock, +) ([]*state.ValidatorInfo, error) { + shardMBHeaders := findPeerMiniBlockHeaders(metaBlock) s.miniBlocksSyncer.ClearFields() err := s.miniBlocksSyncer.SyncPendingMiniBlocks(shardMBHeaders, timeToWait) diff --git a/epochStart/mock/nodesCoordinatorStub.go b/epochStart/mock/nodesCoordinatorStub.go index b7c3ba77cb9..63a1ed75e83 100644 --- a/epochStart/mock/nodesCoordinatorStub.go +++ b/epochStart/mock/nodesCoordinatorStub.go @@ -13,8 +13,8 @@ type NodesCoordinatorStub struct { GetAllValidatorsPublicKeysCalled func() (map[uint32][][]byte, error) } -// SaveNodesCoordinatorRegistry - -func (ncm *NodesCoordinatorStub) SaveNodesCoordinatorRegistry(_ *sharding.NodesCoordinatorRegistry) error { +// SetConfig - +func (ncm *NodesCoordinatorStub) SetConfig(_ *sharding.NodesCoordinatorRegistry) error { return nil } diff --git a/integrationTests/mock/nodesCoordinatorMock.go b/integrationTests/mock/nodesCoordinatorMock.go index 1c933f32feb..79f49b2f89f 100644 --- a/integrationTests/mock/nodesCoordinatorMock.go +++ b/integrationTests/mock/nodesCoordinatorMock.go @@ -14,8 +14,8 @@ type NodesCoordinatorMock struct { GetAllWaitingValidatorsPublicKeysCalled func() (map[uint32][][]byte, error) } -// SaveNodesCoordinatorRegistry - -func (ncm *NodesCoordinatorMock) SaveNodesCoordinatorRegistry(_ *sharding.NodesCoordinatorRegistry) error { +// SetConfig - +func (ncm *NodesCoordinatorMock) SetConfig(_ *sharding.NodesCoordinatorRegistry) error { return nil } diff --git a/node/mock/nodesCoordinatorMock.go b/node/mock/nodesCoordinatorMock.go index a043acb54f9..8e6367568c9 100644 --- a/node/mock/nodesCoordinatorMock.go +++ b/node/mock/nodesCoordinatorMock.go @@ -12,8 +12,8 @@ type NodesCoordinatorMock struct { GetAllEligibleValidatorsPublicKeysCalled func() (map[uint32][][]byte, error) } -// SaveNodesCoordinatorRegistry - -func (ncm *NodesCoordinatorMock) SaveNodesCoordinatorRegistry(_ *sharding.NodesCoordinatorRegistry) error { +// SetConfig - +func (ncm *NodesCoordinatorMock) SetConfig(_ *sharding.NodesCoordinatorRegistry) error { return nil } diff 
--git a/process/factory/interceptorscontainer/baseInterceptorsContainerFactory.go b/process/factory/interceptorscontainer/baseInterceptorsContainerFactory.go index 1e32be067bc..69cfaeccdd7 100644 --- a/process/factory/interceptorscontainer/baseInterceptorsContainerFactory.go +++ b/process/factory/interceptorscontainer/baseInterceptorsContainerFactory.go @@ -16,7 +16,6 @@ import ( "github.com/ElrondNetwork/elrond-go/process/interceptors/processor" "github.com/ElrondNetwork/elrond-go/process/mock" "github.com/ElrondNetwork/elrond-go/sharding" - "github.com/ElrondNetwork/elrond-go/update" ) const numGoRoutines = 2000 @@ -465,7 +464,7 @@ func (bicf *baseInterceptorsContainerFactory) generateUnsignedTxsInterceptors() } // SetWhiteListHandlerToInterceptors will set the white list handler to all given interceptors -func SetWhiteListHandlerToInterceptors(containter process.InterceptorsContainer, handler update.WhiteListHandler) error { +func SetWhiteListHandlerToInterceptors(containter process.InterceptorsContainer, handler process.WhiteListHandler) error { var err error containter.Iterate(func(key string, interceptor process.Interceptor) bool { diff --git a/process/interface.go b/process/interface.go index 4bd55eec5a7..aff48872026 100644 --- a/process/interface.go +++ b/process/interface.go @@ -764,3 +764,11 @@ type InterceptedDataWhiteList interface { Add(keys [][]byte) IsInterfaceNil() bool } + +// WhiteListHandler is the interface needed to add whitelisted data +type WhiteListHandler interface { + Remove(keys [][]byte) + Add(keys [][]byte) + IsForCurrentShard(interceptedData InterceptedData) bool + IsInterfaceNil() bool +} diff --git a/process/mock/nodesCoordinatorMock.go b/process/mock/nodesCoordinatorMock.go index 593aaeb3cc3..492c1a98ba9 100644 --- a/process/mock/nodesCoordinatorMock.go +++ b/process/mock/nodesCoordinatorMock.go @@ -63,8 +63,8 @@ func NewNodesCoordinatorMock() *NodesCoordinatorMock { } } -// SaveNodesCoordinatorRegistry - -func (ncm *NodesCoordinatorMock) SaveNodesCoordinatorRegistry(_ *sharding.NodesCoordinatorRegistry) error { +// SetConfig - +func (ncm *NodesCoordinatorMock) SetConfig(_ *sharding.NodesCoordinatorRegistry) error { return nil } diff --git a/sharding/indexHashedNodesCoordinatorRegistry.go b/sharding/indexHashedNodesCoordinatorRegistry.go index d2235fdabe0..a47dcff3c5c 100644 --- a/sharding/indexHashedNodesCoordinatorRegistry.go +++ b/sharding/indexHashedNodesCoordinatorRegistry.go @@ -26,6 +26,7 @@ type NodesCoordinatorRegistry struct { CurrentEpoch uint32 `json:"currentEpoch"` } +// TODO: add a proto marshalizer for this package - replace all json marshalizers // LoadState loads the nodes coordinator state from the used boot storage func (ihgs *indexHashedNodesCoordinator) LoadState(key []byte) error { ncInternalkey := append([]byte(core.NodesCoordinatorRegistryKeyPrefix), key...)
@@ -47,7 +48,7 @@ func (ihgs *indexHashedNodesCoordinator) LoadState(key []byte) error { ihgs.savedStateKey = key ihgs.mutSavedStateKey.Unlock() - err = ihgs.SaveNodesCoordinatorRegistry(config) + err = ihgs.SetConfig(config) if err != nil { return err } @@ -55,8 +56,8 @@ func (ihgs *indexHashedNodesCoordinator) LoadState(key []byte) error { return nil } -// SaveNodesCoordinatorRegistry saves a nodesCoordinator registry -func (ihgs *indexHashedNodesCoordinator) SaveNodesCoordinatorRegistry(config *NodesCoordinatorRegistry) error { +// SetConfig saves a nodesCoordinator registry +func (ihgs *indexHashedNodesCoordinator) SetConfig(config *NodesCoordinatorRegistry) error { ihgs.currentEpoch = config.CurrentEpoch log.Debug("loaded nodes config", "current epoch", config.CurrentEpoch) diff --git a/sharding/indexHashedNodesCoordinatorWithRater.go b/sharding/indexHashedNodesCoordinatorWithRater.go index de4ac30cf0b..f1c14b25501 100644 --- a/sharding/indexHashedNodesCoordinatorWithRater.go +++ b/sharding/indexHashedNodesCoordinatorWithRater.go @@ -174,7 +174,7 @@ func (ihgs *indexHashedNodesCoordinatorWithRater) LoadState(key []byte) error { ihgs.savedStateKey = key ihgs.mutSavedStateKey.Unlock() - err = ihgs.SaveNodesCoordinatorRegistry(config) + err = ihgs.SetConfig(config) if err != nil { return err } diff --git a/sharding/interface.go b/sharding/interface.go index 2ccc08a5731..2c1f51dc79c 100644 --- a/sharding/interface.go +++ b/sharding/interface.go @@ -29,7 +29,7 @@ type NodesCoordinator interface { GetValidatorWithPublicKey(publicKey []byte, epoch uint32) (validator Validator, shardId uint32, err error) UpdatePeersListAndIndex() error LoadState(key []byte) error - SaveNodesCoordinatorRegistry(config *NodesCoordinatorRegistry) error + SetConfig(config *NodesCoordinatorRegistry) error GetSavedStateKey() []byte ShardIdForEpoch(epoch uint32) (uint32, error) GetConsensusWhitelistedNodes(epoch uint32) (map[string]struct{}, error) diff --git a/sharding/networksharding/mock_test.go b/sharding/networksharding/mock_test.go index bb0fceec566..62c461569dd 100644 --- a/sharding/networksharding/mock_test.go +++ b/sharding/networksharding/mock_test.go @@ -8,8 +8,8 @@ type nodesCoordinatorStub struct { GetValidatorWithPublicKeyCalled func(publicKey []byte, epoch uint32) (validator sharding.Validator, shardId uint32, err error) } -// SaveNodesCoordinatorRegistry - -func (ncs *nodesCoordinatorStub) SaveNodesCoordinatorRegistry(_ *sharding.NodesCoordinatorRegistry) error { +// SetConfig - +func (ncs *nodesCoordinatorStub) SetConfig(_ *sharding.NodesCoordinatorRegistry) error { return nil } diff --git a/update/sync/syncHeadersByHash.go b/update/sync/syncHeadersByHash.go index 8aecad5fd51..9e139a47c17 100644 --- a/update/sync/syncHeadersByHash.go +++ b/update/sync/syncHeadersByHash.go @@ -15,7 +15,7 @@ import ( "github.com/ElrondNetwork/elrond-go/update" ) -type missingHeadersByHash struct { +type syncHeadersByHash struct { mutMissingHdrs sync.Mutex mapHeaders map[string]data.HeaderHandler mapHashes map[string]struct{} @@ -38,7 +38,7 @@ type ArgsNewMissingHeadersByHashSyncer struct { } // NewMissingheadersByHashSyncer creates a syncer for all missing headers -func NewMissingheadersByHashSyncer(args ArgsNewMissingHeadersByHashSyncer) (*missingHeadersByHash, error) { +func NewMissingheadersByHashSyncer(args ArgsNewMissingHeadersByHashSyncer) (*syncHeadersByHash, error) { if check.IfNil(args.Storage) { return nil, dataRetriever.ErrNilHeadersStorage } @@ -52,7 +52,7 @@ func NewMissingheadersByHashSyncer(args 
ArgsNewMissingHeadersByHashSyncer) (*mis return nil, process.ErrNilRequestHandler } - p := &missingHeadersByHash{ + p := &syncHeadersByHash{ mutMissingHdrs: sync.Mutex{}, mapHeaders: make(map[string]data.HeaderHandler), mapHashes: make(map[string]struct{}), @@ -71,7 +71,7 @@ func NewMissingheadersByHashSyncer(args ArgsNewMissingHeadersByHashSyncer) (*mis } // SyncMissingHeadersByHash syncs the missing headers -func (m *missingHeadersByHash) SyncMissingHeadersByHash( +func (m *syncHeadersByHash) SyncMissingHeadersByHash( shardIDs []uint32, headersHashes [][]byte, waitTime time.Duration, @@ -100,28 +100,23 @@ func (m *missingHeadersByHash) SyncMissingHeadersByHash( m.mutMissingHdrs.Unlock() var err error - defer func() { - m.mutMissingHdrs.Lock() - m.stopSyncing = true - if err == nil { - m.syncedAll = true - } - m.mutMissingHdrs.Unlock() - }() - if requestedMBs > 0 { err = WaitFor(m.chReceivedAll, waitTime) - if err != nil { - return err - } } - return nil + m.mutMissingHdrs.Lock() + m.stopSyncing = true + if err == nil { + m.syncedAll = true + } + m.mutMissingHdrs.Unlock() + + return err } // receivedHeader is a callback function when a new header was received // it will further ask for missing transactions -func (m *missingHeadersByHash) receivedHeader(hdrHandler data.HeaderHandler, hdrHash []byte) { +func (m *syncHeadersByHash) receivedHeader(hdrHandler data.HeaderHandler, hdrHash []byte) { m.mutMissingHdrs.Lock() if m.stopSyncing { m.mutMissingHdrs.Unlock() @@ -146,7 +141,7 @@ func (m *missingHeadersByHash) receivedHeader(hdrHandler data.HeaderHandler, hdr } } -func (m *missingHeadersByHash) getHeaderFromPoolOrStorage(hash []byte) (data.HeaderHandler, bool) { +func (m *syncHeadersByHash) getHeaderFromPoolOrStorage(hash []byte) (data.HeaderHandler, bool) { header, ok := m.getHeaderFromPool(hash) if ok { return header, true @@ -166,7 +161,7 @@ func (m *missingHeadersByHash) getHeaderFromPoolOrStorage(hash []byte) (data.Hea return &hdr, true } -func (m *missingHeadersByHash) getHeaderFromPool(hash []byte) (data.HeaderHandler, bool) { +func (m *syncHeadersByHash) getHeaderFromPool(hash []byte) (data.HeaderHandler, bool) { val, err := m.pool.GetHeaderByHash(hash) if err != nil { return nil, false @@ -176,7 +171,7 @@ func (m *missingHeadersByHash) getHeaderFromPool(hash []byte) (data.HeaderHandle } // GetHeaders returns the synced headers -func (m *missingHeadersByHash) GetHeaders() (map[string]data.HeaderHandler, error) { +func (m *syncHeadersByHash) GetHeaders() (map[string]data.HeaderHandler, error) { m.mutMissingHdrs.Lock() defer m.mutMissingHdrs.Unlock() if !m.syncedAll { @@ -187,7 +182,7 @@ func (m *missingHeadersByHash) GetHeaders() (map[string]data.HeaderHandler, erro } // ClearFields will clear all the maps -func (m *missingHeadersByHash) ClearFields() { +func (m *syncHeadersByHash) ClearFields() { m.mutMissingHdrs.Lock() m.mapHashes = make(map[string]struct{}) m.mapHeaders = make(map[string]data.HeaderHandler) @@ -195,6 +190,6 @@ func (m *missingHeadersByHash) ClearFields() { } // IsInterfaceNil returns nil if underlying object is nil -func (m *missingHeadersByHash) IsInterfaceNil() bool { +func (m *syncHeadersByHash) IsInterfaceNil() bool { return m == nil } From 982dbcce1c1b44bae575d528692cd79f32bf6d0c Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Thu, 26 Mar 2020 16:51:26 +0200 Subject: [PATCH 46/61] fixes after review --- cmd/node/main.go | 3 ++ facade/elrondNodeFacade_test.go | 6 ---- facade/interface.go | 3 -- facade/mock/nodeMock.go | 6 ---- 
integrationTests/consensus/testInitializer.go | 2 +- .../interceptedBulkTx_test.go | 2 +- .../interceptedBulkUnsignedTx_test.go | 2 +- integrationTests/testInitializer.go | 2 +- node/node.go | 21 +++---------- node/node_test.go | 30 ------------------- 10 files changed, 11 insertions(+), 66 deletions(-) diff --git a/cmd/node/main.go b/cmd/node/main.go index f33101c5a98..6b5c8bd15ac 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -975,6 +975,9 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { log.LogIfError(err) } + err = networkComponents.NetMessenger.Close() + log.LogIfError(err) + log.Info("closing network connections...") err = networkComponents.NetMessenger.Close() log.LogIfError(err) diff --git a/facade/elrondNodeFacade_test.go b/facade/elrondNodeFacade_test.go index 63073b4e3e7..a6a79305112 100644 --- a/facade/elrondNodeFacade_test.go +++ b/facade/elrondNodeFacade_test.go @@ -137,9 +137,6 @@ func TestElrondFacade_StartNodeWithNodeNotNullShouldNotReturnError(t *testing.T) StartHandler: func() { started = true }, - P2PBootstrapHandler: func() error { - return nil - }, IsRunningHandler: func() bool { return started }, @@ -163,9 +160,6 @@ func TestElrondFacade_StartNodeWithErrorOnStartConsensusShouldReturnError(t *tes StartHandler: func() { started = true }, - P2PBootstrapHandler: func() error { - return nil - }, IsRunningHandler: func() bool { return started }, diff --git a/facade/interface.go b/facade/interface.go index 35869346fce..2658c55caae 100644 --- a/facade/interface.go +++ b/facade/interface.go @@ -16,9 +16,6 @@ type NodeWrapper interface { // Start will set up the Node state as running Start() - // P2PBootstrap starts the peer discovery process and peer connection filtering - P2PBootstrap() error - //IsRunning returns if the underlying node is running IsRunning() bool diff --git a/facade/mock/nodeMock.go b/facade/mock/nodeMock.go index 5c2d7d5112c..2eff5a65381 100644 --- a/facade/mock/nodeMock.go +++ b/facade/mock/nodeMock.go @@ -13,7 +13,6 @@ type NodeMock struct { AddressHandler func() (string, error) StartHandler func() StopHandler func() error - P2PBootstrapHandler func() error IsRunningHandler func() bool ConnectToAddressesHandler func([]string) error StartConsensusHandler func() error @@ -42,11 +41,6 @@ func (nm *NodeMock) Start() { nm.StartHandler() } -// P2PBootstrap - -func (nm *NodeMock) P2PBootstrap() error { - return nm.P2PBootstrapHandler() -} - // IsRunning - func (nm *NodeMock) IsRunning() bool { return nm.IsRunningHandler() diff --git a/integrationTests/consensus/testInitializer.go b/integrationTests/consensus/testInitializer.go index 81db3171147..2332870fc75 100644 --- a/integrationTests/consensus/testInitializer.go +++ b/integrationTests/consensus/testInitializer.go @@ -127,7 +127,7 @@ func displayAndStartNodes(nodes []*testNode) { hex.EncodeToString(pkBuff), ) n.node.Start() - _ = n.node.P2PBootstrap() + _ = n.mesenger.Bootstrap() } } diff --git a/integrationTests/singleShard/transaction/interceptedBulkTx/interceptedBulkTx_test.go b/integrationTests/singleShard/transaction/interceptedBulkTx/interceptedBulkTx_test.go index 0999db92d37..022b5155727 100644 --- a/integrationTests/singleShard/transaction/interceptedBulkTx/interceptedBulkTx_test.go +++ b/integrationTests/singleShard/transaction/interceptedBulkTx/interceptedBulkTx_test.go @@ -36,7 +36,7 @@ func TestNode_GenerateSendInterceptBulkTransactionsWithMessenger(t *testing.T) { _ = n.Node.Stop() }() - _ = n.Node.P2PBootstrap() + _ = n.Messenger.Bootstrap() 
time.Sleep(stepDelay) diff --git a/integrationTests/singleShard/transaction/interceptedBulkUnsignedTx/interceptedBulkUnsignedTx_test.go b/integrationTests/singleShard/transaction/interceptedBulkUnsignedTx/interceptedBulkUnsignedTx_test.go index 9e373fc30d8..48634a1dc1d 100644 --- a/integrationTests/singleShard/transaction/interceptedBulkUnsignedTx/interceptedBulkUnsignedTx_test.go +++ b/integrationTests/singleShard/transaction/interceptedBulkUnsignedTx/interceptedBulkUnsignedTx_test.go @@ -42,7 +42,7 @@ func TestNode_GenerateSendInterceptBulkUnsignedTransactionsWithMessenger(t *test _ = n.Node.Stop() }() - _ = n.Node.P2PBootstrap() + _ = n.Messenger.Bootstrap() time.Sleep(time.Second) diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index 54dbdfdd467..240fd29eee0 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -985,7 +985,7 @@ func DisplayAndStartNodes(nodes []*TestProcessorNode) { hex.EncodeToString(pkBuff), ) n.Node.Start() - _ = n.Node.P2PBootstrap() + _ = n.Messenger.Bootstrap() } fmt.Println("Delaying for node bootstrap and topic announcement...") diff --git a/node/node.go b/node/node.go index fed8c05ef28..5e42f955e7a 100644 --- a/node/node.go +++ b/node/node.go @@ -55,9 +55,9 @@ type Option func(*Node) error // Node is a structure that passes the configuration parameters and initializes // required services as requested type Node struct { - internalMarshalizer marshal.Marshalizer - vmMarshalizer marshal.Marshalizer - txSignMarshalizer marshal.Marshalizer + internalMarshalizer marshal.Marshalizer + vmMarshalizer marshal.Marshalizer + txSignMarshalizer marshal.Marshalizer ctx context.Context hasher hashing.Hasher feeHandler process.FeeHandler @@ -175,23 +175,10 @@ func (n *Node) Stop() error { if !n.IsRunning() { return nil } - err := n.messenger.Close() - if err != nil { - return err - } return nil } -// P2PBootstrap will try to connect to many peers as possible -func (n *Node) P2PBootstrap() error { - if n.messenger == nil { - return ErrNilMessenger - } - - return n.messenger.Bootstrap() -} - // CreateShardedStores instantiate sharded cachers for Transactions and Headers func (n *Node) CreateShardedStores() error { if n.shardCoordinator == nil { @@ -331,7 +318,7 @@ func (n *Node) StartConsensus() error { NodesCoordinator: n.nodesCoordinator, SyncTimer: n.syncTimer, EpochStartRegistrationHandler: n.epochStartRegistrationHandler, - AntifloodHandler: n.inputAntifloodHandler, + AntifloodHandler: n.inputAntifloodHandler, } consensusDataContainer, err := spos.NewConsensusCore( diff --git a/node/node_test.go b/node/node_test.go index dfcba70aff3..121ffa50773 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -201,46 +201,16 @@ func TestStop_NotStartedYet(t *testing.T) { assert.False(t, n.IsRunning()) } -func TestStop_MessengerCloseErrors(t *testing.T) { - errorString := "messenger close error" - messenger := getMessenger() - messenger.CloseCalled = func() error { - return errors.New(errorString) - } - n, _ := node.NewNode( - node.WithMessenger(messenger), - node.WithInternalMarshalizer(getMarshalizer(), testSizeCheckDelta), - node.WithVmMarshalizer(getMarshalizer()), - node.WithHasher(getHasher()), - ) - - n.Start() - - err := n.Stop() - assert.NotNil(t, err) - assert.Contains(t, err.Error(), errorString) -} - func TestStop(t *testing.T) { - - messengerCloseWasCalled := false - - messenger := getMessenger() - messenger.CloseCalled = func() error { - messengerCloseWasCalled = true - return nil - } n, _ 
:= node.NewNode( node.WithInternalMarshalizer(getMarshalizer(), testSizeCheckDelta), node.WithVmMarshalizer(getMarshalizer()), node.WithHasher(getHasher()), - node.WithMessenger(messenger), ) n.Start() err := n.Stop() assert.Nil(t, err) - assert.True(t, messengerCloseWasCalled) } func TestGetBalance_NoAddrConverterShouldError(t *testing.T) { From 38d5c2df6543ca22bfb01fc0d28aa03d17890948 Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Thu, 26 Mar 2020 17:59:59 +0200 Subject: [PATCH 47/61] fix edge case --- cmd/node/main.go | 23 +------ epochStart/bootstrap/interface.go | 1 + epochStart/bootstrap/metaStorageHandler.go | 2 +- epochStart/bootstrap/nodesCoordinator.go | 20 ++++++ epochStart/bootstrap/process.go | 67 +++++++++++-------- epochStart/bootstrap/syncValidatorStatus.go | 45 +++++++------ .../indexHashedNodesCoordinatorRegistry.go | 20 ++++++ 7 files changed, 108 insertions(+), 70 deletions(-) diff --git a/cmd/node/main.go b/cmd/node/main.go index 6b5c8bd15ac..615ef4a1b5b 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -1224,12 +1224,12 @@ func createNodesCoordinator( metaConsensusGroupSize := int(nodesConfig.MetaChainConsensusGroupSize) eligibleNodesInfo, waitingNodesInfo := nodesConfig.InitialNodesInfo() - eligibleValidators, errEligibleValidators := nodesInfoToValidators(eligibleNodesInfo) + eligibleValidators, errEligibleValidators := sharding.NodesInfoToValidators(eligibleNodesInfo) if errEligibleValidators != nil { return nil, errEligibleValidators } - waitingValidators, errWaitingValidators := nodesInfoToValidators(waitingNodesInfo) + waitingValidators, errWaitingValidators := sharding.NodesInfoToValidators(waitingNodesInfo) if errWaitingValidators != nil { return nil, errWaitingValidators } @@ -1280,25 +1280,6 @@ func createNodesCoordinator( return nodesCoordinator, nil } -func nodesInfoToValidators(nodesInfo map[uint32][]*sharding.NodeInfo) (map[uint32][]sharding.Validator, error) { - validatorsMap := make(map[uint32][]sharding.Validator) - - for shId, nodeInfoList := range nodesInfo { - validators := make([]sharding.Validator, 0) - for _, nodeInfo := range nodeInfoList { - validator, err := sharding.NewValidator(nodeInfo.PubKey(), nodeInfo.Address()) - if err != nil { - return nil, err - } - - validators = append(validators, validator) - } - validatorsMap[shId] = validators - } - - return validatorsMap, nil -} - func processDestinationShardAsObserver(prefsConfig config.PreferencesConfig) (uint32, error) { destShard := strings.ToLower(prefsConfig.DestinationShardAsObserver) if len(destShard) == 0 { diff --git a/epochStart/bootstrap/interface.go b/epochStart/bootstrap/interface.go index d8859bb8c1d..cb095ffc838 100644 --- a/epochStart/bootstrap/interface.go +++ b/epochStart/bootstrap/interface.go @@ -30,6 +30,7 @@ type EpochStartNodesCoordinator interface { validatorInfos []*state.ValidatorInfo, updateListInfo bool, ) (*sharding.EpochValidators, error) + ComputeNodesConfigForGenesis(genesis *sharding.NodesSetup) (*sharding.EpochValidators, error) ComputeShardForSelfPublicKey(epoch uint32, pubKey []byte) uint32 IsInterfaceNil() bool } diff --git a/epochStart/bootstrap/metaStorageHandler.go b/epochStart/bootstrap/metaStorageHandler.go index 1a041a1281f..4d80aecdc45 100644 --- a/epochStart/bootstrap/metaStorageHandler.go +++ b/epochStart/bootstrap/metaStorageHandler.go @@ -172,7 +172,7 @@ func (msh *metaStorageHandler) saveTriggerRegistry(components *ComponentsNeededF CurrentRound: metaBlock.Round, EpochFinalityAttestingRound: metaBlock.Round, CurrEpochStartRound: 
metaBlock.Round, - PrevEpochStartRound: components.PreviousEpochStartMetaBlock.Round, + PrevEpochStartRound: components.PreviousEpochStartRound, EpochStartMetaHash: hash, EpochStartMeta: metaBlock, } diff --git a/epochStart/bootstrap/nodesCoordinator.go b/epochStart/bootstrap/nodesCoordinator.go index 7de1696ae94..df765c45f07 100644 --- a/epochStart/bootstrap/nodesCoordinator.go +++ b/epochStart/bootstrap/nodesCoordinator.go @@ -59,6 +59,26 @@ func NewStartInEpochNodesCoordinator(args ArgsNewStartInEpochNodesCoordinator) ( return n, nil } +// ComputeNodesConfigForGenesis creates the actual node config for genesis +func (n *nodesCoordinator) ComputeNodesConfigForGenesis(nodesConfig *sharding.NodesSetup) (*sharding.EpochValidators, error) { + eligibleNodesInfo, waitingNodesInfo := nodesConfig.InitialNodesInfo() + + eligibleValidators, err := sharding.NodesInfoToValidators(eligibleNodesInfo) + if err != nil { + return nil, err + } + + waitingValidators, err := sharding.NodesInfoToValidators(waitingNodesInfo) + if err != nil { + return nil, err + } + + err = n.setNodesPerShards(eligibleValidators, waitingValidators, 0) + epochValidators := epochNodesConfigToEpochValidators(n.nodesConfig[0]) + + return epochValidators, nil +} + // ComputeNodesConfigFor computes the actual nodes config for the set epoch from the validator info func (n *nodesCoordinator) ComputeNodesConfigFor( metaBlock *block.MetaBlock, diff --git a/epochStart/bootstrap/process.go b/epochStart/bootstrap/process.go index c8e1cf08e1f..ed29e85d216 100644 --- a/epochStart/bootstrap/process.go +++ b/epochStart/bootstrap/process.go @@ -53,15 +53,15 @@ type Parameters struct { // ComponentsNeededForBootstrap holds the components which need to be initialized from network type ComponentsNeededForBootstrap struct { - EpochStartMetaBlock *block.MetaBlock - PreviousEpochStartMetaBlock *block.MetaBlock - ShardHeader *block.Header - NodesConfig *sharding.NodesCoordinatorRegistry - Headers map[string]data.HeaderHandler - ShardCoordinator sharding.Coordinator - UserAccountTries map[string]data.Trie - PeerAccountTries map[string]data.Trie - PendingMiniBlocks map[string]*block.MiniBlock + EpochStartMetaBlock *block.MetaBlock + PreviousEpochStartRound uint64 + ShardHeader *block.Header + NodesConfig *sharding.NodesCoordinatorRegistry + Headers map[string]data.HeaderHandler + ShardCoordinator sharding.Coordinator + UserAccountTries map[string]data.Trie + PeerAccountTries map[string]data.Trie + PendingMiniBlocks map[string]*block.MiniBlock } // epochStartBootstrap will handle requesting the needed data to start when joining late the network @@ -323,15 +323,26 @@ func (e *epochStartBootstrap) syncHeadersFrom(meta *block.MetaBlock) (map[string shardIds = append(shardIds, epochStartData.ShardID) } - hashesToRequest = append(hashesToRequest, meta.EpochStart.Economics.PrevEpochStartHash) - shardIds = append(shardIds, core.MetachainShardId) + if meta.Epoch > 1 { // no need to request genesis block + hashesToRequest = append(hashesToRequest, meta.EpochStart.Economics.PrevEpochStartHash) + shardIds = append(shardIds, core.MetachainShardId) + } err := e.headersSyncer.SyncMissingHeadersByHash(shardIds, hashesToRequest, timeToWait) if err != nil { return nil, err } - return e.headersSyncer.GetHeaders() + syncedHeaders, err := e.headersSyncer.GetHeaders() + if err != nil { + return nil, err + } + + if meta.Epoch == 1 { + syncedHeaders[string(meta.EpochStart.Economics.PrevEpochStartHash)] = &block.MetaBlock{} + } + + return syncedHeaders, nil } // Bootstrap will 
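// The epoch-1 edge case handled above is the "fix edge case" of this patch:
// when the current epoch-start block belongs to epoch 1 there is no previous
// epoch-start metablock to fetch from the network (its predecessor is the
// genesis block), so the request is skipped and the synced-headers map is
// seeded with an empty placeholder instead. A condensed, self-contained
// sketch of that guard (function name hypothetical; block and data packages
// as used in the diff):
func seedGenesisPlaceholder(meta *block.MetaBlock, synced map[string]data.HeaderHandler) {
	if meta.Epoch == 1 {
		// never requested from peers; a zero-value metablock stands in so
		// later lookups by PrevEpochStartHash do not fail
		synced[string(meta.EpochStart.Economics.PrevEpochStartHash)] = &block.MetaBlock{}
	}
}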
handle requesting and receiving the needed information the node will bootstrap from @@ -444,13 +455,13 @@ func (e *epochStartBootstrap) requestAndProcessForMeta() error { } components := &ComponentsNeededForBootstrap{ - EpochStartMetaBlock: e.epochStartMeta, - PreviousEpochStartMetaBlock: e.prevEpochStartMeta, - NodesConfig: e.nodesConfig, - Headers: e.syncedHeaders, - ShardCoordinator: e.shardCoordinator, - UserAccountTries: e.userAccountTries, - PeerAccountTries: e.peerAccountTries, + EpochStartMetaBlock: e.epochStartMeta, + PreviousEpochStartRound: e.prevEpochStartMeta.Round, + NodesConfig: e.nodesConfig, + Headers: e.syncedHeaders, + ShardCoordinator: e.shardCoordinator, + UserAccountTries: e.userAccountTries, + PeerAccountTries: e.peerAccountTries, } storageHandlerComponent, err := NewMetaStorageHandler( @@ -534,15 +545,15 @@ func (e *epochStartBootstrap) requestAndProcessForShard() error { } components := &ComponentsNeededForBootstrap{ - EpochStartMetaBlock: e.epochStartMeta, - PreviousEpochStartMetaBlock: e.prevEpochStartMeta, - ShardHeader: ownShardHdr, - NodesConfig: e.nodesConfig, - Headers: e.syncedHeaders, - ShardCoordinator: e.shardCoordinator, - UserAccountTries: e.userAccountTries, - PeerAccountTries: e.peerAccountTries, - PendingMiniBlocks: pendingMiniBlocks, + EpochStartMetaBlock: e.epochStartMeta, + PreviousEpochStartRound: e.prevEpochStartMeta.Round, + ShardHeader: ownShardHdr, + NodesConfig: e.nodesConfig, + Headers: e.syncedHeaders, + ShardCoordinator: e.shardCoordinator, + UserAccountTries: e.userAccountTries, + PeerAccountTries: e.peerAccountTries, + PendingMiniBlocks: pendingMiniBlocks, } storageHandlerComponent, err := NewShardStorageHandler( diff --git a/epochStart/bootstrap/syncValidatorStatus.go b/epochStart/bootstrap/syncValidatorStatus.go index 80a7f0bf9e8..9ee38ac14dd 100644 --- a/epochStart/bootstrap/syncValidatorStatus.go +++ b/epochStart/bootstrap/syncValidatorStatus.go @@ -16,11 +16,12 @@ import ( ) type syncValidatorStatus struct { - miniBlocksSyncer epochStart.PendingMiniBlocksSyncHandler - dataPool dataRetriever.PoolsHolder - marshalizer marshal.Marshalizer - requestHandler process.RequestHandler - nodeCoordinator EpochStartNodesCoordinator + miniBlocksSyncer epochStart.PendingMiniBlocksSyncHandler + dataPool dataRetriever.PoolsHolder + marshalizer marshal.Marshalizer + requestHandler process.RequestHandler + nodeCoordinator EpochStartNodesCoordinator + genesisNodesConfig *sharding.NodesSetup } // ArgsNewSyncValidatorStatus @@ -37,9 +38,10 @@ type ArgsNewSyncValidatorStatus struct { // NewSyncValidatorStatus creates a new validator status process component func NewSyncValidatorStatus(args ArgsNewSyncValidatorStatus) (*syncValidatorStatus, error) { s := &syncValidatorStatus{ - dataPool: args.DataPool, - marshalizer: args.Marshalizer, - requestHandler: args.RequestHandler, + dataPool: args.DataPool, + marshalizer: args.Marshalizer, + requestHandler: args.RequestHandler, + genesisNodesConfig: args.GenesisNodesConfig, } syncMiniBlocksArgs := sync.ArgsNewPendingMiniBlocksSyncer{ Storage: disabled.CreateMemUnit(), @@ -86,22 +88,12 @@ func (s *syncValidatorStatus) NodesConfigFromMetaBlock( return nil, 0, epochStart.ErrNotEpochStartBlock } - prevEpochValidatorsInfo, err := s.processNodesConfigFor(prevMetaBlock) + prevEpochsValidators, err := s.computeNodesConfigFor(prevMetaBlock, false) if err != nil { return nil, 0, err } - prevEpochsValidators, err := s.nodeCoordinator.ComputeNodesConfigFor(prevMetaBlock, prevEpochValidatorsInfo, false) - if err != nil { - 
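// The boolean handed to computeNodesConfigFor is the updateListInfo flag
// from the EpochStartNodesCoordinator interface: it stays false for the
// previous epoch (a read-only reconstruction) and is true only for the
// current epoch, where the peers' list and index in the validator accounts
// actually have to be rewritten.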
return nil, 0, err - } - - currEpochValidatorsInfo, err := s.processNodesConfigFor(currMetaBlock) - if err != nil { - return nil, 0, err - } - - currEpochsValidators, err := s.nodeCoordinator.ComputeNodesConfigFor(currMetaBlock, currEpochValidatorsInfo, true) + currEpochsValidators, err := s.computeNodesConfigFor(currMetaBlock, true) if err != nil { return nil, 0, err } @@ -121,6 +113,19 @@ func (s *syncValidatorStatus) NodesConfigFromMetaBlock( return nodesConfig, selfShardId, nil } +func (s *syncValidatorStatus) computeNodesConfigFor(metaBlock *block.MetaBlock, updateValidatorInfo bool) (*sharding.EpochValidators, error) { + if metaBlock.Epoch == 0 { + return s.nodeCoordinator.ComputeNodesConfigForGenesis(s.genesisNodesConfig) + } + + epochValidatorsInfo, err := s.processNodesConfigFor(metaBlock) + if err != nil { + return nil, err + } + + return s.nodeCoordinator.ComputeNodesConfigFor(metaBlock, epochValidatorsInfo, updateValidatorInfo) +} + func findPeerMiniBlockHeaders(metaBlock *block.MetaBlock) []block.ShardMiniBlockHeader { shardMBHeaders := make([]block.ShardMiniBlockHeader, 0) for _, mbHeader := range metaBlock.MiniBlockHeaders { diff --git a/sharding/indexHashedNodesCoordinatorRegistry.go b/sharding/indexHashedNodesCoordinatorRegistry.go index a47dcff3c5c..8859648e4bb 100644 --- a/sharding/indexHashedNodesCoordinatorRegistry.go +++ b/sharding/indexHashedNodesCoordinatorRegistry.go @@ -232,3 +232,23 @@ func serializableValidatorArrayToValidatorArray(sValidators []*SerializableValid return result, nil } + +// NodesInfoToValidators maps nodeInfo to validator interface +func NodesInfoToValidators(nodesInfo map[uint32][]*NodeInfo) (map[uint32][]Validator, error) { + validatorsMap := make(map[uint32][]Validator) + + for shId, nodeInfoList := range nodesInfo { + validators := make([]Validator, 0) + for _, nodeInfo := range nodeInfoList { + validator, err := NewValidator(nodeInfo.PubKey(), nodeInfo.Address()) + if err != nil { + return nil, err + } + + validators = append(validators, validator) + } + validatorsMap[shId] = validators + } + + return validatorsMap, nil +} From c0d7f5f3092f3252d2e024b8011d003c40e20a46 Mon Sep 17 00:00:00 2001 From: bogdan-rosianu Date: Thu, 26 Mar 2020 18:31:48 +0200 Subject: [PATCH 48/61] EN-6013: updated integration test + fixes 2 --- .../epochStartInterceptorsContainerFactory.go | 2 +- epochStart/bootstrap/fromLocalStorage.go | 2 +- epochStart/bootstrap/nodesCoordinator.go | 1 + epochStart/bootstrap/process.go | 55 ++++++++++++------- epochStart/bootstrap/syncEpochStartMeta.go | 8 +-- integrationTests/mock/raterMock.go | 6 +- .../startInEpoch/startInEpoch_test.go | 9 ++- 7 files changed, 50 insertions(+), 33 deletions(-) diff --git a/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go b/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go index fa4d31426fa..de1b159bbd1 100644 --- a/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go +++ b/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go @@ -54,7 +54,7 @@ func NewEpochStartInterceptorsContainer(args ArgsEpochStartInterceptorContainer) blackListHandler := timecache.NewTimeCache(1 * time.Minute) feeHandler := genesis.NewGenesisFeeHandler() headerSigVerifier := disabled.NewHeaderSigVerifier() - chainID := []byte("chain ID") + chainID := []byte("integration tests chain ID") // TODO: change this with the real Chain ID sizeCheckDelta := 0 validityAttester := disabled.NewValidityAttester() epochStartTrigger := 
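// sharding.NodesInfoToValidators, exported above so it can be shared between
// cmd/node and the bootstrap package, turns the per-shard genesis node info
// into Validator values. A minimal usage sketch, assuming a parsed
// *sharding.NodesSetup (the helper name is hypothetical):
func toValidatorMaps(setup *sharding.NodesSetup) (eligible, waiting map[uint32][]sharding.Validator, err error) {
	eligibleInfo, waitingInfo := setup.InitialNodesInfo()
	eligible, err = sharding.NodesInfoToValidators(eligibleInfo)
	if err != nil {
		return nil, nil, err
	}
	waiting, err = sharding.NodesInfoToValidators(waitingInfo)
	if err != nil {
		return nil, nil, err
	}
	return eligible, waiting, nil // both maps are keyed by shard ID
}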
disabled.NewEpochStartTrigger() diff --git a/epochStart/bootstrap/fromLocalStorage.go b/epochStart/bootstrap/fromLocalStorage.go index f6ea402cf6f..5f1a4c938ae 100644 --- a/epochStart/bootstrap/fromLocalStorage.go +++ b/epochStart/bootstrap/fromLocalStorage.go @@ -91,7 +91,7 @@ func (e *epochStartBootstrap) prepareEpochFromStorage() (Parameters, error) { return Parameters{}, err } - err = e.prepareComponentsToSyncFromNetwork() + err = e.createSyncers() if err != nil { return Parameters{}, err } diff --git a/epochStart/bootstrap/nodesCoordinator.go b/epochStart/bootstrap/nodesCoordinator.go index df765c45f07..399647d4da0 100644 --- a/epochStart/bootstrap/nodesCoordinator.go +++ b/epochStart/bootstrap/nodesCoordinator.go @@ -52,6 +52,7 @@ func NewStartInEpochNodesCoordinator(args ArgsNewStartInEpochNodesCoordinator) ( shardConsensusGroupSize: args.ShardConsensusGroupSize, metaConsensusGroupSize: args.MetaConsensusGroupSize, nodesConfig: make(map[uint32]*epochNodesConfig), + numShards: make(map[uint32]uint32), validatorAccountsDB: args.ValidatorAccountsDB, adrConv: args.AdrConv, } diff --git a/epochStart/bootstrap/process.go b/epochStart/bootstrap/process.go index ed29e85d216..64083932fa1 100644 --- a/epochStart/bootstrap/process.go +++ b/epochStart/bootstrap/process.go @@ -42,7 +42,7 @@ import ( var log = logger.GetOrCreate("epochStart/bootstrap") -const timeToWait = 5 * time.Second +const timeToWait = 8 * time.Second // BootstrapParameters type Parameters struct { @@ -149,6 +149,7 @@ func NewEpochStartBootstrap(args ArgsEpochStartBootstrap) (*epochStartBootstrap, economicsData: args.EconomicsData, genesisNodesConfig: args.GenesisNodesConfig, workingDir: args.WorkingDir, + pathManager: args.PathManager, defaultEpochString: args.DefaultEpochString, defaultDBPath: args.DefaultEpochString, defaultShardString: args.DefaultShardString, @@ -225,6 +226,16 @@ func (e *epochStartBootstrap) Bootstrap() (Parameters, error) { } err = e.prepareComponentsToSyncFromNetwork() + if err != nil { + return Parameters{}, nil + } + + e.epochStartMeta, err = e.epochStartMetaBlockSyncer.SyncEpochStartMeta(timeToWait) + if err != nil { + return Parameters{}, err + } + + err = e.createSyncers() if err != nil { return Parameters{}, err } @@ -233,8 +244,6 @@ func (e *epochStartBootstrap) Bootstrap() (Parameters, error) { } func (e *epochStartBootstrap) prepareComponentsToSyncFromNetwork() error { - var err error - whiteListCache, err := storageUnit.NewCache( storageUnit.CacheType(e.generalConfig.WhiteListPool.Type), e.generalConfig.WhiteListPool.Size, @@ -265,6 +274,23 @@ func (e *epochStartBootstrap) prepareComponentsToSyncFromNetwork() error { return err } + argsEpochStartSyncer := ArgsNewEpochStartMetaSyncer{ + RequestHandler: e.requestHandler, + Messenger: e.messenger, + Marshalizer: e.marshalizer, + Hasher: e.hasher, + } + e.epochStartMetaBlockSyncer, err = NewEpochStartMetaSyncer(argsEpochStartSyncer) + if err != nil { + return err + } + + return nil +} + +func (e *epochStartBootstrap) createSyncers() error { + var err error + args := factoryInterceptors.ArgsEpochStartInterceptorContainer{ Config: e.generalConfig, ShardCoordinator: e.shardCoordinator, @@ -284,17 +310,6 @@ func (e *epochStartBootstrap) prepareComponentsToSyncFromNetwork() error { return err } - argsEpochStartSyncer := ArgsNewEpochStartMetaSyncer{ - RequestHandler: e.requestHandler, - Messenger: e.messenger, - Marshalizer: e.marshalizer, - Hasher: e.hasher, - } - e.epochStartMetaBlockSyncer, err = NewEpochStartMetaSyncer(argsEpochStartSyncer) - if 
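// Note: the guard added after prepareComponentsToSyncFromNetwork in
// Bootstrap returns `Parameters{}, nil`, which silently swallows the
// failure and lets the caller continue with zero-value parameters.
// Presumably the intent is to propagate the error, as every neighbouring
// guard does:
//
//	err = e.prepareComponentsToSyncFromNetwork()
//	if err != nil {
//		return Parameters{}, err
//	}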
err != nil { - return err - } - syncMiniBlocksArgs := sync.ArgsNewPendingMiniBlocksSyncer{ Storage: disabled.CreateMemUnit(), Cache: e.dataPool.MiniBlocks(), @@ -328,7 +343,7 @@ func (e *epochStartBootstrap) syncHeadersFrom(meta *block.MetaBlock) (map[string shardIds = append(shardIds, core.MetachainShardId) } - err := e.headersSyncer.SyncMissingHeadersByHash(shardIds, hashesToRequest, timeToWait) + err := e.headersSyncer.SyncMissingHeadersByHash(shardIds, hashesToRequest, timeToWait*5) if err != nil { return nil, err } @@ -348,11 +363,6 @@ func (e *epochStartBootstrap) syncHeadersFrom(meta *block.MetaBlock) (map[string // Bootstrap will handle requesting and receiving the needed information the node will bootstrap from func (e *epochStartBootstrap) requestAndProcessing() (Parameters, error) { var err error - e.epochStartMeta, err = e.epochStartMetaBlockSyncer.SyncEpochStartMeta(timeToWait) - if err != nil { - return Parameters{}, err - } - e.baseData.numberOfShards = uint32(len(e.epochStartMeta.EpochStart.LastFinalizedHeaders)) e.baseData.lastEpoch = e.epochStartMeta.Epoch @@ -360,6 +370,7 @@ func (e *epochStartBootstrap) requestAndProcessing() (Parameters, error) { if err != nil { return Parameters{}, err } + log.Debug("start in epoch bootstrap: got shard headers and previous epoch start meta block") prevEpochStartMetaHash := e.epochStartMeta.EpochStart.Economics.PrevEpochStartHash prevEpochStartMeta, ok := e.syncedHeaders[string(prevEpochStartMetaHash)].(*block.MetaBlock) @@ -388,6 +399,9 @@ func (e *epochStartBootstrap) requestAndProcessing() (Parameters, error) { return Parameters{}, err } + if e.baseData.shardId == core.AllShardId { + e.baseData.shardId = 0 // TODO: replace with preferred shard ID as observer + } e.shardCoordinator, err = sharding.NewMultiShardCoordinator(e.baseData.numberOfShards, e.baseData.shardId) if err != nil { return Parameters{}, err @@ -556,6 +570,7 @@ func (e *epochStartBootstrap) requestAndProcessForShard() error { PendingMiniBlocks: pendingMiniBlocks, } + log.Info("reached maximum tested point from integration test") storageHandlerComponent, err := NewShardStorageHandler( e.generalConfig, e.shardCoordinator, diff --git a/epochStart/bootstrap/syncEpochStartMeta.go b/epochStart/bootstrap/syncEpochStartMeta.go index b86d29f405e..260b17c5e4d 100644 --- a/epochStart/bootstrap/syncEpochStartMeta.go +++ b/epochStart/bootstrap/syncEpochStartMeta.go @@ -100,13 +100,7 @@ func (e *epochStartMetaSyncer) resetTopicsAndInterceptors() { } func (e *epochStartMetaSyncer) initTopicForEpochStartMetaBlockInterceptor() error { - err := e.messenger.UnregisterMessageProcessor(factory.MetachainBlocksTopic) - if err != nil { - log.Info("error unregistering message processor", "error", err) - return err - } - - err = e.messenger.CreateTopic(factory.MetachainBlocksTopic, true) + err := e.messenger.CreateTopic(factory.MetachainBlocksTopic, true) if err != nil { log.Info("error registering message processor", "error", err) return err diff --git a/integrationTests/mock/raterMock.go b/integrationTests/mock/raterMock.go index 2048f631eb8..7c09cb2eae9 100644 --- a/integrationTests/mock/raterMock.go +++ b/integrationTests/mock/raterMock.go @@ -60,7 +60,11 @@ func (rm *RaterMock) ComputeDecreaseValidator(rating uint32) uint32 { // GetChance - func (rm *RaterMock) GetChance(rating uint32) uint32 { - return rm.GetChanceCalled(rating) + if rm.GetChanceCalled != nil { + return rm.GetChanceCalled(rating) + } + + return 80 } // SetRatingReader - diff --git 
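// The RaterMock change above shows the nil-guard stub pattern used by the
// integration-test mocks: dispatch to the injected closure only when a test
// has actually set one, otherwise fall back to a sane default instead of
// panicking on a nil function value. The same shape as a standalone sketch:
func chanceOrDefault(getChanceCalled func(uint32) uint32, rating uint32) uint32 {
	if getChanceCalled != nil {
		return getChanceCalled(rating) // test-provided behaviour
	}
	return 80 // default chance when the test does not override it
}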
a/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go b/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go index dd89f5f6856..f2f767f2c2d 100644 --- a/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go +++ b/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go @@ -82,7 +82,7 @@ func TestStartInEpochForAShardNodeInMultiShardedEnvironment(t *testing.T) { time.Sleep(time.Second) /////////----- wait for epoch end period - epoch := uint32(1) + epoch := uint32(2) nrRoundsToPropagateMultiShard := uint64(5) for i := uint64(0); i <= (uint64(epoch)*roundsPerEpoch)+nrRoundsToPropagateMultiShard; i++ { integrationTests.UpdateRound(nodes, round) @@ -112,8 +112,11 @@ func TestStartInEpochForAShardNodeInMultiShardedEnvironment(t *testing.T) { _ = dataRetriever.SetEpochHandlerToHdrResolver(node.ResolversContainer, epochHandler) } + generalConfig := getGeneralConfig() + roundDurationMillis := 4000 + epochDurationMillis := generalConfig.EpochStartConfig.RoundsPerEpoch * int64(roundDurationMillis) nodesConfig := sharding.NodesSetup{ - StartTime: time.Now().Unix(), + StartTime: time.Now().Add(-time.Duration(epochDurationMillis) * time.Millisecond).Unix(), RoundDuration: 4000, InitialNodes: getInitialNodes(nodesMap), } @@ -231,7 +234,7 @@ func getGeneralConfig() config.Config { Size: 10000, Type: "LRU", Shards: 1, }, HeadersPoolConfig: config.HeadersPoolConfig{ - MaxHeadersPerShard: 10, + MaxHeadersPerShard: 100, NumElementsToRemoveOnEviction: 1, }, TxBlockBodyDataPool: config.CacheConfig{ From ab3a0269e6cfb15968494d20fdee03ecd4d6932d Mon Sep 17 00:00:00 2001 From: bogdan-rosianu Date: Thu, 26 Mar 2020 19:22:44 +0200 Subject: [PATCH 49/61] EN-6013: fixed 2 hardcoded fields --- cmd/node/main.go | 35 ++--- .../epochStartInterceptorsContainerFactory.go | 4 +- epochStart/bootstrap/process.go | 121 ++++++++++-------- .../startInEpoch/startInEpoch_test.go | 36 +++--- 4 files changed, 107 insertions(+), 89 deletions(-) diff --git a/cmd/node/main.go b/cmd/node/main.go index 615ef4a1b5b..fe4252da803 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -607,23 +607,24 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { } epochStartBootstrapArgs := bootstrap.ArgsEpochStartBootstrap{ - PublicKey: pubKey, - Marshalizer: coreComponents.InternalMarshalizer, - Hasher: coreComponents.Hasher, - Messenger: networkComponents.NetMessenger, - GeneralConfig: *generalConfig, - EconomicsData: economicsData, - SingleSigner: cryptoComponents.TxSingleSigner, - BlockSingleSigner: cryptoComponents.SingleSigner, - KeyGen: cryptoComponents.TxSignKeyGen, - BlockKeyGen: cryptoComponents.BlockSignKeyGen, - GenesisNodesConfig: genesisNodesConfig, - PathManager: pathManager, - WorkingDir: workingDir, - DefaultDBPath: defaultDBPath, - DefaultEpochString: defaultEpochString, - DefaultShardString: defaultShardString, - Rater: rater, + PublicKey: pubKey, + Marshalizer: coreComponents.InternalMarshalizer, + Hasher: coreComponents.Hasher, + Messenger: networkComponents.NetMessenger, + GeneralConfig: *generalConfig, + EconomicsData: economicsData, + SingleSigner: cryptoComponents.TxSingleSigner, + BlockSingleSigner: cryptoComponents.SingleSigner, + KeyGen: cryptoComponents.TxSignKeyGen, + BlockKeyGen: cryptoComponents.BlockSignKeyGen, + GenesisNodesConfig: genesisNodesConfig, + PathManager: pathManager, + WorkingDir: workingDir, + DefaultDBPath: defaultDBPath, + DefaultEpochString: defaultEpochString, + DefaultShardString: 
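// Backdating StartTime above is what places the late-joining node inside a
// later epoch rather than epoch 0: the bootstrap component derives the
// current epoch from the time elapsed since genesis, so shifting genesis one
// epoch's worth of milliseconds into the past guarantees that at least one
// epoch boundary has already passed. A worked sketch (requires "time";
// numbers as in the test):
func backdatedGenesisUnix(roundsPerEpoch int64) int64 {
	const roundDurationMillis = 4000
	epochDurationMillis := roundsPerEpoch * roundDurationMillis
	// e.g. roundsPerEpoch = 100 -> genesis moves 400 000 ms (400 s) back
	return time.Now().Add(-time.Duration(epochDurationMillis) * time.Millisecond).Unix()
}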
defaultShardString, + Rater: rater, + DestinationShardAsObserver: ctx.GlobalString(destinationShardAsObserver.Name), } bootstrapper, err := bootstrap.NewEpochStartBootstrap(epochStartBootstrapArgs) if err != nil { diff --git a/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go b/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go index de1b159bbd1..78c28554e5d 100644 --- a/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go +++ b/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go @@ -33,6 +33,7 @@ type ArgsEpochStartInterceptorContainer struct { KeyGen crypto.KeyGenerator BlockKeyGen crypto.KeyGenerator WhiteListHandler update.WhiteListHandler + ChainID []byte } // NewEpochStartInterceptorsContainer will return a real interceptors container factory, but will many disabled @@ -54,7 +55,6 @@ func NewEpochStartInterceptorsContainer(args ArgsEpochStartInterceptorContainer) blackListHandler := timecache.NewTimeCache(1 * time.Minute) feeHandler := genesis.NewGenesisFeeHandler() headerSigVerifier := disabled.NewHeaderSigVerifier() - chainID := []byte("integration tests chain ID") // TODO: change this with the real Chain ID sizeCheckDelta := 0 validityAttester := disabled.NewValidityAttester() epochStartTrigger := disabled.NewEpochStartTrigger() @@ -79,7 +79,7 @@ func NewEpochStartInterceptorsContainer(args ArgsEpochStartInterceptorContainer) TxFeeHandler: feeHandler, BlackList: blackListHandler, HeaderSigVerifier: headerSigVerifier, - ChainID: chainID, + ChainID: args.ChainID, SizeCheckDelta: uint32(sizeCheckDelta), ValidityAttester: validityAttester, EpochStartTrigger: epochStartTrigger, diff --git a/epochStart/bootstrap/process.go b/epochStart/bootstrap/process.go index 64083932fa1..d96515c5a4f 100644 --- a/epochStart/bootstrap/process.go +++ b/epochStart/bootstrap/process.go @@ -1,6 +1,7 @@ package bootstrap import ( + "strconv" "time" "github.com/ElrondNetwork/elrond-go/config" @@ -67,24 +68,25 @@ type ComponentsNeededForBootstrap struct { // epochStartBootstrap will handle requesting the needed data to start when joining late the network type epochStartBootstrap struct { // should come via arguments - publicKey crypto.PublicKey - marshalizer marshal.Marshalizer - hasher hashing.Hasher - messenger p2p.Messenger - generalConfig config.Config - economicsData *economics.EconomicsData - singleSigner crypto.SingleSigner - blockSingleSigner crypto.SingleSigner - keyGen crypto.KeyGenerator - blockKeyGen crypto.KeyGenerator - shardCoordinator sharding.Coordinator - genesisNodesConfig *sharding.NodesSetup - pathManager storage.PathManagerHandler - workingDir string - defaultDBPath string - defaultEpochString string - defaultShardString string - rater sharding.ChanceComputer + publicKey crypto.PublicKey + marshalizer marshal.Marshalizer + hasher hashing.Hasher + messenger p2p.Messenger + generalConfig config.Config + economicsData *economics.EconomicsData + singleSigner crypto.SingleSigner + blockSingleSigner crypto.SingleSigner + keyGen crypto.KeyGenerator + blockKeyGen crypto.KeyGenerator + shardCoordinator sharding.Coordinator + genesisNodesConfig *sharding.NodesSetup + pathManager storage.PathManagerHandler + workingDir string + defaultDBPath string + defaultEpochString string + defaultShardString string + destinationShardAsObserver string + rater sharding.ChanceComputer // created components requestHandler process.RequestHandler @@ -119,45 +121,47 @@ type baseDataInStorage struct { // ArgsEpochStartBootstrap holds the 
arguments needed for creating an epoch start data provider component type ArgsEpochStartBootstrap struct { - PublicKey crypto.PublicKey - Marshalizer marshal.Marshalizer - Hasher hashing.Hasher - Messenger p2p.Messenger - GeneralConfig config.Config - EconomicsData *economics.EconomicsData - SingleSigner crypto.SingleSigner - BlockSingleSigner crypto.SingleSigner - KeyGen crypto.KeyGenerator - BlockKeyGen crypto.KeyGenerator - GenesisNodesConfig *sharding.NodesSetup - PathManager storage.PathManagerHandler - WorkingDir string - DefaultDBPath string - DefaultEpochString string - DefaultShardString string - Rater sharding.ChanceComputer + PublicKey crypto.PublicKey + Marshalizer marshal.Marshalizer + Hasher hashing.Hasher + Messenger p2p.Messenger + GeneralConfig config.Config + EconomicsData *economics.EconomicsData + SingleSigner crypto.SingleSigner + BlockSingleSigner crypto.SingleSigner + KeyGen crypto.KeyGenerator + BlockKeyGen crypto.KeyGenerator + GenesisNodesConfig *sharding.NodesSetup + PathManager storage.PathManagerHandler + WorkingDir string + DefaultDBPath string + DefaultEpochString string + DefaultShardString string + Rater sharding.ChanceComputer + DestinationShardAsObserver string } // NewEpochStartBootstrap will return a new instance of epochStartBootstrap func NewEpochStartBootstrap(args ArgsEpochStartBootstrap) (*epochStartBootstrap, error) { epochStartProvider := &epochStartBootstrap{ - publicKey: args.PublicKey, - marshalizer: args.Marshalizer, - hasher: args.Hasher, - messenger: args.Messenger, - generalConfig: args.GeneralConfig, - economicsData: args.EconomicsData, - genesisNodesConfig: args.GenesisNodesConfig, - workingDir: args.WorkingDir, - pathManager: args.PathManager, - defaultEpochString: args.DefaultEpochString, - defaultDBPath: args.DefaultEpochString, - defaultShardString: args.DefaultShardString, - keyGen: args.KeyGen, - blockKeyGen: args.BlockKeyGen, - singleSigner: args.SingleSigner, - blockSingleSigner: args.BlockSingleSigner, - rater: args.Rater, + publicKey: args.PublicKey, + marshalizer: args.Marshalizer, + hasher: args.Hasher, + messenger: args.Messenger, + generalConfig: args.GeneralConfig, + economicsData: args.EconomicsData, + genesisNodesConfig: args.GenesisNodesConfig, + workingDir: args.WorkingDir, + pathManager: args.PathManager, + defaultEpochString: args.DefaultEpochString, + defaultDBPath: args.DefaultEpochString, + defaultShardString: args.DefaultShardString, + keyGen: args.KeyGen, + blockKeyGen: args.BlockKeyGen, + singleSigner: args.SingleSigner, + blockSingleSigner: args.BlockSingleSigner, + rater: args.Rater, + destinationShardAsObserver: args.DestinationShardAsObserver, } return epochStartProvider, nil @@ -303,6 +307,7 @@ func (e *epochStartBootstrap) createSyncers() error { KeyGen: e.keyGen, BlockKeyGen: e.blockKeyGen, WhiteListHandler: e.whiteListHandler, + ChainID: []byte(e.genesisNodesConfig.ChainID), } e.interceptorContainer, err = factoryInterceptors.NewEpochStartInterceptorsContainer(args) @@ -400,7 +405,17 @@ func (e *epochStartBootstrap) requestAndProcessing() (Parameters, error) { } if e.baseData.shardId == core.AllShardId { - e.baseData.shardId = 0 // TODO: replace with preferred shard ID as observer + destShardID := core.MetachainShardId + if e.destinationShardAsObserver != "metachain" { + var destShardIDUint64 uint64 + destShardIDUint64, err = strconv.ParseUint(e.destinationShardAsObserver, 10, 64) + if err != nil { + return Parameters{}, nil + } + destShardID = uint32(destShardIDUint64) + } + + e.baseData.shardId = 
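// Two details worth flagging in the hunks above. First, NewEpochStartBootstrap
// still copies args.DefaultEpochString into defaultDBPath, which reads like a
// copy-paste slip (DefaultDBPath is accepted but never used). Second, the
// ParseUint guard returns `Parameters{}, nil`, discarding the parse error. A
// self-contained sketch of the destination-shard resolution with the error
// propagated (helper name hypothetical; requires "fmt" and "strconv" plus the
// core package):
func parseDestinationShardAsObserver(destShard string) (uint32, error) {
	if destShard == "metachain" {
		return core.MetachainShardId, nil
	}
	val, err := strconv.ParseUint(destShard, 10, 32)
	if err != nil {
		return 0, fmt.Errorf("invalid destination shard %q: %w", destShard, err)
	}
	return uint32(val), nil
}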
destShardID } e.shardCoordinator, err = sharding.NewMultiShardCoordinator(e.baseData.numberOfShards, e.baseData.shardId) if err != nil { diff --git a/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go b/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go index f2f767f2c2d..5eb98270673 100644 --- a/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go +++ b/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go @@ -119,6 +119,7 @@ func TestStartInEpochForAShardNodeInMultiShardedEnvironment(t *testing.T) { StartTime: time.Now().Add(-time.Duration(epochDurationMillis) * time.Millisecond).Unix(), RoundDuration: 4000, InitialNodes: getInitialNodes(nodesMap), + ChainID: string(integrationTests.ChainID), } nodesConfig.SetNumberOfShards(uint32(numOfShards)) @@ -126,23 +127,24 @@ func TestStartInEpochForAShardNodeInMultiShardedEnvironment(t *testing.T) { _ = messenger.Bootstrap() time.Sleep(integrationTests.P2pBootstrapDelay) argsBootstrapHandler := bootstrap.ArgsEpochStartBootstrap{ - PublicKey: nodeToJoinLate.NodeKeys.Pk, - Marshalizer: integrationTests.TestMarshalizer, - Hasher: integrationTests.TestHasher, - Messenger: messenger, - GeneralConfig: getGeneralConfig(), - EconomicsData: integrationTests.CreateEconomicsData(), - SingleSigner: &mock.SignerMock{}, - BlockSingleSigner: &mock.SignerMock{}, - KeyGen: &mock.KeyGenMock{}, - BlockKeyGen: &mock.KeyGenMock{}, - GenesisNodesConfig: &nodesConfig, - PathManager: &mock.PathManagerStub{}, - WorkingDir: "test_directory", - DefaultDBPath: "test_db", - DefaultEpochString: "test_epoch", - DefaultShardString: "test_shard", - Rater: &mock.RaterMock{}, + PublicKey: nodeToJoinLate.NodeKeys.Pk, + Marshalizer: integrationTests.TestMarshalizer, + Hasher: integrationTests.TestHasher, + Messenger: messenger, + GeneralConfig: getGeneralConfig(), + EconomicsData: integrationTests.CreateEconomicsData(), + SingleSigner: &mock.SignerMock{}, + BlockSingleSigner: &mock.SignerMock{}, + KeyGen: &mock.KeyGenMock{}, + BlockKeyGen: &mock.KeyGenMock{}, + GenesisNodesConfig: &nodesConfig, + PathManager: &mock.PathManagerStub{}, + WorkingDir: "test_directory", + DefaultDBPath: "test_db", + DefaultEpochString: "test_epoch", + DefaultShardString: "test_shard", + Rater: &mock.RaterMock{}, + DestinationShardAsObserver: "0", } epochStartBootstrap, err := bootstrap.NewEpochStartBootstrap(argsBootstrapHandler) assert.Nil(t, err) From f79f4f1ade5f3e95159adce0559116052c400f0e Mon Sep 17 00:00:00 2001 From: bogdan-rosianu Date: Fri, 27 Mar 2020 15:14:55 +0200 Subject: [PATCH 50/61] logs + configs --- epochStart/bootstrap/process.go | 17 +- .../startInEpoch/startInEpoch_test.go | 162 +++++++++++++++++- 2 files changed, 173 insertions(+), 6 deletions(-) diff --git a/epochStart/bootstrap/process.go b/epochStart/bootstrap/process.go index d96515c5a4f..d7c4587e677 100644 --- a/epochStart/bootstrap/process.go +++ b/epochStart/bootstrap/process.go @@ -375,7 +375,7 @@ func (e *epochStartBootstrap) requestAndProcessing() (Parameters, error) { if err != nil { return Parameters{}, err } - log.Debug("start in epoch bootstrap: got shard headers and previous epoch start meta block") + log.Info("start in epoch bootstrap: got shard headers and previous epoch start meta block") prevEpochStartMetaHash := e.epochStartMeta.EpochStart.Economics.PrevEpochStartHash prevEpochStartMeta, ok := e.syncedHeaders[string(prevEpochStartMetaHash)].(*block.MetaBlock) @@ -393,16 +393,20 @@ func (e *epochStartBootstrap) 
requestAndProcessing() (Parameters, error) { if err != nil { return Parameters{}, err } + log.Info("start in epoch bootstrap: createTrieStorageManagers") + log.Info("start in epoch bootstrap: started syncPeerAccountsState") err = e.syncPeerAccountsState(e.epochStartMeta.ValidatorStatsRootHash) if err != nil { return Parameters{}, err } + log.Info("start in epoch bootstrap: syncPeerAccountsState", "peer account tries map length", len(e.peerAccountTries)) err = e.processNodesConfig(pubKeyBytes, e.epochStartMeta.ValidatorStatsRootHash) if err != nil { return Parameters{}, err } + log.Info("start in epoch bootstrap: processNodesConfig") if e.baseData.shardId == core.AllShardId { destShardID := core.MetachainShardId @@ -421,6 +425,7 @@ func (e *epochStartBootstrap) requestAndProcessing() (Parameters, error) { if err != nil { return Parameters{}, err } + log.Info("start in epoch bootstrap: shardCoordinator") if e.shardCoordinator.SelfId() != core.MetachainShardId { err = e.requestAndProcessForShard() @@ -538,6 +543,7 @@ func (e *epochStartBootstrap) requestAndProcessForShard() error { if err != nil { return err } + log.Info("start in epoch bootstrap: GetMiniBlocks") shardIds := []uint32{ core.MetachainShardId, @@ -558,6 +564,7 @@ func (e *epochStartBootstrap) requestAndProcessForShard() error { if err != nil { return err } + log.Info("start in epoch bootstrap: SyncMissingHeadersByHash") for hash, hdr := range neededHeaders { e.syncedHeaders[hash] = hdr @@ -568,10 +575,12 @@ func (e *epochStartBootstrap) requestAndProcessForShard() error { return epochStart.ErrWrongTypeAssertion } + log.Info("start in epoch bootstrap: started syncUserAccountsState") err = e.syncUserAccountsState(ownShardHdr.RootHash) if err != nil { return err } + log.Info("start in epoch bootstrap: syncUserAccountsState") components := &ComponentsNeededForBootstrap{ EpochStartMetaBlock: e.epochStartMeta, @@ -649,7 +658,7 @@ func (e *epochStartBootstrap) createTrieStorageManagers() error { } dbConfig = storageFactory.GetDBFromConfig(e.generalConfig.PeerAccountsTrieStorage.DB) - trieStorage, err = storageUnit.NewStorageUnitFromConf( + peerTrieStorage, err := storageUnit.NewStorageUnitFromConf( storageFactory.GetCacherFromConfig(e.generalConfig.PeerAccountsTrieStorage.Cache), dbConfig, storageFactory.GetBloomFromConfig(e.generalConfig.PeerAccountsTrieStorage.Bloom), @@ -658,7 +667,7 @@ func (e *epochStartBootstrap) createTrieStorageManagers() error { return err } - e.peerTrieStorageManager, err = trie.NewTrieStorageManagerWithoutPruning(trieStorage) + e.peerTrieStorageManager, err = trie.NewTrieStorageManagerWithoutPruning(peerTrieStorage) if err != nil { return err } @@ -673,7 +682,7 @@ func (e *epochStartBootstrap) syncPeerAccountsState(rootHash []byte) error { Marshalizer: e.marshalizer, TrieStorageManager: e.peerTrieStorageManager, RequestHandler: e.requestHandler, - WaitTime: timeToWait, + WaitTime: timeToWait * 10, Cacher: e.dataPool.TrieNodes(), }, } diff --git a/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go b/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go index 5eb98270673..9534c60c218 100644 --- a/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go +++ b/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go @@ -207,7 +207,7 @@ func getGeneralConfig() config.Config { Size: 10000, Type: "LRU", Shards: 1, }, DB: config.DBConfig{ - FilePath: "AccountsDB", + FilePath: "AccountsTrie/MainDB", Type: "MemoryDB", BatchDelaySeconds: 30, 
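// The peerTrieStorage rename above removes the reuse of a single
// `trieStorage` variable for both the user-accounts and peer-accounts
// storers; each trie storage manager now gets its own, clearly named storer.
// Together with the distinct FilePath values introduced in the test config
// (AccountsTrie/MainDB vs PeerAccountsTrie/MainDB), this keeps the two
// tries from ever being wired to the same storage unit.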
MaxBatchSize: 6, @@ -219,7 +219,7 @@ func getGeneralConfig() config.Config { Size: 10000, Type: "LRU", Shards: 1, }, DB: config.DBConfig{ - FilePath: "AccountsDB", + FilePath: "PeerAccountsTrie/MainDB", Type: "MemoryDB", BatchDelaySeconds: 30, MaxBatchSize: 6, @@ -248,5 +248,163 @@ func getGeneralConfig() config.Config { TrieNodesDataPool: config.CacheConfig{ Size: 10000, Type: "LRU", Shards: 1, }, + TxStorage: config.StorageConfig{ + Cache: config.CacheConfig{ + Size: 10000, Type: "LRU", Shards: 1, + }, + DB: config.DBConfig{ + FilePath: "Transactions", + Type: "MemoryDB", + BatchDelaySeconds: 30, + MaxBatchSize: 6, + MaxOpenFiles: 10, + }, + }, + MiniBlocksStorage: config.StorageConfig{ + Cache: config.CacheConfig{ + Size: 10000, Type: "LRU", Shards: 1, + }, + DB: config.DBConfig{ + FilePath: "MiniBlocks", + Type: "MemoryDB", + BatchDelaySeconds: 30, + MaxBatchSize: 6, + MaxOpenFiles: 10, + }, + }, + MiniBlockHeadersStorage: config.StorageConfig{ + Cache: config.CacheConfig{ + Size: 10000, Type: "LRU", Shards: 1, + }, + DB: config.DBConfig{ + FilePath: "MiniBlocks", + Type: "MemoryDB", + BatchDelaySeconds: 30, + MaxBatchSize: 6, + MaxOpenFiles: 10, + }, + }, + ShardHdrNonceHashStorage: config.StorageConfig{ + Cache: config.CacheConfig{ + Size: 10000, Type: "LRU", Shards: 1, + }, + DB: config.DBConfig{ + FilePath: "ShardHdrHashNonce", + Type: "MemoryDB", + BatchDelaySeconds: 30, + MaxBatchSize: 6, + MaxOpenFiles: 10, + }, + }, + MetaBlockStorage: config.StorageConfig{ + Cache: config.CacheConfig{ + Size: 10000, Type: "LRU", Shards: 1, + }, + DB: config.DBConfig{ + FilePath: "MetaBlock", + Type: "MemoryDB", + BatchDelaySeconds: 30, + MaxBatchSize: 6, + MaxOpenFiles: 10, + }, + }, + MetaHdrNonceHashStorage: config.StorageConfig{ + Cache: config.CacheConfig{ + Size: 10000, Type: "LRU", Shards: 1, + }, + DB: config.DBConfig{ + FilePath: "MetaHdrHashNonce", + Type: "MemoryDB", + BatchDelaySeconds: 30, + MaxBatchSize: 6, + MaxOpenFiles: 10, + }, + }, + UnsignedTransactionStorage: config.StorageConfig{ + Cache: config.CacheConfig{ + Size: 10000, Type: "LRU", Shards: 1, + }, + DB: config.DBConfig{ + FilePath: "UnsignedTransactions", + Type: "MemoryDB", + BatchDelaySeconds: 30, + MaxBatchSize: 6, + MaxOpenFiles: 10, + }, + }, + RewardTxStorage: config.StorageConfig{ + Cache: config.CacheConfig{ + Size: 10000, Type: "LRU", Shards: 1, + }, + DB: config.DBConfig{ + FilePath: "RewardTransactions", + Type: "MemoryDB", + BatchDelaySeconds: 30, + MaxBatchSize: 6, + MaxOpenFiles: 10, + }, + }, + BlockHeaderStorage: config.StorageConfig{ + Cache: config.CacheConfig{ + Size: 10000, Type: "LRU", Shards: 1, + }, + DB: config.DBConfig{ + FilePath: "BlockHeaders", + Type: "MemoryDB", + BatchDelaySeconds: 30, + MaxBatchSize: 6, + MaxOpenFiles: 10, + }, + }, + Heartbeat: config.HeartbeatConfig{ + HeartbeatStorage: config.StorageConfig{ + Cache: config.CacheConfig{ + Size: 10000, Type: "LRU", Shards: 1, + }, + DB: config.DBConfig{ + FilePath: "HeartbeatStorage", + Type: "MemoryDB", + BatchDelaySeconds: 30, + MaxBatchSize: 6, + MaxOpenFiles: 10, + }, + }, + }, + StatusMetricsStorage: config.StorageConfig{ + Cache: config.CacheConfig{ + Size: 10000, Type: "LRU", Shards: 1, + }, + DB: config.DBConfig{ + FilePath: "StatusMetricsStorageDB", + Type: "MemoryDB", + BatchDelaySeconds: 30, + MaxBatchSize: 6, + MaxOpenFiles: 10, + }, + }, + PeerBlockBodyStorage: config.StorageConfig{ + Cache: config.CacheConfig{ + Size: 10000, Type: "LRU", Shards: 1, + }, + DB: config.DBConfig{ + FilePath: "PeerBlocks", + Type: "MemoryDB", + 
BatchDelaySeconds: 30, + MaxBatchSize: 6, + MaxOpenFiles: 10, + }, + }, + BootstrapStorage: config.StorageConfig{ + Cache: config.CacheConfig{ + Size: 10000, Type: "LRU", Shards: 1, + }, + DB: config.DBConfig{ + FilePath: "BootstrapData", + Type: "MemoryDB", + BatchDelaySeconds: 30, + MaxBatchSize: 6, + MaxOpenFiles: 10, + }, + }, } } From dcc1718807c6ccab732daa1413e70fd5746370ed Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Fri, 27 Mar 2020 16:34:23 +0200 Subject: [PATCH 51/61] merge from feat. --- cmd/node/factory/structs.go | 21 +- data/state/interface.go | 4 +- data/state/peerAccount.go | 12 +- data/state/peerAccountData.pb.go | 195 ++++++++---------- data/state/proto/peerAccountData.proto | 2 +- dataRetriever/factory/dataPoolFactory.go | 2 +- .../disabled/disabledAccountsAdapter.go | 38 +--- epochStart/bootstrap/nodesCoordinator.go | 9 +- epochStart/bootstrap/process.go | 7 +- integrationTests/mock/listIndexUpdaterStub.go | 6 +- process/mock/peerAccountHandlerMock.go | 14 +- process/mock/raterMock.go | 8 +- process/peer/listIndexUpdater.go | 4 +- process/peer/process.go | 2 +- process/rating/blockSigningRater.go | 2 +- process/rating/disabledListIndexUpdater.go | 2 +- sharding/indexHashedNodesCoordinator.go | 2 +- .../indexHashedNodesCoordinatorRegistry.go | 2 + .../indexHashedNodesCoordinatorWithRater.go | 36 ---- sharding/interface.go | 5 +- sharding/mock/listIndexUpdaterStub.go | 6 +- 21 files changed, 151 insertions(+), 228 deletions(-) diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index 40c9ef85300..f6f7d90b84f 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -941,17 +941,6 @@ func CreateSoftwareVersionChecker(statusHandler core.AppStatusHandler) (*softwar return softwareVersionChecker, nil } -func getHasherFromConfig(cfg *config.Config) (hashing.Hasher, error) { - switch cfg.Hasher.Type { - case "sha256": - return sha256.Sha256{}, nil - case "blake2b": - return &blake2b.Blake2b{}, nil - } - - return nil, errors.New("no hasher provided in config file") -} - func createBlockChainFromConfig(coordinator sharding.Coordinator, ash core.AppStatusHandler) (data.ChainHandler, error) { if coordinator == nil { @@ -1418,15 +1407,7 @@ func generateGenesisHeadersAndApplyInitialBalances(args *processComponentsFactor return nil, err } - cache, errNewCache := storageUnit.NewCache(storageUnit.LRUCache, 10, 1) - if errNewCache != nil { - return nil, errNewCache - } - newBlockChain, errNewMetachain := blockchain.NewMetaChain(cache) - if errNewMetachain != nil { - return nil, errNewMetachain - } - + newBlockChain := blockchain.NewMetaChain() argsMetaGenesis.ShardCoordinator = newShardCoordinator argsMetaGenesis.Accounts = newAccounts argsMetaGenesis.Blkc = newBlockChain diff --git a/data/state/interface.go b/data/state/interface.go index c718f79a534..0c3f34ec5a4 100644 --- a/data/state/interface.go +++ b/data/state/interface.go @@ -71,6 +71,8 @@ type PeerAccountHandler interface { AddToAccumulatedFees(*big.Int) GetJailTime() TimePeriod SetJailTime(TimePeriod) + GetList() string + GetIndex() uint32 GetCurrentShardId() uint32 SetCurrentShardId(uint32) GetNextShardId() uint32 @@ -87,7 +89,7 @@ type PeerAccountHandler interface { IncreaseNumSelectedInSuccessBlocks() GetLeaderSuccessRate() SignRate GetValidatorSuccessRate() SignRate - SetListAndIndex(shardID uint32, list string, index int32) + SetListAndIndex(shardID uint32, list string, index uint32) GetRating() uint32 SetRating(uint32) GetTempRating() uint32 diff --git a/data/state/peerAccount.go 
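// Every storage unit added to getGeneralConfig above repeats the same
// MemoryDB-backed shape, differing only in FilePath, so the literal could be
// collapsed with a small helper (a sketch; the helper name is hypothetical):
func memoryDBStorageConfig(filePath string) config.StorageConfig {
	return config.StorageConfig{
		Cache: config.CacheConfig{Size: 10000, Type: "LRU", Shards: 1},
		DB: config.DBConfig{
			FilePath:          filePath,
			Type:              "MemoryDB",
			BatchDelaySeconds: 30,
			MaxBatchSize:      6,
			MaxOpenFiles:      10,
		},
	}
}
// usage: TxStorage: memoryDBStorageConfig("Transactions"), and so on.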
b/data/state/peerAccount.go index 36fd6e54e82..daa10a19444 100644 --- a/data/state/peerAccount.go +++ b/data/state/peerAccount.go @@ -146,12 +146,22 @@ func (pa *peerAccount) SetTempRating(rating uint32) { } // SetListAndIndex will update the peer's list (eligible, waiting) and the index inside it with journal -func (pa *peerAccount) SetListAndIndex(shardID uint32, list string, index int32) { +func (pa *peerAccount) SetListAndIndex(shardID uint32, list string, index uint32) { pa.CurrentShardId = shardID pa.List = list pa.IndexInList = index } +// GetList returns the list the peer is in +func (pa *peerAccount) GetList() string { + return pa.List +} + +// GetIndex returns the index in list +func (pa *peerAccount) GetIndex() uint32 { + return pa.IndexInList +} + // IsInterfaceNil return if there is no value under the interface func (pa *peerAccount) IsInterfaceNil() bool { return pa == nil diff --git a/data/state/peerAccountData.pb.go b/data/state/peerAccountData.pb.go index c6271545adf..4b0f436aea5 100644 --- a/data/state/peerAccountData.pb.go +++ b/data/state/peerAccountData.pb.go @@ -7,16 +7,15 @@ import ( bytes "bytes" encoding_binary "encoding/binary" fmt "fmt" + github_com_ElrondNetwork_elrond_go_data "github.com/ElrondNetwork/elrond-go/data" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" io "io" math "math" math_big "math/big" math_bits "math/bits" reflect "reflect" strings "strings" - - github_com_ElrondNetwork_elrond_go_data "github.com/ElrondNetwork/elrond-go/data" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" ) // Reference imports to suppress errors if they are not otherwise used. @@ -272,8 +271,8 @@ type PeerAccountData struct { TempRating uint32 `protobuf:"varint,15,opt,name=TempRating,proto3" json:"TempRating,omitempty"` AccumulatedFees *math_big.Int `protobuf:"bytes,16,opt,name=AccumulatedFees,proto3,casttypewith=math/big.Int;github.com/ElrondNetwork/elrond-go/data.BigIntCaster" json:"AccumulatedFees,omitempty"` NumSelectedInSuccessBlocks uint32 `protobuf:"varint,17,opt,name=NumSelectedInSuccessBlocks,proto3" json:"NumSelectedInSuccessBlocks,omitempty"` - IndexInList int32 `protobuf:"varint,20,opt,name=IndexInList,proto3" json:"IndexInList,omitempty"` - List string `protobuf:"bytes,21,opt,name=List,proto3" json:"List,omitempty"` + IndexInList uint32 `protobuf:"varint,18,opt,name=IndexInList,proto3" json:"IndexInList,omitempty"` + List string `protobuf:"bytes,19,opt,name=List,proto3" json:"List,omitempty"` } func (m *PeerAccountData) Reset() { *m = PeerAccountData{} } @@ -423,6 +422,20 @@ func (m *PeerAccountData) GetNumSelectedInSuccessBlocks() uint32 { return 0 } +func (m *PeerAccountData) GetIndexInList() uint32 { + if m != nil { + return m.IndexInList + } + return 0 +} + +func (m *PeerAccountData) GetList() string { + if m != nil { + return m.List + } + return "" +} + func init() { proto.RegisterType((*TimeStamp)(nil), "proto.TimeStamp") proto.RegisterType((*TimePeriod)(nil), "proto.TimePeriod") @@ -434,106 +447,58 @@ func init() { func init() { proto.RegisterFile("peerAccountData.proto", fileDescriptor_26bd0314afcce126) } var fileDescriptor_26bd0314afcce126 = []byte{ - // 802 bytes of a gzipped FileDescriptorProto + // 806 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0x41, 0x6f, 0xe3, 0x44, - 0x18, 0x8d, 0xb3, 0x4d, 0x9a, 0x7e, 0x4d, 0x9b, 0x76, 0x76, 0x59, 0x59, 0x15, 0x72, 0xa2, 0x08, - 0xa1, 0x0a, 0xb1, 0x09, 0x62, 0x91, 0x38, 0xa0, 
0x05, 0xc5, 0xa5, 0x15, 0x81, 0xca, 0xaa, 0xc6, - 0x0b, 0x48, 0x70, 0x9a, 0x78, 0x06, 0xc7, 0xaa, 0x33, 0x13, 0x8d, 0xc7, 0x5a, 0xb8, 0xf1, 0x13, - 0xf8, 0x19, 0x88, 0xff, 0xc0, 0x7d, 0x8f, 0x3d, 0xf6, 0x14, 0xa8, 0x7b, 0x41, 0x39, 0xf5, 0xc8, - 0x11, 0x79, 0x6c, 0xa7, 0x71, 0x12, 0xe5, 0xc4, 0x29, 0xf9, 0xde, 0xf7, 0xde, 0x9b, 0x99, 0xcf, - 0xf3, 0x06, 0xde, 0x99, 0x32, 0x26, 0x07, 0x9e, 0x27, 0x62, 0xae, 0xbe, 0x24, 0x8a, 0xf4, 0xa6, - 0x52, 0x28, 0x81, 0x6a, 0xfa, 0xe7, 0xe4, 0x85, 0x1f, 0xa8, 0x71, 0x3c, 0xea, 0x79, 0x62, 0xd2, - 0xf7, 0x85, 0x2f, 0xfa, 0x1a, 0x1e, 0xc5, 0x3f, 0xe9, 0x4a, 0x17, 0xfa, 0x5f, 0xa6, 0xea, 0x7e, - 0x0a, 0x7b, 0xaf, 0x83, 0x09, 0x73, 0x15, 0x99, 0x4c, 0xd1, 0x33, 0xa8, 0x9d, 0x4f, 0x85, 0x37, - 0x36, 0x8d, 0x8e, 0x71, 0xba, 0x83, 0xb3, 0x22, 0x45, 0xb1, 0x88, 0x39, 0x35, 0xab, 0x19, 0xaa, - 0x8b, 0xae, 0x02, 0x48, 0x85, 0x57, 0x4c, 0x06, 0x82, 0xa2, 0x4f, 0x60, 0xcf, 0x55, 0x44, 0xaa, - 0x14, 0xd2, 0xea, 0xfd, 0x8f, 0x8f, 0xb2, 0x15, 0x7a, 0x0b, 0x7b, 0x7b, 0xe7, 0xed, 0xac, 0x5d, - 0xc1, 0x8f, 0x44, 0xf4, 0x11, 0xec, 0x9e, 0x73, 0xaa, 0x35, 0xd5, 0xad, 0x9a, 0x82, 0xd6, 0xbd, - 0x80, 0x86, 0x1b, 0xf8, 0x1c, 0x13, 0xc5, 0xd0, 0xbb, 0xb0, 0xe7, 0x48, 0x37, 0xf6, 0x3c, 0x16, - 0x45, 0x7a, 0xcd, 0x03, 0xfc, 0x08, 0x64, 0xdd, 0x0b, 0x12, 0x84, 0xb1, 0xcc, 0xdc, 0x75, 0x37, - 0x07, 0xba, 0xff, 0x56, 0xe1, 0xd9, 0x77, 0x24, 0x0c, 0x28, 0x51, 0x42, 0x0e, 0xa6, 0x01, 0x66, - 0xd1, 0x54, 0xf0, 0x88, 0xa1, 0x57, 0xd0, 0x72, 0xe4, 0x25, 0x23, 0x94, 0x95, 0xad, 0xed, 0xa7, - 0xf3, 0x59, 0xbb, 0xc5, 0xcb, 0x2d, 0xbc, 0xca, 0x5d, 0x96, 0x97, 0xd6, 0x2e, 0xcb, 0xf3, 0x16, - 0x5e, 0xe5, 0xa2, 0x0b, 0x40, 0x8e, 0x5c, 0xec, 0xab, 0xd8, 0xc0, 0x13, 0xed, 0xf0, 0x7c, 0x3e, - 0x6b, 0x23, 0xbe, 0xd6, 0xc5, 0x1b, 0x14, 0x2b, 0x3e, 0xc5, 0x4e, 0x76, 0x36, 0xfa, 0x14, 0x9b, - 0xd9, 0xa0, 0x40, 0x5d, 0xa8, 0x63, 0xa2, 0x02, 0xee, 0x9b, 0xb5, 0x8e, 0x71, 0x5a, 0xb5, 0x61, - 0x3e, 0x6b, 0xd7, 0xa5, 0x46, 0x70, 0xde, 0x41, 0x3d, 0x80, 0xd7, 0x6c, 0x32, 0xcd, 0x79, 0x75, - 0xcd, 0x3b, 0x9c, 0xcf, 0xda, 0xa0, 0x16, 0x28, 0x5e, 0x62, 0x74, 0xff, 0xdc, 0x85, 0xd6, 0x55, - 0xf9, 0x06, 0xa3, 0x2e, 0x34, 0xed, 0x4b, 0xf7, 0x2a, 0x1e, 0x85, 0x81, 0xf7, 0x0d, 0xfb, 0x45, - 0x8f, 0xbc, 0x89, 0x4b, 0x18, 0xfa, 0x00, 0x8e, 0x5c, 0x6f, 0xcc, 0x85, 0x94, 0x8f, 0xbc, 0xaa, - 0xe6, 0xad, 0xe1, 0xe8, 0x3d, 0x38, 0xc0, 0xec, 0x0d, 0x91, 0x74, 0x40, 0xa9, 0x2c, 0x46, 0xd8, - 0xc4, 0x65, 0x10, 0xfd, 0x08, 0x35, 0x57, 0x91, 0xeb, 0x6c, 0x30, 0x4d, 0xfb, 0xfc, 0x8f, 0xbf, - 0xda, 0x83, 0x09, 0x51, 0xe3, 0xfe, 0x28, 0xf0, 0x7b, 0x43, 0xae, 0x3e, 0x5b, 0x8a, 0xd2, 0x79, - 0x28, 0x05, 0xa7, 0x0e, 0x53, 0x6f, 0x84, 0xbc, 0xee, 0x33, 0x5d, 0xbd, 0xf0, 0x45, 0x9f, 0xa6, - 0x01, 0xb4, 0x03, 0x7f, 0xc8, 0xd5, 0x19, 0x89, 0x14, 0x93, 0x38, 0xf3, 0x44, 0x02, 0x5a, 0x03, - 0xcf, 0x8b, 0x27, 0x71, 0x48, 0x14, 0xa3, 0x17, 0x8c, 0x45, 0x26, 0xfa, 0x3f, 0x97, 0x59, 0x75, - 0x47, 0x2f, 0xa1, 0xf1, 0x35, 0x09, 0x42, 0x9d, 0xa6, 0x9a, 0x4e, 0xd3, 0xf1, 0x52, 0x9a, 0xb2, - 0x9c, 0xe6, 0x71, 0x5a, 0x10, 0xd1, 0x2b, 0x38, 0xb8, 0x22, 0x91, 0x2a, 0xea, 0xc8, 0xac, 0x77, - 0x9e, 0x6c, 0x53, 0x96, 0xd9, 0xe8, 0x7d, 0x38, 0x3c, 0x8b, 0xa5, 0x64, 0x5c, 0xb9, 0x63, 0x22, - 0xe9, 0x90, 0x9a, 0xbb, 0x3a, 0x69, 0x2b, 0x28, 0xea, 0xc0, 0xbe, 0xc3, 0x7e, 0x5e, 0x90, 0x1a, - 0x9a, 0xb4, 0x0c, 0xa1, 0x0f, 0xe1, 0xd8, 0x11, 0x94, 0x0d, 0xf9, 0xf7, 0x24, 0x48, 0xaf, 0xc9, - 0x65, 0x10, 0x29, 0x73, 0xaf, 0x63, 0x9c, 0x36, 0xf0, 0x7a, 0x23, 0xfd, 0xbe, 0xdf, 0x72, 0x3d, - 0x67, 0xea, 0x08, 0xee, 0x31, 0x13, 0xf4, 0xd3, 0x54, 0x06, 0xd1, 0x70, 
0x29, 0xe3, 0x45, 0x5a, - 0x88, 0x62, 0xe6, 0xbe, 0x9e, 0x4e, 0x2b, 0x3f, 0x63, 0xf1, 0x9e, 0xe4, 0x27, 0xdc, 0x28, 0x41, - 0x67, 0x70, 0x5c, 0x4e, 0x7e, 0xea, 0xd3, 0xdc, 0xe6, 0xb3, 0xce, 0x47, 0x9f, 0xc3, 0x89, 0x13, - 0x4f, 0x5c, 0x16, 0x32, 0x4f, 0x31, 0x3a, 0xe4, 0x79, 0xcf, 0x0e, 0x85, 0x77, 0x1d, 0x99, 0x4f, - 0xf5, 0x50, 0xb6, 0x30, 0xd0, 0x09, 0x34, 0xce, 0x04, 0x65, 0x5f, 0x91, 0x68, 0x6c, 0x1e, 0xe8, - 0x0b, 0xbd, 0xa8, 0xd1, 0xf3, 0x45, 0x52, 0x0f, 0xb5, 0x4f, 0x91, 0x4e, 0xab, 0x94, 0xce, 0x63, - 0xdd, 0x5b, 0x42, 0x52, 0x4f, 0x2c, 0x84, 0xd2, 0x9e, 0xad, 0xcc, 0xb3, 0xa8, 0xd3, 0x87, 0x3f, - 0x9b, 0xee, 0x51, 0xf6, 0xf0, 0xeb, 0xc2, 0xfe, 0xe2, 0xe6, 0xce, 0xaa, 0xdc, 0xde, 0x59, 0x95, - 0x87, 0x3b, 0xcb, 0xf8, 0x35, 0xb1, 0x8c, 0xdf, 0x13, 0xcb, 0x78, 0x9b, 0x58, 0xc6, 0x4d, 0x62, - 0x19, 0xb7, 0x89, 0x65, 0xfc, 0x9d, 0x58, 0xc6, 0x3f, 0x89, 0x55, 0x79, 0x48, 0x2c, 0xe3, 0xb7, - 0x7b, 0xab, 0x72, 0x73, 0x6f, 0x55, 0x6e, 0xef, 0xad, 0xca, 0x0f, 0xb5, 0x48, 0x11, 0xc5, 0x46, - 0x75, 0x3d, 0xaf, 0x97, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x10, 0xf5, 0x41, 0xb2, 0xc8, 0x06, - 0x00, 0x00, - // 744 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0x41, 0x6f, 0xda, 0x48, - 0x18, 0xc5, 0x24, 0x24, 0x30, 0x81, 0x10, 0x66, 0xb3, 0x91, 0x15, 0xad, 0x0c, 0x42, 0xab, 0x55, - 0xb4, 0xda, 0xc0, 0xaa, 0xa9, 0xd4, 0x43, 0x95, 0x56, 0x90, 0x82, 0x44, 0x1b, 0x59, 0xc8, 0x4e, - 0x5b, 0xa9, 0x3d, 0x0d, 0x9e, 0xa9, 0xb1, 0x62, 0x66, 0xd0, 0x78, 0xac, 0xb4, 0xb7, 0x5e, 0x7a, - 0xef, 0x8f, 0xe8, 0xa1, 0xea, 0x2f, 0xc9, 0x31, 0xc7, 0x9c, 0xd2, 0xc6, 0xb9, 0x54, 0x39, 0xe5, - 0x27, 0x54, 0x1e, 0xdb, 0x04, 0x03, 0xe2, 0xd4, 0x13, 0x7c, 0xef, 0x7b, 0xef, 0xcd, 0xf7, 0x8d, - 0xde, 0x18, 0xfc, 0x39, 0x26, 0x84, 0xb7, 0x2c, 0x8b, 0xf9, 0x54, 0x3c, 0x43, 0x02, 0x35, 0xc6, - 0x9c, 0x09, 0x06, 0x73, 0xf2, 0x67, 0x77, 0xdf, 0x76, 0xc4, 0xd0, 0x1f, 0x34, 0x2c, 0x36, 0x6a, - 0xda, 0xcc, 0x66, 0x4d, 0x09, 0x0f, 0xfc, 0x77, 0xb2, 0x92, 0x85, 0xfc, 0x17, 0xa9, 0xea, 0x8f, - 0x40, 0xe1, 0xc4, 0x19, 0x11, 0x53, 0xa0, 0xd1, 0x18, 0x6e, 0x83, 0x5c, 0x67, 0xcc, 0xac, 0xa1, - 0xaa, 0xd4, 0x94, 0xbd, 0x55, 0x23, 0x2a, 0x42, 0xd4, 0x60, 0x3e, 0xc5, 0x6a, 0x36, 0x42, 0x65, - 0x51, 0x17, 0x00, 0x84, 0xc2, 0x3e, 0xe1, 0x0e, 0xc3, 0xf0, 0x21, 0x28, 0x98, 0x02, 0x71, 0x11, - 0x42, 0x52, 0xbd, 0xf1, 0x60, 0x2b, 0x3a, 0xa1, 0x31, 0xb1, 0x6f, 0xaf, 0x9e, 0x5f, 0x55, 0x33, - 0xc6, 0x3d, 0x11, 0xfe, 0x0f, 0xd6, 0x3b, 0x14, 0x4b, 0x4d, 0x76, 0xa9, 0x26, 0xa1, 0xd5, 0xbb, - 0x20, 0x6f, 0x3a, 0x36, 0x35, 0x90, 0x20, 0xf0, 0x2f, 0x50, 0xd0, 0xb9, 0xe9, 0x5b, 0x16, 0xf1, - 0x3c, 0x79, 0x66, 0xc9, 0xb8, 0x07, 0xa2, 0x6e, 0x17, 0x39, 0xae, 0xcf, 0x23, 0x77, 0xd9, 0x8d, - 0x81, 0xfa, 0x97, 0x2c, 0xd8, 0x7e, 0x85, 0x5c, 0x07, 0x23, 0xc1, 0x78, 0x6b, 0xec, 0x18, 0xc4, - 0x1b, 0x33, 0xea, 0x11, 0x78, 0x08, 0xca, 0x3a, 0x3f, 0x26, 0x08, 0x93, 0xb4, 0x75, 0xfb, 0x8f, - 0xdb, 0xab, 0x6a, 0x99, 0xa6, 0x5b, 0xc6, 0x2c, 0x77, 0x5a, 0x9e, 0x3a, 0x3b, 0x2d, 0x8f, 0x5b, - 0xc6, 0x2c, 0x17, 0x76, 0x01, 0xd4, 0xf9, 0x64, 0xae, 0x64, 0x80, 0x15, 0xe9, 0xb0, 0x73, 0x7b, - 0x55, 0x85, 0x74, 0xae, 0x6b, 0x2c, 0x50, 0xcc, 0xf8, 0x24, 0x93, 0xac, 0x2e, 0xf4, 0x49, 0x86, - 0x59, 0xa0, 0xa8, 0x7f, 0x5a, 0x07, 0xe5, 0x7e, 0x3a, 0x6d, 0xb0, 0x0e, 0x8a, 0xed, 0x63, 0xb3, - 0xef, 0x0f, 0x5c, 0xc7, 0x7a, 0x41, 0x3e, 0xc8, 0xeb, 0x29, 0x1a, 0x29, 0x0c, 0xfe, 0x0b, 0xb6, - 0x4c, 0x6b, 0x48, 0x19, 0xe7, 0xf7, 0xbc, 0xac, 0xe4, 0xcd, 0xe1, 0xf0, 0x6f, 0x50, 0x32, 0xc8, - 0x19, 0xe2, 0xb8, 0x85, 0x31, 0x4f, 
0xd6, 0x2d, 0x1a, 0x69, 0x10, 0xbe, 0x05, 0x39, 0x53, 0xa0, - 0xd3, 0x68, 0x89, 0x62, 0xbb, 0xf3, 0xed, 0x7b, 0xb5, 0x35, 0x42, 0x62, 0xd8, 0x1c, 0x38, 0x76, - 0xa3, 0x47, 0xc5, 0xe3, 0xa9, 0xd8, 0x77, 0x5c, 0xce, 0x28, 0xd6, 0x89, 0x38, 0x63, 0xfc, 0xb4, - 0x49, 0x64, 0xb5, 0x6f, 0xb3, 0x26, 0x0e, 0x1f, 0x4b, 0xdb, 0xb1, 0x7b, 0x54, 0x1c, 0x21, 0x4f, - 0x10, 0x6e, 0x44, 0x9e, 0xf0, 0x00, 0xe4, 0x9f, 0x23, 0xc7, 0x95, 0x41, 0xcc, 0xc9, 0x20, 0x56, - 0xa6, 0x82, 0x18, 0x45, 0x3c, 0x4e, 0xe2, 0x84, 0x08, 0x0f, 0x41, 0xa9, 0x8f, 0x3c, 0x91, 0xd4, - 0x9e, 0xba, 0x56, 0x5b, 0x59, 0xa6, 0x4c, 0xb3, 0xe1, 0x3f, 0x60, 0xf3, 0xc8, 0xe7, 0x9c, 0x50, - 0x61, 0x0e, 0x11, 0xc7, 0x3d, 0xac, 0xae, 0xcb, 0x90, 0xce, 0xa0, 0xb0, 0x06, 0x36, 0x74, 0xf2, - 0x7e, 0x42, 0xca, 0x4b, 0xd2, 0x34, 0x04, 0xff, 0x03, 0x15, 0x9d, 0x61, 0xd2, 0xa3, 0xaf, 0x91, - 0x23, 0x1c, 0x6a, 0x1f, 0x3b, 0x9e, 0x50, 0x0b, 0x35, 0x65, 0x2f, 0x6f, 0xcc, 0x37, 0xc2, 0xeb, - 0x7e, 0x49, 0xe5, 0xda, 0x58, 0x67, 0xd4, 0x22, 0x2a, 0x90, 0xaf, 0x3a, 0x0d, 0xc2, 0xde, 0xd4, - 0xf3, 0x48, 0x82, 0x86, 0x04, 0x51, 0x37, 0xe4, 0xed, 0x94, 0xe3, 0x1d, 0x93, 0xa7, 0x18, 0x6f, - 0xb8, 0x50, 0x02, 0x8f, 0x40, 0x25, 0xfd, 0x68, 0x42, 0x9f, 0xe2, 0x32, 0x9f, 0x79, 0x3e, 0xdc, - 0x01, 0x6b, 0x06, 0x0a, 0x77, 0x50, 0x4b, 0xf2, 0x02, 0xe2, 0x2a, 0xfc, 0x36, 0x45, 0x5b, 0x6c, - 0x46, 0xdf, 0xa6, 0x68, 0x7a, 0x0d, 0x80, 0x13, 0x32, 0x1a, 0xc7, 0x8a, 0xb2, 0x54, 0x4c, 0x21, - 0x90, 0x81, 0x72, 0xcb, 0xb2, 0xfc, 0x91, 0xef, 0x22, 0x41, 0x70, 0x97, 0x10, 0x4f, 0xdd, 0xfa, - 0x9d, 0xb1, 0x9a, 0x75, 0x87, 0x4f, 0xc0, 0xae, 0xee, 0x8f, 0x4c, 0xe2, 0x12, 0x4b, 0x10, 0xdc, - 0xa3, 0xf1, 0x6a, 0x6d, 0x97, 0x59, 0xa7, 0x9e, 0x5a, 0x91, 0x03, 0x2e, 0x61, 0xb4, 0x9f, 0x5e, - 0x5c, 0x6b, 0x99, 0xcb, 0x6b, 0x2d, 0x73, 0x77, 0xad, 0x29, 0x1f, 0x03, 0x4d, 0xf9, 0x1a, 0x68, - 0xca, 0x79, 0xa0, 0x29, 0x17, 0x81, 0xa6, 0x5c, 0x06, 0x9a, 0xf2, 0x23, 0xd0, 0x94, 0x9f, 0x81, - 0x96, 0xb9, 0x0b, 0x34, 0xe5, 0xf3, 0x8d, 0x96, 0xb9, 0xb8, 0xd1, 0x32, 0x97, 0x37, 0x5a, 0xe6, - 0x4d, 0xce, 0x13, 0x48, 0x90, 0xc1, 0x9a, 0xbc, 0xe8, 0x83, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, - 0x0c, 0xdc, 0x43, 0xdb, 0x3c, 0x06, 0x00, 0x00, + 0x14, 0x8e, 0xb3, 0x4d, 0xda, 0xbc, 0xa6, 0x4d, 0x3b, 0x5b, 0x56, 0xd6, 0x0a, 0x39, 0x51, 0x84, + 0x50, 0x85, 0xd8, 0x04, 0xb1, 0x48, 0x1c, 0xd0, 0x82, 0xe2, 0xd2, 0x4a, 0x86, 0xca, 0xaa, 0xc6, + 0x0b, 0x48, 0x70, 0x9a, 0x78, 0x06, 0xc7, 0xaa, 0x33, 0x13, 0x8d, 0xc7, 0xda, 0xe5, 0xc6, 0x4f, + 0xe0, 0x67, 0x20, 0xfe, 0x02, 0x7f, 0x60, 0x8f, 0x3d, 0xf6, 0x14, 0xa8, 0x7b, 0x41, 0x3d, 0xf5, + 0xc8, 0x11, 0x79, 0x6c, 0xa7, 0x76, 0x5a, 0xf5, 0xc4, 0x29, 0x79, 0xdf, 0xfb, 0xbe, 0x37, 0x6f, + 0xde, 0x7c, 0xcf, 0xf0, 0xde, 0x82, 0x31, 0x39, 0xf1, 0x7d, 0x91, 0x70, 0xf5, 0x35, 0x51, 0x64, + 0xb4, 0x90, 0x42, 0x09, 0xd4, 0xd2, 0x3f, 0xcf, 0x5f, 0x04, 0xa1, 0x9a, 0x25, 0xd3, 0x91, 0x2f, + 0xe6, 0xe3, 0x40, 0x04, 0x62, 0xac, 0xe1, 0x69, 0xf2, 0xb3, 0x8e, 0x74, 0xa0, 0xff, 0xe5, 0xaa, + 0xe1, 0xe7, 0xd0, 0x79, 0x1d, 0xce, 0x99, 0xa7, 0xc8, 0x7c, 0x81, 0x0e, 0xa0, 0x75, 0xbc, 0x10, + 0xfe, 0xcc, 0x34, 0x06, 0xc6, 0xe1, 0x06, 0xce, 0x83, 0x0c, 0xc5, 0x22, 0xe1, 0xd4, 0x6c, 0xe6, + 0xa8, 0x0e, 0x86, 0x0a, 0x20, 0x13, 0x9e, 0x31, 0x19, 0x0a, 0x8a, 0x3e, 0x83, 0x8e, 0xa7, 0x88, + 0x54, 0x19, 0xa4, 0xd5, 0xdb, 0x9f, 0xee, 0xe5, 0x27, 0x8c, 0x56, 0xe5, 0xed, 0x8d, 0x77, 0xcb, + 0x7e, 0x03, 0xdf, 0x11, 0xd1, 0x27, 0xb0, 0x79, 0xcc, 0xa9, 0xd6, 0x34, 0x1f, 0xd5, 0x94, 0xb4, + 0xe1, 0x09, 0x6c, 0x79, 0x61, 0xc0, 0x31, 0x51, 0x0c, 0xbd, 0x0f, 0x1d, 0x57, 0x7a, 0x89, 0xef, + 0xb3, 0x38, 
0xd6, 0x67, 0xee, 0xe0, 0x3b, 0x20, 0xcf, 0x9e, 0x90, 0x30, 0x4a, 0x64, 0x5e, 0x5d, + 0x67, 0x0b, 0x60, 0xf8, 0x6f, 0x13, 0x0e, 0xbe, 0x27, 0x51, 0x48, 0x89, 0x12, 0x72, 0xb2, 0x08, + 0x31, 0x8b, 0x17, 0x82, 0xc7, 0x0c, 0xbd, 0x82, 0x9e, 0x2b, 0x4f, 0x19, 0xa1, 0xac, 0x5e, 0xda, + 0x7e, 0x7a, 0xb3, 0xec, 0xf7, 0x78, 0x3d, 0x85, 0xd7, 0xb9, 0x55, 0x79, 0xed, 0xec, 0xba, 0xbc, + 0x48, 0xe1, 0x75, 0x2e, 0x3a, 0x01, 0xe4, 0xca, 0x55, 0x5f, 0x65, 0x03, 0x4f, 0x74, 0x85, 0x67, + 0x37, 0xcb, 0x3e, 0xe2, 0xf7, 0xb2, 0xf8, 0x01, 0xc5, 0x5a, 0x9d, 0xb2, 0x93, 0x8d, 0x07, 0xeb, + 0x94, 0xcd, 0x3c, 0xa0, 0x40, 0x43, 0x68, 0x63, 0xa2, 0x42, 0x1e, 0x98, 0xad, 0x81, 0x71, 0xd8, + 0xb4, 0xe1, 0x66, 0xd9, 0x6f, 0x4b, 0x8d, 0xe0, 0x22, 0x83, 0x46, 0x00, 0xaf, 0xd9, 0x7c, 0x51, + 0xf0, 0xda, 0x9a, 0xb7, 0x7b, 0xb3, 0xec, 0x83, 0x5a, 0xa1, 0xb8, 0xc2, 0x18, 0xfe, 0xb9, 0x09, + 0xbd, 0xb3, 0xba, 0x83, 0xd1, 0x10, 0xba, 0xf6, 0xa9, 0x77, 0x96, 0x4c, 0xa3, 0xd0, 0xff, 0x96, + 0xfd, 0xa2, 0x47, 0xde, 0xc5, 0x35, 0x0c, 0x7d, 0x04, 0x7b, 0x9e, 0x3f, 0xe3, 0x42, 0xca, 0x3b, + 0x5e, 0x53, 0xf3, 0xee, 0xe1, 0xe8, 0x03, 0xd8, 0xc1, 0xec, 0x0d, 0x91, 0x74, 0x42, 0xa9, 0x2c, + 0x47, 0xd8, 0xc5, 0x75, 0x10, 0xfd, 0x04, 0x2d, 0x4f, 0x91, 0xf3, 0x7c, 0x30, 0x5d, 0xfb, 0xf8, + 0x8f, 0xbf, 0xfa, 0x93, 0x39, 0x51, 0xb3, 0xf1, 0x34, 0x0c, 0x46, 0x0e, 0x57, 0x5f, 0x54, 0x56, + 0xe9, 0x38, 0x92, 0x82, 0x53, 0x97, 0xa9, 0x37, 0x42, 0x9e, 0x8f, 0x99, 0x8e, 0x5e, 0x04, 0x62, + 0x4c, 0xb3, 0x05, 0xb4, 0xc3, 0xc0, 0xe1, 0xea, 0x88, 0xc4, 0x8a, 0x49, 0x9c, 0xd7, 0x44, 0x2f, + 0x61, 0xeb, 0x1b, 0x12, 0x46, 0xda, 0xdc, 0x2d, 0x6d, 0xee, 0xfd, 0x8a, 0xb9, 0xf3, 0xb5, 0x29, + 0xdc, 0xbd, 0x22, 0xa2, 0x57, 0xb0, 0x73, 0x46, 0x62, 0x55, 0xc6, 0xb1, 0xd9, 0x1e, 0x3c, 0x79, + 0x4c, 0x59, 0x67, 0xa3, 0x0f, 0x61, 0xf7, 0x28, 0x91, 0x92, 0x71, 0xe5, 0xcd, 0x88, 0xa4, 0x0e, + 0x35, 0x37, 0xb5, 0xf1, 0xd7, 0x50, 0x34, 0x80, 0x6d, 0x97, 0xbd, 0x5d, 0x91, 0xb6, 0x34, 0xa9, + 0x0a, 0xa1, 0x8f, 0x61, 0xdf, 0x15, 0x94, 0x39, 0xfc, 0x07, 0x12, 0x66, 0xaf, 0x76, 0x1a, 0xc6, + 0xca, 0xec, 0x0c, 0x8c, 0xc3, 0x2d, 0x7c, 0x3f, 0x91, 0x8d, 0xfb, 0x3b, 0xae, 0xaf, 0x4d, 0x5d, + 0xc1, 0x7d, 0x66, 0x82, 0xfe, 0x52, 0xd4, 0x41, 0xe4, 0x54, 0x56, 0xae, 0x34, 0x2f, 0x51, 0xcc, + 0xdc, 0xd6, 0xd3, 0xe9, 0x15, 0x77, 0x2c, 0xd7, 0xbb, 0xb8, 0xe1, 0x83, 0x12, 0x74, 0x04, 0xfb, + 0xf5, 0x45, 0xcc, 0xea, 0x74, 0x1f, 0xab, 0x73, 0x9f, 0x8f, 0x9e, 0xad, 0xcc, 0xbd, 0xa3, 0x07, + 0x50, 0x1a, 0xfa, 0x00, 0x5a, 0xf9, 0x2d, 0x76, 0xf3, 0xef, 0x5d, 0xde, 0xbd, 0x55, 0xb3, 0x79, + 0x4f, 0x2b, 0x2a, 0x08, 0x12, 0xd0, 0x9b, 0xf8, 0x7e, 0x32, 0x4f, 0x22, 0xa2, 0x18, 0x3d, 0x61, + 0x2c, 0x36, 0xf7, 0xfe, 0x4f, 0x5b, 0xad, 0x57, 0x47, 0x5f, 0xc2, 0x73, 0x37, 0x99, 0x7b, 0x2c, + 0x62, 0xbe, 0x62, 0xd4, 0xe1, 0xc5, 0xd5, 0xec, 0x48, 0xf8, 0xe7, 0xb1, 0xb9, 0xaf, 0x1b, 0x7c, + 0x84, 0x91, 0x99, 0xc0, 0xe1, 0x94, 0xbd, 0x75, 0xb8, 0x7e, 0x5c, 0x94, 0x9b, 0xa0, 0x02, 0x21, + 0x04, 0x1b, 0x3a, 0xf5, 0x74, 0x60, 0x1c, 0x76, 0xb0, 0xfe, 0x6f, 0x7f, 0x75, 0x71, 0x65, 0x35, + 0x2e, 0xaf, 0xac, 0xc6, 0xed, 0x95, 0x65, 0xfc, 0x9a, 0x5a, 0xc6, 0xef, 0xa9, 0x65, 0xbc, 0x4b, + 0x2d, 0xe3, 0x22, 0xb5, 0x8c, 0xcb, 0xd4, 0x32, 0xfe, 0x4e, 0x2d, 0xe3, 0x9f, 0xd4, 0x6a, 0xdc, + 0xa6, 0x96, 0xf1, 0xdb, 0xb5, 0xd5, 0xb8, 0xb8, 0xb6, 0x1a, 0x97, 0xd7, 0x56, 0xe3, 0xc7, 0x56, + 0xac, 0x88, 0x62, 0xd3, 0xb6, 0x7e, 0x9e, 0x97, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x0d, 0x3b, + 0x3a, 0xdc, 0xc6, 0x06, 0x00, 0x00, } func (this *TimeStamp) Equal(that interface{}) bool { @@ -737,6 +702,12 @@ func (this 
*PeerAccountData) Equal(that interface{}) bool { if this.NumSelectedInSuccessBlocks != that1.NumSelectedInSuccessBlocks { return false } + if this.IndexInList != that1.IndexInList { + return false + } + if this.List != that1.List { + return false + } return true } func (this *TimeStamp) GoString() string { @@ -791,7 +762,7 @@ func (this *PeerAccountData) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 21) + s := make([]string, 0, 23) s = append(s, "&state.PeerAccountData{") s = append(s, "BLSPublicKey: "+fmt.Sprintf("%#v", this.BLSPublicKey)+",\n") s = append(s, "SchnorrPublicKey: "+fmt.Sprintf("%#v", this.SchnorrPublicKey)+",\n") @@ -816,6 +787,8 @@ func (this *PeerAccountData) GoString() string { s = append(s, "TempRating: "+fmt.Sprintf("%#v", this.TempRating)+",\n") s = append(s, "AccumulatedFees: "+fmt.Sprintf("%#v", this.AccumulatedFees)+",\n") s = append(s, "NumSelectedInSuccessBlocks: "+fmt.Sprintf("%#v", this.NumSelectedInSuccessBlocks)+",\n") + s = append(s, "IndexInList: "+fmt.Sprintf("%#v", this.IndexInList)+",\n") + s = append(s, "List: "+fmt.Sprintf("%#v", this.List)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -1018,14 +991,14 @@ func (m *PeerAccountData) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x1 i-- - dAtA[i] = 0xaa + dAtA[i] = 0x9a } if m.IndexInList != 0 { i = encodeVarintPeerAccountData(dAtA, i, uint64(m.IndexInList)) i-- dAtA[i] = 0x1 i-- - dAtA[i] = 0xa0 + dAtA[i] = 0x90 } if m.NumSelectedInSuccessBlocks != 0 { i = encodeVarintPeerAccountData(dAtA, i, uint64(m.NumSelectedInSuccessBlocks)) @@ -2361,7 +2334,7 @@ func (m *PeerAccountData) Unmarshal(dAtA []byte) error { break } } - case 20: + case 18: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field IndexInList", wireType) } @@ -2375,12 +2348,12 @@ func (m *PeerAccountData) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.IndexInList |= int32(b&0x7F) << shift + m.IndexInList |= uint32(b&0x7F) << shift if b < 0x80 { break } } - case 21: + case 19: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field List", wireType) } diff --git a/data/state/proto/peerAccountData.proto b/data/state/proto/peerAccountData.proto index adc3facefb5..144a10608e6 100644 --- a/data/state/proto/peerAccountData.proto +++ b/data/state/proto/peerAccountData.proto @@ -58,6 +58,6 @@ message PeerAccountData { uint32 TempRating = 15; bytes AccumulatedFees = 16 [(gogoproto.casttypewith) = "math/big.Int;github.com/ElrondNetwork/elrond-go/data.BigIntCaster"]; uint32 NumSelectedInSuccessBlocks = 17; - int32 IndexInList = 18; + uint32 IndexInList = 18; string List = 19; } diff --git a/dataRetriever/factory/dataPoolFactory.go b/dataRetriever/factory/dataPoolFactory.go index 94db829412a..ac888fd2c63 100644 --- a/dataRetriever/factory/dataPoolFactory.go +++ b/dataRetriever/factory/dataPoolFactory.go @@ -1,6 +1,7 @@ package factory import ( + logger "github.com/ElrondNetwork/elrond-go-logger" "github.com/ElrondNetwork/elrond-go/config" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool" @@ -8,7 +9,6 @@ import ( txPoolFactory "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/txpool" "github.com/ElrondNetwork/elrond-go/dataRetriever/shardedData" "github.com/ElrondNetwork/elrond-go/dataRetriever/txpool" - "github.com/ElrondNetwork/elrond-go/logger" "github.com/ElrondNetwork/elrond-go/process/economics" "github.com/ElrondNetwork/elrond-go/sharding" 
"github.com/ElrondNetwork/elrond-go/storage/factory" diff --git a/epochStart/bootstrap/disabled/disabledAccountsAdapter.go b/epochStart/bootstrap/disabled/disabledAccountsAdapter.go index 180f18f4d4e..291e2ee9cb7 100644 --- a/epochStart/bootstrap/disabled/disabledAccountsAdapter.go +++ b/epochStart/bootstrap/disabled/disabledAccountsAdapter.go @@ -13,21 +13,25 @@ func NewAccountsAdapter() *accountsAdapter { return &accountsAdapter{} } -// GetAccountWithJournal - -func (a *accountsAdapter) GetAccountWithJournal(_ state.AddressContainer) (state.AccountHandler, error) { +// LoadAccount - +func (a *accountsAdapter) LoadAccount(_ state.AddressContainer) (state.AccountHandler, error) { return nil, nil } +// SaveAccount - +func (a *accountsAdapter) SaveAccount(_ state.AccountHandler) error { + return nil +} + +// PruneTrie - +func (a *accountsAdapter) PruneTrie(_ []byte, _ data.TriePruningIdentifier) { +} + // GetExistingAccount - func (a *accountsAdapter) GetExistingAccount(_ state.AddressContainer) (state.AccountHandler, error) { return nil, nil } -// HasAccount - -func (a *accountsAdapter) HasAccount(_ state.AddressContainer) (bool, error) { - return false, nil -} - // RemoveAccount - func (a *accountsAdapter) RemoveAccount(_ state.AddressContainer) error { return nil @@ -58,26 +62,6 @@ func (a *accountsAdapter) RecreateTrie(_ []byte) error { return nil } -// PutCode - -func (a *accountsAdapter) PutCode(_ state.AccountHandler, _ []byte) error { - return nil -} - -// RemoveCode - -func (a *accountsAdapter) RemoveCode(_ []byte) error { - return nil -} - -// SaveDataTrie - -func (a *accountsAdapter) SaveDataTrie(_ state.AccountHandler) error { - return nil -} - -// PruneTrie - -func (a *accountsAdapter) PruneTrie(_ []byte, _ data.TriePruningIdentifier) error { - return nil -} - // CancelPrune - func (a *accountsAdapter) CancelPrune(_ []byte, _ data.TriePruningIdentifier) { return diff --git a/epochStart/bootstrap/nodesCoordinator.go b/epochStart/bootstrap/nodesCoordinator.go index 399647d4da0..d3876ac45a4 100644 --- a/epochStart/bootstrap/nodesCoordinator.go +++ b/epochStart/bootstrap/nodesCoordinator.go @@ -330,7 +330,7 @@ func (n *nodesCoordinator) updateAccountsForGivenMap( string(account.PubKey()), shardId, string(list), - int32(index)) + uint32(index)) if err != nil { log.Warn("error while updating list and index for peer", "error", err, @@ -342,14 +342,15 @@ func (n *nodesCoordinator) updateAccountsForGivenMap( return nil } -func (n *nodesCoordinator) updateListAndIndex(pubKey string, shardID uint32, list string, index int32) error { +func (n *nodesCoordinator) updateListAndIndex(pubKey string, shardID uint32, list string, index uint32) error { peer, err := n.getPeerAccount([]byte(pubKey)) if err != nil { log.Debug("error getting peer account", "error", err, "key", pubKey) return err } - return peer.SetListAndIndexWithJournal(shardID, list, index) + peer.SetListAndIndex(shardID, list, index) + return nil } func (n *nodesCoordinator) getPeerAccount(address []byte) (state.PeerAccountHandler, error) { @@ -358,7 +359,7 @@ func (n *nodesCoordinator) getPeerAccount(address []byte) (state.PeerAccountHand return nil, err } - account, err := n.validatorAccountsDB.GetAccountWithJournal(addressContainer) + account, err := n.validatorAccountsDB.LoadAccount(addressContainer) if err != nil { return nil, err } diff --git a/epochStart/bootstrap/process.go b/epochStart/bootstrap/process.go index d7c4587e677..24d94a597d2 100644 --- a/epochStart/bootstrap/process.go +++ b/epochStart/bootstrap/process.go @@ 
-4,6 +4,7 @@ import ( "strconv" "time" + logger "github.com/ElrondNetwork/elrond-go-logger" "github.com/ElrondNetwork/elrond-go/config" "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/core/partitioning" @@ -26,7 +27,6 @@ import ( "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/disabled" factoryInterceptors "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/factory" "github.com/ElrondNetwork/elrond-go/hashing" - "github.com/ElrondNetwork/elrond-go/logger" "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/p2p" "github.com/ElrondNetwork/elrond-go/process" @@ -448,10 +448,7 @@ func (e *epochStartBootstrap) requestAndProcessing() (Parameters, error) { } func (e *epochStartBootstrap) processNodesConfig(pubKey []byte, rootHash []byte) error { - accountFactory, err := factory.NewAccountFactoryCreator(state.ValidatorAccount) - if err != nil { - return err - } + accountFactory := factory.NewAccountCreator() peerAccountsDB, err := state.NewPeerAccountsDB(e.peerAccountTries[string(rootHash)], e.hasher, e.marshalizer, accountFactory) if err != nil { return err diff --git a/integrationTests/mock/listIndexUpdaterStub.go b/integrationTests/mock/listIndexUpdaterStub.go index 0471a84ac23..31c5ae19b76 100644 --- a/integrationTests/mock/listIndexUpdaterStub.go +++ b/integrationTests/mock/listIndexUpdaterStub.go @@ -2,13 +2,13 @@ package mock // ListIndexUpdaterStub - type ListIndexUpdaterStub struct { - UpdateListAndIndexCalled func(pubKey string, list string, index int32) error + UpdateListAndIndexCalled func(pubKey string, shardID uint32, list string, index uint32) error } // UpdateListAndIndex - -func (lius *ListIndexUpdaterStub) UpdateListAndIndex(pubKey string, shardID uint32, list string, index int32) error { +func (lius *ListIndexUpdaterStub) UpdateListAndIndex(pubKey string, shardID uint32, list string, index uint32) error { if lius.UpdateListAndIndexCalled != nil { - return lius.UpdateListAndIndexCalled(pubKey, list, index) + return lius.UpdateListAndIndexCalled(pubKey, shardID, list, index) } return nil diff --git a/process/mock/peerAccountHandlerMock.go b/process/mock/peerAccountHandlerMock.go index 0b64c53220f..f8ecdfb9177 100644 --- a/process/mock/peerAccountHandlerMock.go +++ b/process/mock/peerAccountHandlerMock.go @@ -17,7 +17,17 @@ type PeerAccountHandlerMock struct { GetTempRatingCalled func() uint32 SetAccumulatedFeesCalled func(*big.Int) GetAccumulatedFeesCalled func() *big.Int - SetListAndIndexCalled func(shardID uint32, list string, index int32) + SetListAndIndexCalled func(shardID uint32, list string, index uint32) +} + +// GetList - +func (p *PeerAccountHandlerMock) GetList() string { + return "" +} + +// GetIndex - +func (p *PeerAccountHandlerMock) GetIndex() uint32 { + return 0 } // GetBLSPublicKey - @@ -263,7 +273,7 @@ func (p *PeerAccountHandlerMock) DataTrieTracker() state.DataTrieTracker { } // SetListAndIndex - -func (pahm *PeerAccountHandlerMock) SetListAndIndex(shardID uint32, list string, index int32) { +func (pahm *PeerAccountHandlerMock) SetListAndIndex(shardID uint32, list string, index uint32) { if pahm.SetListAndIndexCalled != nil { pahm.SetListAndIndexCalled(shardID, list, index) } diff --git a/process/mock/raterMock.go b/process/mock/raterMock.go index 186dfd48d42..85826dd06b9 100644 --- a/process/mock/raterMock.go +++ b/process/mock/raterMock.go @@ -21,7 +21,7 @@ type RaterMock struct { ComputeIncreaseValidatorCalled func(val uint32) uint32 ComputeDecreaseValidatorCalled func(val uint32) 
uint32 GetChancesCalled func(val uint32) uint32 - UpdateListAndIndexCalled func(pubKey string, list string, index int32) error + UpdateListAndIndexCalled func(pubKey string, shardID uint32, list string, index uint32) error RatingReader sharding.RatingReader } @@ -116,13 +116,13 @@ func (rm *RaterMock) GetChance(rating uint32) uint32 { } // SetListIndexUpdater - -func (rm *RaterMock) SetListIndexUpdater(updater sharding.ListIndexUpdaterHandler) { +func (rm *RaterMock) SetListIndexUpdater(_ sharding.ListIndexUpdaterHandler) { } // UpdateListAndIndex - -func (rm *RaterMock) UpdateListAndIndex(pubKey string, shardID uint32, list string, index int32) error { +func (rm *RaterMock) UpdateListAndIndex(pubKey string, shardID uint32, list string, index uint32) error { if rm.UpdateListAndIndexCalled != nil { - return rm.UpdateListAndIndexCalled(pubKey, list, index) + return rm.UpdateListAndIndexCalled(pubKey, shardID, list, index) } return nil diff --git a/process/peer/listIndexUpdater.go b/process/peer/listIndexUpdater.go index cec9c6d0a1d..a379d3bda07 100644 --- a/process/peer/listIndexUpdater.go +++ b/process/peer/listIndexUpdater.go @@ -2,11 +2,11 @@ package peer // ListIndexUpdater will handle the updating of list type and the index for a peer type ListIndexUpdater struct { - updateListAndIndex func(pubKey string, shardID uint32, list string, index int32) error + updateListAndIndex func(pubKey string, shardID uint32, list string, index uint32) error } // UpdateListAndIndex will update the list and the index for a given peer -func (liu *ListIndexUpdater) UpdateListAndIndex(pubKey string, shardID uint32, list string, index int32) error { +func (liu *ListIndexUpdater) UpdateListAndIndex(pubKey string, shardID uint32, list string, index uint32) error { return liu.updateListAndIndex(pubKey, shardID, list, index) } diff --git a/process/peer/process.go b/process/peer/process.go index d44399bbff4..3638df12f62 100644 --- a/process/peer/process.go +++ b/process/peer/process.go @@ -828,7 +828,7 @@ func (vs *validatorStatistics) updateRatingFromTempRating(pks []string) error { } // updateListAndIndex updates the list and the index for a given public key -func (vs *validatorStatistics) updateListAndIndex(pubKey string, shardID uint32, list string, index int32) error { +func (vs *validatorStatistics) updateListAndIndex(pubKey string, shardID uint32, list string, index uint32) error { peer, err := vs.GetPeerAccount([]byte(pubKey)) if err != nil { log.Debug("error getting peer account", "error", err, "key", pubKey) diff --git a/process/rating/blockSigningRater.go b/process/rating/blockSigningRater.go index cb743cf766c..3167fc54eba 100644 --- a/process/rating/blockSigningRater.go +++ b/process/rating/blockSigningRater.go @@ -146,7 +146,7 @@ func (bsr *BlockSigningRaterAndListIndexer) ComputeDecreaseValidator(val uint32) } // UpdateListAndIndex will update the list and the index for a peer -func (bsr *BlockSigningRaterAndListIndexer) UpdateListAndIndex(pubKey string, shardID uint32, list string, index int32) error { +func (bsr *BlockSigningRaterAndListIndexer) UpdateListAndIndex(pubKey string, shardID uint32, list string, index uint32) error { return bsr.ListIndexUpdaterHandler.UpdateListAndIndex(pubKey, shardID, list, index) } diff --git a/process/rating/disabledListIndexUpdater.go b/process/rating/disabledListIndexUpdater.go index 5cfb4988944..bb17fd07d93 100644 --- a/process/rating/disabledListIndexUpdater.go +++ b/process/rating/disabledListIndexUpdater.go @@ -5,7 +5,7 @@ type DisabledListIndexUpdater struct 
{ } // UpdateListAndIndex will return nil -func (n *DisabledListIndexUpdater) UpdateListAndIndex(pubKey string, shardID uint32, list string, index int32) error { +func (n *DisabledListIndexUpdater) UpdateListAndIndex(_ string, _ uint32, _ string, _ uint32) error { return nil } diff --git a/sharding/indexHashedNodesCoordinator.go b/sharding/indexHashedNodesCoordinator.go index 7d818299268..fcc4e448f1a 100644 --- a/sharding/indexHashedNodesCoordinator.go +++ b/sharding/indexHashedNodesCoordinator.go @@ -673,7 +673,7 @@ func (ihgs *indexHashedNodesCoordinator) updatePeerAccountsForGivenMap( string(account.PubKey()), shardId, string(list), - int32(index)) + uint32(index)) if err != nil { log.Warn("error while updating list and index for peer", "error", err, diff --git a/sharding/indexHashedNodesCoordinatorRegistry.go b/sharding/indexHashedNodesCoordinatorRegistry.go index 01eab0c0eef..6e8872f0038 100644 --- a/sharding/indexHashedNodesCoordinatorRegistry.go +++ b/sharding/indexHashedNodesCoordinatorRegistry.go @@ -4,6 +4,8 @@ import ( "encoding/json" "fmt" "strconv" + + "github.com/ElrondNetwork/elrond-go/core" ) const keyPrefix = "indexHashed_" diff --git a/sharding/indexHashedNodesCoordinatorWithRater.go b/sharding/indexHashedNodesCoordinatorWithRater.go index f1c14b25501..d4a216fe1f6 100644 --- a/sharding/indexHashedNodesCoordinatorWithRater.go +++ b/sharding/indexHashedNodesCoordinatorWithRater.go @@ -1,8 +1,6 @@ package sharding import ( - "encoding/json" - "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/core/check" ) @@ -152,37 +150,3 @@ func (ihgs *indexHashedNodesCoordinatorWithRater) expandEligibleList(validators return validatorList, nil } - -// LoadState loads the nodes coordinator state from the used boot storage -func (ihgs *indexHashedNodesCoordinatorWithRater) LoadState(key []byte) error { - ncInternalkey := append([]byte(core.NodesCoordinatorRegistryKeyPrefix), key...) 
- - log.Debug("getting nodes coordinator config", "key", ncInternalkey) - - data, err := ihgs.bootStorer.Get(ncInternalkey) - if err != nil { - return err - } - - config := &NodesCoordinatorRegistry{} - err = json.Unmarshal(data, config) - if err != nil { - return err - } - - ihgs.mutSavedStateKey.Lock() - ihgs.savedStateKey = key - ihgs.mutSavedStateKey.Unlock() - - err = ihgs.SetConfig(config) - if err != nil { - return err - } - - err = ihgs.expandAllLists(config.CurrentEpoch) - if err != nil { - return err - } - - return nil -} diff --git a/sharding/interface.go b/sharding/interface.go index c0358cc6bdf..8f4fe085abe 100644 --- a/sharding/interface.go +++ b/sharding/interface.go @@ -30,7 +30,6 @@ type NodesCoordinator interface { GetValidatorWithPublicKey(publicKey []byte, epoch uint32) (validator Validator, shardId uint32, err error) UpdatePeersListAndIndex() error LoadState(key []byte) error - SetConfig(config *NodesCoordinatorRegistry) error GetSavedStateKey() []byte ShardIdForEpoch(epoch uint32) (uint32, error) GetConsensusWhitelistedNodes(epoch uint32) (map[string]struct{}, error) @@ -88,7 +87,7 @@ type PeerAccountListAndRatingHandler interface { //GetChance returns the chances for the the rating GetChance(uint32) uint32 // UpdateListAndIndex updated the list and the index for a peer - UpdateListAndIndex(pubKey string, shardID uint32, list string, index int32) error + UpdateListAndIndex(pubKey string, shardID uint32, list string, index uint32) error //GetStartRating gets the start rating values GetStartRating() uint32 //ComputeIncreaseProposer computes the new rating for the increaseLeader @@ -104,7 +103,7 @@ type PeerAccountListAndRatingHandler interface { // ListIndexUpdaterHandler defines what a component which can update the list and index for a peer should do type ListIndexUpdaterHandler interface { // UpdateListAndIndex updated the list and the index for a peer - UpdateListAndIndex(pubKey string, shardID uint32, list string, index int32) error + UpdateListAndIndex(pubKey string, shardID uint32, list string, index uint32) error //IsInterfaceNil verifies if the interface is nil IsInterfaceNil() bool } diff --git a/sharding/mock/listIndexUpdaterStub.go b/sharding/mock/listIndexUpdaterStub.go index 0471a84ac23..31c5ae19b76 100644 --- a/sharding/mock/listIndexUpdaterStub.go +++ b/sharding/mock/listIndexUpdaterStub.go @@ -2,13 +2,13 @@ package mock // ListIndexUpdaterStub - type ListIndexUpdaterStub struct { - UpdateListAndIndexCalled func(pubKey string, list string, index int32) error + UpdateListAndIndexCalled func(pubKey string, shardID uint32, list string, index uint32) error } // UpdateListAndIndex - -func (lius *ListIndexUpdaterStub) UpdateListAndIndex(pubKey string, shardID uint32, list string, index int32) error { +func (lius *ListIndexUpdaterStub) UpdateListAndIndex(pubKey string, shardID uint32, list string, index uint32) error { if lius.UpdateListAndIndexCalled != nil { - return lius.UpdateListAndIndexCalled(pubKey, list, index) + return lius.UpdateListAndIndexCalled(pubKey, shardID, list, index) } return nil From e4b048792d019be83ffb6544e15bc1db168ea698 Mon Sep 17 00:00:00 2001 From: Iulian Pascalau Date: Fri, 27 Mar 2020 16:42:49 +0200 Subject: [PATCH 52/61] fixed network sharding integration tests --- integrationTests/p2p/networkSharding/networkSharding_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integrationTests/p2p/networkSharding/networkSharding_test.go b/integrationTests/p2p/networkSharding/networkSharding_test.go index 
2fd2ec0d0e6..2d6fe98b0de 100644 --- a/integrationTests/p2p/networkSharding/networkSharding_test.go +++ b/integrationTests/p2p/networkSharding/networkSharding_test.go @@ -124,7 +124,7 @@ func stopNodes(advertiser p2p.Messenger, nodesMap map[uint32][]*integrationTests func startNodes(nodesMap map[uint32][]*integrationTests.TestP2PNode) { for _, nodes := range nodesMap { for _, n := range nodes { - n.Node.Start() + _ = n.Messenger.Bootstrap() } } } From b456da30aca6d7b5f480fc217e2fad4a0438cb01 Mon Sep 17 00:00:00 2001 From: bogdan-rosianu Date: Fri, 27 Mar 2020 17:32:11 +0200 Subject: [PATCH 53/61] logs + configs --- epochStart/bootstrap/metaStorageHandler.go | 16 +++++- epochStart/bootstrap/shardStorageHandler.go | 1 + .../startInEpoch/startInEpoch_test.go | 55 +++++++++++++++++-- 3 files changed, 66 insertions(+), 6 deletions(-) diff --git a/epochStart/bootstrap/metaStorageHandler.go b/epochStart/bootstrap/metaStorageHandler.go index 4d80aecdc45..d1001ec5d17 100644 --- a/epochStart/bootstrap/metaStorageHandler.go +++ b/epochStart/bootstrap/metaStorageHandler.go @@ -3,6 +3,7 @@ package bootstrap import ( "encoding/json" "fmt" + "strconv" "github.com/ElrondNetwork/elrond-go/config" "github.com/ElrondNetwork/elrond-go/core" @@ -108,7 +109,19 @@ func (msh *metaStorageHandler) SaveDataToStorage(components *ComponentsNeededFor return err } - err = bootStorer.Put([]byte(core.HighestRoundFromBootStorage), bootStrapDataBytes) + round := int64(components.EpochStartMetaBlock.Round) + roundNum := bootstrapStorage.RoundNum{Num: round} + roundNumBytes, err := msh.marshalizer.Marshal(&roundNum) + if err != nil { + return err + } + + err = bootStorer.Put([]byte(core.HighestRoundFromBootStorage), roundNumBytes) + if err != nil { + return err + } + key := []byte(strconv.FormatInt(round, 10)) + err = bootStorer.Put(key, bootStrapDataBytes) if err != nil { return err } @@ -153,6 +166,7 @@ func (msh *metaStorageHandler) saveLastHeader(metaBlock *block.MetaBlock) (boots bootstrapHdrInfo := bootstrapStorage.BootstrapHeaderInfo{ ShardId: core.MetachainShardId, + Epoch: metaBlock.Epoch, Nonce: metaBlock.Nonce, Hash: lastHeaderHash, } diff --git a/epochStart/bootstrap/shardStorageHandler.go b/epochStart/bootstrap/shardStorageHandler.go index 976e6b60b52..28284c7bdf5 100644 --- a/epochStart/bootstrap/shardStorageHandler.go +++ b/epochStart/bootstrap/shardStorageHandler.go @@ -249,6 +249,7 @@ func (ssh *shardStorageHandler) saveLastHeader(shardHeader *block.Header) (boots bootstrapHdrInfo := bootstrapStorage.BootstrapHeaderInfo{ ShardId: core.MetachainShardId, + Epoch: shardHeader.Epoch, Nonce: shardHeader.Nonce, Hash: lastHeaderHash, } diff --git a/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go b/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go index 9534c60c218..2b7fca8a480 100644 --- a/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go +++ b/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go @@ -4,16 +4,22 @@ import ( "context" "encoding/hex" "math/big" + "os" + "strconv" "testing" "time" "github.com/ElrondNetwork/elrond-go/config" + "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap" "github.com/ElrondNetwork/elrond-go/integrationTests" "github.com/ElrondNetwork/elrond-go/integrationTests/mock" "github.com/ElrondNetwork/elrond-go/integrationTests/multiShard/endOfEpoch" + 
"github.com/ElrondNetwork/elrond-go/process/block/bootstrapStorage" "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/storage/factory" + "github.com/ElrondNetwork/elrond-go/storage/storageUnit" "github.com/stretchr/testify/assert" ) @@ -123,6 +129,11 @@ func TestStartInEpochForAShardNodeInMultiShardedEnvironment(t *testing.T) { } nodesConfig.SetNumberOfShards(uint32(numOfShards)) + defer func() { + errRemoveDir := os.RemoveAll("Epoch_0") + assert.NoError(t, errRemoveDir) + }() + messenger := integrationTests.CreateMessengerWithKadDht(context.Background(), integrationTests.GetConnectableAddress(advertiser)) _ = messenger.Bootstrap() time.Sleep(integrationTests.P2pBootstrapDelay) @@ -149,11 +160,45 @@ func TestStartInEpochForAShardNodeInMultiShardedEnvironment(t *testing.T) { epochStartBootstrap, err := bootstrap.NewEpochStartBootstrap(argsBootstrapHandler) assert.Nil(t, err) - params, err := epochStartBootstrap.Bootstrap() + _, err = epochStartBootstrap.Bootstrap() assert.NoError(t, err) - assert.Equal(t, epoch, params.Epoch) - assert.Equal(t, uint32(0), params.SelfShardId) - assert.Equal(t, uint32(2), params.NumOfShards) + //assert.Equal(t, epoch, params.Epoch) + //assert.Equal(t, uint32(0), params.SelfShardId) + //assert.Equal(t, uint32(2), params.NumOfShards) + + shardC, _ := sharding.NewMultiShardCoordinator(2, 0) + + storageFactory, err := factory.NewStorageServiceFactory( + &generalConfig, + shardC, + &mock.PathManagerStub{}, + &mock.EpochStartNotifierStub{}, + epoch) + assert.NoError(t, err) + storageServiceShard, err := storageFactory.CreateForShard() + assert.NoError(t, err) + assert.NotNil(t, storageServiceShard) + + bootstrapUnit := storageServiceShard.GetStorer(dataRetriever.BootstrapUnit) + assert.NotNil(t, bootstrapUnit) + + highestRound, err := bootstrapUnit.Get([]byte(core.HighestRoundFromBootStorage)) + assert.NoError(t, err) + var roundFromStorage bootstrapStorage.RoundNum + err = integrationTests.TestMarshalizer.Unmarshal(&roundFromStorage, highestRound) + assert.NoError(t, err) + + roundInt64 := roundFromStorage.Num + assert.Equal(t, int64(22), roundInt64) + + key := []byte(strconv.FormatInt(roundInt64, 10)) + bootstrapDataBytes, err := bootstrapUnit.Get(key) + assert.NoError(t, err) + + var bd bootstrapStorage.BootstrapData + err = integrationTests.TestMarshalizer.Unmarshal(&bd, bootstrapDataBytes) + assert.NoError(t, err) + assert.Equal(t, epoch, bd.LastHeader.Epoch) } func convertToSlice(originalMap map[uint32][]*integrationTests.TestProcessorNode) []*integrationTests.TestProcessorNode { @@ -400,7 +445,7 @@ func getGeneralConfig() config.Config { }, DB: config.DBConfig{ FilePath: "BootstrapData", - Type: "MemoryDB", + Type: string(storageUnit.LvlDBSerial), BatchDelaySeconds: 30, MaxBatchSize: 6, MaxOpenFiles: 10, From 250aadae2e1f43a931463557acbe332d6d529041 Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Mon, 30 Mar 2020 15:30:45 +0300 Subject: [PATCH 54/61] new trie syncer with Context. 
--- data/interface.go | 3 +- data/syncer/baseAccountsSyncer.go | 4 +- data/syncer/userAccountsSyncer.go | 8 +- data/syncer/validatorAccountsSyncer.go | 6 + data/trie/branchNode.go | 14 +- data/trie/branchNode_test.go | 27 +-- data/trie/errors.go | 3 + data/trie/extensionNode.go | 14 +- data/trie/extensionNode_test.go | 23 +- data/trie/interface.go | 2 +- data/trie/leafNode.go | 5 +- data/trie/leafNode_test.go | 12 +- data/trie/sync.go | 225 +++++++++++------- data/trie/sync_test.go | 9 +- .../state/stateTrieSync/stateTrieSync_test.go | 10 +- update/interface.go | 3 +- update/mock/trieSyncersStub.go | 8 +- 17 files changed, 224 insertions(+), 152 deletions(-) diff --git a/data/interface.go b/data/interface.go index 04c0080dff9..21e970e189f 100644 --- a/data/interface.go +++ b/data/interface.go @@ -1,6 +1,7 @@ package data import ( + "context" "math/big" "github.com/ElrondNetwork/elrond-go/config" @@ -152,7 +153,7 @@ type DBRemoveCacher interface { // TrieSyncer synchronizes the trie, asking on the network for the missing nodes type TrieSyncer interface { - StartSyncing(rootHash []byte) error + StartSyncing(rootHash []byte, ctx context.Context) error Trie() Trie IsInterfaceNil() bool } diff --git a/data/syncer/baseAccountsSyncer.go b/data/syncer/baseAccountsSyncer.go index a758f1a37d7..496fc47a354 100644 --- a/data/syncer/baseAccountsSyncer.go +++ b/data/syncer/baseAccountsSyncer.go @@ -1,6 +1,7 @@ package syncer import ( + "context" "fmt" "sync" "time" @@ -26,6 +27,7 @@ type baseAccountsSyncer struct { shardId uint32 cacher storage.Cacher rootHash []byte + ctx context.Context } const minWaitTime = time.Second @@ -78,7 +80,7 @@ func (b *baseAccountsSyncer) syncMainTrie(rootHash []byte, trieTopic string) err } b.trieSyncers[string(rootHash)] = trieSyncer - err = trieSyncer.StartSyncing(rootHash) + err = trieSyncer.StartSyncing(rootHash, b.ctx) if err != nil { return err } diff --git a/data/syncer/userAccountsSyncer.go b/data/syncer/userAccountsSyncer.go index 8a3cc99fe49..e17037590d3 100644 --- a/data/syncer/userAccountsSyncer.go +++ b/data/syncer/userAccountsSyncer.go @@ -1,6 +1,8 @@ package syncer import ( + "context" + logger "github.com/ElrondNetwork/elrond-go-logger" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/state" @@ -52,6 +54,10 @@ func (u *userAccountsSyncer) SyncAccounts(rootHash []byte) error { u.mutex.Lock() defer u.mutex.Unlock() + ctx, cancel := context.WithTimeout(context.Background(), u.waitTime) + defer cancel() + u.ctx = ctx + err := u.syncMainTrie(rootHash, factory.AccountTrieNodesTopic) if err != nil { return nil @@ -85,7 +91,7 @@ func (u *userAccountsSyncer) syncAccountDataTries(rootHashes [][]byte) error { } u.trieSyncers[string(rootHash)] = trieSyncer - err = trieSyncer.StartSyncing(rootHash) + err = trieSyncer.StartSyncing(rootHash, u.ctx) if err != nil { return err } diff --git a/data/syncer/validatorAccountsSyncer.go b/data/syncer/validatorAccountsSyncer.go index e300ec5d5e2..3effdee7b61 100644 --- a/data/syncer/validatorAccountsSyncer.go +++ b/data/syncer/validatorAccountsSyncer.go @@ -1,6 +1,8 @@ package syncer import ( + "context" + "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/process/factory" @@ -47,5 +49,9 @@ func (v *validatorAccountsSyncer) SyncAccounts(rootHash []byte) error { v.mutex.Lock() defer v.mutex.Unlock() + ctx, cancel := context.WithTimeout(context.Background(), v.waitTime) + defer cancel() + v.ctx = ctx + return 
v.syncMainTrie(rootHash, factory.ValidatorTrieNodesTopic) } diff --git a/data/trie/branchNode.go b/data/trie/branchNode.go index 5b33232a59a..5e77cfcb4fe 100644 --- a/data/trie/branchNode.go +++ b/data/trie/branchNode.go @@ -664,29 +664,29 @@ func (bn *branchNode) setDirty(dirty bool) { bn.dirty = dirty } -func (bn *branchNode) loadChildren(syncer *trieSyncer) error { +func (bn *branchNode) loadChildren(getNode func([]byte) (node, error)) ([][]byte, error) { err := bn.isEmptyOrNil() if err != nil { - return err + return nil, err } + missingChildren := make([][]byte, 0) for i := range bn.EncodedChildren { if len(bn.EncodedChildren[i]) == 0 { continue } var child node - child, err = syncer.getNode(bn.EncodedChildren[i]) + child, err = getNode(bn.EncodedChildren[i]) if err != nil { - return err + missingChildren = append(missingChildren, bn.EncodedChildren[i]) + continue } bn.children[i] = child } - syncer.interceptedNodes.Remove(bn.hash) - - return nil + return missingChildren, nil } func (bn *branchNode) getAllLeaves(leaves map[string][]byte, key []byte, db data.DBWriteCacher, marshalizer marshal.Marshalizer) error { diff --git a/data/trie/branchNode_test.go b/data/trie/branchNode_test.go index 8ec825d1cef..5fa3d6ee95e 100644 --- a/data/trie/branchNode_test.go +++ b/data/trie/branchNode_test.go @@ -6,7 +6,6 @@ import ( "io/ioutil" "reflect" "testing" - "time" "github.com/ElrondNetwork/elrond-go/config" "github.com/ElrondNetwork/elrond-go/data" @@ -1047,31 +1046,27 @@ func TestBranchNode_loadChildren(t *testing.T) { _ = tr.root.setRootHash() nodes, _ := getEncodedTrieNodesAndHashes(tr) nodesCacher, _ := lrucache.NewCache(100) - - resolver := &mock.RequestHandlerStub{ - RequestTrieNodesCalled: func(shardId uint32, hash []byte, topic string) { - for i := range nodes { - node, _ := NewInterceptedTrieNode(nodes[i], marsh, hasher) - nodesCacher.Put(node.hash, node) - } - }, + for i := range nodes { + node, _ := NewInterceptedTrieNode(nodes[i], marsh, hasher) + nodesCacher.Put(node.hash, node) } - syncer, _ := NewTrieSyncer(resolver, nodesCacher, tr, time.Second, 0, "trie") - syncer.interceptedNodes.RegisterHandler(func(key []byte) { - syncer.chRcvTrieNodes <- true - }) firstChildIndex := 5 secondChildIndex := 7 bn := getCollapsedBn(t, tr.root) - err := bn.loadChildren(syncer) + getNode := func(hash []byte) (node, error) { + cacheData, _ := nodesCacher.Get(hash) + return trieNode(cacheData) + } + + missing, err := bn.loadChildren(getNode) assert.Nil(t, err) assert.NotNil(t, bn.children[firstChildIndex]) assert.NotNil(t, bn.children[secondChildIndex]) - - assert.Equal(t, 5, nodesCacher.Len()) + assert.Equal(t, 0, len(missing)) + assert.Equal(t, 6, nodesCacher.Len()) } func getCollapsedBn(t *testing.T, n node) *branchNode { diff --git a/data/trie/errors.go b/data/trie/errors.go index 6215bb1b4d8..59a835bba78 100644 --- a/data/trie/errors.go +++ b/data/trie/errors.go @@ -66,3 +66,6 @@ var ErrNilPathManager = errors.New("nil path manager") // ErrInvalidTrieTopic signals that invalid trie topic has been provided var ErrInvalidTrieTopic = errors.New("invalid trie topic") + +// ErrNilContext signals that nil context has been provided +var ErrNilContext = errors.New("nil context") diff --git a/data/trie/extensionNode.go b/data/trie/extensionNode.go index a8f1bdb6310..038775fce03 100644 --- a/data/trie/extensionNode.go +++ b/data/trie/extensionNode.go @@ -567,25 +567,23 @@ func (en *extensionNode) setDirty(dirty bool) { en.dirty = dirty } -func (en *extensionNode) loadChildren(syncer *trieSyncer) error { 
+func (en *extensionNode) loadChildren(getNode func([]byte) (node, error)) ([][]byte, error) { err := en.isEmptyOrNil() if err != nil { - return err + return nil, err } if en.EncodedChild == nil { - return ErrNilNode + return nil, ErrNilNode } - child, err := syncer.getNode(en.EncodedChild) + child, err := getNode(en.EncodedChild) if err != nil { - return err + return [][]byte{en.EncodedChild}, nil } en.child = child - syncer.interceptedNodes.Remove(en.hash) - - return nil + return nil, nil } func (en *extensionNode) getAllLeaves(leaves map[string][]byte, key []byte, db data.DBWriteCacher, marshalizer marshal.Marshalizer) error { diff --git a/data/trie/extensionNode_test.go b/data/trie/extensionNode_test.go index f3771f374e6..5a3645ab7e8 100644 --- a/data/trie/extensionNode_test.go +++ b/data/trie/extensionNode_test.go @@ -5,7 +5,6 @@ import ( "fmt" "reflect" "testing" - "time" "github.com/ElrondNetwork/elrond-go/data/mock" "github.com/ElrondNetwork/elrond-go/storage/lrucache" @@ -835,26 +834,22 @@ func TestExtensionNode_loadChildren(t *testing.T) { _ = tr.root.setRootHash() nodes, _ := getEncodedTrieNodesAndHashes(tr) nodesCacher, _ := lrucache.NewCache(100) - resolver := &mock.RequestHandlerStub{ - RequestTrieNodesCalled: func(shardId uint32, hash []byte, topic string) { - for i := range nodes { - node, _ := NewInterceptedTrieNode(nodes[i], marsh, hasher) - nodesCacher.Put(node.hash, node) - } - }, + for i := range nodes { + node, _ := NewInterceptedTrieNode(nodes[i], marsh, hasher) + nodesCacher.Put(node.hash, node) } - syncer, _ := NewTrieSyncer(resolver, nodesCacher, tr, time.Second, 0, "trie") - syncer.interceptedNodes.RegisterHandler(func(key []byte) { - syncer.chRcvTrieNodes <- true - }) en := getCollapsedEn(t, tr.root) - err := en.loadChildren(syncer) + getNode := func(hash []byte) (node, error) { + cacheData, _ := nodesCacher.Get(hash) + return trieNode(cacheData) + } + _, err := en.loadChildren(getNode) assert.Nil(t, err) assert.NotNil(t, en.child) - assert.Equal(t, 3, nodesCacher.Len()) + assert.Equal(t, 4, nodesCacher.Len()) } func getCollapsedEn(t *testing.T, n node) *extensionNode { diff --git a/data/trie/interface.go b/data/trie/interface.go index 4f7101b1531..504674a8f9f 100644 --- a/data/trie/interface.go +++ b/data/trie/interface.go @@ -36,7 +36,7 @@ type node interface { getChildren(db data.DBWriteCacher) ([]node, error) isValid() bool setDirty(bool) - loadChildren(*trieSyncer) error + loadChildren(func([]byte) (node, error)) ([][]byte, error) getAllLeaves(map[string][]byte, []byte, data.DBWriteCacher, marshal.Marshalizer) error getMarshalizer() marshal.Marshalizer diff --git a/data/trie/leafNode.go b/data/trie/leafNode.go index fa0da3ff636..ed5dbb40c85 100644 --- a/data/trie/leafNode.go +++ b/data/trie/leafNode.go @@ -335,9 +335,8 @@ func (ln *leafNode) setDirty(dirty bool) { ln.dirty = dirty } -func (ln *leafNode) loadChildren(syncer *trieSyncer) error { - syncer.interceptedNodes.Remove(ln.hash) - return nil +func (ln *leafNode) loadChildren(_ func([]byte) (node, error)) ([][]byte, error) { + return nil, nil } func (ln *leafNode) getAllLeaves(leaves map[string][]byte, key []byte, _ data.DBWriteCacher, _ marshal.Marshalizer) error { diff --git a/data/trie/leafNode_test.go b/data/trie/leafNode_test.go index 1a4d6abd50c..fe6b6fe9053 100644 --- a/data/trie/leafNode_test.go +++ b/data/trie/leafNode_test.go @@ -6,7 +6,6 @@ import ( "fmt" "reflect" "testing" - "time" "github.com/ElrondNetwork/elrond-go/data/mock" "github.com/ElrondNetwork/elrond-go/hashing" @@ -534,22 
+533,17 @@ func TestLeafNode_loadChildren(t *testing.T) { tr := initTrie() nodes, hashes := getEncodedTrieNodesAndHashes(tr) nodesCacher, _ := lrucache.NewCache(100) - - resolver := &mock.RequestHandlerStub{} for i := range nodes { node, _ := NewInterceptedTrieNode(nodes[i], marsh, hasher) nodesCacher.Put(node.hash, node) } - syncer, _ := NewTrieSyncer(resolver, nodesCacher, tr, time.Second, 0, "trie") - syncer.interceptedNodes.RegisterHandler(func(key []byte) { - syncer.chRcvTrieNodes <- true - }) lnPosition := 5 ln := &leafNode{baseNode: &baseNode{hash: hashes[lnPosition]}} - err := ln.loadChildren(syncer) + missing, err := ln.loadChildren(nil) assert.Nil(t, err) - assert.Equal(t, 5, nodesCacher.Len()) + assert.Equal(t, 6, nodesCacher.Len()) + assert.Equal(t, 0, len(missing)) } //------- deepClone diff --git a/data/trie/sync.go b/data/trie/sync.go index f2f0246d33c..4dcb33a548e 100644 --- a/data/trie/sync.go +++ b/data/trie/sync.go @@ -2,6 +2,7 @@ package trie import ( "bytes" + "context" "sync" "time" @@ -11,16 +12,21 @@ import ( ) type trieSyncer struct { - trie *patriciaMerkleTrie + trie *patriciaMerkleTrie + rootFound bool + rootHash []byte + requestHandler RequestHandler interceptedNodes storage.Cacher - chRcvTrieNodes chan bool - waitTime time.Duration shardId uint32 topic string + waitTime time.Duration + + nodeHashes map[string]struct{} + nodeHashesMutex sync.Mutex - requestedHashes [][]byte - requestedHashesMutex sync.Mutex + receivedNodes map[string]node + receivedNodesMutex sync.Mutex } // NewTrieSyncer creates a new instance of trieSyncer @@ -50,72 +56,137 @@ func NewTrieSyncer( return nil, ErrWrongTypeAssertion } - return &trieSyncer{ + ts := &trieSyncer{ requestHandler: requestHandler, interceptedNodes: interceptedNodes, trie: pmt, - chRcvTrieNodes: make(chan bool), - requestedHashes: make([][]byte, 0), - waitTime: waitTime, + nodeHashes: make(map[string]struct{}), + receivedNodes: make(map[string]node), topic: topic, shardId: shardId, - }, nil + waitTime: waitTime, + } + ts.interceptedNodes.RegisterHandler(ts.trieNodeIntercepted) + + return ts, nil } // StartSyncing completes the trie, asking for missing trie nodes on the network -func (ts *trieSyncer) StartSyncing(rootHash []byte) error { - // TODO: add implementation to try to request for trie nodes for several times before returning with error - +func (ts *trieSyncer) StartSyncing(rootHash []byte, ctx context.Context) error { if len(rootHash) == 0 { return ErrInvalidHash } - ts.interceptedNodes.RegisterHandler(ts.trieNodeIntercepted) - - currentNode, err := ts.getNode(rootHash) - if err != nil { - return err + if ctx == nil { + return ErrNilContext } - ts.trie.root = currentNode - err = ts.trie.root.loadChildren(ts) - if err != nil { - return err - } + ts.nodeHashesMutex.Lock() + ts.nodeHashes = make(map[string]struct{}) + ts.nodeHashes[string(rootHash)] = struct{}{} + ts.nodeHashesMutex.Unlock() - nextNodes, err := ts.trie.root.getChildren(ts.trie.Database()) - if err != nil { - return err - } + ts.rootFound = false + ts.rootHash = rootHash - for len(nextNodes) != 0 { - currentNode, err = ts.getNode(nextNodes[0].getHash()) + for { + err := ts.getNextNodes() if err != nil { return err } - nextNodes = nextNodes[1:] + numRequested := ts.requestNodes() + if numRequested == 0 { + err := ts.trie.Commit() + if err != nil { + return err + } - err = currentNode.loadChildren(ts) - if err != nil { - return err + return nil } - var children []node - children, err = currentNode.getChildren(ts.trie.Database()) - if err != nil { - 
return err + select { + case <-time.After(ts.waitTime): + continue + case <-ctx.Done(): + return ErrTimeIsOut } - nextNodes = append(nextNodes, children...) } +} - err = ts.trie.Commit() - if err != nil { - return err +func (ts *trieSyncer) getNextNodes() error { + var currentNode node + var err error + nextNodes := make([]node, 0) + missingNodes := make([][]byte, 0) + currMissingNodes := make([][]byte, 0) + + newElement := true + + for newElement { + newElement = false + + ts.nodeHashesMutex.Lock() + for nodeHash := range ts.nodeHashes { + currMissingNodes = currMissingNodes[:0] + + currentNode, err = ts.getNode([]byte(nodeHash)) + if err != nil { + continue + } + + if !ts.rootFound && bytes.Equal([]byte(nodeHash), ts.rootHash) { + ts.trie.root = currentNode + } + + currMissingNodes, err = currentNode.loadChildren(ts.getNode) + if err != nil { + return err + } + + if len(currMissingNodes) > 0 { + missingNodes = append(missingNodes, currMissingNodes...) + continue + } + + delete(ts.nodeHashes, nodeHash) + ts.receivedNodesMutex.Lock() + delete(ts.receivedNodes, nodeHash) + ts.receivedNodesMutex.Unlock() + + nextNodes, err = currentNode.getChildren(ts.trie.Database()) + if err != nil { + return err + } + + tmpNewElement := ts.addNew(nextNodes) + newElement = newElement || tmpNewElement + } + ts.nodeHashesMutex.Unlock() } + ts.nodeHashesMutex.Lock() + for _, missingNode := range missingNodes { + ts.nodeHashes[string(missingNode)] = struct{}{} + } + ts.nodeHashesMutex.Unlock() + return nil } +// adds new elements to needed hash map, lock ts.nodeHashesMutex before calling +func (ts *trieSyncer) addNew(nextNodes []node) bool { + newElement := false + for _, nextNode := range nextNodes { + nextHash := string(nextNode.getHash()) + if _, ok := ts.nodeHashes[nextHash]; !ok { + ts.nodeHashes[nextHash] = struct{}{} + newElement = true + } + } + + return newElement +} + // Trie returns the synced trie func (ts *trieSyncer) Trie() data.Trie { return ts.trie @@ -127,13 +198,15 @@ func (ts *trieSyncer) getNode(hash []byte) (node, error) { return trieNode(n) } - err := ts.requestNode(hash) - if err != nil { - return nil, err + ts.receivedNodesMutex.Lock() + node, ok := ts.receivedNodes[string(hash)] + ts.receivedNodesMutex.Unlock() + + if ok { + return node, nil } - n, _ = ts.interceptedNodes.Get(hash) - return trieNode(n) + return nil, ErrNodeNotFound } func trieNode(data interface{}) (node, error) { @@ -145,51 +218,39 @@ func trieNode(data interface{}) (node, error) { return n.node, nil } -func (ts *trieSyncer) requestNode(hash []byte) error { - receivedRequestedHashTrigger := append(hash, hash...) 
- ts.requestedHashesMutex.Lock() - ts.requestedHashes = append(ts.requestedHashes, receivedRequestedHashTrigger) - ts.requestedHashesMutex.Unlock() - - ts.requestHandler.RequestTrieNodes(ts.shardId, hash, ts.topic) - - return ts.waitForTrieNode() -} - -func (ts *trieSyncer) waitForTrieNode() error { - select { - case <-ts.chRcvTrieNodes: - return nil - case <-time.After(ts.waitTime): - return ErrTimeIsOut +func (ts *trieSyncer) requestNodes() uint32 { + ts.nodeHashesMutex.Lock() + numRequested := uint32(len(ts.nodeHashes)) + for hash := range ts.nodeHashes { + ts.requestHandler.RequestTrieNodes(ts.shardId, []byte(hash), ts.topic) } + ts.nodeHashesMutex.Unlock() + + return numRequested } func (ts *trieSyncer) trieNodeIntercepted(hash []byte) { - ts.requestedHashesMutex.Lock() - - if hashInSlice(hash, ts.requestedHashes) { - ts.chRcvTrieNodes <- true - ts.removeRequestedHash(hash) + ts.nodeHashesMutex.Lock() + _, ok := ts.nodeHashes[string(hash)] + ts.nodeHashesMutex.Unlock() + if !ok { + return } - ts.requestedHashesMutex.Unlock() -} -func (ts *trieSyncer) removeRequestedHash(hash []byte) { - for i := range ts.requestedHashes { - if bytes.Equal(ts.requestedHashes[i], hash) { - ts.requestedHashes = append(ts.requestedHashes[:i], ts.requestedHashes[i+1:]...) - } + interceptedData, ok := ts.interceptedNodes.Get(hash) + if !ok { + return } -} -func hashInSlice(hash []byte, hashes [][]byte) bool { - for _, h := range hashes { - if bytes.Equal(h, hash) { - return true - } + node, err := trieNode(interceptedData) + if err != nil { + return } - return false + + ts.receivedNodesMutex.Lock() + defer ts.receivedNodesMutex.Unlock() + + ts.receivedNodes[string(hash)] = node } // IsInterfaceNil returns true if there is no value under the interface diff --git a/data/trie/sync_test.go b/data/trie/sync_test.go index c69297f97ac..9cd89c165bf 100644 --- a/data/trie/sync_test.go +++ b/data/trie/sync_test.go @@ -1,6 +1,7 @@ package trie_test import ( + "context" "io/ioutil" "math/rand" "strconv" @@ -73,10 +74,14 @@ func TestTrieSyncer_StartSyncing(t *testing.T) { } rootHash, _ := syncTrie.Root() - sync, _ := trie.NewTrieSyncer(resolver, interceptedNodesCacher, tr, 10*time.Second, 0, "trie") + sync, _ := trie.NewTrieSyncer(resolver, interceptedNodesCacher, tr, time.Second, 0, "trie") + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - _ = sync.StartSyncing(rootHash) + err := sync.StartSyncing(rootHash, ctx) + + cancel() newTrieRootHash, _ := tr.Root() + assert.Nil(t, err) assert.Equal(t, rootHash, newTrieRootHash) assert.Equal(t, expectedRequests, nrRequests) } diff --git a/integrationTests/state/stateTrieSync/stateTrieSync_test.go b/integrationTests/state/stateTrieSync/stateTrieSync_test.go index 19f544ac537..db00c43c77d 100644 --- a/integrationTests/state/stateTrieSync/stateTrieSync_test.go +++ b/integrationTests/state/stateTrieSync/stateTrieSync_test.go @@ -1,6 +1,7 @@ package stateTrieSync import ( + "context" "fmt" "testing" "time" @@ -65,9 +66,12 @@ func TestNode_RequestInterceptTrieNodesWithMessenger(t *testing.T) { nRequester.ShardCoordinator.SelfId(), ) - waitTime := 5 * time.Second - trieSyncer, _ := trie.NewTrieSyncer(requestHandler, nRequester.DataPool.TrieNodes(), requesterTrie, waitTime, core.MetachainShardId, factory.AccountTrieNodesTopic) - err = trieSyncer.StartSyncing(rootHash) + waitTime := 10 * time.Second + trieSyncer, _ := trie.NewTrieSyncer(requestHandler, nRequester.DataPool.TrieNodes(), requesterTrie, time.Second, core.MetachainShardId, 
factory.AccountTrieNodesTopic) + ctx, cancel := context.WithTimeout(context.Background(), waitTime) + defer cancel() + + err = trieSyncer.StartSyncing(rootHash, ctx) assert.Nil(t, err) newRootHash, _ := requesterTrie.Root() diff --git a/update/interface.go b/update/interface.go index 37c1bf9d0dc..5312c216d09 100644 --- a/update/interface.go +++ b/update/interface.go @@ -1,6 +1,7 @@ package update import ( + "context" "time" "github.com/ElrondNetwork/elrond-go/data" @@ -21,7 +22,7 @@ type StateSyncer interface { // TrieSyncer synchronizes the trie, asking on the network for the missing nodes type TrieSyncer interface { - StartSyncing(rootHash []byte) error + StartSyncing(rootHash []byte, ctx context.Context) error Trie() data.Trie IsInterfaceNil() bool } diff --git a/update/mock/trieSyncersStub.go b/update/mock/trieSyncersStub.go index 97153d4f5dd..db1b0884fb5 100644 --- a/update/mock/trieSyncersStub.go +++ b/update/mock/trieSyncersStub.go @@ -1,6 +1,8 @@ package mock import ( + "context" + "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/update" ) @@ -12,13 +14,13 @@ type TrieSyncersStub struct { ReplaceCalled func(key string, val update.TrieSyncer) error RemoveCalled func(key string) LenCalled func() int - StartSyncingCalled func(rootHash []byte) error + StartSyncingCalled func(rootHash []byte, ctx context.Context) error TrieCalled func() data.Trie } -func (tss *TrieSyncersStub) StartSyncing(rootHash []byte) error { +func (tss *TrieSyncersStub) StartSyncing(rootHash []byte, ctx context.Context) error { if tss.StartSyncingCalled != nil { - return tss.StartSyncingCalled(rootHash) + return tss.StartSyncingCalled(rootHash, ctx) } return nil } From 73db3917a7d414579e8f4ede63008bcc54733d53 Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Mon, 30 Mar 2020 17:09:07 +0300 Subject: [PATCH 55/61] added more implementation. 
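Follow-up to the context-aware trie syncer. The polling cadence is no longer
a constructor argument: NewTrieSyncer drops the waitTime parameter and asks
the request handler instead, through the new RequestInterval() method.
NewResolverRequestHandler now takes the interval explicitly (values below
time.Millisecond are rejected with ErrRequestIntervalTooSmall) and also uses
it when sweeping the requested-items cache. Wiring, following
cmd/node/factory/structs.go (a sketch; argument names are illustrative):

    requestHandler, err := requestHandlers.NewResolverRequestHandler(
        finder,         // resolvers finder
        requestedItems, // requested-items handler
        whiteListHandler,
        MaxTxsToRequest,
        shardID,
        time.Second,    // request interval, exposed via RequestInterval()
    )

The syncer additionally wakes up as soon as an intercepted node is stored
instead of always sleeping the full interval: trieNodeIntercepted pushes to
chanReceivedNew, and the main loop selects on that channel,
time.After(waitTimeBetweenRequests) and ctx.Done(). Before each request round
the channel is drained with the new core.EmptyChannel helper so a stale
wake-up cannot trigger a redundant iteration:

    _ = core.EmptyChannel(ts.chanReceivedNew)
    numRequested := ts.requestNodes()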
--- cmd/node/factory/structs.go | 1 + core/common.go | 14 ++++ core/common_test.go | 66 +++++++++++++++++++ data/mock/requestHandlerStub.go | 7 ++ data/syncer/baseAccountsSyncer.go | 2 +- data/syncer/userAccountsSyncer.go | 2 +- data/trie/interface.go | 2 + data/trie/sync.go | 42 +++++++----- data/trie/sync_test.go | 2 +- dataRetriever/errors.go | 6 +- .../requestHandlers/requestHandler.go | 13 +++- .../requestHandlers/requestHandler_test.go | 38 +++++++++++ epochStart/bootstrap/process.go | 13 +++- epochStart/interface.go | 1 + epochStart/mock/requestHandlerStub.go | 7 ++ integrationTests/mock/requestHandlerStub.go | 7 ++ .../state/stateTrieSync/stateTrieSync_test.go | 3 +- integrationTests/testProcessorNode.go | 2 + node/mock/requestHandlerStub.go | 9 ++- process/block/metablock.go | 2 +- .../block/preprocess/rewardTxPreProcessor.go | 2 +- .../block/preprocess/smartContractResults.go | 2 +- process/block/preprocess/transactions.go | 2 +- process/block/shardblock.go | 2 +- process/common.go | 13 ---- process/common_test.go | 59 ----------------- process/interface.go | 1 + process/mock/requestHandlerStub.go | 7 ++ process/sync/baseSync.go | 2 +- process/sync/metablock.go | 4 +- process/sync/shardblock.go | 4 +- update/factory/trieSyncersContainerFactory.go | 4 +- update/interface.go | 1 + update/mock/requestHandlerStub.go | 13 +++- update/sync/syncHeaders.go | 4 +- update/sync/syncHeadersByHash.go | 2 +- update/sync/syncMiniBlocks.go | 3 +- update/sync/syncTransactions.go | 3 +- 38 files changed, 245 insertions(+), 122 deletions(-) create mode 100644 core/common.go create mode 100644 core/common_test.go diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index f6f7d90b84f..6e444c3cec3 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -674,6 +674,7 @@ func ProcessComponentsFactory(args *processComponentsFactoryArgs) (*Process, err args.whiteListHandler, MaxTxsToRequest, args.shardCoordinator.SelfId(), + time.Second, ) if err != nil { return nil, err diff --git a/core/common.go b/core/common.go new file mode 100644 index 00000000000..ca33765a674 --- /dev/null +++ b/core/common.go @@ -0,0 +1,14 @@ +package core + +// EmptyChannel empties the given channel +func EmptyChannel(ch chan bool) int { + readsCnt := 0 + for { + select { + case <-ch: + readsCnt++ + default: + return readsCnt + } + } +} diff --git a/core/common_test.go b/core/common_test.go new file mode 100644 index 00000000000..572f81480bb --- /dev/null +++ b/core/common_test.go @@ -0,0 +1,66 @@ +package core + +import ( + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestEmptyChannelShouldWorkOnBufferedChannel(t *testing.T) { + ch := make(chan bool, 10) + + assert.Equal(t, 0, len(ch)) + readsCnt := EmptyChannel(ch) + assert.Equal(t, 0, len(ch)) + assert.Equal(t, 0, readsCnt) + + ch <- true + ch <- true + ch <- true + + assert.Equal(t, 3, len(ch)) + readsCnt = EmptyChannel(ch) + assert.Equal(t, 0, len(ch)) + assert.Equal(t, 3, readsCnt) +} + +func TestEmptyChannelShouldWorkOnNotBufferedChannel(t *testing.T) { + ch := make(chan bool) + + assert.Equal(t, 0, len(ch)) + readsCnt := int32(EmptyChannel(ch)) + assert.Equal(t, 0, len(ch)) + assert.Equal(t, int32(0), readsCnt) + + wg := sync.WaitGroup{} + wgChanWasWritten := sync.WaitGroup{} + numConcurrentWrites := 50 + wg.Add(numConcurrentWrites) + wgChanWasWritten.Add(numConcurrentWrites) + for i := 0; i < numConcurrentWrites; i++ { + go func() { + wg.Done() + time.Sleep(time.Millisecond) + ch 
<- true + wgChanWasWritten.Done() + }() + } + + // wait for go routines to start + wg.Wait() + + go func() { + for readsCnt < int32(numConcurrentWrites) { + atomic.AddInt32(&readsCnt, int32(EmptyChannel(ch))) + } + }() + + // wait for go routines to finish + wgChanWasWritten.Wait() + + assert.Equal(t, 0, len(ch)) + assert.Equal(t, int32(numConcurrentWrites), atomic.LoadInt32(&readsCnt)) +} diff --git a/data/mock/requestHandlerStub.go b/data/mock/requestHandlerStub.go index 18676dd70cd..3565a7a2fc7 100644 --- a/data/mock/requestHandlerStub.go +++ b/data/mock/requestHandlerStub.go @@ -1,5 +1,7 @@ package mock +import "time" + // RequestHandlerStub - type RequestHandlerStub struct { RequestShardHeaderCalled func(shardID uint32, hash []byte) @@ -15,6 +17,11 @@ type RequestHandlerStub struct { RequestStartOfEpochMetaBlockCalled func(epoch uint32) } +// RequestInterval - +func (rhs *RequestHandlerStub) RequestInterval() time.Duration { + return time.Second +} + // RequestStartOfEpochMetaBlock - func (rhs *RequestHandlerStub) RequestStartOfEpochMetaBlock(epoch uint32) { if rhs.RequestStartOfEpochMetaBlockCalled == nil { diff --git a/data/syncer/baseAccountsSyncer.go b/data/syncer/baseAccountsSyncer.go index 496fc47a354..45bd9bd68a2 100644 --- a/data/syncer/baseAccountsSyncer.go +++ b/data/syncer/baseAccountsSyncer.go @@ -74,7 +74,7 @@ func (b *baseAccountsSyncer) syncMainTrie(rootHash []byte, trieTopic string) err } b.dataTries[string(rootHash)] = dataTrie - trieSyncer, err := trie.NewTrieSyncer(b.requestHandler, b.cacher, dataTrie, b.waitTime, b.shardId, trieTopic) + trieSyncer, err := trie.NewTrieSyncer(b.requestHandler, b.cacher, dataTrie, b.shardId, trieTopic) if err != nil { return err } diff --git a/data/syncer/userAccountsSyncer.go b/data/syncer/userAccountsSyncer.go index e17037590d3..bb4c78d5bd4 100644 --- a/data/syncer/userAccountsSyncer.go +++ b/data/syncer/userAccountsSyncer.go @@ -85,7 +85,7 @@ func (u *userAccountsSyncer) syncAccountDataTries(rootHashes [][]byte) error { } u.dataTries[string(rootHash)] = dataTrie - trieSyncer, err := trie.NewTrieSyncer(u.requestHandler, u.cacher, dataTrie, u.waitTime, u.shardId, factory.AccountTrieNodesTopic) + trieSyncer, err := trie.NewTrieSyncer(u.requestHandler, u.cacher, dataTrie, u.shardId, factory.AccountTrieNodesTopic) if err != nil { return err } diff --git a/data/trie/interface.go b/data/trie/interface.go index 504674a8f9f..e37260de8c5 100644 --- a/data/trie/interface.go +++ b/data/trie/interface.go @@ -3,6 +3,7 @@ package trie import ( "io" "sync" + "time" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/hashing" @@ -59,5 +60,6 @@ type snapshotNode interface { // RequestHandler defines the methods through which request to data can be made type RequestHandler interface { RequestTrieNodes(destShardID uint32, hash []byte, topic string) + RequestInterval() time.Duration IsInterfaceNil() bool } diff --git a/data/trie/sync.go b/data/trie/sync.go index 4dcb33a548e..cf0f2606a58 100644 --- a/data/trie/sync.go +++ b/data/trie/sync.go @@ -6,6 +6,7 @@ import ( "sync" "time" + "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/storage" @@ -16,17 +17,19 @@ type trieSyncer struct { rootFound bool rootHash []byte - requestHandler RequestHandler - interceptedNodes storage.Cacher - shardId uint32 - topic string - waitTime time.Duration + requestHandler RequestHandler + interceptedNodes storage.Cacher + shardId 
uint32 + topic string + waitTimeBetweenRequests time.Duration nodeHashes map[string]struct{} nodeHashesMutex sync.Mutex receivedNodes map[string]node receivedNodesMutex sync.Mutex + + chanReceivedNew chan bool } // NewTrieSyncer creates a new instance of trieSyncer @@ -34,7 +37,6 @@ func NewTrieSyncer( requestHandler RequestHandler, interceptedNodes storage.Cacher, trie data.Trie, - waitTime time.Duration, shardId uint32, topic string, ) (*trieSyncer, error) { @@ -57,14 +59,15 @@ func NewTrieSyncer( } ts := &trieSyncer{ - requestHandler: requestHandler, - interceptedNodes: interceptedNodes, - trie: pmt, - nodeHashes: make(map[string]struct{}), - receivedNodes: make(map[string]node), - topic: topic, - shardId: shardId, - waitTime: waitTime, + requestHandler: requestHandler, + interceptedNodes: interceptedNodes, + trie: pmt, + nodeHashes: make(map[string]struct{}), + receivedNodes: make(map[string]node), + topic: topic, + shardId: shardId, + waitTimeBetweenRequests: requestHandler.RequestInterval(), + chanReceivedNew: make(chan bool), } ts.interceptedNodes.RegisterHandler(ts.trieNodeIntercepted) @@ -94,6 +97,8 @@ func (ts *trieSyncer) StartSyncing(rootHash []byte, ctx context.Context) error { return err } + _ = core.EmptyChannel(ts.chanReceivedNew) + numRequested := ts.requestNodes() if numRequested == 0 { err := ts.trie.Commit() @@ -105,7 +110,9 @@ func (ts *trieSyncer) StartSyncing(rootHash []byte, ctx context.Context) error { } select { - case <-time.After(ts.waitTime): + case <-ts.chanReceivedNew: + continue + case <-time.After(ts.waitTimeBetweenRequests): continue case <-ctx.Done(): return ErrTimeIsOut @@ -248,9 +255,10 @@ func (ts *trieSyncer) trieNodeIntercepted(hash []byte) { } ts.receivedNodesMutex.Lock() - defer ts.receivedNodesMutex.Unlock() - ts.receivedNodes[string(hash)] = node + ts.receivedNodesMutex.Unlock() + + ts.chanReceivedNew <- true } // IsInterfaceNil returns true if there is no value under the interface diff --git a/data/trie/sync_test.go b/data/trie/sync_test.go index 9cd89c165bf..72a659254ba 100644 --- a/data/trie/sync_test.go +++ b/data/trie/sync_test.go @@ -74,7 +74,7 @@ func TestTrieSyncer_StartSyncing(t *testing.T) { } rootHash, _ := syncTrie.Root() - sync, _ := trie.NewTrieSyncer(resolver, interceptedNodesCacher, tr, time.Second, 0, "trie") + sync, _ := trie.NewTrieSyncer(resolver, interceptedNodesCacher, tr, 0, "trie") ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) err := sync.StartSyncing(rootHash, ctx) diff --git a/dataRetriever/errors.go b/dataRetriever/errors.go index e76dcb12b02..eccecf52042 100644 --- a/dataRetriever/errors.go +++ b/dataRetriever/errors.go @@ -146,9 +146,6 @@ var ErrInvalidMaxTxRequest = errors.New("max tx request number is invalid") // ErrNilPeerListCreator signals that a nil peer list creator implementation has been provided var ErrNilPeerListCreator = errors.New("nil peer list creator provided") -// ErrInvalidNumberOfPeersToQuery signals that an invalid number of peers to query has been provided -var ErrInvalidNumberOfPeersToQuery = errors.New("invalid number of peers to query provided") - // ErrNilTrieDataGetter signals that a nil trie data getter has been provided var ErrNilTrieDataGetter = errors.New("nil trie data getter provided") @@ -181,3 +178,6 @@ var ErrInvalidValue = errors.New("invalid value") // ErrNilWhiteListHandler signals that white list handler is nil var ErrNilWhiteListHandler = errors.New("nil white list handler") + +// ErrRequestIntervalTooSmall signals that request interval is too small 
+var ErrRequestIntervalTooSmall = errors.New("request interval is too small") diff --git a/dataRetriever/requestHandlers/requestHandler.go b/dataRetriever/requestHandlers/requestHandler.go index 34f12268dd0..831ae66e38a 100644 --- a/dataRetriever/requestHandlers/requestHandler.go +++ b/dataRetriever/requestHandlers/requestHandler.go @@ -21,6 +21,7 @@ type resolverRequestHandler struct { shardID uint32 maxTxsToRequest int sweepTime time.Time + requestInterval time.Duration mutSweepTime sync.Mutex } @@ -33,6 +34,7 @@ func NewResolverRequestHandler( whiteList dataRetriever.WhiteListHandler, maxTxsToRequest int, shardID uint32, + requestInterval time.Duration, ) (*resolverRequestHandler, error) { if check.IfNil(finder) { @@ -47,6 +49,9 @@ func NewResolverRequestHandler( if check.IfNil(whiteList) { return nil, dataRetriever.ErrNilWhiteListHandler } + if requestInterval < time.Millisecond { + return nil, dataRetriever.ErrRequestIntervalTooSmall + } rrh := &resolverRequestHandler{ resolversFinder: finder, @@ -55,6 +60,7 @@ func NewResolverRequestHandler( shardID: shardID, maxTxsToRequest: maxTxsToRequest, whiteList: whiteList, + requestInterval: requestInterval, } rrh.sweepTime = time.Now() @@ -503,6 +509,11 @@ func (rrh *resolverRequestHandler) RequestStartOfEpochMetaBlock(epoch uint32) { rrh.addRequestedItem([]byte(epochStartIdentifier)) } +// RequestInterval returns the request interval between sending the same request +func (rrh *resolverRequestHandler) RequestInterval() time.Duration { + return rrh.requestInterval +} + // IsInterfaceNil returns true if there is no value under the interface func (rrh *resolverRequestHandler) IsInterfaceNil() bool { return rrh == nil @@ -526,7 +537,7 @@ func (rrh *resolverRequestHandler) sweepIfNeeded() { rrh.mutSweepTime.Lock() defer rrh.mutSweepTime.Unlock() - if time.Since(rrh.sweepTime) <= time.Second { + if time.Since(rrh.sweepTime) <= rrh.requestInterval { return } diff --git a/dataRetriever/requestHandlers/requestHandler_test.go b/dataRetriever/requestHandlers/requestHandler_test.go index 58c6c7b73ec..2a3ee7090d4 100644 --- a/dataRetriever/requestHandlers/requestHandler_test.go +++ b/dataRetriever/requestHandlers/requestHandler_test.go @@ -40,6 +40,7 @@ func TestNewResolverRequestHandlerNilFinder(t *testing.T) { &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) assert.Nil(t, rrh) @@ -55,6 +56,7 @@ func TestNewResolverRequestHandlerNilRequestedItemsHandler(t *testing.T) { &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) assert.Nil(t, rrh) @@ -70,6 +72,7 @@ func TestNewResolverRequestHandlerMaxTxRequestTooSmall(t *testing.T) { &mock.WhiteListHandlerStub{}, 0, 0, + time.Second, ) assert.Nil(t, rrh) @@ -85,6 +88,7 @@ func TestNewResolverRequestHandler(t *testing.T) { &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) assert.Nil(t, err) @@ -114,6 +118,7 @@ func TestResolverRequestHandler_RequestTransactionErrorWhenGettingCrossShardReso &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestTransaction(0, make([][]byte, 0)) @@ -141,6 +146,7 @@ func TestResolverRequestHandler_RequestTransactionWrongResolverShouldNotPanic(t &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestTransaction(0, make([][]byte, 0)) @@ -167,6 +173,7 @@ func TestResolverRequestHandler_RequestTransactionShouldRequestTransactions(t *t &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestTransaction(0, [][]byte{[]byte("txHash")}) @@ -209,6 +216,7 @@ func TestResolverRequestHandler_RequestTransactionErrorsOnRequestShouldNotPanic( 
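[Editorial sketch] The handler now owns its retry cadence: NewResolverRequestHandler rejects intervals below one millisecond and exposes the configured value through RequestInterval(), so downstream syncers derive their wait time from the handler instead of hard-coding one. A compilable sketch of that contract; the stub and syncer types here are illustrative, mirroring the RequestHandlerStub mocks updated throughout this patch:

package main

import (
	"fmt"
	"time"
)

// RequestHandler mirrors the interface extension made in this patch: every
// implementation must report the interval between repeats of the same request.
type RequestHandler interface {
	RequestInterval() time.Duration
}

// stubHandler plays the role of the RequestHandlerStub mocks, which simply
// return one second.
type stubHandler struct{}

func (s *stubHandler) RequestInterval() time.Duration { return time.Second }

// syncer caches the cadence at construction time, the same way trieSyncer and
// the update/sync syncers populate waitTimeBetweenRequests.
type syncer struct {
	waitTimeBetweenRequests time.Duration
}

func newSyncer(rh RequestHandler) *syncer {
	return &syncer{waitTimeBetweenRequests: rh.RequestInterval()}
}

func main() {
	s := newSyncer(&stubHandler{})
	fmt.Println("re-request every", s.waitTimeBetweenRequests)
}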
&mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestTransaction(0, [][]byte{[]byte("txHash")}) @@ -245,6 +253,7 @@ func TestResolverRequestHandler_RequestMiniBlockErrorWhenGettingCrossShardResolv &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestMiniBlock(0, make([]byte, 0)) @@ -277,6 +286,7 @@ func TestResolverRequestHandler_RequestMiniBlockErrorsOnRequestShouldNotPanic(t &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestMiniBlock(0, []byte("mbHash")) @@ -303,6 +313,7 @@ func TestResolverRequestHandler_RequestMiniBlockShouldCallRequestOnResolver(t *t &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestMiniBlock(0, []byte("mbHash")) @@ -331,6 +342,7 @@ func TestResolverRequestHandler_RequestMiniBlockShouldCallWithTheCorrectEpoch(t &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.SetEpoch(expectedEpoch) @@ -353,6 +365,7 @@ func TestResolverRequestHandler_RequestShardHeaderHashAlreadyRequestedShouldNotR &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestShardHeader(0, make([]byte, 0)) @@ -367,6 +380,7 @@ func TestResolverRequestHandler_RequestShardHeaderHashBadRequest(t *testing.T) { &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestShardHeader(1, make([]byte, 0)) @@ -393,6 +407,7 @@ func TestResolverRequestHandler_RequestShardHeaderShouldCallRequestOnResolver(t &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestShardHeader(0, []byte("hdrHash")) @@ -415,6 +430,7 @@ func TestResolverRequestHandler_RequestMetadHeaderHashAlreadyRequestedShouldNotR &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestMetaHeader(make([]byte, 0)) @@ -441,6 +457,7 @@ func TestResolverRequestHandler_RequestMetadHeaderHashNotHeaderResolverShouldNot &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestMetaHeader([]byte("hdrHash")) @@ -469,6 +486,7 @@ func TestResolverRequestHandler_RequestMetaHeaderShouldCallRequestOnResolver(t * &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestMetaHeader([]byte("hdrHash")) @@ -493,6 +511,7 @@ func TestResolverRequestHandler_RequestShardHeaderByNonceAlreadyRequestedShouldN &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestShardHeaderByNonce(0, 0) @@ -515,6 +534,7 @@ func TestResolverRequestHandler_RequestShardHeaderByNonceBadRequest(t *testing.T &mock.WhiteListHandlerStub{}, 1, core.MetachainShardId, + time.Second, ) rrh.RequestShardHeaderByNonce(1, 0) @@ -543,6 +563,7 @@ func TestResolverRequestHandler_RequestShardHeaderByNonceFinderReturnsErrorShoul &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestShardHeaderByNonce(0, 0) @@ -575,6 +596,7 @@ func TestResolverRequestHandler_RequestShardHeaderByNonceFinderReturnsAWrongReso &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestShardHeaderByNonce(0, 0) @@ -607,6 +629,7 @@ func TestResolverRequestHandler_RequestShardHeaderByNonceResolverFailsShouldNotP &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestShardHeaderByNonce(0, 0) @@ -633,6 +656,7 @@ func TestResolverRequestHandler_RequestShardHeaderByNonceShouldRequest(t *testin &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestShardHeaderByNonce(0, 0) @@ -655,6 +679,7 @@ func TestResolverRequestHandler_RequestMetaHeaderHashAlreadyRequestedShouldNotRe &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestMetaHeaderByNonce(0) @@ -681,6 +706,7 @@ func TestResolverRequestHandler_RequestMetaHeaderByNonceShouldRequest(t *testing &mock.WhiteListHandlerStub{}, 100, 0, + time.Second, ) 
rrh.RequestMetaHeaderByNonce(0) @@ -711,6 +737,7 @@ func TestResolverRequestHandler_RequestScrErrorWhenGettingCrossShardResolverShou &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestUnsignedTransactions(0, make([][]byte, 0)) @@ -738,6 +765,7 @@ func TestResolverRequestHandler_RequestScrWrongResolverShouldNotPanic(t *testing &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestUnsignedTransactions(0, make([][]byte, 0)) @@ -764,6 +792,7 @@ func TestResolverRequestHandler_RequestScrShouldRequestScr(t *testing.T) { &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestUnsignedTransactions(0, [][]byte{[]byte("txHash")}) @@ -806,6 +835,7 @@ func TestResolverRequestHandler_RequestScrErrorsOnRequestShouldNotPanic(t *testi &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestUnsignedTransactions(0, [][]byte{[]byte("txHash")}) @@ -842,6 +872,7 @@ func TestResolverRequestHandler_RequestRewardShouldRequestReward(t *testing.T) { &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestRewardTransactions(0, [][]byte{[]byte("txHash")}) @@ -876,6 +907,7 @@ func TestRequestTrieNodes_ShouldWork(t *testing.T) { &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestTrieNodes(0, []byte("hash"), "topic") @@ -898,6 +930,7 @@ func TestRequestTrieNodes_NilResolver(t *testing.T) { &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestTrieNodes(core.MetachainShardId, []byte("hash"), "topic") @@ -926,6 +959,7 @@ func TestRequestTrieNodes_RequestByHashError(t *testing.T) { &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestTrieNodes(0, []byte("hash"), "topic") @@ -948,6 +982,7 @@ func TestRequestStartOfEpochMetaBlock_MissingResolver(t *testing.T) { &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestStartOfEpochMetaBlock(0) @@ -971,6 +1006,7 @@ func TestRequestStartOfEpochMetaBlock_WrongResolver(t *testing.T) { &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestStartOfEpochMetaBlock(0) @@ -999,6 +1035,7 @@ func TestRequestStartOfEpochMetaBlock_RequestDataFromEpochError(t *testing.T) { &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestStartOfEpochMetaBlock(0) @@ -1031,6 +1068,7 @@ func TestRequestStartOfEpochMetaBlock_AddError(t *testing.T) { &mock.WhiteListHandlerStub{}, 1, 0, + time.Second, ) rrh.RequestStartOfEpochMetaBlock(0) diff --git a/epochStart/bootstrap/process.go b/epochStart/bootstrap/process.go index 24d94a597d2..188d5066f8a 100644 --- a/epochStart/bootstrap/process.go +++ b/epochStart/bootstrap/process.go @@ -619,7 +619,7 @@ func (e *epochStartBootstrap) syncUserAccountsState(rootHash []byte) error { Marshalizer: e.marshalizer, TrieStorageManager: e.userTrieStorageManager, RequestHandler: e.requestHandler, - WaitTime: timeToWait, + WaitTime: time.Minute, Cacher: e.dataPool.TrieNodes(), }, ShardId: e.shardCoordinator.SelfId(), @@ -679,7 +679,7 @@ func (e *epochStartBootstrap) syncPeerAccountsState(rootHash []byte) error { Marshalizer: e.marshalizer, TrieStorageManager: e.peerTrieStorageManager, RequestHandler: e.requestHandler, - WaitTime: timeToWait * 10, + WaitTime: time.Minute, Cacher: e.dataPool.TrieNodes(), }, } @@ -754,7 +754,14 @@ func (e *epochStartBootstrap) createRequestHandler() error { requestedItemsHandler := timecache.NewTimeCache(100) maxToRequest := 100 - e.requestHandler, err = requestHandlers.NewResolverRequestHandler(finder, requestedItemsHandler, e.whiteListHandler, maxToRequest, core.MetachainShardId) + e.requestHandler, err = 
requestHandlers.NewResolverRequestHandler( + finder, + requestedItemsHandler, + e.whiteListHandler, + maxToRequest, + core.MetachainShardId, + 100*time.Millisecond, + ) return err } diff --git a/epochStart/interface.go b/epochStart/interface.go index 22cd8410cde..ef663b0b565 100644 --- a/epochStart/interface.go +++ b/epochStart/interface.go @@ -49,6 +49,7 @@ type RequestHandler interface { RequestShardHeaderByNonce(shardId uint32, nonce uint64) RequestStartOfEpochMetaBlock(epoch uint32) RequestMiniBlocks(destShardID uint32, miniblocksHashes [][]byte) + RequestInterval() time.Duration IsInterfaceNil() bool } diff --git a/epochStart/mock/requestHandlerStub.go b/epochStart/mock/requestHandlerStub.go index 9044d157c54..c99460de944 100644 --- a/epochStart/mock/requestHandlerStub.go +++ b/epochStart/mock/requestHandlerStub.go @@ -1,5 +1,7 @@ package mock +import "time" + // RequestHandlerStub - type RequestHandlerStub struct { RequestShardHeaderCalled func(shardId uint32, hash []byte) @@ -13,6 +15,11 @@ type RequestHandlerStub struct { RequestStartOfEpochMetaBlockCalled func(epoch uint32) } +// RequestInterval - +func (rhs *RequestHandlerStub) RequestInterval() time.Duration { + return time.Second +} + // RequestStartOfEpochMetaBlock - func (rhs *RequestHandlerStub) RequestStartOfEpochMetaBlock(epoch uint32) { if rhs.RequestStartOfEpochMetaBlockCalled == nil { diff --git a/integrationTests/mock/requestHandlerStub.go b/integrationTests/mock/requestHandlerStub.go index 18676dd70cd..3565a7a2fc7 100644 --- a/integrationTests/mock/requestHandlerStub.go +++ b/integrationTests/mock/requestHandlerStub.go @@ -1,5 +1,7 @@ package mock +import "time" + // RequestHandlerStub - type RequestHandlerStub struct { RequestShardHeaderCalled func(shardID uint32, hash []byte) @@ -15,6 +17,11 @@ type RequestHandlerStub struct { RequestStartOfEpochMetaBlockCalled func(epoch uint32) } +// RequestInterval - +func (rhs *RequestHandlerStub) RequestInterval() time.Duration { + return time.Second +} + // RequestStartOfEpochMetaBlock - func (rhs *RequestHandlerStub) RequestStartOfEpochMetaBlock(epoch uint32) { if rhs.RequestStartOfEpochMetaBlockCalled == nil { diff --git a/integrationTests/state/stateTrieSync/stateTrieSync_test.go b/integrationTests/state/stateTrieSync/stateTrieSync_test.go index db00c43c77d..48bddca9ca1 100644 --- a/integrationTests/state/stateTrieSync/stateTrieSync_test.go +++ b/integrationTests/state/stateTrieSync/stateTrieSync_test.go @@ -64,10 +64,11 @@ func TestNode_RequestInterceptTrieNodesWithMessenger(t *testing.T) { whiteListHandler, 10000, nRequester.ShardCoordinator.SelfId(), + time.Second, ) waitTime := 10 * time.Second - trieSyncer, _ := trie.NewTrieSyncer(requestHandler, nRequester.DataPool.TrieNodes(), requesterTrie, time.Second, core.MetachainShardId, factory.AccountTrieNodesTopic) + trieSyncer, _ := trie.NewTrieSyncer(requestHandler, nRequester.DataPool.TrieNodes(), requesterTrie, core.MetachainShardId, factory.AccountTrieNodesTopic) ctx, cancel := context.WithTimeout(context.Background(), waitTime) defer cancel() diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index d55206e062a..26a0f1f4b11 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -666,6 +666,7 @@ func (tpn *TestProcessorNode) initResolvers() { tpn.WhiteListHandler, 100, tpn.ShardCoordinator.SelfId(), + time.Second, ) } else { resolversContainerFactory, _ := resolverscontainer.NewShardResolversContainerFactory(resolverContainerFactory) @@ 
-680,6 +681,7 @@ func (tpn *TestProcessorNode) initResolvers() { tpn.WhiteListHandler, 100, tpn.ShardCoordinator.SelfId(), + time.Second, ) } } diff --git a/node/mock/requestHandlerStub.go b/node/mock/requestHandlerStub.go index 19278e33634..3565a7a2fc7 100644 --- a/node/mock/requestHandlerStub.go +++ b/node/mock/requestHandlerStub.go @@ -1,5 +1,7 @@ package mock +import "time" + // RequestHandlerStub - type RequestHandlerStub struct { RequestShardHeaderCalled func(shardID uint32, hash []byte) @@ -15,6 +17,11 @@ type RequestHandlerStub struct { RequestStartOfEpochMetaBlockCalled func(epoch uint32) } +// RequestInterval - +func (rhs *RequestHandlerStub) RequestInterval() time.Duration { + return time.Second +} + // RequestStartOfEpochMetaBlock - func (rhs *RequestHandlerStub) RequestStartOfEpochMetaBlock(epoch uint32) { if rhs.RequestStartOfEpochMetaBlockCalled == nil { @@ -24,7 +31,7 @@ func (rhs *RequestHandlerStub) RequestStartOfEpochMetaBlock(epoch uint32) { } // SetEpoch - -func (rhs *RequestHandlerStub) SetEpoch(epoch uint32) { +func (rhs *RequestHandlerStub) SetEpoch(_ uint32) { } // RequestShardHeader - diff --git a/process/block/metablock.go b/process/block/metablock.go index dd007962c0c..3b46cdcfc64 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -1547,7 +1547,7 @@ func (mp *metaProcessor) requestMissingFinalityAttestingShardHeaders() uint32 { } func (mp *metaProcessor) requestShardHeaders(metaBlock *block.MetaBlock) (uint32, uint32) { - _ = process.EmptyChannel(mp.chRcvAllHdrs) + _ = core.EmptyChannel(mp.chRcvAllHdrs) if len(metaBlock.ShardInfo) == 0 { return 0, 0 diff --git a/process/block/preprocess/rewardTxPreProcessor.go b/process/block/preprocess/rewardTxPreProcessor.go index 5b03f44311c..815ed119dcb 100644 --- a/process/block/preprocess/rewardTxPreProcessor.go +++ b/process/block/preprocess/rewardTxPreProcessor.go @@ -260,7 +260,7 @@ func (rtp *rewardTxPreprocessor) receivedRewardTransaction(txHash []byte) { // CreateBlockStarted cleans the local cache map for processed/created reward transactions at this round func (rtp *rewardTxPreprocessor) CreateBlockStarted() { - _ = process.EmptyChannel(rtp.chReceivedAllRewardTxs) + _ = core.EmptyChannel(rtp.chReceivedAllRewardTxs) rtp.rewardTxsForBlock.mutTxsForBlock.Lock() rtp.rewardTxsForBlock.missingTxs = 0 diff --git a/process/block/preprocess/smartContractResults.go b/process/block/preprocess/smartContractResults.go index a6218f81054..b4e0b322293 100644 --- a/process/block/preprocess/smartContractResults.go +++ b/process/block/preprocess/smartContractResults.go @@ -281,7 +281,7 @@ func (scr *smartContractResults) receivedSmartContractResult(txHash []byte) { // CreateBlockStarted cleans the local cache map for processed/created smartContractResults at this round func (scr *smartContractResults) CreateBlockStarted() { - _ = process.EmptyChannel(scr.chRcvAllScrs) + _ = core.EmptyChannel(scr.chRcvAllScrs) scr.scrForBlock.mutTxsForBlock.Lock() scr.scrForBlock.missingTxs = 0 diff --git a/process/block/preprocess/transactions.go b/process/block/preprocess/transactions.go index c5cc252b849..991183ac73d 100644 --- a/process/block/preprocess/transactions.go +++ b/process/block/preprocess/transactions.go @@ -516,7 +516,7 @@ func (txs *transactions) receivedTransaction(txHash []byte) { // CreateBlockStarted cleans the local cache map for processed/created transactions at this round func (txs *transactions) CreateBlockStarted() { - _ = process.EmptyChannel(txs.chRcvAllTxs) + _ = core.EmptyChannel(txs.chRcvAllTxs) 
txs.txsForCurrBlock.mutTxsForBlock.Lock() txs.txsForCurrBlock.missingTxs = 0 diff --git a/process/block/shardblock.go b/process/block/shardblock.go index df684d98240..2eba4176108 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -1403,7 +1403,7 @@ func (sp *shardProcessor) receivedMetaBlock(headerHandler data.HeaderHandler, me } func (sp *shardProcessor) requestMetaHeaders(shardHeader *block.Header) (uint32, uint32) { - _ = process.EmptyChannel(sp.chRcvAllMetaHdrs) + _ = core.EmptyChannel(sp.chRcvAllMetaHdrs) if len(shardHeader.MetaBlockHashes) == 0 { return 0, 0 diff --git a/process/common.go b/process/common.go index aa235a6a3d9..f654f29d72d 100644 --- a/process/common.go +++ b/process/common.go @@ -20,19 +20,6 @@ import ( var log = logger.GetOrCreate("process") -// EmptyChannel empties the given channel -func EmptyChannel(ch chan bool) int { - readsCnt := 0 - for { - select { - case <-ch: - readsCnt++ - default: - return readsCnt - } - } -} - // GetShardHeader gets the header, which is associated with the given hash, from pool or storage func GetShardHeader( hash []byte, diff --git a/process/common_test.go b/process/common_test.go index e3216a7180c..4e0c32c74ee 100644 --- a/process/common_test.go +++ b/process/common_test.go @@ -4,10 +4,7 @@ import ( "bytes" "errors" "math/big" - "sync" - "sync/atomic" "testing" - "time" "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/data" @@ -20,62 +17,6 @@ import ( "github.com/stretchr/testify/assert" ) -func TestEmptyChannelShouldWorkOnBufferedChannel(t *testing.T) { - ch := make(chan bool, 10) - - assert.Equal(t, 0, len(ch)) - readsCnt := process.EmptyChannel(ch) - assert.Equal(t, 0, len(ch)) - assert.Equal(t, 0, readsCnt) - - ch <- true - ch <- true - ch <- true - - assert.Equal(t, 3, len(ch)) - readsCnt = process.EmptyChannel(ch) - assert.Equal(t, 0, len(ch)) - assert.Equal(t, 3, readsCnt) -} - -func TestEmptyChannelShouldWorkOnNotBufferedChannel(t *testing.T) { - ch := make(chan bool) - - assert.Equal(t, 0, len(ch)) - readsCnt := int32(process.EmptyChannel(ch)) - assert.Equal(t, 0, len(ch)) - assert.Equal(t, int32(0), readsCnt) - - wg := sync.WaitGroup{} - wgChanWasWritten := sync.WaitGroup{} - numConcurrentWrites := 50 - wg.Add(numConcurrentWrites) - wgChanWasWritten.Add(numConcurrentWrites) - for i := 0; i < numConcurrentWrites; i++ { - go func() { - wg.Done() - time.Sleep(time.Millisecond) - ch <- true - wgChanWasWritten.Done() - }() - } - - // wait for go routines to start - wg.Wait() - - go func() { - for readsCnt < int32(numConcurrentWrites) { - atomic.AddInt32(&readsCnt, int32(process.EmptyChannel(ch))) - } - }() - - // wait for go routines to finish - wgChanWasWritten.Wait() - - assert.Equal(t, 0, len(ch)) - assert.Equal(t, int32(numConcurrentWrites), atomic.LoadInt32(&readsCnt)) -} - func TestGetShardHeaderShouldErrNilCacher(t *testing.T) { hash := []byte("X") diff --git a/process/interface.go b/process/interface.go index 8841635ecb3..256be730d9d 100644 --- a/process/interface.go +++ b/process/interface.go @@ -434,6 +434,7 @@ type RequestHandler interface { RequestMiniBlocks(destShardID uint32, miniblocksHashes [][]byte) RequestTrieNodes(destShardID uint32, hash []byte, topic string) RequestStartOfEpochMetaBlock(epoch uint32) + RequestInterval() time.Duration IsInterfaceNil() bool } diff --git a/process/mock/requestHandlerStub.go b/process/mock/requestHandlerStub.go index 18676dd70cd..3565a7a2fc7 100644 --- a/process/mock/requestHandlerStub.go +++ 
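[Editorial sketch] The hunks above delete EmptyChannel from the process package, and the many call-site rewrites to core.EmptyChannel(...) indicate it was relocated to the core package, presumably with the same body. For reference, the non-blocking drain pattern it implements, reconstructed from the removed code:

package main

import "fmt"

// EmptyChannel drains whatever is currently buffered in ch without ever
// blocking: the default case fires as soon as no value is ready. This is the
// implementation removed from process/common.go above, which the call-site
// changes suggest now lives in the core package.
func EmptyChannel(ch chan bool) int {
	readsCnt := 0
	for {
		select {
		case <-ch:
			readsCnt++
		default:
			return readsCnt
		}
	}
}

func main() {
	ch := make(chan bool, 3)
	ch <- true
	ch <- true
	fmt.Println(EmptyChannel(ch), "values drained; remaining:", len(ch))
}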
b/process/mock/requestHandlerStub.go @@ -1,5 +1,7 @@ package mock +import "time" + // RequestHandlerStub - type RequestHandlerStub struct { RequestShardHeaderCalled func(shardID uint32, hash []byte) @@ -15,6 +17,11 @@ type RequestHandlerStub struct { RequestStartOfEpochMetaBlockCalled func(epoch uint32) } +// RequestInterval - +func (rhs *RequestHandlerStub) RequestInterval() time.Duration { + return time.Second +} + // RequestStartOfEpochMetaBlock - func (rhs *RequestHandlerStub) RequestStartOfEpochMetaBlock(epoch uint32) { if rhs.RequestStartOfEpochMetaBlockCalled == nil { diff --git a/process/sync/baseSync.go b/process/sync/baseSync.go index 9a949950c1d..26ee2c8ccfd 100644 --- a/process/sync/baseSync.go +++ b/process/sync/baseSync.go @@ -865,7 +865,7 @@ func (boot *baseBootstrap) requestMiniBlocksByHashes(hashes [][]byte) { func (boot *baseBootstrap) getMiniBlocksRequestingIfMissing(hashes [][]byte) (block.MiniBlockSlice, error) { miniBlocks, missingMiniBlocksHashes := boot.miniBlocksResolver.GetMiniBlocksFromPool(hashes) if len(missingMiniBlocksHashes) > 0 { - _ = process.EmptyChannel(boot.chRcvMiniBlocks) + _ = core.EmptyChannel(boot.chRcvMiniBlocks) boot.requestMiniBlocksByHashes(missingMiniBlocksHashes) err := boot.waitForMiniBlocks() if err != nil { diff --git a/process/sync/metablock.go b/process/sync/metablock.go index 1d83c8c3971..ec805055dcc 100644 --- a/process/sync/metablock.go +++ b/process/sync/metablock.go @@ -171,7 +171,7 @@ func (boot *MetaBootstrap) getHeaderWithNonceRequestingIfMissing(nonce uint64) ( nonce, boot.headers) if err != nil { - _ = process.EmptyChannel(boot.chRcvHdrNonce) + _ = core.EmptyChannel(boot.chRcvHdrNonce) boot.requestHeaderWithNonce(nonce) err = boot.waitForHeaderNonce() if err != nil { @@ -194,7 +194,7 @@ func (boot *MetaBootstrap) getHeaderWithNonceRequestingIfMissing(nonce uint64) ( func (boot *MetaBootstrap) getHeaderWithHashRequestingIfMissing(hash []byte) (data.HeaderHandler, error) { hdr, err := process.GetMetaHeader(hash, boot.headers, boot.marshalizer, boot.store) if err != nil { - _ = process.EmptyChannel(boot.chRcvHdrHash) + _ = core.EmptyChannel(boot.chRcvHdrHash) boot.requestHeaderWithHash(hash) err = boot.waitForHeaderHash() if err != nil { diff --git a/process/sync/shardblock.go b/process/sync/shardblock.go index 93746649b4b..b7a47c2d99b 100644 --- a/process/sync/shardblock.go +++ b/process/sync/shardblock.go @@ -148,7 +148,7 @@ func (boot *ShardBootstrap) getHeaderWithNonceRequestingIfMissing(nonce uint64) boot.shardCoordinator.SelfId(), boot.headers) if err != nil { - _ = process.EmptyChannel(boot.chRcvHdrNonce) + _ = core.EmptyChannel(boot.chRcvHdrNonce) boot.requestHeaderWithNonce(nonce) err = boot.waitForHeaderNonce() if err != nil { @@ -172,7 +172,7 @@ func (boot *ShardBootstrap) getHeaderWithNonceRequestingIfMissing(nonce uint64) func (boot *ShardBootstrap) getHeaderWithHashRequestingIfMissing(hash []byte) (data.HeaderHandler, error) { hdr, err := process.GetShardHeader(hash, boot.headers, boot.marshalizer, boot.store) if err != nil { - _ = process.EmptyChannel(boot.chRcvHdrHash) + _ = core.EmptyChannel(boot.chRcvHdrHash) boot.requestHeaderWithHash(hash) err = boot.waitForHeaderHash() if err != nil { diff --git a/update/factory/trieSyncersContainerFactory.go b/update/factory/trieSyncersContainerFactory.go index 6d4af35e60a..ad6327c7efc 100644 --- a/update/factory/trieSyncersContainerFactory.go +++ b/update/factory/trieSyncersContainerFactory.go @@ -1,8 +1,6 @@ package factory import ( - "time" - 
"github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/data/state" @@ -95,7 +93,7 @@ func (t *trieSyncersContainerFactory) createOneTrieSyncer( return update.ErrNilDataTrieContainer } - trieSyncer, err := trie.NewTrieSyncer(t.requestHandler, t.trieCacher, dataTrie, time.Minute, shId, trieTopicFromAccountType(accType)) + trieSyncer, err := trie.NewTrieSyncer(t.requestHandler, t.trieCacher, dataTrie, shId, trieTopicFromAccountType(accType)) if err != nil { return err } diff --git a/update/interface.go b/update/interface.go index 5312c216d09..4dcb9e78e67 100644 --- a/update/interface.go +++ b/update/interface.go @@ -89,6 +89,7 @@ type RequestHandler interface { RequestMetaHeaderByNonce(nonce uint64) RequestShardHeaderByNonce(shardId uint32, nonce uint64) RequestTrieNodes(destShardID uint32, hash []byte, topic string) + RequestInterval() time.Duration IsInterfaceNil() bool } diff --git a/update/mock/requestHandlerStub.go b/update/mock/requestHandlerStub.go index 93273a719a4..69be9806699 100644 --- a/update/mock/requestHandlerStub.go +++ b/update/mock/requestHandlerStub.go @@ -1,5 +1,7 @@ package mock +import "time" + type RequestHandlerStub struct { RequestShardHeaderCalled func(shardId uint32, hash []byte) RequestMetaHeaderCalled func(hash []byte) @@ -13,15 +15,20 @@ type RequestHandlerStub struct { RequestStartOfEpochMetaBlockCalled func(epoch uint32) } -func (rhs *RequestHandlerStub) SetEpoch(epoch uint32) { +// RequestInterval - +func (rhs *RequestHandlerStub) RequestInterval() time.Duration { + return time.Second +} + +func (rhs *RequestHandlerStub) SetEpoch(_ uint32) { panic("implement me") } -func (rhs *RequestHandlerStub) RequestMiniBlocks(destShardID uint32, miniblocksHashes [][]byte) { +func (rhs *RequestHandlerStub) RequestMiniBlocks(_ uint32, _ [][]byte) { panic("implement me") } -func (rhs *RequestHandlerStub) RequestTrieNodes(destShardID uint32, hash []byte, topic string) { +func (rhs *RequestHandlerStub) RequestTrieNodes(_ uint32, _ []byte, _ string) { panic("implement me") } diff --git a/update/sync/syncHeaders.go b/update/sync/syncHeaders.go index be2b09882e1..cecb22eafa1 100644 --- a/update/sync/syncHeaders.go +++ b/update/sync/syncHeaders.go @@ -259,7 +259,7 @@ func (h *headersToSync) syncFirstPendingMetaBlocks(waitTime time.Duration) error h.firstPendingMetaBlocks[metaHash] = metaHdr } - _ = process.EmptyChannel(h.chReceivedAll) + _ = core.EmptyChannel(h.chReceivedAll) for metaHash := range h.missingMetaBlocks { h.stopSyncing = false h.requestHandler.RequestMetaHeader([]byte(metaHash)) @@ -290,7 +290,7 @@ func (h *headersToSync) syncAllNeededMetaHeaders(waitTime time.Duration) error { lowestPendingNonce := h.lowestPendingNonceFrom(h.firstPendingMetaBlocks) h.computeMissingNonce(lowestPendingNonce, h.epochStartMetaBlock.Nonce) - _ = process.EmptyChannel(h.chReceivedAll) + _ = core.EmptyChannel(h.chReceivedAll) for nonce := range h.missingMetaNonces { h.stopSyncing = false h.requestHandler.RequestMetaHeaderByNonce(nonce) diff --git a/update/sync/syncHeadersByHash.go b/update/sync/syncHeadersByHash.go index 9e139a47c17..27f3fdae622 100644 --- a/update/sync/syncHeadersByHash.go +++ b/update/sync/syncHeadersByHash.go @@ -76,7 +76,7 @@ func (m *syncHeadersByHash) SyncMissingHeadersByHash( headersHashes [][]byte, waitTime time.Duration, ) error { - _ = process.EmptyChannel(m.chReceivedAll) + _ = core.EmptyChannel(m.chReceivedAll) requestedMBs := 0 m.mutMissingHdrs.Lock() diff --git 
a/update/sync/syncMiniBlocks.go b/update/sync/syncMiniBlocks.go index 8a93b6cae74..3fed39486f5 100644 --- a/update/sync/syncMiniBlocks.go +++ b/update/sync/syncMiniBlocks.go @@ -4,6 +4,7 @@ import ( "sync" "time" + "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/dataRetriever" @@ -105,7 +106,7 @@ func (p *pendingMiniBlocks) SyncPendingMiniBlocks( } func (p *pendingMiniBlocks) syncMiniBlocks(listPendingMiniBlocks []block.ShardMiniBlockHeader, waitTime time.Duration) error { - _ = process.EmptyChannel(p.chReceivedAll) + _ = core.EmptyChannel(p.chReceivedAll) requestedMBs := 0 p.mutPendingMb.Lock() diff --git a/update/sync/syncTransactions.go b/update/sync/syncTransactions.go index 0698cda2c6b..83aee08e097 100644 --- a/update/sync/syncTransactions.go +++ b/update/sync/syncTransactions.go @@ -4,6 +4,7 @@ import ( "sync" "time" + "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" @@ -83,7 +84,7 @@ func NewPendingTransactionsSyncer(args ArgsNewPendingTransactionsSyncer) (*pendi // SyncPendingTransactionsFor syncs pending transactions for a list of miniblocks func (p *pendingTransactions) SyncPendingTransactionsFor(miniBlocks map[string]*block.MiniBlock, epoch uint32, waitTime time.Duration) error { - _ = process.EmptyChannel(p.chReceivedAll) + _ = core.EmptyChannel(p.chReceivedAll) p.mutPendingTx.Lock() p.epochToSync = epoch From 3bf3409c3a5e83db13c6d007b6aa49f11e75ce37 Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Mon, 30 Mar 2020 18:42:44 +0300 Subject: [PATCH 56/61] added more implementation. --- epochStart/bootstrap/process.go | 13 +- epochStart/bootstrap/syncValidatorStatus.go | 6 +- epochStart/interface.go | 5 +- go.mod | 1 + update/errors.go | 3 + update/interface.go | 6 +- update/mock/epochMiniBlocksSyncHandlerMock.go | 8 +- .../pendingTransactionsSyncHandlerMock.go | 9 +- update/sync/coordinator.go | 9 +- update/sync/coordinator_test.go | 6 +- update/sync/syncAccountsDBs.go | 1 - update/sync/syncHeaders.go | 1 + update/sync/syncHeadersByHash.go | 130 +++++++++-------- update/sync/syncMiniBlocks.go | 131 ++++++++++-------- update/sync/syncMiniBlocks_test.go | 13 +- update/sync/syncTransactions.go | 101 ++++++++------ update/sync/syncTransactions_test.go | 13 +- 17 files changed, 269 insertions(+), 187 deletions(-) diff --git a/epochStart/bootstrap/process.go b/epochStart/bootstrap/process.go index 188d5066f8a..b0781120835 100644 --- a/epochStart/bootstrap/process.go +++ b/epochStart/bootstrap/process.go @@ -1,6 +1,7 @@ package bootstrap import ( + "context" "strconv" "time" @@ -348,7 +349,9 @@ func (e *epochStartBootstrap) syncHeadersFrom(meta *block.MetaBlock) (map[string shardIds = append(shardIds, core.MetachainShardId) } - err := e.headersSyncer.SyncMissingHeadersByHash(shardIds, hashesToRequest, timeToWait*5) + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + err := e.headersSyncer.SyncMissingHeadersByHash(shardIds, hashesToRequest, ctx) + cancel() if err != nil { return nil, err } @@ -531,7 +534,9 @@ func (e *epochStartBootstrap) requestAndProcessForShard() error { return err } - err = e.miniBlocksSyncer.SyncPendingMiniBlocks(epochStartData.PendingMiniBlockHeaders, timeToWait) + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + err = 
e.miniBlocksSyncer.SyncPendingMiniBlocks(epochStartData.PendingMiniBlockHeaders, ctx) + cancel() if err != nil { return err } @@ -552,7 +557,9 @@ func (e *epochStartBootstrap) requestAndProcessForShard() error { } e.headersSyncer.ClearFields() - err = e.headersSyncer.SyncMissingHeadersByHash(shardIds, hashesToRequest, timeToWait) + ctx, cancel = context.WithTimeout(context.Background(), time.Minute) + err = e.headersSyncer.SyncMissingHeadersByHash(shardIds, hashesToRequest, ctx) + cancel() if err != nil { return err } diff --git a/epochStart/bootstrap/syncValidatorStatus.go b/epochStart/bootstrap/syncValidatorStatus.go index 9ee38ac14dd..634a7a02a69 100644 --- a/epochStart/bootstrap/syncValidatorStatus.go +++ b/epochStart/bootstrap/syncValidatorStatus.go @@ -1,7 +1,9 @@ package bootstrap import ( + "context" "fmt" + "time" "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/data/block" @@ -150,7 +152,9 @@ func (s *syncValidatorStatus) processNodesConfigFor( shardMBHeaders := findPeerMiniBlockHeaders(metaBlock) s.miniBlocksSyncer.ClearFields() - err := s.miniBlocksSyncer.SyncPendingMiniBlocks(shardMBHeaders, timeToWait) + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + err := s.miniBlocksSyncer.SyncPendingMiniBlocks(shardMBHeaders, ctx) + cancel() if err != nil { return nil, err } diff --git a/epochStart/interface.go b/epochStart/interface.go index ef663b0b565..f8508273e1f 100644 --- a/epochStart/interface.go +++ b/epochStart/interface.go @@ -1,6 +1,7 @@ package epochStart import ( + "context" "time" "github.com/ElrondNetwork/elrond-go/data" @@ -82,7 +83,7 @@ type ValidatorStatisticsProcessorHandler interface { // HeadersByHashSyncer defines the methods to sync all missing headers by hash type HeadersByHashSyncer interface { - SyncMissingHeadersByHash(shardIDs []uint32, headersHashes [][]byte, waitTime time.Duration) error + SyncMissingHeadersByHash(shardIDs []uint32, headersHashes [][]byte, ctx context.Context) error GetHeaders() (map[string]data.HeaderHandler, error) ClearFields() IsInterfaceNil() bool @@ -90,7 +91,7 @@ type HeadersByHashSyncer interface { // PendingMiniBlocksSyncHandler defines the methods to sync all pending miniblocks type PendingMiniBlocksSyncHandler interface { - SyncPendingMiniBlocks(miniBlockHeaders []block.ShardMiniBlockHeader, waitTime time.Duration) error + SyncPendingMiniBlocks(miniBlockHeaders []block.ShardMiniBlockHeader, ctx context.Context) error GetMiniBlocks() (map[string]*block.MiniBlock, error) ClearFields() IsInterfaceNil() bool diff --git a/go.mod b/go.mod index e3543141a03..88f961e7190 100644 --- a/go.mod +++ b/go.mod @@ -44,5 +44,6 @@ require ( github.com/whyrusleeping/go-logging v0.0.1 // indirect github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 + golang.org/x/net v0.0.0-20190620200207-3b0461eec859 gopkg.in/go-playground/validator.v8 v8.18.2 ) diff --git a/update/errors.go b/update/errors.go index 605dc1c574b..bf8c51a0c7a 100644 --- a/update/errors.go +++ b/update/errors.go @@ -157,3 +157,6 @@ var ErrNilAccountsDBSyncContainer = errors.New("nil accounts db sync container") // ErrNilValidatorInfoProcessor signals that nil validator info was provided var ErrNilValidatorInfoProcessor = errors.New("nil validator info processor") + +// ErrTimeIsOut signals that time is out +var ErrTimeIsOut = errors.New("time is out") diff --git a/update/interface.go b/update/interface.go index 4dcb9e78e67..a6c4eb36edf 100644 --- 
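[Editorial sketch] The interface changes above trade a waitTime time.Duration for a context.Context, moving deadline ownership to the caller. The call sites in process.go and syncValidatorStatus.go all follow the same shape: derive a bounded context, run the sync, and release the timer with cancel(). A small self-contained sketch of that caller-side pattern, with syncFn standing in for any of the migrated methods:

package main

import (
	"context"
	"fmt"
	"time"
)

// syncFn stands in for a migrated method such as SyncMissingHeadersByHash
// or SyncPendingMiniBlocks.
type syncFn func(ctx context.Context) error

// runWithTimeout bounds a sync call the way the bootstrap code above does;
// cancel must always run so the timeout's resources are released promptly.
func runWithTimeout(sync syncFn, timeout time.Duration) error {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	return sync(ctx)
}

func main() {
	err := runWithTimeout(func(ctx context.Context) error {
		select {
		case <-time.After(10 * time.Millisecond): // pretend the data arrived
			return nil
		case <-ctx.Done():
			return ctx.Err() // deadline hit first
		}
	}, time.Minute)
	fmt.Println("sync result:", err)
}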
a/update/interface.go +++ b/update/interface.go @@ -123,21 +123,21 @@ type EpochStartTriesSyncHandler interface { // EpochStartPendingMiniBlocksSyncHandler defines the methods to sync all pending miniblocks type EpochStartPendingMiniBlocksSyncHandler interface { - SyncPendingMiniBlocksFromMeta(epochStart *block.MetaBlock, unFinished map[string]*block.MetaBlock, waitTime time.Duration) error + SyncPendingMiniBlocksFromMeta(epochStart *block.MetaBlock, unFinished map[string]*block.MetaBlock, ctx context.Context) error GetMiniBlocks() (map[string]*block.MiniBlock, error) IsInterfaceNil() bool } // PendingTransactionsSyncHandler defines the methods to sync all transactions from a set of miniblocks type PendingTransactionsSyncHandler interface { - SyncPendingTransactionsFor(miniBlocks map[string]*block.MiniBlock, epoch uint32, waitTime time.Duration) error + SyncPendingTransactionsFor(miniBlocks map[string]*block.MiniBlock, epoch uint32, ctx context.Context) error GetTransactions() (map[string]data.TransactionHandler, error) IsInterfaceNil() bool } // MissingHeadersByHashSyncer defines the methods to sync all missing headers by hash type MissingHeadersByHashSyncer interface { - SyncMissingHeadersByHash(shardIDs []uint32, headersHashes [][]byte, waitTime time.Duration) error + SyncMissingHeadersByHash(shardIDs []uint32, headersHashes [][]byte, ctx context.Context) error GetHeaders() (map[string]data.HeaderHandler, error) ClearFields() IsInterfaceNil() bool diff --git a/update/mock/epochMiniBlocksSyncHandlerMock.go b/update/mock/epochMiniBlocksSyncHandlerMock.go index 1b50417882f..ae4266241ea 100644 --- a/update/mock/epochMiniBlocksSyncHandlerMock.go +++ b/update/mock/epochMiniBlocksSyncHandlerMock.go @@ -1,19 +1,19 @@ package mock import ( - "time" + "context" "github.com/ElrondNetwork/elrond-go/data/block" ) type EpochStartPendingMiniBlocksSyncHandlerMock struct { - SyncPendingMiniBlocksFromMetaCalled func(epochStart *block.MetaBlock, unFinished map[string]*block.MetaBlock, waitTime time.Duration) error + SyncPendingMiniBlocksFromMetaCalled func(epochStart *block.MetaBlock, unFinished map[string]*block.MetaBlock, ctx context.Context) error GetMiniBlocksCalled func() (map[string]*block.MiniBlock, error) } -func (ep *EpochStartPendingMiniBlocksSyncHandlerMock) SyncPendingMiniBlocksFromMeta(epochStart *block.MetaBlock, unFinished map[string]*block.MetaBlock, waitTime time.Duration) error { +func (ep *EpochStartPendingMiniBlocksSyncHandlerMock) SyncPendingMiniBlocksFromMeta(epochStart *block.MetaBlock, unFinished map[string]*block.MetaBlock, ctx context.Context) error { if ep.SyncPendingMiniBlocksFromMetaCalled != nil { - return ep.SyncPendingMiniBlocksFromMetaCalled(epochStart, unFinished, waitTime) + return ep.SyncPendingMiniBlocksFromMetaCalled(epochStart, unFinished, ctx) } return nil } diff --git a/update/mock/pendingTransactionsSyncHandlerMock.go b/update/mock/pendingTransactionsSyncHandlerMock.go index 5a6f3c1de16..08b5266e725 100644 --- a/update/mock/pendingTransactionsSyncHandlerMock.go +++ b/update/mock/pendingTransactionsSyncHandlerMock.go @@ -1,20 +1,19 @@ package mock import ( - "time" - "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" + "golang.org/x/net/context" ) type PendingTransactionsSyncHandlerMock struct { - SyncPendingTransactionsForCalled func(miniBlocks map[string]*block.MiniBlock, epoch uint32, waitTime time.Duration) error + SyncPendingTransactionsForCalled func(miniBlocks map[string]*block.MiniBlock, epoch uint32, ctx 
context.Context) error GetTransactionsCalled func() (map[string]data.TransactionHandler, error) } -func (et *PendingTransactionsSyncHandlerMock) SyncPendingTransactionsFor(miniBlocks map[string]*block.MiniBlock, epoch uint32, waitTime time.Duration) error { +func (et *PendingTransactionsSyncHandlerMock) SyncPendingTransactionsFor(miniBlocks map[string]*block.MiniBlock, epoch uint32, ctx context.Context) error { if et.SyncPendingTransactionsForCalled != nil { - return et.SyncPendingTransactionsForCalled(miniBlocks, epoch, waitTime) + return et.SyncPendingTransactionsForCalled(miniBlocks, epoch, ctx) } return nil } diff --git a/update/sync/coordinator.go b/update/sync/coordinator.go index ee003aeebb5..68d411f7a8b 100644 --- a/update/sync/coordinator.go +++ b/update/sync/coordinator.go @@ -1,6 +1,7 @@ package sync import ( + "context" "sync" "time" @@ -96,7 +97,9 @@ func (ss *syncState) SyncAllState(epoch uint32) error { go func() { defer wg.Done() - err := ss.miniBlocks.SyncPendingMiniBlocksFromMeta(meta, unFinished, time.Hour) + ctx, cancel := context.WithTimeout(context.Background(), time.Hour) + err := ss.miniBlocks.SyncPendingMiniBlocksFromMeta(meta, unFinished, ctx) + cancel() if err != nil { mutErr.Lock() errFound = err @@ -112,7 +115,9 @@ func (ss *syncState) SyncAllState(epoch uint32) error { return } - err = ss.transactions.SyncPendingTransactionsFor(syncedMiniBlocks, ss.syncingEpoch, time.Hour) + ctx, cancel = context.WithTimeout(context.Background(), time.Hour) + err = ss.transactions.SyncPendingTransactionsFor(syncedMiniBlocks, ss.syncingEpoch, ctx) + cancel() if err != nil { mutErr.Lock() errFound = err diff --git a/update/sync/coordinator_test.go b/update/sync/coordinator_test.go index 7ed436f932b..0b775db4de5 100644 --- a/update/sync/coordinator_test.go +++ b/update/sync/coordinator_test.go @@ -1,11 +1,11 @@ package sync import ( + "context" "encoding/json" "errors" "math/big" "testing" - "time" "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/data" @@ -216,7 +216,7 @@ func TestSyncState_SyncAllStatePendingMiniBlocksErr(t *testing.T) { }, Tries: &mock.EpochStartTriesSyncHandlerMock{}, MiniBlocks: &mock.EpochStartPendingMiniBlocksSyncHandlerMock{ - SyncPendingMiniBlocksFromMetaCalled: func(meta *block.MetaBlock, unFinished map[string]*block.MetaBlock, waitTime time.Duration) error { + SyncPendingMiniBlocksFromMetaCalled: func(meta *block.MetaBlock, unFinished map[string]*block.MetaBlock, ctx context.Context) error { return localErr }, }, @@ -275,7 +275,7 @@ func TestSyncState_SyncAllStateSyncTxsErr(t *testing.T) { Tries: &mock.EpochStartTriesSyncHandlerMock{}, MiniBlocks: &mock.EpochStartPendingMiniBlocksSyncHandlerMock{}, Transactions: &mock.PendingTransactionsSyncHandlerMock{ - SyncPendingTransactionsForCalled: func(miniBlocks map[string]*block.MiniBlock, epoch uint32, waitTime time.Duration) error { + SyncPendingTransactionsForCalled: func(miniBlocks map[string]*block.MiniBlock, epoch uint32, ctx context.Context) error { return localErr }, }, diff --git a/update/sync/syncAccountsDBs.go b/update/sync/syncAccountsDBs.go index 95d95027502..d02e276e7f4 100644 --- a/update/sync/syncAccountsDBs.go +++ b/update/sync/syncAccountsDBs.go @@ -50,7 +50,6 @@ func NewSyncAccountsDBsHandler(args ArgsNewSyncAccountsDBsHandler) (*syncAccount // SyncTriesFrom syncs all the state tries from an epoch start metachain func (st *syncAccountsDBs) SyncTriesFrom(meta *block.MetaBlock, waitTime time.Duration) error { - //TODO: use context instead of waitTime if 
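[Editorial sketch] SyncAllState, patched above, runs the miniblock and transaction phases inside a goroutine with an hour-long context budget and records any failure under a mutex so it can be returned after wg.Wait(). A reduced, runnable sketch of that error-aggregation pattern; the step function is a placeholder for the real sync phases:

package main

import (
	"context"
	"errors"
	"fmt"
	"sync"
	"time"
)

func main() {
	var (
		wg       sync.WaitGroup
		mutErr   sync.Mutex
		errFound error
	)

	// step stands in for SyncPendingMiniBlocksFromMeta / SyncPendingTransactionsFor.
	step := func(ctx context.Context) error { return errors.New("sync failed") }

	wg.Add(1)
	go func() {
		defer wg.Done()
		ctx, cancel := context.WithTimeout(context.Background(), time.Hour)
		defer cancel()
		if err := step(ctx); err != nil {
			mutErr.Lock()
			errFound = err // written under the mutex, read only after wg.Wait()
			mutErr.Unlock()
		}
	}()

	wg.Wait()
	fmt.Println("recorded error:", errFound)
}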
!meta.IsStartOfEpochBlock() { return update.ErrNotEpochStartBlock } diff --git a/update/sync/syncHeaders.go b/update/sync/syncHeaders.go index cecb22eafa1..a486f28e265 100644 --- a/update/sync/syncHeaders.go +++ b/update/sync/syncHeaders.go @@ -153,6 +153,7 @@ func (h *headersToSync) receivedUnFinishedMetaBlocks(headerHandler data.HeaderHa // SyncUnFinishedMetaHeaders syncs and validates all the unfinished metaHeaders for each shard func (h *headersToSync) SyncUnFinishedMetaHeaders(epoch uint32) error { + // TODO: do this with context.Context err := h.syncEpochStartMetaHeader(epoch, waitTimeForHeaders) if err != nil { return err diff --git a/update/sync/syncHeadersByHash.go b/update/sync/syncHeadersByHash.go index 27f3fdae622..bd1727c07ef 100644 --- a/update/sync/syncHeadersByHash.go +++ b/update/sync/syncHeadersByHash.go @@ -1,6 +1,7 @@ package sync import ( + "context" "sync" "time" @@ -16,17 +17,18 @@ import ( ) type syncHeadersByHash struct { - mutMissingHdrs sync.Mutex - mapHeaders map[string]data.HeaderHandler - mapHashes map[string]struct{} - pool dataRetriever.HeadersPool - storage update.HistoryStorer - chReceivedAll chan bool - marshalizer marshal.Marshalizer - stopSyncing bool - epochToSync uint32 - syncedAll bool - requestHandler process.RequestHandler + mutMissingHdrs sync.Mutex + mapHeaders map[string]data.HeaderHandler + mapHashes map[string]struct{} + pool dataRetriever.HeadersPool + storage update.HistoryStorer + chReceivedAll chan bool + marshalizer marshal.Marshalizer + stopSyncing bool + epochToSync uint32 + syncedAll bool + requestHandler process.RequestHandler + waitTimeBetweenRequests time.Duration } // ArgsNewMissingHeadersByHashSyncer defines the arguments needed for the sycner @@ -53,16 +55,17 @@ func NewMissingheadersByHashSyncer(args ArgsNewMissingHeadersByHashSyncer) (*syn } p := &syncHeadersByHash{ - mutMissingHdrs: sync.Mutex{}, - mapHeaders: make(map[string]data.HeaderHandler), - mapHashes: make(map[string]struct{}), - pool: args.Cache, - storage: args.Storage, - chReceivedAll: make(chan bool), - requestHandler: args.RequestHandler, - stopSyncing: true, - syncedAll: false, - marshalizer: args.Marshalizer, + mutMissingHdrs: sync.Mutex{}, + mapHeaders: make(map[string]data.HeaderHandler), + mapHashes: make(map[string]struct{}), + pool: args.Cache, + storage: args.Storage, + chReceivedAll: make(chan bool), + requestHandler: args.RequestHandler, + stopSyncing: true, + syncedAll: false, + marshalizer: args.Marshalizer, + waitTimeBetweenRequests: args.RequestHandler.RequestInterval(), } p.pool.RegisterHandler(p.receivedHeader) @@ -71,47 +74,66 @@ func NewMissingheadersByHashSyncer(args ArgsNewMissingHeadersByHashSyncer) (*syn } // SyncMissingHeadersByHash syncs the missing headers -func (m *syncHeadersByHash) SyncMissingHeadersByHash( - shardIDs []uint32, - headersHashes [][]byte, - waitTime time.Duration, -) error { +func (m *syncHeadersByHash) SyncMissingHeadersByHash(shardIDs []uint32, headersHashes [][]byte, ctx context.Context) error { _ = core.EmptyChannel(m.chReceivedAll) - requestedMBs := 0 - m.mutMissingHdrs.Lock() - m.stopSyncing = false + mapHashesToRequest := make(map[string]uint32) for index, hash := range headersHashes { - m.mapHashes[string(hash)] = struct{}{} - header, ok := m.getHeaderFromPoolOrStorage(hash) - if ok { - m.mapHeaders[string(hash)] = header - continue + mapHashesToRequest[string(hash)] = shardIDs[index] + } + + for { + requestedHdrs := 0 + + m.mutMissingHdrs.Lock() + m.stopSyncing = false + for hash, shardId := range 
mapHashesToRequest { + if _, ok := m.mapHeaders[hash]; ok { + delete(mapHashesToRequest, hash) + } + + m.mapHashes[hash] = struct{}{} + header, ok := m.getHeaderFromPoolOrStorage([]byte(hash)) + if ok { + m.mapHeaders[hash] = header + delete(mapHashesToRequest, hash) + continue + } + + requestedHdrs++ + if shardId == core.MetachainShardId { + m.requestHandler.RequestMetaHeader([]byte(hash)) + continue + } + + m.requestHandler.RequestShardHeader(shardId, []byte(hash)) } + m.mutMissingHdrs.Unlock() - requestedMBs++ - if shardIDs[index] == core.MetachainShardId { - m.requestHandler.RequestMetaHeader(hash) - continue + if requestedHdrs == 0 { + m.mutMissingHdrs.Lock() + m.stopSyncing = true + m.syncedAll = true + m.mutMissingHdrs.Unlock() + return nil } - m.requestHandler.RequestShardHeader(shardIDs[index], hash) - } - m.mutMissingHdrs.Unlock() - - var err error - if requestedMBs > 0 { - err = WaitFor(m.chReceivedAll, waitTime) - } - - m.mutMissingHdrs.Lock() - m.stopSyncing = true - if err == nil { - m.syncedAll = true + select { + case <-m.chReceivedAll: + m.mutMissingHdrs.Lock() + m.stopSyncing = true + m.syncedAll = true + m.mutMissingHdrs.Unlock() + return nil + case <-time.After(m.waitTimeBetweenRequests): + continue + case <-ctx.Done(): + m.mutMissingHdrs.Lock() + m.stopSyncing = true + m.mutMissingHdrs.Unlock() + return update.ErrTimeIsOut + } } - m.mutMissingHdrs.Unlock() - - return err } // receivedHeader is a callback function when a new header was received diff --git a/update/sync/syncMiniBlocks.go b/update/sync/syncMiniBlocks.go index 3fed39486f5..186ac9bc63e 100644 --- a/update/sync/syncMiniBlocks.go +++ b/update/sync/syncMiniBlocks.go @@ -1,6 +1,7 @@ package sync import ( + "context" "sync" "time" @@ -15,17 +16,18 @@ import ( ) type pendingMiniBlocks struct { - mutPendingMb sync.Mutex - mapMiniBlocks map[string]*block.MiniBlock - mapHashes map[string]struct{} - pool storage.Cacher - storage update.HistoryStorer - chReceivedAll chan bool - marshalizer marshal.Marshalizer - stopSyncing bool - epochToSync uint32 - syncedAll bool - requestHandler process.RequestHandler + mutPendingMb sync.Mutex + mapMiniBlocks map[string]*block.MiniBlock + mapHashes map[string]struct{} + pool storage.Cacher + storage update.HistoryStorer + chReceivedAll chan bool + marshalizer marshal.Marshalizer + stopSyncing bool + epochToSync uint32 + syncedAll bool + requestHandler process.RequestHandler + waitTimeBetweenRequests time.Duration } // ArgsNewPendingMiniBlocksSyncer defines the arguments needed for the sycner @@ -52,16 +54,17 @@ func NewPendingMiniBlocksSyncer(args ArgsNewPendingMiniBlocksSyncer) (*pendingMi } p := &pendingMiniBlocks{ - mutPendingMb: sync.Mutex{}, - mapMiniBlocks: make(map[string]*block.MiniBlock), - mapHashes: make(map[string]struct{}), - pool: args.Cache, - storage: args.Storage, - chReceivedAll: make(chan bool), - requestHandler: args.RequestHandler, - stopSyncing: true, - syncedAll: false, - marshalizer: args.Marshalizer, + mutPendingMb: sync.Mutex{}, + mapMiniBlocks: make(map[string]*block.MiniBlock), + mapHashes: make(map[string]struct{}), + pool: args.Cache, + storage: args.Storage, + chReceivedAll: make(chan bool), + requestHandler: args.RequestHandler, + stopSyncing: true, + syncedAll: false, + marshalizer: args.Marshalizer, + waitTimeBetweenRequests: args.RequestHandler.RequestInterval(), } p.pool.RegisterHandler(p.receivedMiniBlock) @@ -70,11 +73,7 @@ func NewPendingMiniBlocksSyncer(args ArgsNewPendingMiniBlocksSyncer) (*pendingMi } // SyncPendingMiniBlocksFromMeta syncs 
the pending miniblocks from an epoch start metaBlock -func (p *pendingMiniBlocks) SyncPendingMiniBlocksFromMeta( - epochStart *block.MetaBlock, - unFinished map[string]*block.MetaBlock, - waitTime time.Duration, -) error { +func (p *pendingMiniBlocks) SyncPendingMiniBlocksFromMeta(epochStart *block.MetaBlock, unFinished map[string]*block.MetaBlock, ctx context.Context) error { if !epochStart.IsStartOfEpochBlock() { return update.ErrNotEpochStartBlock } @@ -94,54 +93,72 @@ func (p *pendingMiniBlocks) SyncPendingMiniBlocksFromMeta( listPendingMiniBlocks = append(listPendingMiniBlocks, computedPending...) } - return p.syncMiniBlocks(listPendingMiniBlocks, waitTime) + return p.syncMiniBlocks(listPendingMiniBlocks, ctx) } // SyncPendingMiniBlocks will sync the miniblocks for the given epoch start meta block -func (p *pendingMiniBlocks) SyncPendingMiniBlocks( - miniBlockHeaders []block.ShardMiniBlockHeader, - waitTime time.Duration, -) error { - return p.syncMiniBlocks(miniBlockHeaders, waitTime) +func (p *pendingMiniBlocks) SyncPendingMiniBlocks(miniBlockHeaders []block.ShardMiniBlockHeader, ctx context.Context) error { + return p.syncMiniBlocks(miniBlockHeaders, ctx) } -func (p *pendingMiniBlocks) syncMiniBlocks(listPendingMiniBlocks []block.ShardMiniBlockHeader, waitTime time.Duration) error { +func (p *pendingMiniBlocks) syncMiniBlocks(listPendingMiniBlocks []block.ShardMiniBlockHeader, ctx context.Context) error { _ = core.EmptyChannel(p.chReceivedAll) - requestedMBs := 0 - p.mutPendingMb.Lock() - p.stopSyncing = false + mapHashesToRequest := make(map[string]uint32) for _, mbHeader := range listPendingMiniBlocks { - p.mapHashes[string(mbHeader.Hash)] = struct{}{} - miniBlock, ok := p.getMiniBlockFromPoolOrStorage(mbHeader.Hash) - if ok { - p.mapMiniBlocks[string(mbHeader.Hash)] = miniBlock - continue - } - - requestedMBs++ - p.requestHandler.RequestMiniBlock(mbHeader.SenderShardID, mbHeader.Hash) + mapHashesToRequest[string(mbHeader.Hash)] = mbHeader.SenderShardID } + + p.mutPendingMb.Lock() + p.stopSyncing = false p.mutPendingMb.Unlock() - var err error - defer func() { + for { + requestedMBs := 0 p.mutPendingMb.Lock() - p.stopSyncing = true - if err == nil { - p.syncedAll = true + p.stopSyncing = false + for hash, shardId := range mapHashesToRequest { + if _, ok := p.mapMiniBlocks[hash]; ok { + delete(mapHashesToRequest, hash) + } + + p.mapHashes[hash] = struct{}{} + miniBlock, ok := p.getMiniBlockFromPoolOrStorage([]byte(hash)) + if ok { + p.mapMiniBlocks[hash] = miniBlock + delete(mapHashesToRequest, hash) + continue + } + + p.requestHandler.RequestMiniBlock(shardId, []byte(hash)) + requestedMBs++ } p.mutPendingMb.Unlock() - }() - if requestedMBs > 0 { - err = WaitFor(p.chReceivedAll, waitTime) - if err != nil { - return err + if requestedMBs == 0 { + p.mutPendingMb.Lock() + p.stopSyncing = true + p.syncedAll = true + p.mutPendingMb.Unlock() + return nil } - } - return nil + select { + case <-p.chReceivedAll: + p.mutPendingMb.Lock() + p.stopSyncing = true + p.syncedAll = true + p.mutPendingMb.Unlock() + return nil + case <-time.After(p.waitTimeBetweenRequests): + continue + case <-ctx.Done(): + p.mutPendingMb.Lock() + p.stopSyncing = true + p.mutPendingMb.Unlock() + return update.ErrTimeIsOut + } + } } func (p *pendingMiniBlocks) createNonceToHashMap(unFinished map[string]*block.MetaBlock) map[uint64]string { diff --git a/update/sync/syncMiniBlocks_test.go b/update/sync/syncMiniBlocks_test.go index 05b5661f87b..18f5074c2da 100644 --- a/update/sync/syncMiniBlocks_test.go +++ 
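[Editorial sketch] Both rewritten loops above range over a hash-to-shard map and delete entries as soon as the item turns up in a pool or in storage. Go's map semantics make this safe: an entry removed during iteration is simply never produced later, so whatever survives the pass still needs re-requesting. A tiny demonstration of the bookkeeping idiom (hash and shard values here are made up):

package main

import "fmt"

func main() {
	// mapHashesToRequest-style bookkeeping: entries are deleted mid-range once
	// resolved, which the Go spec explicitly allows; the leftovers are what
	// the next pass of the loop re-requests.
	pending := map[string]uint32{"hashA": 0, "hashB": 1, "hashC": 2}
	inPool := map[string]bool{"hashA": true, "hashC": true} // pretend these already arrived

	for hash, shard := range pending {
		if inPool[hash] {
			delete(pending, hash)
			continue
		}
		fmt.Println("re-request", hash, "from shard", shard)
	}
	fmt.Println("still pending:", len(pending)) // 1 (hashB)
}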
b/update/sync/syncMiniBlocks_test.go @@ -1,6 +1,7 @@ package sync import ( + "context" "errors" "testing" "time" @@ -116,7 +117,9 @@ func TestSyncPendingMiniBlocksFromMeta_MiniBlocksInPool(t *testing.T) { } unFinished := make(map[string]*block.MetaBlock) unFinished["firstPending"] = metaBlock - err = pendingMiniBlocksSyncer.SyncPendingMiniBlocksFromMeta(metaBlock, unFinished, time.Second) + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + err = pendingMiniBlocksSyncer.SyncPendingMiniBlocksFromMeta(metaBlock, unFinished, ctx) + cancel() require.Nil(t, err) require.True(t, miniBlockInPool) @@ -167,7 +170,9 @@ func TestSyncPendingMiniBlocksFromMeta_MiniBlocksInPoolMissingTimeout(t *testing } unFinished := make(map[string]*block.MetaBlock) unFinished["firstPending"] = metaBlock - err = pendingMiniBlocksSyncer.SyncPendingMiniBlocksFromMeta(metaBlock, unFinished, time.Second) + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + err = pendingMiniBlocksSyncer.SyncPendingMiniBlocksFromMeta(metaBlock, unFinished, ctx) + cancel() require.Equal(t, process.ErrTimeIsOut, err) } @@ -215,6 +220,8 @@ func TestSyncPendingMiniBlocksFromMeta_MiniBlocksInPoolReceive(t *testing.T) { _ = pendingMiniBlocksSyncer.pool.Put(mbHash, mb) }() - err = pendingMiniBlocksSyncer.SyncPendingMiniBlocksFromMeta(metaBlock, unFinished, time.Second) + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + err = pendingMiniBlocksSyncer.SyncPendingMiniBlocksFromMeta(metaBlock, unFinished, ctx) + cancel() require.Nil(t, err) } diff --git a/update/sync/syncTransactions.go b/update/sync/syncTransactions.go index 83aee08e097..55f993a076c 100644 --- a/update/sync/syncTransactions.go +++ b/update/sync/syncTransactions.go @@ -1,6 +1,7 @@ package sync import ( + "context" "sync" "time" @@ -18,17 +19,18 @@ import ( ) type pendingTransactions struct { - mutPendingTx sync.Mutex - mapTransactions map[string]data.TransactionHandler - mapHashes map[string]*block.MiniBlock - txPools map[block.Type]dataRetriever.ShardedDataCacherNotifier - storage map[block.Type]update.HistoryStorer - chReceivedAll chan bool - requestHandler process.RequestHandler - marshalizer marshal.Marshalizer - epochToSync uint32 - stopSync bool - syncedAll bool + mutPendingTx sync.Mutex + mapTransactions map[string]data.TransactionHandler + mapHashes map[string]*block.MiniBlock + txPools map[block.Type]dataRetriever.ShardedDataCacherNotifier + storage map[block.Type]update.HistoryStorer + chReceivedAll chan bool + requestHandler process.RequestHandler + marshalizer marshal.Marshalizer + epochToSync uint32 + stopSync bool + syncedAll bool + waitTimeBetweenRequests time.Duration } // ArgsNewPendingTransactionsSyncer defines the arguments needed for a new transactions syncer @@ -55,14 +57,15 @@ func NewPendingTransactionsSyncer(args ArgsNewPendingTransactionsSyncer) (*pendi } p := &pendingTransactions{ - mutPendingTx: sync.Mutex{}, - mapTransactions: make(map[string]data.TransactionHandler), - mapHashes: make(map[string]*block.MiniBlock), - chReceivedAll: make(chan bool), - requestHandler: args.RequestHandler, - marshalizer: args.Marshalizer, - stopSync: true, - syncedAll: true, + mutPendingTx: sync.Mutex{}, + mapTransactions: make(map[string]data.TransactionHandler), + mapHashes: make(map[string]*block.MiniBlock), + chReceivedAll: make(chan bool), + requestHandler: args.RequestHandler, + marshalizer: args.Marshalizer, + stopSync: true, + syncedAll: true, + waitTimeBetweenRequests: 
args.RequestHandler.RequestInterval(), } p.txPools = make(map[block.Type]dataRetriever.ShardedDataCacherNotifier) @@ -83,42 +86,48 @@ func NewPendingTransactionsSyncer(args ArgsNewPendingTransactionsSyncer) (*pendi } // SyncPendingTransactionsFor syncs pending transactions for a list of miniblocks -func (p *pendingTransactions) SyncPendingTransactionsFor(miniBlocks map[string]*block.MiniBlock, epoch uint32, waitTime time.Duration) error { +func (p *pendingTransactions) SyncPendingTransactionsFor(miniBlocks map[string]*block.MiniBlock, epoch uint32, ctx context.Context) error { _ = core.EmptyChannel(p.chReceivedAll) - p.mutPendingTx.Lock() - p.epochToSync = epoch - p.syncedAll = false - p.stopSync = false - - requestedTxs := 0 - for _, miniBlock := range miniBlocks { - for _, txHash := range miniBlock.TxHashes { - p.mapHashes[string(txHash)] = miniBlock + for { + p.mutPendingTx.Lock() + p.epochToSync = epoch + p.syncedAll = false + p.stopSync = false + + requestedTxs := 0 + for _, miniBlock := range miniBlocks { + for _, txHash := range miniBlock.TxHashes { + p.mapHashes[string(txHash)] = miniBlock + } + requestedTxs += p.requestTransactionsFor(miniBlock) } - requestedTxs += p.requestTransactionsFor(miniBlock) - } - p.mutPendingTx.Unlock() + p.mutPendingTx.Unlock() - var err error - defer func() { - p.mutPendingTx.Lock() - p.stopSync = true - if err == nil { + if requestedTxs == 0 { + p.mutPendingTx.Lock() + p.stopSync = true p.syncedAll = true + p.mutPendingTx.Unlock() + return nil } - p.mutPendingTx.Unlock() - }() - if requestedTxs > 0 { - err = WaitFor(p.chReceivedAll, waitTime) - if err != nil { - log.Warn("could not finish syncing", "error", err) - return err + select { + case <-p.chReceivedAll: + p.mutPendingTx.Lock() + p.stopSync = true + p.syncedAll = true + p.mutPendingTx.Unlock() + return nil + case <-time.After(p.waitTimeBetweenRequests): + continue + case <-ctx.Done(): + p.mutPendingTx.Lock() + p.stopSync = true + p.mutPendingTx.Unlock() + return update.ErrTimeIsOut } } - - return nil } func (p *pendingTransactions) requestTransactionsFor(miniBlock *block.MiniBlock) int { diff --git a/update/sync/syncTransactions_test.go b/update/sync/syncTransactions_test.go index 08d37354734..9248d397953 100644 --- a/update/sync/syncTransactions_test.go +++ b/update/sync/syncTransactions_test.go @@ -1,6 +1,7 @@ package sync import ( + "context" "encoding/json" "math/big" "testing" @@ -106,7 +107,9 @@ func TestSyncPendingTransactionsFor(t *testing.T) { miniBlocks := make(map[string]*block.MiniBlock) mb := &block.MiniBlock{TxHashes: [][]byte{[]byte("txHash")}} miniBlocks["key"] = mb - err = pendingTxsSyncer.SyncPendingTransactionsFor(miniBlocks, 1, time.Second) + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + err = pendingTxsSyncer.SyncPendingTransactionsFor(miniBlocks, 1, ctx) + cancel() require.Nil(t, err) } @@ -132,7 +135,9 @@ func TestSyncPendingTransactionsFor_MissingTxFromPool(t *testing.T) { mb := &block.MiniBlock{TxHashes: [][]byte{[]byte("txHash")}} miniBlocks["key"] = mb - err = pendingTxsSyncer.SyncPendingTransactionsFor(miniBlocks, 1, time.Second) + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + err = pendingTxsSyncer.SyncPendingTransactionsFor(miniBlocks, 1, ctx) + cancel() require.Equal(t, process.ErrTimeIsOut, err) } @@ -170,6 +175,8 @@ func TestSyncPendingTransactionsFor_ReceiveMissingTx(t *testing.T) { pendingTxsSyncer.receivedTransaction(txHash) }() - err = pendingTxsSyncer.SyncPendingTransactionsFor(miniBlocks, 1, time.Second) 
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second) + err = pendingTxsSyncer.SyncPendingTransactionsFor(miniBlocks, 1, ctx) + cancel() require.Nil(t, err) } From 22b8222202fc8b9c353d2b6066e0208d26bc217c Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Tue, 31 Mar 2020 00:51:24 +0300 Subject: [PATCH 57/61] no need for updateListIndex, fixed shard coordinator return when starting from epoch 0. --- cmd/node/main.go | 5 +- consensus/mock/nodesCoordinatorMock.go | 7 +- .../disabled/disabledNodesCoordinator.go | 6 -- epochStart/bootstrap/interface.go | 1 - epochStart/bootstrap/nodesCoordinator.go | 80 ------------------- epochStart/bootstrap/process.go | 39 +++------ epochStart/bootstrap/syncValidatorStatus.go | 22 +++-- epochStart/mock/nodesCoordinatorStub.go | 7 +- integrationTests/consensus/testInitializer.go | 1 - integrationTests/mock/nodesCoordinatorMock.go | 7 +- integrationTests/nodesCoordinatorFactory.go | 2 - integrationTests/testP2PNode.go | 2 - .../testProcessorNodeWithMultisigner.go | 2 - node/mock/nodesCoordinatorMock.go | 11 --- process/mock/nodesCoordinatorMock.go | 1 - process/peer/process.go | 19 +---- sharding/indexHashedNodesCoordinator.go | 71 +--------------- .../indexHashedNodesCoordinatorWithRater.go | 3 +- ...dexHashedNodesCoordinatorWithRater_test.go | 9 +-- sharding/indexHashedNodesCoordinator_test.go | 18 +---- sharding/interface.go | 2 - sharding/networksharding/mock_test.go | 12 +-- sharding/shardingArgs.go | 1 - 23 files changed, 35 insertions(+), 293 deletions(-) diff --git a/cmd/node/main.go b/cmd/node/main.go index c0f59412792..c7bc017dc55 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -631,6 +631,7 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { KeyGen: cryptoComponents.TxSignKeyGen, BlockKeyGen: cryptoComponents.BlockSignKeyGen, GenesisNodesConfig: genesisNodesConfig, + GenesisShardCoordinator: genesisShardCoordinator, PathManager: pathManager, WorkingDir: workingDir, DefaultDBPath: defaultDBPath, @@ -1192,8 +1193,7 @@ func createShardCoordinator( prefsConfig config.PreferencesConfig, log logger.Logger, ) (sharding.Coordinator, core.NodeType, error) { - // TODO: after start in epoch is merged, this needs to be refactored as the shardID cannot always be taken - // from initial configuration but needs to be determined by nodes coordinator + selfShardId, err := getShardIdFromNodePubKey(pubKey, nodesConfig) nodeType := core.NodeTypeValidator if err == sharding.ErrPublicKeyNotFoundInGenesis { @@ -1271,7 +1271,6 @@ func createNodesCoordinator( argumentsNodesCoordinator := sharding.ArgNodesCoordinator{ ShardConsensusGroupSize: shardConsensusGroupSize, MetaConsensusGroupSize: metaConsensusGroupSize, - ListIndexUpdater: ratingAndListIndexHandler, Hasher: hasher, Shuffler: nodeShuffler, EpochStartNotifier: epochStartNotifier, diff --git a/consensus/mock/nodesCoordinatorMock.go b/consensus/mock/nodesCoordinatorMock.go index 1b5dc6e1b3e..451c9f1ee7c 100644 --- a/consensus/mock/nodesCoordinatorMock.go +++ b/consensus/mock/nodesCoordinatorMock.go @@ -21,11 +21,6 @@ func (ncm *NodesCoordinatorMock) GetWaitingPublicKeysPerShard(_ uint32) (map[uin return nil, nil } -// UpdatePeersListAndIndex - -func (ncm *NodesCoordinatorMock) UpdatePeersListAndIndex() error { - return nil -} - // ComputeConsensusGroup - func (ncm *NodesCoordinatorMock) ComputeConsensusGroup( randomness []byte, @@ -122,7 +117,7 @@ func (ncm *NodesCoordinatorMock) GetConsensusWhitelistedNodes( } // SetNodesPerShards - -func (ncm *NodesCoordinatorMock) 
SetNodesPerShards(_ map[uint32][]sharding.Validator, _ map[uint32][]sharding.Validator, _ uint32, _ bool) error { +func (ncm *NodesCoordinatorMock) SetNodesPerShards(_ map[uint32][]sharding.Validator, _ map[uint32][]sharding.Validator, _ uint32) error { return nil } diff --git a/epochStart/bootstrap/disabled/disabledNodesCoordinator.go b/epochStart/bootstrap/disabled/disabledNodesCoordinator.go index e1c0e531de6..098d64210f5 100644 --- a/epochStart/bootstrap/disabled/disabledNodesCoordinator.go +++ b/epochStart/bootstrap/disabled/disabledNodesCoordinator.go @@ -18,7 +18,6 @@ func (n *nodesCoordinator) SetNodesPerShards( _ map[uint32][]sharding.Validator, _ map[uint32][]sharding.Validator, _ uint32, - _ bool, ) error { return nil } @@ -68,11 +67,6 @@ func (n *nodesCoordinator) GetValidatorWithPublicKey(_ []byte, _ uint32) (valida return nil, 0, nil } -// UpdatePeersListAndIndex - -func (n *nodesCoordinator) UpdatePeersListAndIndex() error { - return nil -} - // LoadState - func (n *nodesCoordinator) LoadState(_ []byte) error { return nil diff --git a/epochStart/bootstrap/interface.go b/epochStart/bootstrap/interface.go index cb095ffc838..c890cb6c364 100644 --- a/epochStart/bootstrap/interface.go +++ b/epochStart/bootstrap/interface.go @@ -28,7 +28,6 @@ type EpochStartNodesCoordinator interface { ComputeNodesConfigFor( metaBlock *block.MetaBlock, validatorInfos []*state.ValidatorInfo, - updateListInfo bool, ) (*sharding.EpochValidators, error) ComputeNodesConfigForGenesis(genesis *sharding.NodesSetup) (*sharding.EpochValidators, error) ComputeShardForSelfPublicKey(epoch uint32, pubKey []byte) uint32 diff --git a/epochStart/bootstrap/nodesCoordinator.go b/epochStart/bootstrap/nodesCoordinator.go index d3876ac45a4..e1d6d73a21d 100644 --- a/epochStart/bootstrap/nodesCoordinator.go +++ b/epochStart/bootstrap/nodesCoordinator.go @@ -10,7 +10,6 @@ import ( "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/epochStart" - "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/sharding" ) @@ -20,8 +19,6 @@ type nodesCoordinator struct { numShards map[uint32]uint32 shardConsensusGroupSize uint32 metaConsensusGroupSize uint32 - validatorAccountsDB state.AccountsAdapter - adrConv state.AddressConverter nodesConfig map[uint32]*epochNodesConfig } @@ -38,8 +35,6 @@ type epochNodesConfig struct { type ArgsNewStartInEpochNodesCoordinator struct { Shuffler sharding.NodesShuffler Chance sharding.ChanceComputer - ValidatorAccountsDB state.AccountsAdapter - AdrConv state.AddressConverter ShardConsensusGroupSize uint32 MetaConsensusGroupSize uint32 } @@ -53,8 +48,6 @@ func NewStartInEpochNodesCoordinator(args ArgsNewStartInEpochNodesCoordinator) ( metaConsensusGroupSize: args.MetaConsensusGroupSize, nodesConfig: make(map[uint32]*epochNodesConfig), numShards: make(map[uint32]uint32), - validatorAccountsDB: args.ValidatorAccountsDB, - adrConv: args.AdrConv, } return n, nil @@ -84,7 +77,6 @@ func (n *nodesCoordinator) ComputeNodesConfigForGenesis(nodesConfig *sharding.No func (n *nodesCoordinator) ComputeNodesConfigFor( metaBlock *block.MetaBlock, validatorInfos []*state.ValidatorInfo, - updateListInfo bool, ) (*sharding.EpochValidators, error) { if check.IfNil(metaBlock) { return nil, epochStart.ErrNilHeaderHandler @@ -157,12 +149,6 @@ func (n *nodesCoordinator) ComputeNodesConfigFor( } epochValidators := epochNodesConfigToEpochValidators(n.nodesConfig[newEpoch]) - if updateListInfo { - err = 
n.updateAccountListAndIndex(newEpoch) - if err != nil { - return nil, err - } - } return epochValidators, nil } @@ -306,72 +292,6 @@ func epochNodesConfigToEpochValidators(config *epochNodesConfig) *sharding.Epoch return result } -func (n *nodesCoordinator) updateAccountListAndIndex(epoch uint32) error { - err := n.updateAccountsForGivenMap(n.nodesConfig[epoch].eligibleMap, core.EligibleList) - if err != nil { - return err - } - - err = n.updateAccountsForGivenMap(n.nodesConfig[epoch].waitingMap, core.WaitingList) - if err != nil { - return err - } - - return nil -} - -func (n *nodesCoordinator) updateAccountsForGivenMap( - validators map[uint32][]sharding.Validator, - list core.PeerType, -) error { - for shardId, accountsPerShard := range validators { - for index, account := range accountsPerShard { - err := n.updateListAndIndex( - string(account.PubKey()), - shardId, - string(list), - uint32(index)) - if err != nil { - log.Warn("error while updating list and index for peer", - "error", err, - "public key", account.PubKey()) - } - } - } - - return nil -} - -func (n *nodesCoordinator) updateListAndIndex(pubKey string, shardID uint32, list string, index uint32) error { - peer, err := n.getPeerAccount([]byte(pubKey)) - if err != nil { - log.Debug("error getting peer account", "error", err, "key", pubKey) - return err - } - - peer.SetListAndIndex(shardID, list, index) - return nil -} - -func (n *nodesCoordinator) getPeerAccount(address []byte) (state.PeerAccountHandler, error) { - addressContainer, err := n.adrConv.CreateAddressFromPublicKeyBytes(address) - if err != nil { - return nil, err - } - - account, err := n.validatorAccountsDB.LoadAccount(addressContainer) - if err != nil { - return nil, err - } - - peerAccount, ok := account.(state.PeerAccountHandler) - if !ok { - return nil, process.ErrInvalidPeerAccount - } - - return peerAccount, nil -} - // IsInterfaceNil returns true if underlying object is nil func (n *nodesCoordinator) IsInterfaceNil() bool { return n == nil diff --git a/epochStart/bootstrap/process.go b/epochStart/bootstrap/process.go index b0781120835..266495d70cc 100644 --- a/epochStart/bootstrap/process.go +++ b/epochStart/bootstrap/process.go @@ -13,8 +13,6 @@ import ( "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/data/state/addressConverters" - "github.com/ElrondNetwork/elrond-go/data/state/factory" "github.com/ElrondNetwork/elrond-go/data/syncer" "github.com/ElrondNetwork/elrond-go/data/trie" trieFactory "github.com/ElrondNetwork/elrond-go/data/trie/factory" @@ -81,6 +79,7 @@ type epochStartBootstrap struct { blockKeyGen crypto.KeyGenerator shardCoordinator sharding.Coordinator genesisNodesConfig *sharding.NodesSetup + genesisShardCoordinator sharding.Coordinator pathManager storage.PathManagerHandler workingDir string defaultDBPath string @@ -133,6 +132,7 @@ type ArgsEpochStartBootstrap struct { KeyGen crypto.KeyGenerator BlockKeyGen crypto.KeyGenerator GenesisNodesConfig *sharding.NodesSetup + GenesisShardCoordinator sharding.Coordinator PathManager storage.PathManagerHandler WorkingDir string DefaultDBPath string @@ -152,6 +152,7 @@ func NewEpochStartBootstrap(args ArgsEpochStartBootstrap) (*epochStartBootstrap, generalConfig: args.GeneralConfig, economicsData: args.EconomicsData, genesisNodesConfig: args.GenesisNodesConfig, + genesisShardCoordinator: args.GenesisShardCoordinator, workingDir: args.WorkingDir, pathManager: 
args.PathManager, defaultEpochString: args.DefaultEpochString, @@ -189,8 +190,8 @@ func (e *epochStartBootstrap) isStartInEpochZero() bool { func (e *epochStartBootstrap) prepareEpochZero() (Parameters, error) { parameters := Parameters{ Epoch: 0, - SelfShardId: e.shardCoordinator.SelfId(), - NumOfShards: e.shardCoordinator.NumberOfShards(), + SelfShardId: e.genesisShardCoordinator.SelfId(), + NumOfShards: e.genesisShardCoordinator.NumberOfShards(), } return parameters, nil } @@ -405,7 +406,7 @@ func (e *epochStartBootstrap) requestAndProcessing() (Parameters, error) { } log.Info("start in epoch bootstrap: syncPeerAccountsState", "peer account tries map length", len(e.peerAccountTries)) - err = e.processNodesConfig(pubKeyBytes, e.epochStartMeta.ValidatorStatsRootHash) + err = e.processNodesConfig(pubKeyBytes) if err != nil { return Parameters{}, err } @@ -450,28 +451,14 @@ func (e *epochStartBootstrap) requestAndProcessing() (Parameters, error) { return parameters, nil } -func (e *epochStartBootstrap) processNodesConfig(pubKey []byte, rootHash []byte) error { - accountFactory := factory.NewAccountCreator() - peerAccountsDB, err := state.NewPeerAccountsDB(e.peerAccountTries[string(rootHash)], e.hasher, e.marshalizer, accountFactory) - if err != nil { - return err - } - - blsAddressConverter, err := addressConverters.NewPlainAddressConverter( - e.generalConfig.BLSPublicKey.Length, - e.generalConfig.BLSPublicKey.Prefix, - ) - if err != nil { - return err - } +func (e *epochStartBootstrap) processNodesConfig(pubKey []byte) error { + var err error argsNewValidatorStatusSyncers := ArgsNewSyncValidatorStatus{ - DataPool: e.dataPool, - Marshalizer: e.marshalizer, - RequestHandler: e.requestHandler, - Rater: e.rater, - GenesisNodesConfig: e.genesisNodesConfig, - ValidatorAccountsDB: peerAccountsDB, - AdrConv: blsAddressConverter, + DataPool: e.dataPool, + Marshalizer: e.marshalizer, + RequestHandler: e.requestHandler, + Rater: e.rater, + GenesisNodesConfig: e.genesisNodesConfig, } e.nodesConfigHandler, err = NewSyncValidatorStatus(argsNewValidatorStatusSyncers) if err != nil { diff --git a/epochStart/bootstrap/syncValidatorStatus.go b/epochStart/bootstrap/syncValidatorStatus.go index 634a7a02a69..09e667e0539 100644 --- a/epochStart/bootstrap/syncValidatorStatus.go +++ b/epochStart/bootstrap/syncValidatorStatus.go @@ -28,13 +28,11 @@ type syncValidatorStatus struct { // ArgsNewSyncValidatorStatus type ArgsNewSyncValidatorStatus struct { - DataPool dataRetriever.PoolsHolder - Marshalizer marshal.Marshalizer - RequestHandler process.RequestHandler - Rater sharding.ChanceComputer - GenesisNodesConfig *sharding.NodesSetup - ValidatorAccountsDB state.AccountsAdapter - AdrConv state.AddressConverter + DataPool dataRetriever.PoolsHolder + Marshalizer marshal.Marshalizer + RequestHandler process.RequestHandler + Rater sharding.ChanceComputer + GenesisNodesConfig *sharding.NodesSetup } // NewSyncValidatorStatus creates a new validator status process component @@ -69,8 +67,6 @@ func NewSyncValidatorStatus(args ArgsNewSyncValidatorStatus) (*syncValidatorStat Chance: args.Rater, ShardConsensusGroupSize: args.GenesisNodesConfig.ConsensusGroupSize, MetaConsensusGroupSize: args.GenesisNodesConfig.MetaChainConsensusGroupSize, - AdrConv: args.AdrConv, - ValidatorAccountsDB: args.ValidatorAccountsDB, } s.nodeCoordinator, err = NewStartInEpochNodesCoordinator(argsNodesCoordinator) @@ -90,12 +86,12 @@ func (s *syncValidatorStatus) NodesConfigFromMetaBlock( return nil, 0, epochStart.ErrNotEpochStartBlock } - 
prevEpochsValidators, err := s.computeNodesConfigFor(prevMetaBlock, false) + prevEpochsValidators, err := s.computeNodesConfigFor(prevMetaBlock) if err != nil { return nil, 0, err } - currEpochsValidators, err := s.computeNodesConfigFor(currMetaBlock, true) + currEpochsValidators, err := s.computeNodesConfigFor(currMetaBlock) if err != nil { return nil, 0, err } @@ -115,7 +111,7 @@ func (s *syncValidatorStatus) NodesConfigFromMetaBlock( return nodesConfig, selfShardId, nil } -func (s *syncValidatorStatus) computeNodesConfigFor(metaBlock *block.MetaBlock, updateValidatorInfo bool) (*sharding.EpochValidators, error) { +func (s *syncValidatorStatus) computeNodesConfigFor(metaBlock *block.MetaBlock) (*sharding.EpochValidators, error) { if metaBlock.Epoch == 0 { return s.nodeCoordinator.ComputeNodesConfigForGenesis(s.genesisNodesConfig) } @@ -125,7 +121,7 @@ func (s *syncValidatorStatus) computeNodesConfigFor(metaBlock *block.MetaBlock, return nil, err } - return s.nodeCoordinator.ComputeNodesConfigFor(metaBlock, epochValidatorsInfo, updateValidatorInfo) + return s.nodeCoordinator.ComputeNodesConfigFor(metaBlock, epochValidatorsInfo) } func findPeerMiniBlockHeaders(metaBlock *block.MetaBlock) []block.ShardMiniBlockHeader { diff --git a/epochStart/mock/nodesCoordinatorStub.go b/epochStart/mock/nodesCoordinatorStub.go index 63a1ed75e83..304e57da481 100644 --- a/epochStart/mock/nodesCoordinatorStub.go +++ b/epochStart/mock/nodesCoordinatorStub.go @@ -33,11 +33,6 @@ func (ncm *NodesCoordinatorStub) GetAllWaitingValidatorsPublicKeys(_ uint32) (ma return nil, nil } -// UpdatePeersListAndIndex - -func (ncm *NodesCoordinatorStub) UpdatePeersListAndIndex() error { - return nil -} - // GetNumTotalEligible - func (ncm *NodesCoordinatorStub) GetNumTotalEligible() uint64 { return 1 @@ -94,7 +89,7 @@ func (ncm *NodesCoordinatorStub) GetConsensusValidatorsPublicKeys( } // SetNodesPerShards - -func (ncm *NodesCoordinatorStub) SetNodesPerShards(_ map[uint32][]sharding.Validator, _ map[uint32][]sharding.Validator, _ uint32, _ bool) error { +func (ncm *NodesCoordinatorStub) SetNodesPerShards(_ map[uint32][]sharding.Validator, _ map[uint32][]sharding.Validator, _ uint32) error { return nil } diff --git a/integrationTests/consensus/testInitializer.go b/integrationTests/consensus/testInitializer.go index 2f9dcc9bb8f..1b906baf1ea 100644 --- a/integrationTests/consensus/testInitializer.go +++ b/integrationTests/consensus/testInitializer.go @@ -495,7 +495,6 @@ func createNodes( WaitingNodes: waitingMap, SelfPublicKey: []byte(strconv.Itoa(i)), ConsensusGroupCache: consensusCache, - ListIndexUpdater: &mock.ListIndexUpdaterStub{}, } nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) diff --git a/integrationTests/mock/nodesCoordinatorMock.go b/integrationTests/mock/nodesCoordinatorMock.go index 79f49b2f89f..90f1e53451f 100644 --- a/integrationTests/mock/nodesCoordinatorMock.go +++ b/integrationTests/mock/nodesCoordinatorMock.go @@ -33,11 +33,6 @@ func (ncm *NodesCoordinatorMock) GetAllEligibleValidatorsPublicKeys(_ uint32) (m return nil, nil } -// UpdatePeersListAndIndex - -func (ncm *NodesCoordinatorMock) UpdatePeersListAndIndex() error { - return nil -} - // GetAllWaitingValidatorsPublicKeys - func (ncm *NodesCoordinatorMock) GetAllWaitingValidatorsPublicKeys(_ uint32) (map[uint32][][]byte, error) { if ncm.GetAllWaitingValidatorsPublicKeysCalled != nil { @@ -99,7 +94,7 @@ func (ncm *NodesCoordinatorMock) GetConsensusValidatorsPublicKeys( } // SetNodesPerShards - -func (ncm 
*NodesCoordinatorMock) SetNodesPerShards(_ map[uint32][]sharding.Validator, _ map[uint32][]sharding.Validator, _ uint32, _ bool) error { +func (ncm *NodesCoordinatorMock) SetNodesPerShards(_ map[uint32][]sharding.Validator, _ map[uint32][]sharding.Validator, _ uint32) error { return nil } diff --git a/integrationTests/nodesCoordinatorFactory.go b/integrationTests/nodesCoordinatorFactory.go index 89a90c65b92..28a4fb25c4a 100644 --- a/integrationTests/nodesCoordinatorFactory.go +++ b/integrationTests/nodesCoordinatorFactory.go @@ -51,7 +51,6 @@ func (tpn *IndexHashedNodesCoordinatorFactory) CreateNodesCoordinator(arg ArgInd SelfPublicKey: pubKeyBytes, ConsensusGroupCache: arg.consensusGroupCache, BootStorer: arg.bootStorer, - ListIndexUpdater: arg.listIndexUpdater, } nodesCoordinator, err := sharding.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) if err != nil { @@ -88,7 +87,6 @@ func (ihncrf *IndexHashedNodesCoordinatorWithRaterFactory) CreateNodesCoordinato SelfPublicKey: pubKeyBytes, ConsensusGroupCache: arg.consensusGroupCache, BootStorer: arg.bootStorer, - ListIndexUpdater: arg.listIndexUpdater, } baseCoordinator, err := sharding.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) diff --git a/integrationTests/testP2PNode.go b/integrationTests/testP2PNode.go index f8f5b329aa4..27174c5a6be 100644 --- a/integrationTests/testP2PNode.go +++ b/integrationTests/testP2PNode.go @@ -260,7 +260,6 @@ func CreateNodesWithTestP2PNodes( WaitingNodes: make(map[uint32][]sharding.Validator), Epoch: 0, EpochStartNotifier: &mock.EpochStartNotifierStub{}, - ListIndexUpdater: &mock.ListIndexUpdaterStub{}, } nodesCoordinator, err := sharding.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) log.LogIfError(err) @@ -300,7 +299,6 @@ func CreateNodesWithTestP2PNodes( WaitingNodes: make(map[uint32][]sharding.Validator), Epoch: 0, EpochStartNotifier: &mock.EpochStartNotifierStub{}, - ListIndexUpdater: &mock.ListIndexUpdaterStub{}, } nodesCoordinator, err := sharding.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) log.LogIfError(err) diff --git a/integrationTests/testProcessorNodeWithMultisigner.go b/integrationTests/testProcessorNodeWithMultisigner.go index 891e45d425c..ac330f7f196 100644 --- a/integrationTests/testProcessorNodeWithMultisigner.go +++ b/integrationTests/testProcessorNodeWithMultisigner.go @@ -299,7 +299,6 @@ func CreateNodesWithNodesCoordinatorAndHeaderSigVerifier( WaitingNodes: make(map[uint32][]sharding.Validator), SelfPublicKey: []byte(strconv.Itoa(int(shardId))), ConsensusGroupCache: consensusCache, - ListIndexUpdater: &mock.ListIndexUpdaterStub{}, } nodesCoordinator, err := sharding.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) @@ -378,7 +377,6 @@ func CreateNodesWithNodesCoordinatorKeygenAndSingleSigner( WaitingNodes: waitingMap, SelfPublicKey: []byte(strconv.Itoa(int(shardId))), ConsensusGroupCache: cache, - ListIndexUpdater: &mock.ListIndexUpdaterStub{}, } nodesCoordinator, err := sharding.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) diff --git a/node/mock/nodesCoordinatorMock.go b/node/mock/nodesCoordinatorMock.go index 8e6367568c9..f5fec4392c2 100644 --- a/node/mock/nodesCoordinatorMock.go +++ b/node/mock/nodesCoordinatorMock.go @@ -12,16 +12,6 @@ type NodesCoordinatorMock struct { GetAllEligibleValidatorsPublicKeysCalled func() (map[uint32][][]byte, error) } -// SetConfig - -func (ncm *NodesCoordinatorMock) SetConfig(_ *sharding.NodesCoordinatorRegistry) error { - return nil -} - -// UpdatePeersListAndIndex - -func (ncm 
*NodesCoordinatorMock) UpdatePeersListAndIndex() error { - return nil -} - // GetAllEligibleValidatorsPublicKeys - func (ncm *NodesCoordinatorMock) GetAllEligibleValidatorsPublicKeys(_ uint32) (map[uint32][][]byte, error) { if ncm.GetAllEligibleValidatorsPublicKeysCalled != nil { @@ -102,7 +92,6 @@ func (ncm *NodesCoordinatorMock) SetNodesPerShards( _ map[uint32][]sharding.Validator, _ map[uint32][]sharding.Validator, _ uint32, - _ bool, ) error { return nil } diff --git a/process/mock/nodesCoordinatorMock.go b/process/mock/nodesCoordinatorMock.go index 492c1a98ba9..d3a2fac35b2 100644 --- a/process/mock/nodesCoordinatorMock.go +++ b/process/mock/nodesCoordinatorMock.go @@ -143,7 +143,6 @@ func (ncm *NodesCoordinatorMock) SetNodesPerShards( eligible map[uint32][]sharding.Validator, _ map[uint32][]sharding.Validator, epoch uint32, - _ bool, ) error { if ncm.SetNodesPerShardsCalled != nil { return ncm.SetNodesPerShardsCalled(eligible, epoch) diff --git a/process/peer/process.go b/process/peer/process.go index 3638df12f62..b89f744bb0e 100644 --- a/process/peer/process.go +++ b/process/peer/process.go @@ -129,24 +129,7 @@ func NewValidatorStatisticsProcessor(arguments ArgValidatorStatisticsProcessor) ratingReaderSetter.SetRatingReader(rr) - listIndexUpdaterSetter, ok := rater.(sharding.ListIndexUpdaterSetter) - if !ok { - return nil, process.ErrNilListIndexUpdaterSetter - } - log.Debug("setting list index updater") - - liu := &ListIndexUpdater{ - updateListAndIndex: vs.updateListAndIndex, - } - - listIndexUpdaterSetter.SetListIndexUpdater(liu) - - err := vs.nodesCoordinator.UpdatePeersListAndIndex() - if err != nil { - return nil, err - } - - err = vs.saveInitialState(arguments.StakeValue, rater.GetStartRating(), arguments.StartEpoch) + err := vs.saveInitialState(arguments.StakeValue, rater.GetStartRating(), arguments.StartEpoch) if err != nil { return nil, err } diff --git a/sharding/indexHashedNodesCoordinator.go b/sharding/indexHashedNodesCoordinator.go index fcc4e448f1a..535764aa2c7 100644 --- a/sharding/indexHashedNodesCoordinator.go +++ b/sharding/indexHashedNodesCoordinator.go @@ -48,7 +48,6 @@ type indexHashedNodesCoordinator struct { hasher hashing.Hasher shuffler NodesShuffler epochStartRegistrationHandler EpochStartEventNotifier - listIndexUpdater ListIndexUpdaterHandler bootStorer storage.Storer selfPubKey []byte nodesConfig map[uint32]*epochNodesConfig @@ -88,7 +87,6 @@ func NewIndexHashedNodesCoordinator(arguments ArgNodesCoordinator) (*indexHashed shuffler: arguments.Shuffler, epochStartRegistrationHandler: arguments.EpochStartNotifier, bootStorer: arguments.BootStorer, - listIndexUpdater: arguments.ListIndexUpdater, selfPubKey: arguments.SelfPublicKey, nodesConfig: nodesConfig, currentEpoch: arguments.Epoch, @@ -100,7 +98,7 @@ func NewIndexHashedNodesCoordinator(arguments ArgNodesCoordinator) (*indexHashed } ihgs.nodesPerShardSetter = ihgs - err = ihgs.nodesPerShardSetter.SetNodesPerShards(arguments.EligibleNodes, arguments.WaitingNodes, arguments.Epoch, false) + err = ihgs.nodesPerShardSetter.SetNodesPerShards(arguments.EligibleNodes, arguments.WaitingNodes, arguments.Epoch) if err != nil { return nil, err } @@ -135,9 +133,6 @@ func checkArguments(arguments ArgNodesCoordinator) error { if check.IfNil(arguments.Shuffler) { return ErrNilShuffler } - if check.IfNil(arguments.ListIndexUpdater) { - return ErrNilListIndexUpdater - } if check.IfNil(arguments.BootStorer) { return ErrNilBootStorer } @@ -153,7 +148,6 @@ func (ihgs *indexHashedNodesCoordinator) SetNodesPerShards( eligible 
map[uint32][]Validator, waiting map[uint32][]Validator, epoch uint32, - updatePeersListAndIndex bool, ) error { ihgs.mutNodesConfig.Lock() defer ihgs.mutNodesConfig.Unlock() @@ -211,13 +205,6 @@ func (ihgs *indexHashedNodesCoordinator) SetNodesPerShards( ihgs.nodesConfig[epoch] = nodesConfig ihgs.numTotalEligible = numTotalEligible - if updatePeersListAndIndex { - err := ihgs.updatePeersListAndIndex(nodesConfig) - if err != nil { - return err - } - } - return nil } @@ -515,7 +502,7 @@ func (ihgs *indexHashedNodesCoordinator) EpochStartPrepare(metaHeader data.Heade eligibleMap, waitingMap, stillRemaining := ihgs.shuffler.UpdateNodeLists(shufflerArgs) - err := ihgs.nodesPerShardSetter.SetNodesPerShards(eligibleMap, waitingMap, newEpoch, true) + err := ihgs.nodesPerShardSetter.SetNodesPerShards(eligibleMap, waitingMap, newEpoch) if err != nil { log.Error("set nodes per shard failed", "error", err.Error()) } @@ -631,60 +618,6 @@ func (ihgs *indexHashedNodesCoordinator) GetConsensusWhitelistedNodes( return shardEligible, nil } -// UpdatePeersListAndIndex will update the list and the index for all peers -func (ihgs *indexHashedNodesCoordinator) UpdatePeersListAndIndex() error { - ihgs.mutNodesConfig.RLock() - nodesConfig, ok := ihgs.nodesConfig[ihgs.currentEpoch] - ihgs.mutNodesConfig.RUnlock() - - if !ok { - return ErrEpochNodesConfigDoesNotExist - } - - nodesConfig.mutNodesMaps.RLock() - defer nodesConfig.mutNodesMaps.RUnlock() - - return ihgs.updatePeersListAndIndex(nodesConfig) -} - -// updatePeersListAndIndex will update the list and the index for all peers -// should be called with mutex locked -func (ihgs *indexHashedNodesCoordinator) updatePeersListAndIndex(nodesConfig *epochNodesConfig) error { - err := ihgs.updatePeerAccountsForGivenMap(nodesConfig.eligibleMap, core.EligibleList) - if err != nil { - return err - } - - err = ihgs.updatePeerAccountsForGivenMap(nodesConfig.waitingMap, core.WaitingList) - if err != nil { - return err - } - - return nil -} - -func (ihgs *indexHashedNodesCoordinator) updatePeerAccountsForGivenMap( - peers map[uint32][]Validator, - list core.PeerType, -) error { - for shardId, accountsPerShard := range peers { - for index, account := range accountsPerShard { - err := ihgs.listIndexUpdater.UpdateListAndIndex( - string(account.PubKey()), - shardId, - string(list), - uint32(index)) - if err != nil { - log.Warn("error while updating list and index for peer", - "error", err, - "public key", account.PubKey()) - } - } - } - - return nil -} - func (ihgs *indexHashedNodesCoordinator) computeShardForSelfPublicKey(nodesConfig *epochNodesConfig) uint32 { pubKey := ihgs.selfPubKey selfShard := ihgs.shardIDAsObserver diff --git a/sharding/indexHashedNodesCoordinatorWithRater.go b/sharding/indexHashedNodesCoordinatorWithRater.go index d4a216fe1f6..49e6338bc27 100644 --- a/sharding/indexHashedNodesCoordinatorWithRater.go +++ b/sharding/indexHashedNodesCoordinatorWithRater.go @@ -46,9 +46,8 @@ func (ihgs *indexHashedNodesCoordinatorWithRater) SetNodesPerShards( eligible map[uint32][]Validator, waiting map[uint32][]Validator, epoch uint32, - updateList bool, ) error { - err := ihgs.indexHashedNodesCoordinator.SetNodesPerShards(eligible, waiting, epoch, updateList) + err := ihgs.indexHashedNodesCoordinator.SetNodesPerShards(eligible, waiting, epoch) if err != nil { return err } diff --git a/sharding/indexHashedNodesCoordinatorWithRater_test.go b/sharding/indexHashedNodesCoordinatorWithRater_test.go index a213c4f7b88..a18dcfadaf2 100644 --- 
a/sharding/indexHashedNodesCoordinatorWithRater_test.go +++ b/sharding/indexHashedNodesCoordinatorWithRater_test.go @@ -47,7 +47,7 @@ func TestIndexHashedGroupSelectorWithRater_SetNilEligibleMapShouldErr(t *testing waiting := createDummyNodesMap(2, 1, "waiting") nc, _ := NewIndexHashedNodesCoordinator(createArguments()) ihgs, _ := NewIndexHashedNodesCoordinatorWithRater(nc, &mock.RaterMock{}) - assert.Equal(t, ErrNilInputNodesMap, ihgs.SetNodesPerShards(nil, waiting, 0, true)) + assert.Equal(t, ErrNilInputNodesMap, ihgs.SetNodesPerShards(nil, waiting, 0)) } func TestIndexHashedGroupSelectorWithRater_OkValShouldWork(t *testing.T) { @@ -71,7 +71,6 @@ func TestIndexHashedGroupSelectorWithRater_OkValShouldWork(t *testing.T) { WaitingNodes: waitingMap, SelfPublicKey: []byte("test"), ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ListIndexUpdater: &mock.ListIndexUpdaterStub{}, } nc, err := NewIndexHashedNodesCoordinator(arguments) assert.Nil(t, err) @@ -151,7 +150,6 @@ func TestIndexHashedGroupSelectorWithRater_ComputeExpandedList(t *testing.T) { WaitingNodes: waitingMap, SelfPublicKey: []byte("key"), ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ListIndexUpdater: &mock.ListIndexUpdaterStub{}, } ratingPk0 := uint32(5) @@ -217,7 +215,6 @@ func BenchmarkIndexHashedGroupSelectorWithRater_ComputeValidatorsGroup63of400(b WaitingNodes: waitingMap, SelfPublicKey: []byte("key"), ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ListIndexUpdater: &mock.ListIndexUpdaterStub{}, } ihgs, _ := NewIndexHashedNodesCoordinator(arguments) ihgsRater, _ := NewIndexHashedNodesCoordinatorWithRater(ihgs, &mock.RaterMock{}) @@ -258,7 +255,6 @@ func TestIndexHashedGroupSelectorWithRater_GetValidatorWithPublicKeyShouldReturn WaitingNodes: waitingMap, SelfPublicKey: []byte("key"), ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ListIndexUpdater: &mock.ListIndexUpdaterStub{}, } nc, _ := NewIndexHashedNodesCoordinator(arguments) ihgs, _ := NewIndexHashedNodesCoordinatorWithRater(nc, &mock.RaterMock{}) @@ -294,7 +290,6 @@ func TestIndexHashedGroupSelectorWithRater_GetValidatorWithPublicKeyShouldReturn WaitingNodes: waitingMap, SelfPublicKey: []byte("key"), ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ListIndexUpdater: &mock.ListIndexUpdaterStub{}, } nc, _ := NewIndexHashedNodesCoordinator(arguments) ihgs, _ := NewIndexHashedNodesCoordinatorWithRater(nc, &mock.RaterMock{}) @@ -344,7 +339,6 @@ func TestIndexHashedGroupSelectorWithRater_GetValidatorWithPublicKeyShouldWork(t WaitingNodes: waitingMap, SelfPublicKey: []byte("key"), ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ListIndexUpdater: &mock.ListIndexUpdaterStub{}, } nc, _ := NewIndexHashedNodesCoordinator(arguments) ihgs, _ := NewIndexHashedNodesCoordinatorWithRater(nc, &mock.RaterMock{}) @@ -415,7 +409,6 @@ func TestIndexHashedGroupSelectorWithRater_GetAllEligibleValidatorsPublicKeys(t WaitingNodes: waitingMap, SelfPublicKey: []byte("key"), ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ListIndexUpdater: &mock.ListIndexUpdaterStub{}, } nc, _ := NewIndexHashedNodesCoordinator(arguments) diff --git a/sharding/indexHashedNodesCoordinator_test.go b/sharding/indexHashedNodesCoordinator_test.go index 79e31f9adfd..81bffcccb22 100644 --- a/sharding/indexHashedNodesCoordinator_test.go +++ b/sharding/indexHashedNodesCoordinator_test.go @@ -84,7 +84,6 @@ func createArguments() ArgNodesCoordinator { WaitingNodes: waitingMap, SelfPublicKey: []byte("test"), ConsensusGroupCache: 
&mock.NodesCoordinatorCacheMock{}, - ListIndexUpdater: &mock.ListIndexUpdaterStub{}, } return arguments } @@ -187,7 +186,7 @@ func TestIndexHashedNodesCoordinator_SetNilEligibleMapShouldErr(t *testing.T) { arguments := createArguments() ihgs, _ := NewIndexHashedNodesCoordinator(arguments) - require.Equal(t, ErrNilInputNodesMap, ihgs.SetNodesPerShards(nil, waitingMap, 0, false)) + require.Equal(t, ErrNilInputNodesMap, ihgs.SetNodesPerShards(nil, waitingMap, 0)) } func TestIndexHashedNodesCoordinator_SetNilWaitingMapShouldErr(t *testing.T) { @@ -197,7 +196,7 @@ func TestIndexHashedNodesCoordinator_SetNilWaitingMapShouldErr(t *testing.T) { arguments := createArguments() ihgs, _ := NewIndexHashedNodesCoordinator(arguments) - require.Equal(t, ErrNilInputNodesMap, ihgs.SetNodesPerShards(eligibleMap, nil, 0, false)) + require.Equal(t, ErrNilInputNodesMap, ihgs.SetNodesPerShards(eligibleMap, nil, 0)) } func TestIndexHashedNodesCoordinator_OkValShouldWork(t *testing.T) { @@ -221,7 +220,6 @@ func TestIndexHashedNodesCoordinator_OkValShouldWork(t *testing.T) { WaitingNodes: waitingMap, SelfPublicKey: []byte("key"), ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ListIndexUpdater: &mock.ListIndexUpdaterStub{}, } ihgs, err := NewIndexHashedNodesCoordinator(arguments) @@ -265,7 +263,6 @@ func TestIndexHashedNodesCoordinator_NewCoordinatorTooFewNodesShouldErr(t *testi WaitingNodes: waitingMap, SelfPublicKey: []byte("key"), ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ListIndexUpdater: &mock.ListIndexUpdaterStub{}, } ihgs, err := NewIndexHashedNodesCoordinator(arguments) @@ -323,7 +320,6 @@ func TestIndexHashedNodesCoordinator_ComputeValidatorsGroup1ValidatorShouldRetur WaitingNodes: make(map[uint32][]Validator), SelfPublicKey: []byte("key"), ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ListIndexUpdater: &mock.ListIndexUpdaterStub{}, } ihgs, _ := NewIndexHashedNodesCoordinator(arguments) list2, err := ihgs.ComputeConsensusGroup([]byte("randomness"), 0, 0, 0) @@ -372,7 +368,6 @@ func TestIndexHashedNodesCoordinator_ComputeValidatorsGroupTest2Validators(t *te WaitingNodes: waitingMap, SelfPublicKey: []byte("key"), ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ListIndexUpdater: &mock.ListIndexUpdaterStub{}, } ihgs, _ := NewIndexHashedNodesCoordinator(arguments) @@ -434,7 +429,6 @@ func TestIndexHashedNodesCoordinator_ComputeValidatorsGroupTest2ValidatorsRevert WaitingNodes: waitingMap, SelfPublicKey: []byte("key"), ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ListIndexUpdater: &mock.ListIndexUpdaterStub{}, } ihgs, _ := NewIndexHashedNodesCoordinator(arguments) @@ -485,7 +479,6 @@ func TestIndexHashedNodesCoordinator_ComputeValidatorsGroupTest2ValidatorsSameIn WaitingNodes: waitingMap, SelfPublicKey: []byte("key"), ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ListIndexUpdater: &mock.ListIndexUpdaterStub{}, } ihgs, _ := NewIndexHashedNodesCoordinator(arguments) @@ -586,7 +579,6 @@ func TestIndexHashedNodesCoordinator_ComputeValidatorsGroupTest6From10Validators WaitingNodes: make(map[uint32][]Validator), SelfPublicKey: selfPubKey, ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ListIndexUpdater: &mock.ListIndexUpdaterStub{}, } ihgs, err := NewIndexHashedNodesCoordinator(arguments) require.Nil(t, err) @@ -639,7 +631,6 @@ func TestIndexHashedNodesCoordinator_ComputeValidatorsGroup400of400For10locksNoM WaitingNodes: waitingMap, SelfPublicKey: []byte("key"), ConsensusGroupCache: cache, - ListIndexUpdater: 
&mock.ListIndexUpdaterStub{}, } ihgs, err := NewIndexHashedNodesCoordinator(arguments) @@ -711,7 +702,6 @@ func TestIndexHashedNodesCoordinator_ComputeValidatorsGroup400of400For10BlocksMe WaitingNodes: waitingMap, SelfPublicKey: []byte("key"), ConsensusGroupCache: cache, - ListIndexUpdater: &mock.ListIndexUpdaterStub{}, } ihgs, err := NewIndexHashedNodesCoordinator(arguments) @@ -814,7 +804,6 @@ func computeMemoryRequirements(consensusGroupCache Cacher, consensusGroupSize in WaitingNodes: waitingMap, SelfPublicKey: []byte("key"), ConsensusGroupCache: consensusGroupCache, - ListIndexUpdater: &mock.ListIndexUpdaterStub{}, } ihgs, _ := NewIndexHashedNodesCoordinator(arguments) @@ -938,7 +927,6 @@ func TestIndexHashedNodesCoordinator_GetValidatorWithPublicKeyShouldWork(t *test WaitingNodes: make(map[uint32][]Validator), SelfPublicKey: []byte("key"), ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ListIndexUpdater: &mock.ListIndexUpdaterStub{}, } ihgs, _ := NewIndexHashedNodesCoordinator(arguments) @@ -1016,7 +1004,6 @@ func TestIndexHashedGroupSelector_GetAllEligibleValidatorsPublicKeys(t *testing. WaitingNodes: make(map[uint32][]Validator), SelfPublicKey: []byte("key"), ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ListIndexUpdater: &mock.ListIndexUpdaterStub{}, } ihgs, _ := NewIndexHashedNodesCoordinator(arguments) @@ -1078,7 +1065,6 @@ func TestIndexHashedGroupSelector_GetAllWaitingValidatorsPublicKeys(t *testing.T WaitingNodes: waitingMap, SelfPublicKey: []byte("key"), ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ListIndexUpdater: &mock.ListIndexUpdaterStub{}, } ihgs, _ := NewIndexHashedNodesCoordinator(arguments) diff --git a/sharding/interface.go b/sharding/interface.go index 8f4fe085abe..47f5580d449 100644 --- a/sharding/interface.go +++ b/sharding/interface.go @@ -28,7 +28,6 @@ type NodesCoordinator interface { PublicKeysSelector ComputeConsensusGroup(randomness []byte, round uint64, shardId uint32, epoch uint32) (validatorsGroup []Validator, err error) GetValidatorWithPublicKey(publicKey []byte, epoch uint32) (validator Validator, shardId uint32, err error) - UpdatePeersListAndIndex() error LoadState(key []byte) error GetSavedStateKey() []byte ShardIdForEpoch(epoch uint32) (uint32, error) @@ -76,7 +75,6 @@ type NodesPerShardSetter interface { eligible map[uint32][]Validator, waiting map[uint32][]Validator, epoch uint32, - updateList bool, ) error ComputeLeaving(allValidators []Validator) []Validator } diff --git a/sharding/networksharding/mock_test.go b/sharding/networksharding/mock_test.go index 62c461569dd..2412e7da446 100644 --- a/sharding/networksharding/mock_test.go +++ b/sharding/networksharding/mock_test.go @@ -8,16 +8,6 @@ type nodesCoordinatorStub struct { GetValidatorWithPublicKeyCalled func(publicKey []byte, epoch uint32) (validator sharding.Validator, shardId uint32, err error) } -// SetConfig - -func (ncs *nodesCoordinatorStub) SetConfig(_ *sharding.NodesCoordinatorRegistry) error { - return nil -} - -// UpdatePeersListAndIndex - -func (ncs *nodesCoordinatorStub) UpdatePeersListAndIndex() error { - panic("implement me") -} - // ComputeLeaving - func (ncs *nodesCoordinatorStub) ComputeLeaving(_ []sharding.Validator) []sharding.Validator { panic("implement me") @@ -59,7 +49,7 @@ func (ncs *nodesCoordinatorStub) GetOwnPublicKey() []byte { } // SetNodesPerShards - -func (ncs *nodesCoordinatorStub) SetNodesPerShards(_ map[uint32][]sharding.Validator, _ map[uint32][]sharding.Validator, _ uint32, _ bool) error { +func (ncs 
*nodesCoordinatorStub) SetNodesPerShards(_ map[uint32][]sharding.Validator, _ map[uint32][]sharding.Validator, _ uint32) error { panic("implement me") } diff --git a/sharding/shardingArgs.go b/sharding/shardingArgs.go index a45a32a3761..fcf4680f650 100644 --- a/sharding/shardingArgs.go +++ b/sharding/shardingArgs.go @@ -12,7 +12,6 @@ type ArgNodesCoordinator struct { Hasher hashing.Hasher Shuffler NodesShuffler EpochStartNotifier EpochStartEventNotifier - ListIndexUpdater ListIndexUpdaterHandler BootStorer storage.Storer ShardIDAsObserver uint32 NbShards uint32 From 23d689189551ceb79f581f3d22979f2614d2f019 Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Tue, 31 Mar 2020 10:59:21 +0300 Subject: [PATCH 58/61] no need for updateListIndex, fixed shard coordinator return when starting from epoch 0. --- epochStart/bootstrap/fromLocalStorage.go | 12 +++++++----- epochStart/bootstrap/process.go | 2 +- .../endOfEpoch/startInEpoch/startInEpoch_test.go | 2 ++ 3 files changed, 10 insertions(+), 6 deletions(-) diff --git a/epochStart/bootstrap/fromLocalStorage.go b/epochStart/bootstrap/fromLocalStorage.go index 5f1a4c938ae..5a7c7a20ece 100644 --- a/epochStart/bootstrap/fromLocalStorage.go +++ b/epochStart/bootstrap/fromLocalStorage.go @@ -26,6 +26,7 @@ func (e *epochStartBootstrap) initializeFromLocalStorage() { e.defaultShardString, ) if errNotCritical != nil { + e.baseData.storageExists = false log.Debug("no epoch db found in storage", "error", errNotCritical.Error()) } else { e.baseData.storageExists = true @@ -77,6 +78,12 @@ func (e *epochStartBootstrap) prepareEpochFromStorage() (Parameters, error) { return Parameters{}, err } + e.epochStartMeta, err = e.getEpochStartMetaFromStorage(storageUnits[1]) + if err != nil { + return Parameters{}, err + } + e.baseData.numberOfShards = uint32(len(e.epochStartMeta.EpochStart.LastFinalizedHeaders)) + if !e.checkIfShuffledOut(pubKey, e.nodesConfig) { parameters := Parameters{ Epoch: e.baseData.lastEpoch, @@ -86,11 +93,6 @@ func (e *epochStartBootstrap) prepareEpochFromStorage() (Parameters, error) { return parameters, nil } - e.epochStartMeta, err = e.getEpochStartMetaFromStorage(storageUnits[1]) - if err != nil { - return Parameters{}, err - } - err = e.createSyncers() if err != nil { return Parameters{}, err diff --git a/epochStart/bootstrap/process.go b/epochStart/bootstrap/process.go index 266495d70cc..d07e1a9ee63 100644 --- a/epochStart/bootstrap/process.go +++ b/epochStart/bootstrap/process.go @@ -210,7 +210,7 @@ func (e *epochStartBootstrap) computeMostProbableEpoch() { func (e *epochStartBootstrap) Bootstrap() (Parameters, error) { var err error - e.shardCoordinator, err = sharding.NewMultiShardCoordinator(e.genesisNodesConfig.NumberOfShards(), core.MetachainShardId) + e.shardCoordinator, err = sharding.NewMultiShardCoordinator(e.genesisShardCoordinator.NumberOfShards(), core.MetachainShardId) if err != nil { return Parameters{}, err } diff --git a/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go b/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go index 2b7fca8a480..5a9d163861e 100644 --- a/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go +++ b/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go @@ -134,6 +134,7 @@ func TestStartInEpochForAShardNodeInMultiShardedEnvironment(t *testing.T) { assert.NoError(t, errRemoveDir) }() + genesisShardCoordinator, _ := sharding.NewMultiShardCoordinator(nodesConfig.NumberOfShards(), 0) messenger := 
integrationTests.CreateMessengerWithKadDht(context.Background(), integrationTests.GetConnectableAddress(advertiser)) _ = messenger.Bootstrap() time.Sleep(integrationTests.P2pBootstrapDelay) @@ -143,6 +144,7 @@ func TestStartInEpochForAShardNodeInMultiShardedEnvironment(t *testing.T) { Hasher: integrationTests.TestHasher, Messenger: messenger, GeneralConfig: getGeneralConfig(), + GenesisShardCoordinator: genesisShardCoordinator, EconomicsData: integrationTests.CreateEconomicsData(), SingleSigner: &mock.SignerMock{}, BlockSingleSigner: &mock.SignerMock{}, From 232ba2e43bbeac3a5b861b4feac08fb04a071cb4 Mon Sep 17 00:00:00 2001 From: bogdan-rosianu Date: Tue, 31 Mar 2020 11:36:56 +0300 Subject: [PATCH 59/61] EN-6030: fix trie storers (temporary) --- epochStart/bootstrap/process.go | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/epochStart/bootstrap/process.go b/epochStart/bootstrap/process.go index d07e1a9ee63..9b556c85252 100644 --- a/epochStart/bootstrap/process.go +++ b/epochStart/bootstrap/process.go @@ -2,6 +2,8 @@ package bootstrap import ( "context" + "fmt" + "path/filepath" "strconv" "time" @@ -227,13 +229,13 @@ func (e *epochStartBootstrap) Bootstrap() (Parameters, error) { if isCurrentEpochSaved { parameters, err := e.prepareEpochFromStorage() if err == nil { - return parameters, nil + return parameters, err } } err = e.prepareComponentsToSyncFromNetwork() if err != nil { - return Parameters{}, nil + return Parameters{}, err } e.epochStartMeta, err = e.epochStartMetaBlockSyncer.SyncEpochStartMeta(timeToWait) @@ -393,10 +395,6 @@ func (e *epochStartBootstrap) requestAndProcessing() (Parameters, error) { return Parameters{}, err } - err = e.createTrieStorageManagers() - if err != nil { - return Parameters{}, err - } log.Info("start in epoch bootstrap: createTrieStorageManagers") log.Info("start in epoch bootstrap: started syncPeerAccountsState") @@ -633,7 +631,15 @@ func (e *epochStartBootstrap) syncUserAccountsState(rootHash []byte) error { } func (e *epochStartBootstrap) createTrieStorageManagers() error { + // TODO: this func should be removed as tries storers are already created in coreComponents dbConfig := storageFactory.GetDBFromConfig(e.generalConfig.AccountsTrieStorage.DB) + shardIdStr := fmt.Sprintf("%d", e.shardCoordinator.SelfId()) + if e.shardCoordinator.SelfId() > e.shardCoordinator.NumberOfShards() { + shardIdStr = "metachain" + } + trieStoragePath := e.pathManager.PathForStatic(shardIdStr, dbConfig.FilePath) + trieStoragePath = filepath.Join(trieStoragePath, e.generalConfig.AccountsTrieStorage.DB.FilePath) + "_temp" + dbConfig.FilePath = trieStoragePath trieStorage, err := storageUnit.NewStorageUnitFromConf( storageFactory.GetCacherFromConfig(e.generalConfig.AccountsTrieStorage.Cache), dbConfig, @@ -649,6 +655,9 @@ func (e *epochStartBootstrap) createTrieStorageManagers() error { } dbConfig = storageFactory.GetDBFromConfig(e.generalConfig.PeerAccountsTrieStorage.DB) + peerTrieStoragePath := e.pathManager.PathForStatic(shardIdStr, dbConfig.FilePath) + peerTrieStoragePath = filepath.Join(peerTrieStoragePath, e.generalConfig.PeerAccountsTrieStorage.DB.FilePath) + "_temp" + dbConfig.FilePath = peerTrieStoragePath peerTrieStorage, err := storageUnit.NewStorageUnitFromConf( storageFactory.GetCacherFromConfig(e.generalConfig.PeerAccountsTrieStorage.Cache), dbConfig, From 9ec5defde14820b5885dd0924291a5fa02f62c76 Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Tue, 31 Mar 2020 14:30:48 +0300 Subject: [PATCH 60/61] processing fixes. 
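This commit threads the trie storage managers out of the factory instead of hiding them: TrieFactory.Create now returns the StorageManager together with the Trie, the core components keep both in a TrieStorageManagers map, TriesHolder gains a Replace method, and the epoch-start bootstrapper reuses (or, when it lands in a new shard, recreates via createTriesForNewShardId) the shared managers rather than building the temporary "_temp" storers from the previous commit. A minimal sketch of the two-value factory shape; the single-method interfaces below are illustrative stand-ins, not the real data.Trie and data.StorageManager method sets:

package main

import "fmt"

// Illustrative stand-ins; the real interfaces are far larger.
type Trie interface{ Root() []byte }
type StorageManager interface{ IsPruningEnabled() bool }

type trieStub struct{} // hypothetical placeholder implementation

func (t *trieStub) Root() []byte { return nil }

type storageStub struct{ pruning bool } // hypothetical placeholder implementation

func (s *storageStub) IsPruningEnabled() bool { return s.pruning }

// create mirrors the new signature: the storage manager that backs the trie
// is handed to the caller instead of staying private to the factory.
func create(pruningEnabled bool) (StorageManager, Trie, error) {
	sm := &storageStub{pruning: pruningEnabled}
	return sm, &trieStub{}, nil
}

func main() {
	managers := make(map[string]StorageManager)
	tries := make(map[string]Trie)

	for _, id := range []string{"userAccount", "peerAccount"} {
		sm, tr, err := create(true)
		if err != nil {
			panic(err)
		}
		managers[id] = sm // later reused, e.g. by the accounts state syncers
		tries[id] = tr
	}
	fmt.Println(len(managers), len(tries)) // 2 2
}

The payoff is visible further down in this patch: syncUserAccountsState and syncPeerAccountsState read e.trieStorageManagers[factory.UserAccountTrie] and [factory.PeerAccountTrie] instead of constructing their own storage units.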
--- cmd/node/factory/structs.go | 26 +++-- cmd/node/main.go | 2 + data/interface.go | 2 +- data/state/dataTriesHolder.go | 5 + data/state/interface.go | 1 + data/trie/factory/trieCreator.go | 26 +++-- data/trie/factory/trieCreator_test.go | 10 +- epochStart/bootstrap/fromLocalStorage.go | 17 ++- epochStart/bootstrap/process.go | 106 +++++++----------- .../startInEpoch/startInEpoch_test.go | 50 +++++++++ update/mock/triesHolderMock.go | 16 +++ 11 files changed, 166 insertions(+), 95 deletions(-) diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index 6e444c3cec3..624a5d4df8c 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -127,6 +127,7 @@ type Core struct { VmMarshalizer marshal.Marshalizer TxSignMarshalizer marshal.Marshalizer TriesContainer state.TriesHolder + TrieStorageManagers map[string]data.StorageManager Uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter StatusHandler core.AppStatusHandler ChainID []byte @@ -217,7 +218,7 @@ func CoreComponentsFactory(args *coreComponentsFactoryArgs) (*Core, error) { uint64ByteSliceConverter := uint64ByteSlice.NewBigEndianConverter() - trieContainer, err := createTries(args, internalMarshalizer, hasher) + trieStorageManagers, trieContainer, err := createTries(args, internalMarshalizer, hasher) if err != nil { return nil, err @@ -229,6 +230,7 @@ func CoreComponentsFactory(args *coreComponentsFactoryArgs) (*Core, error) { VmMarshalizer: vmMarshalizer, TxSignMarshalizer: txSignMarshalizer, TriesContainer: trieContainer, + TrieStorageManagers: trieStorageManagers, Uint64ByteSliceConverter: uint64ByteSliceConverter, StatusHandler: statusHandler.NewNilStatusHandler(), ChainID: args.chainID, @@ -239,10 +241,9 @@ func createTries( args *coreComponentsFactoryArgs, marshalizer marshal.Marshalizer, hasher hashing.Hasher, -) (state.TriesHolder, error) { +) (map[string]data.StorageManager, state.TriesHolder, error) { trieContainer := state.NewDataTriesHolder() - trieFactoryArgs := factory.TrieFactoryArgs{ EvictionWaitingListCfg: args.config.EvictionWaitingList, SnapshotDbCfg: args.config.TrieSnapshotDB, @@ -253,24 +254,25 @@ func createTries( } trieFactory, err := factory.NewTrieFactory(trieFactoryArgs) if err != nil { - return nil, err + return nil, nil, err } - merkleTrie, err := trieFactory.Create(args.config.AccountsTrieStorage, args.config.StateTriesConfig.AccountsStatePruningEnabled) + trieStorageManagers := make(map[string]data.StorageManager) + userStorageManager, userAccountTrie, err := trieFactory.Create(args.config.AccountsTrieStorage, args.config.StateTriesConfig.AccountsStatePruningEnabled) if err != nil { - return nil, err + return nil, nil, err } + trieContainer.Put([]byte(factory.UserAccountTrie), userAccountTrie) + trieStorageManagers[factory.UserAccountTrie] = userStorageManager - trieContainer.Put([]byte(factory.UserAccountTrie), merkleTrie) - - peerAccountsTrie, err := trieFactory.Create(args.config.PeerAccountsTrieStorage, args.config.StateTriesConfig.PeerStatePruningEnabled) + peerStorageManager, peerAccountsTrie, err := trieFactory.Create(args.config.PeerAccountsTrieStorage, args.config.StateTriesConfig.PeerStatePruningEnabled) if err != nil { - return nil, err + return nil, nil, err } - trieContainer.Put([]byte(factory.PeerAccountTrie), peerAccountsTrie) + trieStorageManagers[factory.PeerAccountTrie] = peerStorageManager - return trieContainer, nil + return trieStorageManagers, trieContainer, nil } type stateComponentsFactoryArgs struct { diff --git a/cmd/node/main.go 
b/cmd/node/main.go index c7bc017dc55..c3df2af1870 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -639,6 +639,8 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { DefaultShardString: defaultShardString, Rater: rater, DestinationShardAsObserver: ctx.GlobalString(destinationShardAsObserver.Name), + TrieContainer: coreComponents.TriesContainer, + TrieStorageManagers: coreComponents.TrieStorageManagers, } bootstrapper, err := bootstrap.NewEpochStartBootstrap(epochStartBootstrapArgs) if err != nil { diff --git a/data/interface.go b/data/interface.go index 21e970e189f..631080b6e98 100644 --- a/data/interface.go +++ b/data/interface.go @@ -175,7 +175,7 @@ type StorageManager interface { // TrieFactory creates new tries type TrieFactory interface { - Create(config.StorageConfig, bool) (Trie, error) + Create(config.StorageConfig, bool) (StorageManager, Trie, error) IsInterfaceNil() bool } diff --git a/data/state/dataTriesHolder.go b/data/state/dataTriesHolder.go index 6b10a01221b..766a5ab248c 100644 --- a/data/state/dataTriesHolder.go +++ b/data/state/dataTriesHolder.go @@ -25,6 +25,11 @@ func (dth *dataTriesHolder) Put(key []byte, tr data.Trie) { dth.mutex.Unlock() } +// Replace changes a trie pointer to the tries map +func (dth *dataTriesHolder) Replace(key []byte, tr data.Trie) { + dth.Put(key, tr) +} + // Get returns the trie pointer that is stored in the map at the given key func (dth *dataTriesHolder) Get(key []byte) data.Trie { dth.mutex.Lock() diff --git a/data/state/interface.go b/data/state/interface.go index 0c3f34ec5a4..6a9455fc730 100644 --- a/data/state/interface.go +++ b/data/state/interface.go @@ -166,6 +166,7 @@ type JournalEntry interface { // TriesHolder is used to store multiple tries type TriesHolder interface { Put([]byte, data.Trie) + Replace(key []byte, tr data.Trie) Get([]byte) data.Trie GetAll() []data.Trie Reset() diff --git a/data/trie/factory/trieCreator.go b/data/trie/factory/trieCreator.go index 4d44705eb54..252621fab99 100644 --- a/data/trie/factory/trieCreator.go +++ b/data/trie/factory/trieCreator.go @@ -53,7 +53,7 @@ func NewTrieFactory( } // Create creates a new trie -func (tc *trieCreator) Create(trieStorageCfg config.StorageConfig, pruningEnabled bool) (data.Trie, error) { +func (tc *trieCreator) Create(trieStorageCfg config.StorageConfig, pruningEnabled bool) (data.StorageManager, data.Trie, error) { trieStoragePath, mainDb := path.Split(tc.pathManager.PathForStatic(tc.shardId, trieStorageCfg.DB.FilePath)) dbConfig := factory.GetDBFromConfig(trieStorageCfg.DB) @@ -64,17 +64,22 @@ func (tc *trieCreator) Create(trieStorageCfg config.StorageConfig, pruningEnable factory.GetBloomFromConfig(trieStorageCfg.Bloom), ) if err != nil { - return nil, err + return nil, nil, err } log.Trace("trie pruning status", "enabled", pruningEnabled) if !pruningEnabled { trieStorage, errNewTrie := trie.NewTrieStorageManagerWithoutPruning(accountsTrieStorage) if errNewTrie != nil { - return nil, errNewTrie + return nil, nil, errNewTrie } - return trie.NewTrie(trieStorage, tc.marshalizer, tc.hasher) + newTrie, err := trie.NewTrie(trieStorage, tc.marshalizer, tc.hasher) + if err != nil { + return nil, nil, err + } + + return trieStorage, newTrie, nil } arg := storageUnit.ArgDB{ @@ -86,12 +91,12 @@ func (tc *trieCreator) Create(trieStorageCfg config.StorageConfig, pruningEnable } evictionDb, err := storageUnit.NewDB(arg) if err != nil { - return nil, err + return nil, nil, err } ewl, err := 
evictionWaitingList.NewEvictionWaitingList(tc.evictionWaitingListCfg.Size, evictionDb, tc.marshalizer)
 	if err != nil {
-		return nil, err
+		return nil, nil, err
 	}

 	snapshotDbCfg := config.DBConfig{
@@ -104,10 +109,15 @@ func (tc *trieCreator) Create(trieStorageCfg config.StorageConfig, pruningEnable
 	trieStorage, err := trie.NewTrieStorageManager(accountsTrieStorage, tc.marshalizer, tc.hasher, snapshotDbCfg, ewl)
 	if err != nil {
-		return nil, err
+		return nil, nil, err
+	}
+
+	newTrie, err := trie.NewTrie(trieStorage, tc.marshalizer, tc.hasher)
+	if err != nil {
+		return nil, nil, err
 	}

-	return trie.NewTrie(trieStorage, tc.marshalizer, tc.hasher)
+	return trieStorage, newTrie, nil
 }

 // IsInterfaceNil returns true if there is no value under the interface
diff --git a/data/trie/factory/trieCreator_test.go b/data/trie/factory/trieCreator_test.go
index b8e25e3e865..ac34d0cb24c 100644
--- a/data/trie/factory/trieCreator_test.go
+++ b/data/trie/factory/trieCreator_test.go
@@ -81,7 +81,7 @@ func TestTrieFactory_CreateNotSupportedCacheType(t *testing.T) {
 	tf, _ := NewTrieFactory(args)

 	trieStorageCfg := config.StorageConfig{}
-	tr, err := tf.Create(trieStorageCfg, false)
+	_, tr, err := tf.Create(trieStorageCfg, false)
 	require.Nil(t, tr)
 	require.Equal(t, storage.ErrNotSupportedCacheType, err)
 }
@@ -93,7 +93,7 @@ func TestTrieFactory_CreateWithoutPrunningWork(t *testing.T) {
 	tf, _ := NewTrieFactory(args)

 	trieStorageCfg := createTrieStorageCfg()
-	tr, err := tf.Create(trieStorageCfg, false)
+	_, tr, err := tf.Create(trieStorageCfg, false)
 	require.NotNil(t, tr)
 	require.Nil(t, err)
 }
@@ -105,7 +105,7 @@ func TestTrieFactory_CreateWithPrunningWrongDbType(t *testing.T) {
 	tf, _ := NewTrieFactory(args)

 	trieStorageCfg := createTrieStorageCfg()
-	tr, err := tf.Create(trieStorageCfg, true)
+	_, tr, err := tf.Create(trieStorageCfg, true)
 	require.Nil(t, tr)
 	require.Equal(t, storage.ErrNotSupportedDBType, err)
 }
@@ -120,7 +120,7 @@ func TestTrieFactory_CreateInvalidCacheSize(t *testing.T) {
 	tf, _ := NewTrieFactory(args)

 	trieStorageCfg := createTrieStorageCfg()
-	tr, err := tf.Create(trieStorageCfg, true)
+	_, tr, err := tf.Create(trieStorageCfg, true)
 	require.Nil(t, tr)
 	require.Equal(t, data.ErrInvalidCacheSize, err)
 }
@@ -136,7 +136,7 @@ func TestTrieFactory_CreateWithPRunningShouldWork(t *testing.T) {
 	tf, _ := NewTrieFactory(args)

 	trieStorageCfg := createTrieStorageCfg()
-	tr, err := tf.Create(trieStorageCfg, true)
+	_, tr, err := tf.Create(trieStorageCfg, true)
 	require.NotNil(t, tr)
 	require.Nil(t, err)
 }
diff --git a/epochStart/bootstrap/fromLocalStorage.go b/epochStart/bootstrap/fromLocalStorage.go
index 5a7c7a20ece..66fb793bbe9 100644
--- a/epochStart/bootstrap/fromLocalStorage.go
+++ b/epochStart/bootstrap/fromLocalStorage.go
@@ -115,16 +115,23 @@ func (e *epochStartBootstrap) prepareEpochFromStorage() (Parameters, error) {
 		return Parameters{}, err
 	}

-	if e.shardCoordinator.SelfId() == core.MetachainShardId {
-		err = e.requestAndProcessForShard()
+	if e.shardCoordinator.SelfId() != e.genesisShardCoordinator.SelfId() {
+		err = e.createTriesForNewShardId(e.shardCoordinator.SelfId())
 		if err != nil {
 			return Parameters{}, err
 		}
 	}

-	err = e.requestAndProcessForMeta()
-	if err != nil {
-		return Parameters{}, err
+	if e.shardCoordinator.SelfId() == core.MetachainShardId {
+		err = e.requestAndProcessForMeta()
+		if err != nil {
+			return Parameters{}, err
+		}
+	} else {
+		err = e.requestAndProcessForShard()
+		if err != nil {
+			return Parameters{}, err
+		}
 	}

 	parameters := Parameters{
diff --git a/epochStart/bootstrap/process.go b/epochStart/bootstrap/process.go
index 9b556c85252..6b7d33cb5ff 100644
--- a/epochStart/bootstrap/process.go
+++ b/epochStart/bootstrap/process.go
@@ -2,8 +2,6 @@ package bootstrap

 import (
 	"context"
-	"fmt"
-	"path/filepath"
 	"strconv"
 	"time"
@@ -16,8 +14,7 @@ import (
 	"github.com/ElrondNetwork/elrond-go/data/block"
 	"github.com/ElrondNetwork/elrond-go/data/state"
 	"github.com/ElrondNetwork/elrond-go/data/syncer"
-	"github.com/ElrondNetwork/elrond-go/data/trie"
-	trieFactory "github.com/ElrondNetwork/elrond-go/data/trie/factory"
+	"github.com/ElrondNetwork/elrond-go/data/trie/factory"
 	"github.com/ElrondNetwork/elrond-go/data/typeConverters/uint64ByteSlice"
 	"github.com/ElrondNetwork/elrond-go/dataRetriever"
 	factoryDataPool "github.com/ElrondNetwork/elrond-go/dataRetriever/factory"
@@ -35,7 +32,6 @@ import (
 	"github.com/ElrondNetwork/elrond-go/process/interceptors"
 	"github.com/ElrondNetwork/elrond-go/sharding"
 	"github.com/ElrondNetwork/elrond-go/storage"
-	storageFactory "github.com/ElrondNetwork/elrond-go/storage/factory"
 	"github.com/ElrondNetwork/elrond-go/storage/storageUnit"
 	"github.com/ElrondNetwork/elrond-go/storage/timecache"
 	"github.com/ElrondNetwork/elrond-go/update"
@@ -89,6 +85,8 @@ type epochStartBootstrap struct {
 	defaultShardString         string
 	destinationShardAsObserver string
 	rater                      sharding.ChanceComputer
+	trieContainer              state.TriesHolder
+	trieStorageManagers        map[string]data.StorageManager

 	// created components
 	requestHandler process.RequestHandler
@@ -98,8 +96,6 @@ type epochStartBootstrap struct {
 	headersSyncer             epochStart.HeadersByHashSyncer
 	epochStartMetaBlockSyncer epochStart.StartOfEpochMetaSyncer
 	nodesConfigHandler        StartOfEpochNodesConfigHandler
-	userTrieStorageManager   data.StorageManager
-	peerTrieStorageManager   data.StorageManager
 	whiteListHandler          update.WhiteListHandler

 	// gathered data
@@ -142,6 +138,8 @@ type ArgsEpochStartBootstrap struct {
 	DefaultShardString         string
 	Rater                      sharding.ChanceComputer
 	DestinationShardAsObserver string
+	TrieContainer              state.TriesHolder
+	TrieStorageManagers        map[string]data.StorageManager
 }

 // NewEpochStartBootstrap will return a new instance of epochStartBootstrap
@@ -166,6 +164,8 @@ func NewEpochStartBootstrap(args ArgsEpochStartBootstrap) (*epochStartBootstrap,
 		blockSingleSigner:          args.BlockSingleSigner,
 		rater:                      args.Rater,
 		destinationShardAsObserver: args.DestinationShardAsObserver,
+		trieContainer:              args.TrieContainer,
+		trieStorageManagers:        args.TrieStorageManagers,
 	}

 	return epochStartProvider, nil
@@ -229,7 +229,7 @@ func (e *epochStartBootstrap) Bootstrap() (Parameters, error) {
 	if isCurrentEpochSaved {
 		parameters, err := e.prepareEpochFromStorage()
 		if err == nil {
-			return parameters, err
+			return parameters, nil
 		}
 	}
@@ -429,16 +429,23 @@ func (e *epochStartBootstrap) requestAndProcessing() (Parameters, error) {
 	}
 	log.Info("start in epoch bootstrap: shardCoordinator")

-	if e.shardCoordinator.SelfId() != core.MetachainShardId {
-		err = e.requestAndProcessForShard()
+	if e.shardCoordinator.SelfId() != e.genesisShardCoordinator.SelfId() {
+		err = e.createTriesForNewShardId(e.shardCoordinator.SelfId())
 		if err != nil {
 			return Parameters{}, err
 		}
 	}

-	err = e.requestAndProcessForMeta()
-	if err != nil {
-		return Parameters{}, err
+	if e.shardCoordinator.SelfId() == core.MetachainShardId {
+		err = e.requestAndProcessForMeta()
+		if err != nil {
+			return Parameters{}, err
+		}
+	} else {
+		err = e.requestAndProcessForShard()
+		if err != nil {
+			return Parameters{}, err
+		}
 	}

 	parameters := Parameters{
@@ -468,7 +475,8 @@ func (e *epochStartBootstrap) processNodesConfig(pubKey []byte) error {
 }

 func (e *epochStartBootstrap) requestAndProcessForMeta() error {
-	err := e.syncUserAccountsState(e.epochStartMeta.RootHash)
+	var err error
+	err = e.syncUserAccountsState(e.epochStartMeta.RootHash)
 	if err != nil {
 		return err
 	}
@@ -609,7 +617,7 @@ func (e *epochStartBootstrap) syncUserAccountsState(rootHash []byte) error {
 		ArgsNewBaseAccountsSyncer: syncer.ArgsNewBaseAccountsSyncer{
 			Hasher:             e.hasher,
 			Marshalizer:        e.marshalizer,
-			TrieStorageManager: e.userTrieStorageManager,
+			TrieStorageManager: e.trieStorageManagers[factory.UserAccountTrie],
 			RequestHandler:     e.requestHandler,
 			WaitTime:           time.Minute,
 			Cacher:             e.dataPool.TrieNodes(),
@@ -630,48 +638,36 @@ func (e *epochStartBootstrap) syncUserAccountsState(rootHash []byte) error {
 	return nil
 }

-func (e *epochStartBootstrap) createTrieStorageManagers() error {
-	// TODO: this func should be removed as tries storers are already created in coreComponents
-	dbConfig := storageFactory.GetDBFromConfig(e.generalConfig.AccountsTrieStorage.DB)
-	shardIdStr := fmt.Sprintf("%d", e.shardCoordinator.SelfId())
-	if e.shardCoordinator.SelfId() > e.shardCoordinator.NumberOfShards() {
-		shardIdStr = "metachain"
-	}
-	trieStoragePath := e.pathManager.PathForStatic(shardIdStr, dbConfig.FilePath)
-	trieStoragePath = filepath.Join(trieStoragePath, e.generalConfig.AccountsTrieStorage.DB.FilePath) + "_temp"
-	dbConfig.FilePath = trieStoragePath
-	trieStorage, err := storageUnit.NewStorageUnitFromConf(
-		storageFactory.GetCacherFromConfig(e.generalConfig.AccountsTrieStorage.Cache),
-		dbConfig,
-		storageFactory.GetBloomFromConfig(e.generalConfig.AccountsTrieStorage.Bloom),
-	)
-	if err != nil {
-		return err
+func (e *epochStartBootstrap) createTriesForNewShardId(shardId uint32) error {
+	trieFactoryArgs := factory.TrieFactoryArgs{
+		EvictionWaitingListCfg: e.generalConfig.EvictionWaitingList,
+		SnapshotDbCfg:          e.generalConfig.TrieSnapshotDB,
+		Marshalizer:            e.marshalizer,
+		Hasher:                 e.hasher,
+		PathManager:            e.pathManager,
+		ShardId:                core.GetShardIdString(shardId),
 	}
-
-	e.userTrieStorageManager, err = trie.NewTrieStorageManagerWithoutPruning(trieStorage)
+	trieFactory, err := factory.NewTrieFactory(trieFactoryArgs)
 	if err != nil {
 		return err
 	}

-	dbConfig = storageFactory.GetDBFromConfig(e.generalConfig.PeerAccountsTrieStorage.DB)
-	peerTrieStoragePath := e.pathManager.PathForStatic(shardIdStr, dbConfig.FilePath)
-	peerTrieStoragePath = filepath.Join(peerTrieStoragePath, e.generalConfig.PeerAccountsTrieStorage.DB.FilePath) + "_temp"
-	dbConfig.FilePath = peerTrieStoragePath
-	peerTrieStorage, err := storageUnit.NewStorageUnitFromConf(
-		storageFactory.GetCacherFromConfig(e.generalConfig.PeerAccountsTrieStorage.Cache),
-		dbConfig,
-		storageFactory.GetBloomFromConfig(e.generalConfig.PeerAccountsTrieStorage.Bloom),
-	)
+	userStorageManager, userAccountTrie, err := trieFactory.Create(e.generalConfig.AccountsTrieStorage, e.generalConfig.StateTriesConfig.AccountsStatePruningEnabled)
 	if err != nil {
 		return err
 	}
-	e.peerTrieStorageManager, err = trie.NewTrieStorageManagerWithoutPruning(peerTrieStorage)
+	e.trieContainer.Replace([]byte(factory.UserAccountTrie), userAccountTrie)
+	e.trieStorageManagers[factory.UserAccountTrie] = userStorageManager
+
+	peerStorageManager, peerAccountsTrie, err := trieFactory.Create(e.generalConfig.PeerAccountsTrieStorage, e.generalConfig.StateTriesConfig.PeerStatePruningEnabled)
 	if err != nil {
 		return err
 	}
+	e.trieContainer.Replace([]byte(factory.PeerAccountTrie), peerAccountsTrie)
+	e.trieStorageManagers[factory.PeerAccountTrie] = peerStorageManager
+
 	return nil
 }
@@ -680,7 +676,7 @@ func (e *epochStartBootstrap) syncPeerAccountsState(rootHash []byte) error {
 		ArgsNewBaseAccountsSyncer: syncer.ArgsNewBaseAccountsSyncer{
 			Hasher:             e.hasher,
 			Marshalizer:        e.marshalizer,
-			TrieStorageManager: e.peerTrieStorageManager,
+			TrieStorageManager: e.trieStorageManagers[factory.PeerAccountTrie],
 			RequestHandler:     e.requestHandler,
 			WaitTime:           time.Minute,
 			Cacher:             e.dataPool.TrieNodes(),
@@ -708,24 +704,6 @@ func (e *epochStartBootstrap) createRequestHandler() error {

 	storageService := disabled.NewChainStorer()

-	triesHolder := state.NewDataTriesHolder()
-	err = e.createTrieStorageManagers()
-	if err != nil {
-		return err
-	}
-
-	stateTrie, err := trie.NewTrie(e.userTrieStorageManager, e.marshalizer, e.hasher)
-	if err != nil {
-		return err
-	}
-	triesHolder.Put([]byte(trieFactory.UserAccountTrie), stateTrie)
-
-	peerTrie, err := trie.NewTrie(e.peerTrieStorageManager, e.marshalizer, e.hasher)
-	if err != nil {
-		return err
-	}
-	triesHolder.Put([]byte(trieFactory.PeerAccountTrie), peerTrie)
-
 	resolversContainerArgs := resolverscontainer.FactoryArgs{
 		ShardCoordinator:           e.shardCoordinator,
 		Messenger:                  e.messenger,
@@ -735,7 +713,7 @@
 		Uint64ByteSliceConverter:   uint64ByteSlice.NewBigEndianConverter(),
 		NumConcurrentResolvingJobs: 10,
 		DataPacker:                 dataPacker,
-		TriesContainer:             triesHolder,
+		TriesContainer:             e.trieContainer,
 		SizeCheckDelta:             0,
 		InputAntifloodHandler:      disabled.NewAntiFloodHandler(),
 		OutputAntifloodHandler:     disabled.NewAntiFloodHandler(),
diff --git a/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go b/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go
index 5a9d163861e..61653dcb06f 100644
--- a/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go
+++ b/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go
@@ -11,13 +11,19 @@ import (

 	"github.com/ElrondNetwork/elrond-go/config"
 	"github.com/ElrondNetwork/elrond-go/core"
+	"github.com/ElrondNetwork/elrond-go/data"
+	"github.com/ElrondNetwork/elrond-go/data/state"
+	triesFactory "github.com/ElrondNetwork/elrond-go/data/trie/factory"
 	"github.com/ElrondNetwork/elrond-go/dataRetriever"
 	"github.com/ElrondNetwork/elrond-go/epochStart/bootstrap"
+	"github.com/ElrondNetwork/elrond-go/hashing"
 	"github.com/ElrondNetwork/elrond-go/integrationTests"
 	"github.com/ElrondNetwork/elrond-go/integrationTests/mock"
 	"github.com/ElrondNetwork/elrond-go/integrationTests/multiShard/endOfEpoch"
+	"github.com/ElrondNetwork/elrond-go/marshal"
 	"github.com/ElrondNetwork/elrond-go/process/block/bootstrapStorage"
 	"github.com/ElrondNetwork/elrond-go/sharding"
+	"github.com/ElrondNetwork/elrond-go/storage"
 	"github.com/ElrondNetwork/elrond-go/storage/factory"
 	"github.com/ElrondNetwork/elrond-go/storage/storageUnit"
 	"github.com/stretchr/testify/assert"
@@ -138,6 +144,8 @@ func TestStartInEpochForAShardNodeInMultiShardedEnvironment(t *testing.T) {
 	messenger := integrationTests.CreateMessengerWithKadDht(context.Background(), integrationTests.GetConnectableAddress(advertiser))
 	_ = messenger.Bootstrap()
 	time.Sleep(integrationTests.P2pBootstrapDelay)
+
+	trieStorageManager, triesHolder, _ := createTries(getGeneralConfig(), integrationTests.TestMarshalizer, integrationTests.TestHasher, 0, &mock.PathManagerStub{})
 	argsBootstrapHandler := bootstrap.ArgsEpochStartBootstrap{
 		PublicKey:   nodeToJoinLate.NodeKeys.Pk,
 		Marshalizer: integrationTests.TestMarshalizer,
@@ -158,6 +166,8 @@
 		DefaultShardString:         "test_shard",
 		Rater:                      &mock.RaterMock{},
 		DestinationShardAsObserver: "0",
+		TrieContainer:              triesHolder,
+		TrieStorageManagers:        trieStorageManager,
 	}
 	epochStartBootstrap, err := bootstrap.NewEpochStartBootstrap(argsBootstrapHandler)
 	assert.Nil(t, err)
@@ -203,6 +213,46 @@
 	assert.Equal(t, epoch, bd.LastHeader.Epoch)
 }

+func createTries(
+	config config.Config,
+	marshalizer marshal.Marshalizer,
+	hasher hashing.Hasher,
+	shardId uint32,
+	pathManager storage.PathManagerHandler,
+) (map[string]data.StorageManager, state.TriesHolder, error) {
+
+	trieContainer := state.NewDataTriesHolder()
+	trieFactoryArgs := triesFactory.TrieFactoryArgs{
+		EvictionWaitingListCfg: config.EvictionWaitingList,
+		SnapshotDbCfg:          config.TrieSnapshotDB,
+		Marshalizer:            marshalizer,
+		Hasher:                 hasher,
+		PathManager:            pathManager,
+		ShardId:                core.GetShardIdString(shardId),
+	}
+	trieFactory, err := triesFactory.NewTrieFactory(trieFactoryArgs)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	trieStorageManagers := make(map[string]data.StorageManager)
+	userStorageManager, userAccountTrie, err := trieFactory.Create(config.AccountsTrieStorage, config.StateTriesConfig.AccountsStatePruningEnabled)
+	if err != nil {
+		return nil, nil, err
+	}
+	trieContainer.Put([]byte(triesFactory.UserAccountTrie), userAccountTrie)
+	trieStorageManagers[triesFactory.UserAccountTrie] = userStorageManager
+
+	peerStorageManager, peerAccountsTrie, err := trieFactory.Create(config.PeerAccountsTrieStorage, config.StateTriesConfig.PeerStatePruningEnabled)
+	if err != nil {
+		return nil, nil, err
+	}
+	trieContainer.Put([]byte(triesFactory.PeerAccountTrie), peerAccountsTrie)
+	trieStorageManagers[triesFactory.PeerAccountTrie] = peerStorageManager
+
+	return trieStorageManagers, trieContainer, nil
+}
+
 func convertToSlice(originalMap map[uint32][]*integrationTests.TestProcessorNode) []*integrationTests.TestProcessorNode {
 	sliceToRet := make([]*integrationTests.TestProcessorNode, 0)
 	for _, nodesPerShard := range originalMap {
diff --git a/update/mock/triesHolderMock.go b/update/mock/triesHolderMock.go
index 49101f331b6..c6332d0fe55 100644
--- a/update/mock/triesHolderMock.go
+++ b/update/mock/triesHolderMock.go
@@ -2,30 +2,46 @@ package mock

 import "github.com/ElrondNetwork/elrond-go/data"

+// TriesHolderMock -
 type TriesHolderMock struct {
 	PutCalled    func([]byte, data.Trie)
+	RemoveCalled func([]byte, data.Trie)
 	GetCalled    func([]byte) data.Trie
 	GetAllCalled func() []data.Trie
 	ResetCalled  func()
 }

+// Put -
 func (thm *TriesHolderMock) Put(key []byte, trie data.Trie) {
 	if thm.PutCalled != nil {
 		thm.PutCalled(key, trie)
 	}
 }
+
+// Replace -
+func (thm *TriesHolderMock) Replace(key []byte, trie data.Trie) {
+	if thm.RemoveCalled != nil {
+		thm.RemoveCalled(key, trie)
+	}
+}
+
+// Get -
 func (thm *TriesHolderMock) Get(key []byte) data.Trie {
 	if thm.GetCalled != nil {
 		return thm.GetCalled(key)
 	}
 	return nil
 }
+
+// GetAll -
 func (thm *TriesHolderMock) GetAll() []data.Trie {
 	if thm.GetAllCalled != nil {
 		return thm.GetAllCalled()
 	}
 	return nil
 }
+
+// Reset -
 func (thm *TriesHolderMock) Reset() {
 	if thm.ResetCalled != nil {
 		thm.ResetCalled()
 	}
 }

From dfbbfcf08dbd619c1b7c27a0bd937cb48a2ede11 Mon Sep 17 00:00:00 2001
From: Robert Sasu
Date: Tue, 31 Mar 2020 16:15:07 +0300
Subject: [PATCH 61/61] fix after review.

---
 cmd/node/main.go                              |  1 +
 core/constants.go                             |  3 ++
 core/mock/chainStorerMock.go                  |  2 +-
 data/syncer/baseAccountsSyncer.go             |  5 +--
 data/syncer/userAccountsSyncer.go             | 11 +++---
 data/syncer/validatorAccountsSyncer.go        |  5 +--
 data/trie/sync.go                             | 22 +++++++----
 dataRetriever/mock/chainStorerMock.go         |  2 +-
 .../requestHandlers/requestHandler.go         |  2 +-
 epochStart/bootstrap/baseStorageHandler.go    |  1 +
 .../disabled/disabledAccountsAdapter.go       |  2 +-
 .../bootstrap/disabled/disabledChainStorer.go |  2 +-
 .../epochStartInterceptorsContainerFactory.go | 12 +++---
 epochStart/bootstrap/fromLocalStorage.go      |  4 +-
 epochStart/bootstrap/process.go               | 39 +++++++++++--------
 epochStart/bootstrap/shardStorageHandler.go   |  8 ++--
 epochStart/metachain/trigger.go               |  2 +-
 epochStart/mock/chainStorerStub.go            |  2 +-
 epochStart/shardchain/trigger.go              |  2 +-
 integrationTests/mock/chainStorerMock.go      |  2 +-
 .../startInEpoch/startInEpoch_test.go         |  9 ++++-
 node/mock/chainStorerMock.go                  |  2 +-
 node/node.go                                  |  2 +
 process/mock/chainStorerMock.go               |  2 +-
 .../indexHashedNodesCoordinatorRegistry.go    |  2 +-
 storage/factory/openStorage.go                |  9 +++--
 update/mock/chainStorerMock.go                |  2 +-
 27 files changed, 89 insertions(+), 68 deletions(-)

diff --git a/cmd/node/main.go b/cmd/node/main.go
index c3df2af1870..5f98e109a9f 100644
--- a/cmd/node/main.go
+++ b/cmd/node/main.go
@@ -622,6 +622,7 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error {
 	epochStartBootstrapArgs := bootstrap.ArgsEpochStartBootstrap{
 		PublicKey:         pubKey,
 		Marshalizer:       coreComponents.InternalMarshalizer,
+		TxSignMarshalizer: coreComponents.TxSignMarshalizer,
 		Hasher:            coreComponents.Hasher,
 		Messenger:         networkComponents.NetMessenger,
 		GeneralConfig:     *generalConfig,
diff --git a/core/constants.go b/core/constants.go
index 58c2e3a3668..443d2283600 100644
--- a/core/constants.go
+++ b/core/constants.go
@@ -303,5 +303,8 @@ const HighestRoundFromBootStorage = "highestRoundFromBootStorage"
 // TriggerRegistryKeyPrefix is the key prefix to save epoch start registry to storage
 const TriggerRegistryKeyPrefix = "epochStartTrigger_"

+// TriggerRegistryInitialKeyPrefix is the key prefix to save initial data to storage
+const TriggerRegistryInitialKeyPrefix = "initial_value_epoch_"
+
 // NodesCoordinatorRegistryKeyPrefix is the key prefix to save epoch start registry to storage
 const NodesCoordinatorRegistryKeyPrefix = "indexHashed_"
diff --git a/core/mock/chainStorerMock.go b/core/mock/chainStorerMock.go
index 8c8b2407124..22048d7cf5c 100644
--- a/core/mock/chainStorerMock.go
+++ b/core/mock/chainStorerMock.go
@@ -5,7 +5,7 @@ import (
 	"github.com/ElrondNetwork/elrond-go/storage"
 )

-// ChainStorerMock is a mock implementation of the ChianStorer interface
+// ChainStorerMock is a mock implementation of the ChainStorer interface
 type ChainStorerMock struct {
 	AddStorerCalled func(key dataRetriever.UnitType, s storage.Storer)
 	GetStorerCalled func(unitType dataRetriever.UnitType) storage.Storer
diff --git a/data/syncer/baseAccountsSyncer.go b/data/syncer/baseAccountsSyncer.go
index 45bd9bd68a2..9549eb25430 100644
--- a/data/syncer/baseAccountsSyncer.go
+++ b/data/syncer/baseAccountsSyncer.go
@@ -27,7 +27,6 @@ type baseAccountsSyncer struct {
 	shardId            uint32
 	cacher             storage.Cacher
 	rootHash           []byte
-	ctx                context.Context
 }

 const minWaitTime = time.Second
@@ -65,7 +64,7 @@ func checkArgs(args ArgsNewBaseAccountsSyncer) error {
 	return nil
 }

-func (b *baseAccountsSyncer) syncMainTrie(rootHash []byte, trieTopic string) error {
+func (b *baseAccountsSyncer) syncMainTrie(rootHash []byte, trieTopic string, ctx context.Context) error {
 	b.rootHash = rootHash

 	dataTrie, err := trie.NewTrie(b.trieStorageManager, b.marshalizer, b.hasher)
@@ -80,7 +79,7 @@ func (b *baseAccountsSyncer) syncMainTrie(rootHash []byte, trieTopic string) err
 	}
 	b.trieSyncers[string(rootHash)] = trieSyncer

-	err = trieSyncer.StartSyncing(rootHash, b.ctx)
+	err = trieSyncer.StartSyncing(rootHash, ctx)
 	if err != nil {
 		return err
 	}
diff --git a/data/syncer/userAccountsSyncer.go b/data/syncer/userAccountsSyncer.go
index bb4c78d5bd4..a5bc11f5241 100644
--- a/data/syncer/userAccountsSyncer.go
+++ b/data/syncer/userAccountsSyncer.go
@@ -49,16 +49,15 @@ func NewUserAccountsSyncer(args ArgsNewUserAccountsSyncer) (*userAccountsSyncer,
 	return u, nil
 }

-// SyncAccounts will launch the syncing method to gather all the data needed for userAccounts
+// SyncAccounts will launch the syncing method to gather all the data needed for userAccounts - it is a blocking method
 func (u *userAccountsSyncer) SyncAccounts(rootHash []byte) error {
 	u.mutex.Lock()
 	defer u.mutex.Unlock()

 	ctx, cancel := context.WithTimeout(context.Background(), u.waitTime)
 	defer cancel()
-	u.ctx = ctx

-	err := u.syncMainTrie(rootHash, factory.AccountTrieNodesTopic)
+	err := u.syncMainTrie(rootHash, factory.AccountTrieNodesTopic, ctx)
 	if err != nil {
 		return err
 	}
@@ -69,7 +68,7 @@ func (u *userAccountsSyncer) SyncAccounts(rootHash []byte) error {
 		return err
 	}

-	err = u.syncAccountDataTries(rootHashes)
+	err = u.syncAccountDataTries(rootHashes, ctx)
 	if err != nil {
 		return err
 	}
@@ -77,7 +76,7 @@ func (u *userAccountsSyncer) SyncAccounts(rootHash []byte) error {
 	return nil
 }

-func (u *userAccountsSyncer) syncAccountDataTries(rootHashes [][]byte) error {
+func (u *userAccountsSyncer) syncAccountDataTries(rootHashes [][]byte, ctx context.Context) error {
 	for _, rootHash := range rootHashes {
 		dataTrie, err := trie.NewTrie(u.trieStorageManager, u.marshalizer, u.hasher)
 		if err != nil {
@@ -91,7 +90,7 @@ func (u *userAccountsSyncer) syncAccountDataTries(rootHashes [][]byte) error {
 		}
 		u.trieSyncers[string(rootHash)] = trieSyncer

-		err = trieSyncer.StartSyncing(rootHash, u.ctx)
+		err = trieSyncer.StartSyncing(rootHash, ctx)
 		if err != nil {
 			return err
 		}
diff --git a/data/syncer/validatorAccountsSyncer.go b/data/syncer/validatorAccountsSyncer.go
index 3effdee7b61..fc3be17116c 100644
--- a/data/syncer/validatorAccountsSyncer.go
+++ b/data/syncer/validatorAccountsSyncer.go
@@ -44,14 +44,13 @@ func NewValidatorAccountsSyncer(args ArgsNewValidatorAccountsSyncer) (*validator
 	return u, nil
 }

-// SyncAccounts will launch the syncing method to gather all the data needed for validatorAccounts
+// SyncAccounts will launch the syncing method to gather all the data needed for validatorAccounts - it is a blocking method
 func (v *validatorAccountsSyncer) SyncAccounts(rootHash []byte) error {
 	v.mutex.Lock()
 	defer v.mutex.Unlock()

 	ctx, cancel := context.WithTimeout(context.Background(), v.waitTime)
 	defer cancel()
-	v.ctx = ctx

-	return v.syncMainTrie(rootHash, factory.ValidatorTrieNodesTopic)
+	return v.syncMainTrie(rootHash, factory.ValidatorTrieNodesTopic, ctx)
 }
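The ctx field removed from baseAccountsSyncer above is replaced by a context created per SyncAccounts call and passed explicitly down through syncMainTrie and StartSyncing. A minimal, self-contained Go sketch of that pattern follows; the names are illustrative stand-ins, not the real syncer types:

package main

import (
	"context"
	"fmt"
	"time"
)

// syncMainTrie stands in for the real trie sync work; it watches ctx so a
// timeout or cancellation aborts the sync instead of blocking forever.
func syncMainTrie(rootHash []byte, ctx context.Context) error {
	select {
	case <-time.After(50 * time.Millisecond): // simulated sync work
		return nil
	case <-ctx.Done():
		return fmt.Errorf("trie sync aborted: %w", ctx.Err())
	}
}

// SyncAccounts builds the context per call, mirroring the patch: the context is
// scoped to this invocation and cancelled on return, so no struct field is needed.
func SyncAccounts(rootHash []byte, waitTime time.Duration) error {
	ctx, cancel := context.WithTimeout(context.Background(), waitTime)
	defer cancel()

	return syncMainTrie(rootHash, ctx)
}

func main() {
	fmt.Println(SyncAccounts([]byte("rootHash"), time.Minute))
}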
diff --git a/data/trie/sync.go b/data/trie/sync.go
index cf0f2606a58..a168b6fb242 100644
--- a/data/trie/sync.go
+++ b/data/trie/sync.go
@@ -125,7 +125,7 @@ func (ts *trieSyncer) getNextNodes() error {
 	var err error
 	nextNodes := make([]node, 0)
 	missingNodes := make([][]byte, 0)
-	currMissingNodes := make([][]byte, 0)
+	currentMissingNodes := make([][]byte, 0)

 	newElement := true
@@ -134,7 +134,7 @@ func (ts *trieSyncer) getNextNodes() error {
 		ts.nodeHashesMutex.Lock()
 		for nodeHash := range ts.nodeHashes {
-			currMissingNodes = currMissingNodes[:0]
+			currentMissingNodes = currentMissingNodes[:0]

 			currentNode, err = ts.getNode([]byte(nodeHash))
 			if err != nil {
@@ -145,23 +145,23 @@
 				ts.trie.root = currentNode
 			}

-			currMissingNodes, err = currentNode.loadChildren(ts.getNode)
+			currentMissingNodes, err = currentNode.loadChildren(ts.getNode)
 			if err != nil {
+				ts.nodeHashesMutex.Unlock()
 				return err
 			}

-			if len(currMissingNodes) > 0 {
-				missingNodes = append(missingNodes, currMissingNodes...)
+			if len(currentMissingNodes) > 0 {
+				missingNodes = append(missingNodes, currentMissingNodes...)
 				continue
 			}

 			delete(ts.nodeHashes, nodeHash)
-			ts.receivedNodesMutex.Lock()
-			delete(ts.receivedNodes, nodeHash)
-			ts.receivedNodesMutex.Unlock()
+			ts.deleteFromReceived(nodeHash)

 			nextNodes, err = currentNode.getChildren(ts.trie.Database())
 			if err != nil {
+				ts.nodeHashesMutex.Lock()
 				return err
 			}
@@ -180,6 +180,12 @@
 	return nil
 }

+func (ts *trieSyncer) deleteFromReceived(nodeHash string) {
+	ts.receivedNodesMutex.Lock()
+	delete(ts.receivedNodes, nodeHash)
+	ts.receivedNodesMutex.Unlock()
+}
+
 // adds new elements to needed hash map, lock ts.nodeHashesMutex before calling
 func (ts *trieSyncer) addNew(nextNodes []node) bool {
 	newElement := false
diff --git a/dataRetriever/mock/chainStorerMock.go b/dataRetriever/mock/chainStorerMock.go
index 2948d09b6ed..83e932e1016 100644
--- a/dataRetriever/mock/chainStorerMock.go
+++ b/dataRetriever/mock/chainStorerMock.go
@@ -6,7 +6,7 @@ import (
 	"github.com/pkg/errors"
 )

-// ChainStorerMock is a mock implementation of the ChianStorer interface
+// ChainStorerMock is a mock implementation of the ChainStorer interface
 type ChainStorerMock struct {
 	AddStorerCalled func(key dataRetriever.UnitType, s storage.Storer)
 	GetStorerCalled func(unitType dataRetriever.UnitType) storage.Storer
diff --git a/dataRetriever/requestHandlers/requestHandler.go b/dataRetriever/requestHandlers/requestHandler.go
index 831ae66e38a..e4b0c4db3cc 100644
--- a/dataRetriever/requestHandlers/requestHandler.go
+++ b/dataRetriever/requestHandlers/requestHandler.go
@@ -50,7 +50,7 @@ func NewResolverRequestHandler(
 		return nil, dataRetriever.ErrNilWhiteListHandler
 	}
 	if requestInterval < time.Millisecond {
-		return nil, dataRetriever.ErrRequestIntervalTooSmall
+		return nil, fmt.Errorf("%w: request interval is smaller than a millisecond", dataRetriever.ErrRequestIntervalTooSmall)
 	}

 	rrh := &resolverRequestHandler{
diff --git a/epochStart/bootstrap/baseStorageHandler.go b/epochStart/bootstrap/baseStorageHandler.go
index d297785d491..e9496a53ff7 100644
--- a/epochStart/bootstrap/baseStorageHandler.go
+++ b/epochStart/bootstrap/baseStorageHandler.go
@@ -45,6 +45,7 @@ func (bsh *baseStorageHandler) saveNodesCoordinatorRegistry(
 ) ([]byte, error) {
 	key := append([]byte(core.NodesCoordinatorRegistryKeyPrefix), metaBlock.RandSeed...)

+	// TODO: replace hardcoded json - although it is hardcoded in nodesCoordinator as well.
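	// A possible shape for the TODO above (a sketch, not part of this patch): assuming
	// baseStorageHandler keeps a marshal.Marshalizer field - the bsh.marshalizer name
	// below is hypothetical - the hardcoded call could become:
	//
	//	registryBytes, err := bsh.marshalizer.Marshal(nodesConfig)
	//
	// Any injected encoding has to stay in sync with nodesCoordinator, which currently
	// hardcodes json on its side as well.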
 	registryBytes, err := json.Marshal(nodesConfig)
 	if err != nil {
 		return nil, err
diff --git a/epochStart/bootstrap/disabled/disabledAccountsAdapter.go b/epochStart/bootstrap/disabled/disabledAccountsAdapter.go
index 291e2ee9cb7..c3a73eb9b25 100644
--- a/epochStart/bootstrap/disabled/disabledAccountsAdapter.go
+++ b/epochStart/bootstrap/disabled/disabledAccountsAdapter.go
@@ -8,7 +8,7 @@ import (
 type accountsAdapter struct {
 }

-// NewAccountsAdapter returns a new instance of accountsAdapter
+// NewAccountsAdapter returns a nil implementation of accountsAdapter
 func NewAccountsAdapter() *accountsAdapter {
 	return &accountsAdapter{}
 }
diff --git a/epochStart/bootstrap/disabled/disabledChainStorer.go b/epochStart/bootstrap/disabled/disabledChainStorer.go
index 6e4f1a522c3..51ae235551d 100644
--- a/epochStart/bootstrap/disabled/disabledChainStorer.go
+++ b/epochStart/bootstrap/disabled/disabledChainStorer.go
@@ -7,7 +7,7 @@ import (
 	"github.com/ElrondNetwork/elrond-go/storage"
 )

-// ChainStorer is a mock implementation of the ChianStorer interface
+// ChainStorer is a mock implementation of the ChainStorer interface
 type chainStorer struct {
 	mapStorages map[dataRetriever.UnitType]storage.Storer
 	mutex       sync.Mutex
diff --git a/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go b/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go
index 78c28554e5d..de414da95c9 100644
--- a/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go
+++ b/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go
@@ -19,12 +19,15 @@ import (
 	"github.com/ElrondNetwork/elrond-go/update"
 )

+const timeSpanForBadHeaders = time.Minute
+
 // ArgsEpochStartInterceptorContainer holds the arguments needed for creating a new epoch start interceptors
 // container factory
 type ArgsEpochStartInterceptorContainer struct {
 	Config            config.Config
 	ShardCoordinator  sharding.Coordinator
-	Marshalizer       marshal.Marshalizer
+	TxSignMarshalizer marshal.Marshalizer
+	ProtoMarshalizer  marshal.Marshalizer
 	Hasher            hashing.Hasher
 	Messenger         process.TopicHandler
 	DataPool          dataRetriever.PoolsHolder
@@ -41,7 +44,6 @@
 func NewEpochStartInterceptorsContainer(args ArgsEpochStartInterceptorContainer) (process.InterceptorsContainer, error) {
 	nodesCoordinator := disabled.NewNodesCoordinator()
 	storer := disabled.NewChainStorer()
-	txSignMarshalizer := marshal.JsonMarshalizer{}
 	antiFloodHandler := disabled.NewAntiFloodHandler()
 	multiSigner := disabled.NewMultiSigner()
 	accountsAdapter := disabled.NewAccountsAdapter()
@@ -52,7 +54,7 @@
 	if err != nil {
 		return nil, err
 	}
-	blackListHandler := timecache.NewTimeCache(1 * time.Minute)
+	blackListHandler := timecache.NewTimeCache(timeSpanForBadHeaders)
 	feeHandler := genesis.NewGenesisFeeHandler()
 	headerSigVerifier := disabled.NewHeaderSigVerifier()
 	sizeCheckDelta := 0
@@ -64,8 +66,8 @@
 		NodesCoordinator:  nodesCoordinator,
 		Messenger:         args.Messenger,
 		Store:             storer,
-		ProtoMarshalizer:  args.Marshalizer,
-		TxSignMarshalizer: &txSignMarshalizer,
+		ProtoMarshalizer:  args.ProtoMarshalizer,
+		TxSignMarshalizer: args.TxSignMarshalizer,
 		Hasher:            args.Hasher,
 		MultiSigner:       multiSigner,
 		DataPool:          args.DataPool,
diff --git a/epochStart/bootstrap/fromLocalStorage.go b/epochStart/bootstrap/fromLocalStorage.go
index 66fb793bbe9..a89e35ab825 100644
--- a/epochStart/bootstrap/fromLocalStorage.go
+++ b/epochStart/bootstrap/fromLocalStorage.go
@@ -52,9 +52,7 @@ func (e *epochStartBootstrap) prepareEpochFromStorage() (Parameters, error) {
 		return Parameters{}, err
 	}

-	unitsToOpen := make([]string, 0)
-	unitsToOpen = append(unitsToOpen, e.generalConfig.BootstrapStorage.DB.FilePath)
-	unitsToOpen = append(unitsToOpen, e.generalConfig.MetaBlockStorage.DB.FilePath)
+	unitsToOpen := []string{e.generalConfig.BootstrapStorage.DB.FilePath, e.generalConfig.MetaBlockStorage.DB.FilePath}

 	storageUnits, err := openStorageHandler.OpenStorageUnits(unitsToOpen)
 	defer func() {
diff --git a/epochStart/bootstrap/process.go b/epochStart/bootstrap/process.go
index 6b7d33cb5ff..fc22dde96c3 100644
--- a/epochStart/bootstrap/process.go
+++ b/epochStart/bootstrap/process.go
@@ -40,9 +40,11 @@ import (

 var log = logger.GetOrCreate("epochStart/bootstrap")

-const timeToWait = 8 * time.Second
+const timeToWait = 10 * time.Second
+const timeBetweenRequests = 100 * time.Millisecond
+const maxToRequest = 100

-// BootstrapParameters
+// Parameters defines the DTO for the result produced by the bootstrap component
 type Parameters struct {
 	Epoch       uint32
 	SelfShardId uint32
@@ -67,6 +69,7 @@ type epochStartBootstrap struct {
 	// should come via arguments
 	publicKey         crypto.PublicKey
 	marshalizer       marshal.Marshalizer
+	txSignMarshalizer marshal.Marshalizer
 	hasher            hashing.Hasher
 	messenger         p2p.Messenger
 	generalConfig     config.Config
@@ -121,6 +124,7 @@ type ArgsEpochStartBootstrap struct {
 	PublicKey         crypto.PublicKey
 	Marshalizer       marshal.Marshalizer
+	TxSignMarshalizer marshal.Marshalizer
 	Hasher            hashing.Hasher
 	Messenger         p2p.Messenger
 	GeneralConfig     config.Config
@@ -147,6 +151,7 @@ func NewEpochStartBootstrap(args ArgsEpochStartBootstrap) (*epochStartBootstrap,
 	epochStartProvider := &epochStartBootstrap{
 		publicKey:         args.PublicKey,
 		marshalizer:       args.Marshalizer,
+		txSignMarshalizer: args.TxSignMarshalizer,
 		hasher:            args.Hasher,
 		messenger:         args.Messenger,
 		generalConfig:     args.GeneralConfig,
@@ -302,7 +307,8 @@ func (e *epochStartBootstrap) createSyncers() error {
 	args := factoryInterceptors.ArgsEpochStartInterceptorContainer{
 		Config:            e.generalConfig,
 		ShardCoordinator:  e.shardCoordinator,
-		Marshalizer:       e.marshalizer,
+		ProtoMarshalizer:  e.marshalizer,
+		TxSignMarshalizer: e.txSignMarshalizer,
 		Hasher:            e.hasher,
 		Messenger:         e.messenger,
 		DataPool:          e.dataPool,
@@ -381,7 +387,7 @@ func (e *epochStartBootstrap) requestAndProcessing() (Parameters, error) {
 	if err != nil {
 		return Parameters{}, err
 	}
-	log.Info("start in epoch bootstrap: got shard headers and previous epoch start meta block")
+	log.Debug("start in epoch bootstrap: got shard headers and previous epoch start meta block")

 	prevEpochStartMetaHash := e.epochStartMeta.EpochStart.Economics.PrevEpochStartHash
 	prevEpochStartMeta, ok := e.syncedHeaders[string(prevEpochStartMetaHash)].(*block.MetaBlock)
@@ -395,20 +401,20 @@
 	if err != nil {
 		return Parameters{}, err
 	}
-	log.Info("start in epoch bootstrap: createTrieStorageManagers")
+	log.Debug("start in epoch bootstrap: createTrieStorageManagers")

-	log.Info("start in epoch bootstrap: started syncPeerAccountsState")
+	log.Debug("start in epoch bootstrap: started syncPeerAccountsState")
 	err = e.syncPeerAccountsState(e.epochStartMeta.ValidatorStatsRootHash)
 	if err != nil {
 		return Parameters{}, err
 	}
-	log.Info("start in epoch bootstrap: syncPeerAccountsState", "peer account tries map length", len(e.peerAccountTries))
+	log.Debug("start in epoch bootstrap: syncPeerAccountsState", "peer account tries map length", len(e.peerAccountTries))

 	err = e.processNodesConfig(pubKeyBytes)
 	if err != nil {
 		return Parameters{}, err
 	}
-	log.Info("start in epoch bootstrap: processNodesConfig")
+	log.Debug("start in epoch bootstrap: processNodesConfig")

 	if e.baseData.shardId == core.AllShardId {
 		destShardID := core.MetachainShardId
@@ -427,7 +433,7 @@
 	if err != nil {
 		return Parameters{}, err
 	}
-	log.Info("start in epoch bootstrap: shardCoordinator")
+	log.Debug("start in epoch bootstrap: shardCoordinator")

 	if e.shardCoordinator.SelfId() != e.genesisShardCoordinator.SelfId() {
 		err = e.createTriesForNewShardId(e.shardCoordinator.SelfId())
@@ -538,7 +544,7 @@ func (e *epochStartBootstrap) requestAndProcessForShard() error {
 	if err != nil {
 		return err
 	}
-	log.Info("start in epoch bootstrap: GetMiniBlocks")
+	log.Debug("start in epoch bootstrap: GetMiniBlocks")

 	shardIds := []uint32{
 		core.MetachainShardId,
@@ -561,7 +567,7 @@
 	if err != nil {
 		return err
 	}
-	log.Info("start in epoch bootstrap: SyncMissingHeadersByHash")
+	log.Debug("start in epoch bootstrap: SyncMissingHeadersByHash")

 	for hash, hdr := range neededHeaders {
 		e.syncedHeaders[hash] = hdr
@@ -572,12 +578,12 @@
 		return epochStart.ErrWrongTypeAssertion
 	}

-	log.Info("start in epoch bootstrap: started syncUserAccountsState")
+	log.Debug("start in epoch bootstrap: started syncUserAccountsState")
 	err = e.syncUserAccountsState(ownShardHdr.RootHash)
 	if err != nil {
 		return err
 	}
-	log.Info("start in epoch bootstrap: syncUserAccountsState")
+	log.Debug("start in epoch bootstrap: syncUserAccountsState")

 	components := &ComponentsNeededForBootstrap{
 		EpochStartMetaBlock: e.epochStartMeta,
@@ -591,7 +597,7 @@
 		PendingMiniBlocks:   pendingMiniBlocks,
 	}

-	log.Info("reached maximum tested point from integration test")
+	log.Debug("reached maximum tested point from integration test")
 	storageHandlerComponent, err := NewShardStorageHandler(
 		e.generalConfig,
 		e.shardCoordinator,
@@ -733,15 +739,14 @@ func (e *epochStartBootstrap) createRequestHandler() error {
 		return err
 	}

-	requestedItemsHandler := timecache.NewTimeCache(100)
-	maxToRequest := 100
+	requestedItemsHandler := timecache.NewTimeCache(timeBetweenRequests)

 	e.requestHandler, err = requestHandlers.NewResolverRequestHandler(
 		finder,
 		requestedItemsHandler,
 		e.whiteListHandler,
 		maxToRequest,
 		core.MetachainShardId,
-		100*time.Millisecond,
+		timeBetweenRequests,
 	)
 	return err
 }
diff --git a/epochStart/bootstrap/shardStorageHandler.go b/epochStart/bootstrap/shardStorageHandler.go
index 28284c7bdf5..31e292458e7 100644
--- a/epochStart/bootstrap/shardStorageHandler.go
+++ b/epochStart/bootstrap/shardStorageHandler.go
@@ -151,11 +151,9 @@ func (ssh *shardStorageHandler) SaveDataToStorage(components *ComponentsNeededFo

 func getEpochStartShardData(metaBlock *block.MetaBlock, shardId uint32) (block.EpochStartShardData, error) {
 	for _, epochStartShardData := range metaBlock.EpochStart.LastFinalizedHeaders {
-		if epochStartShardData.ShardID != shardId {
-			continue
+		if epochStartShardData.ShardID == shardId {
+			return epochStartShardData, nil
 		}
-
-		return epochStartShardData, nil
 	}

 	return block.EpochStartShardData{}, epochStart.ErrEpochStartDataForShardNotFound
@@ -177,7 +175,7 @@ func (ssh *shardStorageHandler) getProcessedMiniBlocks(
 	}

 	if check.IfNil(neededMeta) {
-		return nil, epochStart.ErrEpochStartDataForShardNotFound
+		return nil, epochStart.ErrMissingHeader
 	}

 	processedMbHashes := make([][]byte, 0)
diff --git a/epochStart/metachain/trigger.go b/epochStart/metachain/trigger.go
index 2d0824c9939..9a6a1173896 100644
--- a/epochStart/metachain/trigger.go
+++ b/epochStart/metachain/trigger.go
@@ -95,7 +95,7 @@ func NewEpochStartTrigger(args *ArgsNewMetaEpochStartTrigger) (*trigger, error)
 		return nil, epochStart.ErrNilMetaBlockStorage
 	}

-	trigggerStateKey := fmt.Sprintf("initial_value_epoch%d", args.Epoch)
+	trigggerStateKey := core.TriggerRegistryInitialKeyPrefix + fmt.Sprintf("%d", args.Epoch)

 	trigger := &trigger{
 		triggerStateKey: []byte(trigggerStateKey),
diff --git a/epochStart/mock/chainStorerStub.go b/epochStart/mock/chainStorerStub.go
index 02f9a3e5635..8be4fa3a835 100644
--- a/epochStart/mock/chainStorerStub.go
+++ b/epochStart/mock/chainStorerStub.go
@@ -6,7 +6,7 @@ import (
 	"github.com/pkg/errors"
 )

-// ChainStorerStub is a mock implementation of the ChianStorer interface
+// ChainStorerStub is a mock implementation of the ChainStorer interface
 type ChainStorerStub struct {
 	AddStorerCalled func(key dataRetriever.UnitType, s storage.Storer)
 	GetStorerCalled func(unitType dataRetriever.UnitType) storage.Storer
diff --git a/epochStart/shardchain/trigger.go b/epochStart/shardchain/trigger.go
index 492a86778d6..c8bf54246f5 100644
--- a/epochStart/shardchain/trigger.go
+++ b/epochStart/shardchain/trigger.go
@@ -144,7 +144,7 @@ func NewEpochStartTrigger(args *ArgsShardEpochStartTrigger) (*trigger, error) {
 		return nil, epochStart.ErrNilShardHeaderStorage
 	}

-	trigggerStateKey := fmt.Sprintf("initial_value_epoch%d", args.Epoch)
+	trigggerStateKey := core.TriggerRegistryInitialKeyPrefix + fmt.Sprintf("%d", args.Epoch)

 	newTrigger := &trigger{
 		triggerStateKey: []byte(trigggerStateKey),
diff --git a/integrationTests/mock/chainStorerMock.go b/integrationTests/mock/chainStorerMock.go
index 2948d09b6ed..83e932e1016 100644
--- a/integrationTests/mock/chainStorerMock.go
+++ b/integrationTests/mock/chainStorerMock.go
@@ -6,7 +6,7 @@ import (
 	"github.com/pkg/errors"
 )

-// ChainStorerMock is a mock implementation of the ChianStorer interface
+// ChainStorerMock is a mock implementation of the ChainStorer interface
 type ChainStorerMock struct {
 	AddStorerCalled func(key dataRetriever.UnitType, s storage.Storer)
 	GetStorerCalled func(unitType dataRetriever.UnitType) storage.Storer
diff --git a/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go b/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go
index 61653dcb06f..f755f47a90f 100644
--- a/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go
+++ b/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go
@@ -55,8 +55,10 @@ func TestStartInEpochForAShardNodeInMultiShardedEnvironment(t *testing.T) {

 	nodes := convertToSlice(nodesMap)

+	// TODO: refactor test - node to join late should be created late.
 	nodeToJoinLate := nodes[numNodesPerShardOnline] // will return the last node in shard 0 which was not used in consensus
 	_ = nodeToJoinLate.Messenger.Close() // set as offline
+	// TODO: call nodeToJoinLate.Messenger.Bootstrap() later in the test, followed by a time.Sleep, to allow it to bootstrap its peers.
 	nodes = append(nodes[:numNodesPerShardOnline], nodes[numNodesPerShardOnline+1:]...)
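	// note (added sketch of the reasoning, not in the original test): the append above
	// removed the extra shard-0 node at index numNodesPerShardOnline, shifting later
	// elements left by one, so the second extra node - presumably shard 1's, originally
	// at index 2*numNodesPerShardOnline+1 - now sits at index 2*numNodesPerShardOnline,
	// which is what the next line removes.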
 	nodes = append(nodes[:2*numNodesPerShardOnline], nodes[2*numNodesPerShardOnline+1:]...)
@@ -124,6 +126,8 @@
 		_ = dataRetriever.SetEpochHandlerToHdrResolver(node.ResolversContainer, epochHandler)
 	}

+	// TODO: refactor this test in another PR
+
 	generalConfig := getGeneralConfig()
 	roundDurationMillis := 4000
 	epochDurationMillis := generalConfig.EpochStartConfig.RoundsPerEpoch * int64(roundDurationMillis)
@@ -149,6 +153,7 @@
 	argsBootstrapHandler := bootstrap.ArgsEpochStartBootstrap{
 		PublicKey:         nodeToJoinLate.NodeKeys.Pk,
 		Marshalizer:       integrationTests.TestMarshalizer,
+		TxSignMarshalizer: integrationTests.TestTxSignMarshalizer,
 		Hasher:            integrationTests.TestHasher,
 		Messenger:         messenger,
 		GeneralConfig:     getGeneralConfig(),
@@ -201,7 +206,7 @@
 	assert.NoError(t, err)

 	roundInt64 := roundFromStorage.Num
-	assert.Equal(t, int64(22), roundInt64)
+	assert.Equal(t, int64(21), roundInt64)

 	key := []byte(strconv.FormatInt(roundInt64, 10))
 	bootstrapDataBytes, err := bootstrapUnit.Get(key)
@@ -210,7 +215,7 @@
 	var bd bootstrapStorage.BootstrapData
 	err = integrationTests.TestMarshalizer.Unmarshal(&bd, bootstrapDataBytes)
 	assert.NoError(t, err)
-	assert.Equal(t, epoch, bd.LastHeader.Epoch)
+	assert.Equal(t, epoch-1, bd.LastHeader.Epoch)
 }

 func createTries(
diff --git a/node/mock/chainStorerMock.go b/node/mock/chainStorerMock.go
index 51d5de42132..0d5b0600cd7 100644
--- a/node/mock/chainStorerMock.go
+++ b/node/mock/chainStorerMock.go
@@ -6,7 +6,7 @@ import (
 	"github.com/pkg/errors"
 )

-// ChainStorerMock is a mock implementation of the ChianStorer interface
+// ChainStorerMock is a mock implementation of the ChainStorer interface
 type ChainStorerMock struct {
 	AddStorerCalled func(key dataRetriever.UnitType, s storage.Storer)
 	GetStorerCalled func(unitType dataRetriever.UnitType) storage.Storer
diff --git a/node/node.go b/node/node.go
index fc2fab86f47..de4609523c1 100644
--- a/node/node.go
+++ b/node/node.go
@@ -165,6 +165,8 @@ func (n *Node) IsRunning() bool {
 	return n.isRunning
 }

+// TODO: delete useless IsRunning, Start and Stop - too many usages in tests for this PR.
+
 // Start will set up the Node state as running
 func (n *Node) Start() {
 	n.isRunning = true
diff --git a/process/mock/chainStorerMock.go b/process/mock/chainStorerMock.go
index 2a1c0f1c5c2..891326e299c 100644
--- a/process/mock/chainStorerMock.go
+++ b/process/mock/chainStorerMock.go
@@ -6,7 +6,7 @@ import (
 	"github.com/pkg/errors"
 )

-// ChainStorerMock is a mock implementation of the ChianStorer interface
+// ChainStorerMock is a mock implementation of the ChainStorer interface
 type ChainStorerMock struct {
 	AddStorerCalled func(key dataRetriever.UnitType, s storage.Storer)
 	GetStorerCalled func(unitType dataRetriever.UnitType) storage.Storer
diff --git a/sharding/indexHashedNodesCoordinatorRegistry.go b/sharding/indexHashedNodesCoordinatorRegistry.go
index 6e8872f0038..80d838b766f 100644
--- a/sharding/indexHashedNodesCoordinatorRegistry.go
+++ b/sharding/indexHashedNodesCoordinatorRegistry.go
@@ -251,7 +251,7 @@ func NodesInfoToValidators(nodesInfo map[uint32][]*NodeInfo) (map[uint32][]Valid
 	validatorsMap := make(map[uint32][]Validator)

 	for shId, nodeInfoList := range nodesInfo {
-		validators := make([]Validator, 0)
+		validators := make([]Validator, 0, len(nodeInfoList))
 		for _, nodeInfo := range nodeInfoList {
 			validator, err := NewValidator(nodeInfo.PubKey(), nodeInfo.Address())
 			if err != nil {
diff --git a/storage/factory/openStorage.go b/storage/factory/openStorage.go
index c4e435f8f20..c9de1bd996f 100644
--- a/storage/factory/openStorage.go
+++ b/storage/factory/openStorage.go
@@ -17,7 +17,7 @@ import (
 	"github.com/ElrondNetwork/elrond-go/storage/storageUnit"
 )

-// ArgsNewOpenStorageUnits -
+// ArgsNewOpenStorageUnits defines the arguments needed to open a set of storage units from disk
 type ArgsNewOpenStorageUnits struct {
 	GeneralConfig config.Config
 	Marshalizer   marshal.Marshalizer
@@ -38,7 +38,8 @@ type openStorageUnits struct {
 	defaultShardString string
 }

-// NewStorageUnitOpenHandler -
+// TODO refactor this and unit tests
+// NewStorageUnitOpenHandler creates an openStorageUnits component
 func NewStorageUnitOpenHandler(args ArgsNewOpenStorageUnits) (*openStorageUnits, error) {
 	o := &openStorageUnits{
 		generalConfig:      args.GeneralConfig,
@@ -53,7 +54,7 @@ func NewStorageUnitOpenHandler(args ArgsNewOpenStorageUnits,
 	return o, nil
 }

-// OpenStorageUnits -
+// OpenStorageUnits opens the defined storage units from disk, if they exist
 func (o *openStorageUnits) OpenStorageUnits(
 	storageUnits []string,
 ) ([]storage.Storer, error) {
@@ -150,6 +151,8 @@ func (o *openStorageUnits) getMostUpToDateDirectory(
 	return mostRecentShard, nil
 }

+// TODO refactor this and test it
+
 // FindLatestDataFromStorage finds the last data (such as last epoch, shard ID or round) by searching over the
 // storage folders and opening older databases
 func FindLatestDataFromStorage(
diff --git a/update/mock/chainStorerMock.go b/update/mock/chainStorerMock.go
index ba92d016424..1a18f9b8f50 100644
--- a/update/mock/chainStorerMock.go
+++ b/update/mock/chainStorerMock.go
@@ -6,7 +6,7 @@ import (
 	"github.com/pkg/errors"
 )

-// ChainStorerMock is a mock implementation of the ChianStorer interface
+// ChainStorerMock is a mock implementation of the ChainStorer interface
 type ChainStorerMock struct {
 	AddStorerCalled func(key dataRetriever.UnitType, s storage.Storer)
 	GetStorerCalled func(unitType dataRetriever.UnitType) storage.Storer