diff --git a/api/groups/nodeGroup.go b/api/groups/nodeGroup.go index 83e63e1e148..021ad389ed7 100644 --- a/api/groups/nodeGroup.go +++ b/api/groups/nodeGroup.go @@ -40,7 +40,7 @@ type nodeFacadeHandler interface { GetQueryHandler(name string) (debug.QueryHandler, error) GetEpochStartDataAPI(epoch uint32) (*common.EpochStartDataAPI, error) GetPeerInfo(pid string) ([]core.QueryP2PPeerInfo, error) - GetConnectedPeersRatings() string + GetConnectedPeersRatingsOnMainNetwork() (string, error) GetManagedKeysCount() int GetManagedKeys() []string GetEligibleManagedKeys() ([]string, error) @@ -355,7 +355,19 @@ func (ng *nodeGroup) bootstrapMetrics(c *gin.Context) { // connectedPeersRatings returns the node's connected peers ratings func (ng *nodeGroup) connectedPeersRatings(c *gin.Context) { - ratings := ng.getFacade().GetConnectedPeersRatings() + ratings, err := ng.getFacade().GetConnectedPeersRatingsOnMainNetwork() + if err != nil { + c.JSON( + http.StatusInternalServerError, + shared.GenericAPIResponse{ + Data: nil, + Error: err.Error(), + Code: shared.ReturnCodeInternalError, + }, + ) + return + } + c.JSON( http.StatusOK, shared.GenericAPIResponse{ diff --git a/api/groups/nodeGroup_test.go b/api/groups/nodeGroup_test.go index 41507596b3f..92fc174ff81 100644 --- a/api/groups/nodeGroup_test.go +++ b/api/groups/nodeGroup_test.go @@ -280,35 +280,41 @@ func TestBootstrapStatusMetrics_ShouldWork(t *testing.T) { assert.True(t, valuesFound) } -func TestBootstrapGetConnectedPeersRatings_ShouldWork(t *testing.T) { - providedRatings := map[string]string{ - "pid1": "100", - "pid2": "-50", - "pid3": "-5", - } - buff, _ := json.Marshal(providedRatings) - facade := mock.FacadeStub{ - GetConnectedPeersRatingsCalled: func() string { - return string(buff) - }, - } +func TestNodeGroup_GetConnectedPeersRatings(t *testing.T) { + t.Parallel() - nodeGroup, err := groups.NewNodeGroup(&facade) - require.NoError(t, err) + t.Run("should work", func(t *testing.T) { + t.Parallel() - ws := startWebServer(nodeGroup, "node", getNodeRoutesConfig()) + providedRatings := map[string]string{ + "pid1": "100", + "pid2": "-50", + "pid3": "-5", + } + buff, _ := json.Marshal(providedRatings) + facade := mock.FacadeStub{ + GetConnectedPeersRatingsOnMainNetworkCalled: func() (string, error) { + return string(buff), nil + }, + } - req, _ := http.NewRequest("GET", "/node/connected-peers-ratings", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) + nodeGroup, err := groups.NewNodeGroup(&facade) + require.NoError(t, err) - response := &shared.GenericAPIResponse{} - loadResponse(resp.Body, response) - respMap, ok := response.Data.(map[string]interface{}) - assert.True(t, ok) - ratings, ok := respMap["ratings"].(string) - assert.True(t, ok) - assert.Equal(t, string(buff), ratings) + ws := startWebServer(nodeGroup, "node", getNodeRoutesConfig()) + + req, _ := http.NewRequest("GET", "/node/connected-peers-ratings", nil) + resp := httptest.NewRecorder() + ws.ServeHTTP(resp, req) + + response := &shared.GenericAPIResponse{} + loadResponse(resp.Body, response) + respMap, ok := response.Data.(map[string]interface{}) + assert.True(t, ok) + ratings, ok := respMap["ratings"].(string) + assert.True(t, ok) + assert.Equal(t, string(buff), ratings) + }) } func TestStatusMetrics_ShouldDisplayNonP2pMetrics(t *testing.T) { diff --git a/api/mock/facadeStub.go b/api/mock/facadeStub.go index 49175c2aa74..d8955decd9a 100644 --- a/api/mock/facadeStub.go +++ b/api/mock/facadeStub.go @@ -43,7 +43,7 @@ type FacadeStub struct { GetValueForKeyCalled 
func(address string, key string, options api.AccountQueryOptions) (string, api.BlockInfo, error) GetGuardianDataCalled func(address string, options api.AccountQueryOptions) (api.GuardianData, api.BlockInfo, error) GetPeerInfoCalled func(pid string) ([]core.QueryP2PPeerInfo, error) - GetConnectedPeersRatingsCalled func() string + GetConnectedPeersRatingsOnMainNetworkCalled func() (string, error) GetEpochStartDataAPICalled func(epoch uint32) (*common.EpochStartDataAPI, error) GetThrottlerForEndpointCalled func(endpoint string) (core.Throttler, bool) GetUsernameCalled func(address string, options api.AccountQueryOptions) (string, api.BlockInfo, error) @@ -388,9 +388,9 @@ func (f *FacadeStub) GetPeerInfo(pid string) ([]core.QueryP2PPeerInfo, error) { return f.GetPeerInfoCalled(pid) } -// GetConnectedPeersRatings - -func (f *FacadeStub) GetConnectedPeersRatings() string { - return f.GetConnectedPeersRatingsCalled() +// GetConnectedPeersRatingsOnMainNetwork - +func (f *FacadeStub) GetConnectedPeersRatingsOnMainNetwork() (string, error) { + return f.GetConnectedPeersRatingsOnMainNetworkCalled() } // GetEpochStartDataAPI - diff --git a/api/shared/interface.go b/api/shared/interface.go index e62ce0219e5..e7bd44595d7 100644 --- a/api/shared/interface.go +++ b/api/shared/interface.go @@ -100,7 +100,7 @@ type FacadeHandler interface { GetQueryHandler(name string) (debug.QueryHandler, error) GetEpochStartDataAPI(epoch uint32) (*common.EpochStartDataAPI, error) GetPeerInfo(pid string) ([]core.QueryP2PPeerInfo, error) - GetConnectedPeersRatings() string + GetConnectedPeersRatingsOnMainNetwork() (string, error) GetProof(rootHash string, address string) (*common.GetProofResponse, error) GetProofDataTrie(rootHash string, address string, key string) (*common.GetProofResponse, *common.GetProofResponse, error) GetProofCurrentRootHash(address string) (*common.GetProofResponse, error) diff --git a/cmd/node/CLI.md b/cmd/node/CLI.md index a3b51892e1c..05a3be078c8 100644 --- a/cmd/node/CLI.md +++ b/cmd/node/CLI.md @@ -26,6 +26,7 @@ GLOBAL OPTIONS: --config-preferences [path] The [path] for the preferences configuration file. This TOML file contains preferences configurations, such as the node display name or the shard to start in when starting as observer (default: "./config/prefs.toml") --config-external [path] The [path] for the external configuration file. This TOML file contains external configurations such as ElasticSearch's URL and login information (default: "./config/external.toml") --p2p-config [path] The [path] for the p2p configuration file. This TOML file contains peer-to-peer configurations such as port, target peer count or KadDHT settings (default: "./config/p2p.toml") + --full-archive-p2p-config [path] The [path] for the p2p configuration file for the full archive network. This TOML file contains peer-to-peer configurations such as port, target peer count or KadDHT settings (default: "./config/fullArchiveP2P.toml") --epoch-config [path] The [path] for the epoch configuration file. This TOML file contains activation epochs configurations (default: "./config/enableEpochs.toml") --round-config [path] The [path] for the round configuration file. This TOML file contains activation round configurations (default: "./config/enableRounds.toml") --gas-costs-config [path] The [path] for the gas costs configuration directory. (default: "./config/gasSchedules") @@ -33,6 +34,7 @@ GLOBAL OPTIONS: --validator-key-pem-file filepath The filepath for the PEM file which contains the secret keys for the validator key. 
(default: "./config/validatorKey.pem") --all-validator-keys-pem-file filepath The filepath for the PEM file which contains all the secret keys managed by the current node. (default: "./config/allValidatorsKeys.pem") --port [p2p port] The [p2p port] number on which the application will start. Can use single values such as `0, 10230, 15670` or range of ports such as `5000-10000` (default: "0") + --full-archive-port [p2p port] The [p2p port] number on which the application will start the second network when running in full archive mode. Can use single values such as `0, 10230, 15670` or range of ports such as `5000-10000` (default: "0") --profile-mode Boolean option for enabling the profiling mode. If set, the /debug/pprof routes will be available on the node for profiling the application. --use-health-service Boolean option for enabling the health service. --storage-cleanup Boolean option for starting the node with clean storage. If set, the Node will empty its storage before starting, otherwise it will start from the last state stored on disk.. diff --git a/cmd/node/config/fullArchiveP2P.toml b/cmd/node/config/fullArchiveP2P.toml new file mode 100644 index 00000000000..a7bfa21d7dc --- /dev/null +++ b/cmd/node/config/fullArchiveP2P.toml @@ -0,0 +1,72 @@ +# FullArchiveP2P config file + +# NodeConfig holds the P2P settings +[Node] + # Port is the port that will be opened by the node on all interfaces so other peers can connect to it + # If the port = 0, the node will search for a free port on the machine and use it + Port = "37373-38383" + + # ThresholdMinConnectedPeers represents the minimum number of connections a node should have before it can start + # the sync and consensus mechanisms + ThresholdMinConnectedPeers = 3 + + # MinNumPeersToWaitForOnBootstrap is the minimum number of peers to wait on bootstrap or the node will wait the default + # time which is now set to ~20 seconds (the const defined in the common package named TimeToWaitForP2PBootstrap) + MinNumPeersToWaitForOnBootstrap = 10 + +# P2P peer discovery section + +# The following sections correspond to the way new peers will be discovered +# If all config types are disabled then the peer will run in single mode (will not try to find other peers) +# If more than one peer discovery mechanism is enabled, the application will output an error and will not start + +[KadDhtPeerDiscovery] + # Enabled: true/false to enable/disable this discovery mechanism + Enabled = true + + # Type represents the kad-dht glue code implementation. + # "legacy" will define the first implementation. + # "optimized" represents the new variant able to connect to multiple seeders at once. This implementation also has + # a built-in timer that will try to automatically reconnect to the seeders (in case the seeders recover after a + # premature shutdown) + Type = "optimized" + + # RefreshIntervalInSec represents the time in seconds between querying for new peers + RefreshIntervalInSec = 10 + + # ProtocolID represents the protocol that this node will advertize to other peers + # To connect to other nodes, those nodes should have the same ProtocolID string + ProtocolID = "/erd/kad/1.0.0" + + # InitialPeerList represents the list of strings of some known nodes that will bootstrap this node + # The address will be in a self-describing addressing format. 
+ # More can be found here: https://github.com/libp2p/specs/blob/master/3-requirements.md#34-transport-agnostic + # Example: + # /ip6/fe80::8823:6dff:fee7:f172/tcp/4001/p2p/QmYJyUMAcXEw1b5bFfbBbzYu5wyyjLMRHXGUkCXpag74Fu + # /ip4/162.246.145.218/udp/4001/utp/ipfs/QmYJyUMAcXEw1b5bFfbBbzYu5wyyjLMRHXGUkCXpag74Fu + # + # If the initial peers list is left empty, the node will not try to connect to other peers during initial bootstrap + # phase but will accept connections and will do the network discovery if another peer connects to it + InitialPeerList = ["/ip4/127.0.0.1/tcp/9999/p2p/16Uiu2HAkw5SNNtSvH1zJiQ6Gc3WoGNSxiyNueRKe6fuAuh57G3Bk"] + + # kademlia's routing table bucket size + BucketSize = 100 + + # RoutingTableRefreshIntervalInSec defines how many seconds should pass between 2 kad routing table auto refresh calls + RoutingTableRefreshIntervalInSec = 300 + +[Sharding] + # The targeted number of peer connections + TargetPeerCount = 36 + MaxIntraShardValidators = 7 + MaxCrossShardValidators = 15 + MaxIntraShardObservers = 2 + MaxCrossShardObservers = 3 + MaxSeeders = 2 + + # available options: + # `ListsSharder` will split the peers based on the shard membership (intra, cross or unknown) + # `OneListSharder` will do just the connection triming (upto TargetPeerCount value) not taking into account + # the shard membership of the connected peers + # `NilListSharder` will disable conection trimming (sharder is off) + Type = "ListsSharder" diff --git a/cmd/node/config/p2p.toml b/cmd/node/config/p2p.toml index 2ce99da3ba0..41c7c129c64 100644 --- a/cmd/node/config/p2p.toml +++ b/cmd/node/config/p2p.toml @@ -1,13 +1,13 @@ -#P2P config file +# P2P config file -#NodeConfig holds the P2P settings +# NodeConfig holds the P2P settings [Node] - #Port is the port that will be opened by the node on all interfaces so other peers can connect to it - #If the port = 0, the node will search for a free port on the machine and use it + # Port is the port that will be opened by the node on all interfaces so other peers can connect to it + # If the port = 0, the node will search for a free port on the machine and use it Port = "37373-38383" - #ThresholdMinConnectedPeers represents the minimum number of connections a node should have before it can start - #the sync and consensus mechanisms + # ThresholdMinConnectedPeers represents the minimum number of connections a node should have before it can start + # the sync and consensus mechanisms ThresholdMinConnectedPeers = 3 # MinNumPeersToWaitForOnBootstrap is the minimum number of peers to wait on bootstrap or the node will wait the default @@ -16,43 +16,43 @@ # P2P peer discovery section -#The following sections correspond to the way new peers will be discovered -#If all config types are disabled then the peer will run in single mode (will not try to find other peers) -#If more than one peer discovery mechanism is enabled, the application will output an error and will not start +# The following sections correspond to the way new peers will be discovered +# If all config types are disabled then the peer will run in single mode (will not try to find other peers) +# If more than one peer discovery mechanism is enabled, the application will output an error and will not start [KadDhtPeerDiscovery] - #Enabled: true/false to enable/disable this discovery mechanism + # Enabled: true/false to enable/disable this discovery mechanism Enabled = true - #Type represents the kad-dht glue code implementation. - #"legacy" will define the first implementation. 
- #"optimized" represents the new variant able to connect to multiple seeders at once. This implementation also has - #a built-in timer that will try to automatically reconnect to the seeders (in case the seeders recover after a - #premature shutdown) + # Type represents the kad-dht glue code implementation. + # "legacy" will define the first implementation. + # "optimized" represents the new variant able to connect to multiple seeders at once. This implementation also has + # a built-in timer that will try to automatically reconnect to the seeders (in case the seeders recover after a + # premature shutdown) Type = "optimized" - #RefreshIntervalInSec represents the time in seconds between querying for new peers + # RefreshIntervalInSec represents the time in seconds between querying for new peers RefreshIntervalInSec = 10 - #ProtocolID represents the protocol that this node will advertize to other peers - #To connect to other nodes, those nodes should have the same ProtocolID string + # ProtocolID represents the protocol that this node will advertize to other peers + # To connect to other nodes, those nodes should have the same ProtocolID string ProtocolID = "/erd/kad/1.0.0" - #InitialPeerList represents the list of strings of some known nodes that will bootstrap this node - #The address will be in a self-describing addressing format. - #More can be found here: https://github.com/libp2p/specs/blob/master/3-requirements.md#34-transport-agnostic - #Example: - # /ip6/fe80::8823:6dff:fee7:f172/tcp/4001/p2p/QmYJyUMAcXEw1b5bFfbBbzYu5wyyjLMRHXGUkCXpag74Fu - # /ip4/162.246.145.218/udp/4001/utp/ipfs/QmYJyUMAcXEw1b5bFfbBbzYu5wyyjLMRHXGUkCXpag74Fu + # InitialPeerList represents the list of strings of some known nodes that will bootstrap this node + # The address will be in a self-describing addressing format. 
+ # More can be found here: https://github.com/libp2p/specs/blob/master/3-requirements.md#34-transport-agnostic + # Example: + # /ip6/fe80::8823:6dff:fee7:f172/tcp/4001/p2p/QmYJyUMAcXEw1b5bFfbBbzYu5wyyjLMRHXGUkCXpag74Fu + # /ip4/162.246.145.218/udp/4001/utp/ipfs/QmYJyUMAcXEw1b5bFfbBbzYu5wyyjLMRHXGUkCXpag74Fu # - #If the initial peers list is left empty, the node will not try to connect to other peers during initial bootstrap - #phase but will accept connections and will do the network discovery if another peer connects to it + # If the initial peers list is left empty, the node will not try to connect to other peers during initial bootstrap + # phase but will accept connections and will do the network discovery if another peer connects to it InitialPeerList = ["/ip4/127.0.0.1/tcp/9999/p2p/16Uiu2HAkw5SNNtSvH1zJiQ6Gc3WoGNSxiyNueRKe6fuAuh57G3Bk"] - #kademlia's routing table bucket size + # kademlia's routing table bucket size BucketSize = 100 - #RoutingTableRefreshIntervalInSec defines how many seconds should pass between 2 kad routing table auto refresh calls + # RoutingTableRefreshIntervalInSec defines how many seconds should pass between 2 kad routing table auto refresh calls RoutingTableRefreshIntervalInSec = 300 [Sharding] @@ -64,13 +64,9 @@ MaxCrossShardObservers = 3 MaxSeeders = 2 - #available options: - # `ListsSharder` will split the peers based on the shard membership (intra, cross or unknown) - # `OneListSharder` will do just the connection triming (upto TargetPeerCount value) not taking into account - # the shard membership of the connected peers - # `NilListSharder` will disable conection trimming (sharder is off) + # available options: + # `ListsSharder` will split the peers based on the shard membership (intra, cross or unknown) + # `OneListSharder` will do just the connection triming (upto TargetPeerCount value) not taking into account + # the shard membership of the connected peers + # `NilListSharder` will disable conection trimming (sharder is off) Type = "ListsSharder" - - [AdditionalConnections] - #this value will be added to the target peer count automatically when the node will be in full archive mode - MaxFullHistoryObservers = 10 diff --git a/cmd/node/flags.go b/cmd/node/flags.go index 3fda68b4021..2c6fae9cbb6 100644 --- a/cmd/node/flags.go +++ b/cmd/node/flags.go @@ -91,6 +91,13 @@ var ( "configurations such as port, target peer count or KadDHT settings", Value: "./config/p2p.toml", } + // fullArchiveP2PConfigurationFile defines a flag for the path to the toml file containing P2P configuration for the full archive network + fullArchiveP2PConfigurationFile = cli.StringFlag{ + Name: "full-archive-p2p-config", + Usage: "The `" + filePathPlaceholder + "` for the p2p configuration file for the full archive network. 
This TOML file contains peer-to-peer " + + "configurations such as port, target peer count or KadDHT settings", + Value: "./config/fullArchiveP2P.toml", + } // epochConfigurationFile defines a flag for the path to the toml file containing the epoch configuration epochConfigurationFile = cli.StringFlag{ Name: "epoch-config", @@ -111,13 +118,20 @@ var ( Usage: "The `" + filePathPlaceholder + "` for the gas costs configuration directory.", Value: "./config/gasSchedules", } - // port defines a flag for setting the port on which the node will listen for connections + // port defines a flag for setting the port on which the node will listen for connections on the main network port = cli.StringFlag{ Name: "port", Usage: "The `[p2p port]` number on which the application will start. Can use single values such as " + "`0, 10230, 15670` or range of ports such as `5000-10000`", Value: "0", } + // fullArchivePort defines a flag for setting the port on which the node will listen for connections on the full archive network + fullArchivePort = cli.StringFlag{ + Name: "full-archive-port", + Usage: "The `[p2p port]` number on which the application will start the second network when running in full archive mode. " + + "Can use single values such as `0, 10230, 15670` or range of ports such as `5000-10000`", + Value: "0", + } // profileMode defines a flag for profiling the binary // If enabled, it will open the pprof routes over the default gin rest webserver. // There are several routes that will be available for profiling (profiling can be analyzed with: go tool pprof): @@ -405,6 +419,7 @@ func getFlags() []cli.Flag { configurationPreferencesFile, externalConfigFile, p2pConfigurationFile, + fullArchiveP2PConfigurationFile, epochConfigurationFile, roundConfigurationFile, gasScheduleConfigurationDirectory, @@ -412,6 +427,7 @@ func getFlags() []cli.Flag { validatorKeyPemFile, allValidatorKeysPemFile, port, + fullArchivePort, profileMode, useHealthService, storageCleanup, @@ -672,7 +688,8 @@ func processSnapshotLessObserverMode(log logger.Logger, configs *config.Configs) func processConfigImportDBMode(log logger.Logger, configs *config.Configs) error { importDbFlags := configs.ImportDbConfig generalConfigs := configs.GeneralConfig - p2pConfigs := configs.P2pConfig + p2pConfigs := configs.MainP2pConfig + fullArchiveP2PConfigs := configs.FullArchiveP2pConfig prefsConfig := configs.PreferencesConfig var err error @@ -692,6 +709,8 @@ func processConfigImportDBMode(log logger.Logger, configs *config.Configs) error generalConfigs.StateTriesConfig.CheckpointRoundsModulus = 100000000 p2pConfigs.Node.ThresholdMinConnectedPeers = 0 p2pConfigs.KadDhtPeerDiscovery.Enabled = false + fullArchiveP2PConfigs.Node.ThresholdMinConnectedPeers = 0 + fullArchiveP2PConfigs.KadDhtPeerDiscovery.Enabled = false alterStorageConfigsForDBImport(generalConfigs) @@ -702,6 +721,7 @@ func processConfigImportDBMode(log logger.Logger, configs *config.Configs) error "StoragePruning.NumEpochsToKeep", generalConfigs.StoragePruning.NumEpochsToKeep, "StoragePruning.NumActivePersisters", generalConfigs.StoragePruning.NumActivePersisters, "p2p.ThresholdMinConnectedPeers", p2pConfigs.Node.ThresholdMinConnectedPeers, + "fullArchiveP2P.ThresholdMinConnectedPeers", fullArchiveP2PConfigs.Node.ThresholdMinConnectedPeers, "no sig check", importDbFlags.ImportDbNoSigCheckFlag, "import save trie epoch root hash", importDbFlags.ImportDbSaveTrieEpochRootHash, "import DB start in epoch", importDbFlags.ImportDBStartInEpoch, diff --git a/cmd/node/main.go 
b/cmd/node/main.go index f89702cb3c3..9da31568a1d 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -201,12 +201,19 @@ func readConfigs(ctx *cli.Context, log logger.Logger) (*config.Configs, error) { } log.Debug("config", "file", configurationPaths.External) - configurationPaths.P2p = ctx.GlobalString(p2pConfigurationFile.Name) - p2pConfig, err := common.LoadP2PConfig(configurationPaths.P2p) + configurationPaths.MainP2p = ctx.GlobalString(p2pConfigurationFile.Name) + mainP2PConfig, err := common.LoadP2PConfig(configurationPaths.MainP2p) if err != nil { return nil, err } - log.Debug("config", "file", configurationPaths.P2p) + log.Debug("config", "file", configurationPaths.MainP2p) + + configurationPaths.FullArchiveP2p = ctx.GlobalString(fullArchiveP2PConfigurationFile.Name) + fullArchiveP2PConfig, err := common.LoadP2PConfig(configurationPaths.FullArchiveP2p) + if err != nil { + return nil, err + } + log.Debug("config", "file", configurationPaths.FullArchiveP2p) configurationPaths.Epoch = ctx.GlobalString(epochConfigurationFile.Name) epochConfig, err := common.LoadEpochConfig(configurationPaths.Epoch) @@ -223,7 +230,10 @@ func readConfigs(ctx *cli.Context, log logger.Logger) (*config.Configs, error) { log.Debug("config", "file", configurationPaths.RoundActivation) if ctx.IsSet(port.Name) { - p2pConfig.Node.Port = ctx.GlobalString(port.Name) + mainP2PConfig.Node.Port = ctx.GlobalString(port.Name) + } + if ctx.IsSet(fullArchivePort.Name) { + fullArchiveP2PConfig.Node.Port = ctx.GlobalString(fullArchivePort.Name) } if ctx.IsSet(destinationShardAsObserver.Name) { preferencesConfig.Preferences.DestinationShardAsObserver = ctx.GlobalString(destinationShardAsObserver.Name) @@ -243,7 +253,8 @@ func readConfigs(ctx *cli.Context, log logger.Logger) (*config.Configs, error) { RatingsConfig: ratingsConfig, PreferencesConfig: preferencesConfig, ExternalConfig: externalConfig, - P2pConfig: p2pConfig, + MainP2pConfig: mainP2PConfig, + FullArchiveP2pConfig: fullArchiveP2PConfig, ConfigurationPathsHolder: configurationPaths, EpochConfig: epochConfig, RoundConfig: roundConfig, diff --git a/cmd/seednode/config/p2p.toml b/cmd/seednode/config/p2p.toml index 5e13f92574f..43dbd7989bc 100644 --- a/cmd/seednode/config/p2p.toml +++ b/cmd/seednode/config/p2p.toml @@ -69,7 +69,3 @@ # the shard membership of the connected peers # `NilListSharder` will disable conection trimming (sharder is off) Type = "NilListSharder" - - [AdditionalConnections] - #this value will be added to the target peer count automatically when the node will be in full archive mode - MaxFullHistoryObservers = 0 diff --git a/cmd/seednode/main.go b/cmd/seednode/main.go index 5be736d3abd..f2f354830be 100644 --- a/cmd/seednode/main.go +++ b/cmd/seednode/main.go @@ -263,12 +263,13 @@ func createNode( P2pConfig: p2pConfig, SyncTimer: &p2pFactory.LocalSyncTimer{}, PreferredPeersHolder: disabled.NewPreferredPeersHolder(), - NodeOperationMode: p2p.NormalOperation, PeersRatingHandler: disabled.NewDisabledPeersRatingHandler(), ConnectionWatcherType: "disabled", P2pPrivateKey: p2pKey, P2pSingleSigner: p2pSingleSigner, P2pKeyGenerator: p2pKeyGen, + NetworkType: p2p.MainNetwork, + Logger: logger.GetOrCreate("seed/p2p"), } return p2pFactory.NewNetworkMessenger(arg) diff --git a/common/constants.go b/common/constants.go index 521ef905d8e..5cc7e7ccd4c 100644 --- a/common/constants.go +++ b/common/constants.go @@ -649,9 +649,6 @@ const MetricP2PIntraShardObservers = "erd_p2p_intra_shard_observers" // MetricP2PCrossShardObservers is the metric that outputs the 
cross-shard connected observers const MetricP2PCrossShardObservers = "erd_p2p_cross_shard_observers" -// MetricP2PFullHistoryObservers is the metric that outputs the full-history connected observers -const MetricP2PFullHistoryObservers = "erd_p2p_full_history_observers" - // MetricP2PUnknownPeers is the metric that outputs the unknown-shard connected peers const MetricP2PUnknownPeers = "erd_p2p_unknown_shard_peers" diff --git a/config/config.go b/config/config.go index d4a47867389..37a98884e5d 100644 --- a/config/config.go +++ b/config/config.go @@ -570,7 +570,8 @@ type Configs struct { RatingsConfig *RatingsConfig PreferencesConfig *Preferences ExternalConfig *ExternalConfig - P2pConfig *p2pConfig.P2PConfig + MainP2pConfig *p2pConfig.P2PConfig + FullArchiveP2pConfig *p2pConfig.P2PConfig FlagsConfig *ContextFlagsConfig ImportDbConfig *ImportDbConfig ConfigurationPathsHolder *ConfigurationPathsHolder @@ -587,7 +588,8 @@ type ConfigurationPathsHolder struct { Ratings string Preferences string External string - P2p string + MainP2p string + FullArchiveP2p string GasScheduleDirectoryName string Nodes string Genesis string diff --git a/config/overridableConfig/configOverriding.go b/config/overridableConfig/configOverriding.go index c1d82b52dbb..7e9f3a153de 100644 --- a/config/overridableConfig/configOverriding.go +++ b/config/overridableConfig/configOverriding.go @@ -10,10 +10,11 @@ import ( ) const ( - configTomlFile = "config.toml" - enableEpochsTomlFile = "enableEpochs.toml" - p2pTomlFile = "p2p.toml" - externalTomlFile = "external.toml" + configTomlFile = "config.toml" + enableEpochsTomlFile = "enableEpochs.toml" + p2pTomlFile = "p2p.toml" + fullArchiveP2PTomlFile = "fullArchiveP2P.toml" + externalTomlFile = "external.toml" ) var ( @@ -31,7 +32,9 @@ func OverrideConfigValues(newConfigs []config.OverridableConfig, configs *config case enableEpochsTomlFile: err = reflectcommon.AdaptStructureValueBasedOnPath(configs.EpochConfig, newConfig.Path, newConfig.Value) case p2pTomlFile: - err = reflectcommon.AdaptStructureValueBasedOnPath(configs.P2pConfig, newConfig.Path, newConfig.Value) + err = reflectcommon.AdaptStructureValueBasedOnPath(configs.MainP2pConfig, newConfig.Path, newConfig.Value) + case fullArchiveP2PTomlFile: + err = reflectcommon.AdaptStructureValueBasedOnPath(configs.FullArchiveP2pConfig, newConfig.Path, newConfig.Value) case externalTomlFile: err = reflectcommon.AdaptStructureValueBasedOnPath(configs.ExternalConfig, newConfig.Path, newConfig.Value) default: diff --git a/config/overridableConfig/configOverriding_test.go b/config/overridableConfig/configOverriding_test.go index 77a48590cd2..b15cf8e5c5c 100644 --- a/config/overridableConfig/configOverriding_test.go +++ b/config/overridableConfig/configOverriding_test.go @@ -45,11 +45,21 @@ func TestOverrideConfigValues(t *testing.T) { t.Run("should work for p2p.toml", func(t *testing.T) { t.Parallel() - configs := &config.Configs{P2pConfig: &p2pConfig.P2PConfig{Sharding: p2pConfig.ShardingConfig{TargetPeerCount: 5}}} + configs := &config.Configs{MainP2pConfig: &p2pConfig.P2PConfig{Sharding: p2pConfig.ShardingConfig{TargetPeerCount: 5}}} err := OverrideConfigValues([]config.OverridableConfig{{Path: "Sharding.TargetPeerCount", Value: "37", File: "p2p.toml"}}, configs) require.NoError(t, err) - require.Equal(t, uint32(37), configs.P2pConfig.Sharding.TargetPeerCount) + require.Equal(t, uint32(37), configs.MainP2pConfig.Sharding.TargetPeerCount) + }) + + t.Run("should work for fullArchiveP2P.toml", func(t *testing.T) { + t.Parallel() + + 
configs := &config.Configs{FullArchiveP2pConfig: &p2pConfig.P2PConfig{Sharding: p2pConfig.ShardingConfig{TargetPeerCount: 5}}} + + err := OverrideConfigValues([]config.OverridableConfig{{Path: "Sharding.TargetPeerCount", Value: "37", File: "fullArchiveP2P.toml"}}, configs) + require.NoError(t, err) + require.Equal(t, uint32(37), configs.FullArchiveP2pConfig.Sharding.TargetPeerCount) }) t.Run("should work for external.toml", func(t *testing.T) { diff --git a/config/tomlConfig_test.go b/config/tomlConfig_test.go index 787fa1486f2..f83322cb62a 100644 --- a/config/tomlConfig_test.go +++ b/config/tomlConfig_test.go @@ -499,9 +499,7 @@ func TestP2pConfig(t *testing.T) { MaxIntraShardObservers = 0 MaxCrossShardObservers = 0 MaxSeeders = 0 - Type = "` + shardingType + `" - [AdditionalConnections] - MaxFullHistoryObservers = 0` + Type = "` + shardingType + `"` expectedCfg := p2pConfig.P2PConfig{ Node: p2pConfig.NodeConfig{ diff --git a/consensus/mock/sposWorkerMock.go b/consensus/mock/sposWorkerMock.go index 47e7b9e196c..0454370bedf 100644 --- a/consensus/mock/sposWorkerMock.go +++ b/consensus/mock/sposWorkerMock.go @@ -49,7 +49,7 @@ func (sposWorkerMock *SposWorkerMock) RemoveAllReceivedMessagesCalls() { } // ProcessReceivedMessage - -func (sposWorkerMock *SposWorkerMock) ProcessReceivedMessage(message p2p.MessageP2P, _ core.PeerID) error { +func (sposWorkerMock *SposWorkerMock) ProcessReceivedMessage(message p2p.MessageP2P, _ core.PeerID, _ p2p.MessageHandler) error { return sposWorkerMock.ProcessReceivedMessageCalled(message) } diff --git a/consensus/spos/interface.go b/consensus/spos/interface.go index e9e31f6d202..1bb1eada421 100644 --- a/consensus/spos/interface.go +++ b/consensus/spos/interface.go @@ -126,7 +126,7 @@ type WorkerHandler interface { // RemoveAllReceivedMessagesCalls removes all the functions handlers RemoveAllReceivedMessagesCalls() // ProcessReceivedMessage method redirects the received message to the channel which should handle it - ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID) error + ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID, source p2p.MessageHandler) error // Extend does an extension for the subround with subroundId Extend(subroundId int) // GetConsensusStateChangedChannel gets the channel for the consensusStateChanged diff --git a/consensus/spos/worker.go b/consensus/spos/worker.go index e91ac9c2bda..7dd1776308e 100644 --- a/consensus/spos/worker.go +++ b/consensus/spos/worker.go @@ -334,7 +334,7 @@ func (wrk *Worker) getCleanedList(cnsDataList []*consensus.Message) []*consensus } // ProcessReceivedMessage method redirects the received message to the channel which should handle it -func (wrk *Worker) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID) error { +func (wrk *Worker) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID, _ p2p.MessageHandler) error { if check.IfNil(message) { return ErrNilMessage } diff --git a/consensus/spos/worker_test.go b/consensus/spos/worker_test.go index 47eaec4a9d2..37cc36f33c1 100644 --- a/consensus/spos/worker_test.go +++ b/consensus/spos/worker_test.go @@ -400,7 +400,7 @@ func TestWorker_ProcessReceivedMessageShouldErrIfFloodIsDetectedOnTopic(t *testi TopicField: "topic1", SignatureField: []byte("signature"), } - err := wrk.ProcessReceivedMessage(msg, "peer") + err := wrk.ProcessReceivedMessage(msg, "peer", &p2pmocks.MessengerStub{}) assert.Equal(t, expectedErr, err) } @@ -515,7 +515,7 @@ func 
TestWorker_ProcessReceivedMessageTxBlockBodyShouldRetNil(t *testing.T) { PeerField: currentPid, SignatureField: []byte("signature"), } - err := wrk.ProcessReceivedMessage(msg, fromConnectedPeerId) + err := wrk.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) assert.Nil(t, err) } @@ -523,7 +523,7 @@ func TestWorker_ProcessReceivedMessageTxBlockBodyShouldRetNil(t *testing.T) { func TestWorker_ProcessReceivedMessageNilMessageShouldErr(t *testing.T) { t.Parallel() wrk := *initWorker(&statusHandlerMock.AppStatusHandlerStub{}) - err := wrk.ProcessReceivedMessage(nil, fromConnectedPeerId) + err := wrk.ProcessReceivedMessage(nil, fromConnectedPeerId, &p2pmocks.MessengerStub{}) time.Sleep(time.Second) assert.Equal(t, 0, len(wrk.ReceivedMessages()[bls.MtBlockBody])) @@ -533,7 +533,7 @@ func TestWorker_ProcessReceivedMessageNilMessageShouldErr(t *testing.T) { func TestWorker_ProcessReceivedMessageNilMessageDataFieldShouldErr(t *testing.T) { t.Parallel() wrk := *initWorker(&statusHandlerMock.AppStatusHandlerStub{}) - err := wrk.ProcessReceivedMessage(&p2pmocks.P2PMessageMock{}, fromConnectedPeerId) + err := wrk.ProcessReceivedMessage(&p2pmocks.P2PMessageMock{}, fromConnectedPeerId, &p2pmocks.MessengerStub{}) time.Sleep(time.Second) assert.Equal(t, 0, len(wrk.ReceivedMessages()[bls.MtBlockBody])) @@ -548,6 +548,7 @@ func TestWorker_ProcessReceivedMessageEmptySignatureFieldShouldErr(t *testing.T) DataField: []byte("data field"), }, fromConnectedPeerId, + &p2pmocks.MessengerStub{}, ) time.Sleep(time.Second) @@ -575,6 +576,7 @@ func TestWorker_ProcessReceivedMessageRedundancyNodeShouldResetInactivityIfNeede SignatureField: []byte("signature"), }, fromConnectedPeerId, + &p2pmocks.MessengerStub{}, ) assert.True(t, wasCalled) @@ -608,6 +610,7 @@ func TestWorker_ProcessReceivedMessageNodeNotInEligibleListShouldErr(t *testing. 
SignatureField: []byte("signature"), }, fromConnectedPeerId, + &p2pmocks.MessengerStub{}, ) time.Sleep(time.Second) @@ -720,7 +723,7 @@ func testWorkerProcessReceivedMessageComputeReceivedProposedBlockMetric( PeerField: currentPid, SignatureField: []byte("signature"), } - _ = wrk.ProcessReceivedMessage(msg, "") + _ = wrk.ProcessReceivedMessage(msg, "", &p2pmocks.MessengerStub{}) return receivedValue } @@ -754,6 +757,7 @@ func TestWorker_ProcessReceivedMessageInconsistentChainIDInConsensusMessageShoul SignatureField: []byte("signature"), }, fromConnectedPeerId, + &p2pmocks.MessengerStub{}, ) assert.True(t, errors.Is(err, spos.ErrInvalidChainID)) @@ -787,6 +791,7 @@ func TestWorker_ProcessReceivedMessageTypeInvalidShouldErr(t *testing.T) { SignatureField: []byte("signature"), }, fromConnectedPeerId, + &p2pmocks.MessengerStub{}, ) time.Sleep(time.Second) @@ -822,6 +827,7 @@ func TestWorker_ProcessReceivedHeaderHashSizeInvalidShouldErr(t *testing.T) { SignatureField: []byte("signature"), }, fromConnectedPeerId, + &p2pmocks.MessengerStub{}, ) time.Sleep(time.Second) @@ -857,6 +863,7 @@ func TestWorker_ProcessReceivedMessageForFutureRoundShouldErr(t *testing.T) { SignatureField: []byte("signature"), }, fromConnectedPeerId, + &p2pmocks.MessengerStub{}, ) time.Sleep(time.Second) @@ -892,6 +899,7 @@ func TestWorker_ProcessReceivedMessageForPastRoundShouldErr(t *testing.T) { SignatureField: []byte("signature"), }, fromConnectedPeerId, + &p2pmocks.MessengerStub{}, ) time.Sleep(time.Second) @@ -927,17 +935,17 @@ func TestWorker_ProcessReceivedMessageTypeLimitReachedShouldErr(t *testing.T) { SignatureField: []byte("signature"), } - err := wrk.ProcessReceivedMessage(msg, fromConnectedPeerId) + err := wrk.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) time.Sleep(time.Second) assert.Equal(t, 1, len(wrk.ReceivedMessages()[bls.MtBlockBody])) assert.Nil(t, err) - err = wrk.ProcessReceivedMessage(msg, fromConnectedPeerId) + err = wrk.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) time.Sleep(time.Second) assert.Equal(t, 1, len(wrk.ReceivedMessages()[bls.MtBlockBody])) assert.True(t, errors.Is(err, spos.ErrMessageTypeLimitReached)) - err = wrk.ProcessReceivedMessage(msg, fromConnectedPeerId) + err = wrk.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) time.Sleep(time.Second) assert.Equal(t, 1, len(wrk.ReceivedMessages()[bls.MtBlockBody])) assert.True(t, errors.Is(err, spos.ErrMessageTypeLimitReached)) @@ -971,6 +979,7 @@ func TestWorker_ProcessReceivedMessageInvalidSignatureShouldErr(t *testing.T) { SignatureField: []byte("signature"), }, fromConnectedPeerId, + &p2pmocks.MessengerStub{}, ) time.Sleep(time.Second) @@ -1005,7 +1014,7 @@ func TestWorker_ProcessReceivedMessageReceivedMessageIsFromSelfShouldRetNilAndNo PeerField: currentPid, SignatureField: []byte("signature"), } - err := wrk.ProcessReceivedMessage(msg, fromConnectedPeerId) + err := wrk.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) time.Sleep(time.Second) assert.Equal(t, 0, len(wrk.ReceivedMessages()[bls.MtBlockBody])) @@ -1040,7 +1049,7 @@ func TestWorker_ProcessReceivedMessageWhenRoundIsCanceledShouldRetNilAndNotProce PeerField: currentPid, SignatureField: []byte("signature"), } - err := wrk.ProcessReceivedMessage(msg, fromConnectedPeerId) + err := wrk.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) time.Sleep(time.Second) assert.Equal(t, 0, len(wrk.ReceivedMessages()[bls.MtBlockBody])) @@ -1092,6 +1101,7 
@@ func TestWorker_ProcessReceivedMessageWrongChainIDInProposedBlockShouldError(t * SignatureField: []byte("signature"), }, fromConnectedPeerId, + &p2pmocks.MessengerStub{}, ) time.Sleep(time.Second) @@ -1146,7 +1156,7 @@ func TestWorker_ProcessReceivedMessageWithABadOriginatorShouldErr(t *testing.T) PeerField: "other originator", SignatureField: []byte("signature"), } - err := wrk.ProcessReceivedMessage(msg, fromConnectedPeerId) + err := wrk.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) time.Sleep(time.Second) assert.Equal(t, 0, len(wrk.ReceivedMessages()[bls.MtBlockHeader])) @@ -1215,7 +1225,7 @@ func TestWorker_ProcessReceivedMessageOkValsShouldWork(t *testing.T) { PeerField: currentPid, SignatureField: []byte("signature"), } - err := wrk.ProcessReceivedMessage(msg, fromConnectedPeerId) + err := wrk.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) time.Sleep(time.Second) assert.Equal(t, 1, len(wrk.ReceivedMessages()[bls.MtBlockHeader])) @@ -1741,7 +1751,7 @@ func TestWorker_ProcessReceivedMessageWrongHeaderShouldErr(t *testing.T) { PeerField: currentPid, SignatureField: []byte("signature"), } - err := wrk.ProcessReceivedMessage(msg, "") + err := wrk.ProcessReceivedMessage(msg, "", &p2pmocks.MessengerStub{}) assert.True(t, errors.Is(err, spos.ErrInvalidHeader)) } @@ -1786,7 +1796,7 @@ func TestWorker_ProcessReceivedMessageWithSignature(t *testing.T) { PeerField: currentPid, SignatureField: []byte("signature"), } - err = wrk.ProcessReceivedMessage(msg, "") + err = wrk.ProcessReceivedMessage(msg, "", &p2pmocks.MessengerStub{}) assert.Nil(t, err) p2pMsgWithSignature, ok := wrk.ConsensusState().GetMessageWithSignature(string(pubKey)) diff --git a/dataRetriever/dataPool/dataPool_test.go b/dataRetriever/dataPool/dataPool_test.go index 11a94c5e488..b948b7f2d44 100644 --- a/dataRetriever/dataPool/dataPool_test.go +++ b/dataRetriever/dataPool/dataPool_test.go @@ -128,7 +128,7 @@ func TestNewDataPool_NilPeerAuthenticationsShouldErr(t *testing.T) { args.PeerAuthentications = nil tdp, err := dataPool.NewDataPool(args) - assert.Equal(t, dataRetriever.ErrNilPeerAuthenticationPool, err) + assert.True(t, errors.Is(err, dataRetriever.ErrNilPeerAuthenticationPool)) assert.Nil(t, tdp) } @@ -139,7 +139,7 @@ func TestNewDataPool_NilHeartbeatsShouldErr(t *testing.T) { args.Heartbeats = nil tdp, err := dataPool.NewDataPool(args) - assert.Equal(t, dataRetriever.ErrNilHeartbeatPool, err) + assert.True(t, errors.Is(err, dataRetriever.ErrNilHeartbeatPool)) assert.Nil(t, tdp) } diff --git a/dataRetriever/factory/requestersContainer/args.go b/dataRetriever/factory/requestersContainer/args.go index 9e37166f9d9..96f09453cb9 100644 --- a/dataRetriever/factory/requestersContainer/args.go +++ b/dataRetriever/factory/requestersContainer/args.go @@ -11,14 +11,16 @@ import ( // FactoryArgs will hold the arguments for RequestersContainerFactory for both shard and meta type FactoryArgs struct { - RequesterConfig config.RequesterConfig - ShardCoordinator sharding.Coordinator - Messenger dataRetriever.TopicMessageHandler - Marshaller marshal.Marshalizer - Uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter - OutputAntifloodHandler dataRetriever.P2PAntifloodHandler - CurrentNetworkEpochProvider dataRetriever.CurrentNetworkEpochProviderHandler - PreferredPeersHolder p2p.PreferredPeersHolderHandler - PeersRatingHandler dataRetriever.PeersRatingHandler - SizeCheckDelta uint32 + RequesterConfig config.RequesterConfig + ShardCoordinator sharding.Coordinator + 
MainMessenger p2p.Messenger + FullArchiveMessenger p2p.Messenger + Marshaller marshal.Marshalizer + Uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter + OutputAntifloodHandler dataRetriever.P2PAntifloodHandler + CurrentNetworkEpochProvider dataRetriever.CurrentNetworkEpochProviderHandler + MainPreferredPeersHolder p2p.PreferredPeersHolderHandler + FullArchivePreferredPeersHolder p2p.PreferredPeersHolderHandler + PeersRatingHandler dataRetriever.PeersRatingHandler + SizeCheckDelta uint32 } diff --git a/dataRetriever/factory/requestersContainer/baseRequestersContainerFactory.go b/dataRetriever/factory/requestersContainer/baseRequestersContainerFactory.go index 562405c37fb..2ec10054d8d 100644 --- a/dataRetriever/factory/requestersContainer/baseRequestersContainerFactory.go +++ b/dataRetriever/factory/requestersContainer/baseRequestersContainerFactory.go @@ -11,6 +11,7 @@ import ( "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dataRetriever/requestHandlers/requesters" topicsender "github.com/multiversx/mx-chain-go/dataRetriever/topicSender" + "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/process/factory" "github.com/multiversx/mx-chain-go/sharding" logger "github.com/multiversx/mx-chain-logger-go" @@ -22,29 +23,34 @@ const EmptyExcludePeersOnTopic = "" var log = logger.GetOrCreate("dataRetriever/factory/requesterscontainer") type baseRequestersContainerFactory struct { - container dataRetriever.RequestersContainer - shardCoordinator sharding.Coordinator - messenger dataRetriever.TopicMessageHandler - marshaller marshal.Marshalizer - uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter - intRandomizer dataRetriever.IntRandomizer - outputAntifloodHandler dataRetriever.P2PAntifloodHandler - intraShardTopic string - currentNetworkEpochProvider dataRetriever.CurrentNetworkEpochProviderHandler - preferredPeersHolder dataRetriever.PreferredPeersHolderHandler - peersRatingHandler dataRetriever.PeersRatingHandler - numCrossShardPeers int - numIntraShardPeers int - numTotalPeers int - numFullHistoryPeers int + container dataRetriever.RequestersContainer + shardCoordinator sharding.Coordinator + mainMessenger p2p.Messenger + fullArchiveMessenger p2p.Messenger + marshaller marshal.Marshalizer + uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter + intRandomizer dataRetriever.IntRandomizer + outputAntifloodHandler dataRetriever.P2PAntifloodHandler + intraShardTopic string + currentNetworkEpochProvider dataRetriever.CurrentNetworkEpochProviderHandler + mainPreferredPeersHolder dataRetriever.PreferredPeersHolderHandler + fullArchivePreferredPeersHolder dataRetriever.PreferredPeersHolderHandler + peersRatingHandler dataRetriever.PeersRatingHandler + numCrossShardPeers int + numIntraShardPeers int + numTotalPeers int + numFullHistoryPeers int } func (brcf *baseRequestersContainerFactory) checkParams() error { if check.IfNil(brcf.shardCoordinator) { return dataRetriever.ErrNilShardCoordinator } - if check.IfNil(brcf.messenger) { - return dataRetriever.ErrNilMessenger + if check.IfNil(brcf.mainMessenger) { + return fmt.Errorf("%w on main network", dataRetriever.ErrNilMessenger) + } + if check.IfNil(brcf.fullArchiveMessenger) { + return fmt.Errorf("%w on full archive network", dataRetriever.ErrNilMessenger) } if check.IfNil(brcf.marshaller) { return dataRetriever.ErrNilMarshalizer @@ -58,8 +64,11 @@ func (brcf *baseRequestersContainerFactory) checkParams() error { if 
check.IfNil(brcf.currentNetworkEpochProvider) { return dataRetriever.ErrNilCurrentNetworkEpochProvider } - if check.IfNil(brcf.preferredPeersHolder) { - return dataRetriever.ErrNilPreferredPeersHolder + if check.IfNil(brcf.mainPreferredPeersHolder) { + return fmt.Errorf("%w on main network", dataRetriever.ErrNilPreferredPeersHolder) + } + if check.IfNil(brcf.fullArchivePreferredPeersHolder) { + return fmt.Errorf("%w on full archive network", dataRetriever.ErrNilPreferredPeersHolder) } if check.IfNil(brcf.peersRatingHandler) { return dataRetriever.ErrNilPeersRatingHandler @@ -260,18 +269,20 @@ func (brcf *baseRequestersContainerFactory) createOneRequestSenderWithSpecifiedN "topic", topic, "intraShardTopic", brcf.intraShardTopic, "excludedTopic", excludedTopic, "numCrossShardPeers", numCrossShardPeers, "numIntraShardPeers", numIntraShardPeers) - peerListCreator, err := topicsender.NewDiffPeerListCreator(brcf.messenger, topic, brcf.intraShardTopic, excludedTopic) + peerListCreator, err := topicsender.NewDiffPeerListCreator(brcf.mainMessenger, topic, brcf.intraShardTopic, excludedTopic) if err != nil { return nil, err } arg := topicsender.ArgTopicRequestSender{ ArgBaseTopicSender: topicsender.ArgBaseTopicSender{ - Messenger: brcf.messenger, - TopicName: topic, - OutputAntiflooder: brcf.outputAntifloodHandler, - PreferredPeersHolder: brcf.preferredPeersHolder, - TargetShardId: targetShardId, + MainMessenger: brcf.mainMessenger, + FullArchiveMessenger: brcf.fullArchiveMessenger, + TopicName: topic, + OutputAntiflooder: brcf.outputAntifloodHandler, + MainPreferredPeersHolder: brcf.mainPreferredPeersHolder, + FullArchivePreferredPeersHolder: brcf.fullArchivePreferredPeersHolder, + TargetShardId: targetShardId, }, Marshaller: brcf.marshaller, Randomizer: brcf.intRandomizer, diff --git a/dataRetriever/factory/requestersContainer/metaRequestersContainerFactory.go b/dataRetriever/factory/requestersContainer/metaRequestersContainerFactory.go index 5abe87ed961..c718f5b22a1 100644 --- a/dataRetriever/factory/requestersContainer/metaRequestersContainerFactory.go +++ b/dataRetriever/factory/requestersContainer/metaRequestersContainerFactory.go @@ -27,20 +27,22 @@ func NewMetaRequestersContainerFactory( numIntraShardPeers := args.RequesterConfig.NumTotalPeers - args.RequesterConfig.NumCrossShardPeers container := containers.NewRequestersContainer() base := &baseRequestersContainerFactory{ - container: container, - shardCoordinator: args.ShardCoordinator, - messenger: args.Messenger, - marshaller: args.Marshaller, - uint64ByteSliceConverter: args.Uint64ByteSliceConverter, - intRandomizer: &random.ConcurrentSafeIntRandomizer{}, - outputAntifloodHandler: args.OutputAntifloodHandler, - currentNetworkEpochProvider: args.CurrentNetworkEpochProvider, - preferredPeersHolder: args.PreferredPeersHolder, - peersRatingHandler: args.PeersRatingHandler, - numCrossShardPeers: int(args.RequesterConfig.NumCrossShardPeers), - numIntraShardPeers: int(numIntraShardPeers), - numTotalPeers: int(args.RequesterConfig.NumTotalPeers), - numFullHistoryPeers: int(args.RequesterConfig.NumFullHistoryPeers), + container: container, + shardCoordinator: args.ShardCoordinator, + mainMessenger: args.MainMessenger, + fullArchiveMessenger: args.FullArchiveMessenger, + marshaller: args.Marshaller, + uint64ByteSliceConverter: args.Uint64ByteSliceConverter, + intRandomizer: &random.ConcurrentSafeIntRandomizer{}, + outputAntifloodHandler: args.OutputAntifloodHandler, + currentNetworkEpochProvider: args.CurrentNetworkEpochProvider, + 
mainPreferredPeersHolder: args.MainPreferredPeersHolder, + fullArchivePreferredPeersHolder: args.FullArchivePreferredPeersHolder, + peersRatingHandler: args.PeersRatingHandler, + numCrossShardPeers: int(args.RequesterConfig.NumCrossShardPeers), + numIntraShardPeers: int(numIntraShardPeers), + numTotalPeers: int(args.RequesterConfig.NumTotalPeers), + numFullHistoryPeers: int(args.RequesterConfig.NumFullHistoryPeers), } err := base.checkParams() diff --git a/dataRetriever/factory/requestersContainer/metaRequestersContainerFactory_test.go b/dataRetriever/factory/requestersContainer/metaRequestersContainerFactory_test.go index 69391b79efd..e68f4c7e5a5 100644 --- a/dataRetriever/factory/requestersContainer/metaRequestersContainerFactory_test.go +++ b/dataRetriever/factory/requestersContainer/metaRequestersContainerFactory_test.go @@ -22,15 +22,26 @@ func TestNewMetaRequestersContainerFactory_NilShardCoordinatorShouldErr(t *testi assert.Equal(t, dataRetriever.ErrNilShardCoordinator, err) } -func TestNewMetaRequestersContainerFactory_NilMessengerShouldErr(t *testing.T) { +func TestNewMetaRequestersContainerFactory_NilMainMessengerShouldErr(t *testing.T) { t.Parallel() args := getArguments() - args.Messenger = nil + args.MainMessenger = nil rcf, err := requesterscontainer.NewMetaRequestersContainerFactory(args) assert.Nil(t, rcf) - assert.Equal(t, dataRetriever.ErrNilMessenger, err) + assert.True(t, errors.Is(err, dataRetriever.ErrNilMessenger)) +} + +func TestNewMetaRequestersContainerFactory_NilFullArchiveMessengerShouldErr(t *testing.T) { + t.Parallel() + + args := getArguments() + args.FullArchiveMessenger = nil + rcf, err := requesterscontainer.NewMetaRequestersContainerFactory(args) + + assert.Nil(t, rcf) + assert.True(t, errors.Is(err, dataRetriever.ErrNilMessenger)) } func TestNewMetaRequestersContainerFactory_NilMarshallerShouldErr(t *testing.T) { @@ -56,15 +67,26 @@ func TestNewMetaRequestersContainerFactory_NilMarshallerAndSizeCheckShouldErr(t assert.Equal(t, dataRetriever.ErrNilMarshalizer, err) } -func TestNewMetaRequestersContainerFactory_NilPreferredPeersHolderShouldErr(t *testing.T) { +func TestNewMetaRequestersContainerFactory_NilMainPreferredPeersHolderShouldErr(t *testing.T) { + t.Parallel() + + args := getArguments() + args.MainPreferredPeersHolder = nil + rcf, err := requesterscontainer.NewMetaRequestersContainerFactory(args) + + assert.Nil(t, rcf) + assert.True(t, errors.Is(err, dataRetriever.ErrNilPreferredPeersHolder)) +} + +func TestNewMetaRequestersContainerFactory_NilFullArchivePreferredPeersHolderShouldErr(t *testing.T) { t.Parallel() args := getArguments() - args.PreferredPeersHolder = nil + args.FullArchivePreferredPeersHolder = nil rcf, err := requesterscontainer.NewMetaRequestersContainerFactory(args) assert.Nil(t, rcf) - assert.Equal(t, dataRetriever.ErrNilPreferredPeersHolder, err) + assert.True(t, errors.Is(err, dataRetriever.ErrNilPreferredPeersHolder)) } func TestNewMetaRequestersContainerFactory_NilPeersRatingHandlerShouldErr(t *testing.T) { diff --git a/dataRetriever/factory/requestersContainer/shardRequestersContainerFactory.go b/dataRetriever/factory/requestersContainer/shardRequestersContainerFactory.go index 20ff257d738..d7468d5302d 100644 --- a/dataRetriever/factory/requestersContainer/shardRequestersContainerFactory.go +++ b/dataRetriever/factory/requestersContainer/shardRequestersContainerFactory.go @@ -26,20 +26,22 @@ func NewShardRequestersContainerFactory( numIntraShardPeers := args.RequesterConfig.NumTotalPeers - 
args.RequesterConfig.NumCrossShardPeers container := containers.NewRequestersContainer() base := &baseRequestersContainerFactory{ - container: container, - shardCoordinator: args.ShardCoordinator, - messenger: args.Messenger, - marshaller: args.Marshaller, - uint64ByteSliceConverter: args.Uint64ByteSliceConverter, - intRandomizer: &random.ConcurrentSafeIntRandomizer{}, - outputAntifloodHandler: args.OutputAntifloodHandler, - currentNetworkEpochProvider: args.CurrentNetworkEpochProvider, - preferredPeersHolder: args.PreferredPeersHolder, - peersRatingHandler: args.PeersRatingHandler, - numCrossShardPeers: int(args.RequesterConfig.NumCrossShardPeers), - numIntraShardPeers: int(numIntraShardPeers), - numTotalPeers: int(args.RequesterConfig.NumTotalPeers), - numFullHistoryPeers: int(args.RequesterConfig.NumFullHistoryPeers), + container: container, + shardCoordinator: args.ShardCoordinator, + mainMessenger: args.MainMessenger, + fullArchiveMessenger: args.FullArchiveMessenger, + marshaller: args.Marshaller, + uint64ByteSliceConverter: args.Uint64ByteSliceConverter, + intRandomizer: &random.ConcurrentSafeIntRandomizer{}, + outputAntifloodHandler: args.OutputAntifloodHandler, + currentNetworkEpochProvider: args.CurrentNetworkEpochProvider, + mainPreferredPeersHolder: args.MainPreferredPeersHolder, + fullArchivePreferredPeersHolder: args.FullArchivePreferredPeersHolder, + peersRatingHandler: args.PeersRatingHandler, + numCrossShardPeers: int(args.RequesterConfig.NumCrossShardPeers), + numIntraShardPeers: int(numIntraShardPeers), + numTotalPeers: int(args.RequesterConfig.NumTotalPeers), + numFullHistoryPeers: int(args.RequesterConfig.NumFullHistoryPeers), } err := base.checkParams() diff --git a/dataRetriever/factory/requestersContainer/shardRequestersContainerFactory_test.go b/dataRetriever/factory/requestersContainer/shardRequestersContainerFactory_test.go index 7602fd6f98d..e4c94491487 100644 --- a/dataRetriever/factory/requestersContainer/shardRequestersContainerFactory_test.go +++ b/dataRetriever/factory/requestersContainer/shardRequestersContainerFactory_test.go @@ -9,6 +9,7 @@ import ( "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dataRetriever/factory/requestersContainer" "github.com/multiversx/mx-chain-go/dataRetriever/mock" + "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -16,10 +17,10 @@ import ( var errExpected = errors.New("expected error") -func createStubTopicMessageHandler(matchStrToErrOnCreate string) dataRetriever.TopicMessageHandler { - tmhs := mock.NewTopicMessageHandlerStub() +func createMessengerStub(matchStrToErrOnCreate string) p2p.Messenger { + stub := &p2pmocks.MessengerStub{} - tmhs.CreateTopicCalled = func(name string, createChannelForTopic bool) error { + stub.CreateTopicCalled = func(name string, createChannelForTopic bool) error { if matchStrToErrOnCreate == "" { return nil } @@ -30,7 +31,7 @@ func createStubTopicMessageHandler(matchStrToErrOnCreate string) dataRetriever.T return nil } - return tmhs + return stub } func TestNewShardRequestersContainerFactory_NilShardCoordinatorShouldErr(t *testing.T) { @@ -44,15 +45,26 @@ func TestNewShardRequestersContainerFactory_NilShardCoordinatorShouldErr(t *test assert.Equal(t, dataRetriever.ErrNilShardCoordinator, err) } -func TestNewShardRequestersContainerFactory_NilMessengerShouldErr(t *testing.T) { +func 
TestNewShardRequestersContainerFactory_NilMainMessengerShouldErr(t *testing.T) { t.Parallel() args := getArguments() - args.Messenger = nil + args.MainMessenger = nil rcf, err := requesterscontainer.NewShardRequestersContainerFactory(args) assert.Nil(t, rcf) - assert.Equal(t, dataRetriever.ErrNilMessenger, err) + assert.True(t, errors.Is(err, dataRetriever.ErrNilMessenger)) +} + +func TestNewShardRequestersContainerFactory_NilFullArchiveMessengerShouldErr(t *testing.T) { + t.Parallel() + + args := getArguments() + args.FullArchiveMessenger = nil + rcf, err := requesterscontainer.NewShardRequestersContainerFactory(args) + + assert.Nil(t, rcf) + assert.True(t, errors.Is(err, dataRetriever.ErrNilMessenger)) } func TestNewShardRequestersContainerFactory_NilMarshallerShouldErr(t *testing.T) { @@ -89,15 +101,26 @@ func TestNewShardRequestersContainerFactory_NilUint64SliceConverterShouldErr(t * assert.Equal(t, dataRetriever.ErrNilUint64ByteSliceConverter, err) } -func TestNewShardRequestersContainerFactory_NilPreferredPeersHolderShouldErr(t *testing.T) { +func TestNewShardRequestersContainerFactory_NilMainPreferredPeersHolderShouldErr(t *testing.T) { + t.Parallel() + + args := getArguments() + args.MainPreferredPeersHolder = nil + rcf, err := requesterscontainer.NewShardRequestersContainerFactory(args) + + assert.Nil(t, rcf) + assert.True(t, errors.Is(err, dataRetriever.ErrNilPreferredPeersHolder)) +} + +func TestNewShardRequestersContainerFactory_NilFullArchivePreferredPeersHolderShouldErr(t *testing.T) { t.Parallel() args := getArguments() - args.PreferredPeersHolder = nil + args.FullArchivePreferredPeersHolder = nil rcf, err := requesterscontainer.NewShardRequestersContainerFactory(args) assert.Nil(t, rcf) - assert.Equal(t, dataRetriever.ErrNilPreferredPeersHolder, err) + assert.True(t, errors.Is(err, dataRetriever.ErrNilPreferredPeersHolder)) } func TestNewShardRequestersContainerFactory_NilPeersRatingHandlerShouldErr(t *testing.T) { @@ -108,7 +131,7 @@ func TestNewShardRequestersContainerFactory_NilPeersRatingHandlerShouldErr(t *te rcf, err := requesterscontainer.NewShardRequestersContainerFactory(args) assert.Nil(t, rcf) - assert.Equal(t, dataRetriever.ErrNilPeersRatingHandler, err) + assert.True(t, errors.Is(err, dataRetriever.ErrNilPeersRatingHandler)) } func TestNewShardRequestersContainerFactory_InvalidNumTotalPeersShouldErr(t *testing.T) { @@ -243,14 +266,16 @@ func getArguments() requesterscontainer.FactoryArgs { NumTotalPeers: 3, NumFullHistoryPeers: 3, }, - ShardCoordinator: mock.NewOneShardCoordinatorMock(), - Messenger: createStubTopicMessageHandler(""), - Marshaller: &mock.MarshalizerMock{}, - Uint64ByteSliceConverter: &mock.Uint64ByteSliceConverterMock{}, - OutputAntifloodHandler: &mock.P2PAntifloodHandlerStub{}, - CurrentNetworkEpochProvider: &mock.CurrentNetworkEpochProviderStub{}, - PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - PeersRatingHandler: &p2pmocks.PeersRatingHandlerStub{}, - SizeCheckDelta: 0, + ShardCoordinator: mock.NewOneShardCoordinatorMock(), + MainMessenger: createMessengerStub(""), + FullArchiveMessenger: createMessengerStub(""), + Marshaller: &mock.MarshalizerMock{}, + Uint64ByteSliceConverter: &mock.Uint64ByteSliceConverterMock{}, + OutputAntifloodHandler: &mock.P2PAntifloodHandlerStub{}, + CurrentNetworkEpochProvider: &mock.CurrentNetworkEpochProviderStub{}, + MainPreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + FullArchivePreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + PeersRatingHandler: &p2pmocks.PeersRatingHandlerStub{}, + 
SizeCheckDelta: 0, } } diff --git a/dataRetriever/factory/resolverscontainer/args.go b/dataRetriever/factory/resolverscontainer/args.go index 65ddfc326a3..1446af01b97 100644 --- a/dataRetriever/factory/resolverscontainer/args.go +++ b/dataRetriever/factory/resolverscontainer/args.go @@ -11,19 +11,21 @@ import ( // FactoryArgs will hold the arguments for ResolversContainerFactory for both shard and meta type FactoryArgs struct { - NumConcurrentResolvingJobs int32 - ShardCoordinator sharding.Coordinator - Messenger dataRetriever.TopicMessageHandler - Store dataRetriever.StorageService - Marshalizer marshal.Marshalizer - DataPools dataRetriever.PoolsHolder - Uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter - DataPacker dataRetriever.DataPacker - TriesContainer common.TriesHolder - InputAntifloodHandler dataRetriever.P2PAntifloodHandler - OutputAntifloodHandler dataRetriever.P2PAntifloodHandler - PreferredPeersHolder p2p.PreferredPeersHolderHandler - SizeCheckDelta uint32 - IsFullHistoryNode bool - PayloadValidator dataRetriever.PeerAuthenticationPayloadValidator + NumConcurrentResolvingJobs int32 + ShardCoordinator sharding.Coordinator + MainMessenger p2p.Messenger + FullArchiveMessenger p2p.Messenger + Store dataRetriever.StorageService + Marshalizer marshal.Marshalizer + DataPools dataRetriever.PoolsHolder + Uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter + DataPacker dataRetriever.DataPacker + TriesContainer common.TriesHolder + InputAntifloodHandler dataRetriever.P2PAntifloodHandler + OutputAntifloodHandler dataRetriever.P2PAntifloodHandler + MainPreferredPeersHolder p2p.PreferredPeersHolderHandler + FullArchivePreferredPeersHolder p2p.PreferredPeersHolderHandler + SizeCheckDelta uint32 + IsFullHistoryNode bool + PayloadValidator dataRetriever.PeerAuthenticationPayloadValidator } diff --git a/dataRetriever/factory/resolverscontainer/baseResolversContainerFactory.go b/dataRetriever/factory/resolverscontainer/baseResolversContainerFactory.go index 45acf32e347..c1fc1e3a16b 100644 --- a/dataRetriever/factory/resolverscontainer/baseResolversContainerFactory.go +++ b/dataRetriever/factory/resolverscontainer/baseResolversContainerFactory.go @@ -11,6 +11,7 @@ import ( "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dataRetriever/resolvers" "github.com/multiversx/mx-chain-go/dataRetriever/topicSender" + "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/process/factory" "github.com/multiversx/mx-chain-go/sharding" logger "github.com/multiversx/mx-chain-logger-go" @@ -22,30 +23,35 @@ const EmptyExcludePeersOnTopic = "" var log = logger.GetOrCreate("dataRetriever/factory/resolverscontainer") type baseResolversContainerFactory struct { - container dataRetriever.ResolversContainer - shardCoordinator sharding.Coordinator - messenger dataRetriever.TopicMessageHandler - store dataRetriever.StorageService - marshalizer marshal.Marshalizer - dataPools dataRetriever.PoolsHolder - uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter - dataPacker dataRetriever.DataPacker - triesContainer common.TriesHolder - inputAntifloodHandler dataRetriever.P2PAntifloodHandler - outputAntifloodHandler dataRetriever.P2PAntifloodHandler - throttler dataRetriever.ResolverThrottler - intraShardTopic string - isFullHistoryNode bool - preferredPeersHolder dataRetriever.PreferredPeersHolderHandler - payloadValidator dataRetriever.PeerAuthenticationPayloadValidator + container dataRetriever.ResolversContainer + 
shardCoordinator sharding.Coordinator + mainMessenger p2p.Messenger + fullArchiveMessenger p2p.Messenger + store dataRetriever.StorageService + marshalizer marshal.Marshalizer + dataPools dataRetriever.PoolsHolder + uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter + dataPacker dataRetriever.DataPacker + triesContainer common.TriesHolder + inputAntifloodHandler dataRetriever.P2PAntifloodHandler + outputAntifloodHandler dataRetriever.P2PAntifloodHandler + throttler dataRetriever.ResolverThrottler + intraShardTopic string + isFullHistoryNode bool + mainPreferredPeersHolder dataRetriever.PreferredPeersHolderHandler + fullArchivePreferredPeersHolder dataRetriever.PreferredPeersHolderHandler + payloadValidator dataRetriever.PeerAuthenticationPayloadValidator } func (brcf *baseResolversContainerFactory) checkParams() error { if check.IfNil(brcf.shardCoordinator) { return dataRetriever.ErrNilShardCoordinator } - if check.IfNil(brcf.messenger) { - return dataRetriever.ErrNilMessenger + if check.IfNil(brcf.mainMessenger) { + return fmt.Errorf("%w for main network", dataRetriever.ErrNilMessenger) + } + if check.IfNil(brcf.fullArchiveMessenger) { + return fmt.Errorf("%w for full archive network", dataRetriever.ErrNilMessenger) } if check.IfNil(brcf.store) { return dataRetriever.ErrNilStore @@ -74,8 +80,11 @@ func (brcf *baseResolversContainerFactory) checkParams() error { if check.IfNil(brcf.throttler) { return dataRetriever.ErrNilThrottler } - if check.IfNil(brcf.preferredPeersHolder) { - return dataRetriever.ErrNilPreferredPeersHolder + if check.IfNil(brcf.mainPreferredPeersHolder) { + return fmt.Errorf("%w for main network", dataRetriever.ErrNilPreferredPeersHolder) + } + if check.IfNil(brcf.fullArchivePreferredPeersHolder) { + return fmt.Errorf("%w for full archive network", dataRetriever.ErrNilPreferredPeersHolder) } return nil @@ -155,7 +164,12 @@ func (brcf *baseResolversContainerFactory) createTxResolver( return nil, err } - err = brcf.messenger.RegisterMessageProcessor(resolver.RequestTopic(), common.DefaultResolversIdentifier, resolver) + err = brcf.mainMessenger.RegisterMessageProcessor(resolver.RequestTopic(), common.DefaultResolversIdentifier, resolver) + if err != nil { + return nil, err + } + + err = brcf.fullArchiveMessenger.RegisterMessageProcessor(resolver.RequestTopic(), common.DefaultResolversIdentifier, resolver) if err != nil { return nil, err } @@ -237,7 +251,12 @@ func (brcf *baseResolversContainerFactory) createMiniBlocksResolver( return nil, err } - err = brcf.messenger.RegisterMessageProcessor(txBlkResolver.RequestTopic(), common.DefaultResolversIdentifier, txBlkResolver) + err = brcf.mainMessenger.RegisterMessageProcessor(txBlkResolver.RequestTopic(), common.DefaultResolversIdentifier, txBlkResolver) + if err != nil { + return nil, err + } + + err = brcf.fullArchiveMessenger.RegisterMessageProcessor(txBlkResolver.RequestTopic(), common.DefaultResolversIdentifier, txBlkResolver) if err != nil { return nil, err } @@ -269,7 +288,12 @@ func (brcf *baseResolversContainerFactory) generatePeerAuthenticationResolver() return err } - err = brcf.messenger.RegisterMessageProcessor(peerAuthResolver.RequestTopic(), common.DefaultResolversIdentifier, peerAuthResolver) + err = brcf.mainMessenger.RegisterMessageProcessor(peerAuthResolver.RequestTopic(), common.DefaultResolversIdentifier, peerAuthResolver) + if err != nil { + return err + } + + err = brcf.fullArchiveMessenger.RegisterMessageProcessor(peerAuthResolver.RequestTopic(), common.DefaultResolversIdentifier, 
peerAuthResolver) if err != nil { return err } @@ -288,11 +312,13 @@ func (brcf *baseResolversContainerFactory) createOneResolverSenderWithSpecifiedN arg := topicsender.ArgTopicResolverSender{ ArgBaseTopicSender: topicsender.ArgBaseTopicSender{ - Messenger: brcf.messenger, - TopicName: topic, - OutputAntiflooder: brcf.outputAntifloodHandler, - PreferredPeersHolder: brcf.preferredPeersHolder, - TargetShardId: targetShardId, + MainMessenger: brcf.mainMessenger, + FullArchiveMessenger: brcf.fullArchiveMessenger, + TopicName: topic, + OutputAntiflooder: brcf.outputAntifloodHandler, + MainPreferredPeersHolder: brcf.mainPreferredPeersHolder, + FullArchivePreferredPeersHolder: brcf.fullArchivePreferredPeersHolder, + TargetShardId: targetShardId, }, } // TODO instantiate topic sender resolver with the shard IDs for which this resolver is supposed to serve the data @@ -334,7 +360,12 @@ func (brcf *baseResolversContainerFactory) createTrieNodesResolver( return nil, err } - err = brcf.messenger.RegisterMessageProcessor(resolver.RequestTopic(), common.DefaultResolversIdentifier, resolver) + err = brcf.mainMessenger.RegisterMessageProcessor(resolver.RequestTopic(), common.DefaultResolversIdentifier, resolver) + if err != nil { + return nil, err + } + + err = brcf.fullArchiveMessenger.RegisterMessageProcessor(resolver.RequestTopic(), common.DefaultResolversIdentifier, resolver) if err != nil { return nil, err } @@ -370,7 +401,12 @@ func (brcf *baseResolversContainerFactory) generateValidatorInfoResolver() error return err } - err = brcf.messenger.RegisterMessageProcessor(validatorInfoResolver.RequestTopic(), common.DefaultResolversIdentifier, validatorInfoResolver) + err = brcf.mainMessenger.RegisterMessageProcessor(validatorInfoResolver.RequestTopic(), common.DefaultResolversIdentifier, validatorInfoResolver) + if err != nil { + return err + } + + err = brcf.fullArchiveMessenger.RegisterMessageProcessor(validatorInfoResolver.RequestTopic(), common.DefaultResolversIdentifier, validatorInfoResolver) if err != nil { return err } diff --git a/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory.go b/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory.go index 889481e9fde..426a978ae20 100644 --- a/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory.go +++ b/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory.go @@ -34,21 +34,23 @@ func NewMetaResolversContainerFactory( container := containers.NewResolversContainer() base := &baseResolversContainerFactory{ - container: container, - shardCoordinator: args.ShardCoordinator, - messenger: args.Messenger, - store: args.Store, - marshalizer: args.Marshalizer, - dataPools: args.DataPools, - uint64ByteSliceConverter: args.Uint64ByteSliceConverter, - dataPacker: args.DataPacker, - triesContainer: args.TriesContainer, - inputAntifloodHandler: args.InputAntifloodHandler, - outputAntifloodHandler: args.OutputAntifloodHandler, - throttler: thr, - isFullHistoryNode: args.IsFullHistoryNode, - preferredPeersHolder: args.PreferredPeersHolder, - payloadValidator: args.PayloadValidator, + container: container, + shardCoordinator: args.ShardCoordinator, + mainMessenger: args.MainMessenger, + fullArchiveMessenger: args.FullArchiveMessenger, + store: args.Store, + marshalizer: args.Marshalizer, + dataPools: args.DataPools, + uint64ByteSliceConverter: args.Uint64ByteSliceConverter, + dataPacker: args.DataPacker, + triesContainer: args.TriesContainer, + inputAntifloodHandler: args.InputAntifloodHandler, + 
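Editor's note (illustrative sketch, not part of the patch): the hunks above repeat the same wiring for every resolver: the message processor is registered on the main messenger first and, only if that succeeds, on the full archive messenger, so a request arriving on either network reaches the same resolver. A hypothetical helper (registerOnBothNetworks is an invented name) would capture the pattern like this:

package resolverscontainer // sketch only

import "github.com/multiversx/mx-chain-go/p2p"

// registerOnBothNetworks mirrors the repeated registration code in
// baseResolversContainerFactory: same topic, same identifier, same processor,
// registered once per network, failing fast on the first error.
func registerOnBothNetworks(
	mainMessenger p2p.Messenger,
	fullArchiveMessenger p2p.Messenger,
	topic string,
	identifier string,
	processor p2p.MessageProcessor,
) error {
	err := mainMessenger.RegisterMessageProcessor(topic, identifier, processor)
	if err != nil {
		return err
	}

	return fullArchiveMessenger.RegisterMessageProcessor(topic, identifier, processor)
}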
outputAntifloodHandler: args.OutputAntifloodHandler, + throttler: thr, + isFullHistoryNode: args.IsFullHistoryNode, + mainPreferredPeersHolder: args.MainPreferredPeersHolder, + fullArchivePreferredPeersHolder: args.FullArchivePreferredPeersHolder, + payloadValidator: args.PayloadValidator, } err = base.checkParams() @@ -221,7 +223,12 @@ func (mrcf *metaResolversContainerFactory) createShardHeaderResolver( return nil, err } - err = mrcf.messenger.RegisterMessageProcessor(resolver.RequestTopic(), common.DefaultResolversIdentifier, resolver) + err = mrcf.mainMessenger.RegisterMessageProcessor(resolver.RequestTopic(), common.DefaultResolversIdentifier, resolver) + if err != nil { + return nil, err + } + + err = mrcf.fullArchiveMessenger.RegisterMessageProcessor(resolver.RequestTopic(), common.DefaultResolversIdentifier, resolver) if err != nil { return nil, err } @@ -279,7 +286,12 @@ func (mrcf *metaResolversContainerFactory) createMetaChainHeaderResolver( return nil, err } - err = mrcf.messenger.RegisterMessageProcessor(resolver.RequestTopic(), common.DefaultResolversIdentifier, resolver) + err = mrcf.mainMessenger.RegisterMessageProcessor(resolver.RequestTopic(), common.DefaultResolversIdentifier, resolver) + if err != nil { + return nil, err + } + + err = mrcf.fullArchiveMessenger.RegisterMessageProcessor(resolver.RequestTopic(), common.DefaultResolversIdentifier, resolver) if err != nil { return nil, err } diff --git a/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory_test.go b/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory_test.go index bb396cbcb7b..c6659693d79 100644 --- a/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory_test.go +++ b/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory_test.go @@ -22,10 +22,10 @@ import ( "github.com/stretchr/testify/assert" ) -func createStubTopicMessageHandlerForMeta(matchStrToErrOnCreate string, matchStrToErrOnRegister string) dataRetriever.TopicMessageHandler { - tmhs := mock.NewTopicMessageHandlerStub() +func createStubMessengerForMeta(matchStrToErrOnCreate string, matchStrToErrOnRegister string) p2p.Messenger { + stub := &p2pmocks.MessengerStub{} - tmhs.CreateTopicCalled = func(name string, createChannelForTopic bool) error { + stub.CreateTopicCalled = func(name string, createChannelForTopic bool) error { if matchStrToErrOnCreate == "" { return nil } @@ -36,7 +36,7 @@ func createStubTopicMessageHandlerForMeta(matchStrToErrOnCreate string, matchStr return nil } - tmhs.RegisterMessageProcessorCalled = func(topic string, identifier string, handler p2p.MessageProcessor) error { + stub.RegisterMessageProcessorCalled = func(topic string, identifier string, handler p2p.MessageProcessor) error { if matchStrToErrOnRegister == "" { return nil } @@ -47,7 +47,7 @@ func createStubTopicMessageHandlerForMeta(matchStrToErrOnCreate string, matchStr return nil } - return tmhs + return stub } func createDataPoolsForMeta() dataRetriever.PoolsHolder { @@ -111,15 +111,26 @@ func TestNewMetaResolversContainerFactory_NilShardCoordinatorShouldErr(t *testin assert.Equal(t, dataRetriever.ErrNilShardCoordinator, err) } -func TestNewMetaResolversContainerFactory_NilMessengerShouldErr(t *testing.T) { +func TestNewMetaResolversContainerFactory_NilMainMessengerShouldErr(t *testing.T) { t.Parallel() args := getArgumentsMeta() - args.Messenger = nil + args.MainMessenger = nil rcf, err := resolverscontainer.NewMetaResolversContainerFactory(args) assert.Nil(t, rcf) - assert.Equal(t, 
dataRetriever.ErrNilMessenger, err) + assert.True(t, errors.Is(err, dataRetriever.ErrNilMessenger)) +} + +func TestNewMetaResolversContainerFactory_NilFullArchiveMessengerShouldErr(t *testing.T) { + t.Parallel() + + args := getArgumentsMeta() + args.FullArchiveMessenger = nil + rcf, err := resolverscontainer.NewMetaResolversContainerFactory(args) + + assert.Nil(t, rcf) + assert.True(t, errors.Is(err, dataRetriever.ErrNilMessenger)) } func TestNewMetaResolversContainerFactory_NilStoreShouldErr(t *testing.T) { @@ -167,15 +178,26 @@ func TestNewMetaResolversContainerFactory_NilDataPoolShouldErr(t *testing.T) { assert.Equal(t, dataRetriever.ErrNilDataPoolHolder, err) } -func TestNewMetaResolversContainerFactory_NilPreferredPeersHolderShouldErr(t *testing.T) { +func TestNewMetaResolversContainerFactory_NilMainPreferredPeersHolderShouldErr(t *testing.T) { t.Parallel() args := getArgumentsMeta() - args.PreferredPeersHolder = nil + args.MainPreferredPeersHolder = nil rcf, err := resolverscontainer.NewMetaResolversContainerFactory(args) assert.Nil(t, rcf) - assert.Equal(t, dataRetriever.ErrNilPreferredPeersHolder, err) + assert.True(t, errors.Is(err, dataRetriever.ErrNilPreferredPeersHolder)) +} + +func TestNewMetaResolversContainerFactory_NilFullArchivePreferredPeersHolderShouldErr(t *testing.T) { + t.Parallel() + + args := getArgumentsMeta() + args.FullArchivePreferredPeersHolder = nil + rcf, err := resolverscontainer.NewMetaResolversContainerFactory(args) + + assert.Nil(t, rcf) + assert.True(t, errors.Is(err, dataRetriever.ErrNilPreferredPeersHolder)) } func TestNewMetaResolversContainerFactory_NilUint64SliceConverterShouldErr(t *testing.T) { @@ -235,11 +257,24 @@ func TestNewMetaResolversContainerFactory_NilOutputAntifloodHandlerShouldErr(t * // ------- Create -func TestMetaResolversContainerFactory_CreateRegisterShardHeadersForMetachainFailsShouldErr(t *testing.T) { +func TestMetaResolversContainerFactory_CreateRegisterShardHeadersForMetachainOnMainNetworkFailsShouldErr(t *testing.T) { t.Parallel() args := getArgumentsMeta() - args.Messenger = createStubTopicMessageHandlerForMeta("", factory.ShardBlocksTopic) + args.MainMessenger = createStubMessengerForMeta("", factory.ShardBlocksTopic) + rcf, _ := resolverscontainer.NewMetaResolversContainerFactory(args) + + container, err := rcf.Create() + + assert.Nil(t, container) + assert.Equal(t, errExpected, err) +} + +func TestMetaResolversContainerFactory_CreateRegisterShardHeadersForMetachainOnFullArchiveNetworkFailsShouldErr(t *testing.T) { + t.Parallel() + + args := getArgumentsMeta() + args.FullArchiveMessenger = createStubMessengerForMeta("", factory.ShardBlocksTopic) rcf, _ := resolverscontainer.NewMetaResolversContainerFactory(args) container, err := rcf.Create() @@ -269,6 +304,20 @@ func TestMetaResolversContainerFactory_With4ShardsShouldWork(t *testing.T) { shardCoordinator.CurrentShard = 1 args := getArgumentsMeta() + registerMainCnt := 0 + args.MainMessenger = &p2pmocks.MessengerStub{ + RegisterMessageProcessorCalled: func(topic string, identifier string, handler p2p.MessageProcessor) error { + registerMainCnt++ + return nil + }, + } + registerFullArchiveCnt := 0 + args.FullArchiveMessenger = &p2pmocks.MessengerStub{ + RegisterMessageProcessorCalled: func(topic string, identifier string, handler p2p.MessageProcessor) error { + registerFullArchiveCnt++ + return nil + }, + } args.ShardCoordinator = shardCoordinator rcf, _ := resolverscontainer.NewMetaResolversContainerFactory(args) @@ -286,6 +335,8 @@ func 
TestMetaResolversContainerFactory_With4ShardsShouldWork(t *testing.T) { numResolversUnsigned + numResolversTxs + numResolversTrieNodes + numResolversRewards + numResolversPeerAuth + numResolverValidatorInfo assert.Equal(t, totalResolvers, container.Len()) + assert.Equal(t, totalResolvers, registerMainCnt) + assert.Equal(t, totalResolvers, registerFullArchiveCnt) err := rcf.AddShardTrieNodeResolvers(container) assert.Nil(t, err) @@ -306,19 +357,21 @@ func TestMetaResolversContainerFactory_IsInterfaceNil(t *testing.T) { func getArgumentsMeta() resolverscontainer.FactoryArgs { return resolverscontainer.FactoryArgs{ - ShardCoordinator: mock.NewOneShardCoordinatorMock(), - Messenger: createStubTopicMessageHandlerForMeta("", ""), - Store: createStoreForMeta(), - Marshalizer: &mock.MarshalizerMock{}, - DataPools: createDataPoolsForMeta(), - Uint64ByteSliceConverter: &mock.Uint64ByteSliceConverterMock{}, - DataPacker: &mock.DataPackerStub{}, - TriesContainer: createTriesHolderForMeta(), - SizeCheckDelta: 0, - InputAntifloodHandler: &mock.P2PAntifloodHandlerStub{}, - OutputAntifloodHandler: &mock.P2PAntifloodHandlerStub{}, - NumConcurrentResolvingJobs: 10, - PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - PayloadValidator: &testscommon.PeerAuthenticationPayloadValidatorStub{}, + ShardCoordinator: mock.NewOneShardCoordinatorMock(), + MainMessenger: createStubMessengerForMeta("", ""), + FullArchiveMessenger: createStubMessengerForMeta("", ""), + Store: createStoreForMeta(), + Marshalizer: &mock.MarshalizerMock{}, + DataPools: createDataPoolsForMeta(), + Uint64ByteSliceConverter: &mock.Uint64ByteSliceConverterMock{}, + DataPacker: &mock.DataPackerStub{}, + TriesContainer: createTriesHolderForMeta(), + SizeCheckDelta: 0, + InputAntifloodHandler: &mock.P2PAntifloodHandlerStub{}, + OutputAntifloodHandler: &mock.P2PAntifloodHandlerStub{}, + NumConcurrentResolvingJobs: 10, + MainPreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + FullArchivePreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + PayloadValidator: &testscommon.PeerAuthenticationPayloadValidatorStub{}, } } diff --git a/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory.go b/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory.go index 7a4fb1a282a..28582f03bc5 100644 --- a/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory.go +++ b/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory.go @@ -32,21 +32,23 @@ func NewShardResolversContainerFactory( container := containers.NewResolversContainer() base := &baseResolversContainerFactory{ - container: container, - shardCoordinator: args.ShardCoordinator, - messenger: args.Messenger, - store: args.Store, - marshalizer: args.Marshalizer, - dataPools: args.DataPools, - uint64ByteSliceConverter: args.Uint64ByteSliceConverter, - dataPacker: args.DataPacker, - triesContainer: args.TriesContainer, - inputAntifloodHandler: args.InputAntifloodHandler, - outputAntifloodHandler: args.OutputAntifloodHandler, - throttler: thr, - isFullHistoryNode: args.IsFullHistoryNode, - preferredPeersHolder: args.PreferredPeersHolder, - payloadValidator: args.PayloadValidator, + container: container, + shardCoordinator: args.ShardCoordinator, + mainMessenger: args.MainMessenger, + fullArchiveMessenger: args.FullArchiveMessenger, + store: args.Store, + marshalizer: args.Marshalizer, + dataPools: args.DataPools, + uint64ByteSliceConverter: args.Uint64ByteSliceConverter, + dataPacker: args.DataPacker, + triesContainer: args.TriesContainer, + 
inputAntifloodHandler: args.InputAntifloodHandler, + outputAntifloodHandler: args.OutputAntifloodHandler, + throttler: thr, + isFullHistoryNode: args.IsFullHistoryNode, + mainPreferredPeersHolder: args.MainPreferredPeersHolder, + fullArchivePreferredPeersHolder: args.FullArchivePreferredPeersHolder, + payloadValidator: args.PayloadValidator, } err = base.checkParams() @@ -166,7 +168,12 @@ func (srcf *shardResolversContainerFactory) generateHeaderResolvers() error { return err } - err = srcf.messenger.RegisterMessageProcessor(resolver.RequestTopic(), common.DefaultResolversIdentifier, resolver) + err = srcf.mainMessenger.RegisterMessageProcessor(resolver.RequestTopic(), common.DefaultResolversIdentifier, resolver) + if err != nil { + return err + } + + err = srcf.fullArchiveMessenger.RegisterMessageProcessor(resolver.RequestTopic(), common.DefaultResolversIdentifier, resolver) if err != nil { return err } @@ -214,7 +221,12 @@ func (srcf *shardResolversContainerFactory) generateMetablockHeaderResolvers() e return err } - err = srcf.messenger.RegisterMessageProcessor(resolver.RequestTopic(), common.DefaultResolversIdentifier, resolver) + err = srcf.mainMessenger.RegisterMessageProcessor(resolver.RequestTopic(), common.DefaultResolversIdentifier, resolver) + if err != nil { + return err + } + + err = srcf.fullArchiveMessenger.RegisterMessageProcessor(resolver.RequestTopic(), common.DefaultResolversIdentifier, resolver) if err != nil { return err } diff --git a/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory_test.go b/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory_test.go index 51195e6c5a8..4d6ca351195 100644 --- a/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory_test.go +++ b/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory_test.go @@ -24,10 +24,10 @@ import ( var errExpected = errors.New("expected error") -func createStubTopicMessageHandlerForShard(matchStrToErrOnCreate string, matchStrToErrOnRegister string) dataRetriever.TopicMessageHandler { - tmhs := mock.NewTopicMessageHandlerStub() +func createMessengerStubForShard(matchStrToErrOnCreate string, matchStrToErrOnRegister string) p2p.Messenger { + stub := &p2pmocks.MessengerStub{} - tmhs.CreateTopicCalled = func(name string, createChannelForTopic bool) error { + stub.CreateTopicCalled = func(name string, createChannelForTopic bool) error { if matchStrToErrOnCreate == "" { return nil } @@ -39,7 +39,7 @@ func createStubTopicMessageHandlerForShard(matchStrToErrOnCreate string, matchSt return nil } - tmhs.RegisterMessageProcessorCalled = func(topic string, identifier string, handler p2p.MessageProcessor) error { + stub.RegisterMessageProcessorCalled = func(topic string, identifier string, handler p2p.MessageProcessor) error { if matchStrToErrOnRegister == "" { return nil } @@ -51,7 +51,7 @@ func createStubTopicMessageHandlerForShard(matchStrToErrOnCreate string, matchSt return nil } - return tmhs + return stub } func createDataPoolsForShard() dataRetriever.PoolsHolder { @@ -117,15 +117,26 @@ func TestNewShardResolversContainerFactory_NilShardCoordinatorShouldErr(t *testi assert.Equal(t, dataRetriever.ErrNilShardCoordinator, err) } -func TestNewShardResolversContainerFactory_NilMessengerShouldErr(t *testing.T) { +func TestNewShardResolversContainerFactory_NilMainMessengerShouldErr(t *testing.T) { t.Parallel() args := getArgumentsShard() - args.Messenger = nil + args.MainMessenger = nil rcf, err := 
resolverscontainer.NewShardResolversContainerFactory(args) assert.Nil(t, rcf) - assert.Equal(t, dataRetriever.ErrNilMessenger, err) + assert.True(t, errors.Is(err, dataRetriever.ErrNilMessenger)) +} + +func TestNewShardResolversContainerFactory_NilFullArchiveMessengerShouldErr(t *testing.T) { + t.Parallel() + + args := getArgumentsShard() + args.FullArchiveMessenger = nil + rcf, err := resolverscontainer.NewShardResolversContainerFactory(args) + + assert.Nil(t, rcf) + assert.True(t, errors.Is(err, dataRetriever.ErrNilMessenger)) } func TestNewShardResolversContainerFactory_NilStoreShouldErr(t *testing.T) { @@ -195,15 +206,26 @@ func TestNewShardResolversContainerFactory_NilDataPackerShouldErr(t *testing.T) assert.Equal(t, dataRetriever.ErrNilDataPacker, err) } -func TestNewShardResolversContainerFactory_NilPreferredPeersHolderShouldErr(t *testing.T) { +func TestNewShardResolversContainerFactory_NilMainPreferredPeersHolderShouldErr(t *testing.T) { + t.Parallel() + + args := getArgumentsShard() + args.MainPreferredPeersHolder = nil + rcf, err := resolverscontainer.NewShardResolversContainerFactory(args) + + assert.Nil(t, rcf) + assert.True(t, errors.Is(err, dataRetriever.ErrNilPreferredPeersHolder)) +} + +func TestNewShardResolversContainerFactory_NilFullArchivePreferredPeersHolderShouldErr(t *testing.T) { t.Parallel() args := getArgumentsShard() - args.PreferredPeersHolder = nil + args.FullArchivePreferredPeersHolder = nil rcf, err := resolverscontainer.NewShardResolversContainerFactory(args) assert.Nil(t, rcf) - assert.Equal(t, dataRetriever.ErrNilPreferredPeersHolder, err) + assert.True(t, errors.Is(err, dataRetriever.ErrNilPreferredPeersHolder)) } func TestNewShardResolversContainerFactory_NilTriesContainerShouldErr(t *testing.T) { @@ -241,11 +263,11 @@ func TestNewShardResolversContainerFactory_NilOutputAntifloodHandlerShouldErr(t // ------- Create -func TestShardResolversContainerFactory_CreateRegisterTxFailsShouldErr(t *testing.T) { +func TestShardResolversContainerFactory_CreateRegisterTxFailsOnMainNetworkShouldErr(t *testing.T) { t.Parallel() args := getArgumentsShard() - args.Messenger = createStubTopicMessageHandlerForShard("", factory.TransactionTopic) + args.MainMessenger = createMessengerStubForShard("", factory.TransactionTopic) rcf, _ := resolverscontainer.NewShardResolversContainerFactory(args) container, err := rcf.Create() @@ -254,11 +276,11 @@ func TestShardResolversContainerFactory_CreateRegisterTxFailsShouldErr(t *testin assert.Equal(t, errExpected, err) } -func TestShardResolversContainerFactory_CreateRegisterHdrFailsShouldErr(t *testing.T) { +func TestShardResolversContainerFactory_CreateRegisterTxFailsOnFullArchiveNetworkShouldErr(t *testing.T) { t.Parallel() args := getArgumentsShard() - args.Messenger = createStubTopicMessageHandlerForShard("", factory.ShardBlocksTopic) + args.FullArchiveMessenger = createMessengerStubForShard("", factory.TransactionTopic) rcf, _ := resolverscontainer.NewShardResolversContainerFactory(args) container, err := rcf.Create() @@ -267,11 +289,11 @@ func TestShardResolversContainerFactory_CreateRegisterHdrFailsShouldErr(t *testi assert.Equal(t, errExpected, err) } -func TestShardResolversContainerFactory_CreateRegisterMiniBlocksFailsShouldErr(t *testing.T) { +func TestShardResolversContainerFactory_CreateRegisterHdrFailsOnMainNetworkShouldErr(t *testing.T) { t.Parallel() args := getArgumentsShard() - args.Messenger = createStubTopicMessageHandlerForShard("", factory.MiniBlocksTopic) + args.MainMessenger = createMessengerStubForShard("", 
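Editor's note (illustrative sketch, not part of the patch): checkParams now wraps the sentinel errors per network, for example fmt.Errorf("%w for full archive network", dataRetriever.ErrNilMessenger), which is why the nil-argument tests above switch from assert.Equal to errors.Is. A minimal sketch of the difference, using only the standard errors and fmt packages:

package resolverscontainer // sketch only

import (
	"errors"
	"fmt"

	"github.com/multiversx/mx-chain-go/dataRetriever"
)

func wrappedSentinelExample() (directMatch bool, unwrappedMatch bool) {
	// checkParams-style wrapping keeps the sentinel in the error chain
	err := fmt.Errorf("%w for full archive network", dataRetriever.ErrNilMessenger)

	directMatch = err == dataRetriever.ErrNilMessenger            // false: the wrapper is a new error value
	unwrappedMatch = errors.Is(err, dataRetriever.ErrNilMessenger) // true: errors.Is unwraps %w
	return
}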
factory.ShardBlocksTopic) rcf, _ := resolverscontainer.NewShardResolversContainerFactory(args) container, err := rcf.Create() @@ -280,11 +302,11 @@ func TestShardResolversContainerFactory_CreateRegisterMiniBlocksFailsShouldErr(t assert.Equal(t, errExpected, err) } -func TestShardResolversContainerFactory_CreateRegisterTrieNodesFailsShouldErr(t *testing.T) { +func TestShardResolversContainerFactory_CreateRegisterHdrFailsOnFullArchiveNetworkShouldErr(t *testing.T) { t.Parallel() args := getArgumentsShard() - args.Messenger = createStubTopicMessageHandlerForShard("", factory.AccountTrieNodesTopic) + args.FullArchiveMessenger = createMessengerStubForShard("", factory.ShardBlocksTopic) rcf, _ := resolverscontainer.NewShardResolversContainerFactory(args) container, err := rcf.Create() @@ -293,11 +315,76 @@ func TestShardResolversContainerFactory_CreateRegisterTrieNodesFailsShouldErr(t assert.Equal(t, errExpected, err) } -func TestShardResolversContainerFactory_CreateRegisterPeerAuthenticationShouldErr(t *testing.T) { +func TestShardResolversContainerFactory_CreateRegisterMiniBlocksFailsOnMainNetworkShouldErr(t *testing.T) { t.Parallel() args := getArgumentsShard() - args.Messenger = createStubTopicMessageHandlerForShard("", common.PeerAuthenticationTopic) + args.MainMessenger = createMessengerStubForShard("", factory.MiniBlocksTopic) + rcf, _ := resolverscontainer.NewShardResolversContainerFactory(args) + + container, err := rcf.Create() + + assert.Nil(t, container) + assert.Equal(t, errExpected, err) +} + +func TestShardResolversContainerFactory_CreateRegisterMiniBlocksFailsOnFullArchiveNetworkShouldErr(t *testing.T) { + t.Parallel() + + args := getArgumentsShard() + args.FullArchiveMessenger = createMessengerStubForShard("", factory.MiniBlocksTopic) + rcf, _ := resolverscontainer.NewShardResolversContainerFactory(args) + + container, err := rcf.Create() + + assert.Nil(t, container) + assert.Equal(t, errExpected, err) +} + +func TestShardResolversContainerFactory_CreateRegisterTrieNodesFailsOnMainNetworkShouldErr(t *testing.T) { + t.Parallel() + + args := getArgumentsShard() + args.MainMessenger = createMessengerStubForShard("", factory.AccountTrieNodesTopic) + rcf, _ := resolverscontainer.NewShardResolversContainerFactory(args) + + container, err := rcf.Create() + + assert.Nil(t, container) + assert.Equal(t, errExpected, err) +} + +func TestShardResolversContainerFactory_CreateRegisterTrieNodesFailsOnFullArchiveNetworkShouldErr(t *testing.T) { + t.Parallel() + + args := getArgumentsShard() + args.FullArchiveMessenger = createMessengerStubForShard("", factory.AccountTrieNodesTopic) + rcf, _ := resolverscontainer.NewShardResolversContainerFactory(args) + + container, err := rcf.Create() + + assert.Nil(t, container) + assert.Equal(t, errExpected, err) +} + +func TestShardResolversContainerFactory_CreateRegisterPeerAuthenticationOnMainNetworkShouldErr(t *testing.T) { + t.Parallel() + + args := getArgumentsShard() + args.MainMessenger = createMessengerStubForShard("", common.PeerAuthenticationTopic) + rcf, _ := resolverscontainer.NewShardResolversContainerFactory(args) + + container, err := rcf.Create() + + assert.Nil(t, container) + assert.Equal(t, errExpected, err) +} + +func TestShardResolversContainerFactory_CreateRegisterPeerAuthenticationOnFullArchiveNetworkShouldErr(t *testing.T) { + t.Parallel() + + args := getArgumentsShard() + args.FullArchiveMessenger = createMessengerStubForShard("", common.PeerAuthenticationTopic) rcf, _ := resolverscontainer.NewShardResolversContainerFactory(args) 
container, err := rcf.Create() @@ -328,6 +415,20 @@ func TestShardResolversContainerFactory_With4ShardsShouldWork(t *testing.T) { shardCoordinator.CurrentShard = 1 args := getArgumentsShard() + registerMainCnt := 0 + args.MainMessenger = &p2pmocks.MessengerStub{ + RegisterMessageProcessorCalled: func(topic string, identifier string, handler p2p.MessageProcessor) error { + registerMainCnt++ + return nil + }, + } + registerFullArchiveCnt := 0 + args.FullArchiveMessenger = &p2pmocks.MessengerStub{ + RegisterMessageProcessorCalled: func(topic string, identifier string, handler p2p.MessageProcessor) error { + registerFullArchiveCnt++ + return nil + }, + } args.ShardCoordinator = shardCoordinator rcf, _ := resolverscontainer.NewShardResolversContainerFactory(args) @@ -346,6 +447,8 @@ func TestShardResolversContainerFactory_With4ShardsShouldWork(t *testing.T) { numResolverSCRs + numResolverRewardTxs + numResolverTrieNodes + numResolverPeerAuth + numResolverValidatorInfo assert.Equal(t, totalResolvers, container.Len()) + assert.Equal(t, totalResolvers, registerMainCnt) + assert.Equal(t, totalResolvers, registerFullArchiveCnt) } func TestShardResolversContainerFactory_IsInterfaceNil(t *testing.T) { @@ -362,19 +465,21 @@ func TestShardResolversContainerFactory_IsInterfaceNil(t *testing.T) { func getArgumentsShard() resolverscontainer.FactoryArgs { return resolverscontainer.FactoryArgs{ - ShardCoordinator: mock.NewOneShardCoordinatorMock(), - Messenger: createStubTopicMessageHandlerForShard("", ""), - Store: createStoreForShard(), - Marshalizer: &mock.MarshalizerMock{}, - DataPools: createDataPoolsForShard(), - Uint64ByteSliceConverter: &mock.Uint64ByteSliceConverterMock{}, - DataPacker: &mock.DataPackerStub{}, - TriesContainer: createTriesHolderForShard(), - SizeCheckDelta: 0, - InputAntifloodHandler: &mock.P2PAntifloodHandlerStub{}, - OutputAntifloodHandler: &mock.P2PAntifloodHandlerStub{}, - NumConcurrentResolvingJobs: 10, - PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - PayloadValidator: &testscommon.PeerAuthenticationPayloadValidatorStub{}, + ShardCoordinator: mock.NewOneShardCoordinatorMock(), + MainMessenger: createMessengerStubForShard("", ""), + FullArchiveMessenger: createMessengerStubForShard("", ""), + Store: createStoreForShard(), + Marshalizer: &mock.MarshalizerMock{}, + DataPools: createDataPoolsForShard(), + Uint64ByteSliceConverter: &mock.Uint64ByteSliceConverterMock{}, + DataPacker: &mock.DataPackerStub{}, + TriesContainer: createTriesHolderForShard(), + SizeCheckDelta: 0, + InputAntifloodHandler: &mock.P2PAntifloodHandlerStub{}, + OutputAntifloodHandler: &mock.P2PAntifloodHandlerStub{}, + NumConcurrentResolvingJobs: 10, + MainPreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + FullArchivePreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + PayloadValidator: &testscommon.PeerAuthenticationPayloadValidatorStub{}, } } diff --git a/dataRetriever/factory/storageRequestersContainer/args.go b/dataRetriever/factory/storageRequestersContainer/args.go index 026f9e133d3..528057b2255 100644 --- a/dataRetriever/factory/storageRequestersContainer/args.go +++ b/dataRetriever/factory/storageRequestersContainer/args.go @@ -8,6 +8,7 @@ import ( "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/sharding" ) @@ -19,7 +20,7 @@ type FactoryArgs struct { WorkingDirectory string Hasher hashing.Hasher ShardCoordinator sharding.Coordinator - 
Messenger dataRetriever.TopicMessageHandler + Messenger p2p.Messenger Store dataRetriever.StorageService Marshalizer marshal.Marshalizer Uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter diff --git a/dataRetriever/factory/storageRequestersContainer/baseRequestersContainerFactory.go b/dataRetriever/factory/storageRequestersContainer/baseRequestersContainerFactory.go index e0bccf04e75..f57929d6633 100644 --- a/dataRetriever/factory/storageRequestersContainer/baseRequestersContainerFactory.go +++ b/dataRetriever/factory/storageRequestersContainer/baseRequestersContainerFactory.go @@ -16,6 +16,7 @@ import ( disabledRequesters "github.com/multiversx/mx-chain-go/dataRetriever/requestHandlers/requesters/disabled" "github.com/multiversx/mx-chain-go/dataRetriever/storageRequesters" "github.com/multiversx/mx-chain-go/errors" + "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/process/factory" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/storage" @@ -28,7 +29,7 @@ const defaultBeforeGracefulClose = time.Minute type baseRequestersContainerFactory struct { container dataRetriever.RequestersContainer shardCoordinator sharding.Coordinator - messenger dataRetriever.TopicMessageHandler + messenger p2p.Messenger store dataRetriever.StorageService marshalizer marshal.Marshalizer hasher hashing.Hasher @@ -261,14 +262,14 @@ func (brcf *baseRequestersContainerFactory) newImportDBTrieStorage( } args := trieFactory.TrieCreateArgs{ - MainStorer: mainStorer, - CheckpointsStorer: checkpointsStorer, - PruningEnabled: brcf.generalConfig.StateTriesConfig.AccountsStatePruningEnabled, - CheckpointsEnabled: brcf.generalConfig.StateTriesConfig.CheckpointsEnabled, - MaxTrieLevelInMem: brcf.generalConfig.StateTriesConfig.MaxStateTrieLevelInMemory, - SnapshotsEnabled: brcf.snapshotsEnabled, - IdleProvider: disabled.NewProcessStatusHandler(), - Identifier: storageIdentifier.String(), + MainStorer: mainStorer, + CheckpointsStorer: checkpointsStorer, + PruningEnabled: brcf.generalConfig.StateTriesConfig.AccountsStatePruningEnabled, + CheckpointsEnabled: brcf.generalConfig.StateTriesConfig.CheckpointsEnabled, + MaxTrieLevelInMem: brcf.generalConfig.StateTriesConfig.MaxStateTrieLevelInMemory, + SnapshotsEnabled: brcf.snapshotsEnabled, + IdleProvider: disabled.NewProcessStatusHandler(), + Identifier: storageIdentifier.String(), EnableEpochsHandler: handler, } return trieFactoryInstance.Create(args) diff --git a/dataRetriever/factory/storageRequestersContainer/metaRequestersContainerFactory_test.go b/dataRetriever/factory/storageRequestersContainer/metaRequestersContainerFactory_test.go index 587f9914d2a..7defb4d4c09 100644 --- a/dataRetriever/factory/storageRequestersContainer/metaRequestersContainerFactory_test.go +++ b/dataRetriever/factory/storageRequestersContainer/metaRequestersContainerFactory_test.go @@ -14,15 +14,16 @@ import ( "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" + "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func createStubTopicMessageHandlerForMeta(matchStrToErrOnCreate string, matchStrToErrOnRegister string) dataRetriever.TopicMessageHandler { - tmhs := mock.NewTopicMessageHandlerStub() +func createMessengerStubForMeta(matchStrToErrOnCreate string, 
matchStrToErrOnRegister string) p2p.Messenger { + stub := &p2pmocks.MessengerStub{} - tmhs.CreateTopicCalled = func(name string, createChannelForTopic bool) error { + stub.CreateTopicCalled = func(name string, createChannelForTopic bool) error { if matchStrToErrOnCreate == "" { return nil } @@ -33,7 +34,7 @@ func createStubTopicMessageHandlerForMeta(matchStrToErrOnCreate string, matchStr return nil } - tmhs.RegisterMessageProcessorCalled = func(topic string, identifier string, handler p2p.MessageProcessor) error { + stub.RegisterMessageProcessorCalled = func(topic string, identifier string, handler p2p.MessageProcessor) error { if matchStrToErrOnRegister == "" { return nil } @@ -44,7 +45,7 @@ func createStubTopicMessageHandlerForMeta(matchStrToErrOnCreate string, matchStr return nil } - return tmhs + return stub } func createStoreForMeta() dataRetriever.StorageService { @@ -217,7 +218,7 @@ func getArgumentsMeta() storagerequesterscontainer.FactoryArgs { WorkingDirectory: "", Hasher: &hashingMocks.HasherMock{}, ShardCoordinator: mock.NewOneShardCoordinatorMock(), - Messenger: createStubTopicMessageHandlerForMeta("", ""), + Messenger: createMessengerStubForMeta("", ""), Store: createStoreForMeta(), Marshalizer: &mock.MarshalizerMock{}, Uint64ByteSliceConverter: &mock.Uint64ByteSliceConverterMock{}, diff --git a/dataRetriever/factory/storageRequestersContainer/shardRequestersContainerFactory_test.go b/dataRetriever/factory/storageRequestersContainer/shardRequestersContainerFactory_test.go index eb7a8908dfe..53139cfd2c1 100644 --- a/dataRetriever/factory/storageRequestersContainer/shardRequestersContainerFactory_test.go +++ b/dataRetriever/factory/storageRequestersContainer/shardRequestersContainerFactory_test.go @@ -14,6 +14,7 @@ import ( "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" + "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -21,10 +22,10 @@ import ( var errExpected = errors.New("expected error") -func createStubTopicMessageHandlerForShard(matchStrToErrOnCreate string, matchStrToErrOnRegister string) dataRetriever.TopicMessageHandler { - tmhs := mock.NewTopicMessageHandlerStub() +func createMessengerStubForShard(matchStrToErrOnCreate string, matchStrToErrOnRegister string) p2p.Messenger { + stub := &p2pmocks.MessengerStub{} - tmhs.CreateTopicCalled = func(name string, createChannelForTopic bool) error { + stub.CreateTopicCalled = func(name string, createChannelForTopic bool) error { if matchStrToErrOnCreate == "" { return nil } @@ -36,7 +37,7 @@ func createStubTopicMessageHandlerForShard(matchStrToErrOnCreate string, matchSt return nil } - tmhs.RegisterMessageProcessorCalled = func(topic string, identifier string, handler p2p.MessageProcessor) error { + stub.RegisterMessageProcessorCalled = func(topic string, identifier string, handler p2p.MessageProcessor) error { if matchStrToErrOnRegister == "" { return nil } @@ -48,7 +49,7 @@ func createStubTopicMessageHandlerForShard(matchStrToErrOnCreate string, matchSt return nil } - return tmhs + return stub } func createStoreForShard() dataRetriever.StorageService { @@ -202,7 +203,7 @@ func getArgumentsShard() storagerequesterscontainer.FactoryArgs { WorkingDirectory: "", Hasher: &hashingMocks.HasherMock{}, ShardCoordinator: mock.NewOneShardCoordinatorMock(), - 
Messenger: createStubTopicMessageHandlerForShard("", ""), + Messenger: createMessengerStubForShard("", ""), Store: createStoreForShard(), Marshalizer: &mock.MarshalizerMock{}, Uint64ByteSliceConverter: &mock.Uint64ByteSliceConverterMock{}, diff --git a/dataRetriever/interface.go b/dataRetriever/interface.go index 77f59710677..930b6aca124 100644 --- a/dataRetriever/interface.go +++ b/dataRetriever/interface.go @@ -21,7 +21,7 @@ type ResolverThrottler interface { // Resolver defines what a data resolver should do type Resolver interface { - ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID) error + ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID, source p2p.MessageHandler) error SetDebugHandler(handler DebugHandler) error Close() error IsInterfaceNil() bool @@ -50,7 +50,7 @@ type HeaderRequester interface { // TopicResolverSender defines what sending operations are allowed for a topic resolver type TopicResolverSender interface { - Send(buff []byte, peer core.PeerID) error + Send(buff []byte, peer core.PeerID, destination p2p.MessageHandler) error RequestTopic() string TargetShardID() uint32 SetDebugHandler(handler DebugHandler) error @@ -135,9 +135,10 @@ type ManualEpochStartNotifier interface { // MessageHandler defines the functionality needed by structs to send data to other peers type MessageHandler interface { ConnectedPeersOnTopic(topic string) []core.PeerID - ConnectedFullHistoryPeersOnTopic(topic string) []core.PeerID SendToConnectedPeer(topic string, buff []byte, peerID core.PeerID) error ID() core.PeerID + ConnectedPeers() []core.PeerID + IsConnected(peerID core.PeerID) bool IsInterfaceNil() bool } @@ -148,13 +149,6 @@ type TopicHandler interface { RegisterMessageProcessor(topic string, identifier string, handler p2p.MessageProcessor) error } -// TopicMessageHandler defines the functionality needed by structs to manage topics, message processors and to send data -// to other peers -type TopicMessageHandler interface { - MessageHandler - TopicHandler -} - // IntRandomizer interface provides functionality over generating integer numbers type IntRandomizer interface { Intn(n int) int @@ -168,7 +162,6 @@ type StorageType uint8 type PeerListCreator interface { CrossShardPeerList() []core.PeerID IntraShardPeerList() []core.PeerID - FullHistoryList() []core.PeerID IsInterfaceNil() bool } diff --git a/dataRetriever/mock/headerResolverStub.go b/dataRetriever/mock/headerResolverStub.go index 3bf8bfb0028..fa87219b082 100644 --- a/dataRetriever/mock/headerResolverStub.go +++ b/dataRetriever/mock/headerResolverStub.go @@ -26,7 +26,7 @@ func (hrs *HeaderResolverStub) SetEpochHandler(epochHandler dataRetriever.EpochH } // ProcessReceivedMessage - -func (hrs *HeaderResolverStub) ProcessReceivedMessage(message p2p.MessageP2P, _ core.PeerID) error { +func (hrs *HeaderResolverStub) ProcessReceivedMessage(message p2p.MessageP2P, _ core.PeerID, _ p2p.MessageHandler) error { if hrs.ProcessReceivedMessageCalled != nil { return hrs.ProcessReceivedMessageCalled(message) } diff --git a/dataRetriever/mock/messageHandlerStub.go b/dataRetriever/mock/messageHandlerStub.go index 3b6943bb71d..3f2998efd15 100644 --- a/dataRetriever/mock/messageHandlerStub.go +++ b/dataRetriever/mock/messageHandlerStub.go @@ -6,10 +6,11 @@ import ( // MessageHandlerStub - type MessageHandlerStub struct { - ConnectedPeersOnTopicCalled func(topic string) []core.PeerID - ConnectedFullHistoryPeersOnTopicCalled func(topic string) []core.PeerID - SendToConnectedPeerCalled 
func(topic string, buff []byte, peerID core.PeerID) error - IDCalled func() core.PeerID + ConnectedPeersOnTopicCalled func(topic string) []core.PeerID + SendToConnectedPeerCalled func(topic string, buff []byte, peerID core.PeerID) error + IDCalled func() core.PeerID + ConnectedPeersCalled func() []core.PeerID + IsConnectedCalled func(peerID core.PeerID) bool } // ConnectedPeersOnTopic - @@ -17,11 +18,6 @@ func (mhs *MessageHandlerStub) ConnectedPeersOnTopic(topic string) []core.PeerID return mhs.ConnectedPeersOnTopicCalled(topic) } -// ConnectedFullHistoryPeersOnTopic - -func (mhs *MessageHandlerStub) ConnectedFullHistoryPeersOnTopic(topic string) []core.PeerID { - return mhs.ConnectedFullHistoryPeersOnTopicCalled(topic) -} - // SendToConnectedPeer - func (mhs *MessageHandlerStub) SendToConnectedPeer(topic string, buff []byte, peerID core.PeerID) error { return mhs.SendToConnectedPeerCalled(topic, buff, peerID) @@ -36,6 +32,24 @@ func (mhs *MessageHandlerStub) ID() core.PeerID { return "" } +// ConnectedPeers - +func (mhs *MessageHandlerStub) ConnectedPeers() []core.PeerID { + if mhs.ConnectedPeersCalled != nil { + return mhs.ConnectedPeersCalled() + } + + return make([]core.PeerID, 0) +} + +// IsConnected - +func (mhs *MessageHandlerStub) IsConnected(peerID core.PeerID) bool { + if mhs.IsConnectedCalled != nil { + return mhs.IsConnectedCalled(peerID) + } + + return false +} + // IsInterfaceNil returns true if there is no value under the interface func (mhs *MessageHandlerStub) IsInterfaceNil() bool { return mhs == nil diff --git a/dataRetriever/mock/peerListCreatorStub.go b/dataRetriever/mock/peerListCreatorStub.go index 7acea5d64e0..c933aa81056 100644 --- a/dataRetriever/mock/peerListCreatorStub.go +++ b/dataRetriever/mock/peerListCreatorStub.go @@ -8,7 +8,6 @@ import ( type PeerListCreatorStub struct { CrossShardPeerListCalled func() []core.PeerID IntraShardPeerListCalled func() []core.PeerID - FullHistoryListCalled func() []core.PeerID } // CrossShardPeerList - @@ -21,11 +20,6 @@ func (p *PeerListCreatorStub) IntraShardPeerList() []core.PeerID { return p.IntraShardPeerListCalled() } -// FullHistoryList - -func (p *PeerListCreatorStub) FullHistoryList() []core.PeerID { - return p.FullHistoryListCalled() -} - // IsInterfaceNil returns true if there is no value under the interface func (p *PeerListCreatorStub) IsInterfaceNil() bool { return p == nil diff --git a/dataRetriever/mock/resolverStub.go b/dataRetriever/mock/resolverStub.go index 15e698042e4..c667c9459b2 100644 --- a/dataRetriever/mock/resolverStub.go +++ b/dataRetriever/mock/resolverStub.go @@ -14,7 +14,7 @@ type ResolverStub struct { } // ProcessReceivedMessage - -func (rs *ResolverStub) ProcessReceivedMessage(message p2p.MessageP2P, _ core.PeerID) error { +func (rs *ResolverStub) ProcessReceivedMessage(message p2p.MessageP2P, _ core.PeerID, _ p2p.MessageHandler) error { return rs.ProcessReceivedMessageCalled(message) } diff --git a/dataRetriever/mock/topicHandlerStub.go b/dataRetriever/mock/topicHandlerStub.go deleted file mode 100644 index 8d9095b300d..00000000000 --- a/dataRetriever/mock/topicHandlerStub.go +++ /dev/null @@ -1,32 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-go/p2p" -) - -// TopicHandlerStub - -type TopicHandlerStub struct { - HasTopicCalled func(name string) bool - CreateTopicCalled func(name string, createChannelForTopic bool) error - RegisterMessageProcessorCalled func(topic string, identifier string, handler p2p.MessageProcessor) error -} - -// HasTopic - -func (ths 
*TopicHandlerStub) HasTopic(name string) bool { - return ths.HasTopicCalled(name) -} - -// CreateTopic - -func (ths *TopicHandlerStub) CreateTopic(name string, createChannelForTopic bool) error { - return ths.CreateTopicCalled(name, createChannelForTopic) -} - -// RegisterMessageProcessor - -func (ths *TopicHandlerStub) RegisterMessageProcessor(topic string, identifier string, handler p2p.MessageProcessor) error { - return ths.RegisterMessageProcessorCalled(topic, identifier, handler) -} - -// IsInterfaceNil returns true if there is no value under the interface -func (ths *TopicHandlerStub) IsInterfaceNil() bool { - return ths == nil -} diff --git a/dataRetriever/mock/topicMessageHandlerStub.go b/dataRetriever/mock/topicMessageHandlerStub.go deleted file mode 100644 index 6b47a577048..00000000000 --- a/dataRetriever/mock/topicMessageHandlerStub.go +++ /dev/null @@ -1,19 +0,0 @@ -package mock - -type topicMessageHandlerStub struct { - *TopicHandlerStub - *MessageHandlerStub -} - -// NewTopicMessageHandlerStub - -func NewTopicMessageHandlerStub() *topicMessageHandlerStub { - return &topicMessageHandlerStub{ - TopicHandlerStub: &TopicHandlerStub{}, - MessageHandlerStub: &MessageHandlerStub{}, - } -} - -// IsInterfaceNil returns true if there is no value under the interface -func (s *topicMessageHandlerStub) IsInterfaceNil() bool { - return s == nil -} diff --git a/dataRetriever/mock/topicResolverSenderStub.go b/dataRetriever/mock/topicResolverSenderStub.go index 9188a9d99ef..744d0e6fef8 100644 --- a/dataRetriever/mock/topicResolverSenderStub.go +++ b/dataRetriever/mock/topicResolverSenderStub.go @@ -4,11 +4,12 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/p2p" ) // TopicResolverSenderStub - type TopicResolverSenderStub struct { - SendCalled func(buff []byte, peer core.PeerID) error + SendCalled func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error TargetShardIDCalled func() uint32 debugHandler dataRetriever.DebugHandler } @@ -19,9 +20,9 @@ func (trss *TopicResolverSenderStub) RequestTopic() string { } // Send - -func (trss *TopicResolverSenderStub) Send(buff []byte, peer core.PeerID) error { +func (trss *TopicResolverSenderStub) Send(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { if trss.SendCalled != nil { - return trss.SendCalled(buff, peer) + return trss.SendCalled(buff, peer, source) } return nil diff --git a/dataRetriever/resolvers/disabled/resolver.go b/dataRetriever/resolvers/disabled/resolver.go index 077c98d8f97..ac51a954260 100644 --- a/dataRetriever/resolvers/disabled/resolver.go +++ b/dataRetriever/resolvers/disabled/resolver.go @@ -15,7 +15,7 @@ func NewDisabledResolver() *resolver { } // ProcessReceivedMessage returns nil as it is disabled -func (r *resolver) ProcessReceivedMessage(_ p2p.MessageP2P, _ core.PeerID) error { +func (r *resolver) ProcessReceivedMessage(_ p2p.MessageP2P, _ core.PeerID, _ p2p.MessageHandler) error { return nil } diff --git a/dataRetriever/resolvers/export_test.go b/dataRetriever/resolvers/export_test.go index b13879a7d0e..360342be58a 100644 --- a/dataRetriever/resolvers/export_test.go +++ b/dataRetriever/resolvers/export_test.go @@ -11,6 +11,6 @@ func (hdrRes *HeaderResolver) EpochHandler() dataRetriever.EpochHandler { } // ResolveMultipleHashes - -func (tnRes *TrieNodeResolver) ResolveMultipleHashes(hashesBuff []byte, message p2p.MessageP2P) error { - return 
tnRes.resolveMultipleHashes(hashesBuff, message) +func (tnRes *TrieNodeResolver) ResolveMultipleHashes(hashesBuff []byte, message p2p.MessageP2P, source p2p.MessageHandler) error { + return tnRes.resolveMultipleHashes(hashesBuff, message, source) } diff --git a/dataRetriever/resolvers/headerResolver.go b/dataRetriever/resolvers/headerResolver.go index 59216068c2f..877c57a31da 100644 --- a/dataRetriever/resolvers/headerResolver.go +++ b/dataRetriever/resolvers/headerResolver.go @@ -109,7 +109,7 @@ func (hdrRes *HeaderResolver) SetEpochHandler(epochHandler dataRetriever.EpochHa // ProcessReceivedMessage will be the callback func from the p2p.Messenger and will be called each time a new message was received // (for the topic this validator was registered to, usually a request topic) -func (hdrRes *HeaderResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID) error { +func (hdrRes *HeaderResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID, source p2p.MessageHandler) error { err := hdrRes.canProcessMessage(message, fromConnectedPeer) if err != nil { return err @@ -158,7 +158,7 @@ func (hdrRes *HeaderResolver) ProcessReceivedMessage(message p2p.MessageP2P, fro hdrRes.DebugHandler().LogSucceededToResolveData(hdrRes.topic, rd.Value) - return hdrRes.Send(buff, message.Peer()) + return hdrRes.Send(buff, message.Peer(), source) } func (hdrRes *HeaderResolver) resolveHeaderFromNonce(rd *dataRetriever.RequestData) ([]byte, error) { diff --git a/dataRetriever/resolvers/headerResolver_test.go b/dataRetriever/resolvers/headerResolver_test.go index e71fff039bd..f50606a244e 100644 --- a/dataRetriever/resolvers/headerResolver_test.go +++ b/dataRetriever/resolvers/headerResolver_test.go @@ -165,7 +165,7 @@ func TestHeaderResolver_ProcessReceivedCanProcessMessageErrorsShouldErr(t *testi } hdrRes, _ := resolvers.NewHeaderResolver(arg) - err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.NonceType, nil), fromConnectedPeerId) + err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.NonceType, nil), fromConnectedPeerId, &p2pmocks.MessengerStub{}) assert.True(t, errors.Is(err, expectedErr)) assert.False(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) assert.False(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) @@ -177,7 +177,7 @@ func TestHeaderResolver_ProcessReceivedMessageNilValueShouldErr(t *testing.T) { arg := createMockArgHeaderResolver() hdrRes, _ := resolvers.NewHeaderResolver(arg) - err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.NonceType, nil), fromConnectedPeerId) + err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.NonceType, nil), fromConnectedPeerId, &p2pmocks.MessengerStub{}) assert.Equal(t, dataRetriever.ErrNilValue, err) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) @@ -190,7 +190,7 @@ func TestHeaderResolver_ProcessReceivedMessage_WrongIdentifierStartBlock(t *test hdrRes, _ := resolvers.NewHeaderResolver(arg) requestedData := []byte("request") - err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.EpochType, requestedData), "") + err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.EpochType, requestedData), "", &p2pmocks.MessengerStub{}) assert.Equal(t, core.ErrInvalidIdentifierForEpochStartBlockRequest, err) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) assert.True(t, 
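Editor's note (illustrative sketch, not part of the patch): with the interface changes above, ProcessReceivedMessage now receives the p2p.MessageHandler the request arrived on and forwards it to Send as the destination, so the reply travels back over the network the request came from, mirroring the hdrRes.Send(buff, message.Peer(), source) call in headerResolver. The echoResolver type below is invented purely to show the updated signatures:

package resolvers // sketch only

import (
	"github.com/multiversx/mx-chain-core-go/core"
	"github.com/multiversx/mx-chain-go/dataRetriever"
	"github.com/multiversx/mx-chain-go/p2p"
)

// echoResolver is a hypothetical example; it answers every request by echoing
// the received payload to the requesting peer via the originating network.
type echoResolver struct {
	sender dataRetriever.TopicResolverSender
}

func (r *echoResolver) ProcessReceivedMessage(message p2p.MessageP2P, _ core.PeerID, source p2p.MessageHandler) error {
	// the source handler of the request becomes the destination for the reply
	return r.sender.Send(message.Data(), message.Peer(), source)
}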
arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) @@ -207,7 +207,7 @@ func TestHeaderResolver_ProcessReceivedMessageEpochTypeUnknownEpochShouldWork(t } wasSent := false arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { wasSent = true return nil }, @@ -215,7 +215,7 @@ func TestHeaderResolver_ProcessReceivedMessageEpochTypeUnknownEpochShouldWork(t hdrRes, _ := resolvers.NewHeaderResolver(arg) requestedData := []byte(fmt.Sprintf("epoch_%d", math.MaxUint32)) - err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.EpochType, requestedData), "") + err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.EpochType, requestedData), "", &p2pmocks.MessengerStub{}) assert.NoError(t, err) assert.True(t, wasSent) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) @@ -234,7 +234,7 @@ func TestHeaderResolver_ProcessReceivedMessage_Ok(t *testing.T) { hdrRes, _ := resolvers.NewHeaderResolver(arg) requestedData := []byte("request_1") - err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.EpochType, requestedData), "") + err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.EpochType, requestedData), "", &p2pmocks.MessengerStub{}) assert.Nil(t, err) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) @@ -246,7 +246,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestUnknownTypeShouldErr(t *tes arg := createMockArgHeaderResolver() hdrRes, _ := resolvers.NewHeaderResolver(arg) - err := hdrRes.ProcessReceivedMessage(createRequestMsg(254, make([]byte, 0)), fromConnectedPeerId) + err := hdrRes.ProcessReceivedMessage(createRequestMsg(254, make([]byte, 0)), fromConnectedPeerId, &p2pmocks.MessengerStub{}) assert.Equal(t, dataRetriever.ErrResolveTypeUnknown, err) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) @@ -272,7 +272,7 @@ func TestHeaderResolver_ValidateRequestHashTypeFoundInHdrPoolShouldSearchAndSend arg := createMockArgHeaderResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { sendWasCalled = true return nil }, @@ -280,7 +280,7 @@ func TestHeaderResolver_ValidateRequestHashTypeFoundInHdrPoolShouldSearchAndSend arg.Headers = headers hdrRes, _ := resolvers.NewHeaderResolver(arg) - err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, requestedData), fromConnectedPeerId) + err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, requestedData), fromConnectedPeerId, &p2pmocks.MessengerStub{}) assert.Nil(t, err) assert.True(t, searchWasCalled) assert.True(t, sendWasCalled) @@ -309,7 +309,7 @@ func TestHeaderResolver_ValidateRequestHashTypeFoundInHdrPoolShouldSearchAndSend arg := createMockArgHeaderResolver() arg.IsFullHistoryNode = true arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { sendWasCalled = true return nil }, @@ -317,7 +317,7 @@ func TestHeaderResolver_ValidateRequestHashTypeFoundInHdrPoolShouldSearchAndSend arg.Headers = headers hdrRes, _ := resolvers.NewHeaderResolver(arg) 
- err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, requestedData), fromConnectedPeerId) + err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, requestedData), fromConnectedPeerId, &p2pmocks.MessengerStub{}) assert.Nil(t, err) assert.True(t, searchWasCalled) assert.True(t, sendWasCalled) @@ -352,7 +352,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestHashTypeFoundInHdrPoolMarsh arg := createMockArgHeaderResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { return nil }, } @@ -360,7 +360,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestHashTypeFoundInHdrPoolMarsh arg.Headers = headers hdrRes, _ := resolvers.NewHeaderResolver(arg) - err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, requestedData), fromConnectedPeerId) + err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, requestedData), fromConnectedPeerId, &p2pmocks.MessengerStub{}) assert.Equal(t, errExpected, err) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) @@ -391,7 +391,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestRetFromStorageShouldRetValA arg := createMockArgHeaderResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { wasSent = true return nil }, @@ -400,7 +400,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestRetFromStorageShouldRetValA arg.HdrStorage = store hdrRes, _ := resolvers.NewHeaderResolver(arg) - err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, requestedData), fromConnectedPeerId) + err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, requestedData), fromConnectedPeerId, &p2pmocks.MessengerStub{}) assert.Nil(t, err) assert.True(t, wasGotFromStorage) assert.True(t, wasSent) @@ -419,7 +419,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeInvalidSliceShould } hdrRes, _ := resolvers.NewHeaderResolver(arg) - err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.NonceType, []byte("aaa")), fromConnectedPeerId) + err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.NonceType, []byte("aaa")), fromConnectedPeerId, &p2pmocks.MessengerStub{}) assert.Equal(t, dataRetriever.ErrInvalidNonceByteSlice, err) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) @@ -446,7 +446,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceShouldCallWithTheCorre }, ) msg := &p2pmocks.P2PMessageMock{DataField: buff} - _ = hdrRes.ProcessReceivedMessage(msg, "") + _ = hdrRes.ProcessReceivedMessage(msg, "", &p2pmocks.MessengerStub{}) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) } @@ -460,7 +460,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeNotFoundInHdrNonce arg := createMockArgHeaderResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { wasSent = true return nil }, @@ -491,6 
+491,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeNotFoundInHdrNonce err := hdrRes.ProcessReceivedMessage( createRequestMsg(dataRetriever.NonceType, arg.NonceConverter.ToByteSlice(requestedNonce)), fromConnectedPeerId, + &p2pmocks.MessengerStub{}, ) assert.Equal(t, expectedErr, err) assert.False(t, wasSent) @@ -514,7 +515,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeFoundInHdrNoncePoo arg := createMockArgHeaderResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { wasSent = true return nil }, @@ -536,6 +537,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeFoundInHdrNoncePoo err := hdrRes.ProcessReceivedMessage( createRequestMsg(dataRetriever.NonceType, arg.NonceConverter.ToByteSlice(requestedNonce)), fromConnectedPeerId, + &p2pmocks.MessengerStub{}, ) assert.Nil(t, err) @@ -575,7 +577,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeFoundInHdrNoncePoo arg := createMockArgHeaderResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { wasSend = true return nil }, @@ -598,6 +600,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeFoundInHdrNoncePoo err := hdrRes.ProcessReceivedMessage( createRequestMsg(dataRetriever.NonceType, arg.NonceConverter.ToByteSlice(requestedNonce)), fromConnectedPeerId, + &p2pmocks.MessengerStub{}, ) assert.Nil(t, err) @@ -625,7 +628,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeFoundInHdrNoncePoo arg := createMockArgHeaderResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { assert.Fail(t, "should not have been called") return nil }, @@ -654,6 +657,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeFoundInHdrNoncePoo err := hdrRes.ProcessReceivedMessage( createRequestMsg(dataRetriever.NonceType, arg.NonceConverter.ToByteSlice(requestedNonce)), fromConnectedPeerId, + &p2pmocks.MessengerStub{}, ) assert.True(t, errors.Is(err, expectedErr)) @@ -679,7 +683,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeNotFoundInHdrNonce } arg := createMockArgHeaderResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { wasSend = true return nil }, @@ -695,6 +699,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeNotFoundInHdrNonce err := hdrRes.ProcessReceivedMessage( createRequestMsg(dataRetriever.NonceType, arg.NonceConverter.ToByteSlice(requestedNonce)), fromConnectedPeerId, + &p2pmocks.MessengerStub{}, ) assert.Nil(t, err) @@ -729,7 +734,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeFoundInHdrNoncePoo arg := createMockArgHeaderResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { return nil }, TargetShardIDCalled: func() uint32 { @@ -751,6 +756,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeFoundInHdrNoncePoo err := 
hdrRes.ProcessReceivedMessage( createRequestMsg(dataRetriever.NonceType, arg.NonceConverter.ToByteSlice(requestedNonce)), fromConnectedPeerId, + &p2pmocks.MessengerStub{}, ) assert.Equal(t, errExpected, err) @@ -810,7 +816,7 @@ func TestHeaderResolver_SetEpochHandlerConcurrency(t *testing.T) { assert.Nil(t, err) return } - err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.EpochType, []byte("request_1")), fromConnectedPeerId) + err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.EpochType, []byte("request_1")), fromConnectedPeerId, &p2pmocks.MessengerStub{}) assert.Nil(t, err) }(i) } diff --git a/dataRetriever/resolvers/miniblockResolver.go b/dataRetriever/resolvers/miniblockResolver.go index c67f0f4502b..0c1a1460074 100644 --- a/dataRetriever/resolvers/miniblockResolver.go +++ b/dataRetriever/resolvers/miniblockResolver.go @@ -77,7 +77,7 @@ func checkArgMiniblockResolver(arg ArgMiniblockResolver) error { // ProcessReceivedMessage will be the callback func from the p2p.Messenger and will be called each time a new message was received // (for the topic this validator was registered to, usually a request topic) -func (mbRes *miniblockResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID) error { +func (mbRes *miniblockResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID, source p2p.MessageHandler) error { err := mbRes.canProcessMessage(message, fromConnectedPeer) if err != nil { return err @@ -93,9 +93,9 @@ func (mbRes *miniblockResolver) ProcessReceivedMessage(message p2p.MessageP2P, f switch rd.Type { case dataRetriever.HashType: - err = mbRes.resolveMbRequestByHash(rd.Value, message.Peer(), rd.Epoch) + err = mbRes.resolveMbRequestByHash(rd.Value, message.Peer(), rd.Epoch, source) case dataRetriever.HashArrayType: - err = mbRes.resolveMbRequestByHashArray(rd.Value, message.Peer(), rd.Epoch) + err = mbRes.resolveMbRequestByHashArray(rd.Value, message.Peer(), rd.Epoch, source) default: err = dataRetriever.ErrRequestTypeNotImplemented } @@ -107,7 +107,7 @@ func (mbRes *miniblockResolver) ProcessReceivedMessage(message p2p.MessageP2P, f return err } -func (mbRes *miniblockResolver) resolveMbRequestByHash(hash []byte, pid core.PeerID, epoch uint32) error { +func (mbRes *miniblockResolver) resolveMbRequestByHash(hash []byte, pid core.PeerID, epoch uint32, source p2p.MessageHandler) error { mb, err := mbRes.fetchMbAsByteSlice(hash, epoch) if err != nil { return err @@ -121,7 +121,7 @@ func (mbRes *miniblockResolver) resolveMbRequestByHash(hash []byte, pid core.Pee return err } - return mbRes.Send(buffToSend, pid) + return mbRes.Send(buffToSend, pid, source) } func (mbRes *miniblockResolver) fetchMbAsByteSlice(hash []byte, epoch uint32) ([]byte, error) { @@ -146,7 +146,7 @@ func (mbRes *miniblockResolver) fetchMbAsByteSlice(hash []byte, epoch uint32) ([ return buff, nil } -func (mbRes *miniblockResolver) resolveMbRequestByHashArray(mbBuff []byte, pid core.PeerID, epoch uint32) error { +func (mbRes *miniblockResolver) resolveMbRequestByHashArray(mbBuff []byte, pid core.PeerID, epoch uint32, source p2p.MessageHandler) error { b := batch.Batch{} err := mbRes.marshalizer.Unmarshal(&b, mbBuff) if err != nil { @@ -177,7 +177,7 @@ func (mbRes *miniblockResolver) resolveMbRequestByHashArray(mbBuff []byte, pid c } for _, buff := range buffsToSend { - errSend := mbRes.Send(buff, pid) + errSend := mbRes.Send(buff, pid, source) if errSend != nil { return errSend } diff --git 
a/dataRetriever/resolvers/miniblockResolver_test.go b/dataRetriever/resolvers/miniblockResolver_test.go index 1b336c50396..35588e9d6a9 100644 --- a/dataRetriever/resolvers/miniblockResolver_test.go +++ b/dataRetriever/resolvers/miniblockResolver_test.go @@ -14,6 +14,7 @@ import ( "github.com/multiversx/mx-chain-go/dataRetriever/resolvers" "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/stretchr/testify/assert" ) @@ -127,7 +128,7 @@ func TestMiniblockResolver_ProcessReceivedAntifloodErrorsShouldErr(t *testing.T) } mbRes, _ := resolvers.NewMiniblockResolver(arg) - err := mbRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, nil), fromConnectedPeerId) + err := mbRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, nil), fromConnectedPeerId, &p2pmocks.MessengerStub{}) assert.True(t, errors.Is(err, expectedErr)) assert.False(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) assert.False(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) @@ -139,7 +140,7 @@ func TestMiniblockResolver_ProcessReceivedMessageNilValueShouldErr(t *testing.T) arg := createMockArgMiniblockResolver() mbRes, _ := resolvers.NewMiniblockResolver(arg) - err := mbRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, nil), fromConnectedPeerId) + err := mbRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, nil), fromConnectedPeerId, &p2pmocks.MessengerStub{}) assert.Equal(t, dataRetriever.ErrNilValue, err) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) @@ -151,7 +152,7 @@ func TestMiniblockResolver_ProcessReceivedMessageWrongTypeShouldErr(t *testing.T arg := createMockArgMiniblockResolver() mbRes, _ := resolvers.NewMiniblockResolver(arg) - err := mbRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.NonceType, make([]byte, 0)), fromConnectedPeerId) + err := mbRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.NonceType, make([]byte, 0)), fromConnectedPeerId, &p2pmocks.MessengerStub{}) assert.True(t, errors.Is(err, dataRetriever.ErrRequestTypeNotImplemented)) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) @@ -184,7 +185,7 @@ func TestMiniblockResolver_ProcessReceivedMessageFoundInPoolShouldRetValAndSend( arg := createMockArgMiniblockResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { wasSent = true return nil }, @@ -200,6 +201,7 @@ func TestMiniblockResolver_ProcessReceivedMessageFoundInPoolShouldRetValAndSend( err := mbRes.ProcessReceivedMessage( createRequestMsg(dataRetriever.HashArrayType, requestedBuff), fromConnectedPeerId, + &p2pmocks.MessengerStub{}, ) assert.Nil(t, err) @@ -254,6 +256,7 @@ func TestMiniblockResolver_ProcessReceivedMessageFoundInPoolMarshalizerFailShoul err := mbRes.ProcessReceivedMessage( createRequestMsg(dataRetriever.HashArrayType, requestedBuff), fromConnectedPeerId, + &p2pmocks.MessengerStub{}, ) assert.True(t, errors.Is(err, errExpected)) @@ -309,6 +312,7 @@ func TestMiniblockResolver_ProcessReceivedMessageUnmarshalFails(t *testing.T) { err := mbRes.ProcessReceivedMessage( createRequestMsg(dataRetriever.HashArrayType, requestedBuff), fromConnectedPeerId, + 
&p2pmocks.MessengerStub{}, ) assert.True(t, errors.Is(err, expectedErr)) @@ -352,6 +356,7 @@ func TestMiniblockResolver_ProcessReceivedMessagePackDataInChunksFails(t *testin err := mbRes.ProcessReceivedMessage( createRequestMsg(dataRetriever.HashArrayType, requestedBuff), fromConnectedPeerId, + &p2pmocks.MessengerStub{}, ) assert.True(t, errors.Is(err, expectedErr)) @@ -386,7 +391,7 @@ func TestMiniblockResolver_ProcessReceivedMessageSendFails(t *testing.T) { } arg.Marshaller = goodMarshalizer arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { return expectedErr }, } @@ -395,6 +400,7 @@ func TestMiniblockResolver_ProcessReceivedMessageSendFails(t *testing.T) { err := mbRes.ProcessReceivedMessage( createRequestMsg(dataRetriever.HashArrayType, requestedBuff), fromConnectedPeerId, + &p2pmocks.MessengerStub{}, ) assert.True(t, errors.Is(err, expectedErr)) @@ -428,7 +434,7 @@ func TestMiniblockResolver_ProcessReceivedMessageNotFoundInPoolShouldRetFromStor arg := createMockArgMiniblockResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { wasSend = true return nil }, @@ -440,6 +446,7 @@ func TestMiniblockResolver_ProcessReceivedMessageNotFoundInPoolShouldRetFromStor err := mbRes.ProcessReceivedMessage( createRequestMsg(dataRetriever.HashType, requestedBuff), fromConnectedPeerId, + &p2pmocks.MessengerStub{}, ) assert.Nil(t, err) @@ -474,7 +481,7 @@ func TestMiniblockResolver_ProcessReceivedMessageMarshalFails(t *testing.T) { arg := createMockArgMiniblockResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { assert.Fail(t, "should have not been called") return nil }, @@ -492,6 +499,7 @@ func TestMiniblockResolver_ProcessReceivedMessageMarshalFails(t *testing.T) { err := mbRes.ProcessReceivedMessage( createRequestMsg(dataRetriever.HashType, requestedBuff), fromConnectedPeerId, + &p2pmocks.MessengerStub{}, ) assert.True(t, errors.Is(err, expectedErr)) @@ -523,7 +531,7 @@ func TestMiniblockResolver_ProcessReceivedMessageMissingDataShouldNotSend(t *tes arg := createMockArgMiniblockResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { wasSent = true return nil }, @@ -535,6 +543,7 @@ func TestMiniblockResolver_ProcessReceivedMessageMissingDataShouldNotSend(t *tes _ = mbRes.ProcessReceivedMessage( createRequestMsg(dataRetriever.HashType, requestedBuff), fromConnectedPeerId, + &p2pmocks.MessengerStub{}, ) assert.False(t, wasSent) diff --git a/dataRetriever/resolvers/peerAuthenticationResolver.go b/dataRetriever/resolvers/peerAuthenticationResolver.go index 3a93101b4c2..dc2a45892c2 100644 --- a/dataRetriever/resolvers/peerAuthenticationResolver.go +++ b/dataRetriever/resolvers/peerAuthenticationResolver.go @@ -75,7 +75,7 @@ func checkArgPeerAuthenticationResolver(arg ArgPeerAuthenticationResolver) error // ProcessReceivedMessage represents the callback func from the p2p.Messenger that is called each time a new message is received // (for the topic this validator was registered to, usually a request topic) -func (res 
*peerAuthenticationResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID) error { +func (res *peerAuthenticationResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID, source p2p.MessageHandler) error { err := res.canProcessMessage(message, fromConnectedPeer) if err != nil { return err @@ -91,7 +91,7 @@ func (res *peerAuthenticationResolver) ProcessReceivedMessage(message p2p.Messag switch rd.Type { case dataRetriever.HashArrayType: - return res.resolveMultipleHashesRequest(rd.Value, message.Peer()) + return res.resolveMultipleHashesRequest(rd.Value, message.Peer(), source) default: err = dataRetriever.ErrRequestTypeNotImplemented } @@ -103,7 +103,7 @@ func (res *peerAuthenticationResolver) ProcessReceivedMessage(message p2p.Messag } // resolveMultipleHashesRequest sends the response for multiple hashes request -func (res *peerAuthenticationResolver) resolveMultipleHashesRequest(hashesBuff []byte, pid core.PeerID) error { +func (res *peerAuthenticationResolver) resolveMultipleHashesRequest(hashesBuff []byte, pid core.PeerID, source p2p.MessageHandler) error { b := batch.Batch{} err := res.marshalizer.Unmarshal(&b, hashesBuff) if err != nil { @@ -116,18 +116,18 @@ func (res *peerAuthenticationResolver) resolveMultipleHashesRequest(hashesBuff [ return fmt.Errorf("resolveMultipleHashesRequest error %w from buff %x", err, hashesBuff) } - return res.sendPeerAuthsForHashes(peerAuthsForHashes, pid) + return res.sendPeerAuthsForHashes(peerAuthsForHashes, pid, source) } // sendPeerAuthsForHashes sends multiple peer authentication messages for specific hashes -func (res *peerAuthenticationResolver) sendPeerAuthsForHashes(dataBuff [][]byte, pid core.PeerID) error { +func (res *peerAuthenticationResolver) sendPeerAuthsForHashes(dataBuff [][]byte, pid core.PeerID, source p2p.MessageHandler) error { buffsToSend, err := res.dataPacker.PackDataInChunks(dataBuff, maxBuffToSendPeerAuthentications) if err != nil { return err } for _, buff := range buffsToSend { - err = res.Send(buff, pid) + err = res.Send(buff, pid, source) if err != nil { return err } diff --git a/dataRetriever/resolvers/peerAuthenticationResolver_test.go b/dataRetriever/resolvers/peerAuthenticationResolver_test.go index 962d50be2ec..188c29d7e3f 100644 --- a/dataRetriever/resolvers/peerAuthenticationResolver_test.go +++ b/dataRetriever/resolvers/peerAuthenticationResolver_test.go @@ -19,6 +19,7 @@ import ( "github.com/multiversx/mx-chain-go/heartbeat" "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -163,7 +164,7 @@ func TestPeerAuthenticationResolver_ProcessReceivedMessage(t *testing.T) { assert.Nil(t, err) assert.False(t, res.IsInterfaceNil()) - err = res.ProcessReceivedMessage(nil, fromConnectedPeer) + err = res.ProcessReceivedMessage(nil, fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.Equal(t, dataRetriever.ErrNilMessage, err) }) t.Run("canProcessMessage due to antiflood handler error", func(t *testing.T) { @@ -179,7 +180,7 @@ func TestPeerAuthenticationResolver_ProcessReceivedMessage(t *testing.T) { assert.Nil(t, err) assert.False(t, res.IsInterfaceNil()) - err = res.ProcessReceivedMessage(createRequestMsg(dataRetriever.ChunkType, nil), fromConnectedPeer) + err = res.ProcessReceivedMessage(createRequestMsg(dataRetriever.ChunkType, nil), fromConnectedPeer, &p2pmocks.MessengerStub{}) 
assert.True(t, errors.Is(err, expectedErr)) assert.False(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) assert.False(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) @@ -197,7 +198,7 @@ func TestPeerAuthenticationResolver_ProcessReceivedMessage(t *testing.T) { assert.Nil(t, err) assert.False(t, res.IsInterfaceNil()) - err = res.ProcessReceivedMessage(createRequestMsg(dataRetriever.ChunkType, nil), fromConnectedPeer) + err = res.ProcessReceivedMessage(createRequestMsg(dataRetriever.ChunkType, nil), fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.True(t, errors.Is(err, expectedErr)) }) t.Run("invalid request type should error", func(t *testing.T) { @@ -212,7 +213,7 @@ func TestPeerAuthenticationResolver_ProcessReceivedMessage(t *testing.T) { assert.Nil(t, err) assert.False(t, res.IsInterfaceNil()) - err = res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, requestedBuff), fromConnectedPeer) + err = res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, requestedBuff), fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.True(t, errors.Is(err, dataRetriever.ErrRequestTypeNotImplemented)) }) @@ -226,7 +227,7 @@ func TestPeerAuthenticationResolver_ProcessReceivedMessage(t *testing.T) { assert.Nil(t, err) assert.False(t, res.IsInterfaceNil()) - err = res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, []byte("invalid data")), fromConnectedPeer) + err = res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, []byte("invalid data")), fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.NotNil(t, err) }) t.Run("resolveMultipleHashesRequest: all hashes missing from cache should error", func(t *testing.T) { @@ -241,7 +242,7 @@ func TestPeerAuthenticationResolver_ProcessReceivedMessage(t *testing.T) { arg.PeerAuthenticationPool = cache wasSent := false arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { wasSent = true return nil }, @@ -253,7 +254,7 @@ func TestPeerAuthenticationResolver_ProcessReceivedMessage(t *testing.T) { hashes := getKeysSlice() providedHashes, err := arg.Marshaller.Marshal(batch.Batch{Data: hashes}) assert.Nil(t, err) - err = res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, providedHashes), fromConnectedPeer) + err = res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, providedHashes), fromConnectedPeer, &p2pmocks.MessengerStub{}) expectedSubstrErr := fmt.Sprintf("%s %x", "from buff", providedHashes) assert.True(t, strings.Contains(fmt.Sprintf("%s", err), expectedSubstrErr)) assert.False(t, wasSent) @@ -270,7 +271,7 @@ func TestPeerAuthenticationResolver_ProcessReceivedMessage(t *testing.T) { arg.PeerAuthenticationPool = cache wasSent := false arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { wasSent = true return nil }, @@ -282,7 +283,7 @@ func TestPeerAuthenticationResolver_ProcessReceivedMessage(t *testing.T) { hashes := getKeysSlice() providedHashes, err := arg.Marshaller.Marshal(batch.Batch{Data: hashes}) assert.Nil(t, err) - err = res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, providedHashes), fromConnectedPeer) + err = res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, providedHashes), fromConnectedPeer, 
&p2pmocks.MessengerStub{}) expectedSubstrErr := fmt.Sprintf("%s %x", "from buff", providedHashes) assert.True(t, strings.Contains(fmt.Sprintf("%s", err), expectedSubstrErr)) assert.False(t, wasSent) @@ -306,7 +307,7 @@ func TestPeerAuthenticationResolver_ProcessReceivedMessage(t *testing.T) { arg.PeerAuthenticationPool = cache wasSent := false arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { wasSent = true return nil }, @@ -318,7 +319,7 @@ func TestPeerAuthenticationResolver_ProcessReceivedMessage(t *testing.T) { hashes := getKeysSlice() providedHashes, err := arg.Marshaller.Marshal(batch.Batch{Data: hashes}) assert.Nil(t, err) - err = res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, providedHashes), fromConnectedPeer) + err = res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, providedHashes), fromConnectedPeer, &p2pmocks.MessengerStub{}) expectedSubstrErr := fmt.Sprintf("%s %x", "from buff", providedHashes) assert.True(t, strings.Contains(fmt.Sprintf("%s", err), expectedSubstrErr)) assert.False(t, wasSent) @@ -360,7 +361,7 @@ func TestPeerAuthenticationResolver_ProcessReceivedMessage(t *testing.T) { arg.PeerAuthenticationPool = cache wasSent := false arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { b := &batch.Batch{} err = arg.Marshaller.Unmarshal(b, buff) assert.Nil(t, err) @@ -386,7 +387,7 @@ func TestPeerAuthenticationResolver_ProcessReceivedMessage(t *testing.T) { assert.Nil(t, err) assert.False(t, res.IsInterfaceNil()) - err = res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, providedHashes), fromConnectedPeer) + err = res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, providedHashes), fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.Nil(t, err) assert.True(t, wasSent) }) @@ -412,7 +413,7 @@ func TestPeerAuthenticationResolver_ProcessReceivedMessage(t *testing.T) { hashes := getKeysSlice() providedHashes, err := arg.Marshaller.Marshal(batch.Batch{Data: hashes}) assert.Nil(t, err) - err = res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, providedHashes), fromConnectedPeer) + err = res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, providedHashes), fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.True(t, errors.Is(err, expectedErr)) }) t.Run("resolveMultipleHashesRequest: Send returns error", func(t *testing.T) { @@ -426,7 +427,7 @@ func TestPeerAuthenticationResolver_ProcessReceivedMessage(t *testing.T) { arg := createMockArgPeerAuthenticationResolver() arg.PeerAuthenticationPool = cache arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { return expectedErr }, } @@ -437,7 +438,7 @@ func TestPeerAuthenticationResolver_ProcessReceivedMessage(t *testing.T) { hashes := getKeysSlice() providedHashes, err := arg.Marshaller.Marshal(batch.Batch{Data: hashes}) assert.Nil(t, err) - err = res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, providedHashes), fromConnectedPeer) + err = res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, providedHashes), fromConnectedPeer, 
&p2pmocks.MessengerStub{}) assert.True(t, errors.Is(err, expectedErr)) }) t.Run("resolveMultipleHashesRequest: send large data buff", func(t *testing.T) { @@ -463,7 +464,7 @@ func TestPeerAuthenticationResolver_ProcessReceivedMessage(t *testing.T) { messagesSent := 0 hashesReceived := 0 arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { b := &batch.Batch{} err := arg.Marshaller.Unmarshal(b, buff) assert.Nil(t, err) @@ -500,7 +501,7 @@ func TestPeerAuthenticationResolver_ProcessReceivedMessage(t *testing.T) { chunkIndex := uint32(0) providedHashes, err := arg.Marshaller.Marshal(&batch.Batch{Data: providedKeys}) assert.Nil(t, err) - err = res.ProcessReceivedMessage(createRequestMsgWithChunkIndex(dataRetriever.HashArrayType, providedHashes, epoch, chunkIndex), fromConnectedPeer) + err = res.ProcessReceivedMessage(createRequestMsgWithChunkIndex(dataRetriever.HashArrayType, providedHashes, epoch, chunkIndex), fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.Nil(t, err) assert.Equal(t, 2, messagesSent) assert.Equal(t, expectedLen, hashesReceived) diff --git a/dataRetriever/resolvers/transactionResolver.go b/dataRetriever/resolvers/transactionResolver.go index cbf83d9fe04..3a88bd13c15 100644 --- a/dataRetriever/resolvers/transactionResolver.go +++ b/dataRetriever/resolvers/transactionResolver.go @@ -82,7 +82,7 @@ func checkArgTxResolver(arg ArgTxResolver) error { // ProcessReceivedMessage will be the callback func from the p2p.Messenger and will be called each time a new message was received // (for the topic this validator was registered to, usually a request topic) -func (txRes *TxResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID) error { +func (txRes *TxResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID, source p2p.MessageHandler) error { err := txRes.canProcessMessage(message, fromConnectedPeer) if err != nil { return err @@ -98,9 +98,9 @@ func (txRes *TxResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConn switch rd.Type { case dataRetriever.HashType: - err = txRes.resolveTxRequestByHash(rd.Value, message.Peer(), rd.Epoch) + err = txRes.resolveTxRequestByHash(rd.Value, message.Peer(), rd.Epoch, source) case dataRetriever.HashArrayType: - err = txRes.resolveTxRequestByHashArray(rd.Value, message.Peer(), rd.Epoch) + err = txRes.resolveTxRequestByHashArray(rd.Value, message.Peer(), rd.Epoch, source) default: err = dataRetriever.ErrRequestTypeNotImplemented } @@ -112,7 +112,7 @@ func (txRes *TxResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConn return err } -func (txRes *TxResolver) resolveTxRequestByHash(hash []byte, pid core.PeerID, epoch uint32) error { +func (txRes *TxResolver) resolveTxRequestByHash(hash []byte, pid core.PeerID, epoch uint32, source p2p.MessageHandler) error { // TODO this can be optimized by searching in corresponding datapool (taken by topic name) tx, err := txRes.fetchTxAsByteSlice(hash, epoch) if err != nil { @@ -127,7 +127,7 @@ func (txRes *TxResolver) resolveTxRequestByHash(hash []byte, pid core.PeerID, ep return err } - return txRes.Send(buff, pid) + return txRes.Send(buff, pid, source) } func (txRes *TxResolver) fetchTxAsByteSlice(hash []byte, epoch uint32) ([]byte, error) { @@ -152,7 +152,7 @@ func (txRes *TxResolver) fetchTxAsByteSlice(hash []byte, epoch uint32) ([]byte, return buff, nil } -func (txRes *TxResolver) 
resolveTxRequestByHashArray(hashesBuff []byte, pid core.PeerID, epoch uint32) error { +func (txRes *TxResolver) resolveTxRequestByHashArray(hashesBuff []byte, pid core.PeerID, epoch uint32, source p2p.MessageHandler) error { // TODO this can be optimized by searching in corresponding datapool (taken by topic name) b := batch.Batch{} err := txRes.marshalizer.Unmarshal(&b, hashesBuff) @@ -186,7 +186,7 @@ func (txRes *TxResolver) resolveTxRequestByHashArray(hashesBuff []byte, pid core } for _, buff := range buffsToSend { - errSend := txRes.Send(buff, pid) + errSend := txRes.Send(buff, pid, source) if errSend != nil { return errSend } diff --git a/dataRetriever/resolvers/transactionResolver_test.go b/dataRetriever/resolvers/transactionResolver_test.go index d75d2192789..2af167aae70 100644 --- a/dataRetriever/resolvers/transactionResolver_test.go +++ b/dataRetriever/resolvers/transactionResolver_test.go @@ -131,7 +131,7 @@ func TestTxResolver_ProcessReceivedMessageCanProcessMessageErrorsShouldErr(t *te } txRes, _ := resolvers.NewTxResolver(arg) - err := txRes.ProcessReceivedMessage(&p2pmocks.P2PMessageMock{}, connectedPeerId) + err := txRes.ProcessReceivedMessage(&p2pmocks.P2PMessageMock{}, connectedPeerId, &p2pmocks.MessengerStub{}) assert.True(t, errors.Is(err, expectedErr)) assert.False(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) @@ -144,7 +144,7 @@ func TestTxResolver_ProcessReceivedMessageNilMessageShouldErr(t *testing.T) { arg := createMockArgTxResolver() txRes, _ := resolvers.NewTxResolver(arg) - err := txRes.ProcessReceivedMessage(nil, connectedPeerId) + err := txRes.ProcessReceivedMessage(nil, connectedPeerId, &p2pmocks.MessengerStub{}) assert.Equal(t, dataRetriever.ErrNilMessage, err) assert.False(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) @@ -161,7 +161,7 @@ func TestTxResolver_ProcessReceivedMessageWrongTypeShouldErr(t *testing.T) { msg := &p2pmocks.P2PMessageMock{DataField: data} - err := txRes.ProcessReceivedMessage(msg, connectedPeerId) + err := txRes.ProcessReceivedMessage(msg, connectedPeerId, &p2pmocks.MessengerStub{}) assert.True(t, errors.Is(err, dataRetriever.ErrRequestTypeNotImplemented)) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) @@ -178,7 +178,7 @@ func TestTxResolver_ProcessReceivedMessageNilValueShouldErr(t *testing.T) { msg := &p2pmocks.P2PMessageMock{DataField: data} - err := txRes.ProcessReceivedMessage(msg, connectedPeerId) + err := txRes.ProcessReceivedMessage(msg, connectedPeerId, &p2pmocks.MessengerStub{}) assert.Equal(t, dataRetriever.ErrNilValue, err) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) @@ -206,7 +206,7 @@ func TestTxResolver_ProcessReceivedMessageFoundInTxPoolShouldSearchAndSend(t *te arg := createMockArgTxResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { sendWasCalled = true return nil }, @@ -218,7 +218,7 @@ func TestTxResolver_ProcessReceivedMessageFoundInTxPoolShouldSearchAndSend(t *te msg := &p2pmocks.P2PMessageMock{DataField: data} - err := txRes.ProcessReceivedMessage(msg, connectedPeerId) + err := txRes.ProcessReceivedMessage(msg, connectedPeerId, &p2pmocks.MessengerStub{}) assert.Nil(t, err) assert.True(t, searchWasCalled) @@ -262,7 +262,7 @@ func TestTxResolver_ProcessReceivedMessageFoundInTxPoolMarshalizerFailShouldRetN msg := &p2pmocks.P2PMessageMock{DataField: data} - err := txRes.ProcessReceivedMessage(msg, 
connectedPeerId) + err := txRes.ProcessReceivedMessage(msg, connectedPeerId, &p2pmocks.MessengerStub{}) assert.True(t, errors.Is(err, errExpected)) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) @@ -307,7 +307,7 @@ func TestTxResolver_ProcessReceivedMessageBatchMarshalFailShouldRetNilAndErr(t * msg := &p2pmocks.P2PMessageMock{DataField: data} - err := txRes.ProcessReceivedMessage(msg, connectedPeerId) + err := txRes.ProcessReceivedMessage(msg, connectedPeerId, &p2pmocks.MessengerStub{}) assert.True(t, errors.Is(err, expectedErr)) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) @@ -342,7 +342,7 @@ func TestTxResolver_ProcessReceivedMessageFoundInTxStorageShouldRetValAndSend(t arg := createMockArgTxResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { sendWasCalled = true return nil }, @@ -355,7 +355,7 @@ func TestTxResolver_ProcessReceivedMessageFoundInTxStorageShouldRetValAndSend(t msg := &p2pmocks.P2PMessageMock{DataField: data} - err := txRes.ProcessReceivedMessage(msg, connectedPeerId) + err := txRes.ProcessReceivedMessage(msg, connectedPeerId, &p2pmocks.MessengerStub{}) assert.Nil(t, err) assert.True(t, searchWasCalled) @@ -395,7 +395,7 @@ func TestTxResolver_ProcessReceivedMessageFoundInTxStorageCheckRetError(t *testi msg := &p2pmocks.P2PMessageMock{DataField: data} - err := txRes.ProcessReceivedMessage(msg, connectedPeerId) + err := txRes.ProcessReceivedMessage(msg, connectedPeerId, &p2pmocks.MessengerStub{}) assert.True(t, errors.Is(err, errExpected)) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) @@ -432,7 +432,7 @@ func TestTxResolver_ProcessReceivedMessageRequestedTwoSmallTransactionsShouldCal sendWasCalled := false arg := createMockArgTxResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { sendWasCalled = true return nil }, @@ -455,7 +455,7 @@ func TestTxResolver_ProcessReceivedMessageRequestedTwoSmallTransactionsShouldCal msg := &p2pmocks.P2PMessageMock{DataField: data} - err := txRes.ProcessReceivedMessage(msg, connectedPeerId) + err := txRes.ProcessReceivedMessage(msg, connectedPeerId, &p2pmocks.MessengerStub{}) assert.Nil(t, err) assert.True(t, splitSliceWasCalled) @@ -488,7 +488,7 @@ func TestTxResolver_ProcessReceivedMessageRequestedTwoSmallTransactionsFoundOnly sendWasCalled := false arg := createMockArgTxResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { sendWasCalled = true return nil }, @@ -516,7 +516,7 @@ func TestTxResolver_ProcessReceivedMessageRequestedTwoSmallTransactionsFoundOnly msg := &p2pmocks.P2PMessageMock{DataField: data} - err := txRes.ProcessReceivedMessage(msg, connectedPeerId) + err := txRes.ProcessReceivedMessage(msg, connectedPeerId, &p2pmocks.MessengerStub{}) assert.NotNil(t, err) assert.True(t, splitSliceWasCalled) @@ -545,7 +545,7 @@ func TestTxResolver_ProcessReceivedMessageHashArrayUnmarshalFails(t *testing.T) data, _ := marshalizer.Marshal(&dataRetriever.RequestData{Type: dataRetriever.HashArrayType, Value: []byte("buff")}) msg := &p2pmocks.P2PMessageMock{DataField: data} - err := txRes.ProcessReceivedMessage(msg, connectedPeerId) 
+ err := txRes.ProcessReceivedMessage(msg, connectedPeerId, &p2pmocks.MessengerStub{}) assert.True(t, errors.Is(err, expectedErr)) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) @@ -570,7 +570,7 @@ func TestTxResolver_ProcessReceivedMessageHashArrayPackDataInChunksFails(t *test data, _ := arg.Marshaller.Marshal(&dataRetriever.RequestData{Type: dataRetriever.HashArrayType, Value: buff}) msg := &p2pmocks.P2PMessageMock{DataField: data} - err := txRes.ProcessReceivedMessage(msg, connectedPeerId) + err := txRes.ProcessReceivedMessage(msg, connectedPeerId, &p2pmocks.MessengerStub{}) assert.True(t, errors.Is(err, expectedErr)) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) @@ -585,7 +585,7 @@ func TestTxResolver_ProcessReceivedMessageHashArraySendFails(t *testing.T) { arg := createMockArgTxResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { return expectedErr }, } @@ -595,7 +595,7 @@ func TestTxResolver_ProcessReceivedMessageHashArraySendFails(t *testing.T) { data, _ := arg.Marshaller.Marshal(&dataRetriever.RequestData{Type: dataRetriever.HashArrayType, Value: buff}) msg := &p2pmocks.P2PMessageMock{DataField: data} - err := txRes.ProcessReceivedMessage(msg, connectedPeerId) + err := txRes.ProcessReceivedMessage(msg, connectedPeerId, &p2pmocks.MessengerStub{}) assert.True(t, errors.Is(err, expectedErr)) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) diff --git a/dataRetriever/resolvers/trieNodeResolver.go b/dataRetriever/resolvers/trieNodeResolver.go index 7086eb35605..871ed85fee5 100644 --- a/dataRetriever/resolvers/trieNodeResolver.go +++ b/dataRetriever/resolvers/trieNodeResolver.go @@ -59,7 +59,7 @@ func checkArgTrieNodeResolver(arg ArgTrieNodeResolver) error { // ProcessReceivedMessage will be the callback func from the p2p.Messenger and will be called each time a new message was received // (for the topic this validator was registered to, usually a request topic) -func (tnRes *TrieNodeResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID) error { +func (tnRes *TrieNodeResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID, source p2p.MessageHandler) error { err := tnRes.canProcessMessage(message, fromConnectedPeer) if err != nil { return err @@ -75,15 +75,15 @@ func (tnRes *TrieNodeResolver) ProcessReceivedMessage(message p2p.MessageP2P, fr switch rd.Type { case dataRetriever.HashType: - return tnRes.resolveOneHash(rd.Value, rd.ChunkIndex, message) + return tnRes.resolveOneHash(rd.Value, rd.ChunkIndex, message, source) case dataRetriever.HashArrayType: - return tnRes.resolveMultipleHashes(rd.Value, message) + return tnRes.resolveMultipleHashes(rd.Value, message, source) default: return dataRetriever.ErrRequestTypeNotImplemented } } -func (tnRes *TrieNodeResolver) resolveMultipleHashes(hashesBuff []byte, message p2p.MessageP2P) error { +func (tnRes *TrieNodeResolver) resolveMultipleHashes(hashesBuff []byte, message p2p.MessageP2P, source p2p.MessageHandler) error { b := batch.Batch{} err := tnRes.marshalizer.Unmarshal(&b, hashesBuff) if err != nil { @@ -95,12 +95,12 @@ func (tnRes *TrieNodeResolver) resolveMultipleHashes(hashesBuff []byte, message nodes := make(map[string]struct{}) spaceUsed, usedAllSpace := tnRes.resolveOnlyRequestedHashes(hashes, nodes) if usedAllSpace { - return 
tnRes.sendResponse(convertMapToSlice(nodes), hashes, supportedChunkIndex, message) + return tnRes.sendResponse(convertMapToSlice(nodes), hashes, supportedChunkIndex, message, source) } tnRes.resolveSubTries(hashes, nodes, spaceUsed) - return tnRes.sendResponse(convertMapToSlice(nodes), hashes, supportedChunkIndex, message) + return tnRes.sendResponse(convertMapToSlice(nodes), hashes, supportedChunkIndex, message, source) } func (tnRes *TrieNodeResolver) resolveOnlyRequestedHashes(hashes [][]byte, nodes map[string]struct{}) (int, bool) { @@ -167,13 +167,13 @@ func convertMapToSlice(m map[string]struct{}) [][]byte { return buff } -func (tnRes *TrieNodeResolver) resolveOneHash(hash []byte, chunkIndex uint32, message p2p.MessageP2P) error { +func (tnRes *TrieNodeResolver) resolveOneHash(hash []byte, chunkIndex uint32, message p2p.MessageP2P, source p2p.MessageHandler) error { serializedNode, err := tnRes.trieDataGetter.GetSerializedNode(hash) if err != nil { return err } - return tnRes.sendResponse([][]byte{serializedNode}, [][]byte{hash}, chunkIndex, message) + return tnRes.sendResponse([][]byte{serializedNode}, [][]byte{hash}, chunkIndex, message, source) } func (tnRes *TrieNodeResolver) getSubTrie(hash []byte, remainingSpace uint64) ([][]byte, uint64, error) { @@ -198,6 +198,7 @@ func (tnRes *TrieNodeResolver) sendResponse( hashes [][]byte, chunkIndex uint32, message p2p.MessageP2P, + source p2p.MessageHandler, ) error { if len(serializedNodes) == 0 { @@ -206,7 +207,7 @@ func (tnRes *TrieNodeResolver) sendResponse( } if len(serializedNodes) == 1 && len(serializedNodes[0]) > core.MaxBufferSizeToSendTrieNodes { - return tnRes.sendLargeMessage(serializedNodes[0], hashes[0], int(chunkIndex), message) + return tnRes.sendLargeMessage(serializedNodes[0], hashes[0], int(chunkIndex), message, source) } buff, err := tnRes.marshalizer.Marshal(&batch.Batch{Data: serializedNodes}) @@ -214,7 +215,7 @@ func (tnRes *TrieNodeResolver) sendResponse( return err } - return tnRes.Send(buff, message.Peer()) + return tnRes.Send(buff, message.Peer(), source) } func (tnRes *TrieNodeResolver) sendLargeMessage( @@ -222,6 +223,7 @@ func (tnRes *TrieNodeResolver) sendLargeMessage( reference []byte, chunkIndex int, message p2p.MessageP2P, + source p2p.MessageHandler, ) error { logTrieNodes.Trace("assembling chunk", "reference", reference, "len", len(largeBuff)) @@ -248,7 +250,7 @@ func (tnRes *TrieNodeResolver) sendLargeMessage( return err } - return tnRes.Send(buff, message.Peer()) + return tnRes.Send(buff, message.Peer(), source) } // IsInterfaceNil returns true if there is no value under the interface diff --git a/dataRetriever/resolvers/trieNodeResolver_test.go b/dataRetriever/resolvers/trieNodeResolver_test.go index dd7325d533b..b2706f02b36 100644 --- a/dataRetriever/resolvers/trieNodeResolver_test.go +++ b/dataRetriever/resolvers/trieNodeResolver_test.go @@ -108,7 +108,7 @@ func TestTrieNodeResolver_ProcessReceivedAntiflooderCanProcessMessageErrShouldEr } tnRes, _ := resolvers.NewTrieNodeResolver(arg) - err := tnRes.ProcessReceivedMessage(&p2pmocks.P2PMessageMock{}, fromConnectedPeer) + err := tnRes.ProcessReceivedMessage(&p2pmocks.P2PMessageMock{}, fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.True(t, errors.Is(err, expectedErr)) assert.False(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) assert.False(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) @@ -120,7 +120,7 @@ func TestTrieNodeResolver_ProcessReceivedMessageNilMessageShouldErr(t *testing.T arg := createMockArgTrieNodeResolver() 
tnRes, _ := resolvers.NewTrieNodeResolver(arg) - err := tnRes.ProcessReceivedMessage(nil, fromConnectedPeer) + err := tnRes.ProcessReceivedMessage(nil, fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.Equal(t, dataRetriever.ErrNilMessage, err) assert.False(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) assert.False(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) @@ -137,7 +137,7 @@ func TestTrieNodeResolver_ProcessReceivedMessageWrongTypeShouldErr(t *testing.T) data, _ := marshalizer.Marshal(&dataRetriever.RequestData{Type: dataRetriever.NonceType, Value: []byte("aaa")}) msg := &p2pmocks.P2PMessageMock{DataField: data} - err := tnRes.ProcessReceivedMessage(msg, fromConnectedPeer) + err := tnRes.ProcessReceivedMessage(msg, fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.Equal(t, dataRetriever.ErrRequestTypeNotImplemented, err) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) @@ -154,7 +154,7 @@ func TestTrieNodeResolver_ProcessReceivedMessageNilValueShouldErr(t *testing.T) data, _ := marshalizer.Marshal(&dataRetriever.RequestData{Type: dataRetriever.HashType, Value: nil}) msg := &p2pmocks.P2PMessageMock{DataField: data} - err := tnRes.ProcessReceivedMessage(msg, fromConnectedPeer) + err := tnRes.ProcessReceivedMessage(msg, fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.Equal(t, dataRetriever.ErrNilValue, err) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) @@ -183,7 +183,7 @@ func TestTrieNodeResolver_ProcessReceivedMessageShouldGetFromTrieAndSend(t *test arg := createMockArgTrieNodeResolver() arg.TrieDataGetter = tr arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { sendWasCalled = true return nil }, @@ -193,7 +193,7 @@ func TestTrieNodeResolver_ProcessReceivedMessageShouldGetFromTrieAndSend(t *test data, _ := marshalizer.Marshal(&dataRetriever.RequestData{Type: dataRetriever.HashType, Value: []byte("node1")}) msg := &p2pmocks.P2PMessageMock{DataField: data} - err := tnRes.ProcessReceivedMessage(msg, fromConnectedPeer) + err := tnRes.ProcessReceivedMessage(msg, fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.Nil(t, err) assert.True(t, getSerializedNodesWasCalled) @@ -223,7 +223,7 @@ func TestTrieNodeResolver_ProcessReceivedMessageShouldGetFromTrieAndMarshalizerF data, _ := marshalizerMock.Marshal(&dataRetriever.RequestData{Type: dataRetriever.HashType, Value: []byte("node1")}) msg := &p2pmocks.P2PMessageMock{DataField: data} - err := tnRes.ProcessReceivedMessage(msg, fromConnectedPeer) + err := tnRes.ProcessReceivedMessage(msg, fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.Equal(t, errExpected, err) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) @@ -243,7 +243,7 @@ func TestTrieNodeResolver_ProcessReceivedMessageTrieErrorsShouldErr(t *testing.T data, _ := arg.Marshaller.Marshal(&dataRetriever.RequestData{Type: dataRetriever.HashType, Value: []byte("node1")}) msg := &p2pmocks.P2PMessageMock{DataField: data} - err := tnRes.ProcessReceivedMessage(msg, fromConnectedPeer) + err := tnRes.ProcessReceivedMessage(msg, fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.Equal(t, expectedErr, err) assert.True(t, 
arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) @@ -286,7 +286,7 @@ func TestTrieNodeResolver_ProcessReceivedMessageMultipleHashesUnmarshalFails(t * ) msg := &p2pmocks.P2PMessageMock{DataField: data} - err := tnRes.ProcessReceivedMessage(msg, fromConnectedPeer) + err := tnRes.ProcessReceivedMessage(msg, fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.Equal(t, expectedErr, err) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) @@ -297,7 +297,7 @@ func TestTrieNodeResolver_ProcessReceivedMessageMultipleHashesGetSerializedNodeE arg := createMockArgTrieNodeResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { assert.Fail(t, "should have not called send") return nil }, @@ -322,7 +322,7 @@ func TestTrieNodeResolver_ProcessReceivedMessageMultipleHashesGetSerializedNodeE ) msg := &p2pmocks.P2PMessageMock{DataField: data} - err := tnRes.ProcessReceivedMessage(msg, fromConnectedPeer) + err := tnRes.ProcessReceivedMessage(msg, fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.Nil(t, err) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) @@ -337,7 +337,7 @@ func TestTrieNodeResolver_ProcessReceivedMessageMultipleHashesGetSerializedNodes var receivedNodes [][]byte arg := createMockArgTrieNodeResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { b := &batch.Batch{} err := arg.Marshaller.Unmarshal(b, buff) require.Nil(t, err) @@ -375,7 +375,7 @@ func TestTrieNodeResolver_ProcessReceivedMessageMultipleHashesGetSerializedNodes ) msg := &p2pmocks.P2PMessageMock{DataField: data} - err := tnRes.ProcessReceivedMessage(msg, fromConnectedPeer) + err := tnRes.ProcessReceivedMessage(msg, fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.Nil(t, err) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) @@ -392,7 +392,7 @@ func TestTrieNodeResolver_ProcessReceivedMessageMultipleHashesNotEnoughSpaceShou var receivedNodes [][]byte arg := createMockArgTrieNodeResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { b := &batch.Batch{} err := arg.Marshaller.Unmarshal(b, buff) require.Nil(t, err) @@ -431,7 +431,7 @@ func TestTrieNodeResolver_ProcessReceivedMessageMultipleHashesNotEnoughSpaceShou ) msg := &p2pmocks.P2PMessageMock{DataField: data} - err := tnRes.ProcessReceivedMessage(msg, fromConnectedPeer) + err := tnRes.ProcessReceivedMessage(msg, fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.Nil(t, err) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) @@ -449,7 +449,7 @@ func TestTrieNodeResolver_ProcessReceivedMessageMultipleHashesShouldWorkWithSubt var receivedNodes [][]byte arg := createMockArgTrieNodeResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff 
[]byte, peer core.PeerID, source p2p.MessageHandler) error { b := &batch.Batch{} err := arg.Marshaller.Unmarshal(b, buff) require.Nil(t, err) @@ -492,7 +492,7 @@ func TestTrieNodeResolver_ProcessReceivedMessageMultipleHashesShouldWorkWithSubt ) msg := &p2pmocks.P2PMessageMock{DataField: data} - err := tnRes.ProcessReceivedMessage(msg, fromConnectedPeer) + err := tnRes.ProcessReceivedMessage(msg, fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.Nil(t, err) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) @@ -519,7 +519,7 @@ func testTrieNodeResolverProcessReceivedMessageLargeTrieNode( sendWasCalled := false arg := createMockArgTrieNodeResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { b := &batch.Batch{} err := arg.Marshaller.Unmarshal(b, buff) require.Nil(t, err) @@ -558,7 +558,7 @@ func testTrieNodeResolverProcessReceivedMessageLargeTrieNode( ) msg := &p2pmocks.P2PMessageMock{DataField: data} - err := tnRes.ProcessReceivedMessage(msg, fromConnectedPeer) + err := tnRes.ProcessReceivedMessage(msg, fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.Nil(t, err) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) diff --git a/dataRetriever/resolvers/validatorInfoResolver.go b/dataRetriever/resolvers/validatorInfoResolver.go index 47e1e21baeb..9f7e5a6bb1a 100644 --- a/dataRetriever/resolvers/validatorInfoResolver.go +++ b/dataRetriever/resolvers/validatorInfoResolver.go @@ -89,7 +89,7 @@ func checkArgs(args ArgValidatorInfoResolver) error { // ProcessReceivedMessage represents the callback func from the p2p.Messenger that is called each time a new message is received // (for the topic this validator was registered to, usually a request topic) -func (res *validatorInfoResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID) error { +func (res *validatorInfoResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID, source p2p.MessageHandler) error { err := res.canProcessMessage(message, fromConnectedPeer) if err != nil { return err @@ -105,26 +105,26 @@ func (res *validatorInfoResolver) ProcessReceivedMessage(message p2p.MessageP2P, switch rd.Type { case dataRetriever.HashType: - return res.resolveHashRequest(rd.Value, rd.Epoch, fromConnectedPeer) + return res.resolveHashRequest(rd.Value, rd.Epoch, fromConnectedPeer, source) case dataRetriever.HashArrayType: - return res.resolveMultipleHashesRequest(rd.Value, rd.Epoch, fromConnectedPeer) + return res.resolveMultipleHashesRequest(rd.Value, rd.Epoch, fromConnectedPeer, source) } return fmt.Errorf("%w for value %s", dataRetriever.ErrRequestTypeNotImplemented, logger.DisplayByteSlice(rd.Value)) } // resolveHashRequest sends the response for a hash request -func (res *validatorInfoResolver) resolveHashRequest(hash []byte, epoch uint32, pid core.PeerID) error { +func (res *validatorInfoResolver) resolveHashRequest(hash []byte, epoch uint32, pid core.PeerID, source p2p.MessageHandler) error { data, err := res.fetchValidatorInfoByteSlice(hash, epoch) if err != nil { return err } - return res.marshalAndSend(data, pid) + return res.marshalAndSend(data, pid, source) } // resolveMultipleHashesRequest sends the response for a hash array type request -func (res 
*validatorInfoResolver) resolveMultipleHashesRequest(hashesBuff []byte, epoch uint32, pid core.PeerID) error { +func (res *validatorInfoResolver) resolveMultipleHashesRequest(hashesBuff []byte, epoch uint32, pid core.PeerID, source p2p.MessageHandler) error { b := batch.Batch{} err := res.marshalizer.Unmarshal(&b, hashesBuff) if err != nil { @@ -141,17 +141,17 @@ func (res *validatorInfoResolver) resolveMultipleHashesRequest(hashesBuff []byte return fmt.Errorf("resolveMultipleHashesRequest error %w from buff %s", err, outputHashes) } - return res.sendValidatorInfoForHashes(validatorInfoForHashes, pid) + return res.sendValidatorInfoForHashes(validatorInfoForHashes, pid, source) } -func (res *validatorInfoResolver) sendValidatorInfoForHashes(validatorInfoForHashes [][]byte, pid core.PeerID) error { +func (res *validatorInfoResolver) sendValidatorInfoForHashes(validatorInfoForHashes [][]byte, pid core.PeerID, source p2p.MessageHandler) error { buffsToSend, err := res.dataPacker.PackDataInChunks(validatorInfoForHashes, maxBuffToSendValidatorsInfo) if err != nil { return err } for _, buff := range buffsToSend { - err = res.Send(buff, pid) + err = res.Send(buff, pid, source) if err != nil { return err } @@ -197,7 +197,7 @@ func (res *validatorInfoResolver) fetchValidatorInfoByteSlice(hash []byte, epoch return buff, nil } -func (res *validatorInfoResolver) marshalAndSend(data []byte, pid core.PeerID) error { +func (res *validatorInfoResolver) marshalAndSend(data []byte, pid core.PeerID, source p2p.MessageHandler) error { b := &batch.Batch{ Data: [][]byte{data}, } @@ -206,7 +206,7 @@ func (res *validatorInfoResolver) marshalAndSend(data []byte, pid core.PeerID) e return err } - return res.Send(buff, pid) + return res.Send(buff, pid, source) } // SetDebugHandler sets a debug handler diff --git a/dataRetriever/resolvers/validatorInfoResolver_test.go b/dataRetriever/resolvers/validatorInfoResolver_test.go index 19f659660f9..d17fd1aedb4 100644 --- a/dataRetriever/resolvers/validatorInfoResolver_test.go +++ b/dataRetriever/resolvers/validatorInfoResolver_test.go @@ -19,6 +19,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" + "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -140,7 +141,7 @@ func TestValidatorInfoResolver_ProcessReceivedMessage(t *testing.T) { res, _ := resolvers.NewValidatorInfoResolver(createMockArgValidatorInfoResolver()) require.False(t, check.IfNil(res)) - err := res.ProcessReceivedMessage(nil, fromConnectedPeer) + err := res.ProcessReceivedMessage(nil, fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.Equal(t, dataRetriever.ErrNilMessage, err) }) t.Run("canProcessMessage due to antiflood handler error", func(t *testing.T) { @@ -155,7 +156,7 @@ func TestValidatorInfoResolver_ProcessReceivedMessage(t *testing.T) { res, _ := resolvers.NewValidatorInfoResolver(args) require.False(t, check.IfNil(res)) - err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, nil), fromConnectedPeer) + err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, nil), fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.True(t, errors.Is(err, expectedErr)) assert.False(t, args.Throttler.(*mock.ThrottlerStub).StartWasCalled()) assert.False(t, args.Throttler.(*mock.ThrottlerStub).EndWasCalled()) 
@@ -172,7 +173,7 @@ func TestValidatorInfoResolver_ProcessReceivedMessage(t *testing.T) { res, _ := resolvers.NewValidatorInfoResolver(args) require.False(t, check.IfNil(res)) - err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, nil), fromConnectedPeer) + err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, nil), fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.True(t, errors.Is(err, expectedErr)) }) @@ -182,7 +183,7 @@ func TestValidatorInfoResolver_ProcessReceivedMessage(t *testing.T) { res, _ := resolvers.NewValidatorInfoResolver(createMockArgValidatorInfoResolver()) require.False(t, check.IfNil(res)) - err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.NonceType, []byte("hash")), fromConnectedPeer) + err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.NonceType, []byte("hash")), fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.True(t, errors.Is(err, dataRetriever.ErrRequestTypeNotImplemented)) }) @@ -204,7 +205,7 @@ func TestValidatorInfoResolver_ProcessReceivedMessage(t *testing.T) { res, _ := resolvers.NewValidatorInfoResolver(args) require.False(t, check.IfNil(res)) - err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, []byte("hash")), fromConnectedPeer) + err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, []byte("hash")), fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.Equal(t, expectedErr, err) }) t.Run("data found in cache but marshal fails", func(t *testing.T) { @@ -228,7 +229,7 @@ func TestValidatorInfoResolver_ProcessReceivedMessage(t *testing.T) { res, _ := resolvers.NewValidatorInfoResolver(args) require.False(t, check.IfNil(res)) - err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, []byte("hash")), fromConnectedPeer) + err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, []byte("hash")), fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.NotNil(t, err) }) t.Run("data found in storage but marshal fails", func(t *testing.T) { @@ -257,7 +258,7 @@ func TestValidatorInfoResolver_ProcessReceivedMessage(t *testing.T) { res, _ := resolvers.NewValidatorInfoResolver(args) require.False(t, check.IfNil(res)) - err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, []byte("hash")), fromConnectedPeer) + err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, []byte("hash")), fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.NotNil(t, err) }) t.Run("should work, data from cache", func(t *testing.T) { @@ -272,7 +273,7 @@ func TestValidatorInfoResolver_ProcessReceivedMessage(t *testing.T) { }, } args.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { marshMock := marshallerMock.MarshalizerMock{} b := &batch.Batch{} _ = marshMock.Unmarshal(b, buff) @@ -289,7 +290,7 @@ func TestValidatorInfoResolver_ProcessReceivedMessage(t *testing.T) { res, _ := resolvers.NewValidatorInfoResolver(args) require.False(t, check.IfNil(res)) - err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, []byte("hash")), fromConnectedPeer) + err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, []byte("hash")), fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.Nil(t, err) assert.True(t, wasCalled) }) @@ -311,7 +312,7 @@ func TestValidatorInfoResolver_ProcessReceivedMessage(t *testing.T) { }, } args.SenderResolver = 
&mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { marshMock := marshallerMock.MarshalizerMock{} b := &batch.Batch{} _ = marshMock.Unmarshal(b, buff) @@ -328,7 +329,7 @@ func TestValidatorInfoResolver_ProcessReceivedMessage(t *testing.T) { res, _ := resolvers.NewValidatorInfoResolver(args) require.False(t, check.IfNil(res)) - err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, []byte("hash")), fromConnectedPeer) + err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, []byte("hash")), fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.Nil(t, err) assert.True(t, wasCalled) }) @@ -352,7 +353,7 @@ func TestValidatorInfoResolver_ProcessReceivedMessage(t *testing.T) { res, _ := resolvers.NewValidatorInfoResolver(args) require.False(t, check.IfNil(res)) - err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, []byte("hash")), fromConnectedPeer) + err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, []byte("hash")), fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.Equal(t, expectedErr, err) }) t.Run("no hash found", func(t *testing.T) { @@ -376,7 +377,7 @@ func TestValidatorInfoResolver_ProcessReceivedMessage(t *testing.T) { Data: [][]byte{[]byte("hash")}, } buff, _ := args.Marshaller.Marshal(b) - err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, buff), fromConnectedPeer) + err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, buff), fromConnectedPeer, &p2pmocks.MessengerStub{}) require.NotNil(t, err) assert.True(t, strings.Contains(err.Error(), dataRetriever.ErrValidatorInfoNotFound.Error())) }) @@ -406,7 +407,7 @@ func TestValidatorInfoResolver_ProcessReceivedMessage(t *testing.T) { Data: [][]byte{[]byte("hash")}, } buff, _ := args.Marshaller.Marshal(b) - err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, buff), fromConnectedPeer) + err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, buff), fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.Equal(t, expectedErr, err) }) t.Run("send returns error", func(t *testing.T) { @@ -431,7 +432,7 @@ func TestValidatorInfoResolver_ProcessReceivedMessage(t *testing.T) { }, } args.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { return expectedErr }, } @@ -440,7 +441,7 @@ func TestValidatorInfoResolver_ProcessReceivedMessage(t *testing.T) { require.False(t, check.IfNil(res)) buff, _ := args.Marshaller.Marshal(&batch.Batch{Data: providedHashes}) - err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, buff), fromConnectedPeer) + err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, buff), fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.Equal(t, expectedErr, err) }) t.Run("all hashes in one chunk should work", func(t *testing.T) { @@ -466,7 +467,7 @@ func TestValidatorInfoResolver_ProcessReceivedMessage(t *testing.T) { }, } args.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { marshMock := marshallerMock.MarshalizerMock{} b := &batch.Batch{} _ = marshMock.Unmarshal(b, buff) @@ -488,7 +489,7 @@ 
func TestValidatorInfoResolver_ProcessReceivedMessage(t *testing.T) { require.False(t, check.IfNil(res)) buff, _ := args.Marshaller.Marshal(&batch.Batch{Data: providedHashes}) - err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, buff), fromConnectedPeer) + err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, buff), fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.Nil(t, err) assert.True(t, wasCalled) }) @@ -524,7 +525,7 @@ func TestValidatorInfoResolver_ProcessReceivedMessage(t *testing.T) { } numOfCallsSend := 0 args.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { marshMock := marshallerMock.MarshalizerMock{} b := &batch.Batch{} _ = marshMock.Unmarshal(b, buff) @@ -550,7 +551,7 @@ func TestValidatorInfoResolver_ProcessReceivedMessage(t *testing.T) { require.False(t, check.IfNil(res)) buff, _ := args.Marshaller.Marshal(&batch.Batch{Data: providedHashes}) - err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, buff), fromConnectedPeer) + err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, buff), fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.Nil(t, err) assert.Equal(t, 2, numOfCallsSend) // ~677 messages in a chunk assert.Equal(t, 0, len(providedDataMap)) // all items should have been deleted on Send diff --git a/dataRetriever/topicSender/baseTopicSender.go b/dataRetriever/topicSender/baseTopicSender.go index f5470a87dfb..c301995fe57 100644 --- a/dataRetriever/topicSender/baseTopicSender.go +++ b/dataRetriever/topicSender/baseTopicSender.go @@ -22,57 +22,76 @@ const ( // ArgBaseTopicSender is the base DTO used to create a new topic sender instance type ArgBaseTopicSender struct { - Messenger dataRetriever.MessageHandler - TopicName string - OutputAntiflooder dataRetriever.P2PAntifloodHandler - PreferredPeersHolder dataRetriever.PreferredPeersHolderHandler - TargetShardId uint32 + MainMessenger p2p.Messenger + FullArchiveMessenger p2p.Messenger + TopicName string + OutputAntiflooder dataRetriever.P2PAntifloodHandler + MainPreferredPeersHolder dataRetriever.PreferredPeersHolderHandler + FullArchivePreferredPeersHolder dataRetriever.PreferredPeersHolderHandler + TargetShardId uint32 } type baseTopicSender struct { - messenger dataRetriever.MessageHandler - topicName string - outputAntiflooder dataRetriever.P2PAntifloodHandler - mutDebugHandler sync.RWMutex - debugHandler dataRetriever.DebugHandler - preferredPeersHolderHandler dataRetriever.PreferredPeersHolderHandler - targetShardId uint32 + mainMessenger p2p.Messenger + fullArchiveMessenger p2p.Messenger + topicName string + outputAntiflooder dataRetriever.P2PAntifloodHandler + mutDebugHandler sync.RWMutex + debugHandler dataRetriever.DebugHandler + mainPreferredPeersHolderHandler dataRetriever.PreferredPeersHolderHandler + fullArchivePreferredPeersHolderHandler dataRetriever.PreferredPeersHolderHandler + targetShardId uint32 } func createBaseTopicSender(args ArgBaseTopicSender) *baseTopicSender { return &baseTopicSender{ - messenger: args.Messenger, - topicName: args.TopicName, - outputAntiflooder: args.OutputAntiflooder, - debugHandler: handler.NewDisabledInterceptorDebugHandler(), - preferredPeersHolderHandler: args.PreferredPeersHolder, - targetShardId: args.TargetShardId, + mainMessenger: args.MainMessenger, + fullArchiveMessenger: args.FullArchiveMessenger, + topicName: 
args.TopicName, + outputAntiflooder: args.OutputAntiflooder, + debugHandler: handler.NewDisabledInterceptorDebugHandler(), + mainPreferredPeersHolderHandler: args.MainPreferredPeersHolder, + fullArchivePreferredPeersHolderHandler: args.FullArchivePreferredPeersHolder, + targetShardId: args.TargetShardId, } } func checkBaseTopicSenderArgs(args ArgBaseTopicSender) error { - if check.IfNil(args.Messenger) { - return dataRetriever.ErrNilMessenger + if check.IfNil(args.MainMessenger) { + return fmt.Errorf("%w on main network", dataRetriever.ErrNilMessenger) + } + if check.IfNil(args.FullArchiveMessenger) { + return fmt.Errorf("%w on full archive network", dataRetriever.ErrNilMessenger) } if check.IfNil(args.OutputAntiflooder) { return dataRetriever.ErrNilAntifloodHandler } - if check.IfNil(args.PreferredPeersHolder) { - return dataRetriever.ErrNilPreferredPeersHolder + if check.IfNil(args.MainPreferredPeersHolder) { + return fmt.Errorf("%w on main network", dataRetriever.ErrNilPreferredPeersHolder) + } + if check.IfNil(args.FullArchivePreferredPeersHolder) { + return fmt.Errorf("%w on full archive network", dataRetriever.ErrNilPreferredPeersHolder) } return nil } -func (baseSender *baseTopicSender) sendToConnectedPeer(topic string, buff []byte, peer core.PeerID) error { +func (baseSender *baseTopicSender) sendToConnectedPeer( + topic string, + buff []byte, + peer core.PeerID, + messenger p2p.MessageHandler, +) error { msg := &factory.Message{ DataField: buff, PeerField: peer, TopicField: topic, } - shouldAvoidAntiFloodCheck := baseSender.preferredPeersHolderHandler.Contains(peer) + isPreferredOnMain := baseSender.mainPreferredPeersHolderHandler.Contains(peer) + isPreferredOnFullArchive := baseSender.fullArchivePreferredPeersHolderHandler.Contains(peer) + shouldAvoidAntiFloodCheck := isPreferredOnMain || isPreferredOnFullArchive if shouldAvoidAntiFloodCheck { - return baseSender.messenger.SendToConnectedPeer(topic, buff, peer) + return messenger.SendToConnectedPeer(topic, buff, peer) } err := baseSender.outputAntiflooder.CanProcessMessage(msg, peer) @@ -84,7 +103,7 @@ func (baseSender *baseTopicSender) sendToConnectedPeer(topic string, buff []byte ) } - return baseSender.messenger.SendToConnectedPeer(topic, buff, peer) + return messenger.SendToConnectedPeer(topic, buff, peer) } // DebugHandler returns the debug handler used in resolvers diff --git a/dataRetriever/topicSender/diffPeerListCreator.go b/dataRetriever/topicSender/diffPeerListCreator.go index 8261fa590ab..ff7e83ae6e5 100644 --- a/dataRetriever/topicSender/diffPeerListCreator.go +++ b/dataRetriever/topicSender/diffPeerListCreator.go @@ -73,11 +73,6 @@ func (dplc *diffPeerListCreator) IntraShardPeerList() []core.PeerID { return dplc.messenger.ConnectedPeersOnTopic(dplc.intraShardTopic) } -// FullHistoryList returns the full history peers list -func (dplc *diffPeerListCreator) FullHistoryList() []core.PeerID { - return dplc.messenger.ConnectedFullHistoryPeersOnTopic(dplc.intraShardTopic) -} - // IsInterfaceNil returns true if there is no value under the interface func (dplc *diffPeerListCreator) IsInterfaceNil() bool { return dplc == nil diff --git a/dataRetriever/topicSender/diffPeerListCreator_test.go b/dataRetriever/topicSender/diffPeerListCreator_test.go index 4b63b757608..4a9e043d281 100644 --- a/dataRetriever/topicSender/diffPeerListCreator_test.go +++ b/dataRetriever/topicSender/diffPeerListCreator_test.go @@ -240,21 +240,3 @@ func TestDiffPeerListCreator_IntraShardPeersList(t *testing.T) { assert.Equal(t, peerList, 
dplc.IntraShardPeerList()) } - -func TestDiffPeerListCreator_FullHistoryList(t *testing.T) { - t.Parallel() - - peerList := []core.PeerID{"pid1", "pid2"} - dplc, _ := topicsender.NewDiffPeerListCreator( - &mock.MessageHandlerStub{ - ConnectedFullHistoryPeersOnTopicCalled: func(topic string) []core.PeerID { - return peerList - }, - }, - mainTopic, - intraTopic, - excludedTopic, - ) - - assert.Equal(t, peerList, dplc.FullHistoryList()) -} diff --git a/dataRetriever/topicSender/topicRequestSender.go b/dataRetriever/topicSender/topicRequestSender.go index 22831cdf038..4358cfe5c1d 100644 --- a/dataRetriever/topicSender/topicRequestSender.go +++ b/dataRetriever/topicSender/topicRequestSender.go @@ -9,6 +9,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core/random" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/p2p" ) var _ dataRetriever.TopicRequestSender = (*topicRequestSender)(nil) @@ -120,15 +121,37 @@ func (trs *topicRequestSender) SendOnRequestTopic(rd *dataRetriever.RequestData, if trs.currentNetworkEpochProviderHandler.EpochIsActiveInNetwork(rd.Epoch) { crossPeers = trs.peerListCreator.CrossShardPeerList() preferredPeer := trs.getPreferredPeer(trs.targetShardId) - numSentCross = trs.sendOnTopic(crossPeers, preferredPeer, topicToSendRequest, buff, trs.numCrossShardPeers, core.CrossShardPeer.String()) + numSentCross = trs.sendOnTopic( + crossPeers, + preferredPeer, + topicToSendRequest, + buff, + trs.numCrossShardPeers, + core.CrossShardPeer.String(), + trs.mainMessenger) intraPeers = trs.peerListCreator.IntraShardPeerList() preferredPeer = trs.getPreferredPeer(trs.selfShardId) - numSentIntra = trs.sendOnTopic(intraPeers, preferredPeer, topicToSendRequest, buff, trs.numIntraShardPeers, core.IntraShardPeer.String()) + numSentIntra = trs.sendOnTopic( + intraPeers, + preferredPeer, + topicToSendRequest, + buff, + trs.numIntraShardPeers, + core.IntraShardPeer.String(), + trs.mainMessenger) } else { - // TODO: select preferred peers of type full history as well. 
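With FullHistoryList removed from the peer list creator, the request sender below takes its full-history peers straight from the full archive messenger instead. The following is a condensed sketch of the new routing decision, with stand-in types in place of core.PeerID, p2p.Messenger and the real peer list creator.

package sketch

type peerID string

type messenger interface {
	ConnectedPeers() []peerID
	SendToConnectedPeer(topic string, buff []byte, peer peerID) error
}

type epochProvider interface {
	EpochIsActiveInNetwork(epoch uint32) bool
}

type peerListCreator interface {
	CrossShardPeerList() []peerID
	IntraShardPeerList() []peerID
}

type requestSenderSketch struct {
	mainMessenger        messenger
	fullArchiveMessenger messenger
	epochProvider        epochProvider
	peerListCreator      peerListCreator
}

// sendRequest shows the routing decision introduced by this change: epochs still active
// on the main network are requested over the main messenger using the cross/intra peer
// lists, while older epochs go to whatever peers the full archive messenger is
// currently connected to.
func (trs *requestSenderSketch) sendRequest(epoch uint32, topic string, buff []byte) {
	if trs.epochProvider.EpochIsActiveInNetwork(epoch) {
		trs.sendToAll(trs.peerListCreator.CrossShardPeerList(), topic, buff, trs.mainMessenger)
		trs.sendToAll(trs.peerListCreator.IntraShardPeerList(), topic, buff, trs.mainMessenger)
		return
	}

	trs.sendToAll(trs.fullArchiveMessenger.ConnectedPeers(), topic, buff, trs.fullArchiveMessenger)
}

func (trs *requestSenderSketch) sendToAll(peers []peerID, topic string, buff []byte, dest messenger) {
	for _, peer := range peers {
		_ = dest.SendToConnectedPeer(topic, buff, peer) // send errors are skipped in this sketch
	}
}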
- fullHistoryPeers = trs.peerListCreator.FullHistoryList() - numSentIntra = trs.sendOnTopic(fullHistoryPeers, "", topicToSendRequest, buff, trs.numFullHistoryPeers, core.FullHistoryPeer.String()) + preferredPeer := trs.getPreferredFullArchivePeer() + fullHistoryPeers = trs.fullArchiveMessenger.ConnectedPeers() + + numSentIntra = trs.sendOnTopic( + fullHistoryPeers, + preferredPeer, + topicToSendRequest, + buff, + trs.numFullHistoryPeers, + core.FullHistoryPeer.String(), + trs.fullArchiveMessenger) } trs.callDebugHandler(originalHashes, numSentIntra, numSentCross) @@ -168,6 +191,7 @@ func (trs *topicRequestSender) sendOnTopic( buff []byte, maxToSend int, peerType string, + messenger p2p.MessageHandler, ) int { if len(peerList) == 0 || maxToSend == 0 { return 0 @@ -189,7 +213,7 @@ func (trs *topicRequestSender) sendOnTopic( for idx := 0; idx < len(shuffledIndexes); idx++ { peer := getPeerID(shuffledIndexes[idx], topRatedPeersList, preferredPeer, peerType, topicToSendRequest, histogramMap) - err := trs.sendToConnectedPeer(topicToSendRequest, buff, peer) + err := trs.sendToConnectedPeer(topicToSendRequest, buff, peer, messenger) if err != nil { continue } @@ -226,13 +250,11 @@ func (trs *topicRequestSender) getPreferredPeer(shardID uint32) core.PeerID { return "" } - randomIdx := trs.randomizer.Intn(len(peersInShard)) - - return peersInShard[randomIdx] + return trs.getRandomPeerID(peersInShard) } func (trs *topicRequestSender) getPreferredPeersInShard(shardID uint32) ([]core.PeerID, bool) { - preferredPeers := trs.preferredPeersHolderHandler.Get() + preferredPeers := trs.mainPreferredPeersHolderHandler.Get() peers, found := preferredPeers[shardID] if !found || len(peers) == 0 { @@ -242,6 +264,33 @@ func (trs *topicRequestSender) getPreferredPeersInShard(shardID uint32) ([]core. return peers, true } +func (trs *topicRequestSender) getPreferredFullArchivePeer() core.PeerID { + preferredPeersMap := trs.fullArchivePreferredPeersHolderHandler.Get() + preferredPeersSlice := mapToSlice(preferredPeersMap) + + if len(preferredPeersSlice) == 0 { + return "" + } + + return trs.getRandomPeerID(preferredPeersSlice) +} + +func (trs *topicRequestSender) getRandomPeerID(peerIDs []core.PeerID) core.PeerID { + randomIdx := trs.randomizer.Intn(len(peerIDs)) + + return peerIDs[randomIdx] +} + +func mapToSlice(initialMap map[uint32][]core.PeerID) []core.PeerID { + newSlice := make([]core.PeerID, 0, len(initialMap)) + + for _, peerIDsOnShard := range initialMap { + newSlice = append(newSlice, peerIDsOnShard...) 
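The mapToSlice helper added here flattens the shard-keyed preferred peers map into a single list so that a random preferred full archive peer can be chosen. A small stand-alone illustration follows (stand-in peerID type, math/rand instead of the sender's injected randomizer).

package sketch

import "math/rand"

type preferredPeerID string

// flattenPeersMap mirrors mapToSlice: the preferred peers holder is keyed by shard,
// but for the full archive network a single flat list is sufficient.
func flattenPeersMap(peersByShard map[uint32][]preferredPeerID) []preferredPeerID {
	flat := make([]preferredPeerID, 0, len(peersByShard))
	for _, peers := range peersByShard {
		flat = append(flat, peers...)
	}
	return flat
}

// pickPreferredFullArchivePeer returns a random preferred peer, or "" when none is
// configured, in which case the caller falls back to the connected peers list.
func pickPreferredFullArchivePeer(peersByShard map[uint32][]preferredPeerID) preferredPeerID {
	flat := flattenPeersMap(peersByShard)
	if len(flat) == 0 {
		return ""
	}
	return flat[rand.Intn(len(flat))]
}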
+ } + + return newSlice +} + // SetNumPeersToQuery will set the number of intra shard and cross shard number of peers to query func (trs *topicRequestSender) SetNumPeersToQuery(intra int, cross int) { trs.mutNumPeersToQuery.Lock() diff --git a/dataRetriever/topicSender/topicRequestSender_test.go b/dataRetriever/topicSender/topicRequestSender_test.go index 83fef0bba9c..cff654b3fe4 100644 --- a/dataRetriever/topicSender/topicRequestSender_test.go +++ b/dataRetriever/topicSender/topicRequestSender_test.go @@ -20,10 +20,16 @@ import ( func createMockArgBaseTopicSender() topicsender.ArgBaseTopicSender { return topicsender.ArgBaseTopicSender{ - Messenger: &mock.MessageHandlerStub{}, - TopicName: "topic", - OutputAntiflooder: &mock.P2PAntifloodHandlerStub{}, - PreferredPeersHolder: &p2pmocks.PeersHolderStub{ + MainMessenger: &p2pmocks.MessengerStub{}, + FullArchiveMessenger: &p2pmocks.MessengerStub{}, + TopicName: "topic", + OutputAntiflooder: &mock.P2PAntifloodHandlerStub{}, + MainPreferredPeersHolder: &p2pmocks.PeersHolderStub{ + GetCalled: func() map[uint32][]core.PeerID { + return map[uint32][]core.PeerID{} + }, + }, + FullArchivePreferredPeersHolder: &p2pmocks.PeersHolderStub{ GetCalled: func() map[uint32][]core.PeerID { return map[uint32][]core.PeerID{} }, @@ -50,14 +56,23 @@ func createMockArgTopicRequestSender() topicsender.ArgTopicRequestSender { func TestNewTopicRequestSender(t *testing.T) { t.Parallel() - t.Run("nil Messenger should error", func(t *testing.T) { + t.Run("nil MainMessenger should error", func(t *testing.T) { + t.Parallel() + + arg := createMockArgTopicRequestSender() + arg.MainMessenger = nil + trs, err := topicsender.NewTopicRequestSender(arg) + assert.True(t, check.IfNil(trs)) + assert.True(t, errors.Is(err, dataRetriever.ErrNilMessenger)) + }) + t.Run("nil FullArchiveMessenger should error", func(t *testing.T) { t.Parallel() arg := createMockArgTopicRequestSender() - arg.Messenger = nil + arg.FullArchiveMessenger = nil trs, err := topicsender.NewTopicRequestSender(arg) assert.True(t, check.IfNil(trs)) - assert.Equal(t, dataRetriever.ErrNilMessenger, err) + assert.True(t, errors.Is(err, dataRetriever.ErrNilMessenger)) }) t.Run("nil OutputAntiflooder should error", func(t *testing.T) { t.Parallel() @@ -68,14 +83,23 @@ func TestNewTopicRequestSender(t *testing.T) { assert.True(t, check.IfNil(trs)) assert.Equal(t, dataRetriever.ErrNilAntifloodHandler, err) }) - t.Run("nil PreferredPeersHolder should error", func(t *testing.T) { + t.Run("nil MainPreferredPeersHolder should error", func(t *testing.T) { t.Parallel() arg := createMockArgTopicRequestSender() - arg.PreferredPeersHolder = nil + arg.MainPreferredPeersHolder = nil trs, err := topicsender.NewTopicRequestSender(arg) assert.True(t, check.IfNil(trs)) - assert.Equal(t, dataRetriever.ErrNilPreferredPeersHolder, err) + assert.True(t, errors.Is(err, dataRetriever.ErrNilPreferredPeersHolder)) + }) + t.Run("nil FullArchivePreferredPeersHolder should error", func(t *testing.T) { + t.Parallel() + + arg := createMockArgTopicRequestSender() + arg.FullArchivePreferredPeersHolder = nil + trs, err := topicsender.NewTopicRequestSender(arg) + assert.True(t, check.IfNil(trs)) + assert.True(t, errors.Is(err, dataRetriever.ErrNilPreferredPeersHolder)) }) t.Run("nil Marshaller should error", func(t *testing.T) { t.Parallel() @@ -230,7 +254,7 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { sentToPid2 := false arg := createMockArgTopicRequestSender() - arg.Messenger = &mock.MessageHandlerStub{ + arg.MainMessenger = 
&p2pmocks.MessengerStub{ SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { if bytes.Equal(peerID.Bytes(), pID1.Bytes()) { sentToPid1 = true @@ -242,6 +266,12 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { return nil }, } + arg.FullArchiveMessenger = &p2pmocks.MessengerStub{ + SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { + assert.Fail(t, "should have not been called") + return nil + }, + } arg.PeerListCreator = &mock.PeerListCreatorStub{ CrossShardPeerListCalled: func() []core.PeerID { return []core.PeerID{pID1} @@ -275,19 +305,24 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { sentToFullHistoryPeer := false arg := createMockArgTopicRequestSender() - arg.Messenger = &mock.MessageHandlerStub{ + arg.MainMessenger = &p2pmocks.MessengerStub{ SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { - if bytes.Equal(peerID.Bytes(), pIDfullHistory.Bytes()) { - sentToFullHistoryPeer = true - } + assert.Fail(t, "should have not been called") return nil }, } - arg.PeerListCreator = &mock.PeerListCreatorStub{ - FullHistoryListCalled: func() []core.PeerID { + arg.FullArchiveMessenger = &p2pmocks.MessengerStub{ + ConnectedPeersCalled: func() []core.PeerID { return []core.PeerID{pIDfullHistory} }, + SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { + if bytes.Equal(peerID.Bytes(), pIDfullHistory.Bytes()) { + sentToFullHistoryPeer = true + } + + return nil + }, } arg.CurrentNetworkEpochProvider = &mock.CurrentNetworkEpochProviderStub{ EpochIsActiveInNetworkCalled: func(epoch uint32) bool { @@ -308,7 +343,7 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { assert.True(t, sentToFullHistoryPeer) assert.Equal(t, 1, decreaseCalledCounter) }) - t.Run("should work and send to preferred peers", func(t *testing.T) { + t.Run("should work and send to preferred regular peers", func(t *testing.T) { t.Parallel() selfShardID := uint32(0) @@ -341,7 +376,7 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { return []core.PeerID{regularPeer1} }, } - arg.PreferredPeersHolder = &p2pmocks.PeersHolderStub{ + arg.MainPreferredPeersHolder = &p2pmocks.PeersHolderStub{ GetCalled: func() map[uint32][]core.PeerID { return map[uint32][]core.PeerID{ selfShardID: preferredPeersShard0, @@ -351,7 +386,7 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { } arg.NumCrossShardPeers = 5 arg.NumIntraShardPeers = 5 - arg.Messenger = &mock.MessageHandlerStub{ + arg.MainMessenger = &p2pmocks.MessengerStub{ SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { if strings.HasPrefix(string(peerID), "prefPIDsh0") { countPrefPeersSh0++ @@ -370,7 +405,7 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { assert.Nil(t, err) assert.Equal(t, 1, countPrefPeersSh1) }) - t.Run("should work and send to preferred cross peer first", func(t *testing.T) { + t.Run("should work and send to preferred regular cross peer first", func(t *testing.T) { t.Parallel() targetShardID := uint32(37) @@ -390,7 +425,7 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { return []core.PeerID{} }, } - arg.PreferredPeersHolder = &p2pmocks.PeersHolderStub{ + arg.MainPreferredPeersHolder = &p2pmocks.PeersHolderStub{ GetCalled: func() map[uint32][]core.PeerID { return map[uint32][]core.PeerID{ targetShardID: {pidPreferred}, @@ -398,7 +433,7 @@ func TestTopicResolverSender_SendOnRequestTopic(t 
*testing.T) { }, } - arg.Messenger = &mock.MessageHandlerStub{ + arg.MainMessenger = &p2pmocks.MessengerStub{ SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { if bytes.Equal(peerID.Bytes(), pidPreferred.Bytes()) { sentToPreferredPeer = true @@ -415,7 +450,7 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { assert.Nil(t, err) assert.True(t, sentToPreferredPeer) }) - t.Run("should work and send to preferred intra peer first", func(t *testing.T) { + t.Run("should work and send to preferred regular intra peer first", func(t *testing.T) { t.Parallel() selfShardID := uint32(37) @@ -435,7 +470,7 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { return []core.PeerID{regularPeer0, regularPeer1, regularPeer0, regularPeer1} }, } - arg.PreferredPeersHolder = &p2pmocks.PeersHolderStub{ + arg.MainPreferredPeersHolder = &p2pmocks.PeersHolderStub{ GetCalled: func() map[uint32][]core.PeerID { return map[uint32][]core.PeerID{ selfShardID: {pidPreferred}, @@ -443,7 +478,7 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { }, } - arg.Messenger = &mock.MessageHandlerStub{ + arg.MainMessenger = &p2pmocks.MessengerStub{ SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { if bytes.Equal(peerID.Bytes(), pidPreferred.Bytes()) { sentToPreferredPeer = true @@ -465,6 +500,54 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { assert.Nil(t, err) assert.True(t, sentToPreferredPeer) }) + t.Run("should work and send to preferred full archive first", func(t *testing.T) { + t.Parallel() + + selfShardID := uint32(37) + pidPreferred := core.PeerID("preferred peer") + sentToPreferredPeer := false + regularPeer0, regularPeer1 := core.PeerID("peer0"), core.PeerID("peer1") + + arg := createMockArgTopicRequestSender() + arg.NumFullHistoryPeers = 2 + arg.CurrentNetworkEpochProvider = &mock.CurrentNetworkEpochProviderStub{ + EpochIsActiveInNetworkCalled: func(epoch uint32) bool { + return false + }, + } + arg.FullArchivePreferredPeersHolder = &p2pmocks.PeersHolderStub{ + GetCalled: func() map[uint32][]core.PeerID { + return map[uint32][]core.PeerID{ + selfShardID: {pidPreferred}, + } + }, + } + arg.FullArchiveMessenger = &p2pmocks.MessengerStub{ + ConnectedPeersCalled: func() []core.PeerID { + return []core.PeerID{regularPeer0, regularPeer1} + }, + SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { + if bytes.Equal(peerID.Bytes(), pidPreferred.Bytes()) { + sentToPreferredPeer = true + } + + return nil + }, + } + arg.MainMessenger = &p2pmocks.MessengerStub{ + SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { + assert.Fail(t, "should not have been called") + + return nil + }, + } + + trs, _ := topicsender.NewTopicRequestSender(arg) + + err := trs.SendOnRequestTopic(&dataRetriever.RequestData{}, defaultHashes) + assert.Nil(t, err) + assert.True(t, sentToPreferredPeer) + }) t.Run("should work and skip antiflood checks for preferred peers", func(t *testing.T) { t.Parallel() @@ -486,7 +569,7 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { return []core.PeerID{} }, } - arg.PreferredPeersHolder = &p2pmocks.PeersHolderStub{ + arg.MainPreferredPeersHolder = &p2pmocks.PeersHolderStub{ GetCalled: func() map[uint32][]core.PeerID { return map[uint32][]core.PeerID{ targetShardID: {pidPreferred}, @@ -497,7 +580,7 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { }, } - arg.Messenger = 
&mock.MessageHandlerStub{ + arg.MainMessenger = &p2pmocks.MessengerStub{ SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { if peerID == pidPreferred { sentToPreferredPeer = true @@ -542,7 +625,7 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { return []core.PeerID{} }, } - arg.PreferredPeersHolder = &p2pmocks.PeersHolderStub{ + arg.MainPreferredPeersHolder = &p2pmocks.PeersHolderStub{ GetCalled: func() map[uint32][]core.PeerID { return map[uint32][]core.PeerID{ 37: {pidPreferred}, @@ -550,7 +633,7 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { }, } - arg.Messenger = &mock.MessageHandlerStub{ + arg.MainMessenger = &p2pmocks.MessengerStub{ SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { if bytes.Equal(peerID.Bytes(), pidPreferred.Bytes()) { sentToPreferredPeer = true @@ -574,7 +657,7 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { numSent := 0 arg := createMockArgTopicRequestSender() - arg.Messenger = &mock.MessageHandlerStub{ + arg.MainMessenger = &p2pmocks.MessengerStub{ SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { numSent++ @@ -604,7 +687,7 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { numSent := 0 arg := createMockArgTopicRequestSender() - arg.Messenger = &mock.MessageHandlerStub{ + arg.MainMessenger = &p2pmocks.MessengerStub{ SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { if peerID == pidNotCalled { assert.Fail(t, fmt.Sprintf("should not have called pid %s", peerID)) @@ -638,7 +721,7 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { numSent := 0 arg := createMockArgTopicRequestSender() - arg.Messenger = &mock.MessageHandlerStub{ + arg.MainMessenger = &p2pmocks.MessengerStub{ SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { if peerID == pidNotCalled { assert.Fail(t, fmt.Sprintf("should not have called pid %s", peerID)) @@ -671,7 +754,7 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { sentToPid1 := false arg := createMockArgTopicRequestSender() - arg.Messenger = &mock.MessageHandlerStub{ + arg.MainMessenger = &p2pmocks.MessengerStub{ SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { if bytes.Equal(peerID.Bytes(), pID1.Bytes()) { sentToPid1 = true diff --git a/dataRetriever/topicSender/topicResolverSender.go b/dataRetriever/topicSender/topicResolverSender.go index 59f33f083e8..6c65afed900 100644 --- a/dataRetriever/topicSender/topicResolverSender.go +++ b/dataRetriever/topicSender/topicResolverSender.go @@ -3,6 +3,7 @@ package topicsender import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/p2p" ) var _ dataRetriever.TopicResolverSender = (*topicResolverSender)(nil) @@ -30,8 +31,8 @@ func NewTopicResolverSender(arg ArgTopicResolverSender) (*topicResolverSender, e // Send is used to send an array buffer to a connected peer // It is used when replying to a request -func (trs *topicResolverSender) Send(buff []byte, peer core.PeerID) error { - return trs.sendToConnectedPeer(trs.topicName, buff, peer) +func (trs *topicResolverSender) Send(buff []byte, peer core.PeerID, destination p2p.MessageHandler) error { + return trs.sendToConnectedPeer(trs.topicName, buff, peer, destination) } // IsInterfaceNil returns true if there is no value under the interface diff --git 
a/dataRetriever/topicSender/topicResolverSender_test.go b/dataRetriever/topicSender/topicResolverSender_test.go index e51e46dfe90..2e8df0f47c9 100644 --- a/dataRetriever/topicSender/topicResolverSender_test.go +++ b/dataRetriever/topicSender/topicResolverSender_test.go @@ -22,15 +22,26 @@ func createMockArgTopicResolverSender() topicsender.ArgTopicResolverSender { } } -func TestNewTopicResolverSender_NilMessengerShouldErr(t *testing.T) { +func TestNewTopicResolverSender_NilMainMessengerShouldErr(t *testing.T) { t.Parallel() arg := createMockArgTopicResolverSender() - arg.Messenger = nil + arg.MainMessenger = nil trs, err := topicsender.NewTopicResolverSender(arg) assert.True(t, check.IfNil(trs)) - assert.Equal(t, dataRetriever.ErrNilMessenger, err) + assert.True(t, errors.Is(err, dataRetriever.ErrNilMessenger)) +} + +func TestNewTopicResolverSender_NilFullArchiveMessengerShouldErr(t *testing.T) { + t.Parallel() + + arg := createMockArgTopicResolverSender() + arg.FullArchiveMessenger = nil + trs, err := topicsender.NewTopicResolverSender(arg) + + assert.True(t, check.IfNil(trs)) + assert.True(t, errors.Is(err, dataRetriever.ErrNilMessenger)) } func TestNewTopicResolverSender_NilOutputAntiflooderShouldErr(t *testing.T) { @@ -44,15 +55,26 @@ func TestNewTopicResolverSender_NilOutputAntiflooderShouldErr(t *testing.T) { assert.Equal(t, dataRetriever.ErrNilAntifloodHandler, err) } -func TestNewTopicResolverSender_NilPreferredPeersHolderShouldErr(t *testing.T) { +func TestNewTopicResolverSender_NilMainPreferredPeersHolderShouldErr(t *testing.T) { + t.Parallel() + + arg := createMockArgTopicResolverSender() + arg.MainPreferredPeersHolder = nil + trs, err := topicsender.NewTopicResolverSender(arg) + + assert.True(t, check.IfNil(trs)) + assert.True(t, errors.Is(err, dataRetriever.ErrNilPreferredPeersHolder)) +} + +func TestNewTopicResolverSender_NilFullArchivePreferredPeersHolderShouldErr(t *testing.T) { t.Parallel() arg := createMockArgTopicResolverSender() - arg.PreferredPeersHolder = nil + arg.FullArchivePreferredPeersHolder = nil trs, err := topicsender.NewTopicResolverSender(arg) assert.True(t, check.IfNil(trs)) - assert.Equal(t, dataRetriever.ErrNilPreferredPeersHolder, err) + assert.True(t, errors.Is(err, dataRetriever.ErrNilPreferredPeersHolder)) } func TestNewTopicResolverSender_OkValsShouldWork(t *testing.T) { @@ -74,7 +96,7 @@ func TestTopicResolverSender_SendOutputAntiflooderErrorsShouldNotSendButError(t expectedErr := errors.New("can not send to peer") arg := createMockArgTopicResolverSender() - arg.Messenger = &mock.MessageHandlerStub{ + arg.MainMessenger = &p2pmocks.MessengerStub{ SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { assert.Fail(t, "send shouldn't have been called") @@ -93,7 +115,7 @@ func TestTopicResolverSender_SendOutputAntiflooderErrorsShouldNotSendButError(t } trs, _ := topicsender.NewTopicResolverSender(arg) - err := trs.Send(buffToSend, pID1) + err := trs.Send(buffToSend, pID1, arg.MainMessenger) assert.True(t, errors.Is(err, expectedErr)) } @@ -106,7 +128,7 @@ func TestTopicResolverSender_SendShouldNotCheckAntifloodForPreferred(t *testing. sendWasCalled := false arg := createMockArgTopicResolverSender() - arg.Messenger = &mock.MessageHandlerStub{ + arg.MainMessenger = &p2pmocks.MessengerStub{ SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { sendWasCalled = true return nil @@ -119,14 +141,14 @@ func TestTopicResolverSender_SendShouldNotCheckAntifloodForPreferred(t *testing. 
return nil }, } - arg.PreferredPeersHolder = &p2pmocks.PeersHolderStub{ + arg.MainPreferredPeersHolder = &p2pmocks.PeersHolderStub{ ContainsCalled: func(peerID core.PeerID) bool { return peerID == pID1 }, } trs, _ := topicsender.NewTopicResolverSender(arg) - err := trs.Send(buffToSend, pID1) + err := trs.Send(buffToSend, pID1, arg.MainMessenger) require.NoError(t, err) require.True(t, sendWasCalled) } @@ -137,24 +159,94 @@ func TestTopicResolverSender_SendShouldWork(t *testing.T) { pID1 := core.PeerID("peer1") sentToPid1 := false buffToSend := []byte("buff") - - arg := createMockArgTopicResolverSender() - arg.Messenger = &mock.MessageHandlerStub{ - SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { - if bytes.Equal(peerID.Bytes(), pID1.Bytes()) && - bytes.Equal(buff, buffToSend) { - sentToPid1 = true - } - - return nil - }, - } - trs, _ := topicsender.NewTopicResolverSender(arg) - - err := trs.Send(buffToSend, pID1) - - assert.Nil(t, err) - assert.True(t, sentToPid1) + t.Run("on main network", func(t *testing.T) { + t.Parallel() + + arg := createMockArgTopicResolverSender() + arg.MainMessenger = &p2pmocks.MessengerStub{ + SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { + if bytes.Equal(peerID.Bytes(), pID1.Bytes()) && + bytes.Equal(buff, buffToSend) { + sentToPid1 = true + } + + return nil + }, + } + arg.FullArchiveMessenger = &p2pmocks.MessengerStub{ + SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { + assert.Fail(t, "should have not been called") + + return nil + }, + } + wasMainCalled := false + arg.MainPreferredPeersHolder = &p2pmocks.PeersHolderStub{ + ContainsCalled: func(peerID core.PeerID) bool { + wasMainCalled = true + return false + }, + } + wasFullArchiveCalled := false + arg.FullArchivePreferredPeersHolder = &p2pmocks.PeersHolderStub{ + ContainsCalled: func(peerID core.PeerID) bool { + wasFullArchiveCalled = true + return false + }, + } + trs, _ := topicsender.NewTopicResolverSender(arg) + + err := trs.Send(buffToSend, pID1, arg.MainMessenger) + + assert.Nil(t, err) + assert.True(t, sentToPid1) + assert.True(t, wasMainCalled) + assert.True(t, wasFullArchiveCalled) + }) + t.Run("on full archive network", func(t *testing.T) { + t.Parallel() + + arg := createMockArgTopicResolverSender() + arg.FullArchiveMessenger = &p2pmocks.MessengerStub{ + SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { + if bytes.Equal(peerID.Bytes(), pID1.Bytes()) && + bytes.Equal(buff, buffToSend) { + sentToPid1 = true + } + + return nil + }, + } + arg.MainMessenger = &p2pmocks.MessengerStub{ + SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { + assert.Fail(t, "should have not been called") + + return nil + }, + } + wasFullArchiveCalled := false + arg.FullArchivePreferredPeersHolder = &p2pmocks.PeersHolderStub{ + ContainsCalled: func(peerID core.PeerID) bool { + wasFullArchiveCalled = true + return false + }, + } + wasMainCalled := false + arg.MainPreferredPeersHolder = &p2pmocks.PeersHolderStub{ + ContainsCalled: func(peerID core.PeerID) bool { + wasMainCalled = true + return false + }, + } + trs, _ := topicsender.NewTopicResolverSender(arg) + + err := trs.Send(buffToSend, pID1, arg.FullArchiveMessenger) + + assert.Nil(t, err) + assert.True(t, sentToPid1) + assert.True(t, wasMainCalled) + assert.True(t, wasFullArchiveCalled) + }) } func TestTopicResolverSender_Topic(t *testing.T) { diff --git 
a/epochStart/bootstrap/common.go b/epochStart/bootstrap/common.go index 7b1bcec3c88..4db54c14382 100644 --- a/epochStart/bootstrap/common.go +++ b/epochStart/bootstrap/common.go @@ -13,8 +13,11 @@ func checkArguments(args ArgsEpochStartBootstrap) error { if check.IfNil(args.GenesisShardCoordinator) { return fmt.Errorf("%s: %w", baseErrorMessage, epochStart.ErrNilShardCoordinator) } - if check.IfNil(args.Messenger) { - return fmt.Errorf("%s: %w", baseErrorMessage, epochStart.ErrNilMessenger) + if check.IfNil(args.MainMessenger) { + return fmt.Errorf("%s on main network: %w", baseErrorMessage, epochStart.ErrNilMessenger) + } + if check.IfNil(args.FullArchiveMessenger) { + return fmt.Errorf("%s on full archive network: %w", baseErrorMessage, epochStart.ErrNilMessenger) } if check.IfNil(args.EconomicsData) { return fmt.Errorf("%s: %w", baseErrorMessage, epochStart.ErrNilEconomicsData) diff --git a/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go b/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go index 5ab4c67d1bb..095f85e5c70 100644 --- a/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go +++ b/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go @@ -13,6 +13,7 @@ import ( "github.com/multiversx/mx-chain-go/epochStart/bootstrap/disabled" disabledFactory "github.com/multiversx/mx-chain-go/factory/disabled" disabledGenesis "github.com/multiversx/mx-chain-go/genesis/process/disabled" + "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/factory/interceptorscontainer" "github.com/multiversx/mx-chain-go/sharding" @@ -29,7 +30,8 @@ type ArgsEpochStartInterceptorContainer struct { CryptoComponents process.CryptoComponentsHolder Config config.Config ShardCoordinator sharding.Coordinator - Messenger process.TopicHandler + MainMessenger process.TopicHandler + FullArchiveMessenger process.TopicHandler DataPool dataRetriever.PoolsHolder WhiteListHandler update.WhiteListHandler WhiteListerVerifiedTxs update.WhiteListHandler @@ -40,24 +42,25 @@ type ArgsEpochStartInterceptorContainer struct { HeaderIntegrityVerifier process.HeaderIntegrityVerifier RequestHandler process.RequestHandler SignaturesHandler process.SignaturesHandler + NodeOperationMode p2p.NodeOperation } // NewEpochStartInterceptorsContainer will return a real interceptors container factory, but with many disabled components -func NewEpochStartInterceptorsContainer(args ArgsEpochStartInterceptorContainer) (process.InterceptorsContainer, error) { +func NewEpochStartInterceptorsContainer(args ArgsEpochStartInterceptorContainer) (process.InterceptorsContainer, process.InterceptorsContainer, error) { if check.IfNil(args.CoreComponents) { - return nil, epochStart.ErrNilCoreComponentsHolder + return nil, nil, epochStart.ErrNilCoreComponentsHolder } if check.IfNil(args.CryptoComponents) { - return nil, epochStart.ErrNilCryptoComponentsHolder + return nil, nil, epochStart.ErrNilCryptoComponentsHolder } if check.IfNil(args.CoreComponents.AddressPubKeyConverter()) { - return nil, epochStart.ErrNilPubkeyConverter + return nil, nil, epochStart.ErrNilPubkeyConverter } cryptoComponents := args.CryptoComponents.Clone().(process.CryptoComponentsHolder) err := cryptoComponents.SetMultiSignerContainer(disabled.NewMultiSignerContainer()) if err != nil { - return nil, err + return nil, nil, err } nodesCoordinator := disabled.NewNodesCoordinator() @@ -72,6 +75,7 @@ func NewEpochStartInterceptorsContainer(args 
ArgsEpochStartInterceptorContainer) epochStartTrigger := disabled.NewEpochStartTrigger() // TODO: move the peerShardMapper creation before boostrapComponents peerShardMapper := disabled.NewPeerShardMapper() + fullArchivePeerShardMapper := disabled.NewPeerShardMapper() hardforkTrigger := disabledFactory.HardforkTrigger() containerFactoryArgs := interceptorscontainer.CommonInterceptorsContainerFactoryArgs{ @@ -80,7 +84,8 @@ func NewEpochStartInterceptorsContainer(args ArgsEpochStartInterceptorContainer) Accounts: accountsAdapter, ShardCoordinator: args.ShardCoordinator, NodesCoordinator: nodesCoordinator, - Messenger: args.Messenger, + MainMessenger: args.MainMessenger, + FullArchiveMessenger: args.FullArchiveMessenger, Store: storer, DataPool: args.DataPool, MaxTxNonceDeltaAllowed: common.MaxTxNonceDeltaAllowed, @@ -100,24 +105,33 @@ func NewEpochStartInterceptorsContainer(args ArgsEpochStartInterceptorContainer) PeerSignatureHandler: cryptoComponents.PeerSignatureHandler(), SignaturesHandler: args.SignaturesHandler, HeartbeatExpiryTimespanInSec: args.Config.HeartbeatV2.HeartbeatExpiryTimespanInSec, - PeerShardMapper: peerShardMapper, + MainPeerShardMapper: peerShardMapper, + FullArchivePeerShardMapper: fullArchivePeerShardMapper, HardforkTrigger: hardforkTrigger, + NodeOperationMode: args.NodeOperationMode, } interceptorsContainerFactory, err := interceptorscontainer.NewMetaInterceptorsContainerFactory(containerFactoryArgs) if err != nil { - return nil, err + return nil, nil, err } - container, err := interceptorsContainerFactory.Create() + mainContainer, fullArchiveContainer, err := interceptorsContainerFactory.Create() if err != nil { - return nil, err + return nil, nil, err } - err = interceptorsContainerFactory.AddShardTrieNodeInterceptors(container) + err = interceptorsContainerFactory.AddShardTrieNodeInterceptors(mainContainer) if err != nil { - return nil, err + return nil, nil, err } - return container, nil + if args.NodeOperationMode == p2p.FullArchiveMode { + err = interceptorsContainerFactory.AddShardTrieNodeInterceptors(fullArchiveContainer) + if err != nil { + return nil, nil, err + } + } + + return mainContainer, fullArchiveContainer, nil } diff --git a/epochStart/bootstrap/fromLocalStorage.go b/epochStart/bootstrap/fromLocalStorage.go index 8b90d0d21a8..b4652bcacde 100644 --- a/epochStart/bootstrap/fromLocalStorage.go +++ b/epochStart/bootstrap/fromLocalStorage.go @@ -137,9 +137,13 @@ func (e *epochStartBootstrap) prepareEpochFromStorage() (Parameters, error) { } defer func() { - errClose := e.interceptorContainer.Close() + errClose := e.mainInterceptorContainer.Close() if errClose != nil { - log.Warn("prepareEpochFromStorage interceptorContainer.Close()", "error", errClose) + log.Warn("prepareEpochFromStorage mainInterceptorContainer.Close()", "error", errClose) + } + errClose = e.fullArchiveInterceptorContainer.Close() + if errClose != nil { + log.Warn("prepareEpochFromStorage fullArchiveInterceptorContainer.Close()", "error", errClose) } }() @@ -160,7 +164,8 @@ func (e *epochStartBootstrap) prepareEpochFromStorage() (Parameters, error) { return Parameters{}, err } - err = e.messenger.CreateTopic(common.ConsensusTopic+e.shardCoordinator.CommunicationIdentifier(e.shardCoordinator.SelfId()), true) + consensusTopic := common.ConsensusTopic + e.shardCoordinator.CommunicationIdentifier(e.shardCoordinator.SelfId()) + err = e.mainMessenger.CreateTopic(consensusTopic, true) if err != nil { return Parameters{}, err } diff --git a/epochStart/bootstrap/process.go 
b/epochStart/bootstrap/process.go index 68459a7bcfd..2f7846f9856 100644 --- a/epochStart/bootstrap/process.go +++ b/epochStart/bootstrap/process.go @@ -30,6 +30,7 @@ import ( "github.com/multiversx/mx-chain-go/epochStart/bootstrap/types" factoryDisabled "github.com/multiversx/mx-chain-go/factory/disabled" "github.com/multiversx/mx-chain-go/heartbeat/sender" + "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/block/preprocess" "github.com/multiversx/mx-chain-go/process/heartbeat/validator" @@ -92,7 +93,8 @@ type epochStartBootstrap struct { destinationShardAsObserver uint32 coreComponentsHolder process.CoreComponentsHolder cryptoComponentsHolder process.CryptoComponentsHolder - messenger Messenger + mainMessenger p2p.Messenger + fullArchiveMessenger p2p.Messenger generalConfig config.Config prefsConfig config.PreferencesConfig flagsConfig config.ContextFlagsConfig @@ -116,24 +118,25 @@ type epochStartBootstrap struct { bootstrapHeartbeatSender update.Closer trieSyncStatisticsProvider common.SizeSyncStatisticsHandler nodeProcessingMode common.NodeProcessingMode - + nodeOperationMode p2p.NodeOperation // created components - requestHandler process.RequestHandler - interceptorContainer process.InterceptorsContainer - dataPool dataRetriever.PoolsHolder - miniBlocksSyncer epochStart.PendingMiniBlocksSyncHandler - headersSyncer epochStart.HeadersByHashSyncer - txSyncerForScheduled update.TransactionsSyncHandler - epochStartMetaBlockSyncer epochStart.StartOfEpochMetaSyncer - nodesConfigHandler StartOfEpochNodesConfigHandler - whiteListHandler update.WhiteListHandler - whiteListerVerifiedTxs update.WhiteListHandler - storageOpenerHandler storage.UnitOpenerHandler - latestStorageDataProvider storage.LatestStorageDataProviderHandler - argumentsParser process.ArgumentsParser - dataSyncerFactory types.ScheduledDataSyncerCreator - dataSyncerWithScheduled types.ScheduledDataSyncer - storageService dataRetriever.StorageService + requestHandler process.RequestHandler + mainInterceptorContainer process.InterceptorsContainer + fullArchiveInterceptorContainer process.InterceptorsContainer + dataPool dataRetriever.PoolsHolder + miniBlocksSyncer epochStart.PendingMiniBlocksSyncHandler + headersSyncer epochStart.HeadersByHashSyncer + txSyncerForScheduled update.TransactionsSyncHandler + epochStartMetaBlockSyncer epochStart.StartOfEpochMetaSyncer + nodesConfigHandler StartOfEpochNodesConfigHandler + whiteListHandler update.WhiteListHandler + whiteListerVerifiedTxs update.WhiteListHandler + storageOpenerHandler storage.UnitOpenerHandler + latestStorageDataProvider storage.LatestStorageDataProviderHandler + argumentsParser process.ArgumentsParser + dataSyncerFactory types.ScheduledDataSyncerCreator + dataSyncerWithScheduled types.ScheduledDataSyncer + storageService dataRetriever.StorageService // gathered data epochStartMeta data.MetaHeaderHandler @@ -161,7 +164,8 @@ type ArgsEpochStartBootstrap struct { CoreComponentsHolder process.CoreComponentsHolder CryptoComponentsHolder process.CryptoComponentsHolder DestinationShardAsObserver uint32 - Messenger Messenger + MainMessenger p2p.Messenger + FullArchiveMessenger p2p.Messenger GeneralConfig config.Config PrefsConfig config.PreferencesConfig FlagsConfig config.ContextFlagsConfig @@ -200,7 +204,8 @@ func NewEpochStartBootstrap(args ArgsEpochStartBootstrap) (*epochStartBootstrap, epochStartProvider := &epochStartBootstrap{ coreComponentsHolder: args.CoreComponentsHolder, 
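The constructor changes that follow store both messengers on the bootstrapper and derive the node operation mode from the FullArchive preference flag. Here is a condensed sketch with stand-in types; the real code uses p2p.NodeOperation, p2p.NormalOperation and p2p.FullArchiveMode from mx-chain-go.

package sketch

// nodeOperation and its values are stand-ins for the p2p package constants.
type nodeOperation string

const (
	normalOperation nodeOperation = "normal operation"
	fullArchiveMode nodeOperation = "full archive"
)

type prefsConfigSketch struct {
	FullArchive bool
}

// operationModeFromPrefs mirrors how the bootstrapper now picks its operation mode:
// it defaults to normal operation and switches to full archive mode only when the
// preferences enable it, which in turn decides whether the full archive interceptor
// container and messenger are actually exercised.
func operationModeFromPrefs(prefs prefsConfigSketch) nodeOperation {
	if prefs.FullArchive {
		return fullArchiveMode
	}
	return normalOperation
}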
cryptoComponentsHolder: args.CryptoComponentsHolder, - messenger: args.Messenger, + mainMessenger: args.MainMessenger, + fullArchiveMessenger: args.FullArchiveMessenger, generalConfig: args.GeneralConfig, prefsConfig: args.PrefsConfig, flagsConfig: args.FlagsConfig, @@ -227,6 +232,11 @@ func NewEpochStartBootstrap(args ArgsEpochStartBootstrap) (*epochStartBootstrap, shardCoordinator: args.GenesisShardCoordinator, trieSyncStatisticsProvider: args.TrieSyncStatisticsProvider, nodeProcessingMode: args.NodeProcessingMode, + nodeOperationMode: p2p.NormalOperation, + } + + if epochStartProvider.prefsConfig.FullArchive { + epochStartProvider.nodeOperationMode = p2p.FullArchiveMode } whiteListCache, err := storageunit.NewCache(storageFactory.GetCacherFromConfig(epochStartProvider.generalConfig.WhiteListPool)) @@ -370,9 +380,14 @@ func (e *epochStartBootstrap) Bootstrap() (Parameters, error) { } defer func() { - errClose := e.interceptorContainer.Close() + errClose := e.mainInterceptorContainer.Close() + if errClose != nil { + log.Warn("prepareEpochFromStorage mainInterceptorContainer.Close()", "error", errClose) + } + + errClose = e.fullArchiveInterceptorContainer.Close() if errClose != nil { - log.Warn("prepareEpochFromStorage interceptorContainer.Close()", "error", errClose) + log.Warn("prepareEpochFromStorage fullArchiveInterceptorContainer.Close()", "error", errClose) } }() @@ -417,10 +432,16 @@ func (e *epochStartBootstrap) bootstrapFromLocalStorage() (Parameters, error) { func (e *epochStartBootstrap) cleanupOnBootstrapFinish() { log.Debug("unregistering all message processor and un-joining all topics") - errMessenger := e.messenger.UnregisterAllMessageProcessors() + errMessenger := e.mainMessenger.UnregisterAllMessageProcessors() + log.LogIfError(errMessenger) + + errMessenger = e.mainMessenger.UnJoinAllTopics() log.LogIfError(errMessenger) - errMessenger = e.messenger.UnJoinAllTopics() + errMessenger = e.fullArchiveMessenger.UnregisterAllMessageProcessors() + log.LogIfError(errMessenger) + + errMessenger = e.fullArchiveMessenger.UnJoinAllTopics() log.LogIfError(errMessenger) e.closeTrieNodes() @@ -509,7 +530,7 @@ func (e *epochStartBootstrap) prepareComponentsToSyncFromNetwork() error { epochStartConfig := e.generalConfig.EpochStartConfig metaBlockProcessor, err := NewEpochStartMetaBlockProcessor( - e.messenger, + e.mainMessenger, e.requestHandler, e.coreComponentsHolder.InternalMarshalizer(), e.coreComponentsHolder.Hasher(), @@ -525,7 +546,7 @@ func (e *epochStartBootstrap) prepareComponentsToSyncFromNetwork() error { CoreComponentsHolder: e.coreComponentsHolder, CryptoComponentsHolder: e.cryptoComponentsHolder, RequestHandler: e.requestHandler, - Messenger: e.messenger, + Messenger: e.mainMessenger, ShardCoordinator: e.shardCoordinator, EconomicsData: e.economicsData, WhitelistHandler: e.whiteListHandler, @@ -548,17 +569,19 @@ func (e *epochStartBootstrap) createSyncers() error { CryptoComponents: e.cryptoComponentsHolder, Config: e.generalConfig, ShardCoordinator: e.shardCoordinator, - Messenger: e.messenger, + MainMessenger: e.mainMessenger, + FullArchiveMessenger: e.fullArchiveMessenger, DataPool: e.dataPool, WhiteListHandler: e.whiteListHandler, WhiteListerVerifiedTxs: e.whiteListerVerifiedTxs, ArgumentsParser: e.argumentsParser, HeaderIntegrityVerifier: e.headerIntegrityVerifier, RequestHandler: e.requestHandler, - SignaturesHandler: e.messenger, + SignaturesHandler: e.mainMessenger, + NodeOperationMode: e.nodeOperationMode, } - e.interceptorContainer, err = 
factoryInterceptors.NewEpochStartInterceptorsContainer(args) + e.mainInterceptorContainer, e.fullArchiveInterceptorContainer, err = factoryInterceptors.NewEpochStartInterceptorsContainer(args) if err != nil { return err } @@ -670,7 +693,8 @@ func (e *epochStartBootstrap) requestAndProcessing() (Parameters, error) { } log.Debug("start in epoch bootstrap: shardCoordinator", "numOfShards", e.baseData.numberOfShards, "shardId", e.baseData.shardId) - err = e.messenger.CreateTopic(common.ConsensusTopic+e.shardCoordinator.CommunicationIdentifier(e.shardCoordinator.SelfId()), true) + consensusTopic := common.ConsensusTopic + e.shardCoordinator.CommunicationIdentifier(e.shardCoordinator.SelfId()) + err = e.mainMessenger.CreateTopic(consensusTopic, true) if err != nil { return Parameters{}, err } @@ -1183,20 +1207,22 @@ func (e *epochStartBootstrap) createResolversContainer() error { // this one should only be used before determining the correct shard where the node should reside log.Debug("epochStartBootstrap.createRequestHandler", "shard", e.shardCoordinator.SelfId()) resolversContainerArgs := resolverscontainer.FactoryArgs{ - ShardCoordinator: e.shardCoordinator, - Messenger: e.messenger, - Store: storageService, - Marshalizer: e.coreComponentsHolder.InternalMarshalizer(), - DataPools: e.dataPool, - Uint64ByteSliceConverter: uint64ByteSlice.NewBigEndianConverter(), - NumConcurrentResolvingJobs: 10, - DataPacker: dataPacker, - TriesContainer: e.trieContainer, - SizeCheckDelta: 0, - InputAntifloodHandler: disabled.NewAntiFloodHandler(), - OutputAntifloodHandler: disabled.NewAntiFloodHandler(), - PreferredPeersHolder: disabled.NewPreferredPeersHolder(), - PayloadValidator: payloadValidator, + ShardCoordinator: e.shardCoordinator, + MainMessenger: e.mainMessenger, + FullArchiveMessenger: e.fullArchiveMessenger, + Store: storageService, + Marshalizer: e.coreComponentsHolder.InternalMarshalizer(), + DataPools: e.dataPool, + Uint64ByteSliceConverter: uint64ByteSlice.NewBigEndianConverter(), + NumConcurrentResolvingJobs: 10, + DataPacker: dataPacker, + TriesContainer: e.trieContainer, + SizeCheckDelta: 0, + InputAntifloodHandler: disabled.NewAntiFloodHandler(), + OutputAntifloodHandler: disabled.NewAntiFloodHandler(), + MainPreferredPeersHolder: disabled.NewPreferredPeersHolder(), + FullArchivePreferredPeersHolder: disabled.NewPreferredPeersHolder(), + PayloadValidator: payloadValidator, } resolverFactory, err := resolverscontainer.NewMetaResolversContainerFactory(resolversContainerArgs) if err != nil { @@ -1213,16 +1239,18 @@ func (e *epochStartBootstrap) createResolversContainer() error { func (e *epochStartBootstrap) createRequestHandler() error { requestersContainerArgs := requesterscontainer.FactoryArgs{ - RequesterConfig: e.generalConfig.Requesters, - ShardCoordinator: e.shardCoordinator, - Messenger: e.messenger, - Marshaller: e.coreComponentsHolder.InternalMarshalizer(), - Uint64ByteSliceConverter: uint64ByteSlice.NewBigEndianConverter(), - OutputAntifloodHandler: disabled.NewAntiFloodHandler(), - CurrentNetworkEpochProvider: disabled.NewCurrentNetworkEpochProviderHandler(), - PreferredPeersHolder: disabled.NewPreferredPeersHolder(), - PeersRatingHandler: disabled.NewDisabledPeersRatingHandler(), - SizeCheckDelta: 0, + RequesterConfig: e.generalConfig.Requesters, + ShardCoordinator: e.shardCoordinator, + MainMessenger: e.mainMessenger, + FullArchiveMessenger: e.fullArchiveMessenger, + Marshaller: e.coreComponentsHolder.InternalMarshalizer(), + Uint64ByteSliceConverter: 
uint64ByteSlice.NewBigEndianConverter(), + OutputAntifloodHandler: disabled.NewAntiFloodHandler(), + CurrentNetworkEpochProvider: disabled.NewCurrentNetworkEpochProviderHandler(), + MainPreferredPeersHolder: disabled.NewPreferredPeersHolder(), + FullArchivePreferredPeersHolder: disabled.NewPreferredPeersHolder(), + PeersRatingHandler: disabled.NewDisabledPeersRatingHandler(), + SizeCheckDelta: 0, } requestersFactory, err := requesterscontainer.NewMetaRequestersContainerFactory(requestersContainerArgs) if err != nil { @@ -1286,8 +1314,15 @@ func (e *epochStartBootstrap) createHeartbeatSender() error { } heartbeatTopic := common.HeartbeatV2Topic + e.shardCoordinator.CommunicationIdentifier(e.shardCoordinator.SelfId()) - if !e.messenger.HasTopic(heartbeatTopic) { - err = e.messenger.CreateTopic(heartbeatTopic, true) + if !e.mainMessenger.HasTopic(heartbeatTopic) { + err = e.mainMessenger.CreateTopic(heartbeatTopic, true) + if err != nil { + return err + } + } + + if !e.fullArchiveMessenger.HasTopic(heartbeatTopic) { + err = e.fullArchiveMessenger.CreateTopic(heartbeatTopic, true) if err != nil { return err } @@ -1299,7 +1334,8 @@ func (e *epochStartBootstrap) createHeartbeatSender() error { } heartbeatCfg := e.generalConfig.HeartbeatV2 argsHeartbeatSender := sender.ArgBootstrapSender{ - Messenger: e.messenger, + MainMessenger: e.mainMessenger, + FullArchiveMessenger: e.fullArchiveMessenger, Marshaller: e.coreComponentsHolder.InternalMarshalizer(), HeartbeatTopic: heartbeatTopic, HeartbeatTimeBetweenSends: time.Second * time.Duration(heartbeatCfg.HeartbeatTimeBetweenSendsDuringBootstrapInSec), diff --git a/epochStart/bootstrap/process_test.go b/epochStart/bootstrap/process_test.go index 85441f5612a..fcfc15d823e 100644 --- a/epochStart/bootstrap/process_test.go +++ b/epochStart/bootstrap/process_test.go @@ -102,11 +102,12 @@ func createMockEpochStartBootstrapArgs( ScheduledSCRsStorer: genericMocks.NewStorerMock(), CoreComponentsHolder: coreMock, CryptoComponentsHolder: cryptoMock, - Messenger: &p2pmocks.MessengerStub{ + MainMessenger: &p2pmocks.MessengerStub{ ConnectedPeersCalled: func() []core.PeerID { return []core.PeerID{"peer0", "peer1", "peer2", "peer3", "peer4", "peer5"} }, }, + FullArchiveMessenger: &p2pmocks.MessengerStub{}, GeneralConfig: config.Config{ MiniBlocksStorage: generalCfg.MiniBlocksStorage, PeerBlockBodyStorage: generalCfg.PeerBlockBodyStorage, @@ -243,11 +244,21 @@ func TestNewEpochStartBootstrap_NilArgsChecks(t *testing.T) { require.Nil(t, epochStartProvider) require.True(t, errors.Is(err, epochStart.ErrNilShardCoordinator)) }) - t.Run("nil messenger", func(t *testing.T) { + t.Run("nil main messenger", func(t *testing.T) { t.Parallel() args := createMockEpochStartBootstrapArgs(createComponentsForEpochStart()) - args.Messenger = nil + args.MainMessenger = nil + + epochStartProvider, err := NewEpochStartBootstrap(args) + require.Nil(t, epochStartProvider) + require.True(t, errors.Is(err, epochStart.ErrNilMessenger)) + }) + t.Run("nil full archive messenger", func(t *testing.T) { + t.Parallel() + + args := createMockEpochStartBootstrapArgs(createComponentsForEpochStart()) + args.FullArchiveMessenger = nil epochStartProvider, err := NewEpochStartBootstrap(args) require.Nil(t, epochStartProvider) @@ -1669,14 +1680,14 @@ func TestRequestAndProcessing(t *testing.T) { assert.Error(t, err) assert.True(t, strings.Contains(err.Error(), nodesCoordinator.ErrInvalidNumberOfShards.Error())) }) - t.Run("failed to create messenger topic", func(t *testing.T) { + t.Run("failed to create main 
messenger topic", func(t *testing.T) { t.Parallel() args := createMockEpochStartBootstrapArgs(createComponentsForEpochStart()) args.GenesisNodesConfig = getNodesConfigMock(1) expectedErr := errors.New("expected error") - args.Messenger = &p2pmocks.MessengerStub{ + args.MainMessenger = &p2pmocks.MessengerStub{ CreateTopicCalled: func(topic string, identifier bool) error { return expectedErr }, diff --git a/epochStart/bootstrap/storageProcess.go b/epochStart/bootstrap/storageProcess.go index 812d5adaa2c..d5f14fb4676 100644 --- a/epochStart/bootstrap/storageProcess.go +++ b/epochStart/bootstrap/storageProcess.go @@ -166,7 +166,7 @@ func (sesb *storageEpochStartBootstrap) prepareComponentsToSync() error { } metablockProcessor, err := NewStorageEpochStartMetaBlockProcessor( - sesb.messenger, + sesb.mainMessenger, sesb.requestHandler, sesb.coreComponentsHolder.InternalMarshalizer(), sesb.coreComponentsHolder.Hasher(), @@ -179,7 +179,7 @@ func (sesb *storageEpochStartBootstrap) prepareComponentsToSync() error { CoreComponentsHolder: sesb.coreComponentsHolder, CryptoComponentsHolder: sesb.cryptoComponentsHolder, RequestHandler: sesb.requestHandler, - Messenger: sesb.messenger, + Messenger: sesb.mainMessenger, ShardCoordinator: sesb.shardCoordinator, EconomicsData: sesb.economicsData, WhitelistHandler: sesb.whiteListHandler, @@ -244,7 +244,7 @@ func (sesb *storageEpochStartBootstrap) createStorageRequesters() error { WorkingDirectory: sesb.importDbConfig.ImportDBWorkingDir, Hasher: sesb.coreComponentsHolder.Hasher(), ShardCoordinator: shardCoordinator, - Messenger: sesb.messenger, + Messenger: sesb.mainMessenger, Store: sesb.store, Marshalizer: sesb.coreComponentsHolder.InternalMarshalizer(), Uint64ByteSliceConverter: sesb.coreComponentsHolder.Uint64ByteSliceConverter(), @@ -327,7 +327,8 @@ func (sesb *storageEpochStartBootstrap) requestAndProcessFromStorage() (Paramete } log.Debug("start in epoch bootstrap: shardCoordinator", "numOfShards", sesb.baseData.numberOfShards, "shardId", sesb.baseData.shardId) - err = sesb.messenger.CreateTopic(common.ConsensusTopic+sesb.shardCoordinator.CommunicationIdentifier(sesb.shardCoordinator.SelfId()), true) + consensusTopic := common.ConsensusTopic + sesb.shardCoordinator.CommunicationIdentifier(sesb.shardCoordinator.SelfId()) + err = sesb.mainMessenger.CreateTopic(consensusTopic, true) if err != nil { return Parameters{}, err } diff --git a/errors/errors.go b/errors/errors.go index 1a04eacb70f..eb730fcf5ec 100644 --- a/errors/errors.go +++ b/errors/errors.go @@ -589,3 +589,6 @@ var ErrNilBLSPublicKey = errors.New("bls public key is nil") // ErrEmptyAddress defines the error when trying to work with an empty address var ErrEmptyAddress = errors.New("empty Address") + +// ErrInvalidNodeOperationMode signals that an invalid node operation mode has been provided +var ErrInvalidNodeOperationMode = errors.New("invalid node operation mode") diff --git a/facade/initial/initialNodeFacade.go b/facade/initial/initialNodeFacade.go index 9b0c00c25a2..6951c73cdf8 100644 --- a/facade/initial/initialNodeFacade.go +++ b/facade/initial/initialNodeFacade.go @@ -241,9 +241,9 @@ func (inf *initialNodeFacade) GetPeerInfo(_ string) ([]core.QueryP2PPeerInfo, er return nil, errNodeStarting } -// GetConnectedPeersRatings returns empty string -func (inf *initialNodeFacade) GetConnectedPeersRatings() string { - return "" +// GetConnectedPeersRatingsOnMainNetwork returns empty string and error +func (inf *initialNodeFacade) GetConnectedPeersRatingsOnMainNetwork() (string, error) { + return 
"", errNodeStarting } // GetEpochStartDataAPI returns nil and error diff --git a/facade/initial/initialNodeFacade_test.go b/facade/initial/initialNodeFacade_test.go index 70ebf524359..7ee2e26de2e 100644 --- a/facade/initial/initialNodeFacade_test.go +++ b/facade/initial/initialNodeFacade_test.go @@ -255,8 +255,9 @@ func TestInitialNodeFacade_AllMethodsShouldNotPanic(t *testing.T) { assert.Nil(t, stakeValue) assert.Equal(t, errNodeStarting, err) - ratings := inf.GetConnectedPeersRatings() + ratings, err := inf.GetConnectedPeersRatingsOnMainNetwork() assert.Equal(t, "", ratings) + assert.Equal(t, errNodeStarting, err) epochStartData, err := inf.GetEpochStartDataAPI(0) assert.Nil(t, epochStartData) diff --git a/facade/interface.go b/facade/interface.go index 3d4b1756b51..9e2f909781f 100644 --- a/facade/interface.go +++ b/facade/interface.go @@ -94,7 +94,7 @@ type NodeHandler interface { GetQueryHandler(name string) (debug.QueryHandler, error) GetPeerInfo(pid string) ([]core.QueryP2PPeerInfo, error) - GetConnectedPeersRatings() string + GetConnectedPeersRatingsOnMainNetwork() (string, error) GetEpochStartDataAPI(epoch uint32) (*common.EpochStartDataAPI, error) diff --git a/facade/mock/nodeStub.go b/facade/mock/nodeStub.go index d7fed77d781..45078244146 100644 --- a/facade/mock/nodeStub.go +++ b/facade/mock/nodeStub.go @@ -38,7 +38,7 @@ type NodeStub struct { GetValueForKeyCalled func(address string, key string, options api.AccountQueryOptions) (string, api.BlockInfo, error) GetGuardianDataCalled func(address string, options api.AccountQueryOptions) (api.GuardianData, api.BlockInfo, error) GetPeerInfoCalled func(pid string) ([]core.QueryP2PPeerInfo, error) - GetConnectedPeersRatingsCalled func() string + GetConnectedPeersRatingsOnMainNetworkCalled func() (string, error) GetEpochStartDataAPICalled func(epoch uint32) (*common.EpochStartDataAPI, error) GetUsernameCalled func(address string, options api.AccountQueryOptions) (string, api.BlockInfo, error) GetCodeHashCalled func(address string, options api.AccountQueryOptions) ([]byte, api.BlockInfo, error) @@ -215,13 +215,13 @@ func (ns *NodeStub) GetPeerInfo(pid string) ([]core.QueryP2PPeerInfo, error) { return make([]core.QueryP2PPeerInfo, 0), nil } -// GetConnectedPeersRatings - -func (ns *NodeStub) GetConnectedPeersRatings() string { - if ns.GetConnectedPeersRatingsCalled != nil { - return ns.GetConnectedPeersRatingsCalled() +// GetConnectedPeersRatingsOnMainNetwork - +func (ns *NodeStub) GetConnectedPeersRatingsOnMainNetwork() (string, error) { + if ns.GetConnectedPeersRatingsOnMainNetworkCalled != nil { + return ns.GetConnectedPeersRatingsOnMainNetworkCalled() } - return "" + return "", nil } // GetEpochStartDataAPI - diff --git a/facade/nodeFacade.go b/facade/nodeFacade.go index b7d2f077363..a8318c7a03d 100644 --- a/facade/nodeFacade.go +++ b/facade/nodeFacade.go @@ -461,9 +461,9 @@ func (nf *nodeFacade) GetPeerInfo(pid string) ([]core.QueryP2PPeerInfo, error) { return nf.node.GetPeerInfo(pid) } -// GetConnectedPeersRatings returns the connected peers ratings -func (nf *nodeFacade) GetConnectedPeersRatings() string { - return nf.node.GetConnectedPeersRatings() +// GetConnectedPeersRatingsOnMainNetwork returns the connected peers ratings on the main network +func (nf *nodeFacade) GetConnectedPeersRatingsOnMainNetwork() (string, error) { + return nf.node.GetConnectedPeersRatingsOnMainNetwork() } // GetThrottlerForEndpoint returns the throttler for a given endpoint if found diff --git a/facade/nodeFacade_test.go b/facade/nodeFacade_test.go index 
510852c1572..8380e747103 100644 --- a/facade/nodeFacade_test.go +++ b/facade/nodeFacade_test.go @@ -2104,19 +2104,20 @@ func TestNodeFacade_GetEpochStartDataAPI(t *testing.T) { require.Equal(t, providedResponse, response) } -func TestNodeFacade_GetConnectedPeersRatings(t *testing.T) { +func TestNodeFacade_GetConnectedPeersRatingsOnMainNetwork(t *testing.T) { t.Parallel() providedResponse := "ratings" args := createMockArguments() args.Node = &mock.NodeStub{ - GetConnectedPeersRatingsCalled: func() string { - return providedResponse + GetConnectedPeersRatingsOnMainNetworkCalled: func() (string, error) { + return providedResponse, nil }, } nf, _ := NewNodeFacade(args) - response := nf.GetConnectedPeersRatings() + response, err := nf.GetConnectedPeersRatingsOnMainNetwork() + require.NoError(t, err) require.Equal(t, providedResponse, response) } diff --git a/factory/bootstrap/bootstrapComponents.go b/factory/bootstrap/bootstrapComponents.go index 9f580416853..1c3e834a16f 100644 --- a/factory/bootstrap/bootstrapComponents.go +++ b/factory/bootstrap/bootstrapComponents.go @@ -188,7 +188,8 @@ func (bcf *bootstrapComponentsFactory) Create() (*bootstrapComponents, error) { epochStartBootstrapArgs := bootstrap.ArgsEpochStartBootstrap{ CoreComponentsHolder: bcf.coreComponents, CryptoComponentsHolder: bcf.cryptoComponents, - Messenger: bcf.networkComponents.NetworkMessenger(), + MainMessenger: bcf.networkComponents.NetworkMessenger(), + FullArchiveMessenger: bcf.networkComponents.FullArchiveNetworkMessenger(), GeneralConfig: bcf.config, PrefsConfig: bcf.prefConfig.Preferences, FlagsConfig: bcf.flagsConfig, diff --git a/factory/consensus/consensusComponents.go b/factory/consensus/consensusComponents.go index 9681bdde7d0..b34f011724b 100644 --- a/factory/consensus/consensusComponents.go +++ b/factory/consensus/consensusComponents.go @@ -668,6 +668,7 @@ func (ccf *consensusComponentsFactory) createP2pSigningHandler() (consensus.P2PS p2pSignerArgs := p2pFactory.ArgsMessageVerifier{ Marshaller: ccf.coreComponents.InternalMarshalizer(), P2PSigner: ccf.networkComponents.NetworkMessenger(), + Logger: logger.GetOrCreate("main/p2p/messagecheck"), } return p2pFactory.NewMessageVerifier(p2pSignerArgs) diff --git a/factory/consensus/consensusComponents_test.go b/factory/consensus/consensusComponents_test.go index 54f6a4cf4de..67f551acf1d 100644 --- a/factory/consensus/consensusComponents_test.go +++ b/factory/consensus/consensusComponents_test.go @@ -117,7 +117,8 @@ func createMockConsensusComponentsFactoryArgs() consensusComp.ConsensusComponent NodeRedundancyHandlerInternal: &testsMocks.RedundancyHandlerStub{}, HardforkTriggerField: &testscommon.HardforkTriggerStub{}, ReqHandler: &testscommon.RequestHandlerStub{}, - PeerMapper: &testsMocks.PeerShardMapperStub{}, + MainPeerMapper: &testsMocks.PeerShardMapperStub{}, + FullArchivePeerMapper: &testsMocks.PeerShardMapperStub{}, ShardCoord: testscommon.NewMultiShardsCoordinatorMock(2), RoundHandlerField: &testscommon.RoundHandlerMock{ TimeDurationCalled: func() time.Duration { diff --git a/factory/disabled/preferredPeersHolder.go b/factory/disabled/preferredPeersHolder.go new file mode 100644 index 00000000000..5e0eeefb856 --- /dev/null +++ b/factory/disabled/preferredPeersHolder.go @@ -0,0 +1,44 @@ +package disabled + +import ( + "github.com/multiversx/mx-chain-core-go/core" +) + +type preferredPeersHolder struct { +} + +// NewPreferredPeersHolder returns a new instance of preferredPeersHolder +func NewPreferredPeersHolder() *preferredPeersHolder { + return 
&preferredPeersHolder{} +} + +// PutConnectionAddress does nothing as it is disabled +func (holder *preferredPeersHolder) PutConnectionAddress(_ core.PeerID, _ string) { +} + +// PutShardID does nothing as it is disabled +func (holder *preferredPeersHolder) PutShardID(_ core.PeerID, _ uint32) { +} + +// Get returns an empty map as it is disabled +func (holder *preferredPeersHolder) Get() map[uint32][]core.PeerID { + return make(map[uint32][]core.PeerID) +} + +// Contains returns false +func (holder *preferredPeersHolder) Contains(_ core.PeerID) bool { + return false +} + +// Remove does nothing as it is disabled +func (holder *preferredPeersHolder) Remove(_ core.PeerID) { +} + +// Clear does nothing as it is disabled +func (holder *preferredPeersHolder) Clear() { +} + +// IsInterfaceNil returns true if there is no value under the interface +func (holder *preferredPeersHolder) IsInterfaceNil() bool { + return holder == nil +} diff --git a/factory/heartbeat/heartbeatV2Components.go b/factory/heartbeat/heartbeatV2Components.go index 08b0e65bd58..a551f22e869 100644 --- a/factory/heartbeat/heartbeatV2Components.go +++ b/factory/heartbeat/heartbeatV2Components.go @@ -15,6 +15,7 @@ import ( "github.com/multiversx/mx-chain-go/heartbeat/processor" "github.com/multiversx/mx-chain-go/heartbeat/sender" "github.com/multiversx/mx-chain-go/heartbeat/status" + "github.com/multiversx/mx-chain-go/p2p" processFactory "github.com/multiversx/mx-chain-go/process/factory" "github.com/multiversx/mx-chain-go/process/peer" "github.com/multiversx/mx-chain-go/update" @@ -53,12 +54,13 @@ type heartbeatV2ComponentsFactory struct { } type heartbeatV2Components struct { - sender update.Closer - peerAuthRequestsProcessor update.Closer - shardSender update.Closer - monitor factory.HeartbeatV2Monitor - statusHandler update.Closer - directConnectionProcessor update.Closer + sender update.Closer + peerAuthRequestsProcessor update.Closer + shardSender update.Closer + monitor factory.HeartbeatV2Monitor + statusHandler update.Closer + mainDirectConnectionProcessor update.Closer + fullArchiveDirectConnectionProcessor update.Closer } // NewHeartbeatV2ComponentsFactory creates a new instance of heartbeatV2ComponentsFactory @@ -100,7 +102,10 @@ func checkHeartbeatV2FactoryArgs(args ArgHeartbeatV2ComponentsFactory) error { return errors.ErrNilNetworkComponentsHolder } if check.IfNil(args.NetworkComponents.NetworkMessenger()) { - return errors.ErrNilMessenger + return fmt.Errorf("%w for main", errors.ErrNilMessenger) + } + if check.IfNil(args.NetworkComponents.FullArchiveNetworkMessenger()) { + return fmt.Errorf("%w for full archive", errors.ErrNilMessenger) } if check.IfNil(args.CryptoComponents) { return errors.ErrNilCryptoComponentsHolder @@ -120,17 +125,9 @@ func checkHeartbeatV2FactoryArgs(args ArgHeartbeatV2ComponentsFactory) error { // Create creates the heartbeatV2 components func (hcf *heartbeatV2ComponentsFactory) Create() (*heartbeatV2Components, error) { - if !hcf.networkComponents.NetworkMessenger().HasTopic(common.PeerAuthenticationTopic) { - err := hcf.networkComponents.NetworkMessenger().CreateTopic(common.PeerAuthenticationTopic, true) - if err != nil { - return nil, err - } - } - if !hcf.networkComponents.NetworkMessenger().HasTopic(common.HeartbeatV2Topic) { - err := hcf.networkComponents.NetworkMessenger().CreateTopic(common.HeartbeatV2Topic, true) - if err != nil { - return nil, err - } + err := hcf.createTopicsIfNeeded() + if err != nil { + return nil, err } cfg := hcf.config.HeartbeatV2 @@ -157,11 +154,12 @@ 
func (hcf *heartbeatV2ComponentsFactory) Create() (*heartbeatV2Components, error } argsSender := sender.ArgSender{ - Messenger: hcf.networkComponents.NetworkMessenger(), - Marshaller: hcf.coreComponents.InternalMarshalizer(), - PeerAuthenticationTopic: common.PeerAuthenticationTopic, - HeartbeatTopic: heartbeatTopic, - PeerAuthenticationTimeBetweenSends: time.Second * time.Duration(cfg.PeerAuthenticationTimeBetweenSendsInSec), + MainMessenger: hcf.networkComponents.NetworkMessenger(), + FullArchiveMessenger: hcf.networkComponents.FullArchiveNetworkMessenger(), + Marshaller: hcf.coreComponents.InternalMarshalizer(), + PeerAuthenticationTopic: common.PeerAuthenticationTopic, + HeartbeatTopic: heartbeatTopic, + PeerAuthenticationTimeBetweenSends: time.Second * time.Duration(cfg.PeerAuthenticationTimeBetweenSendsInSec), PeerAuthenticationTimeBetweenSendsWhenError: time.Second * time.Duration(cfg.PeerAuthenticationTimeBetweenSendsWhenErrorInSec), PeerAuthenticationTimeThresholdBetweenSends: cfg.PeerAuthenticationTimeThresholdBetweenSends, HeartbeatTimeBetweenSends: time.Second * time.Duration(cfg.HeartbeatTimeBetweenSendsInSec), @@ -209,7 +207,8 @@ func (hcf *heartbeatV2ComponentsFactory) Create() (*heartbeatV2Components, error } argsPeerShardSender := sender.ArgPeerShardSender{ - Messenger: hcf.networkComponents.NetworkMessenger(), + MainMessenger: hcf.networkComponents.NetworkMessenger(), + FullArchiveMessenger: hcf.networkComponents.FullArchiveNetworkMessenger(), Marshaller: hcf.coreComponents.InternalMarshalizer(), ShardCoordinator: hcf.bootstrapComponents.ShardCoordinator(), TimeBetweenSends: time.Second * time.Duration(cfg.PeerShardTimeBetweenSendsInSec), @@ -247,42 +246,95 @@ func (hcf *heartbeatV2ComponentsFactory) Create() (*heartbeatV2Components, error return nil, err } - argsDirectConnectionProcessor := processor.ArgsDirectConnectionProcessor{ + argsMainDirectConnectionProcessor := processor.ArgsDirectConnectionProcessor{ TimeToReadDirectConnections: time.Second * time.Duration(cfg.TimeToReadDirectConnectionsInSec), Messenger: hcf.networkComponents.NetworkMessenger(), PeerShardMapper: hcf.processComponents.PeerShardMapper(), ShardCoordinator: hcf.processComponents.ShardCoordinator(), - BaseIntraShardTopic: common.ConsensusTopic, + BaseIntraShardTopic: common.HeartbeatV2Topic, BaseCrossShardTopic: processFactory.MiniBlocksTopic, } - directConnectionProcessor, err := processor.NewDirectConnectionProcessor(argsDirectConnectionProcessor) + mainDirectConnectionProcessor, err := processor.NewDirectConnectionProcessor(argsMainDirectConnectionProcessor) if err != nil { return nil, err } - argsCrossShardPeerTopicNotifier := monitor.ArgsCrossShardPeerTopicNotifier{ + argsFullArchiveDirectConnectionProcessor := processor.ArgsDirectConnectionProcessor{ + TimeToReadDirectConnections: time.Second * time.Duration(cfg.TimeToReadDirectConnectionsInSec), + Messenger: hcf.networkComponents.FullArchiveNetworkMessenger(), + PeerShardMapper: hcf.processComponents.FullArchivePeerShardMapper(), + ShardCoordinator: hcf.processComponents.ShardCoordinator(), + BaseIntraShardTopic: common.HeartbeatV2Topic, + BaseCrossShardTopic: processFactory.MiniBlocksTopic, + } + fullArchiveDirectConnectionProcessor, err := processor.NewDirectConnectionProcessor(argsFullArchiveDirectConnectionProcessor) + if err != nil { + return nil, err + } + + argsMainCrossShardPeerTopicNotifier := monitor.ArgsCrossShardPeerTopicNotifier{ ShardCoordinator: hcf.processComponents.ShardCoordinator(), PeerShardMapper: 
hcf.processComponents.PeerShardMapper(), } - crossShardPeerTopicNotifier, err := monitor.NewCrossShardPeerTopicNotifier(argsCrossShardPeerTopicNotifier) + mainCrossShardPeerTopicNotifier, err := monitor.NewCrossShardPeerTopicNotifier(argsMainCrossShardPeerTopicNotifier) + if err != nil { + return nil, err + } + err = hcf.networkComponents.NetworkMessenger().AddPeerTopicNotifier(mainCrossShardPeerTopicNotifier) + if err != nil { + return nil, err + } + + argsFullArchiveCrossShardPeerTopicNotifier := monitor.ArgsCrossShardPeerTopicNotifier{ + ShardCoordinator: hcf.processComponents.ShardCoordinator(), + PeerShardMapper: hcf.processComponents.FullArchivePeerShardMapper(), + } + fullArchiveCrossShardPeerTopicNotifier, err := monitor.NewCrossShardPeerTopicNotifier(argsFullArchiveCrossShardPeerTopicNotifier) if err != nil { return nil, err } - err = hcf.networkComponents.NetworkMessenger().AddPeerTopicNotifier(crossShardPeerTopicNotifier) + err = hcf.networkComponents.FullArchiveNetworkMessenger().AddPeerTopicNotifier(fullArchiveCrossShardPeerTopicNotifier) if err != nil { return nil, err } return &heartbeatV2Components{ - sender: heartbeatV2Sender, - peerAuthRequestsProcessor: paRequestsProcessor, - shardSender: shardSender, - monitor: heartbeatsMonitor, - statusHandler: statusHandler, - directConnectionProcessor: directConnectionProcessor, + sender: heartbeatV2Sender, + peerAuthRequestsProcessor: paRequestsProcessor, + shardSender: shardSender, + monitor: heartbeatsMonitor, + statusHandler: statusHandler, + mainDirectConnectionProcessor: mainDirectConnectionProcessor, + fullArchiveDirectConnectionProcessor: fullArchiveDirectConnectionProcessor, }, nil } +func (hcf *heartbeatV2ComponentsFactory) createTopicsIfNeeded() error { + err := createTopicsIfNeededOnMessenger(hcf.networkComponents.NetworkMessenger()) + if err != nil { + return err + } + + return createTopicsIfNeededOnMessenger(hcf.networkComponents.FullArchiveNetworkMessenger()) +} + +func createTopicsIfNeededOnMessenger(messenger p2p.Messenger) error { + if !messenger.HasTopic(common.PeerAuthenticationTopic) { + err := messenger.CreateTopic(common.PeerAuthenticationTopic, true) + if err != nil { + return err + } + } + if !messenger.HasTopic(common.HeartbeatV2Topic) { + err := messenger.CreateTopic(common.HeartbeatV2Topic, true) + if err != nil { + return err + } + } + + return nil +} + // Close closes the heartbeat components func (hc *heartbeatV2Components) Close() error { log.Debug("calling close on heartbeatV2 components") @@ -303,8 +355,12 @@ func (hc *heartbeatV2Components) Close() error { log.LogIfError(hc.statusHandler.Close()) } - if !check.IfNil(hc.directConnectionProcessor) { - log.LogIfError(hc.directConnectionProcessor.Close()) + if !check.IfNil(hc.mainDirectConnectionProcessor) { + log.LogIfError(hc.mainDirectConnectionProcessor.Close()) + } + + if !check.IfNil(hc.fullArchiveDirectConnectionProcessor) { + log.LogIfError(hc.fullArchiveDirectConnectionProcessor.Close()) } return nil diff --git a/factory/heartbeat/heartbeatV2Components_test.go b/factory/heartbeat/heartbeatV2Components_test.go index 46587997ecf..f013294a7d1 100644 --- a/factory/heartbeat/heartbeatV2Components_test.go +++ b/factory/heartbeat/heartbeatV2Components_test.go @@ -65,7 +65,8 @@ func createMockHeartbeatV2ComponentsFactoryArgs() heartbeatComp.ArgHeartbeatV2Co BlockChain: &testscommon.ChainHandlerStub{}, }, NetworkComponents: &testsMocks.NetworkComponentsStub{ - Messenger: &p2pmocks.MessengerStub{}, + Messenger: &p2pmocks.MessengerStub{}, + 
FullArchiveNetworkMessengerField: &p2pmocks.MessengerStub{}, }, CryptoComponents: &testsMocks.CryptoComponentsStub{ PrivKey: &cryptoMocks.PrivateKeyStub{}, @@ -79,7 +80,8 @@ func createMockHeartbeatV2ComponentsFactoryArgs() heartbeatComp.ArgHeartbeatV2Co NodeRedundancyHandlerInternal: &testsMocks.RedundancyHandlerStub{}, HardforkTriggerField: &testscommon.HardforkTriggerStub{}, ReqHandler: &testscommon.RequestHandlerStub{}, - PeerMapper: &testsMocks.PeerShardMapperStub{}, + MainPeerMapper: &testsMocks.PeerShardMapperStub{}, + FullArchivePeerMapper: &testsMocks.PeerShardMapperStub{}, ShardCoord: &testscommon.ShardsCoordinatorMock{}, }, StatusCoreComponents: &factory.StatusCoreComponentsStub{ @@ -189,7 +191,19 @@ func TestNewHeartbeatV2ComponentsFactory(t *testing.T) { } hcf, err := heartbeatComp.NewHeartbeatV2ComponentsFactory(args) assert.Nil(t, hcf) - assert.Equal(t, errorsMx.ErrNilMessenger, err) + assert.True(t, errors.Is(err, errorsMx.ErrNilMessenger)) + }) + t.Run("nil FullArchiveNetworkMessenger should error", func(t *testing.T) { + t.Parallel() + + args := createMockHeartbeatV2ComponentsFactoryArgs() + args.NetworkComponents = &testsMocks.NetworkComponentsStub{ + Messenger: &p2pmocks.MessengerStub{}, + FullArchiveNetworkMessengerField: nil, + } + hcf, err := heartbeatComp.NewHeartbeatV2ComponentsFactory(args) + assert.Nil(t, hcf) + assert.True(t, errors.Is(err, errorsMx.ErrNilMessenger)) }) t.Run("nil CryptoComponents should error", func(t *testing.T) { t.Parallel() @@ -235,7 +249,7 @@ func TestHeartbeatV2Components_Create(t *testing.T) { t.Parallel() expectedErr := errors.New("expected error") - t.Run("messenger does not have PeerAuthenticationTopic and fails to create it", func(t *testing.T) { + t.Run("main messenger does not have PeerAuthenticationTopic and fails to create it", func(t *testing.T) { t.Parallel() args := createMockHeartbeatV2ComponentsFactoryArgs() @@ -256,6 +270,7 @@ func TestHeartbeatV2Components_Create(t *testing.T) { return nil }, }, + FullArchiveNetworkMessengerField: &p2pmocks.MessengerStub{}, } hcf, err := heartbeatComp.NewHeartbeatV2ComponentsFactory(args) assert.NotNil(t, hcf) @@ -265,7 +280,7 @@ func TestHeartbeatV2Components_Create(t *testing.T) { assert.Nil(t, hc) assert.Equal(t, expectedErr, err) }) - t.Run("messenger does not have HeartbeatV2Topic and fails to create it", func(t *testing.T) { + t.Run("main messenger does not have HeartbeatV2Topic and fails to create it", func(t *testing.T) { t.Parallel() args := createMockHeartbeatV2ComponentsFactoryArgs() @@ -282,6 +297,65 @@ func TestHeartbeatV2Components_Create(t *testing.T) { return nil }, }, + FullArchiveNetworkMessengerField: &p2pmocks.MessengerStub{}, + } + hcf, err := heartbeatComp.NewHeartbeatV2ComponentsFactory(args) + assert.NotNil(t, hcf) + assert.NoError(t, err) + + hc, err := hcf.Create() + assert.Nil(t, hc) + assert.Equal(t, expectedErr, err) + }) + t.Run("full archive messenger does not have PeerAuthenticationTopic and fails to create it", func(t *testing.T) { + t.Parallel() + + args := createMockHeartbeatV2ComponentsFactoryArgs() + args.NetworkComponents = &testsMocks.NetworkComponentsStub{ + FullArchiveNetworkMessengerField: &p2pmocks.MessengerStub{ + HasTopicCalled: func(name string) bool { + if name == common.PeerAuthenticationTopic { + return false + } + assert.Fail(t, "should not have been called") + return true + }, + CreateTopicCalled: func(name string, createChannelForTopic bool) error { + if name == common.PeerAuthenticationTopic { + return expectedErr + } + assert.Fail(t, 
"should not have been called") + return nil + }, + }, + Messenger: &p2pmocks.MessengerStub{}, + } + hcf, err := heartbeatComp.NewHeartbeatV2ComponentsFactory(args) + assert.NotNil(t, hcf) + assert.NoError(t, err) + + hc, err := hcf.Create() + assert.Nil(t, hc) + assert.Equal(t, expectedErr, err) + }) + t.Run("full archive messenger does not have HeartbeatV2Topic and fails to create it", func(t *testing.T) { + t.Parallel() + + args := createMockHeartbeatV2ComponentsFactoryArgs() + args.NetworkComponents = &testsMocks.NetworkComponentsStub{ + FullArchiveNetworkMessengerField: &p2pmocks.MessengerStub{ + HasTopicCalled: func(name string) bool { + return name != common.HeartbeatV2Topic + }, + CreateTopicCalled: func(name string, createChannelForTopic bool) error { + if name == common.HeartbeatV2Topic { + return expectedErr + } + assert.Fail(t, "should not have been called") + return nil + }, + }, + Messenger: &p2pmocks.MessengerStub{}, } hcf, err := heartbeatComp.NewHeartbeatV2ComponentsFactory(args) assert.NotNil(t, hcf) @@ -412,7 +486,8 @@ func TestHeartbeatV2Components_Create(t *testing.T) { EpochNotifier: processComp.EpochStartNotifier(), NodeRedundancyHandlerInternal: processComp.NodeRedundancyHandler(), HardforkTriggerField: processComp.HardforkTrigger(), - PeerMapper: processComp.PeerShardMapper(), + MainPeerMapper: processComp.PeerShardMapper(), + FullArchivePeerMapper: processComp.FullArchivePeerShardMapper(), ShardCoordinatorCalled: func() sharding.Coordinator { cnt++ if cnt > 3 { @@ -439,6 +514,7 @@ func TestHeartbeatV2Components_Create(t *testing.T) { return expectedErr }, }, + FullArchiveNetworkMessengerField: &p2pmocks.MessengerStub{}, } hcf, err := heartbeatComp.NewHeartbeatV2ComponentsFactory(args) assert.NotNil(t, hcf) @@ -458,7 +534,22 @@ func TestHeartbeatV2Components_Create(t *testing.T) { } }() + topicsCreated := make(map[string][]string) args := createMockHeartbeatV2ComponentsFactoryArgs() + args.NetworkComponents = &testsMocks.NetworkComponentsStub{ + Messenger: &p2pmocks.MessengerStub{ + CreateTopicCalled: func(name string, createChannelForTopic bool) error { + topicsCreated["main"] = append(topicsCreated["main"], name) + return nil + }, + }, + FullArchiveNetworkMessengerField: &p2pmocks.MessengerStub{ + CreateTopicCalled: func(name string, createChannelForTopic bool) error { + topicsCreated["full_archive"] = append(topicsCreated["full_archive"], name) + return nil + }, + }, + } args.Prefs.Preferences.FullArchive = true // coverage only hcf, err := heartbeatComp.NewHeartbeatV2ComponentsFactory(args) assert.NotNil(t, hcf) @@ -468,6 +559,14 @@ func TestHeartbeatV2Components_Create(t *testing.T) { assert.NotNil(t, hc) assert.NoError(t, err) assert.NoError(t, hc.Close()) + + assert.Equal(t, 2, len(topicsCreated)) + assert.Equal(t, 2, len(topicsCreated["main"])) + assert.Equal(t, 2, len(topicsCreated["full_archive"])) + for _, messengerTopics := range topicsCreated { + assert.Contains(t, messengerTopics, common.HeartbeatV2Topic) + assert.Contains(t, messengerTopics, common.PeerAuthenticationTopic) + } }) } diff --git a/factory/interface.go b/factory/interface.go index 86df7e94f23..28eb2a72bcb 100644 --- a/factory/interface.go +++ b/factory/interface.go @@ -248,6 +248,8 @@ type NetworkComponentsHolder interface { PreferredPeersHolderHandler() PreferredPeersHolderHandler PeersRatingHandler() p2p.PeersRatingHandler PeersRatingMonitor() p2p.PeersRatingMonitor + FullArchiveNetworkMessenger() p2p.Messenger + FullArchivePreferredPeersHolderHandler() PreferredPeersHolderHandler 
IsInterfaceNil() bool } @@ -269,6 +271,7 @@ type ProcessComponentsHolder interface { NodesCoordinator() nodesCoordinator.NodesCoordinator ShardCoordinator() sharding.Coordinator InterceptorsContainer() process.InterceptorsContainer + FullArchiveInterceptorsContainer() process.InterceptorsContainer ResolversContainer() dataRetriever.ResolversContainer RequestersFinder() dataRetriever.RequestersFinder RoundHandler() consensus.RoundHandler @@ -288,6 +291,7 @@ type ProcessComponentsHolder interface { TxLogsProcessor() process.TransactionLogProcessorDatabase HeaderConstructionValidator() process.HeaderConstructionValidator PeerShardMapper() process.NetworkShardingCollector + FullArchivePeerShardMapper() process.NetworkShardingCollector FallbackHeaderValidator() process.FallbackHeaderValidator APITransactionEvaluator() TransactionEvaluator WhiteListHandler() process.WhiteListHandler @@ -379,7 +383,7 @@ type ConsensusWorker interface { // RemoveAllReceivedMessagesCalls removes all the functions handlers RemoveAllReceivedMessagesCalls() // ProcessReceivedMessage method redirects the received message to the channel which should handle it - ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID) error + ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID, source p2p.MessageHandler) error // Extend does an extension for the subround with subroundId Extend(subroundId int) // GetConsensusStateChangedChannel gets the channel for the consensusStateChanged diff --git a/factory/mock/networkComponentsMock.go b/factory/mock/networkComponentsMock.go index fb194918ff7..a5677c75db9 100644 --- a/factory/mock/networkComponentsMock.go +++ b/factory/mock/networkComponentsMock.go @@ -8,13 +8,15 @@ import ( // NetworkComponentsMock - type NetworkComponentsMock struct { - Messenger p2p.Messenger - InputAntiFlood factory.P2PAntifloodHandler - OutputAntiFlood factory.P2PAntifloodHandler - PeerBlackList process.PeerBlackListCacher - PreferredPeersHolder factory.PreferredPeersHolderHandler - PeersRatingHandlerField p2p.PeersRatingHandler - PeersRatingMonitorField p2p.PeersRatingMonitor + Messenger p2p.Messenger + InputAntiFlood factory.P2PAntifloodHandler + OutputAntiFlood factory.P2PAntifloodHandler + PeerBlackList process.PeerBlackListCacher + PreferredPeersHolder factory.PreferredPeersHolderHandler + PeersRatingHandlerField p2p.PeersRatingHandler + PeersRatingMonitorField p2p.PeersRatingMonitor + FullArchiveNetworkMessengerField p2p.Messenger + FullArchivePreferredPeersHolder factory.PreferredPeersHolderHandler } // PubKeyCacher - @@ -77,6 +79,16 @@ func (ncm *NetworkComponentsMock) PeersRatingMonitor() p2p.PeersRatingMonitor { return ncm.PeersRatingMonitorField } +// FullArchiveNetworkMessenger - +func (ncm *NetworkComponentsMock) FullArchiveNetworkMessenger() p2p.Messenger { + return ncm.FullArchiveNetworkMessengerField +} + +// FullArchivePreferredPeersHolderHandler - +func (ncm *NetworkComponentsMock) FullArchivePreferredPeersHolderHandler() factory.PreferredPeersHolderHandler { + return ncm.FullArchivePreferredPeersHolder +} + // IsInterfaceNil - func (ncm *NetworkComponentsMock) IsInterfaceNil() bool { return ncm == nil diff --git a/factory/mock/processComponentsStub.go b/factory/mock/processComponentsStub.go index 464bd032166..51265a22997 100644 --- a/factory/mock/processComponentsStub.go +++ b/factory/mock/processComponentsStub.go @@ -19,6 +19,7 @@ type ProcessComponentsMock struct { NodesCoord nodesCoordinator.NodesCoordinator ShardCoord sharding.Coordinator 
IntContainer process.InterceptorsContainer + FullArchiveIntContainer process.InterceptorsContainer ResContainer dataRetriever.ResolversContainer ReqFinder dataRetriever.RequestersFinder RoundHandlerField consensus.RoundHandler @@ -37,7 +38,8 @@ type ProcessComponentsMock struct { ReqHandler process.RequestHandler TxLogsProcess process.TransactionLogProcessorDatabase HeaderConstructValidator process.HeaderConstructionValidator - PeerMapper process.NetworkShardingCollector + MainPeerMapper process.NetworkShardingCollector + FullArchivePeerMapper process.NetworkShardingCollector TransactionEvaluator factory.TransactionEvaluator FallbackHdrValidator process.FallbackHeaderValidator WhiteListHandlerInternal process.WhiteListHandler @@ -86,6 +88,11 @@ func (pcm *ProcessComponentsMock) InterceptorsContainer() process.InterceptorsCo return pcm.IntContainer } +// FullArchiveInterceptorsContainer - +func (pcm *ProcessComponentsMock) FullArchiveInterceptorsContainer() process.InterceptorsContainer { + return pcm.FullArchiveIntContainer +} + // ResolversContainer - func (pcm *ProcessComponentsMock) ResolversContainer() dataRetriever.ResolversContainer { return pcm.ResContainer @@ -178,7 +185,12 @@ func (pcm *ProcessComponentsMock) HeaderConstructionValidator() process.HeaderCo // PeerShardMapper - func (pcm *ProcessComponentsMock) PeerShardMapper() process.NetworkShardingCollector { - return pcm.PeerMapper + return pcm.MainPeerMapper +} + +// FullArchivePeerShardMapper - +func (pcm *ProcessComponentsMock) FullArchivePeerShardMapper() process.NetworkShardingCollector { + return pcm.FullArchivePeerMapper } // FallbackHeaderValidator - diff --git a/factory/network/networkComponents.go b/factory/network/networkComponents.go index 1ff92da63af..1ba0ba7c15f 100644 --- a/factory/network/networkComponents.go +++ b/factory/network/networkComponents.go @@ -10,11 +10,12 @@ import ( "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/consensus" - "github.com/multiversx/mx-chain-go/debug/antiflood" "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/factory" + "github.com/multiversx/mx-chain-go/factory/disabled" "github.com/multiversx/mx-chain-go/p2p" p2pConfig "github.com/multiversx/mx-chain-go/p2p/config" + p2pDisabled "github.com/multiversx/mx-chain-go/p2p/disabled" p2pFactory "github.com/multiversx/mx-chain-go/p2p/factory" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/rating/peerHonesty" @@ -27,7 +28,8 @@ import ( // NetworkComponentsFactoryArgs holds the arguments to create a network component handler instance type NetworkComponentsFactoryArgs struct { - P2pConfig p2pConfig.P2PConfig + MainP2pConfig p2pConfig.P2PConfig + FullArchiveP2pConfig p2pConfig.P2PConfig MainConfig config.Config RatingsConfig config.RatingsConfig StatusHandler core.AppStatusHandler @@ -41,7 +43,8 @@ type NetworkComponentsFactoryArgs struct { } type networkComponentsFactory struct { - p2pConfig p2pConfig.P2PConfig + mainP2PConfig p2pConfig.P2PConfig + fullArchiveP2PConfig p2pConfig.P2PConfig mainConfig config.Config ratingsConfig config.RatingsConfig statusHandler core.AppStatusHandler @@ -55,21 +58,26 @@ type networkComponentsFactory struct { cryptoComponents factory.CryptoComponentsHolder } +type networkComponentsHolder struct { + netMessenger p2p.Messenger + preferredPeersHolder p2p.PreferredPeersHolderHandler +} + // networkComponents struct holds the network components type networkComponents 
struct { - netMessenger p2p.Messenger - inputAntifloodHandler factory.P2PAntifloodHandler - outputAntifloodHandler factory.P2PAntifloodHandler - pubKeyTimeCacher process.TimeCacher - topicFloodPreventer process.TopicFloodPreventer - floodPreventers []process.FloodPreventer - peerBlackListHandler process.PeerBlackListCacher - antifloodConfig config.AntifloodConfig - peerHonestyHandler consensus.PeerHonestyHandler - peersHolder factory.PreferredPeersHolderHandler - peersRatingHandler p2p.PeersRatingHandler - peersRatingMonitor p2p.PeersRatingMonitor - closeFunc context.CancelFunc + mainNetworkHolder networkComponentsHolder + fullArchiveNetworkHolder networkComponentsHolder + peersRatingHandler p2p.PeersRatingHandler + peersRatingMonitor p2p.PeersRatingMonitor + inputAntifloodHandler factory.P2PAntifloodHandler + outputAntifloodHandler factory.P2PAntifloodHandler + pubKeyTimeCacher process.TimeCacher + topicFloodPreventer process.TopicFloodPreventer + floodPreventers []process.FloodPreventer + peerBlackListHandler process.PeerBlackListCacher + antifloodConfig config.AntifloodConfig + peerHonestyHandler consensus.PeerHonestyHandler + closeFunc context.CancelFunc } var log = logger.GetOrCreate("factory") @@ -90,9 +98,13 @@ func NewNetworkComponentsFactory( if check.IfNil(args.CryptoComponents) { return nil, errors.ErrNilCryptoComponentsHolder } + if args.NodeOperationMode != p2p.NormalOperation && args.NodeOperationMode != p2p.FullArchiveMode { + return nil, errors.ErrInvalidNodeOperationMode + } return &networkComponentsFactory{ - p2pConfig: args.P2pConfig, + mainP2PConfig: args.MainP2pConfig, + fullArchiveP2PConfig: args.FullArchiveP2pConfig, ratingsConfig: args.RatingsConfig, marshalizer: args.Marshalizer, mainConfig: args.MainConfig, @@ -109,43 +121,17 @@ func NewNetworkComponentsFactory( // Create creates and returns the network components func (ncf *networkComponentsFactory) Create() (*networkComponents, error) { - ph, err := p2pFactory.NewPeersHolder(ncf.preferredPeersSlices) + peersRatingHandler, peersRatingMonitor, err := ncf.createPeersRatingComponents() if err != nil { return nil, err } - peersRatingCfg := ncf.mainConfig.PeersRatingConfig - topRatedCache, err := cache.NewLRUCache(peersRatingCfg.TopRatedCacheCapacity) - if err != nil { - return nil, err - } - badRatedCache, err := cache.NewLRUCache(peersRatingCfg.BadRatedCacheCapacity) - if err != nil { - return nil, err - } - argsPeersRatingHandler := p2pFactory.ArgPeersRatingHandler{ - TopRatedCache: topRatedCache, - BadRatedCache: badRatedCache, - } - peersRatingHandler, err := p2pFactory.NewPeersRatingHandler(argsPeersRatingHandler) + mainNetworkComp, err := ncf.createMainNetworkHolder(peersRatingHandler) if err != nil { return nil, err } - arg := p2pFactory.ArgsNetworkMessenger{ - Marshaller: ncf.marshalizer, - ListenAddress: ncf.listenAddress, - P2pConfig: ncf.p2pConfig, - SyncTimer: ncf.syncer, - PreferredPeersHolder: ph, - NodeOperationMode: ncf.nodeOperationMode, - PeersRatingHandler: peersRatingHandler, - ConnectionWatcherType: ncf.connectionWatcherType, - P2pPrivateKey: ncf.cryptoComponents.P2pPrivateKey(), - P2pSingleSigner: ncf.cryptoComponents.P2pSingleSigner(), - P2pKeyGenerator: ncf.cryptoComponents.P2pKeyGen(), - } - netMessenger, err := p2pFactory.NewNetworkMessenger(arg) + fullArchiveNetworkComp, err := ncf.createFullArchiveNetworkHolder(peersRatingHandler) if err != nil { return nil, err } @@ -157,52 +143,66 @@ func (ncf *networkComponentsFactory) Create() (*networkComponents, error) { } }() - 
argsPeersRatingMonitor := p2pFactory.ArgPeersRatingMonitor{ - TopRatedCache: topRatedCache, - BadRatedCache: badRatedCache, - ConnectionsProvider: netMessenger, + antiFloodComponents, inputAntifloodHandler, outputAntifloodHandler, peerHonestyHandler, err := ncf.createAntifloodComponents(ctx, mainNetworkComp.netMessenger.ID()) + if err != nil { + return nil, err } - peersRatingMonitor, err := p2pFactory.NewPeersRatingMonitor(argsPeersRatingMonitor) + + err = mainNetworkComp.netMessenger.Bootstrap() if err != nil { return nil, err } - var antiFloodComponents *antifloodFactory.AntiFloodComponents - antiFloodComponents, err = antifloodFactory.NewP2PAntiFloodComponents(ctx, ncf.mainConfig, ncf.statusHandler, netMessenger.ID()) + mainNetworkComp.netMessenger.WaitForConnections(ncf.bootstrapWaitTime, ncf.mainP2PConfig.Node.MinNumPeersToWaitForOnBootstrap) + + err = fullArchiveNetworkComp.netMessenger.Bootstrap() if err != nil { return nil, err } - // TODO: move to NewP2PAntiFloodComponents.initP2PAntiFloodComponents - if ncf.mainConfig.Debug.Antiflood.Enabled { - var debugger process.AntifloodDebugger - debugger, err = antiflood.NewAntifloodDebugger(ncf.mainConfig.Debug.Antiflood) - if err != nil { - return nil, err - } + return &networkComponents{ + mainNetworkHolder: mainNetworkComp, + fullArchiveNetworkHolder: fullArchiveNetworkComp, + peersRatingHandler: peersRatingHandler, + peersRatingMonitor: peersRatingMonitor, + inputAntifloodHandler: inputAntifloodHandler, + outputAntifloodHandler: outputAntifloodHandler, + pubKeyTimeCacher: antiFloodComponents.PubKeysCacher, + topicFloodPreventer: antiFloodComponents.TopicPreventer, + floodPreventers: antiFloodComponents.FloodPreventers, + peerBlackListHandler: antiFloodComponents.BlacklistHandler, + antifloodConfig: ncf.mainConfig.Antiflood, + peerHonestyHandler: peerHonestyHandler, + closeFunc: cancelFunc, + }, nil +} - err = antiFloodComponents.AntiFloodHandler.SetDebugger(debugger) - if err != nil { - return nil, err - } +func (ncf *networkComponentsFactory) createAntifloodComponents( + ctx context.Context, + currentPid core.PeerID, +) (*antifloodFactory.AntiFloodComponents, factory.P2PAntifloodHandler, factory.P2PAntifloodHandler, consensus.PeerHonestyHandler, error) { + var antiFloodComponents *antifloodFactory.AntiFloodComponents + antiFloodComponents, err := antifloodFactory.NewP2PAntiFloodComponents(ctx, ncf.mainConfig, ncf.statusHandler, currentPid) + if err != nil { + return nil, nil, nil, nil, err } inputAntifloodHandler, ok := antiFloodComponents.AntiFloodHandler.(factory.P2PAntifloodHandler) if !ok { err = errors.ErrWrongTypeAssertion - return nil, fmt.Errorf("%w when casting input antiflood handler to P2PAntifloodHandler", err) + return nil, nil, nil, nil, fmt.Errorf("%w when casting input antiflood handler to P2PAntifloodHandler", err) } var outAntifloodHandler process.P2PAntifloodHandler outAntifloodHandler, err = antifloodFactory.NewP2POutputAntiFlood(ctx, ncf.mainConfig) if err != nil { - return nil, err + return nil, nil, nil, nil, err } outputAntifloodHandler, ok := outAntifloodHandler.(factory.P2PAntifloodHandler) if !ok { err = errors.ErrWrongTypeAssertion - return nil, fmt.Errorf("%w when casting output antiflood handler to P2PAntifloodHandler", err) + return nil, nil, nil, nil, fmt.Errorf("%w when casting output antiflood handler to P2PAntifloodHandler", err) } var peerHonestyHandler consensus.PeerHonestyHandler @@ -212,31 +212,10 @@ func (ncf *networkComponentsFactory) Create() (*networkComponents, error) { 
antiFloodComponents.PubKeysCacher, ) if err != nil { - return nil, err - } - - err = netMessenger.Bootstrap() - if err != nil { - return nil, err + return nil, nil, nil, nil, err } - netMessenger.WaitForConnections(ncf.bootstrapWaitTime, ncf.p2pConfig.Node.MinNumPeersToWaitForOnBootstrap) - - return &networkComponents{ - netMessenger: netMessenger, - inputAntifloodHandler: inputAntifloodHandler, - outputAntifloodHandler: outputAntifloodHandler, - topicFloodPreventer: antiFloodComponents.TopicPreventer, - floodPreventers: antiFloodComponents.FloodPreventers, - peerBlackListHandler: antiFloodComponents.BlacklistHandler, - pubKeyTimeCacher: antiFloodComponents.PubKeysCacher, - antifloodConfig: ncf.mainConfig.Antiflood, - peerHonestyHandler: peerHonestyHandler, - peersHolder: ph, - peersRatingHandler: peersRatingHandler, - peersRatingMonitor: peersRatingMonitor, - closeFunc: cancelFunc, - }, nil + return antiFloodComponents, inputAntifloodHandler, outputAntifloodHandler, peerHonestyHandler, nil } func (ncf *networkComponentsFactory) createPeerHonestyHandler( @@ -253,6 +232,95 @@ func (ncf *networkComponentsFactory) createPeerHonestyHandler( return peerHonesty.NewP2pPeerHonesty(ratingConfig.PeerHonesty, pkTimeCache, suCache) } +func (ncf *networkComponentsFactory) createNetworkHolder( + p2pConfig p2pConfig.P2PConfig, + logger p2p.Logger, + peersRatingHandler p2p.PeersRatingHandler, + networkType p2p.NetworkType, +) (networkComponentsHolder, error) { + + peersHolder, err := p2pFactory.NewPeersHolder(ncf.preferredPeersSlices) + if err != nil { + return networkComponentsHolder{}, err + } + + argsMessenger := p2pFactory.ArgsNetworkMessenger{ + ListenAddress: ncf.listenAddress, + Marshaller: ncf.marshalizer, + P2pConfig: p2pConfig, + SyncTimer: ncf.syncer, + PreferredPeersHolder: peersHolder, + PeersRatingHandler: peersRatingHandler, + ConnectionWatcherType: ncf.connectionWatcherType, + P2pPrivateKey: ncf.cryptoComponents.P2pPrivateKey(), + P2pSingleSigner: ncf.cryptoComponents.P2pSingleSigner(), + P2pKeyGenerator: ncf.cryptoComponents.P2pKeyGen(), + NetworkType: networkType, + Logger: logger, + } + networkMessenger, err := p2pFactory.NewNetworkMessenger(argsMessenger) + if err != nil { + return networkComponentsHolder{}, err + } + + return networkComponentsHolder{ + netMessenger: networkMessenger, + preferredPeersHolder: peersHolder, + }, nil +} + +func (ncf *networkComponentsFactory) createMainNetworkHolder(peersRatingHandler p2p.PeersRatingHandler) (networkComponentsHolder, error) { + loggerInstance := logger.GetOrCreate("main/p2p") + return ncf.createNetworkHolder(ncf.mainP2PConfig, loggerInstance, peersRatingHandler, p2p.MainNetwork) +} + +func (ncf *networkComponentsFactory) createFullArchiveNetworkHolder(peersRatingHandler p2p.PeersRatingHandler) (networkComponentsHolder, error) { + if ncf.nodeOperationMode != p2p.FullArchiveMode { + return networkComponentsHolder{ + netMessenger: p2pDisabled.NewNetworkMessenger(), + preferredPeersHolder: disabled.NewPreferredPeersHolder(), + }, nil + } + + loggerInstance := logger.GetOrCreate("full-archive/p2p") + + return ncf.createNetworkHolder(ncf.fullArchiveP2PConfig, loggerInstance, peersRatingHandler, p2p.FullArchiveNetwork) +} + +func (ncf *networkComponentsFactory) createPeersRatingComponents() (p2p.PeersRatingHandler, p2p.PeersRatingMonitor, error) { + peersRatingCfg := ncf.mainConfig.PeersRatingConfig + topRatedCache, err := cache.NewLRUCache(peersRatingCfg.TopRatedCacheCapacity) + if err != nil { + return nil, nil, err + } + badRatedCache, err := 
cache.NewLRUCache(peersRatingCfg.BadRatedCacheCapacity) + if err != nil { + return nil, nil, err + } + + peersRatingLogger := logger.GetOrCreate("peersRating") + argsPeersRatingHandler := p2pFactory.ArgPeersRatingHandler{ + TopRatedCache: topRatedCache, + BadRatedCache: badRatedCache, + Logger: peersRatingLogger, + } + peersRatingHandler, err := p2pFactory.NewPeersRatingHandler(argsPeersRatingHandler) + if err != nil { + return nil, nil, err + } + + argsPeersRatingMonitor := p2pFactory.ArgPeersRatingMonitor{ + TopRatedCache: topRatedCache, + BadRatedCache: badRatedCache, + } + peersRatingMonitor, err := p2pFactory.NewPeersRatingMonitor(argsPeersRatingMonitor) + if err != nil { + return nil, nil, err + } + + return peersRatingHandler, peersRatingMonitor, nil +} + // Close closes all underlying components that need closing func (nc *networkComponents) Close() error { nc.closeFunc() @@ -263,17 +331,20 @@ func (nc *networkComponents) Close() error { if !check.IfNil(nc.outputAntifloodHandler) { log.LogIfError(nc.outputAntifloodHandler.Close()) } - if !check.IfNil(nc.topicFloodPreventer) { - log.LogIfError(nc.outputAntifloodHandler.Close()) - } if !check.IfNil(nc.peerHonestyHandler) { log.LogIfError(nc.peerHonestyHandler.Close()) } - if nc.netMessenger != nil { - log.Debug("calling close on the network messenger instance...") - err := nc.netMessenger.Close() - log.LogIfError(err) + mainNetMessenger := nc.mainNetworkHolder.netMessenger + if !check.IfNil(mainNetMessenger) { + log.Debug("calling close on the main network messenger instance...") + log.LogIfError(mainNetMessenger.Close()) + } + + fullArchiveNetMessenger := nc.fullArchiveNetworkHolder.netMessenger + if !check.IfNil(fullArchiveNetMessenger) { + log.Debug("calling close on the full archive network messenger instance...") + log.LogIfError(fullArchiveNetMessenger.Close()) } return nil diff --git a/factory/network/networkComponentsHandler.go b/factory/network/networkComponentsHandler.go index 578794bce98..eda76bb8f28 100644 --- a/factory/network/networkComponentsHandler.go +++ b/factory/network/networkComponentsHandler.go @@ -15,6 +15,11 @@ var _ factory.ComponentHandler = (*managedNetworkComponents)(nil) var _ factory.NetworkComponentsHolder = (*managedNetworkComponents)(nil) var _ factory.NetworkComponentsHandler = (*managedNetworkComponents)(nil) +const ( + errorOnMainNetworkString = "on main network" + errorOnFullArchiveNetworkString = "on full archive network" +) + // managedNetworkComponents creates the data components handler that can create, close and access the data components type managedNetworkComponents struct { *networkComponents @@ -74,9 +79,20 @@ func (mnc *managedNetworkComponents) CheckSubcomponents() error { if mnc.networkComponents == nil { return errors.ErrNilNetworkComponents } - if check.IfNil(mnc.netMessenger) { - return errors.ErrNilMessenger + if check.IfNil(mnc.mainNetworkHolder.netMessenger) { + return fmt.Errorf("%w %s", errors.ErrNilMessenger, errorOnMainNetworkString) + } + if check.IfNil(mnc.peersRatingHandler) { + return errors.ErrNilPeersRatingHandler + } + if check.IfNil(mnc.peersRatingMonitor) { + return errors.ErrNilPeersRatingMonitor + } + + if check.IfNil(mnc.fullArchiveNetworkHolder.netMessenger) { + return fmt.Errorf("%w %s", errors.ErrNilMessenger, errorOnFullArchiveNetworkString) } + if check.IfNil(mnc.inputAntifloodHandler) { return errors.ErrNilInputAntiFloodHandler } @@ -89,17 +105,11 @@ func (mnc *managedNetworkComponents) CheckSubcomponents() error { if check.IfNil(mnc.peerHonestyHandler) { 
return errors.ErrNilPeerHonestyHandler } - if check.IfNil(mnc.peersRatingHandler) { - return errors.ErrNilPeersRatingHandler - } - if check.IfNil(mnc.peersRatingMonitor) { - return errors.ErrNilPeersRatingMonitor - } return nil } -// NetworkMessenger returns the p2p messenger +// NetworkMessenger returns the p2p messenger of the main network func (mnc *managedNetworkComponents) NetworkMessenger() p2p.Messenger { mnc.mutNetworkComponents.RLock() defer mnc.mutNetworkComponents.RUnlock() @@ -108,7 +118,7 @@ func (mnc *managedNetworkComponents) NetworkMessenger() p2p.Messenger { return nil } - return mnc.netMessenger + return mnc.mainNetworkHolder.netMessenger } // InputAntiFloodHandler returns the input p2p anti-flood handler @@ -171,7 +181,7 @@ func (mnc *managedNetworkComponents) PeerHonestyHandler() factory.PeerHonestyHan return mnc.networkComponents.peerHonestyHandler } -// PreferredPeersHolderHandler returns the preferred peers holder +// PreferredPeersHolderHandler returns the preferred peers holder of the main network func (mnc *managedNetworkComponents) PreferredPeersHolderHandler() factory.PreferredPeersHolderHandler { mnc.mutNetworkComponents.RLock() defer mnc.mutNetworkComponents.RUnlock() @@ -180,10 +190,10 @@ func (mnc *managedNetworkComponents) PreferredPeersHolderHandler() factory.Prefe return nil } - return mnc.networkComponents.peersHolder + return mnc.mainNetworkHolder.preferredPeersHolder } -// PeersRatingHandler returns the peers rating handler +// PeersRatingHandler returns the peers rating handler of the main network func (mnc *managedNetworkComponents) PeersRatingHandler() p2p.PeersRatingHandler { mnc.mutNetworkComponents.RLock() defer mnc.mutNetworkComponents.RUnlock() @@ -192,10 +202,10 @@ func (mnc *managedNetworkComponents) PeersRatingHandler() p2p.PeersRatingHandler return nil } - return mnc.networkComponents.peersRatingHandler + return mnc.peersRatingHandler } -// PeersRatingMonitor returns the peers rating monitor +// PeersRatingMonitor returns the peers rating monitor of the main network func (mnc *managedNetworkComponents) PeersRatingMonitor() p2p.PeersRatingMonitor { mnc.mutNetworkComponents.RLock() defer mnc.mutNetworkComponents.RUnlock() @@ -204,7 +214,31 @@ func (mnc *managedNetworkComponents) PeersRatingMonitor() p2p.PeersRatingMonitor return nil } - return mnc.networkComponents.peersRatingMonitor + return mnc.peersRatingMonitor +} + +// FullArchiveNetworkMessenger returns the p2p messenger of the full archive network +func (mnc *managedNetworkComponents) FullArchiveNetworkMessenger() p2p.Messenger { + mnc.mutNetworkComponents.RLock() + defer mnc.mutNetworkComponents.RUnlock() + + if mnc.networkComponents == nil { + return nil + } + + return mnc.fullArchiveNetworkHolder.netMessenger +} + +// FullArchivePreferredPeersHolderHandler returns the preferred peers holder of the full archive network +func (mnc *managedNetworkComponents) FullArchivePreferredPeersHolderHandler() factory.PreferredPeersHolderHandler { + mnc.mutNetworkComponents.RLock() + defer mnc.mutNetworkComponents.RUnlock() + + if mnc.networkComponents == nil { + return nil + } + + return mnc.fullArchiveNetworkHolder.preferredPeersHolder } // IsInterfaceNil returns true if the value under the interface is nil diff --git a/factory/network/networkComponentsHandler_test.go b/factory/network/networkComponentsHandler_test.go index 51bfe86372c..811af70cc30 100644 --- a/factory/network/networkComponentsHandler_test.go +++ b/factory/network/networkComponentsHandler_test.go @@ -37,7 +37,7 @@ func 
TestManagedNetworkComponents_Create(t *testing.T) { t.Parallel() networkArgs := componentsMock.GetNetworkFactoryArgs() - networkArgs.P2pConfig.Node.Port = "invalid" + networkArgs.MainP2pConfig.Node.Port = "invalid" networkComponentsFactory, _ := networkComp.NewNetworkComponentsFactory(networkArgs) managedNetworkComponents, err := networkComp.NewManagedNetworkComponents(networkComponentsFactory) require.NoError(t, err) @@ -61,6 +61,8 @@ func TestManagedNetworkComponents_Create(t *testing.T) { require.Nil(t, managedNetworkComponents.PreferredPeersHolderHandler()) require.Nil(t, managedNetworkComponents.PeerHonestyHandler()) require.Nil(t, managedNetworkComponents.PeersRatingHandler()) + require.Nil(t, managedNetworkComponents.FullArchiveNetworkMessenger()) + require.Nil(t, managedNetworkComponents.FullArchivePreferredPeersHolderHandler()) err = managedNetworkComponents.Create() require.NoError(t, err) @@ -72,6 +74,8 @@ func TestManagedNetworkComponents_Create(t *testing.T) { require.NotNil(t, managedNetworkComponents.PreferredPeersHolderHandler()) require.NotNil(t, managedNetworkComponents.PeerHonestyHandler()) require.NotNil(t, managedNetworkComponents.PeersRatingHandler()) + require.NotNil(t, managedNetworkComponents.FullArchiveNetworkMessenger()) + require.NotNil(t, managedNetworkComponents.FullArchivePreferredPeersHolderHandler()) require.Equal(t, factory.NetworkComponentsName, managedNetworkComponents.String()) }) diff --git a/factory/network/networkComponents_test.go b/factory/network/networkComponents_test.go index 1fe95107b6f..dca1e2f2d80 100644 --- a/factory/network/networkComponents_test.go +++ b/factory/network/networkComponents_test.go @@ -50,6 +50,16 @@ func TestNewNetworkComponentsFactory(t *testing.T) { require.Nil(t, ncf) require.Equal(t, errorsMx.ErrNilCryptoComponentsHolder, err) }) + t.Run("invalid node operation mode should error", func(t *testing.T) { + t.Parallel() + + args := componentsMock.GetNetworkFactoryArgs() + args.NodeOperationMode = "invalid" + + ncf, err := networkComp.NewNetworkComponentsFactory(args) + require.Equal(t, errorsMx.ErrInvalidNodeOperationMode, err) + require.Nil(t, ncf) + }) t.Run("should work", func(t *testing.T) { t.Parallel() diff --git a/factory/processing/processComponents.go b/factory/processing/processComponents.go index bbbb3043ac2..58031461a18 100644 --- a/factory/processing/processComponents.go +++ b/factory/processing/processComponents.go @@ -42,6 +42,7 @@ import ( "github.com/multiversx/mx-chain-go/genesis" "github.com/multiversx/mx-chain-go/genesis/checking" processGenesis "github.com/multiversx/mx-chain-go/genesis/process" + "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/block" "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" @@ -86,7 +87,8 @@ var timeSpanForBadHeaders = time.Minute * 2 type processComponents struct { nodesCoordinator nodesCoordinator.NodesCoordinator shardCoordinator sharding.Coordinator - interceptorsContainer process.InterceptorsContainer + mainInterceptorsContainer process.InterceptorsContainer + fullArchiveInterceptorsContainer process.InterceptorsContainer resolversContainer dataRetriever.ResolversContainer requestersFinder dataRetriever.RequestersFinder roundHandler consensus.RoundHandler @@ -105,7 +107,8 @@ type processComponents struct { requestHandler process.RequestHandler txLogsProcessor process.TransactionLogProcessorDatabase headerConstructionValidator process.HeaderConstructionValidator - peerShardMapper 
process.NetworkShardingCollector + mainPeerShardMapper process.NetworkShardingCollector + fullArchivePeerShardMapper process.NetworkShardingCollector apiTransactionEvaluator factory.TransactionEvaluator miniBlocksPoolCleaner process.PoolsCleaner txsPoolCleaner process.PoolsCleaner @@ -267,8 +270,16 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { return nil, err } - // TODO: maybe move PeerShardMapper to network components - peerShardMapper, err := pcf.prepareNetworkShardingCollector() + mainPeerShardMapper, err := pcf.prepareNetworkShardingCollectorForMessenger(pcf.network.NetworkMessenger()) + if err != nil { + return nil, err + } + fullArchivePeerShardMapper, err := pcf.prepareNetworkShardingCollectorForMessenger(pcf.network.FullArchiveNetworkMessenger()) + if err != nil { + return nil, err + } + + err = pcf.network.InputAntiFloodHandler().SetPeerValidatorMapper(mainPeerShardMapper) if err != nil { return nil, err } @@ -496,7 +507,8 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { blockTracker, epochStartTrigger, requestHandler, - peerShardMapper, + mainPeerShardMapper, + fullArchivePeerShardMapper, hardforkTrigger, ) if err != nil { @@ -504,7 +516,7 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { } // TODO refactor all these factory calls - interceptorsContainer, err := interceptorContainerFactory.Create() + mainInterceptorsContainer, fullArchiveInterceptorsContainer, err := interceptorContainerFactory.Create() if err != nil { return nil, err } @@ -514,7 +526,8 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { requestHandler, resolversContainer, requestersContainer, - interceptorsContainer, + mainInterceptorsContainer, + fullArchiveInterceptorsContainer, headerSigVerifier, blockTracker, ) @@ -673,7 +686,8 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { return &processComponents{ nodesCoordinator: pcf.nodesCoordinator, shardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), - interceptorsContainer: interceptorsContainer, + mainInterceptorsContainer: mainInterceptorsContainer, + fullArchiveInterceptorsContainer: fullArchiveInterceptorsContainer, resolversContainer: resolversContainer, requestersFinder: requestersFinder, roundHandler: pcf.coreData.RoundHandler(), @@ -692,7 +706,8 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { txLogsProcessor: txLogsProcessor, headerConstructionValidator: headerValidator, headerIntegrityVerifier: pcf.bootstrapComponents.HeaderIntegrityVerifier(), - peerShardMapper: peerShardMapper, + mainPeerShardMapper: mainPeerShardMapper, + fullArchivePeerShardMapper: fullArchivePeerShardMapper, apiTransactionEvaluator: apiTransactionEvaluator, miniBlocksPoolCleaner: mbsPoolsCleaner, txsPoolCleaner: txsPoolsCleaner, @@ -1334,21 +1349,23 @@ func (pcf *processComponentsFactory) newShardResolverContainerFactory( } resolversContainerFactoryArgs := resolverscontainer.FactoryArgs{ - ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), - Messenger: pcf.network.NetworkMessenger(), - Store: pcf.data.StorageService(), - Marshalizer: pcf.coreData.InternalMarshalizer(), - DataPools: pcf.data.Datapool(), - Uint64ByteSliceConverter: pcf.coreData.Uint64ByteSliceConverter(), - DataPacker: dataPacker, - TriesContainer: pcf.state.TriesContainer(), - SizeCheckDelta: pcf.config.Marshalizer.SizeCheckDelta, - InputAntifloodHandler: pcf.network.InputAntiFloodHandler(), - OutputAntifloodHandler: 
pcf.network.OutputAntiFloodHandler(), - NumConcurrentResolvingJobs: pcf.config.Antiflood.NumConcurrentResolverJobs, - IsFullHistoryNode: pcf.prefConfigs.Preferences.FullArchive, - PreferredPeersHolder: pcf.network.PreferredPeersHolderHandler(), - PayloadValidator: payloadValidator, + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + MainMessenger: pcf.network.NetworkMessenger(), + FullArchiveMessenger: pcf.network.FullArchiveNetworkMessenger(), + Store: pcf.data.StorageService(), + Marshalizer: pcf.coreData.InternalMarshalizer(), + DataPools: pcf.data.Datapool(), + Uint64ByteSliceConverter: pcf.coreData.Uint64ByteSliceConverter(), + DataPacker: dataPacker, + TriesContainer: pcf.state.TriesContainer(), + SizeCheckDelta: pcf.config.Marshalizer.SizeCheckDelta, + InputAntifloodHandler: pcf.network.InputAntiFloodHandler(), + OutputAntifloodHandler: pcf.network.OutputAntiFloodHandler(), + NumConcurrentResolvingJobs: pcf.config.Antiflood.NumConcurrentResolverJobs, + IsFullHistoryNode: pcf.prefConfigs.Preferences.FullArchive, + MainPreferredPeersHolder: pcf.network.PreferredPeersHolderHandler(), + FullArchivePreferredPeersHolder: pcf.network.FullArchivePreferredPeersHolderHandler(), + PayloadValidator: payloadValidator, } resolversContainerFactory, err := resolverscontainer.NewShardResolversContainerFactory(resolversContainerFactoryArgs) if err != nil { @@ -1368,21 +1385,23 @@ func (pcf *processComponentsFactory) newMetaResolverContainerFactory( } resolversContainerFactoryArgs := resolverscontainer.FactoryArgs{ - ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), - Messenger: pcf.network.NetworkMessenger(), - Store: pcf.data.StorageService(), - Marshalizer: pcf.coreData.InternalMarshalizer(), - DataPools: pcf.data.Datapool(), - Uint64ByteSliceConverter: pcf.coreData.Uint64ByteSliceConverter(), - DataPacker: dataPacker, - TriesContainer: pcf.state.TriesContainer(), - SizeCheckDelta: pcf.config.Marshalizer.SizeCheckDelta, - InputAntifloodHandler: pcf.network.InputAntiFloodHandler(), - OutputAntifloodHandler: pcf.network.OutputAntiFloodHandler(), - NumConcurrentResolvingJobs: pcf.config.Antiflood.NumConcurrentResolverJobs, - IsFullHistoryNode: pcf.prefConfigs.Preferences.FullArchive, - PreferredPeersHolder: pcf.network.PreferredPeersHolderHandler(), - PayloadValidator: payloadValidator, + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + MainMessenger: pcf.network.NetworkMessenger(), + FullArchiveMessenger: pcf.network.FullArchiveNetworkMessenger(), + Store: pcf.data.StorageService(), + Marshalizer: pcf.coreData.InternalMarshalizer(), + DataPools: pcf.data.Datapool(), + Uint64ByteSliceConverter: pcf.coreData.Uint64ByteSliceConverter(), + DataPacker: dataPacker, + TriesContainer: pcf.state.TriesContainer(), + SizeCheckDelta: pcf.config.Marshalizer.SizeCheckDelta, + InputAntifloodHandler: pcf.network.InputAntiFloodHandler(), + OutputAntifloodHandler: pcf.network.OutputAntiFloodHandler(), + NumConcurrentResolvingJobs: pcf.config.Antiflood.NumConcurrentResolverJobs, + IsFullHistoryNode: pcf.prefConfigs.Preferences.FullArchive, + MainPreferredPeersHolder: pcf.network.PreferredPeersHolderHandler(), + FullArchivePreferredPeersHolder: pcf.network.FullArchivePreferredPeersHolderHandler(), + PayloadValidator: payloadValidator, } return resolverscontainer.NewMetaResolversContainerFactory(resolversContainerFactoryArgs) @@ -1399,16 +1418,18 @@ func (pcf *processComponentsFactory) newRequestersContainerFactory( shardCoordinator := pcf.bootstrapComponents.ShardCoordinator() 
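The resolver and requester container factories here now receive both networks explicitly: the former single Messenger and PreferredPeersHolder fields become Main*/FullArchive* pairs. A minimal sketch of that wiring pattern follows; every type and field name in it is illustrative only, not the repository's actual API:

package main

import (
	"errors"
	"fmt"
)

// Messenger and PeersHolder stand in for the real p2p interfaces the factories receive.
type Messenger interface {
	ID() string
}

type PeersHolder interface {
	Contains(peerID string) bool
}

// dualNetworkArgs mirrors the Main*/FullArchive* field pairs introduced in this patch.
type dualNetworkArgs struct {
	MainMessenger                   Messenger
	FullArchiveMessenger            Messenger
	MainPreferredPeersHolder        PeersHolder
	FullArchivePreferredPeersHolder PeersHolder
}

var errNilDependency = errors.New("nil dependency")

// checkDualNetworkArgs rejects a missing component on either network,
// naming the network in the returned error.
func checkDualNetworkArgs(args dualNetworkArgs) error {
	switch {
	case args.MainMessenger == nil:
		return fmt.Errorf("%w: main messenger", errNilDependency)
	case args.FullArchiveMessenger == nil:
		return fmt.Errorf("%w: full archive messenger", errNilDependency)
	case args.MainPreferredPeersHolder == nil:
		return fmt.Errorf("%w: main preferred peers holder", errNilDependency)
	case args.FullArchivePreferredPeersHolder == nil:
		return fmt.Errorf("%w: full archive preferred peers holder", errNilDependency)
	}
	return nil
}

func main() {
	// Both networks must be wired explicitly; here everything is left nil on purpose.
	err := checkDualNetworkArgs(dualNetworkArgs{})
	fmt.Println(err) // nil dependency: main messenger
}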
requestersContainerFactoryArgs := requesterscontainer.FactoryArgs{ - RequesterConfig: pcf.config.Requesters, - ShardCoordinator: shardCoordinator, - Messenger: pcf.network.NetworkMessenger(), - Marshaller: pcf.coreData.InternalMarshalizer(), - Uint64ByteSliceConverter: pcf.coreData.Uint64ByteSliceConverter(), - OutputAntifloodHandler: pcf.network.OutputAntiFloodHandler(), - CurrentNetworkEpochProvider: currentEpochProvider, - PreferredPeersHolder: pcf.network.PreferredPeersHolderHandler(), - PeersRatingHandler: pcf.network.PeersRatingHandler(), - SizeCheckDelta: pcf.config.Marshalizer.SizeCheckDelta, + RequesterConfig: pcf.config.Requesters, + ShardCoordinator: shardCoordinator, + MainMessenger: pcf.network.NetworkMessenger(), + FullArchiveMessenger: pcf.network.FullArchiveNetworkMessenger(), + Marshaller: pcf.coreData.InternalMarshalizer(), + Uint64ByteSliceConverter: pcf.coreData.Uint64ByteSliceConverter(), + OutputAntifloodHandler: pcf.network.OutputAntiFloodHandler(), + CurrentNetworkEpochProvider: currentEpochProvider, + MainPreferredPeersHolder: pcf.network.PreferredPeersHolderHandler(), + FullArchivePreferredPeersHolder: pcf.network.FullArchivePreferredPeersHolderHandler(), + PeersRatingHandler: pcf.network.PeersRatingHandler(), + SizeCheckDelta: pcf.config.Marshalizer.SizeCheckDelta, } if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { @@ -1427,9 +1448,15 @@ func (pcf *processComponentsFactory) newInterceptorContainerFactory( validityAttester process.ValidityAttester, epochStartTrigger process.EpochStartTriggerHandler, requestHandler process.RequestHandler, - peerShardMapper *networksharding.PeerShardMapper, + mainPeerShardMapper *networksharding.PeerShardMapper, + fullArchivePeerShardMapper *networksharding.PeerShardMapper, hardforkTrigger factory.HardforkTrigger, ) (process.InterceptorsContainerFactory, process.TimeCacher, error) { + nodeOperationMode := p2p.NormalOperation + if pcf.prefConfigs.Preferences.FullArchive { + nodeOperationMode = p2p.FullArchiveMode + } + shardCoordinator := pcf.bootstrapComponents.ShardCoordinator() if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { return pcf.newShardInterceptorContainerFactory( @@ -1438,8 +1465,10 @@ func (pcf *processComponentsFactory) newInterceptorContainerFactory( validityAttester, epochStartTrigger, requestHandler, - peerShardMapper, + mainPeerShardMapper, + fullArchivePeerShardMapper, hardforkTrigger, + nodeOperationMode, ) } if shardCoordinator.SelfId() == core.MetachainShardId { @@ -1449,8 +1478,10 @@ func (pcf *processComponentsFactory) newInterceptorContainerFactory( validityAttester, epochStartTrigger, requestHandler, - peerShardMapper, + mainPeerShardMapper, + fullArchivePeerShardMapper, hardforkTrigger, + nodeOperationMode, ) } @@ -1583,8 +1614,10 @@ func (pcf *processComponentsFactory) newShardInterceptorContainerFactory( validityAttester process.ValidityAttester, epochStartTrigger process.EpochStartTriggerHandler, requestHandler process.RequestHandler, - peerShardMapper *networksharding.PeerShardMapper, + mainPeerShardMapper *networksharding.PeerShardMapper, + fullArchivePeerShardMapper *networksharding.PeerShardMapper, hardforkTrigger factory.HardforkTrigger, + nodeOperationMode p2p.NodeOperation, ) (process.InterceptorsContainerFactory, process.TimeCacher, error) { headerBlackList := cache.NewTimeCache(timeSpanForBadHeaders) shardInterceptorsContainerFactoryArgs := interceptorscontainer.CommonInterceptorsContainerFactoryArgs{ @@ -1593,7 +1626,8 @@ func (pcf *processComponentsFactory) 
newShardInterceptorContainerFactory( Accounts: pcf.state.AccountsAdapter(), ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), NodesCoordinator: pcf.nodesCoordinator, - Messenger: pcf.network.NetworkMessenger(), + MainMessenger: pcf.network.NetworkMessenger(), + FullArchiveMessenger: pcf.network.FullArchiveNetworkMessenger(), Store: pcf.data.StorageService(), DataPool: pcf.data.Datapool(), MaxTxNonceDeltaAllowed: common.MaxTxNonceDeltaAllowed, @@ -1613,8 +1647,10 @@ func (pcf *processComponentsFactory) newShardInterceptorContainerFactory( PeerSignatureHandler: pcf.crypto.PeerSignatureHandler(), SignaturesHandler: pcf.network.NetworkMessenger(), HeartbeatExpiryTimespanInSec: pcf.config.HeartbeatV2.HeartbeatExpiryTimespanInSec, - PeerShardMapper: peerShardMapper, + MainPeerShardMapper: mainPeerShardMapper, + FullArchivePeerShardMapper: fullArchivePeerShardMapper, HardforkTrigger: hardforkTrigger, + NodeOperationMode: nodeOperationMode, } interceptorContainerFactory, err := interceptorscontainer.NewShardInterceptorsContainerFactory(shardInterceptorsContainerFactoryArgs) @@ -1631,8 +1667,10 @@ func (pcf *processComponentsFactory) newMetaInterceptorContainerFactory( validityAttester process.ValidityAttester, epochStartTrigger process.EpochStartTriggerHandler, requestHandler process.RequestHandler, - peerShardMapper *networksharding.PeerShardMapper, + mainPeerShardMapper *networksharding.PeerShardMapper, + fullArchivePeerShardMapper *networksharding.PeerShardMapper, hardforkTrigger factory.HardforkTrigger, + nodeOperationMode p2p.NodeOperation, ) (process.InterceptorsContainerFactory, process.TimeCacher, error) { headerBlackList := cache.NewTimeCache(timeSpanForBadHeaders) metaInterceptorsContainerFactoryArgs := interceptorscontainer.CommonInterceptorsContainerFactoryArgs{ @@ -1640,7 +1678,8 @@ func (pcf *processComponentsFactory) newMetaInterceptorContainerFactory( CryptoComponents: pcf.crypto, ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), NodesCoordinator: pcf.nodesCoordinator, - Messenger: pcf.network.NetworkMessenger(), + MainMessenger: pcf.network.NetworkMessenger(), + FullArchiveMessenger: pcf.network.FullArchiveNetworkMessenger(), Store: pcf.data.StorageService(), DataPool: pcf.data.Datapool(), Accounts: pcf.state.AccountsAdapter(), @@ -1661,8 +1700,10 @@ func (pcf *processComponentsFactory) newMetaInterceptorContainerFactory( PeerSignatureHandler: pcf.crypto.PeerSignatureHandler(), SignaturesHandler: pcf.network.NetworkMessenger(), HeartbeatExpiryTimespanInSec: pcf.config.HeartbeatV2.HeartbeatExpiryTimespanInSec, - PeerShardMapper: peerShardMapper, + MainPeerShardMapper: mainPeerShardMapper, + FullArchivePeerShardMapper: fullArchivePeerShardMapper, HardforkTrigger: hardforkTrigger, + NodeOperationMode: nodeOperationMode, } interceptorContainerFactory, err := interceptorscontainer.NewMetaInterceptorsContainerFactory(metaInterceptorsContainerFactoryArgs) @@ -1688,8 +1729,8 @@ func (pcf *processComponentsFactory) newForkDetector( return nil, errors.New("could not create fork detector") } -// PrepareNetworkShardingCollector will create the network sharding collector and apply it to the network messenger -func (pcf *processComponentsFactory) prepareNetworkShardingCollector() (*networksharding.PeerShardMapper, error) { +// prepareNetworkShardingCollectorForMessenger will create the network sharding collector and apply it to the provided network messenger +func (pcf *processComponentsFactory) prepareNetworkShardingCollectorForMessenger(messenger p2p.Messenger) 
(*networksharding.PeerShardMapper, error) { networkShardingCollector, err := createNetworkShardingCollector( &pcf.config, pcf.nodesCoordinator, @@ -1702,12 +1743,7 @@ func (pcf *processComponentsFactory) prepareNetworkShardingCollector() (*network localID := pcf.network.NetworkMessenger().ID() networkShardingCollector.UpdatePeerIDInfo(localID, pcf.crypto.PublicKeyBytes(), pcf.bootstrapComponents.ShardCoordinator().SelfId()) - err = pcf.network.NetworkMessenger().SetPeerShardResolver(networkShardingCollector) - if err != nil { - return nil, err - } - - err = pcf.network.InputAntiFloodHandler().SetPeerValidatorMapper(networkShardingCollector) + err = messenger.SetPeerShardResolver(networkShardingCollector) if err != nil { return nil, err } @@ -1720,7 +1756,8 @@ func (pcf *processComponentsFactory) createExportFactoryHandler( requestHandler process.RequestHandler, resolversContainer dataRetriever.ResolversContainer, requestersContainer dataRetriever.RequestersContainer, - interceptorsContainer process.InterceptorsContainer, + mainInterceptorsContainer process.InterceptorsContainer, + fullArchiveInterceptorsContainer process.InterceptorsContainer, headerSigVerifier process.InterceptedHeaderSigVerifier, blockTracker process.ValidityAttester, ) (update.ExportFactoryHandler, error) { @@ -1730,39 +1767,42 @@ func (pcf *processComponentsFactory) createExportFactoryHandler( accountsDBs[state.UserAccountsState] = pcf.state.AccountsAdapter() accountsDBs[state.PeerAccountsState] = pcf.state.PeerAccounts() exportFolder := filepath.Join(pcf.flagsConfig.WorkingDir, hardforkConfig.ImportFolder) + nodeOperationMode := p2p.NormalOperation + if pcf.prefConfigs.Preferences.FullArchive { + nodeOperationMode = p2p.FullArchiveMode + } argsExporter := updateFactory.ArgsExporter{ - CoreComponents: pcf.coreData, - CryptoComponents: pcf.crypto, - StatusCoreComponents: pcf.statusCoreComponents, - HeaderValidator: headerValidator, - DataPool: pcf.data.Datapool(), - StorageService: pcf.data.StorageService(), - RequestHandler: requestHandler, - ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), - Messenger: pcf.network.NetworkMessenger(), - ActiveAccountsDBs: accountsDBs, - ExistingResolvers: resolversContainer, - ExistingRequesters: requestersContainer, - ExportFolder: exportFolder, - ExportTriesStorageConfig: hardforkConfig.ExportTriesStorageConfig, - ExportStateStorageConfig: hardforkConfig.ExportStateStorageConfig, - ExportStateKeysConfig: hardforkConfig.ExportKeysStorageConfig, - MaxTrieLevelInMemory: pcf.config.StateTriesConfig.MaxStateTrieLevelInMemory, - WhiteListHandler: pcf.whiteListHandler, - WhiteListerVerifiedTxs: pcf.whiteListerVerifiedTxs, - InterceptorsContainer: interceptorsContainer, - NodesCoordinator: pcf.nodesCoordinator, - HeaderSigVerifier: headerSigVerifier, - HeaderIntegrityVerifier: pcf.bootstrapComponents.HeaderIntegrityVerifier(), - ValidityAttester: blockTracker, - InputAntifloodHandler: pcf.network.InputAntiFloodHandler(), - OutputAntifloodHandler: pcf.network.OutputAntiFloodHandler(), - RoundHandler: pcf.coreData.RoundHandler(), - InterceptorDebugConfig: pcf.config.Debug.InterceptorResolver, - MaxHardCapForMissingNodes: pcf.config.TrieSync.MaxHardCapForMissingNodes, - NumConcurrentTrieSyncers: pcf.config.TrieSync.NumConcurrentTrieSyncers, - TrieSyncerVersion: pcf.config.TrieSync.TrieSyncerVersion, - PeersRatingHandler: pcf.network.PeersRatingHandler(), + CoreComponents: pcf.coreData, + CryptoComponents: pcf.crypto, + StatusCoreComponents: pcf.statusCoreComponents, + 
NetworkComponents: pcf.network, + HeaderValidator: headerValidator, + DataPool: pcf.data.Datapool(), + StorageService: pcf.data.StorageService(), + RequestHandler: requestHandler, + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + ActiveAccountsDBs: accountsDBs, + ExistingResolvers: resolversContainer, + ExistingRequesters: requestersContainer, + ExportFolder: exportFolder, + ExportTriesStorageConfig: hardforkConfig.ExportTriesStorageConfig, + ExportStateStorageConfig: hardforkConfig.ExportStateStorageConfig, + ExportStateKeysConfig: hardforkConfig.ExportKeysStorageConfig, + MaxTrieLevelInMemory: pcf.config.StateTriesConfig.MaxStateTrieLevelInMemory, + WhiteListHandler: pcf.whiteListHandler, + WhiteListerVerifiedTxs: pcf.whiteListerVerifiedTxs, + MainInterceptorsContainer: mainInterceptorsContainer, + FullArchiveInterceptorsContainer: fullArchiveInterceptorsContainer, + NodesCoordinator: pcf.nodesCoordinator, + HeaderSigVerifier: headerSigVerifier, + HeaderIntegrityVerifier: pcf.bootstrapComponents.HeaderIntegrityVerifier(), + ValidityAttester: blockTracker, + RoundHandler: pcf.coreData.RoundHandler(), + InterceptorDebugConfig: pcf.config.Debug.InterceptorResolver, + MaxHardCapForMissingNodes: pcf.config.TrieSync.MaxHardCapForMissingNodes, + NumConcurrentTrieSyncers: pcf.config.TrieSync.NumConcurrentTrieSyncers, + TrieSyncerVersion: pcf.config.TrieSync.TrieSyncerVersion, + NodeOperationMode: nodeOperationMode, } return updateFactory.NewExportHandlerFactory(argsExporter) } @@ -1944,8 +1984,9 @@ func (pc *processComponents) Close() error { if !check.IfNil(pc.importHandler) { log.LogIfError(pc.importHandler.Close()) } - if !check.IfNil(pc.interceptorsContainer) { - log.LogIfError(pc.interceptorsContainer.Close()) + // only calling close on the mainInterceptorsContainer as it should be the same interceptors on full archive + if !check.IfNil(pc.mainInterceptorsContainer) { + log.LogIfError(pc.mainInterceptorsContainer.Close()) } if !check.IfNil(pc.vmFactoryForTxSimulator) { log.LogIfError(pc.vmFactoryForTxSimulator.Close()) diff --git a/factory/processing/processComponentsHandler.go b/factory/processing/processComponentsHandler.go index 619bc189b26..b544ba901ef 100644 --- a/factory/processing/processComponentsHandler.go +++ b/factory/processing/processComponentsHandler.go @@ -1,6 +1,7 @@ package processing import ( + "fmt" "sync" "github.com/multiversx/mx-chain-core-go/core/check" @@ -86,8 +87,11 @@ func (m *managedProcessComponents) CheckSubcomponents() error { if check.IfNil(m.processComponents.shardCoordinator) { return errors.ErrNilShardCoordinator } - if check.IfNil(m.processComponents.interceptorsContainer) { - return errors.ErrNilInterceptorsContainer + if check.IfNil(m.processComponents.mainInterceptorsContainer) { + return fmt.Errorf("%w on main network", errors.ErrNilInterceptorsContainer) + } + if check.IfNil(m.processComponents.fullArchiveInterceptorsContainer) { + return fmt.Errorf("%w on full archive network", errors.ErrNilInterceptorsContainer) } if check.IfNil(m.processComponents.resolversContainer) { return errors.ErrNilResolversContainer @@ -143,8 +147,11 @@ func (m *managedProcessComponents) CheckSubcomponents() error { if check.IfNil(m.processComponents.headerConstructionValidator) { return errors.ErrNilHeaderConstructionValidator } - if check.IfNil(m.processComponents.peerShardMapper) { - return errors.ErrNilPeerShardMapper + if check.IfNil(m.processComponents.mainPeerShardMapper) { + return fmt.Errorf("%w for main", errors.ErrNilPeerShardMapper) + } + if 
check.IfNil(m.processComponents.fullArchivePeerShardMapper) { + return fmt.Errorf("%w for full archive", errors.ErrNilPeerShardMapper) } if check.IfNil(m.processComponents.fallbackHeaderValidator) { return errors.ErrNilFallbackHeaderValidator @@ -195,7 +202,7 @@ func (m *managedProcessComponents) ShardCoordinator() sharding.Coordinator { return m.processComponents.shardCoordinator } -// InterceptorsContainer returns the interceptors container +// InterceptorsContainer returns the interceptors container on the main network func (m *managedProcessComponents) InterceptorsContainer() process.InterceptorsContainer { m.mutProcessComponents.RLock() defer m.mutProcessComponents.RUnlock() @@ -204,7 +211,19 @@ func (m *managedProcessComponents) InterceptorsContainer() process.InterceptorsC return nil } - return m.processComponents.interceptorsContainer + return m.processComponents.mainInterceptorsContainer +} + +// FullArchiveInterceptorsContainer returns the interceptors container on the full archive network +func (m *managedProcessComponents) FullArchiveInterceptorsContainer() process.InterceptorsContainer { + m.mutProcessComponents.RLock() + defer m.mutProcessComponents.RUnlock() + + if m.processComponents == nil { + return nil + } + + return m.processComponents.fullArchiveInterceptorsContainer } // ResolversContainer returns the resolvers container @@ -423,7 +442,7 @@ func (m *managedProcessComponents) HeaderConstructionValidator() process.HeaderC return m.processComponents.headerConstructionValidator } -// PeerShardMapper returns the peer to shard mapper +// PeerShardMapper returns the peer to shard mapper of the main network func (m *managedProcessComponents) PeerShardMapper() process.NetworkShardingCollector { m.mutProcessComponents.RLock() defer m.mutProcessComponents.RUnlock() @@ -432,7 +451,19 @@ func (m *managedProcessComponents) PeerShardMapper() process.NetworkShardingColl return nil } - return m.processComponents.peerShardMapper + return m.processComponents.mainPeerShardMapper +} + +// FullArchivePeerShardMapper returns the peer to shard mapper of the full archive network +func (m *managedProcessComponents) FullArchivePeerShardMapper() process.NetworkShardingCollector { + m.mutProcessComponents.RLock() + defer m.mutProcessComponents.RUnlock() + + if m.processComponents == nil { + return nil + } + + return m.processComponents.fullArchivePeerShardMapper } // FallbackHeaderValidator returns the fallback header validator diff --git a/factory/processing/processComponentsHandler_test.go b/factory/processing/processComponentsHandler_test.go index dc22b846efa..152b7637dc6 100644 --- a/factory/processing/processComponentsHandler_test.go +++ b/factory/processing/processComponentsHandler_test.go @@ -90,6 +90,8 @@ func TestManagedProcessComponents_Create(t *testing.T) { require.True(t, check.IfNil(managedProcessComponents.ScheduledTxsExecutionHandler())) require.True(t, check.IfNil(managedProcessComponents.ESDTDataStorageHandlerForAPI())) require.True(t, check.IfNil(managedProcessComponents.ReceiptsRepository())) + require.True(t, check.IfNil(managedProcessComponents.FullArchivePeerShardMapper())) + require.True(t, check.IfNil(managedProcessComponents.FullArchiveInterceptorsContainer())) err := managedProcessComponents.Create() require.NoError(t, err) @@ -131,6 +133,8 @@ func TestManagedProcessComponents_Create(t *testing.T) { require.False(t, check.IfNil(managedProcessComponents.ScheduledTxsExecutionHandler())) require.False(t, check.IfNil(managedProcessComponents.ESDTDataStorageHandlerForAPI())) 
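CheckSubcomponents above no longer returns the bare sentinel errors: it wraps them with the network they belong to via fmt.Errorf and %w (for example "%w on main network" around errors.ErrNilInterceptorsContainer). The heartbeat senders further below apply the same wrapping in their argument checks, which is why their tests switch from assert.Equal to errors.Is. A small, self-contained illustration of that behavior, using only the standard library:

package main

import (
	"errors"
	"fmt"
)

var errNilInterceptorsContainer = errors.New("nil interceptors container")

// checkContainers mimics the wrapping style used in CheckSubcomponents:
// the sentinel stays the root cause, the wrapper names the network.
func checkContainers(mainContainer, fullArchiveContainer interface{}) error {
	if mainContainer == nil {
		return fmt.Errorf("%w on main network", errNilInterceptorsContainer)
	}
	if fullArchiveContainer == nil {
		return fmt.Errorf("%w on full archive network", errNilInterceptorsContainer)
	}
	return nil
}

func main() {
	err := checkContainers(nil, struct{}{})

	// A plain equality check against the sentinel no longer matches...
	fmt.Println(err == errNilInterceptorsContainer) // false
	// ...while errors.Is unwraps and still matches, which is what the updated tests rely on.
	fmt.Println(errors.Is(err, errNilInterceptorsContainer)) // true
	fmt.Println(err) // nil interceptors container on main network
}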
require.False(t, check.IfNil(managedProcessComponents.ReceiptsRepository())) + require.False(t, check.IfNil(managedProcessComponents.FullArchivePeerShardMapper())) + require.False(t, check.IfNil(managedProcessComponents.FullArchiveInterceptorsContainer())) require.Equal(t, factory.ProcessComponentsName, managedProcessComponents.String()) }) diff --git a/factory/processing/processComponents_test.go b/factory/processing/processComponents_test.go index a2e30c2acd3..b99036be033 100644 --- a/factory/processing/processComponents_test.go +++ b/factory/processing/processComponents_test.go @@ -216,11 +216,13 @@ func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFacto ManagedPeersHolderField: &testscommon.ManagedPeersHolderStub{}, }, Network: &testsMocks.NetworkComponentsStub{ - Messenger: &p2pmocks.MessengerStub{}, - InputAntiFlood: &testsMocks.P2PAntifloodHandlerStub{}, - OutputAntiFlood: &testsMocks.P2PAntifloodHandlerStub{}, - PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - PeersRatingHandlerField: &p2pmocks.PeersRatingHandlerStub{}, + Messenger: &p2pmocks.MessengerStub{}, + FullArchiveNetworkMessengerField: &p2pmocks.MessengerStub{}, + InputAntiFlood: &testsMocks.P2PAntifloodHandlerStub{}, + OutputAntiFlood: &testsMocks.P2PAntifloodHandlerStub{}, + PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + PeersRatingHandlerField: &p2pmocks.PeersRatingHandlerStub{}, + FullArchivePreferredPeersHolder: &p2pmocks.PeersHolderStub{}, }, BootstrapComponents: &mainFactoryMocks.BootstrapComponentsStub{ ShCoordinator: mock.NewMultiShardsCoordinatorMock(2), diff --git a/factory/status/statusComponentsHandler.go b/factory/status/statusComponentsHandler.go index b0c1198dac6..dd293a08538 100644 --- a/factory/status/statusComponentsHandler.go +++ b/factory/status/statusComponentsHandler.go @@ -277,12 +277,11 @@ func computeConnectedPeers( ) { peersInfo := netMessenger.GetConnectedPeersInfo() - peerClassification := fmt.Sprintf("intraVal:%d,crossVal:%d,intraObs:%d,crossObs:%d,fullObs:%d,unknown:%d,", + peerClassification := fmt.Sprintf("intraVal:%d,crossVal:%d,intraObs:%d,crossObs:%d,unknown:%d,", len(peersInfo.IntraShardValidators), len(peersInfo.CrossShardValidators), len(peersInfo.IntraShardObservers), len(peersInfo.CrossShardObservers), - len(peersInfo.FullHistoryObservers), len(peersInfo.UnknownPeers), ) appStatusHandler.SetStringValue(common.MetricNumConnectedPeersClassification, peerClassification) @@ -298,7 +297,6 @@ func setP2pConnectedPeersMetrics(appStatusHandler core.AppStatusHandler, info *p appStatusHandler.SetStringValue(common.MetricP2PIntraShardObservers, mapToString(info.IntraShardObservers)) appStatusHandler.SetStringValue(common.MetricP2PCrossShardValidators, mapToString(info.CrossShardValidators)) appStatusHandler.SetStringValue(common.MetricP2PCrossShardObservers, mapToString(info.CrossShardObservers)) - appStatusHandler.SetStringValue(common.MetricP2PFullHistoryObservers, mapToString(info.FullHistoryObservers)) } func sliceToString(input []string) string { diff --git a/factory/status/statusComponentsHandler_test.go b/factory/status/statusComponentsHandler_test.go index a8d952fb98a..9daeb5dea81 100644 --- a/factory/status/statusComponentsHandler_test.go +++ b/factory/status/statusComponentsHandler_test.go @@ -210,10 +210,6 @@ func TestComputeConnectedPeers(t *testing.T) { 0: {"cross-o-0"}, 1: {"cross-o-1"}, }, - FullHistoryObservers: map[uint32][]string{ - 0: {"fh-0"}, - 1: {"fh-1"}, - }, NumValidatorsOnShard: map[uint32]int{ 0: 1, 1: 1, @@ -230,14 +226,13 @@ func 
TestComputeConnectedPeers(t *testing.T) { NumIntraShardObservers: 2, NumCrossShardValidators: 2, NumCrossShardObservers: 2, - NumFullHistoryObservers: 2, } }, AddressesCalled: func() []string { return []string{"intra-v-0", "intra-v-1", "intra-o-0", "intra-o-1", "cross-v-0", "cross-v-1"} }, } - expectedPeerClassification := "intraVal:2,crossVal:2,intraObs:2,crossObs:2,fullObs:2,unknown:1," + expectedPeerClassification := "intraVal:2,crossVal:2,intraObs:2,crossObs:2,unknown:1," cnt := 0 appStatusHandler := &statusHandler.AppStatusHandlerStub{ SetStringValueHandler: func(key string, value string) { @@ -265,9 +260,6 @@ func TestComputeConnectedPeers(t *testing.T) { require.Equal(t, common.MetricP2PCrossShardObservers, key) require.Equal(t, "cross-o-0,cross-o-1", value) case 8: - require.Equal(t, common.MetricP2PFullHistoryObservers, key) - require.Equal(t, "fh-0,fh-1", value) - case 9: require.Equal(t, common.MetricP2PPeerInfo, key) require.Equal(t, "intra-v-0,intra-v-1,intra-o-0,intra-o-1,cross-v-0,cross-v-1", value) default: diff --git a/go.mod b/go.mod index 14c54ed7b0c..a5c2ed8cd7b 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-communication-go v1.0.3 + github.com/multiversx/mx-chain-communication-go v1.0.4 github.com/multiversx/mx-chain-core-go v1.2.9 github.com/multiversx/mx-chain-crypto-go v1.2.7 github.com/multiversx/mx-chain-es-indexer-go v1.4.6 diff --git a/go.sum b/go.sum index d35141ad228..513acfb4321 100644 --- a/go.sum +++ b/go.sum @@ -621,8 +621,9 @@ github.com/multiformats/go-varint v0.0.6 h1:gk85QWKxh3TazbLxED/NlDVv8+q+ReFJk7Y2 github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiversx/concurrent-map v0.1.4 h1:hdnbM8VE4b0KYJaGY5yJS2aNIW9TFFsUYwbO0993uPI= github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7eFQ4U2vKMOp4A/9+o= -github.com/multiversx/mx-chain-communication-go v1.0.3 h1:42LkNQX+PdT4UEDO2/kbOMT/iDpEMfE0ncNldFTyFD4= github.com/multiversx/mx-chain-communication-go v1.0.3/go.mod h1:7oTI77XfWmRWwVEbCq+pjH5CO3mJ6vEiHGMvQv6vF3Y= +github.com/multiversx/mx-chain-communication-go v1.0.4 h1:77DJZp1J8R9YsX61GVXVi7WNLVi4m0Z34gwgCEi6urc= +github.com/multiversx/mx-chain-communication-go v1.0.4/go.mod h1:7oTI77XfWmRWwVEbCq+pjH5CO3mJ6vEiHGMvQv6vF3Y= github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.1/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.5/go.mod h1:jzYFSiYBuO0dGpGFXnZWSwcwcKP7Flyn/X41y4zIQrQ= diff --git a/heartbeat/sender/baseSender.go b/heartbeat/sender/baseSender.go index cf7a7787c1f..a1c6a1664a2 100644 --- a/heartbeat/sender/baseSender.go +++ b/heartbeat/sender/baseSender.go @@ -19,7 +19,8 @@ const maxThresholdBetweenSends = 1.00 // 100% // argBaseSender represents the arguments for base sender type argBaseSender struct { - messenger heartbeat.P2PMessenger + mainMessenger heartbeat.P2PMessenger + fullArchiveMessenger heartbeat.P2PMessenger marshaller marshal.Marshalizer topic string timeBetweenSends time.Duration @@ -31,7 +32,8 @@ type argBaseSender struct { type baseSender struct { timerHandler - messenger heartbeat.P2PMessenger + mainMessenger heartbeat.P2PMessenger + fullArchiveMessenger heartbeat.P2PMessenger marshaller marshal.Marshalizer topic string timeBetweenSends time.Duration 
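The base sender now carries two messengers, one per network, and the heartbeat senders further below broadcast the same serialized message on both of them, while peer authentication keeps using only the main messenger. A minimal sketch of that shape, assuming simplified stand-in interfaces rather than the actual heartbeat package API:

package main

import "fmt"

// P2PMessenger is a stand-in for the heartbeat.P2PMessenger interface.
type P2PMessenger interface {
	Broadcast(topic string, buff []byte)
}

// dualSender holds one messenger per network, like the reworked baseSender.
type dualSender struct {
	mainMessenger        P2PMessenger
	fullArchiveMessenger P2PMessenger
	topic                string
}

// execute broadcasts the same payload on both networks, mirroring
// heartbeatSender.execute in this patch.
func (s *dualSender) execute(payload []byte) {
	s.mainMessenger.Broadcast(s.topic, payload)
	s.fullArchiveMessenger.Broadcast(s.topic, payload)
}

// printMessenger is a trivial P2PMessenger used only for the demonstration below.
type printMessenger struct{ name string }

func (p *printMessenger) Broadcast(topic string, buff []byte) {
	fmt.Printf("%s <- %s: %s\n", p.name, topic, buff)
}

func main() {
	s := &dualSender{
		mainMessenger:        &printMessenger{name: "main"},
		fullArchiveMessenger: &printMessenger{name: "full archive"},
		topic:                "heartbeat",
	}
	s.execute([]byte("hb message"))
}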
@@ -45,7 +47,8 @@ type baseSender struct { func createBaseSender(args argBaseSender) baseSender { bs := baseSender{ - messenger: args.messenger, + mainMessenger: args.mainMessenger, + fullArchiveMessenger: args.fullArchiveMessenger, marshaller: args.marshaller, topic: args.topic, timeBetweenSends: args.timeBetweenSends, @@ -64,8 +67,11 @@ func createBaseSender(args argBaseSender) baseSender { } func checkBaseSenderArgs(args argBaseSender) error { - if check.IfNil(args.messenger) { - return heartbeat.ErrNilMessenger + if check.IfNil(args.mainMessenger) { + return fmt.Errorf("%w for main", heartbeat.ErrNilMessenger) + } + if check.IfNil(args.fullArchiveMessenger) { + return fmt.Errorf("%w for full archive", heartbeat.ErrNilMessenger) } if check.IfNil(args.marshaller) { return heartbeat.ErrNilMarshaller diff --git a/heartbeat/sender/baseSender_test.go b/heartbeat/sender/baseSender_test.go index dc19139fe29..0061f1aeea4 100644 --- a/heartbeat/sender/baseSender_test.go +++ b/heartbeat/sender/baseSender_test.go @@ -13,7 +13,8 @@ import ( func createMockBaseArgs() argBaseSender { return argBaseSender{ - messenger: &p2pmocks.MessengerStub{}, + mainMessenger: &p2pmocks.MessengerStub{}, + fullArchiveMessenger: &p2pmocks.MessengerStub{}, marshaller: &marshallerMock.MarshalizerMock{}, topic: "topic", timeBetweenSends: time.Second, diff --git a/heartbeat/sender/bootstrapSender.go b/heartbeat/sender/bootstrapSender.go index 107eaedd93b..0872412ddda 100644 --- a/heartbeat/sender/bootstrapSender.go +++ b/heartbeat/sender/bootstrapSender.go @@ -12,7 +12,8 @@ import ( // ArgBootstrapSender represents the arguments for the bootstrap bootstrapSender type ArgBootstrapSender struct { - Messenger heartbeat.P2PMessenger + MainMessenger heartbeat.P2PMessenger + FullArchiveMessenger heartbeat.P2PMessenger Marshaller marshal.Marshalizer HeartbeatTopic string HeartbeatTimeBetweenSends time.Duration @@ -39,7 +40,8 @@ type bootstrapSender struct { func NewBootstrapSender(args ArgBootstrapSender) (*bootstrapSender, error) { hbs, err := newHeartbeatSender(argHeartbeatSender{ argBaseSender: argBaseSender{ - messenger: args.Messenger, + mainMessenger: args.MainMessenger, + fullArchiveMessenger: args.FullArchiveMessenger, marshaller: args.Marshaller, topic: args.HeartbeatTopic, timeBetweenSends: args.HeartbeatTimeBetweenSends, diff --git a/heartbeat/sender/bootstrapSender_test.go b/heartbeat/sender/bootstrapSender_test.go index 1f9dd524940..8d78e04caf4 100644 --- a/heartbeat/sender/bootstrapSender_test.go +++ b/heartbeat/sender/bootstrapSender_test.go @@ -7,7 +7,6 @@ import ( "time" "github.com/multiversx/mx-chain-core-go/core" - "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/heartbeat" "github.com/multiversx/mx-chain-go/heartbeat/mock" "github.com/multiversx/mx-chain-go/testscommon" @@ -19,7 +18,8 @@ import ( func createMockBootstrapSenderArgs() ArgBootstrapSender { return ArgBootstrapSender{ - Messenger: &p2pmocks.MessengerStub{}, + MainMessenger: &p2pmocks.MessengerStub{}, + FullArchiveMessenger: &p2pmocks.MessengerStub{}, Marshaller: &marshallerMock.MarshalizerMock{}, HeartbeatTopic: "hb-topic", HeartbeatTimeBetweenSends: time.Second, @@ -40,15 +40,25 @@ func createMockBootstrapSenderArgs() ArgBootstrapSender { func TestNewBootstrapSender(t *testing.T) { t.Parallel() - t.Run("nil peer messenger should error", func(t *testing.T) { + t.Run("nil main messenger should error", func(t *testing.T) { t.Parallel() args := createMockBootstrapSenderArgs() - args.Messenger = nil + 
args.MainMessenger = nil senderInstance, err := NewBootstrapSender(args) assert.Nil(t, senderInstance) - assert.Equal(t, heartbeat.ErrNilMessenger, err) + assert.True(t, errors.Is(err, heartbeat.ErrNilMessenger)) + }) + t.Run("nil full archive messenger should error", func(t *testing.T) { + t.Parallel() + + args := createMockBootstrapSenderArgs() + args.FullArchiveMessenger = nil + senderInstance, err := NewBootstrapSender(args) + + assert.Nil(t, senderInstance) + assert.True(t, errors.Is(err, heartbeat.ErrNilMessenger)) }) t.Run("nil marshaller should error", func(t *testing.T) { t.Parallel() @@ -182,7 +192,7 @@ func TestNewBootstrapSender(t *testing.T) { args := createMockBootstrapSenderArgs() senderInstance, err := NewBootstrapSender(args) - assert.False(t, check.IfNil(senderInstance)) + assert.NotNil(t, senderInstance) assert.Nil(t, err) }) } @@ -202,3 +212,13 @@ func TestBootstrapSender_Close(t *testing.T) { err := senderInstance.Close() assert.Nil(t, err) } + +func TestBootstrapSender_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var senderInstance *bootstrapSender + assert.True(t, senderInstance.IsInterfaceNil()) + + senderInstance, _ = NewBootstrapSender(createMockBootstrapSenderArgs()) + assert.False(t, senderInstance.IsInterfaceNil()) +} diff --git a/heartbeat/sender/commonPeerAuthenticationSender.go b/heartbeat/sender/commonPeerAuthenticationSender.go index f1cf2e41eed..c89f0899261 100644 --- a/heartbeat/sender/commonPeerAuthenticationSender.go +++ b/heartbeat/sender/commonPeerAuthenticationSender.go @@ -40,12 +40,12 @@ func (cpas *commonPeerAuthenticationSender) generateMessageBytes( msg.Payload = payloadBytes if p2pSkBytes != nil { - msg.PayloadSignature, err = cpas.messenger.SignUsingPrivateKey(p2pSkBytes, payloadBytes) + msg.PayloadSignature, err = cpas.mainMessenger.SignUsingPrivateKey(p2pSkBytes, payloadBytes) if err != nil { return nil, isTriggered, 0, err } } else { - msg.PayloadSignature, err = cpas.messenger.Sign(payloadBytes) + msg.PayloadSignature, err = cpas.mainMessenger.Sign(payloadBytes) if err != nil { return nil, isTriggered, 0, err } diff --git a/heartbeat/sender/heartbeatSender.go b/heartbeat/sender/heartbeatSender.go index 77c52cd96ee..bdf6c5c12d1 100644 --- a/heartbeat/sender/heartbeatSender.go +++ b/heartbeat/sender/heartbeatSender.go @@ -112,7 +112,8 @@ func (sender *heartbeatSender) execute() error { return err } - sender.messenger.Broadcast(sender.topic, msgBytes) + sender.mainMessenger.Broadcast(sender.topic, msgBytes) + sender.fullArchiveMessenger.Broadcast(sender.topic, msgBytes) return nil } diff --git a/heartbeat/sender/heartbeatSenderFactory.go b/heartbeat/sender/heartbeatSenderFactory.go index 487bd623924..d254eeb5c02 100644 --- a/heartbeat/sender/heartbeatSenderFactory.go +++ b/heartbeat/sender/heartbeatSenderFactory.go @@ -39,7 +39,8 @@ func createHeartbeatSender(args argHeartbeatSenderFactory) (heartbeatSenderHandl func createRegularHeartbeatSender(args argHeartbeatSenderFactory) (*heartbeatSender, error) { argsSender := argHeartbeatSender{ argBaseSender: argBaseSender{ - messenger: args.messenger, + mainMessenger: args.mainMessenger, + fullArchiveMessenger: args.fullArchiveMessenger, marshaller: args.marshaller, topic: args.topic, timeBetweenSends: args.timeBetweenSends, @@ -63,7 +64,8 @@ func createRegularHeartbeatSender(args argHeartbeatSenderFactory) (*heartbeatSen func createMultikeyHeartbeatSender(args argHeartbeatSenderFactory) (*multikeyHeartbeatSender, error) { argsSender := argMultikeyHeartbeatSender{ argBaseSender: 
argBaseSender{ - messenger: args.messenger, + mainMessenger: args.mainMessenger, + fullArchiveMessenger: args.fullArchiveMessenger, marshaller: args.marshaller, topic: args.topic, timeBetweenSends: args.timeBetweenSends, diff --git a/heartbeat/sender/heartbeatSender_test.go b/heartbeat/sender/heartbeatSender_test.go index e4fd2c4bc3f..ef7d59f3544 100644 --- a/heartbeat/sender/heartbeatSender_test.go +++ b/heartbeat/sender/heartbeatSender_test.go @@ -7,7 +7,6 @@ import ( "time" "github.com/multiversx/mx-chain-core-go/core" - "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/heartbeat" @@ -36,16 +35,27 @@ func createMockHeartbeatSenderArgs(argBase argBaseSender) argHeartbeatSender { func TestNewHeartbeatSender(t *testing.T) { t.Parallel() - t.Run("nil peer messenger should error", func(t *testing.T) { + t.Run("nil main messenger should error", func(t *testing.T) { t.Parallel() argBase := createMockBaseArgs() - argBase.messenger = nil + argBase.mainMessenger = nil args := createMockHeartbeatSenderArgs(argBase) senderInstance, err := newHeartbeatSender(args) assert.Nil(t, senderInstance) - assert.Equal(t, heartbeat.ErrNilMessenger, err) + assert.True(t, errors.Is(err, heartbeat.ErrNilMessenger)) + }) + t.Run("nil full archive messenger should error", func(t *testing.T) { + t.Parallel() + + argBase := createMockBaseArgs() + argBase.fullArchiveMessenger = nil + args := createMockHeartbeatSenderArgs(argBase) + senderInstance, err := newHeartbeatSender(args) + + assert.Nil(t, senderInstance) + assert.True(t, errors.Is(err, heartbeat.ErrNilMessenger)) }) t.Run("nil marshaller should error", func(t *testing.T) { t.Parallel() @@ -101,7 +111,7 @@ func TestNewHeartbeatSender(t *testing.T) { args.privKey = nil senderInstance, err := newHeartbeatSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.Equal(t, heartbeat.ErrNilPrivateKey, err) }) t.Run("nil redundancy handler should error", func(t *testing.T) { @@ -111,7 +121,7 @@ func TestNewHeartbeatSender(t *testing.T) { args.redundancyHandler = nil senderInstance, err := newHeartbeatSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.Equal(t, heartbeat.ErrNilRedundancyHandler, err) }) t.Run("version number too long should error", func(t *testing.T) { @@ -205,7 +215,7 @@ func TestNewHeartbeatSender(t *testing.T) { args := createMockHeartbeatSenderArgs(createMockBaseArgs()) senderInstance, err := newHeartbeatSender(args) - assert.False(t, check.IfNil(senderInstance)) + assert.NotNil(t, senderInstance) assert.Nil(t, err) }) } @@ -278,7 +288,7 @@ func TestHeartbeatSender_execute(t *testing.T) { args := createMockHeartbeatSenderArgs(argsBase) senderInstance, _ := newHeartbeatSender(args) - assert.False(t, check.IfNil(senderInstance)) + assert.NotNil(t, senderInstance) err := senderInstance.execute() assert.Equal(t, expectedErr, err) @@ -301,7 +311,7 @@ func TestHeartbeatSender_execute(t *testing.T) { args := createMockHeartbeatSenderArgs(argsBase) senderInstance, _ := newHeartbeatSender(args) - assert.False(t, check.IfNil(senderInstance)) + assert.NotNil(t, senderInstance) err := senderInstance.execute() assert.Equal(t, expectedErr, err) @@ -312,7 +322,20 @@ func TestHeartbeatSender_execute(t *testing.T) { providedNumTrieNodesSynced := 100 argsBase := createMockBaseArgs() broadcastCalled := false - argsBase.messenger = &p2pmocks.MessengerStub{ + 
argsBase.mainMessenger = &p2pmocks.MessengerStub{ + BroadcastCalled: func(topic string, buff []byte) { + assert.Equal(t, argsBase.topic, topic) + recoveredMessage := &heartbeat.HeartbeatV2{} + err := argsBase.marshaller.Unmarshal(recoveredMessage, buff) + assert.Nil(t, err) + pk := argsBase.privKey.GeneratePublic() + pkBytes, _ := pk.ToByteArray() + assert.Equal(t, pkBytes, recoveredMessage.Pubkey) + assert.Equal(t, uint64(providedNumTrieNodesSynced), recoveredMessage.NumTrieNodesSynced) + broadcastCalled = true + }, + } + argsBase.fullArchiveMessenger = &p2pmocks.MessengerStub{ BroadcastCalled: func(topic string, buff []byte) { assert.Equal(t, argsBase.topic, topic) recoveredMessage := &heartbeat.HeartbeatV2{} @@ -343,7 +366,7 @@ func TestHeartbeatSender_execute(t *testing.T) { } senderInstance, _ := newHeartbeatSender(args) - assert.False(t, check.IfNil(senderInstance)) + assert.NotNil(t, senderInstance) err := senderInstance.execute() assert.Nil(t, err) @@ -369,3 +392,14 @@ func TestHeartbeatSender_GetCurrentNodeType(t *testing.T) { assert.Equal(t, string(common.EligibleList), peerType) assert.Equal(t, core.FullHistoryObserver, subType) } + +func TestHeartbeatSender_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var senderInstance *heartbeatSender + assert.True(t, senderInstance.IsInterfaceNil()) + + args := createMockHeartbeatSenderArgs(createMockBaseArgs()) + senderInstance, _ = newHeartbeatSender(args) + assert.False(t, senderInstance.IsInterfaceNil()) +} diff --git a/heartbeat/sender/multikeyHeartbeatSender.go b/heartbeat/sender/multikeyHeartbeatSender.go index 6e147dd8e47..7f14c9be905 100644 --- a/heartbeat/sender/multikeyHeartbeatSender.go +++ b/heartbeat/sender/multikeyHeartbeatSender.go @@ -130,7 +130,8 @@ func (sender *multikeyHeartbeatSender) execute() error { return err } - sender.messenger.Broadcast(sender.topic, buff) + sender.mainMessenger.Broadcast(sender.topic, buff) + sender.fullArchiveMessenger.Broadcast(sender.topic, buff) return sender.sendMultiKeysInfo() } @@ -184,7 +185,7 @@ func (sender *multikeyHeartbeatSender) sendMessageForKey(pkBytes []byte) error { return err } - sender.messenger.BroadcastUsingPrivateKey(sender.topic, buff, pid, p2pSk) + sender.mainMessenger.BroadcastUsingPrivateKey(sender.topic, buff, pid, p2pSk) return nil } diff --git a/heartbeat/sender/multikeyHeartbeatSender_test.go b/heartbeat/sender/multikeyHeartbeatSender_test.go index fec7a216720..5d341bd9de7 100644 --- a/heartbeat/sender/multikeyHeartbeatSender_test.go +++ b/heartbeat/sender/multikeyHeartbeatSender_test.go @@ -7,7 +7,6 @@ import ( "time" "github.com/multiversx/mx-chain-core-go/core" - "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" crypto "github.com/multiversx/mx-chain-crypto-go" "github.com/multiversx/mx-chain-go/common" @@ -50,15 +49,25 @@ func createMockMultikeyHeartbeatSenderArgs(argBase argBaseSender) argMultikeyHea func TestNewMultikeyHeartbeatSender(t *testing.T) { t.Parallel() - t.Run("nil messenger should error", func(t *testing.T) { + t.Run("nil main messenger should error", func(t *testing.T) { t.Parallel() args := createMockMultikeyHeartbeatSenderArgs(createMockBaseArgs()) - args.messenger = nil + args.mainMessenger = nil senderInstance, err := newMultikeyHeartbeatSender(args) - assert.True(t, check.IfNil(senderInstance)) - assert.Equal(t, heartbeat.ErrNilMessenger, err) + assert.Nil(t, senderInstance) + assert.True(t, errors.Is(err, heartbeat.ErrNilMessenger)) + }) + t.Run("nil full archive messenger should error", 
func(t *testing.T) { + t.Parallel() + + args := createMockMultikeyHeartbeatSenderArgs(createMockBaseArgs()) + args.fullArchiveMessenger = nil + + senderInstance, err := newMultikeyHeartbeatSender(args) + assert.Nil(t, senderInstance) + assert.True(t, errors.Is(err, heartbeat.ErrNilMessenger)) }) t.Run("nil marshaller should error", func(t *testing.T) { t.Parallel() @@ -67,7 +76,7 @@ func TestNewMultikeyHeartbeatSender(t *testing.T) { args.marshaller = nil senderInstance, err := newMultikeyHeartbeatSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.Equal(t, heartbeat.ErrNilMarshaller, err) }) t.Run("empty topic should error", func(t *testing.T) { @@ -79,7 +88,7 @@ func TestNewMultikeyHeartbeatSender(t *testing.T) { args := createMockMultikeyHeartbeatSenderArgs(argsBase) senderInstance, err := newMultikeyHeartbeatSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.Equal(t, heartbeat.ErrEmptySendTopic, err) }) t.Run("invalid time between sends should error", func(t *testing.T) { @@ -91,7 +100,7 @@ func TestNewMultikeyHeartbeatSender(t *testing.T) { args := createMockMultikeyHeartbeatSenderArgs(argsBase) senderInstance, err := newMultikeyHeartbeatSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.True(t, errors.Is(err, heartbeat.ErrInvalidTimeDuration)) assert.True(t, strings.Contains(err.Error(), "timeBetweenSends")) assert.False(t, strings.Contains(err.Error(), "timeBetweenSendsWhenError")) @@ -105,7 +114,7 @@ func TestNewMultikeyHeartbeatSender(t *testing.T) { args := createMockMultikeyHeartbeatSenderArgs(argsBase) senderInstance, err := newMultikeyHeartbeatSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.True(t, errors.Is(err, heartbeat.ErrInvalidTimeDuration)) assert.True(t, strings.Contains(err.Error(), "timeBetweenSendsWhenError")) }) @@ -138,7 +147,7 @@ func TestNewMultikeyHeartbeatSender(t *testing.T) { args.peerTypeProvider = nil senderInstance, err := newMultikeyHeartbeatSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.Equal(t, heartbeat.ErrNilPeerTypeProvider, err) }) t.Run("version number too long should error", func(t *testing.T) { @@ -192,7 +201,7 @@ func TestNewMultikeyHeartbeatSender(t *testing.T) { args.currentBlockProvider = nil senderInstance, err := newMultikeyHeartbeatSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.Equal(t, heartbeat.ErrNilCurrentBlockProvider, err) }) t.Run("nil managed peers holder should error", func(t *testing.T) { @@ -202,7 +211,7 @@ func TestNewMultikeyHeartbeatSender(t *testing.T) { args.managedPeersHolder = nil senderInstance, err := newMultikeyHeartbeatSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.Equal(t, heartbeat.ErrNilManagedPeersHolder, err) }) t.Run("nil shard coordinator should error", func(t *testing.T) { @@ -212,7 +221,7 @@ func TestNewMultikeyHeartbeatSender(t *testing.T) { args.shardCoordinator = nil senderInstance, err := newMultikeyHeartbeatSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.Equal(t, heartbeat.ErrNilShardCoordinator, err) }) @@ -222,7 +231,7 @@ func TestNewMultikeyHeartbeatSender(t *testing.T) { args := createMockMultikeyHeartbeatSenderArgs(createMockBaseArgs()) senderInstance, err := newMultikeyHeartbeatSender(args) - assert.False(t, 
check.IfNil(senderInstance)) + assert.NotNil(t, senderInstance) assert.Nil(t, err) }) } @@ -287,13 +296,22 @@ func TestMultikeyHeartbeatSender_execute(t *testing.T) { t.Parallel() args := createMockMultikeyHeartbeatSenderArgs(createMockBaseArgs()) - broadcastCalled := false - recordedMessages := make(map[core.PeerID][][]byte) - args.messenger = &p2pmocks.MessengerStub{ + mainBroadcastCalled := false + fullArchiveBroadcastCalled := false + recordedMessagesFromMain := make(map[core.PeerID][][]byte) + recordedMessagesFromFullArchive := make(map[core.PeerID][][]byte) + args.mainMessenger = &p2pmocks.MessengerStub{ + BroadcastCalled: func(topic string, buff []byte) { + assert.Equal(t, args.topic, topic) + recordedMessagesFromMain[args.mainMessenger.ID()] = append(recordedMessagesFromMain[args.mainMessenger.ID()], buff) + mainBroadcastCalled = true + }, + } + args.fullArchiveMessenger = &p2pmocks.MessengerStub{ BroadcastCalled: func(topic string, buff []byte) { assert.Equal(t, args.topic, topic) - recordedMessages[args.messenger.ID()] = append(recordedMessages[args.messenger.ID()], buff) - broadcastCalled = true + recordedMessagesFromFullArchive[args.mainMessenger.ID()] = append(recordedMessagesFromFullArchive[args.mainMessenger.ID()], buff) + fullArchiveBroadcastCalled = true }, } @@ -301,24 +319,27 @@ func TestMultikeyHeartbeatSender_execute(t *testing.T) { err := senderInstance.execute() assert.Nil(t, err) - assert.True(t, broadcastCalled) - assert.Equal(t, 1, len(recordedMessages)) - checkRecordedMessages(t, recordedMessages, args, args.versionNumber, args.nodeDisplayName, args.messenger.ID(), core.FullHistoryObserver) + assert.True(t, mainBroadcastCalled) + assert.True(t, fullArchiveBroadcastCalled) + assert.Equal(t, 1, len(recordedMessagesFromMain)) + checkRecordedMessages(t, recordedMessagesFromMain, args, args.versionNumber, args.nodeDisplayName, args.mainMessenger.ID(), core.FullHistoryObserver) + assert.Equal(t, 1, len(recordedMessagesFromFullArchive)) + checkRecordedMessages(t, recordedMessagesFromFullArchive, args, args.versionNumber, args.nodeDisplayName, args.mainMessenger.ID(), core.FullHistoryObserver) assert.Equal(t, uint64(1), args.currentBlockProvider.GetCurrentBlockHeader().GetNonce()) }) t.Run("should send the current node heartbeat and some multikey heartbeats", func(t *testing.T) { t.Parallel() args := createMockMultikeyHeartbeatSenderArgs(createMockBaseArgs()) - recordedMessages := make(map[core.PeerID][][]byte) - args.messenger = &p2pmocks.MessengerStub{ + recordedMainMessages := make(map[core.PeerID][][]byte) + args.mainMessenger = &p2pmocks.MessengerStub{ BroadcastCalled: func(topic string, buff []byte) { assert.Equal(t, args.topic, topic) - recordedMessages[args.messenger.ID()] = append(recordedMessages[args.messenger.ID()], buff) + recordedMainMessages[args.mainMessenger.ID()] = append(recordedMainMessages[args.mainMessenger.ID()], buff) }, BroadcastUsingPrivateKeyCalled: func(topic string, buff []byte, pid core.PeerID, skBytes []byte) { assert.Equal(t, args.topic, topic) - recordedMessages[pid] = append(recordedMessages[pid], buff) + recordedMainMessages[pid] = append(recordedMainMessages[pid], buff) }, } args.managedPeersHolder = &testscommon.ManagedPeersHolderStub{ @@ -358,18 +379,18 @@ func TestMultikeyHeartbeatSender_execute(t *testing.T) { err := senderInstance.execute() assert.Nil(t, err) - assert.Equal(t, 4, len(recordedMessages)) // current pid, aa, bb, cc + assert.Equal(t, 4, len(recordedMainMessages)) // current pid, aa, bb, cc checkRecordedMessages(t, - 
recordedMessages, + recordedMainMessages, args, args.versionNumber, args.nodeDisplayName, - args.messenger.ID(), + args.mainMessenger.ID(), core.FullHistoryObserver) checkRecordedMessages(t, - recordedMessages, + recordedMainMessages, args, args.baseVersionNumber+"/aa_machineID", "aa_name", @@ -377,7 +398,7 @@ func TestMultikeyHeartbeatSender_execute(t *testing.T) { core.RegularPeer) checkRecordedMessages(t, - recordedMessages, + recordedMainMessages, args, args.baseVersionNumber+"/bb_machineID", "bb_name", @@ -385,7 +406,7 @@ func TestMultikeyHeartbeatSender_execute(t *testing.T) { core.RegularPeer) checkRecordedMessages(t, - recordedMessages, + recordedMainMessages, args, args.baseVersionNumber+"/cc_machineID", "cc_name", @@ -446,6 +467,17 @@ func TestMultikeyHeartbeatSender_generateMessageBytes(t *testing.T) { }) } +func TestMultikeyHeartbeatSender_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var senderInstance *multikeyHeartbeatSender + assert.True(t, senderInstance.IsInterfaceNil()) + + args := createMockMultikeyHeartbeatSenderArgs(createMockBaseArgs()) + senderInstance, _ = newMultikeyHeartbeatSender(args) + assert.False(t, senderInstance.IsInterfaceNil()) +} + func checkRecordedMessages( tb testing.TB, recordedMessages map[core.PeerID][][]byte, diff --git a/heartbeat/sender/multikeyPeerAuthenticationSender.go b/heartbeat/sender/multikeyPeerAuthenticationSender.go index ac6d03b849b..b1e9a62b71c 100644 --- a/heartbeat/sender/multikeyPeerAuthenticationSender.go +++ b/heartbeat/sender/multikeyPeerAuthenticationSender.go @@ -194,7 +194,7 @@ func (sender *multikeyPeerAuthenticationSender) sendData(pkBytes []byte, data [] log.Error("could not get identity for pk", "pk", hex.EncodeToString(pkBytes), "error", err) return } - sender.messenger.BroadcastUsingPrivateKey(sender.topic, data, pid, p2pSk) + sender.mainMessenger.BroadcastUsingPrivateKey(sender.topic, data, pid, p2pSk) nextTimeToCheck, err := sender.managedPeersHolder.GetNextPeerAuthenticationTime(pkBytes) if err != nil { diff --git a/heartbeat/sender/multikeyPeerAuthenticationSender_test.go b/heartbeat/sender/multikeyPeerAuthenticationSender_test.go index 37107f3b6e3..d28c31a8d62 100644 --- a/heartbeat/sender/multikeyPeerAuthenticationSender_test.go +++ b/heartbeat/sender/multikeyPeerAuthenticationSender_test.go @@ -8,7 +8,6 @@ import ( "time" "github.com/multiversx/mx-chain-core-go/core" - "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data/batch" crypto "github.com/multiversx/mx-chain-crypto-go" "github.com/multiversx/mx-chain-crypto-go/signing" @@ -144,7 +143,7 @@ func createMockMultikeyPeerAuthenticationSenderArgsSemiIntegrationTests( }, } - args.messenger = messenger + args.mainMessenger = messenger return args, messenger } @@ -152,17 +151,29 @@ func createMockMultikeyPeerAuthenticationSenderArgsSemiIntegrationTests( func TestNewMultikeyPeerAuthenticationSender(t *testing.T) { t.Parallel() - t.Run("nil peer messenger should error", func(t *testing.T) { + t.Run("nil main messenger should error", func(t *testing.T) { t.Parallel() argsBase := createMockBaseArgs() - argsBase.messenger = nil + argsBase.mainMessenger = nil args := createMockMultikeyPeerAuthenticationSenderArgs(argsBase) senderInstance, err := newMultikeyPeerAuthenticationSender(args) - assert.True(t, check.IfNil(senderInstance)) - assert.Equal(t, heartbeat.ErrNilMessenger, err) + assert.Nil(t, senderInstance) + assert.True(t, errors.Is(err, heartbeat.ErrNilMessenger)) + }) + t.Run("nil full archive messenger should 
error", func(t *testing.T) { + t.Parallel() + + argsBase := createMockBaseArgs() + argsBase.fullArchiveMessenger = nil + + args := createMockMultikeyPeerAuthenticationSenderArgs(argsBase) + senderInstance, err := newMultikeyPeerAuthenticationSender(args) + + assert.Nil(t, senderInstance) + assert.True(t, errors.Is(err, heartbeat.ErrNilMessenger)) }) t.Run("nil marshaller should error", func(t *testing.T) { t.Parallel() @@ -173,7 +184,7 @@ func TestNewMultikeyPeerAuthenticationSender(t *testing.T) { args := createMockMultikeyPeerAuthenticationSenderArgs(argsBase) senderInstance, err := newMultikeyPeerAuthenticationSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.Equal(t, heartbeat.ErrNilMarshaller, err) }) t.Run("empty topic should error", func(t *testing.T) { @@ -185,7 +196,7 @@ func TestNewMultikeyPeerAuthenticationSender(t *testing.T) { args := createMockMultikeyPeerAuthenticationSenderArgs(argsBase) senderInstance, err := newMultikeyPeerAuthenticationSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.Equal(t, heartbeat.ErrEmptySendTopic, err) }) t.Run("invalid time between sends should error", func(t *testing.T) { @@ -197,7 +208,7 @@ func TestNewMultikeyPeerAuthenticationSender(t *testing.T) { args := createMockMultikeyPeerAuthenticationSenderArgs(argsBase) senderInstance, err := newMultikeyPeerAuthenticationSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.True(t, errors.Is(err, heartbeat.ErrInvalidTimeDuration)) assert.True(t, strings.Contains(err.Error(), "timeBetweenSends")) assert.False(t, strings.Contains(err.Error(), "timeBetweenSendsWhenError")) @@ -211,7 +222,7 @@ func TestNewMultikeyPeerAuthenticationSender(t *testing.T) { args := createMockMultikeyPeerAuthenticationSenderArgs(argsBase) senderInstance, err := newMultikeyPeerAuthenticationSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.True(t, errors.Is(err, heartbeat.ErrInvalidTimeDuration)) assert.True(t, strings.Contains(err.Error(), "timeBetweenSendsWhenError")) }) @@ -244,7 +255,7 @@ func TestNewMultikeyPeerAuthenticationSender(t *testing.T) { args.nodesCoordinator = nil senderInstance, err := newMultikeyPeerAuthenticationSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.Equal(t, heartbeat.ErrNilNodesCoordinator, err) }) t.Run("nil peer signature handler should error", func(t *testing.T) { @@ -254,7 +265,7 @@ func TestNewMultikeyPeerAuthenticationSender(t *testing.T) { args.peerSignatureHandler = nil senderInstance, err := newMultikeyPeerAuthenticationSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.Equal(t, heartbeat.ErrNilPeerSignatureHandler, err) }) t.Run("nil hardfork trigger should error", func(t *testing.T) { @@ -264,7 +275,7 @@ func TestNewMultikeyPeerAuthenticationSender(t *testing.T) { args.hardforkTrigger = nil senderInstance, err := newMultikeyPeerAuthenticationSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.Equal(t, heartbeat.ErrNilHardforkTrigger, err) }) t.Run("invalid time between hardforks should error", func(t *testing.T) { @@ -274,7 +285,7 @@ func TestNewMultikeyPeerAuthenticationSender(t *testing.T) { args.hardforkTimeBetweenSends = time.Second - time.Nanosecond senderInstance, err := newMultikeyPeerAuthenticationSender(args) - assert.True(t, 
check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.True(t, errors.Is(err, heartbeat.ErrInvalidTimeDuration)) assert.True(t, strings.Contains(err.Error(), "hardforkTimeBetweenSends")) }) @@ -285,7 +296,7 @@ func TestNewMultikeyPeerAuthenticationSender(t *testing.T) { args.managedPeersHolder = nil senderInstance, err := newMultikeyPeerAuthenticationSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.Equal(t, heartbeat.ErrNilManagedPeersHolder, err) }) t.Run("invalid time between checks should error", func(t *testing.T) { @@ -295,7 +306,7 @@ func TestNewMultikeyPeerAuthenticationSender(t *testing.T) { args.timeBetweenChecks = time.Second - time.Nanosecond senderInstance, err := newMultikeyPeerAuthenticationSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.True(t, errors.Is(err, heartbeat.ErrInvalidTimeDuration)) assert.True(t, strings.Contains(err.Error(), "timeBetweenChecks")) }) @@ -306,7 +317,7 @@ func TestNewMultikeyPeerAuthenticationSender(t *testing.T) { args.shardCoordinator = nil senderInstance, err := newMultikeyPeerAuthenticationSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.Equal(t, heartbeat.ErrNilShardCoordinator, err) }) t.Run("should work", func(t *testing.T) { @@ -315,7 +326,7 @@ func TestNewMultikeyPeerAuthenticationSender(t *testing.T) { args := createMockMultikeyPeerAuthenticationSenderArgs(createMockBaseArgs()) senderInstance, err := newMultikeyPeerAuthenticationSender(args) - assert.False(t, check.IfNil(senderInstance)) + assert.NotNil(t, senderInstance) assert.Nil(t, err) }) } @@ -648,6 +659,17 @@ func TestNewMultikeyPeerAuthenticationSender_Execute(t *testing.T) { }) } +func TestMultikeyPeerAuthenticationSender_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var senderInstance *multikeyPeerAuthenticationSender + assert.True(t, senderInstance.IsInterfaceNil()) + + args := createMockMultikeyPeerAuthenticationSenderArgs(createMockBaseArgs()) + senderInstance, _ = newMultikeyPeerAuthenticationSender(args) + assert.False(t, senderInstance.IsInterfaceNil()) +} + func testRecoveredMessages( tb testing.TB, args argMultikeyPeerAuthenticationSender, @@ -693,7 +715,7 @@ func testSingleMessage( errVerify := args.peerSignatureHandler.VerifyPeerSignature(recoveredMessage.Pubkey, core.PeerID(recoveredMessage.Pid), recoveredMessage.Signature) assert.Nil(tb, errVerify) - messenger := args.messenger.(*p2pmocks.MessengerStub) + messenger := args.mainMessenger.(*p2pmocks.MessengerStub) errVerify = messenger.Verify(recoveredMessage.Payload, core.PeerID(recoveredMessage.Pid), recoveredMessage.PayloadSignature) assert.Nil(tb, errVerify) diff --git a/heartbeat/sender/peerAuthenticationSender.go b/heartbeat/sender/peerAuthenticationSender.go index 6151177c8af..09387a13da9 100644 --- a/heartbeat/sender/peerAuthenticationSender.go +++ b/heartbeat/sender/peerAuthenticationSender.go @@ -120,15 +120,15 @@ func (sender *peerAuthenticationSender) execute() (error, bool) { return err, false } - data, isTriggered, msgTimestamp, err := sender.generateMessageBytes(pkBytes, sk, nil, sender.messenger.ID().Bytes()) + data, isTriggered, msgTimestamp, err := sender.generateMessageBytes(pkBytes, sk, nil, sender.mainMessenger.ID().Bytes()) if err != nil { return err, isTriggered } log.Debug("sending peer authentication message", - "public key", pkBytes, "pid", sender.messenger.ID().Pretty(), + "public key", pkBytes, "pid", 
sender.mainMessenger.ID().Pretty(), "timestamp", msgTimestamp) - sender.messenger.Broadcast(sender.topic, data) + sender.mainMessenger.Broadcast(sender.topic, data) return nil, isTriggered } diff --git a/heartbeat/sender/peerAuthenticationSender_test.go b/heartbeat/sender/peerAuthenticationSender_test.go index 901ebf31d3e..b20ccd9eee3 100644 --- a/heartbeat/sender/peerAuthenticationSender_test.go +++ b/heartbeat/sender/peerAuthenticationSender_test.go @@ -9,7 +9,6 @@ import ( "time" "github.com/multiversx/mx-chain-core-go/core" - "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data/batch" "github.com/multiversx/mx-chain-crypto-go" "github.com/multiversx/mx-chain-crypto-go/signing" @@ -71,17 +70,17 @@ func createMockPeerAuthenticationSenderArgsSemiIntegrationTests(baseArg argBaseS func TestNewPeerAuthenticationSender(t *testing.T) { t.Parallel() - t.Run("nil peer messenger should error", func(t *testing.T) { + t.Run("nil main messenger should error", func(t *testing.T) { t.Parallel() argsBase := createMockBaseArgs() - argsBase.messenger = nil + argsBase.mainMessenger = nil args := createMockPeerAuthenticationSenderArgs(argsBase) senderInstance, err := newPeerAuthenticationSender(args) - assert.True(t, check.IfNil(senderInstance)) - assert.Equal(t, heartbeat.ErrNilMessenger, err) + assert.Nil(t, senderInstance) + assert.True(t, errors.Is(err, heartbeat.ErrNilMessenger)) }) t.Run("nil nodes coordinator should error", func(t *testing.T) { t.Parallel() @@ -90,7 +89,7 @@ func TestNewPeerAuthenticationSender(t *testing.T) { args.nodesCoordinator = nil senderInstance, err := newPeerAuthenticationSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.Equal(t, heartbeat.ErrNilNodesCoordinator, err) }) t.Run("nil peer signature handler should error", func(t *testing.T) { @@ -100,7 +99,7 @@ func TestNewPeerAuthenticationSender(t *testing.T) { args.peerSignatureHandler = nil senderInstance, err := newPeerAuthenticationSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.Equal(t, heartbeat.ErrNilPeerSignatureHandler, err) }) t.Run("nil private key should error", func(t *testing.T) { @@ -110,7 +109,7 @@ func TestNewPeerAuthenticationSender(t *testing.T) { args.privKey = nil senderInstance, err := newPeerAuthenticationSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.Equal(t, heartbeat.ErrNilPrivateKey, err) }) t.Run("nil marshaller should error", func(t *testing.T) { @@ -122,7 +121,7 @@ func TestNewPeerAuthenticationSender(t *testing.T) { args := createMockPeerAuthenticationSenderArgs(argsBase) senderInstance, err := newPeerAuthenticationSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.Equal(t, heartbeat.ErrNilMarshaller, err) }) t.Run("empty topic should error", func(t *testing.T) { @@ -134,7 +133,7 @@ func TestNewPeerAuthenticationSender(t *testing.T) { args := createMockPeerAuthenticationSenderArgs(argsBase) senderInstance, err := newPeerAuthenticationSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.Equal(t, heartbeat.ErrEmptySendTopic, err) }) t.Run("nil redundancy handler should error", func(t *testing.T) { @@ -144,7 +143,7 @@ func TestNewPeerAuthenticationSender(t *testing.T) { args.redundancyHandler = nil senderInstance, err := newPeerAuthenticationSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, 
senderInstance) assert.Equal(t, heartbeat.ErrNilRedundancyHandler, err) }) t.Run("invalid time between sends should error", func(t *testing.T) { @@ -156,7 +155,7 @@ func TestNewPeerAuthenticationSender(t *testing.T) { args := createMockPeerAuthenticationSenderArgs(argsBase) senderInstance, err := newPeerAuthenticationSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.True(t, errors.Is(err, heartbeat.ErrInvalidTimeDuration)) assert.True(t, strings.Contains(err.Error(), "timeBetweenSends")) assert.False(t, strings.Contains(err.Error(), "timeBetweenSendsWhenError")) @@ -170,7 +169,7 @@ func TestNewPeerAuthenticationSender(t *testing.T) { args := createMockPeerAuthenticationSenderArgs(argsBase) senderInstance, err := newPeerAuthenticationSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.True(t, errors.Is(err, heartbeat.ErrInvalidTimeDuration)) assert.True(t, strings.Contains(err.Error(), "timeBetweenSendsWhenError")) }) @@ -203,7 +202,7 @@ func TestNewPeerAuthenticationSender(t *testing.T) { args.hardforkTrigger = nil senderInstance, err := newPeerAuthenticationSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.Equal(t, heartbeat.ErrNilHardforkTrigger, err) }) t.Run("invalid time between hardforks should error", func(t *testing.T) { @@ -213,7 +212,7 @@ func TestNewPeerAuthenticationSender(t *testing.T) { args.hardforkTimeBetweenSends = time.Second - time.Nanosecond senderInstance, err := newPeerAuthenticationSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.True(t, errors.Is(err, heartbeat.ErrInvalidTimeDuration)) assert.True(t, strings.Contains(err.Error(), "hardforkTimeBetweenSends")) }) @@ -223,7 +222,7 @@ func TestNewPeerAuthenticationSender(t *testing.T) { args := createMockPeerAuthenticationSenderArgs(createMockBaseArgs()) senderInstance, err := newPeerAuthenticationSender(args) - assert.False(t, check.IfNil(senderInstance)) + assert.NotNil(t, senderInstance) assert.Nil(t, err) }) } @@ -235,7 +234,7 @@ func TestPeerAuthenticationSender_execute(t *testing.T) { t.Parallel() argsBase := createMockBaseArgs() - argsBase.messenger = &p2pmocks.MessengerStub{ + argsBase.mainMessenger = &p2pmocks.MessengerStub{ SignCalled: func(payload []byte) ([]byte, error) { return nil, expectedErr }, @@ -255,7 +254,7 @@ func TestPeerAuthenticationSender_execute(t *testing.T) { t.Parallel() argsBase := createMockBaseArgs() - argsBase.messenger = &p2pmocks.MessengerStub{ + argsBase.mainMessenger = &p2pmocks.MessengerStub{ BroadcastCalled: func(topic string, buff []byte) { assert.Fail(t, "should have not called Messenger.BroadcastCalled") }, @@ -277,7 +276,7 @@ func TestPeerAuthenticationSender_execute(t *testing.T) { t.Parallel() baseArgs := createMockBaseArgs() - baseArgs.messenger = &p2pmocks.MessengerStub{ + baseArgs.mainMessenger = &p2pmocks.MessengerStub{ BroadcastCalled: func(topic string, buff []byte) { assert.Fail(t, "should have not called Messenger.BroadcastCalled") }, @@ -299,7 +298,7 @@ func TestPeerAuthenticationSender_execute(t *testing.T) { numCalls := 0 argsBase := createMockBaseArgs() - argsBase.messenger = &p2pmocks.MessengerStub{ + argsBase.mainMessenger = &p2pmocks.MessengerStub{ BroadcastCalled: func(topic string, buff []byte) { assert.Fail(t, "should have not called Messenger.BroadcastCalled") }, @@ -325,11 +324,16 @@ func TestPeerAuthenticationSender_execute(t *testing.T) { t.Parallel() argsBase := 
createMockBaseArgs() - broadcastCalled := false - argsBase.messenger = &p2pmocks.MessengerStub{ + mainBroadcastCalled := false + argsBase.mainMessenger = &p2pmocks.MessengerStub{ BroadcastCalled: func(topic string, buff []byte) { assert.Equal(t, argsBase.topic, topic) - broadcastCalled = true + mainBroadcastCalled = true + }, + } + argsBase.fullArchiveMessenger = &p2pmocks.MessengerStub{ + BroadcastCalled: func(topic string, buff []byte) { + assert.Fail(t, "should have not been called") }, } @@ -338,7 +342,7 @@ func TestPeerAuthenticationSender_execute(t *testing.T) { err, isHardforkTriggered := senderInstance.execute() assert.Nil(t, err) - assert.True(t, broadcastCalled) + assert.True(t, mainBroadcastCalled) assert.False(t, isHardforkTriggered) }) t.Run("should work with some real components", func(t *testing.T) { @@ -354,7 +358,7 @@ func TestPeerAuthenticationSender_execute(t *testing.T) { argsBase := createMockBaseArgs() argsBase.privKey = skMessenger var buffResulted []byte - messenger := &p2pmocks.MessengerStub{ + mainMessenger := &p2pmocks.MessengerStub{ BroadcastCalled: func(topic string, buff []byte) { assert.Equal(t, argsBase.topic, topic) buffResulted = buff @@ -372,7 +376,27 @@ func TestPeerAuthenticationSender_execute(t *testing.T) { return core.PeerID(pkBytes) }, } - argsBase.messenger = messenger + argsBase.mainMessenger = mainMessenger + + argsBase.fullArchiveMessenger = &p2pmocks.MessengerStub{ + BroadcastCalled: func(topic string, buff []byte) { + assert.Equal(t, argsBase.topic, topic) + assert.Equal(t, buffResulted, buff) + }, + SignCalled: func(payload []byte) ([]byte, error) { + assert.Fail(t, "should have not been called") + return nil, nil + }, + VerifyCalled: func(payload []byte, pid core.PeerID, signature []byte) error { + assert.Fail(t, "should have not been called") + return nil + }, + IDCalled: func() core.PeerID { + assert.Fail(t, "should have not been called") + return "" + }, + } + args := createMockPeerAuthenticationSenderArgsSemiIntegrationTests(argsBase) senderInstance, _ := newPeerAuthenticationSender(args) @@ -382,7 +406,7 @@ func TestPeerAuthenticationSender_execute(t *testing.T) { skBytes, _ := senderInstance.privKey.ToByteArray() pkBytes, _ := senderInstance.publicKey.ToByteArray() - log.Info("args", "pid", argsBase.messenger.ID().Pretty(), "bls sk", skBytes, "bls pk", pkBytes) + log.Info("args", "pid", argsBase.mainMessenger.ID().Pretty(), "bls sk", skBytes, "bls pk", pkBytes) // verify the received bytes if they can be converted in a valid peer authentication message recoveredBatch := batch.Batch{} @@ -392,13 +416,13 @@ func TestPeerAuthenticationSender_execute(t *testing.T) { err = argsBase.marshaller.Unmarshal(recoveredMessage, recoveredBatch.Data[0]) assert.Nil(t, err) assert.Equal(t, pkBytes, recoveredMessage.Pubkey) - assert.Equal(t, argsBase.messenger.ID().Pretty(), core.PeerID(recoveredMessage.Pid).Pretty()) + assert.Equal(t, argsBase.mainMessenger.ID().Pretty(), core.PeerID(recoveredMessage.Pid).Pretty()) t.Run("verify BLS sig on having the payload == message's pid", func(t *testing.T) { errVerify := args.peerSignatureHandler.VerifyPeerSignature(recoveredMessage.Pubkey, core.PeerID(recoveredMessage.Pid), recoveredMessage.Signature) assert.Nil(t, errVerify) }) t.Run("verify ed25519 sig having the payload == message's payload", func(t *testing.T) { - errVerify := messenger.Verify(recoveredMessage.Payload, core.PeerID(recoveredMessage.Pid), recoveredMessage.PayloadSignature) + errVerify := mainMessenger.Verify(recoveredMessage.Payload, 
core.PeerID(recoveredMessage.Pid), recoveredMessage.PayloadSignature) assert.Nil(t, errVerify) }) t.Run("verify payload", func(t *testing.T) { @@ -423,7 +447,7 @@ func TestPeerAuthenticationSender_Execute(t *testing.T) { argsBase := createMockBaseArgs() wasBroadcastCalled := false - argsBase.messenger = &p2pmocks.MessengerStub{ + argsBase.mainMessenger = &p2pmocks.MessengerStub{ BroadcastCalled: func(topic string, buff []byte) { wasBroadcastCalled = true }, @@ -492,10 +516,15 @@ func TestPeerAuthenticationSender_Execute(t *testing.T) { t.Parallel() argsBase := createMockBaseArgs() - counterBroadcast := 0 - argsBase.messenger = &p2pmocks.MessengerStub{ + counterMainBroadcast := 0 + argsBase.mainMessenger = &p2pmocks.MessengerStub{ BroadcastCalled: func(topic string, buff []byte) { - counterBroadcast++ + counterMainBroadcast++ + }, + } + argsBase.fullArchiveMessenger = &p2pmocks.MessengerStub{ + BroadcastCalled: func(topic string, buff []byte) { + assert.Fail(t, "should have not been called") }, } args := createMockPeerAuthenticationSenderArgs(argsBase) @@ -516,7 +545,7 @@ func TestPeerAuthenticationSender_Execute(t *testing.T) { senderInstance.Execute() // observer senderInstance.Execute() // validator senderInstance.Execute() // observer - assert.Equal(t, 1, counterBroadcast) + assert.Equal(t, 1, counterMainBroadcast) }) t.Run("execute worked, should set the hardfork time duration value", func(t *testing.T) { t.Parallel() @@ -711,3 +740,14 @@ func TestPeerAuthenticationSender_ShouldTriggerHardfork(t *testing.T) { assert.Fail(t, "should not reach timeout") } } + +func TestPeerAuthenticationSender_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var senderInstance *peerAuthenticationSender + assert.True(t, senderInstance.IsInterfaceNil()) + + args := createMockPeerAuthenticationSenderArgs(createMockBaseArgs()) + senderInstance, _ = newPeerAuthenticationSender(args) + assert.False(t, senderInstance.IsInterfaceNil()) +} diff --git a/heartbeat/sender/peerShardSender.go b/heartbeat/sender/peerShardSender.go index 8ce5a7bb02a..45d641918cd 100644 --- a/heartbeat/sender/peerShardSender.go +++ b/heartbeat/sender/peerShardSender.go @@ -20,7 +20,8 @@ const minDelayBetweenSends = time.Second // ArgPeerShardSender represents the arguments for the peer shard sender type ArgPeerShardSender struct { - Messenger p2p.Messenger + MainMessenger p2p.Messenger + FullArchiveMessenger p2p.Messenger Marshaller marshal.Marshalizer ShardCoordinator sharding.Coordinator TimeBetweenSends time.Duration @@ -29,7 +30,8 @@ type ArgPeerShardSender struct { } type peerShardSender struct { - messenger p2p.Messenger + mainMessenger p2p.Messenger + fullArchiveMessenger p2p.Messenger marshaller marshal.Marshalizer shardCoordinator sharding.Coordinator timeBetweenSends time.Duration @@ -46,7 +48,8 @@ func NewPeerShardSender(args ArgPeerShardSender) (*peerShardSender, error) { } pss := &peerShardSender{ - messenger: args.Messenger, + mainMessenger: args.MainMessenger, + fullArchiveMessenger: args.FullArchiveMessenger, marshaller: args.Marshaller, shardCoordinator: args.ShardCoordinator, timeBetweenSends: args.TimeBetweenSends, @@ -63,8 +66,11 @@ func NewPeerShardSender(args ArgPeerShardSender) (*peerShardSender, error) { } func checkArgPeerShardSender(args ArgPeerShardSender) error { - if check.IfNil(args.Messenger) { - return process.ErrNilMessenger + if check.IfNil(args.MainMessenger) { + return fmt.Errorf("%w for main", process.ErrNilMessenger) + } + if check.IfNil(args.FullArchiveMessenger) { + return fmt.Errorf("%w for full 
archive", process.ErrNilMessenger) } if check.IfNil(args.Marshaller) { return process.ErrNilMarshalizer @@ -132,7 +138,8 @@ func (pss *peerShardSender) broadcastShard() { } log.Debug("broadcast peer shard", "shard", peerShard.ShardId) - pss.messenger.Broadcast(common.ConnectionTopic, peerShardBuff) + pss.mainMessenger.Broadcast(common.ConnectionTopic, peerShardBuff) + pss.fullArchiveMessenger.Broadcast(common.ConnectionTopic, peerShardBuff) } func (pss *peerShardSender) isCurrentNodeValidator() bool { diff --git a/heartbeat/sender/peerShardSender_test.go b/heartbeat/sender/peerShardSender_test.go index b3b396747af..cbc1b71714b 100644 --- a/heartbeat/sender/peerShardSender_test.go +++ b/heartbeat/sender/peerShardSender_test.go @@ -8,7 +8,6 @@ import ( "testing" "time" - "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/heartbeat" @@ -23,7 +22,8 @@ import ( func createMockArgPeerShardSender() ArgPeerShardSender { return ArgPeerShardSender{ - Messenger: &p2pmocks.MessengerStub{}, + MainMessenger: &p2pmocks.MessengerStub{}, + FullArchiveMessenger: &p2pmocks.MessengerStub{}, Marshaller: &marshal.GogoProtoMarshalizer{}, ShardCoordinator: &testscommon.ShardsCoordinatorMock{}, TimeBetweenSends: time.Second, @@ -39,15 +39,25 @@ func createMockArgPeerShardSender() ArgPeerShardSender { func TestNewPeerShardSender(t *testing.T) { t.Parallel() - t.Run("nil messenger should error", func(t *testing.T) { + t.Run("nil main messenger should error", func(t *testing.T) { t.Parallel() args := createMockArgPeerShardSender() - args.Messenger = nil + args.MainMessenger = nil pss, err := NewPeerShardSender(args) - assert.Equal(t, process.ErrNilMessenger, err) - assert.True(t, check.IfNil(pss)) + assert.True(t, errors.Is(err, process.ErrNilMessenger)) + assert.Nil(t, pss) + }) + t.Run("nil full archive messenger should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgPeerShardSender() + args.FullArchiveMessenger = nil + + pss, err := NewPeerShardSender(args) + assert.True(t, errors.Is(err, process.ErrNilMessenger)) + assert.Nil(t, pss) }) t.Run("nil marshaller should error", func(t *testing.T) { t.Parallel() @@ -57,7 +67,7 @@ func TestNewPeerShardSender(t *testing.T) { pss, err := NewPeerShardSender(args) assert.Equal(t, process.ErrNilMarshalizer, err) - assert.True(t, check.IfNil(pss)) + assert.Nil(t, pss) }) t.Run("nil shard coordinator should error", func(t *testing.T) { t.Parallel() @@ -67,7 +77,7 @@ func TestNewPeerShardSender(t *testing.T) { pss, err := NewPeerShardSender(args) assert.Equal(t, process.ErrNilShardCoordinator, err) - assert.True(t, check.IfNil(pss)) + assert.Nil(t, pss) }) t.Run("invalid time between sends should error", func(t *testing.T) { t.Parallel() @@ -78,7 +88,7 @@ func TestNewPeerShardSender(t *testing.T) { pss, err := NewPeerShardSender(args) assert.True(t, errors.Is(err, heartbeat.ErrInvalidTimeDuration)) assert.True(t, strings.Contains(err.Error(), "TimeBetweenSends")) - assert.True(t, check.IfNil(pss)) + assert.Nil(t, pss) }) t.Run("invalid threshold between sends should error", func(t *testing.T) { t.Parallel() @@ -89,7 +99,7 @@ func TestNewPeerShardSender(t *testing.T) { pss, err := NewPeerShardSender(args) assert.True(t, errors.Is(err, heartbeat.ErrInvalidThreshold)) assert.True(t, strings.Contains(err.Error(), "TimeThresholdBetweenSends")) - assert.True(t, check.IfNil(pss)) + assert.Nil(t, pss) }) t.Run("nil nodes coordinator should 
error", func(t *testing.T) { t.Parallel() @@ -99,14 +109,14 @@ func TestNewPeerShardSender(t *testing.T) { pss, err := NewPeerShardSender(args) assert.True(t, errors.Is(err, heartbeat.ErrNilNodesCoordinator)) - assert.True(t, check.IfNil(pss)) + assert.Nil(t, pss) }) t.Run("should work and validator should not broadcast", func(t *testing.T) { t.Parallel() args := createMockArgPeerShardSender() wasCalled := false - args.Messenger = &p2pmocks.MessengerStub{ + args.MainMessenger = &p2pmocks.MessengerStub{ BroadcastCalled: func(topic string, buff []byte) { wasCalled = true }, @@ -119,7 +129,7 @@ func TestNewPeerShardSender(t *testing.T) { args.TimeBetweenSends = 2 * time.Second pss, _ := NewPeerShardSender(args) - assert.False(t, check.IfNil(pss)) + assert.NotNil(t, pss) time.Sleep(3 * time.Second) _ = pss.Close() @@ -131,7 +141,17 @@ func TestNewPeerShardSender(t *testing.T) { args := createMockArgPeerShardSender() expectedShard := fmt.Sprintf("%d", args.ShardCoordinator.SelfId()) numOfCalls := uint32(0) - args.Messenger = &p2pmocks.MessengerStub{ + args.MainMessenger = &p2pmocks.MessengerStub{ + BroadcastCalled: func(topic string, buff []byte) { + shardInfo := &factory.PeerShard{} + err := args.Marshaller.Unmarshal(shardInfo, buff) + assert.Nil(t, err) + assert.Equal(t, expectedShard, shardInfo.ShardId) + assert.Equal(t, common.ConnectionTopic, topic) + atomic.AddUint32(&numOfCalls, 1) + }, + } + args.FullArchiveMessenger = &p2pmocks.MessengerStub{ BroadcastCalled: func(topic string, buff []byte) { shardInfo := &factory.PeerShard{} err := args.Marshaller.Unmarshal(shardInfo, buff) @@ -144,10 +164,20 @@ func TestNewPeerShardSender(t *testing.T) { args.TimeBetweenSends = 2 * time.Second pss, _ := NewPeerShardSender(args) - assert.False(t, check.IfNil(pss)) + assert.NotNil(t, pss) time.Sleep(3 * time.Second) _ = pss.Close() - assert.Equal(t, uint32(1), atomic.LoadUint32(&numOfCalls)) + assert.Equal(t, uint32(2), atomic.LoadUint32(&numOfCalls)) // one call for each messenger }) } + +func TestPeerShardSender_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var pss *peerShardSender + assert.True(t, pss.IsInterfaceNil()) + + pss, _ = NewPeerShardSender(createMockArgPeerShardSender()) + assert.False(t, pss.IsInterfaceNil()) +} diff --git a/heartbeat/sender/sender.go b/heartbeat/sender/sender.go index fbc5525be26..5589621f31f 100644 --- a/heartbeat/sender/sender.go +++ b/heartbeat/sender/sender.go @@ -12,7 +12,8 @@ import ( // ArgSender represents the arguments for the sender type ArgSender struct { - Messenger heartbeat.P2PMessenger + MainMessenger heartbeat.P2PMessenger + FullArchiveMessenger heartbeat.P2PMessenger Marshaller marshal.Marshalizer PeerAuthenticationTopic string HeartbeatTopic string @@ -56,7 +57,8 @@ func NewSender(args ArgSender) (*sender, error) { pas, err := createPeerAuthenticationSender(argPeerAuthenticationSenderFactory{ argBaseSender: argBaseSender{ - messenger: args.Messenger, + mainMessenger: args.MainMessenger, + fullArchiveMessenger: args.FullArchiveMessenger, marshaller: args.Marshaller, topic: args.PeerAuthenticationTopic, timeBetweenSends: args.PeerAuthenticationTimeBetweenSends, @@ -80,7 +82,8 @@ func NewSender(args ArgSender) (*sender, error) { hbs, err := createHeartbeatSender(argHeartbeatSenderFactory{ argBaseSender: argBaseSender{ - messenger: args.Messenger, + mainMessenger: args.MainMessenger, + fullArchiveMessenger: args.FullArchiveMessenger, marshaller: args.Marshaller, topic: args.HeartbeatTopic, timeBetweenSends: args.HeartbeatTimeBetweenSends, @@ -113,7 
+116,8 @@ func NewSender(args ArgSender) (*sender, error) { func checkSenderArgs(args ArgSender) error { basePeerAuthSenderArgs := argBaseSender{ - messenger: args.Messenger, + mainMessenger: args.MainMessenger, + fullArchiveMessenger: args.FullArchiveMessenger, marshaller: args.Marshaller, topic: args.PeerAuthenticationTopic, timeBetweenSends: args.PeerAuthenticationTimeBetweenSends, @@ -153,7 +157,8 @@ func checkSenderArgs(args ArgSender) error { hbsArgs := argHeartbeatSender{ argBaseSender: argBaseSender{ - messenger: args.Messenger, + mainMessenger: args.MainMessenger, + fullArchiveMessenger: args.FullArchiveMessenger, marshaller: args.Marshaller, topic: args.HeartbeatTopic, timeBetweenSends: args.HeartbeatTimeBetweenSends, @@ -177,7 +182,8 @@ func checkSenderArgs(args ArgSender) error { mhbsArgs := argMultikeyHeartbeatSender{ argBaseSender: argBaseSender{ - messenger: args.Messenger, + mainMessenger: args.MainMessenger, + fullArchiveMessenger: args.FullArchiveMessenger, marshaller: args.Marshaller, topic: args.HeartbeatTopic, timeBetweenSends: args.HeartbeatTimeBetweenSends, diff --git a/heartbeat/sender/sender_test.go b/heartbeat/sender/sender_test.go index bc9db68bad1..5509e23f16a 100644 --- a/heartbeat/sender/sender_test.go +++ b/heartbeat/sender/sender_test.go @@ -8,7 +8,6 @@ import ( "time" "github.com/multiversx/mx-chain-core-go/core" - "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/heartbeat" "github.com/multiversx/mx-chain-go/heartbeat/mock" "github.com/multiversx/mx-chain-go/testscommon" @@ -22,11 +21,12 @@ import ( func createMockSenderArgs() ArgSender { return ArgSender{ - Messenger: &p2pmocks.MessengerStub{}, - Marshaller: &marshallerMock.MarshalizerMock{}, - PeerAuthenticationTopic: "pa-topic", - HeartbeatTopic: "hb-topic", - PeerAuthenticationTimeBetweenSends: time.Second, + MainMessenger: &p2pmocks.MessengerStub{}, + FullArchiveMessenger: &p2pmocks.MessengerStub{}, + Marshaller: &marshallerMock.MarshalizerMock{}, + PeerAuthenticationTopic: "pa-topic", + HeartbeatTopic: "hb-topic", + PeerAuthenticationTimeBetweenSends: time.Second, PeerAuthenticationTimeBetweenSendsWhenError: time.Second, PeerAuthenticationTimeThresholdBetweenSends: 0.1, HeartbeatTimeBetweenSends: time.Second, @@ -55,15 +55,25 @@ func createMockSenderArgs() ArgSender { func TestNewSender(t *testing.T) { t.Parallel() - t.Run("nil peer messenger should error", func(t *testing.T) { + t.Run("nil main messenger should error", func(t *testing.T) { t.Parallel() args := createMockSenderArgs() - args.Messenger = nil + args.MainMessenger = nil senderInstance, err := NewSender(args) assert.Nil(t, senderInstance) - assert.Equal(t, heartbeat.ErrNilMessenger, err) + assert.True(t, errors.Is(err, heartbeat.ErrNilMessenger)) + }) + t.Run("nil full archive messenger should error", func(t *testing.T) { + t.Parallel() + + args := createMockSenderArgs() + args.FullArchiveMessenger = nil + senderInstance, err := NewSender(args) + + assert.Nil(t, senderInstance) + assert.True(t, errors.Is(err, heartbeat.ErrNilMessenger)) }) t.Run("nil marshaller should error", func(t *testing.T) { t.Parallel() @@ -303,7 +313,7 @@ func TestNewSender(t *testing.T) { args := createMockSenderArgs() senderInstance, err := NewSender(args) - assert.False(t, check.IfNil(senderInstance)) + assert.NotNil(t, senderInstance) assert.Nil(t, err) }) } @@ -343,3 +353,13 @@ func TestSender_GetCurrentNodeTypeShouldNotPanic(t *testing.T) { _ = senderInstance.Close() } + +func TestSender_IsInterfaceNil(t *testing.T) { + 
t.Parallel() + + var senderInstance *sender + assert.True(t, senderInstance.IsInterfaceNil()) + + senderInstance, _ = NewSender(createMockSenderArgs()) + assert.False(t, senderInstance.IsInterfaceNil()) +} diff --git a/integrationTests/consensus/consensusSigning_test.go b/integrationTests/consensus/consensusSigning_test.go index 7566828ada1..68f85cde15c 100644 --- a/integrationTests/consensus/consensusSigning_test.go +++ b/integrationTests/consensus/consensusSigning_test.go @@ -79,7 +79,8 @@ func TestConsensusWithInvalidSigners(t *testing.T) { defer func() { for shardID := range nodes { for _, n := range nodes[shardID] { - _ = n.Messenger.Close() + _ = n.MainMessenger.Close() + _ = n.FullArchiveMessenger.Close() } } }() diff --git a/integrationTests/consensus/consensus_test.go b/integrationTests/consensus/consensus_test.go index f2530e562a7..a94c5717efe 100644 --- a/integrationTests/consensus/consensus_test.go +++ b/integrationTests/consensus/consensus_test.go @@ -234,7 +234,8 @@ func runFullConsensusTest(t *testing.T, consensusType string, numKeysOnEachNode defer func() { for shardID := range nodes { for _, n := range nodes[shardID] { - _ = n.Messenger.Close() + _ = n.MainMessenger.Close() + _ = n.FullArchiveMessenger.Close() } } }() @@ -296,7 +297,8 @@ func runConsensusWithNotEnoughValidators(t *testing.T, consensusType string) { defer func() { for shardID := range nodes { for _, n := range nodes[shardID] { - _ = n.Messenger.Close() + _ = n.MainMessenger.Close() + _ = n.FullArchiveMessenger.Close() } } }() diff --git a/integrationTests/countInterceptor.go b/integrationTests/countInterceptor.go index ccca0752248..fba328de387 100644 --- a/integrationTests/countInterceptor.go +++ b/integrationTests/countInterceptor.go @@ -21,7 +21,7 @@ func NewCountInterceptor() *CountInterceptor { } // ProcessReceivedMessage is called each time a new message is received -func (ci *CountInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, _ core.PeerID) error { +func (ci *CountInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, _ core.PeerID, _ p2p.MessageHandler) error { ci.mutMessagesCount.Lock() ci.messagesCount[message.Topic()]++ ci.mutMessagesCount.Unlock() diff --git a/integrationTests/factory/componentsHelper.go b/integrationTests/factory/componentsHelper.go index 64f70e6bb8c..6238243659e 100644 --- a/integrationTests/factory/componentsHelper.go +++ b/integrationTests/factory/componentsHelper.go @@ -34,13 +34,14 @@ func CreateDefaultConfig(tb testing.TB) *config.Configs { ratingsConfig, _ := common.LoadRatingsConfig(configPathsHolder.Ratings) economicsConfig, _ := common.LoadEconomicsConfig(configPathsHolder.Economics) prefsConfig, _ := common.LoadPreferencesConfig(configPathsHolder.Preferences) - p2pConfig, _ := common.LoadP2PConfig(configPathsHolder.P2p) + mainP2PConfig, _ := common.LoadP2PConfig(configPathsHolder.MainP2p) + fullArchiveP2PConfig, _ := common.LoadP2PConfig(configPathsHolder.FullArchiveP2p) externalConfig, _ := common.LoadExternalConfig(configPathsHolder.External) systemSCConfig, _ := common.LoadSystemSmartContractsConfig(configPathsHolder.SystemSC) epochConfig, _ := common.LoadEpochConfig(configPathsHolder.Epoch) roundConfig, _ := common.LoadRoundConfig(configPathsHolder.RoundActivation) - p2pConfig.KadDhtPeerDiscovery.Enabled = false + mainP2PConfig.KadDhtPeerDiscovery.Enabled = false prefsConfig.Preferences.DestinationShardAsObserver = "0" prefsConfig.Preferences.ConnectionWatcherType = p2p.ConnectionWatcherTypePrint @@ -50,7 +51,8 @@ func CreateDefaultConfig(tb 
testing.TB) *config.Configs { configs.EconomicsConfig = economicsConfig configs.SystemSCConfig = systemSCConfig configs.PreferencesConfig = prefsConfig - configs.P2pConfig = p2pConfig + configs.MainP2pConfig = mainP2PConfig + configs.FullArchiveP2pConfig = fullArchiveP2PConfig configs.ExternalConfig = externalConfig configs.EpochConfig = epochConfig configs.RoundConfig = roundConfig @@ -79,7 +81,8 @@ func createConfigurationsPathsHolder() *config.ConfigurationPathsHolder { Economics: concatPath(EconomicsPath), Preferences: concatPath(PrefsPath), External: concatPath(ExternalPath), - P2p: concatPath(P2pPath), + MainP2p: concatPath(MainP2pPath), + FullArchiveP2p: concatPath(FullArchiveP2pPath), Epoch: concatPath(EpochPath), SystemSC: concatPath(SystemSCConfigPath), GasScheduleDirectoryName: concatPath(GasSchedule), diff --git a/integrationTests/factory/constants.go b/integrationTests/factory/constants.go index 1db46e07547..9fa9133b135 100644 --- a/integrationTests/factory/constants.go +++ b/integrationTests/factory/constants.go @@ -8,7 +8,8 @@ const ( EconomicsPath = "economics.toml" PrefsPath = "prefs.toml" ExternalPath = "external.toml" - P2pPath = "p2p.toml" + MainP2pPath = "p2p.toml" + FullArchiveP2pPath = "fullArchiveP2P.toml" EpochPath = "enableEpochs.toml" SystemSCConfigPath = "systemSmartContractsConfig.toml" GasSchedule = "gasSchedules" diff --git a/integrationTests/interface.go b/integrationTests/interface.go index 0ae816e28e4..a1b64756f9c 100644 --- a/integrationTests/interface.go +++ b/integrationTests/interface.go @@ -85,7 +85,7 @@ type Facade interface { GetQueryHandler(name string) (debug.QueryHandler, error) GetEpochStartDataAPI(epoch uint32) (*common.EpochStartDataAPI, error) GetPeerInfo(pid string) ([]core.QueryP2PPeerInfo, error) - GetConnectedPeersRatings() string + GetConnectedPeersRatingsOnMainNetwork() (string, error) CreateTransaction(txArgs *external.ArgsCreateTransaction) (*transaction.Transaction, []byte, error) ValidateTransaction(tx *transaction.Transaction) error ValidateTransactionForSimulation(tx *transaction.Transaction, bypassSignature bool) error diff --git a/integrationTests/longTests/antiflooding/messageProcessor.go b/integrationTests/longTests/antiflooding/messageProcessor.go index 3e0ae8963da..5c3838dea61 100644 --- a/integrationTests/longTests/antiflooding/messageProcessor.go +++ b/integrationTests/longTests/antiflooding/messageProcessor.go @@ -31,7 +31,7 @@ func NewMessageProcessor(antiflooder process.P2PAntifloodHandler, messenger p2p. 
} // ProcessReceivedMessage is the callback function from the p2p side whenever a new message is received -func (mp *messageProcessor) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID) error { +func (mp *messageProcessor) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID, _ p2p.MessageHandler) error { atomic.AddUint32(&mp.numMessagesReceived, 1) atomic.AddUint64(&mp.sizeMessagesReceived, uint64(len(message.Data()))) atomic.AddUint32(&mp.numMessagesReceivedPerInterval, 1) diff --git a/integrationTests/mock/networkComponentsMock.go b/integrationTests/mock/networkComponentsMock.go index 573a4ae7f66..8c4e849907f 100644 --- a/integrationTests/mock/networkComponentsMock.go +++ b/integrationTests/mock/networkComponentsMock.go @@ -8,15 +8,17 @@ import ( // NetworkComponentsStub - type NetworkComponentsStub struct { - Messenger p2p.Messenger - MessengerCalled func() p2p.Messenger - InputAntiFlood factory.P2PAntifloodHandler - OutputAntiFlood factory.P2PAntifloodHandler - PeerBlackList process.PeerBlackListCacher - PeerHonesty factory.PeerHonestyHandler - PreferredPeersHolder factory.PreferredPeersHolderHandler - PeersRatingHandlerField p2p.PeersRatingHandler - PeersRatingMonitorField p2p.PeersRatingMonitor + Messenger p2p.Messenger + MessengerCalled func() p2p.Messenger + InputAntiFlood factory.P2PAntifloodHandler + OutputAntiFlood factory.P2PAntifloodHandler + PeerBlackList process.PeerBlackListCacher + PeerHonesty factory.PeerHonestyHandler + PreferredPeersHolder factory.PreferredPeersHolderHandler + PeersRatingHandlerField p2p.PeersRatingHandler + PeersRatingMonitorField p2p.PeersRatingMonitor + FullArchiveNetworkMessengerField p2p.Messenger + FullArchivePreferredPeersHolder factory.PreferredPeersHolderHandler } // PubKeyCacher - @@ -82,6 +84,16 @@ func (ncs *NetworkComponentsStub) PeersRatingMonitor() p2p.PeersRatingMonitor { return ncs.PeersRatingMonitorField } +// FullArchiveNetworkMessenger - +func (ncs *NetworkComponentsStub) FullArchiveNetworkMessenger() p2p.Messenger { + return ncs.FullArchiveNetworkMessengerField +} + +// FullArchivePreferredPeersHolderHandler - +func (ncs *NetworkComponentsStub) FullArchivePreferredPeersHolderHandler() factory.PreferredPeersHolderHandler { + return ncs.FullArchivePreferredPeersHolder +} + // String - func (ncs *NetworkComponentsStub) String() string { return "NetworkComponentsStub" diff --git a/integrationTests/mock/processComponentsStub.go b/integrationTests/mock/processComponentsStub.go index 061a9b6f012..e5a94dd78c1 100644 --- a/integrationTests/mock/processComponentsStub.go +++ b/integrationTests/mock/processComponentsStub.go @@ -21,6 +21,7 @@ type ProcessComponentsStub struct { ShardCoord sharding.Coordinator ShardCoordinatorCalled func() sharding.Coordinator IntContainer process.InterceptorsContainer + FullArchiveIntContainer process.InterceptorsContainer ResContainer dataRetriever.ResolversContainer ReqFinder dataRetriever.RequestersFinder RoundHandlerField consensus.RoundHandler @@ -40,7 +41,8 @@ type ProcessComponentsStub struct { ReqHandler process.RequestHandler TxLogsProcess process.TransactionLogProcessorDatabase HeaderConstructValidator process.HeaderConstructionValidator - PeerMapper process.NetworkShardingCollector + MainPeerMapper process.NetworkShardingCollector + FullArchivePeerMapper process.NetworkShardingCollector TxCostSimulator factory.TransactionEvaluator FallbackHdrValidator process.FallbackHeaderValidator WhiteListHandlerInternal process.WhiteListHandler @@ -95,6 +97,11 @@ 
func (pcs *ProcessComponentsStub) InterceptorsContainer() process.InterceptorsCo return pcs.IntContainer } +// FullArchiveInterceptorsContainer - +func (pcs *ProcessComponentsStub) FullArchiveInterceptorsContainer() process.InterceptorsContainer { + return pcs.FullArchiveIntContainer +} + // ResolversContainer - func (pcs *ProcessComponentsStub) ResolversContainer() dataRetriever.ResolversContainer { return pcs.ResContainer @@ -190,7 +197,12 @@ func (pcs *ProcessComponentsStub) HeaderConstructionValidator() process.HeaderCo // PeerShardMapper - func (pcs *ProcessComponentsStub) PeerShardMapper() process.NetworkShardingCollector { - return pcs.PeerMapper + return pcs.MainPeerMapper +} + +// FullArchivePeerShardMapper - +func (pcs *ProcessComponentsStub) FullArchivePeerShardMapper() process.NetworkShardingCollector { + return pcs.FullArchivePeerMapper } // FallbackHeaderValidator - @@ -198,7 +210,7 @@ func (pcs *ProcessComponentsStub) FallbackHeaderValidator() process.FallbackHead return pcs.FallbackHdrValidator } -// TransactionCostSimulator - +// APITransactionEvaluator - func (pcs *ProcessComponentsStub) APITransactionEvaluator() factory.TransactionEvaluator { return pcs.TxCostSimulator } diff --git a/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go b/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go index de66625819d..b9492592bd3 100644 --- a/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go +++ b/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go @@ -33,6 +33,7 @@ import ( epochNotifierMock "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" "github.com/multiversx/mx-chain-go/testscommon/genericMocks" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" + "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" "github.com/multiversx/mx-chain-go/testscommon/scheduledDataSyncer" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" @@ -199,10 +200,12 @@ func testNodeStartsInEpoch(t *testing.T, shardID uint32, expectedHighestRound ui }) messenger := integrationTests.CreateMessengerWithNoDiscovery() time.Sleep(integrationTests.P2pBootstrapDelay) - nodeToJoinLate.Messenger = messenger + nodeToJoinLate.MainMessenger = messenger + + nodeToJoinLate.FullArchiveMessenger = &p2pmocks.MessengerStub{} for _, n := range nodes { - _ = n.ConnectTo(nodeToJoinLate) + _ = n.ConnectOnMain(nodeToJoinLate) } roundHandler := &mock.RoundHandlerMock{IndexField: int64(round)} @@ -230,7 +233,8 @@ func testNodeStartsInEpoch(t *testing.T, shardID uint32, expectedHighestRound ui argsBootstrapHandler := bootstrap.ArgsEpochStartBootstrap{ CryptoComponentsHolder: cryptoComponents, CoreComponentsHolder: coreComponents, - Messenger: nodeToJoinLate.Messenger, + MainMessenger: nodeToJoinLate.MainMessenger, + FullArchiveMessenger: nodeToJoinLate.FullArchiveMessenger, GeneralConfig: generalConfig, PrefsConfig: config.PreferencesConfig{ FullArchive: false, diff --git a/integrationTests/multiShard/hardFork/hardFork_test.go b/integrationTests/multiShard/hardFork/hardFork_test.go index 0651dad6f24..a3b3fb9cf4d 100644 --- a/integrationTests/multiShard/hardFork/hardFork_test.go +++ b/integrationTests/multiShard/hardFork/hardFork_test.go @@ -73,7 +73,7 @@ func TestHardForkWithoutTransactionInMultiShardedEnvironment(t *testing.T) { n.Close() } - _ = hardforkTriggerNode.Messenger.Close() + hardforkTriggerNode.Close() }() round := 
uint64(0) @@ -144,7 +144,7 @@ func TestHardForkWithContinuousTransactionsInMultiShardedEnvironment(t *testing. n.Close() } - _ = hardforkTriggerNode.Messenger.Close() + hardforkTriggerNode.Close() }() initialVal := big.NewInt(1000000000) @@ -579,16 +579,22 @@ func createHardForkExporter( AppStatusHandlerField: &statusHandler.AppStatusHandlerStub{}, } + networkComponents := integrationTests.GetDefaultNetworkComponents() + networkComponents.Messenger = node.MainMessenger + networkComponents.FullArchiveNetworkMessengerField = node.FullArchiveMessenger + networkComponents.PeersRatingHandlerField = node.PeersRatingHandler + networkComponents.InputAntiFlood = &mock.NilAntifloodHandler{} + networkComponents.OutputAntiFlood = &mock.NilAntifloodHandler{} argsExportHandler := factory.ArgsExporter{ CoreComponents: coreComponents, CryptoComponents: cryptoComponents, StatusCoreComponents: statusCoreComponents, + NetworkComponents: networkComponents, HeaderValidator: node.HeaderValidator, DataPool: node.DataPool, StorageService: node.Storage, RequestHandler: node.RequestHandler, ShardCoordinator: node.ShardCoordinator, - Messenger: node.Messenger, ActiveAccountsDBs: accountsDBs, ExportFolder: node.ExportFolder, ExportTriesStorageConfig: config.StorageConfig{ @@ -605,21 +611,20 @@ func createHardForkExporter( MaxOpenFiles: 10, }, }, - ExportStateStorageConfig: exportConfig, - ExportStateKeysConfig: keysConfig, - MaxTrieLevelInMemory: uint(5), - WhiteListHandler: node.WhiteListHandler, - WhiteListerVerifiedTxs: node.WhiteListerVerifiedTxs, - InterceptorsContainer: node.InterceptorsContainer, - ExistingResolvers: node.ResolversContainer, - ExistingRequesters: node.RequestersContainer, - NodesCoordinator: node.NodesCoordinator, - HeaderSigVerifier: node.HeaderSigVerifier, - HeaderIntegrityVerifier: node.HeaderIntegrityVerifier, - ValidityAttester: node.BlockTracker, - OutputAntifloodHandler: &mock.NilAntifloodHandler{}, - InputAntifloodHandler: &mock.NilAntifloodHandler{}, - RoundHandler: &mock.RoundHandlerMock{}, + ExportStateStorageConfig: exportConfig, + ExportStateKeysConfig: keysConfig, + MaxTrieLevelInMemory: uint(5), + WhiteListHandler: node.WhiteListHandler, + WhiteListerVerifiedTxs: node.WhiteListerVerifiedTxs, + MainInterceptorsContainer: node.MainInterceptorsContainer, + FullArchiveInterceptorsContainer: node.FullArchiveInterceptorsContainer, + ExistingResolvers: node.ResolversContainer, + ExistingRequesters: node.RequestersContainer, + NodesCoordinator: node.NodesCoordinator, + HeaderSigVerifier: node.HeaderSigVerifier, + HeaderIntegrityVerifier: node.HeaderIntegrityVerifier, + ValidityAttester: node.BlockTracker, + RoundHandler: &mock.RoundHandlerMock{}, InterceptorDebugConfig: config.InterceptorResolverDebugConfig{ Enabled: true, EnablePrint: true, @@ -632,8 +637,8 @@ func createHardForkExporter( MaxHardCapForMissingNodes: 500, NumConcurrentTrieSyncers: 50, TrieSyncerVersion: 2, - PeersRatingHandler: node.PeersRatingHandler, CheckNodesOnDisk: false, + NodeOperationMode: node.NodeOperationMode, } exportHandler, err := factory.NewExportHandlerFactory(argsExportHandler) diff --git a/integrationTests/node/heartbeatV2/heartbeatV2_test.go b/integrationTests/node/heartbeatV2/heartbeatV2_test.go index 2b9051534df..51f21c17e1a 100644 --- a/integrationTests/node/heartbeatV2/heartbeatV2_test.go +++ b/integrationTests/node/heartbeatV2/heartbeatV2_test.go @@ -4,6 +4,7 @@ import ( "testing" "time" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-go/heartbeat" 
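The TestXxx_IsInterfaceNil cases added throughout this patch (for the sender, the peerShardSender, and the peer authentication / multikey heartbeat senders) all guard the same Go pitfall: an interface value wrapping a typed nil pointer does not compare equal to nil, so callers must use the IsInterfaceNil method instead. A minimal, self-contained sketch of that behaviour, using a hypothetical component type rather than any code from this repository:

package main

import "fmt"

type component struct{}

// IsInterfaceNil returns true if there is no value under the interface
func (c *component) IsInterfaceNil() bool {
	return c == nil
}

func main() {
	var c *component // typed nil pointer

	var holder interface{ IsInterfaceNil() bool } = c

	fmt.Println(holder == nil)            // false: the interface wraps a typed nil
	fmt.Println(holder.IsInterfaceNil())  // true: the guard the new tests assert on
}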
"github.com/multiversx/mx-chain-go/integrationTests" logger "github.com/multiversx/mx-chain-logger-go" @@ -130,12 +131,90 @@ func TestHeartbeatV2_PeerAuthenticationMessageExpiration(t *testing.T) { assert.Equal(t, interactingNodes-2, nodes[0].DataPool.PeerAuthentications().Len()) } +func TestHeartbeatV2_AllPeersSendMessagesOnAllNetworks(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + interactingNodes := 3 + nodes := make([]*integrationTests.TestHeartbeatNode, interactingNodes) + p2pConfig := integrationTests.CreateP2PConfigWithNoDiscovery() + for i := 0; i < interactingNodes; i++ { + nodes[i] = integrationTests.NewTestHeartbeatNode(t, 3, 0, interactingNodes, p2pConfig, 60) + } + assert.Equal(t, interactingNodes, len(nodes)) + + // connect nodes on main network only + for i := 0; i < interactingNodes-1; i++ { + for j := i + 1; j < interactingNodes; j++ { + src := nodes[i] + dst := nodes[j] + _ = src.ConnectOnMain(dst) + } + } + + // Wait for messages to broadcast + time.Sleep(time.Second * 15) + + // check peer shard mappers + // full archive should not be updated at this point + for i := 0; i < interactingNodes; i++ { + for j := 0; j < interactingNodes; j++ { + if i == j { + continue + } + + peerInfo := nodes[i].FullArchivePeerShardMapper.GetPeerInfo(nodes[j].MainMessenger.ID()) + assert.Equal(t, core.UnknownPeer, peerInfo.PeerType) // nodes not connected on this network + + peerInfoMain := nodes[i].MainPeerShardMapper.GetPeerInfo(nodes[j].MainMessenger.ID()) + assert.Equal(t, nodes[j].ShardCoordinator.SelfId(), peerInfoMain.ShardID) + assert.Equal(t, core.ValidatorPeer, peerInfoMain.PeerType) // on main network they are all validators + } + } + + // connect nodes on full archive network as well + for i := 0; i < interactingNodes-1; i++ { + for j := i + 1; j < interactingNodes; j++ { + src := nodes[i] + dst := nodes[j] + _ = src.ConnectOnFullArchive(dst) + } + } + + // Wait for messages to broadcast + time.Sleep(time.Second * 15) + + // check peer shard mappers + // full archive should be updated at this point + for i := 0; i < interactingNodes; i++ { + for j := 0; j < interactingNodes; j++ { + if i == j { + continue + } + + peerInfo := nodes[i].FullArchivePeerShardMapper.GetPeerInfo(nodes[j].MainMessenger.ID()) + assert.Equal(t, nodes[j].ShardCoordinator.SelfId(), peerInfo.ShardID) + assert.Equal(t, core.ObserverPeer, peerInfo.PeerType) // observers because the peerAuth is not sent on this network + + peerInfoMain := nodes[i].MainPeerShardMapper.GetPeerInfo(nodes[j].MainMessenger.ID()) + assert.Equal(t, nodes[j].ShardCoordinator.SelfId(), peerInfoMain.ShardID) + assert.Equal(t, core.ValidatorPeer, peerInfoMain.PeerType) + } + } + + for i := 0; i < len(nodes); i++ { + nodes[i].Close() + } +} + func connectNodes(nodes []*integrationTests.TestHeartbeatNode, interactingNodes int) { for i := 0; i < interactingNodes-1; i++ { for j := i + 1; j < interactingNodes; j++ { src := nodes[i] dst := nodes[j] - _ = src.ConnectTo(dst) + _ = src.ConnectOnMain(dst) + _ = src.ConnectOnFullArchive(dst) } } } @@ -155,7 +234,7 @@ func checkMessages(t *testing.T, nodes []*integrationTests.TestHeartbeatNode, ma assert.Nil(t, err) assert.True(t, paCache.Has(pkBytes)) - assert.True(t, hbCache.Has(node.Messenger.ID().Bytes())) + assert.True(t, hbCache.Has(node.MainMessenger.ID().Bytes())) // Also check message age value, found := paCache.Get(pkBytes) diff --git a/integrationTests/oneNodeNetwork.go b/integrationTests/oneNodeNetwork.go index 554b83bb084..720ff0529c6 100644 --- 
a/integrationTests/oneNodeNetwork.go +++ b/integrationTests/oneNodeNetwork.go @@ -32,8 +32,7 @@ func NewOneNodeNetwork() *oneNodeNetwork { // Stop stops the test network func (n *oneNodeNetwork) Stop() { - _ = n.Node.Messenger.Close() - _ = n.Node.VMContainer.Close() + n.Node.Close() } // Mint mints the given address diff --git a/integrationTests/p2p/antiflood/messageProcessor.go b/integrationTests/p2p/antiflood/messageProcessor.go index bf04257b2c5..5f56985861f 100644 --- a/integrationTests/p2p/antiflood/messageProcessor.go +++ b/integrationTests/p2p/antiflood/messageProcessor.go @@ -30,7 +30,7 @@ func newMessageProcessor() *MessageProcessor { } // ProcessReceivedMessage is the callback function from the p2p side whenever a new message is received -func (mp *MessageProcessor) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID) error { +func (mp *MessageProcessor) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID, _ p2p.MessageHandler) error { atomic.AddUint32(&mp.numMessagesReceived, 1) atomic.AddUint64(&mp.sizeMessagesReceived, uint64(len(message.Data()))) diff --git a/integrationTests/p2p/networkSharding-hbv2/networkSharding_test.go b/integrationTests/p2p/networkSharding-hbv2/networkSharding_test.go index f8916c7e016..03679fb4201 100644 --- a/integrationTests/p2p/networkSharding-hbv2/networkSharding_test.go +++ b/integrationTests/p2p/networkSharding-hbv2/networkSharding_test.go @@ -40,9 +40,6 @@ func TestConnectionsInNetworkShardingWithShardingWithLists(t *testing.T) { MaxCrossShardObservers: 1, MaxSeeders: 1, Type: p2p.ListsSharder, - AdditionalConnections: p2pConfig.AdditionalConnectionsConfig{ - MaxFullHistoryObservers: 1, - }, } testConnectionsInNetworkSharding(t, p2pCfg) @@ -122,7 +119,7 @@ func stopNodes(advertiser p2p.Messenger, nodesMap map[uint32][]*integrationTests _ = advertiser.Close() for _, nodes := range nodesMap { for _, n := range nodes { - _ = n.Messenger.Close() + n.Close() } } } @@ -130,7 +127,7 @@ func stopNodes(advertiser p2p.Messenger, nodesMap map[uint32][]*integrationTests func startNodes(nodesMap map[uint32][]*integrationTests.TestHeartbeatNode) { for _, nodes := range nodesMap { for _, n := range nodes { - _ = n.Messenger.Bootstrap() + _ = n.MainMessenger.Bootstrap() } } } @@ -153,7 +150,7 @@ func createTestInterceptorForEachNode(nodesMap map[uint32][]*integrationTests.Te func sendMessageOnGlobalTopic(nodesMap map[uint32][]*integrationTests.TestHeartbeatNode) { fmt.Println("sending a message on global topic") - nodesMap[0][0].Messenger.Broadcast(integrationTests.GlobalTopic, []byte("global message")) + nodesMap[0][0].MainMessenger.Broadcast(integrationTests.GlobalTopic, []byte("global message")) time.Sleep(time.Second) } @@ -164,7 +161,7 @@ func sendMessagesOnIntraShardTopic(nodesMap map[uint32][]*integrationTests.TestH identifier := integrationTests.ShardTopic + n.ShardCoordinator.CommunicationIdentifier(n.ShardCoordinator.SelfId()) - nodes[0].Messenger.Broadcast(identifier, []byte("intra shard message")) + nodes[0].MainMessenger.Broadcast(identifier, []byte("intra shard message")) } time.Sleep(time.Second) } @@ -182,7 +179,7 @@ func sendMessagesOnCrossShardTopic(nodesMap map[uint32][]*integrationTests.TestH identifier := integrationTests.ShardTopic + n.ShardCoordinator.CommunicationIdentifier(shardIdDest) - nodes[0].Messenger.Broadcast(identifier, []byte("cross shard message")) + nodes[0].MainMessenger.Broadcast(identifier, []byte("cross shard message")) } } time.Sleep(time.Second) @@ -212,8 +209,8 @@ func 
testUnknownSeederPeers( for _, nodes := range nodesMap { for _, n := range nodes { - assert.Equal(t, 0, len(n.Messenger.GetConnectedPeersInfo().UnknownPeers)) - assert.Equal(t, 1, len(n.Messenger.GetConnectedPeersInfo().Seeders)) + assert.Equal(t, 0, len(n.MainMessenger.GetConnectedPeersInfo().UnknownPeers)) + assert.Equal(t, 1, len(n.MainMessenger.GetConnectedPeersInfo().Seeders)) } } } diff --git a/integrationTests/p2p/peersRating/peersRating_test.go b/integrationTests/p2p/peersRating/peersRating_test.go index a4a4bc0551e..6b8ca67f86a 100644 --- a/integrationTests/p2p/peersRating/peersRating_test.go +++ b/integrationTests/p2p/peersRating/peersRating_test.go @@ -10,6 +10,8 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-go/integrationTests" + "github.com/multiversx/mx-chain-go/integrationTests/mock" + "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/process/factory" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -27,29 +29,29 @@ func TestPeersRatingAndResponsiveness(t *testing.T) { var numOfShards uint32 = 1 var shardID uint32 = 0 - resolverNode := createNodeWithPeersRatingHandler(shardID, numOfShards) - maliciousNode := createNodeWithPeersRatingHandler(shardID, numOfShards) - requesterNode := createNodeWithPeersRatingHandler(core.MetachainShardId, numOfShards) + resolverNode := createNodeWithPeersRatingHandler(shardID, numOfShards, p2p.NormalOperation) + maliciousNode := createNodeWithPeersRatingHandler(shardID, numOfShards, p2p.NormalOperation) + requesterNode := createNodeWithPeersRatingHandler(core.MetachainShardId, numOfShards, p2p.NormalOperation) defer func() { - _ = resolverNode.Messenger.Close() - _ = maliciousNode.Messenger.Close() - _ = requesterNode.Messenger.Close() + resolverNode.Close() + maliciousNode.Close() + requesterNode.Close() }() time.Sleep(time.Second) - require.Nil(t, resolverNode.ConnectTo(maliciousNode)) - require.Nil(t, resolverNode.ConnectTo(requesterNode)) - require.Nil(t, maliciousNode.ConnectTo(requesterNode)) + require.Nil(t, resolverNode.ConnectOnMain(maliciousNode)) + require.Nil(t, resolverNode.ConnectOnMain(requesterNode)) + require.Nil(t, maliciousNode.ConnectOnMain(requesterNode)) time.Sleep(time.Second) hdr, hdrHash, hdrBuff := getHeader() // Broadcasts should not be considered for peers rating topic := factory.ShardBlocksTopic + resolverNode.ShardCoordinator.CommunicationIdentifier(requesterNode.ShardCoordinator.SelfId()) - resolverNode.Messenger.Broadcast(topic, hdrBuff) + resolverNode.MainMessenger.Broadcast(topic, hdrBuff) time.Sleep(time.Second) - maliciousNode.Messenger.Broadcast(topic, hdrBuff) + maliciousNode.MainMessenger.Broadcast(topic, hdrBuff) time.Sleep(time.Second) // check that broadcasts were successful _, err := requesterNode.DataPool.Headers().GetHeaderByHash(hdrHash) @@ -65,14 +67,14 @@ func TestPeersRatingAndResponsiveness(t *testing.T) { resolverNode.DataPool.Headers().AddHeader(hdrHash, hdr) requestHeader(requesterNode, numOfRequests, hdrHash, resolverNode.ShardCoordinator.SelfId()) - peerRatingsMap := getRatingsMap(t, requesterNode) + peerRatingsMap := getRatingsMap(t, requesterNode.PeersRatingMonitor, requesterNode.MainMessenger) // resolver node should have received and responded to numOfRequests - initialResolverRating, exists := peerRatingsMap[resolverNode.Messenger.ID().Pretty()] + initialResolverRating, exists := peerRatingsMap[resolverNode.MainMessenger.ID().Pretty()] 
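The expected-rating strings asserted in this test encode a clamped running sum: sending a request to a peer applies a negative delta, receiving its response applies a positive one, and the rating is kept inside the [-100, 100] interval after every step, which is why a peer already sitting at a bound absorbs part of the first delta (see the -100 + numOfRequests*increaseFactor + (numOfRequests-1)*decreaseFactor expectation below). A minimal, self-contained sketch of that arithmetic, using assumed illustrative deltas rather than the increaseFactor/decreaseFactor constants defined elsewhere in peersRating_test.go:

package main

import "fmt"

// Assumed illustrative deltas; the real constants are defined in the test file.
const (
	increase  = 2    // applied when a peer answers a request
	decrease  = -1   // applied when a request is sent to a peer
	maxRating = 100  // upper bound enforced by the peers rating handler
	minRating = -100 // lower bound enforced by the peers rating handler
)

// clamp keeps the rating inside the [-100, 100] interval after every event.
func clamp(rating int) int {
	if rating > maxRating {
		return maxRating
	}
	if rating < minRating {
		return minRating
	}
	return rating
}

// simulate applies the per-request penalty and, for answered requests, the
// per-response reward, clamping after each step.
func simulate(start, requests int, answered bool) int {
	rating := start
	for i := 0; i < requests; i++ {
		rating = clamp(rating + decrease)
		if answered {
			rating = clamp(rating + increase)
		}
	}
	return rating
}

func main() {
	fmt.Println(simulate(0, 10, true))    // answering resolver: 10*(increase+decrease) = 10
	fmt.Println(simulate(0, 10, false))   // ignoring peer: 10*decrease = -10
	fmt.Println(simulate(-100, 10, true)) // from the floor: -89, the first penalty is absorbed by the clamp
}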
require.True(t, exists) initialResolverExpectedRating := fmt.Sprintf("%d", numOfRequests*(decreaseFactor+increaseFactor)) assert.Equal(t, initialResolverExpectedRating, initialResolverRating) // malicious node should have only received numOfRequests - initialMaliciousRating, exists := peerRatingsMap[maliciousNode.Messenger.ID().Pretty()] + initialMaliciousRating, exists := peerRatingsMap[maliciousNode.MainMessenger.ID().Pretty()] require.True(t, exists) initialMaliciousExpectedRating := fmt.Sprintf("%d", numOfRequests*decreaseFactor) assert.Equal(t, initialMaliciousExpectedRating, initialMaliciousRating) @@ -81,14 +83,14 @@ func TestPeersRatingAndResponsiveness(t *testing.T) { numOfRequests = 120 requestHeader(requesterNode, numOfRequests, hdrHash, resolverNode.ShardCoordinator.SelfId()) - peerRatingsMap = getRatingsMap(t, requesterNode) + peerRatingsMap = getRatingsMap(t, requesterNode.PeersRatingMonitor, requesterNode.MainMessenger) // Resolver should have reached max limit and timestamps still update - initialResolverRating, exists = peerRatingsMap[resolverNode.Messenger.ID().Pretty()] + initialResolverRating, exists = peerRatingsMap[resolverNode.MainMessenger.ID().Pretty()] require.True(t, exists) assert.Equal(t, "100", initialResolverRating) // Malicious should have reached min limit and timestamps still update - initialMaliciousRating, exists = peerRatingsMap[maliciousNode.Messenger.ID().Pretty()] + initialMaliciousRating, exists = peerRatingsMap[maliciousNode.MainMessenger.ID().Pretty()] require.True(t, exists) assert.Equal(t, "-100", initialMaliciousRating) @@ -98,25 +100,116 @@ func TestPeersRatingAndResponsiveness(t *testing.T) { numOfRequests = 10 requestHeader(requesterNode, numOfRequests, hdrHash, resolverNode.ShardCoordinator.SelfId()) - peerRatingsMap = getRatingsMap(t, requesterNode) + peerRatingsMap = getRatingsMap(t, requesterNode.PeersRatingMonitor, requesterNode.MainMessenger) // resolver node should have the max rating + numOfRequests that didn't answer to - resolverRating, exists := peerRatingsMap[resolverNode.Messenger.ID().Pretty()] + resolverRating, exists := peerRatingsMap[resolverNode.MainMessenger.ID().Pretty()] require.True(t, exists) finalResolverExpectedRating := fmt.Sprintf("%d", 100+decreaseFactor*numOfRequests) assert.Equal(t, finalResolverExpectedRating, resolverRating) // malicious node should have the min rating + numOfRequests that received and responded to - maliciousRating, exists := peerRatingsMap[maliciousNode.Messenger.ID().Pretty()] + maliciousRating, exists := peerRatingsMap[maliciousNode.MainMessenger.ID().Pretty()] require.True(t, exists) finalMaliciousExpectedRating := fmt.Sprintf("%d", -100+numOfRequests*increaseFactor+(numOfRequests-1)*decreaseFactor) assert.Equal(t, finalMaliciousExpectedRating, maliciousRating) } -func createNodeWithPeersRatingHandler(shardID uint32, numShards uint32) *integrationTests.TestProcessorNode { +func TestPeersRatingAndResponsivenessOnFullArchive(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + var numOfShards uint32 = 1 + var shardID uint32 = 0 + resolverFullArchiveNode := createNodeWithPeersRatingHandler(shardID, numOfShards, p2p.FullArchiveMode) + requesterFullArchiveNode := createNodeWithPeersRatingHandler(core.MetachainShardId, numOfShards, p2p.FullArchiveMode) + regularNode := createNodeWithPeersRatingHandler(shardID, numOfShards, p2p.FullArchiveMode) + + defer func() { + resolverFullArchiveNode.Close() + requesterFullArchiveNode.Close() + regularNode.Close() + }() + + // 
all nodes are connected on main network, but only the full archive resolver and requester are connected on full archive network + time.Sleep(time.Second) + require.Nil(t, resolverFullArchiveNode.ConnectOnFullArchive(requesterFullArchiveNode)) + require.Nil(t, resolverFullArchiveNode.ConnectOnMain(regularNode)) + require.Nil(t, requesterFullArchiveNode.ConnectOnMain(regularNode)) + time.Sleep(time.Second) + + hdr, hdrHash, hdrBuff := getHeader() + + // Broadcasts should not be considered for peers rating and should only be available on full archive network + topic := factory.ShardBlocksTopic + resolverFullArchiveNode.ShardCoordinator.CommunicationIdentifier(requesterFullArchiveNode.ShardCoordinator.SelfId()) + resolverFullArchiveNode.FullArchiveMessenger.Broadcast(topic, hdrBuff) + time.Sleep(time.Second) + // check that broadcasts were successful + _, err := requesterFullArchiveNode.DataPool.Headers().GetHeaderByHash(hdrHash) + assert.Nil(t, err) + _, err = regularNode.DataPool.Headers().GetHeaderByHash(hdrHash) + assert.NotNil(t, err) + // clean the above broadcast consequences + requesterFullArchiveNode.DataPool.Headers().RemoveHeaderByHash(hdrHash) + resolverFullArchiveNode.DataPool.Headers().RemoveHeaderByHash(hdrHash) + + // Broadcast on main network should also work and reach all nodes + topic = factory.ShardBlocksTopic + regularNode.ShardCoordinator.CommunicationIdentifier(requesterFullArchiveNode.ShardCoordinator.SelfId()) + regularNode.MainMessenger.Broadcast(topic, hdrBuff) + time.Sleep(time.Second) + // check that broadcasts were successful + _, err = requesterFullArchiveNode.DataPool.Headers().GetHeaderByHash(hdrHash) + assert.Nil(t, err) + _, err = resolverFullArchiveNode.DataPool.Headers().GetHeaderByHash(hdrHash) + assert.Nil(t, err) + // clean the above broadcast consequences + requesterFullArchiveNode.DataPool.Headers().RemoveHeaderByHash(hdrHash) + resolverFullArchiveNode.DataPool.Headers().RemoveHeaderByHash(hdrHash) + regularNode.DataPool.Headers().RemoveHeaderByHash(hdrHash) + + numOfRequests := 10 + // Add header to the resolver node's cache + resolverFullArchiveNode.DataPool.Headers().AddHeader(hdrHash, hdr) + epochProviderStub, ok := requesterFullArchiveNode.EpochProvider.(*mock.CurrentNetworkEpochProviderStub) + assert.True(t, ok) + epochProviderStub.EpochIsActiveInNetworkCalled = func(epoch uint32) bool { + return false // force the full archive requester to request from full archive network + } + requestHeader(requesterFullArchiveNode, numOfRequests, hdrHash, resolverFullArchiveNode.ShardCoordinator.SelfId()) + + peerRatingsMap := getRatingsMap(t, requesterFullArchiveNode.PeersRatingMonitor, requesterFullArchiveNode.FullArchiveMessenger) + // resolver node should have received and responded to numOfRequests + initialResolverRating, exists := peerRatingsMap[resolverFullArchiveNode.MainMessenger.ID().Pretty()] + require.True(t, exists) + initialResolverExpectedRating := fmt.Sprintf("%d", numOfRequests*(decreaseFactor+increaseFactor)) + assert.Equal(t, initialResolverExpectedRating, initialResolverRating) + // main nodes should not be found in this cacher + _, exists = peerRatingsMap[regularNode.MainMessenger.ID().Pretty()] + require.False(t, exists) + + // force the full archive requester to request the header from main network + // as it does not exist on the main resolver, it should only decrease its rating + epochProviderStub.EpochIsActiveInNetworkCalled = func(epoch uint32) bool { + return true // force the full archive requester to request from main network +
} + requestHeader(requesterFullArchiveNode, numOfRequests, hdrHash, regularNode.ShardCoordinator.SelfId()) + peerRatingsMap = getRatingsMap(t, requesterFullArchiveNode.PeersRatingMonitor, requesterFullArchiveNode.MainMessenger) + + _, exists = peerRatingsMap[resolverFullArchiveNode.MainMessenger.ID().Pretty()] + require.False(t, exists) // resolverFullArchiveNode is not even connected to requesterFullArchiveNode on main network + + mainResolverRating, exists := peerRatingsMap[regularNode.MainMessenger.ID().Pretty()] + require.True(t, exists) + mainResolverExpectedRating := fmt.Sprintf("%d", numOfRequests*decreaseFactor) + assert.Equal(t, mainResolverExpectedRating, mainResolverRating) +} + +func createNodeWithPeersRatingHandler(shardID uint32, numShards uint32, nodeOperationMode p2p.NodeOperation) *integrationTests.TestProcessorNode { tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: numShards, NodeShardId: shardID, WithPeersRatingHandler: true, + NodeOperationMode: nodeOperationMode, }) return tpn @@ -147,11 +240,12 @@ func getHeader() (*block.Header, []byte, []byte) { return hdr, hdrHash, hdrBuff } -func getRatingsMap(t *testing.T, node *integrationTests.TestProcessorNode) map[string]string { - peerRatingsStr := node.PeersRatingMonitor.GetConnectedPeersRatings() +func getRatingsMap(t *testing.T, monitor p2p.PeersRatingMonitor, connectionsHandler p2p.ConnectionsHandler) map[string]string { + peerRatingsStr, err := monitor.GetConnectedPeersRatings(connectionsHandler) + require.Nil(t, err) peerRatingsMap := make(map[string]string) - err := json.Unmarshal([]byte(peerRatingsStr), &peerRatingsMap) + err = json.Unmarshal([]byte(peerRatingsStr), &peerRatingsMap) require.Nil(t, err) return peerRatingsMap diff --git a/integrationTests/realcomponents/processorRunner.go b/integrationTests/realcomponents/processorRunner.go index 9265298737e..14f48a03ce4 100644 --- a/integrationTests/realcomponents/processorRunner.go +++ b/integrationTests/realcomponents/processorRunner.go @@ -167,7 +167,8 @@ func (pr *ProcessorRunner) createStatusCoreComponents(tb testing.TB) { func (pr *ProcessorRunner) createNetworkComponents(tb testing.TB) { argsNetwork := factoryNetwork.NetworkComponentsFactoryArgs{ - P2pConfig: *pr.Config.P2pConfig, + MainP2pConfig: *pr.Config.MainP2pConfig, + FullArchiveP2pConfig: *pr.Config.FullArchiveP2pConfig, MainConfig: *pr.Config.GeneralConfig, RatingsConfig: *pr.Config.RatingsConfig, StatusHandler: pr.StatusCoreComponents.AppStatusHandler(), diff --git a/integrationTests/resolvers/headers/headers_test.go b/integrationTests/resolvers/headers/headers_test.go index 29823a0090d..e686225bbc6 100644 --- a/integrationTests/resolvers/headers/headers_test.go +++ b/integrationTests/resolvers/headers/headers_test.go @@ -24,8 +24,8 @@ func TestRequestResolveShardHeadersByHashRequestingShardResolvingShard(t *testin shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(shardId, shardId) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + nRequester.Close() + nResolver.Close() }() headerNonce := uint64(0) header, hash := resolvers.CreateShardHeader(headerNonce, integrationTests.ChainID) @@ -61,8 +61,8 @@ func TestRequestResolveShardHeadersByHashRequestingMetaResolvingShard(t *testing shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(shardId, core.MetachainShardId) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + nRequester.Close() + 
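// Illustrative sketch (editor-added, hypothetical helper, not part of the patch): the expected-rating
// assertions in the two tests above follow a simple rule — every request sent to a peer changes its
// rating by decreaseFactor (a negative value in these tests) and every reply received changes it by
// increaseFactor, with the stored value clamped to the limits the tests assert (100 and -100).
// A minimal update rule consistent with those assertions:
func applyRatingChange(currentRating int, delta int) int {
	updatedRating := currentRating + delta
	if updatedRating > 100 {
		return 100
	}
	if updatedRating < -100 {
		return -100
	}
	return updatedRating
}
// A peer that answers a request therefore nets decreaseFactor+increaseFactor per round trip, while a
// peer that never answers only accumulates decreaseFactor per request, which matches the
// initialResolverExpectedRating and initialMaliciousExpectedRating values computed above.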
nResolver.Close() }() headerNonce := uint64(0) header, hash := resolvers.CreateShardHeader(headerNonce, integrationTests.ChainID) @@ -98,8 +98,8 @@ func TestRequestResolveShardHeadersByHashRequestingShardResolvingMeta(t *testing shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(core.MetachainShardId, shardId) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + nRequester.Close() + nResolver.Close() }() headerNonce := uint64(0) header, hash := resolvers.CreateShardHeader(headerNonce, integrationTests.ChainID) @@ -137,8 +137,8 @@ func TestRequestResolveShardHeadersByNonceRequestingShardResolvingShard(t *testi shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(shardId, shardId) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + nRequester.Close() + nResolver.Close() }() headerNonce := uint64(0) header, hash := resolvers.CreateShardHeader(headerNonce, integrationTests.ChainID) @@ -176,8 +176,8 @@ func TestRequestResolveShardHeadersByNonceRequestingMetaResolvingShard(t *testin shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(shardId, core.MetachainShardId) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + nRequester.Close() + nResolver.Close() }() headerNonce := uint64(0) header, hash := resolvers.CreateShardHeader(headerNonce, integrationTests.ChainID) @@ -215,8 +215,8 @@ func TestRequestResolveShardHeadersByNonceRequestingShardResolvingMeta(t *testin shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(core.MetachainShardId, shardId) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + nRequester.Close() + nResolver.Close() }() headerNonce := uint64(0) header, hash := resolvers.CreateShardHeader(headerNonce, integrationTests.ChainID) diff --git a/integrationTests/resolvers/metablock/metablock_test.go b/integrationTests/resolvers/metablock/metablock_test.go index 37da9de9c3a..957fffb7fa2 100644 --- a/integrationTests/resolvers/metablock/metablock_test.go +++ b/integrationTests/resolvers/metablock/metablock_test.go @@ -24,8 +24,8 @@ func TestRequestResolveMetaHeadersByHashRequestingShardResolvingShard(t *testing shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(shardId, shardId) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + nRequester.Close() + nResolver.Close() }() headerNonce := uint64(0) header, hash := resolvers.CreateMetaHeader(headerNonce, integrationTests.ChainID) @@ -61,8 +61,8 @@ func TestRequestResolveMetaHeadersByHashRequestingMetaResolvingShard(t *testing. shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(shardId, core.MetachainShardId) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + nRequester.Close() + nResolver.Close() }() headerNonce := uint64(0) header, hash := resolvers.CreateMetaHeader(headerNonce, integrationTests.ChainID) @@ -98,8 +98,8 @@ func TestRequestResolveMetaHeadersByHashRequestingShardResolvingMeta(t *testing. 
shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(core.MetachainShardId, shardId) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + nRequester.Close() + nResolver.Close() }() headerNonce := uint64(0) header, hash := resolvers.CreateMetaHeader(headerNonce, integrationTests.ChainID) @@ -137,8 +137,8 @@ func TestRequestResolveMetaHeadersByNonceRequestingShardResolvingShard(t *testin shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(shardId, shardId) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + nRequester.Close() + nResolver.Close() }() headerNonce := uint64(0) header, hash := resolvers.CreateMetaHeader(headerNonce, integrationTests.ChainID) @@ -176,8 +176,8 @@ func TestRequestResolveMetaHeadersByNonceRequestingMetaResolvingShard(t *testing shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(shardId, core.MetachainShardId) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + nRequester.Close() + nResolver.Close() }() headerNonce := uint64(0) header, hash := resolvers.CreateMetaHeader(headerNonce, integrationTests.ChainID) @@ -215,8 +215,8 @@ func TestRequestResolveMetaHeadersByNonceRequestingShardResolvingMeta(t *testing shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(core.MetachainShardId, shardId) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + nRequester.Close() + nResolver.Close() }() headerNonce := uint64(0) header, hash := resolvers.CreateMetaHeader(headerNonce, integrationTests.ChainID) diff --git a/integrationTests/resolvers/miniblocks/miniblocks_test.go b/integrationTests/resolvers/miniblocks/miniblocks_test.go index d115d76ce54..989dd239ec6 100644 --- a/integrationTests/resolvers/miniblocks/miniblocks_test.go +++ b/integrationTests/resolvers/miniblocks/miniblocks_test.go @@ -18,8 +18,8 @@ func TestRequestResolveMiniblockByHashRequestingShardResolvingSameShard(t *testi shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(shardId, shardId) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + nRequester.Close() + nResolver.Close() }() miniblock, hash := resolvers.CreateMiniblock(shardId, shardId) @@ -54,8 +54,8 @@ func TestRequestResolveMiniblockByHashRequestingShardResolvingOtherShard(t *test shardIdRequester := uint32(1) nResolver, nRequester := resolvers.CreateResolverRequester(shardIdResolver, shardIdRequester) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + nRequester.Close() + nResolver.Close() }() miniblock, hash := resolvers.CreateMiniblock(shardIdResolver, shardIdRequester) @@ -89,8 +89,8 @@ func TestRequestResolveMiniblockByHashRequestingShardResolvingMeta(t *testing.T) shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(core.MetachainShardId, shardId) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + nRequester.Close() + nResolver.Close() }() miniblock, hash := resolvers.CreateMiniblock(shardId, shardId) @@ -124,8 +124,8 @@ func TestRequestResolveMiniblockByHashRequestingMetaResolvingShard(t *testing.T) shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(shardId, core.MetachainShardId) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + nRequester.Close() + nResolver.Close() }() miniblock, 
hash := resolvers.CreateMiniblock(shardId, core.MetachainShardId) @@ -159,8 +159,8 @@ func TestRequestResolvePeerMiniblockByHashRequestingShardResolvingSameShard(t *t shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(shardId, shardId) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + nRequester.Close() + nResolver.Close() }() miniblock, hash := resolvers.CreateMiniblock(core.MetachainShardId, core.AllShardId) @@ -195,8 +195,8 @@ func TestRequestResolvePeerMiniblockByHashRequestingShardResolvingOtherShard(t * shardIdRequester := uint32(1) nResolver, nRequester := resolvers.CreateResolverRequester(shardIdResolver, shardIdRequester) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + nRequester.Close() + nResolver.Close() }() miniblock, hash := resolvers.CreateMiniblock(shardIdResolver, core.AllShardId) @@ -230,8 +230,8 @@ func TestRequestResolvePeerMiniblockByHashRequestingShardResolvingMeta(t *testin shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(core.MetachainShardId, shardId) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + nRequester.Close() + nResolver.Close() }() miniblock, hash := resolvers.CreateMiniblock(shardId, core.AllShardId) @@ -265,8 +265,8 @@ func TestRequestResolvePeerMiniblockByHashRequestingMetaResolvingShard(t *testin shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(shardId, core.MetachainShardId) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + nRequester.Close() + nResolver.Close() }() miniblock, hash := resolvers.CreateMiniblock(shardId, core.AllShardId) diff --git a/integrationTests/resolvers/rewards/rewards_test.go b/integrationTests/resolvers/rewards/rewards_test.go index 5e12fd9c1cf..c0cf4cea66d 100644 --- a/integrationTests/resolvers/rewards/rewards_test.go +++ b/integrationTests/resolvers/rewards/rewards_test.go @@ -20,8 +20,8 @@ func TestRequestResolveRewardsByHashRequestingShardResolvingOtherShard(t *testin shardIdRequester := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(shardIdResolver, shardIdRequester) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + nRequester.Close() + nResolver.Close() }() headerNonce := uint64(0) reward, hash := resolvers.CreateReward(headerNonce) diff --git a/integrationTests/resolvers/smartContractsResults/scrs_test.go b/integrationTests/resolvers/smartContractsResults/scrs_test.go index 7b243379259..75ac19c693c 100644 --- a/integrationTests/resolvers/smartContractsResults/scrs_test.go +++ b/integrationTests/resolvers/smartContractsResults/scrs_test.go @@ -20,8 +20,8 @@ func TestRequestResolveLargeSCRByHashRequestingShardResolvingOtherShard(t *testi shardIdRequester := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(shardIdResolver, shardIdRequester) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + nRequester.Close() + nResolver.Close() }() scr, hash := resolvers.CreateLargeSmartContractResults() diff --git a/integrationTests/resolvers/testInitializer.go b/integrationTests/resolvers/testInitializer.go index 95f0d388df9..2910c7590f7 100644 --- a/integrationTests/resolvers/testInitializer.go +++ b/integrationTests/resolvers/testInitializer.go @@ -38,7 +38,7 @@ func CreateResolverRequester( }) time.Sleep(time.Second) - err := 
nRequester.Messenger.ConnectToPeer(integrationTests.GetConnectableAddress(nResolver.Messenger)) + err := nRequester.MainMessenger.ConnectToPeer(integrationTests.GetConnectableAddress(nResolver.MainMessenger)) Log.LogIfError(err) time.Sleep(time.Second) diff --git a/integrationTests/singleShard/block/interceptedRequestHdr/interceptedRequestHdr_test.go b/integrationTests/singleShard/block/interceptedRequestHdr/interceptedRequestHdr_test.go index a36c16007b8..9a96797d702 100644 --- a/integrationTests/singleShard/block/interceptedRequestHdr/interceptedRequestHdr_test.go +++ b/integrationTests/singleShard/block/interceptedRequestHdr/interceptedRequestHdr_test.go @@ -49,13 +49,13 @@ func TestNode_GenerateSendInterceptHeaderByNonceWithNetMessenger(t *testing.T) { }) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + nRequester.Close() + nResolver.Close() }() //connect messengers together time.Sleep(time.Second) - err := nResolver.ConnectTo(nRequester) + err := nResolver.ConnectOnMain(nRequester) require.Nil(t, err) time.Sleep(time.Second) @@ -117,13 +117,13 @@ func TestNode_InterceptedHeaderWithWrongChainIDShouldBeDiscarded(t *testing.T) { }) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + nRequester.Close() + nResolver.Close() }() //connect messengers together time.Sleep(time.Second) - err := nResolver.ConnectTo(nRequester) + err := nResolver.ConnectOnMain(nRequester) require.Nil(t, err) time.Sleep(time.Second) diff --git a/integrationTests/singleShard/block/interceptedRequestTxBlockBody/interceptedRequestTxBlockBody_test.go b/integrationTests/singleShard/block/interceptedRequestTxBlockBody/interceptedRequestTxBlockBody_test.go index 1ef06e7aacb..4a66cccfc02 100644 --- a/integrationTests/singleShard/block/interceptedRequestTxBlockBody/interceptedRequestTxBlockBody_test.go +++ b/integrationTests/singleShard/block/interceptedRequestTxBlockBody/interceptedRequestTxBlockBody_test.go @@ -42,13 +42,13 @@ func TestNode_GenerateSendInterceptTxBlockBodyWithNetMessenger(t *testing.T) { }) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + nRequester.Close() + nResolver.Close() }() //connect messengers together time.Sleep(time.Second) - err := nRequester.ConnectTo(nResolver) + err := nRequester.ConnectOnMain(nResolver) require.Nil(t, err) time.Sleep(time.Second) diff --git a/integrationTests/singleShard/transaction/interceptedBulkTx/interceptedBulkTx_test.go b/integrationTests/singleShard/transaction/interceptedBulkTx/interceptedBulkTx_test.go index 3e8a63e1b33..fab7310acb5 100644 --- a/integrationTests/singleShard/transaction/interceptedBulkTx/interceptedBulkTx_test.go +++ b/integrationTests/singleShard/transaction/interceptedBulkTx/interceptedBulkTx_test.go @@ -136,7 +136,7 @@ func TestNode_SendTransactionFromAnUnmintedAccountShouldReturnErrorAtApiLevel(t }) defer func() { - _ = node.Messenger.Close() + node.Close() }() tx := &transaction.Transaction{ diff --git a/integrationTests/singleShard/transaction/interceptedBulkUnsignedTx/interceptedBulkUnsignedTx_test.go b/integrationTests/singleShard/transaction/interceptedBulkUnsignedTx/interceptedBulkUnsignedTx_test.go index ea919f0139a..9d624eb4038 100644 --- a/integrationTests/singleShard/transaction/interceptedBulkUnsignedTx/interceptedBulkUnsignedTx_test.go +++ b/integrationTests/singleShard/transaction/interceptedBulkUnsignedTx/interceptedBulkUnsignedTx_test.go @@ -91,7 +91,7 @@ func 
TestNode_GenerateSendInterceptBulkUnsignedTransactionsWithMessenger(t *test noOfUnsignedTx, integrationTests.TestMarshalizer, n.ShardCoordinator, - n.Messenger, + n.MainMessenger, ) assert.Nil(t, err) diff --git a/integrationTests/singleShard/transaction/interceptedResolvedTx/interceptedResolvedTx_test.go b/integrationTests/singleShard/transaction/interceptedResolvedTx/interceptedResolvedTx_test.go index 0de6313c58d..c0c6250cdc2 100644 --- a/integrationTests/singleShard/transaction/interceptedResolvedTx/interceptedResolvedTx_test.go +++ b/integrationTests/singleShard/transaction/interceptedResolvedTx/interceptedResolvedTx_test.go @@ -40,13 +40,13 @@ func TestNode_RequestInterceptTransactionWithMessengerAndWhitelist(t *testing.T) TxSignPrivKeyShardId: txSignPrivKeyShardId, }) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + nRequester.Close() + nResolver.Close() }() //connect messengers together time.Sleep(time.Second) - err := nRequester.ConnectTo(nResolver) + err := nRequester.ConnectOnMain(nResolver) require.Nil(t, err) time.Sleep(time.Second) @@ -136,13 +136,13 @@ func TestNode_RequestInterceptRewardTransactionWithMessenger(t *testing.T) { TxSignPrivKeyShardId: txSignPrivKeyShardId, }) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + nRequester.Close() + nResolver.Close() }() //connect messengers together time.Sleep(time.Second) - err := nRequester.ConnectTo(nResolver) + err := nRequester.ConnectOnMain(nResolver) require.Nil(t, err) time.Sleep(time.Second) diff --git a/integrationTests/singleShard/transaction/interceptedResolvedUnsignedTx/interceptedResolvedUnsignedTx_test.go b/integrationTests/singleShard/transaction/interceptedResolvedUnsignedTx/interceptedResolvedUnsignedTx_test.go index b685444fa85..5fb7594aa32 100644 --- a/integrationTests/singleShard/transaction/interceptedResolvedUnsignedTx/interceptedResolvedUnsignedTx_test.go +++ b/integrationTests/singleShard/transaction/interceptedResolvedUnsignedTx/interceptedResolvedUnsignedTx_test.go @@ -38,13 +38,13 @@ func TestNode_RequestInterceptUnsignedTransactionWithMessenger(t *testing.T) { TxSignPrivKeyShardId: txSignPrivKeyShardId, }) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + nRequester.Close() + nResolver.Close() }() //connect messengers together time.Sleep(time.Second) - err := nRequester.ConnectTo(nResolver) + err := nRequester.ConnectOnMain(nResolver) require.Nil(t, err) time.Sleep(time.Second) diff --git a/integrationTests/state/stateTrieSync/stateTrieSync_test.go b/integrationTests/state/stateTrieSync/stateTrieSync_test.go index 9dfda16865a..fea8e9e3fe2 100644 --- a/integrationTests/state/stateTrieSync/stateTrieSync_test.go +++ b/integrationTests/state/stateTrieSync/stateTrieSync_test.go @@ -53,7 +53,7 @@ func createTestProcessorNodeAndTrieStorage( TrieStore: mainStorer, GasScheduleMap: createTestGasMap(), }) - _ = node.Messenger.CreateTopic(common.ConsensusTopic+node.ShardCoordinator.CommunicationIdentifier(node.ShardCoordinator.SelfId()), true) + _ = node.MainMessenger.CreateTopic(common.ConsensusTopic+node.ShardCoordinator.CommunicationIdentifier(node.ShardCoordinator.SelfId()), true) return node, mainStorer } @@ -86,12 +86,12 @@ func testNodeRequestInterceptTrieNodesWithMessenger(t *testing.T, version int) { _ = trieStorageRequester.DestroyUnit() _ = trieStorageResolver.DestroyUnit() - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + nRequester.Close() + nResolver.Close() }() 
time.Sleep(time.Second) - err := nRequester.ConnectTo(nResolver) + err := nRequester.ConnectOnMain(nResolver) require.Nil(t, err) time.Sleep(integrationTests.SyncDelay) @@ -207,12 +207,12 @@ func testNodeRequestInterceptTrieNodesWithMessengerNotSyncingShouldErr(t *testin _ = trieStorageRequester.DestroyUnit() _ = trieStorageResolver.DestroyUnit() - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + nRequester.Close() + nResolver.Close() }() time.Sleep(time.Second) - err := nRequester.ConnectTo(nResolver) + err := nRequester.ConnectOnMain(nResolver) require.Nil(t, err) time.Sleep(integrationTests.SyncDelay) @@ -254,7 +254,7 @@ func testNodeRequestInterceptTrieNodesWithMessengerNotSyncingShouldErr(t *testin go func() { // sudden close of the resolver node after just 2 seconds time.Sleep(time.Second * 2) - _ = nResolver.Messenger.Close() + nResolver.Close() log.Info("resolver node closed, the requester should soon fail in error") }() @@ -315,12 +315,12 @@ func testMultipleDataTriesSync(t *testing.T, numAccounts int, numDataTrieLeaves _ = trieStorageRequester.DestroyUnit() _ = trieStorageResolver.DestroyUnit() - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + nRequester.Close() + nResolver.Close() }() time.Sleep(time.Second) - err := nRequester.ConnectTo(nResolver) + err := nRequester.ConnectOnMain(nResolver) require.Nil(t, err) time.Sleep(integrationTests.SyncDelay) @@ -453,7 +453,7 @@ func testSyncMissingSnapshotNodes(t *testing.T, version int) { nRequester := nodes[0] nResolver := nodes[1] - err := nRequester.ConnectTo(nResolver) + err := nRequester.ConnectOnMain(nResolver) require.Nil(t, err) time.Sleep(integrationTests.SyncDelay) diff --git a/integrationTests/sync/edgeCases/edgeCases_test.go b/integrationTests/sync/edgeCases/edgeCases_test.go index 285f0876cd1..f3167b0528e 100644 --- a/integrationTests/sync/edgeCases/edgeCases_test.go +++ b/integrationTests/sync/edgeCases/edgeCases_test.go @@ -89,12 +89,12 @@ func TestSyncMetaNodeIsSyncingReceivedHigherRoundBlockFromShard(t *testing.T) { syncNodesSlice := []*integrationTests.TestProcessorNode{syncMetaNode} for _, n := range nodes { for _, sn := range syncNodesSlice { - _ = sn.ConnectTo(n) + _ = sn.ConnectOnMain(n) } } integrationTests.BootstrapDelay() - require.True(t, len(syncMetaNode.Messenger.ConnectedPeers()) > 1, "not enough peers connected to this node."+ + require.True(t, len(syncMetaNode.MainMessenger.ConnectedPeers()) > 1, "not enough peers connected to this node."+ " Check that the peer discovery mechanism works properly.") integrationTests.StartSyncingBlocks(syncNodesSlice) diff --git a/integrationTests/testConsensusNode.go b/integrationTests/testConsensusNode.go index 59620306e34..049e660a8dc 100644 --- a/integrationTests/testConsensusNode.go +++ b/integrationTests/testConsensusNode.go @@ -44,6 +44,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" testFactory "github.com/multiversx/mx-chain-go/testscommon/factory" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" + "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" @@ -76,16 +77,17 @@ type ArgsTestConsensusNode struct { // TestConsensusNode represents a structure used in integration tests used for consensus tests type TestConsensusNode struct { - Node *node.Node - Messenger 
p2p.Messenger - NodesCoordinator nodesCoordinator.NodesCoordinator - ShardCoordinator sharding.Coordinator - ChainHandler data.ChainHandler - BlockProcessor *mock.BlockProcessorMock - RequestersFinder dataRetriever.RequestersFinder - AccountsDB *state.AccountsDB - NodeKeys *TestKeyPair - MultiSigner *cryptoMocks.MultisignerMock + Node *node.Node + MainMessenger p2p.Messenger + FullArchiveMessenger p2p.Messenger + NodesCoordinator nodesCoordinator.NodesCoordinator + ShardCoordinator sharding.Coordinator + ChainHandler data.ChainHandler + BlockProcessor *mock.BlockProcessorMock + RequestersFinder dataRetriever.RequestersFinder + AccountsDB *state.AccountsDB + NodeKeys *TestKeyPair + MultiSigner *cryptoMocks.MultisignerMock } // NewTestConsensusNode returns a new TestConsensusNode @@ -181,7 +183,8 @@ func (tcn *TestConsensusNode) initNode(args ArgsTestConsensusNode) { pkBytes, _ := tcn.NodeKeys.Pk.ToByteArray() tcn.initNodesCoordinator(args.ConsensusSize, testHasher, epochStartRegistrationHandler, args.EligibleMap, args.WaitingMap, pkBytes, consensusCache) - tcn.Messenger = CreateMessengerWithNoDiscovery() + tcn.MainMessenger = CreateMessengerWithNoDiscovery() + tcn.FullArchiveMessenger = &p2pmocks.MessengerStub{} tcn.initBlockChain(testHasher) tcn.initBlockProcessor() @@ -271,7 +274,7 @@ func (tcn *TestConsensusNode) initNode(args ArgsTestConsensusNode) { argsKeysHandler := keysManagement.ArgsKeysHandler{ ManagedPeersHolder: keysHolder, PrivateKey: tcn.NodeKeys.Sk, - Pid: tcn.Messenger.ID(), + Pid: tcn.MainMessenger.ID(), } keysHandler, _ := keysManagement.NewKeysHandler(argsKeysHandler) @@ -285,7 +288,7 @@ func (tcn *TestConsensusNode) initNode(args ArgsTestConsensusNode) { sigHandler, _ := cryptoFactory.NewSigningHandler(signingHandlerArgs) networkComponents := GetDefaultNetworkComponents() - networkComponents.Messenger = tcn.Messenger + networkComponents.Messenger = tcn.MainMessenger networkComponents.InputAntiFlood = &mock.NilAntifloodHandler{} networkComponents.PeerHonesty = &mock.PeerHonestyHandlerStub{} @@ -313,7 +316,8 @@ func (tcn *TestConsensusNode) initNode(args ArgsTestConsensusNode) { processComponents.HeaderSigVerif = &mock.HeaderSigVerifierStub{} processComponents.HeaderIntegrVerif = &mock.HeaderIntegrityVerifierStub{} processComponents.ReqHandler = &testscommon.RequestHandlerStub{} - processComponents.PeerMapper = mock.NewNetworkShardingCollectorMock() + processComponents.MainPeerMapper = mock.NewNetworkShardingCollectorMock() + processComponents.FullArchivePeerMapper = mock.NewNetworkShardingCollectorMock() processComponents.RoundHandlerField = roundHandler processComponents.ScheduledTxsExecutionHandlerInternal = &testscommon.ScheduledTxsExecutionStub{} processComponents.ProcessedMiniBlocksTrackerInternal = &testscommon.ProcessedMiniBlocksTrackerStub{} @@ -511,22 +515,40 @@ func createTestStore() dataRetriever.StorageService { return store } -// ConnectTo will try to initiate a connection to the provided parameter -func (tcn *TestConsensusNode) ConnectTo(connectable Connectable) error { +// ConnectOnMain will try to initiate a connection to the provided parameter on the main messenger +func (tcn *TestConsensusNode) ConnectOnMain(connectable Connectable) error { if check.IfNil(connectable) { return fmt.Errorf("trying to connect to a nil Connectable parameter") } - return tcn.Messenger.ConnectToPeer(connectable.GetConnectableAddress()) + return tcn.MainMessenger.ConnectToPeer(connectable.GetMainConnectableAddress()) } -// GetConnectableAddress returns a non circuit, non 
windows default connectable p2p address -func (tcn *TestConsensusNode) GetConnectableAddress() string { +// ConnectOnFullArchive will try to initiate a connection to the provided parameter on the full archive messenger +func (tcn *TestConsensusNode) ConnectOnFullArchive(connectable Connectable) error { + if check.IfNil(connectable) { + return fmt.Errorf("trying to connect to a nil Connectable parameter") + } + + return tcn.FullArchiveMessenger.ConnectToPeer(connectable.GetMainConnectableAddress()) +} + +// GetMainConnectableAddress returns a non circuit, non windows default connectable p2p address +func (tcn *TestConsensusNode) GetMainConnectableAddress() string { + if tcn == nil { + return "nil" + } + + return GetConnectableAddress(tcn.MainMessenger) +} + +// GetFullArchiveConnectableAddress returns a non circuit, non windows default connectable p2p address of the full archive network +func (tcn *TestConsensusNode) GetFullArchiveConnectableAddress() string { if tcn == nil { return "nil" } - return GetConnectableAddress(tcn.Messenger) + return GetConnectableAddress(tcn.FullArchiveMessenger) } // IsInterfaceNil returns true if there is no value under the interface diff --git a/integrationTests/testHeartbeatNode.go b/integrationTests/testHeartbeatNode.go index d8457d853ab..8217e587437 100644 --- a/integrationTests/testHeartbeatNode.go +++ b/integrationTests/testHeartbeatNode.go @@ -84,29 +84,34 @@ var TestThrottler = &processMock.InterceptorThrottlerStub{ // TestHeartbeatNode represents a container type of class used in integration tests // with all its fields exported type TestHeartbeatNode struct { - ShardCoordinator sharding.Coordinator - NodesCoordinator nodesCoordinator.NodesCoordinator - PeerShardMapper process.NetworkShardingCollector - Messenger p2p.Messenger - NodeKeys *TestNodeKeys - DataPool dataRetriever.PoolsHolder - Sender update.Closer - PeerAuthInterceptor *interceptors.MultiDataInterceptor - HeartbeatInterceptor *interceptors.SingleDataInterceptor - PeerShardInterceptor *interceptors.SingleDataInterceptor - PeerSigHandler crypto.PeerSignatureHandler - WhiteListHandler process.WhiteListHandler - Storage dataRetriever.StorageService - ResolversContainer dataRetriever.ResolversContainer - RequestersContainer dataRetriever.RequestersContainer - RequestersFinder dataRetriever.RequestersFinder - RequestHandler process.RequestHandler - RequestedItemsHandler dataRetriever.RequestedItemsHandler - RequestsProcessor update.Closer - ShardSender update.Closer - DirectConnectionProcessor update.Closer - Interceptor *CountInterceptor - heartbeatExpiryTimespanInSec int64 + ShardCoordinator sharding.Coordinator + NodesCoordinator nodesCoordinator.NodesCoordinator + MainPeerShardMapper process.NetworkShardingCollector + FullArchivePeerShardMapper process.NetworkShardingCollector + MainMessenger p2p.Messenger + FullArchiveMessenger p2p.Messenger + NodeKeys *TestNodeKeys + DataPool dataRetriever.PoolsHolder + Sender update.Closer + PeerAuthInterceptor *interceptors.MultiDataInterceptor + HeartbeatInterceptor *interceptors.SingleDataInterceptor + FullArchiveHeartbeatInterceptor *interceptors.SingleDataInterceptor + PeerShardInterceptor *interceptors.SingleDataInterceptor + FullArchivePeerShardInterceptor *interceptors.SingleDataInterceptor + PeerSigHandler crypto.PeerSignatureHandler + WhiteListHandler process.WhiteListHandler + Storage dataRetriever.StorageService + ResolversContainer dataRetriever.ResolversContainer + RequestersContainer dataRetriever.RequestersContainer + RequestersFinder 
dataRetriever.RequestersFinder + RequestHandler process.RequestHandler + RequestedItemsHandler dataRetriever.RequestedItemsHandler + RequestsProcessor update.Closer + ShardSender update.Closer + MainDirectConnectionProcessor update.Closer + FullArchiveDirectConnectionProcessor update.Closer + Interceptor *CountInterceptor + heartbeatExpiryTimespanInSec int64 } // NewTestHeartbeatNode returns a new TestHeartbeatNode instance with a libp2p messenger @@ -121,21 +126,7 @@ func NewTestHeartbeatNode( keygen := signing.NewKeyGenerator(mcl.NewSuiteBLS12()) sk, pk := keygen.GeneratePair() - pksBytes := make(map[uint32][]byte, maxShards) - pksBytes[nodeShardId], _ = pk.ToByteArray() - nodesCoordinatorInstance := &shardingMocks.NodesCoordinatorStub{ - GetAllValidatorsPublicKeysCalled: func() (map[uint32][][]byte, error) { - keys := make(map[uint32][][]byte) - for shardID := uint32(0); shardID < maxShards; shardID++ { - keys[shardID] = append(keys[shardID], pksBytes[shardID]) - } - - shardID := core.MetachainShardId - keys[shardID] = append(keys[shardID], pksBytes[shardID]) - - return keys, nil - }, GetValidatorWithPublicKeyCalled: func(publicKey []byte) (nodesCoordinator.Validator, uint32, error) { validatorInstance, _ := nodesCoordinator.NewValidator(publicKey, defaultChancesSelection, 1) return validatorInstance, 0, nil @@ -158,7 +149,8 @@ func NewTestHeartbeatNode( shardCoordinator, _ := sharding.NewMultiShardCoordinator(maxShards, nodeShardId) - messenger := CreateMessengerFromConfig(p2pConfig) + p2pKey := mock.NewPrivateKeyMock() + messenger := CreateMessengerFromConfigWithPeersRatingHandler(p2pConfig, &p2pmocks.PeersRatingHandlerStub{}, p2pKey) pidPk, _ := storageunit.NewCache(storageunit.CacheConfig{Type: storageunit.LRUCache, Capacity: 1000}) pkShardId, _ := storageunit.NewCache(storageunit.CacheConfig{Type: storageunit.LRUCache, Capacity: 1000}) pidShardId, _ := storageunit.NewCache(storageunit.CacheConfig{Type: storageunit.LRUCache, Capacity: 1000}) @@ -175,21 +167,43 @@ func NewTestHeartbeatNode( } err = messenger.SetPeerShardResolver(peerShardMapper) if err != nil { - log.Error("error setting NewPeerShardMapper in p2p messenger", "error", err) + log.Error("error setting the peer shard mapper for the main p2p messenger", "error", err) + } + + fullArchiveMessenger := CreateMessengerFromConfigWithPeersRatingHandler(p2pConfig, &p2pmocks.PeersRatingHandlerStub{}, p2pKey) + pidPkFullArch, _ := storageunit.NewCache(storageunit.CacheConfig{Type: storageunit.LRUCache, Capacity: 1000}) + pkShardIdFullArch, _ := storageunit.NewCache(storageunit.CacheConfig{Type: storageunit.LRUCache, Capacity: 1000}) + pidShardIdFullArch, _ := storageunit.NewCache(storageunit.CacheConfig{Type: storageunit.LRUCache, Capacity: 1000}) + argFullArch := networksharding.ArgPeerShardMapper{ + PeerIdPkCache: pidPkFullArch, + FallbackPkShardCache: pkShardIdFullArch, + FallbackPidShardCache: pidShardIdFullArch, + NodesCoordinator: nodesCoordinatorInstance, + PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + } + peerShardMapperFullArch, err := networksharding.NewPeerShardMapper(argFullArch) + if err != nil { + log.Error("error creating NewPeerShardMapper for full archive network", "error", err) + } + err = fullArchiveMessenger.SetPeerShardResolver(peerShardMapperFullArch) + if err != nil { + log.Error("error setting the peer shard mapper for the full archive p2p messenger", "error", err) } thn := &TestHeartbeatNode{ ShardCoordinator: shardCoordinator, NodesCoordinator: nodesCoordinatorInstance, - Messenger: messenger, + 
MainMessenger: messenger, + FullArchiveMessenger: fullArchiveMessenger, PeerSigHandler: peerSigHandler, - PeerShardMapper: peerShardMapper, + MainPeerShardMapper: peerShardMapper, + FullArchivePeerShardMapper: peerShardMapperFullArch, heartbeatExpiryTimespanInSec: heartbeatExpiryTimespanInSec, } - localId := thn.Messenger.ID() + localId := thn.MainMessenger.ID() pkBytes, _ := pk.ToByteArray() - thn.PeerShardMapper.UpdatePeerIDInfo(localId, pkBytes, shardCoordinator.SelfId()) + thn.MainPeerShardMapper.UpdatePeerIDInfo(localId, pkBytes, shardCoordinator.SelfId()) thn.NodeKeys = &TestNodeKeys{ MainKey: &TestKeyPair{ @@ -248,21 +262,23 @@ func NewTestHeartbeatNodeWithCoordinator( } err = messenger.SetPeerShardResolver(peerShardMapper) if err != nil { - log.Error("error setting NewPeerShardMapper in p2p messenger", "error", err) + log.Error("error setting the peer shard mapper for the main p2p messenger", "error", err) } thn := &TestHeartbeatNode{ ShardCoordinator: shardCoordinator, NodesCoordinator: coordinator, - Messenger: messenger, + MainMessenger: messenger, + FullArchiveMessenger: &p2pmocks.MessengerStub{}, PeerSigHandler: peerSigHandler, - PeerShardMapper: peerShardMapper, + MainPeerShardMapper: peerShardMapper, + FullArchivePeerShardMapper: &mock.PeerShardMapperStub{}, Interceptor: NewCountInterceptor(), heartbeatExpiryTimespanInSec: 30, } - localId := thn.Messenger.ID() - thn.PeerShardMapper.UpdatePeerIDInfo(localId, []byte(""), shardCoordinator.SelfId()) + localId := thn.MainMessenger.ID() + thn.MainPeerShardMapper.UpdatePeerIDInfo(localId, []byte(""), shardCoordinator.SelfId()) thn.NodeKeys = keys @@ -389,7 +405,7 @@ func (thn *TestHeartbeatNode) InitTestHeartbeatNode(tb testing.TB, minPeersWaiti thn.initCrossShardPeerTopicNotifier(tb) thn.initDirectConnectionProcessor(tb) - for len(thn.Messenger.Peers()) < minPeersWaiting { + for len(thn.MainMessenger.Peers()) < minPeersWaiting { time.Sleep(time.Second) } @@ -412,7 +428,8 @@ func (thn *TestHeartbeatNode) initStorage() { func (thn *TestHeartbeatNode) initSender() { identifierHeartbeat := common.HeartbeatV2Topic + thn.ShardCoordinator.CommunicationIdentifier(thn.ShardCoordinator.SelfId()) argsSender := sender.ArgSender{ - Messenger: thn.Messenger, + MainMessenger: thn.MainMessenger, + FullArchiveMessenger: thn.FullArchiveMessenger, Marshaller: TestMarshaller, PeerAuthenticationTopic: common.PeerAuthenticationTopic, HeartbeatTopic: identifierHeartbeat, @@ -448,12 +465,13 @@ func (thn *TestHeartbeatNode) initSender() { func (thn *TestHeartbeatNode) initResolversAndRequesters() { dataPacker, _ := partitioning.NewSimpleDataPacker(TestMarshaller) - _ = thn.Messenger.CreateTopic(common.ConsensusTopic+thn.ShardCoordinator.CommunicationIdentifier(thn.ShardCoordinator.SelfId()), true) + _ = thn.MainMessenger.CreateTopic(common.ConsensusTopic+thn.ShardCoordinator.CommunicationIdentifier(thn.ShardCoordinator.SelfId()), true) payloadValidator, _ := validator.NewPeerAuthenticationPayloadValidator(thn.heartbeatExpiryTimespanInSec) resolverContainerFactoryArgs := resolverscontainer.FactoryArgs{ ShardCoordinator: thn.ShardCoordinator, - Messenger: thn.Messenger, + MainMessenger: thn.MainMessenger, + FullArchiveMessenger: thn.FullArchiveMessenger, Store: thn.Storage, Marshalizer: TestMarshaller, DataPools: thn.DataPool, @@ -464,12 +482,13 @@ func (thn *TestHeartbeatNode) initResolversAndRequesters() { return &trieMock.TrieStub{} }, }, - SizeCheckDelta: 100, - InputAntifloodHandler: &mock.NilAntifloodHandler{}, - OutputAntifloodHandler: 
&mock.NilAntifloodHandler{}, - NumConcurrentResolvingJobs: 10, - PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - PayloadValidator: payloadValidator, + SizeCheckDelta: 100, + InputAntifloodHandler: &mock.NilAntifloodHandler{}, + OutputAntifloodHandler: &mock.NilAntifloodHandler{}, + NumConcurrentResolvingJobs: 10, + MainPreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + FullArchivePreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + PayloadValidator: payloadValidator, } requestersContainerFactoryArgs := requesterscontainer.FactoryArgs{ @@ -477,15 +496,17 @@ func (thn *TestHeartbeatNode) initResolversAndRequesters() { NumCrossShardPeers: 2, NumTotalPeers: 3, NumFullHistoryPeers: 3}, - ShardCoordinator: thn.ShardCoordinator, - Messenger: thn.Messenger, - Marshaller: TestMarshaller, - Uint64ByteSliceConverter: TestUint64Converter, - OutputAntifloodHandler: &mock.NilAntifloodHandler{}, - CurrentNetworkEpochProvider: &mock.CurrentNetworkEpochProviderStub{}, - PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - PeersRatingHandler: &p2pmocks.PeersRatingHandlerStub{}, - SizeCheckDelta: 0, + ShardCoordinator: thn.ShardCoordinator, + MainMessenger: thn.MainMessenger, + FullArchiveMessenger: thn.FullArchiveMessenger, + Marshaller: TestMarshaller, + Uint64ByteSliceConverter: TestUint64Converter, + OutputAntifloodHandler: &mock.NilAntifloodHandler{}, + CurrentNetworkEpochProvider: &mock.CurrentNetworkEpochProviderStub{}, + MainPreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + FullArchivePreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + PeersRatingHandler: &p2pmocks.PeersRatingHandlerStub{}, + SizeCheckDelta: 0, } if thn.ShardCoordinator.SelfId() == core.MetachainShardId { @@ -557,18 +578,20 @@ func (thn *TestHeartbeatNode) initInterceptors() { PeerSignatureHandler: thn.PeerSigHandler, SignaturesHandler: &processMock.SignaturesHandlerStub{}, HeartbeatExpiryTimespanInSec: thn.heartbeatExpiryTimespanInSec, - PeerID: thn.Messenger.ID(), + PeerID: thn.MainMessenger.ID(), } thn.createPeerAuthInterceptor(argsFactory) thn.createHeartbeatInterceptor(argsFactory) + thn.createFullArchiveHeartbeatInterceptor(argsFactory) thn.createPeerShardInterceptor(argsFactory) + thn.createFullArchivePeerShardInterceptor(argsFactory) } func (thn *TestHeartbeatNode) createPeerAuthInterceptor(argsFactory interceptorFactory.ArgInterceptedDataFactory) { args := interceptorsProcessor.ArgPeerAuthenticationInterceptorProcessor{ PeerAuthenticationCacher: thn.DataPool.PeerAuthentications(), - PeerShardMapper: thn.PeerShardMapper, + PeerShardMapper: thn.MainPeerShardMapper, Marshaller: TestMarshaller, HardforkTrigger: &testscommon.HardforkTriggerStub{}, } @@ -581,21 +604,42 @@ func (thn *TestHeartbeatNode) createHeartbeatInterceptor(argsFactory interceptor args := interceptorsProcessor.ArgHeartbeatInterceptorProcessor{ HeartbeatCacher: thn.DataPool.Heartbeats(), ShardCoordinator: thn.ShardCoordinator, - PeerShardMapper: thn.PeerShardMapper, + PeerShardMapper: thn.MainPeerShardMapper, } hbProcessor, _ := interceptorsProcessor.NewHeartbeatInterceptorProcessor(args) hbFactory, _ := interceptorFactory.NewInterceptedHeartbeatDataFactory(argsFactory) identifierHeartbeat := common.HeartbeatV2Topic + thn.ShardCoordinator.CommunicationIdentifier(thn.ShardCoordinator.SelfId()) - thn.HeartbeatInterceptor = thn.initSingleDataInterceptor(identifierHeartbeat, hbFactory, hbProcessor) + thn.HeartbeatInterceptor = thn.initSingleDataInterceptor(identifierHeartbeat, hbFactory, hbProcessor, thn.MainMessenger) +} + +func (thn *TestHeartbeatNode) 
createFullArchiveHeartbeatInterceptor(argsFactory interceptorFactory.ArgInterceptedDataFactory) { + args := interceptorsProcessor.ArgHeartbeatInterceptorProcessor{ + HeartbeatCacher: thn.DataPool.Heartbeats(), + ShardCoordinator: thn.ShardCoordinator, + PeerShardMapper: thn.FullArchivePeerShardMapper, + } + hbProcessor, _ := interceptorsProcessor.NewHeartbeatInterceptorProcessor(args) + hbFactory, _ := interceptorFactory.NewInterceptedHeartbeatDataFactory(argsFactory) + identifierHeartbeat := common.HeartbeatV2Topic + thn.ShardCoordinator.CommunicationIdentifier(thn.ShardCoordinator.SelfId()) + thn.FullArchiveHeartbeatInterceptor = thn.initSingleDataInterceptor(identifierHeartbeat, hbFactory, hbProcessor, thn.FullArchiveMessenger) } func (thn *TestHeartbeatNode) createPeerShardInterceptor(argsFactory interceptorFactory.ArgInterceptedDataFactory) { args := interceptorsProcessor.ArgPeerShardInterceptorProcessor{ - PeerShardMapper: thn.PeerShardMapper, + PeerShardMapper: thn.MainPeerShardMapper, } dciProcessor, _ := interceptorsProcessor.NewPeerShardInterceptorProcessor(args) dciFactory, _ := interceptorFactory.NewInterceptedPeerShardFactory(argsFactory) - thn.PeerShardInterceptor = thn.initSingleDataInterceptor(common.ConnectionTopic, dciFactory, dciProcessor) + thn.PeerShardInterceptor = thn.initSingleDataInterceptor(common.ConnectionTopic, dciFactory, dciProcessor, thn.MainMessenger) +} + +func (thn *TestHeartbeatNode) createFullArchivePeerShardInterceptor(argsFactory interceptorFactory.ArgInterceptedDataFactory) { + args := interceptorsProcessor.ArgPeerShardInterceptorProcessor{ + PeerShardMapper: thn.FullArchivePeerShardMapper, + } + dciProcessor, _ := interceptorsProcessor.NewPeerShardInterceptorProcessor(args) + dciFactory, _ := interceptorFactory.NewInterceptedPeerShardFactory(argsFactory) + thn.FullArchivePeerShardInterceptor = thn.initSingleDataInterceptor(common.ConnectionTopic, dciFactory, dciProcessor, thn.FullArchiveMessenger) } func (thn *TestHeartbeatNode) initMultiDataInterceptor(topic string, dataFactory process.InterceptedDataFactory, processor process.InterceptorProcessor) *interceptors.MultiDataInterceptor { @@ -613,16 +657,16 @@ func (thn *TestHeartbeatNode) initMultiDataInterceptor(topic string, dataFactory }, }, PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - CurrentPeerId: thn.Messenger.ID(), + CurrentPeerId: thn.MainMessenger.ID(), }, ) - thn.registerTopicValidator(topic, mdInterceptor) + thn.registerTopicValidator(topic, mdInterceptor, thn.MainMessenger) return mdInterceptor } -func (thn *TestHeartbeatNode) initSingleDataInterceptor(topic string, dataFactory process.InterceptedDataFactory, processor process.InterceptorProcessor) *interceptors.SingleDataInterceptor { +func (thn *TestHeartbeatNode) initSingleDataInterceptor(topic string, dataFactory process.InterceptedDataFactory, processor process.InterceptorProcessor, messenger p2p.Messenger) *interceptors.SingleDataInterceptor { sdInterceptor, _ := interceptors.NewSingleDataInterceptor( interceptors.ArgSingleDataInterceptor{ Topic: topic, @@ -636,11 +680,11 @@ func (thn *TestHeartbeatNode) initSingleDataInterceptor(topic string, dataFactor }, }, PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - CurrentPeerId: thn.Messenger.ID(), + CurrentPeerId: thn.MainMessenger.ID(), }, ) - thn.registerTopicValidator(topic, sdInterceptor) + thn.registerTopicValidator(topic, sdInterceptor, messenger) return sdInterceptor } @@ -663,7 +707,8 @@ func (thn *TestHeartbeatNode) initRequestsProcessor() { func (thn 
*TestHeartbeatNode) initShardSender(tb testing.TB) { args := sender.ArgPeerShardSender{ - Messenger: thn.Messenger, + MainMessenger: thn.MainMessenger, + FullArchiveMessenger: thn.FullArchiveMessenger, Marshaller: TestMarshaller, ShardCoordinator: thn.ShardCoordinator, TimeBetweenSends: 5 * time.Second, @@ -679,46 +724,87 @@ func (thn *TestHeartbeatNode) initShardSender(tb testing.TB) { func (thn *TestHeartbeatNode) initDirectConnectionProcessor(tb testing.TB) { argsDirectConnectionProcessor := processor.ArgsDirectConnectionProcessor{ TimeToReadDirectConnections: 5 * time.Second, - Messenger: thn.Messenger, - PeerShardMapper: thn.PeerShardMapper, + Messenger: thn.MainMessenger, + PeerShardMapper: thn.MainPeerShardMapper, ShardCoordinator: thn.ShardCoordinator, BaseIntraShardTopic: ShardTopic, BaseCrossShardTopic: ShardTopic, } var err error - thn.DirectConnectionProcessor, err = processor.NewDirectConnectionProcessor(argsDirectConnectionProcessor) + thn.MainDirectConnectionProcessor, err = processor.NewDirectConnectionProcessor(argsDirectConnectionProcessor) + require.Nil(tb, err) + + argsDirectConnectionProcessor = processor.ArgsDirectConnectionProcessor{ + TimeToReadDirectConnections: 5 * time.Second, + Messenger: thn.FullArchiveMessenger, + PeerShardMapper: thn.FullArchivePeerShardMapper, + ShardCoordinator: thn.ShardCoordinator, + BaseIntraShardTopic: ShardTopic, + BaseCrossShardTopic: ShardTopic, + } + + thn.FullArchiveDirectConnectionProcessor, err = processor.NewDirectConnectionProcessor(argsDirectConnectionProcessor) require.Nil(tb, err) } func (thn *TestHeartbeatNode) initCrossShardPeerTopicNotifier(tb testing.TB) { argsCrossShardPeerTopicNotifier := monitor.ArgsCrossShardPeerTopicNotifier{ ShardCoordinator: thn.ShardCoordinator, - PeerShardMapper: thn.PeerShardMapper, + PeerShardMapper: thn.MainPeerShardMapper, } crossShardPeerTopicNotifier, err := monitor.NewCrossShardPeerTopicNotifier(argsCrossShardPeerTopicNotifier) require.Nil(tb, err) - err = thn.Messenger.AddPeerTopicNotifier(crossShardPeerTopicNotifier) + err = thn.MainMessenger.AddPeerTopicNotifier(crossShardPeerTopicNotifier) + require.Nil(tb, err) + + argsCrossShardPeerTopicNotifier = monitor.ArgsCrossShardPeerTopicNotifier{ + ShardCoordinator: thn.ShardCoordinator, + PeerShardMapper: thn.FullArchivePeerShardMapper, + } + fullArchiveCrossShardPeerTopicNotifier, err := monitor.NewCrossShardPeerTopicNotifier(argsCrossShardPeerTopicNotifier) + require.Nil(tb, err) + + err = thn.FullArchiveMessenger.AddPeerTopicNotifier(fullArchiveCrossShardPeerTopicNotifier) require.Nil(tb, err) + +} + +// ConnectOnMain will try to initiate a connection to the provided parameter on the main messenger +func (thn *TestHeartbeatNode) ConnectOnMain(connectable Connectable) error { + if check.IfNil(connectable) { + return fmt.Errorf("trying to connect to a nil Connectable parameter") + } + + return thn.MainMessenger.ConnectToPeer(connectable.GetMainConnectableAddress()) } -// ConnectTo will try to initiate a connection to the provided parameter -func (thn *TestHeartbeatNode) ConnectTo(connectable Connectable) error { +// ConnectOnFullArchive will try to initiate a connection to the provided parameter on the full archive messenger +func (thn *TestHeartbeatNode) ConnectOnFullArchive(connectable Connectable) error { if check.IfNil(connectable) { return fmt.Errorf("trying to connect to a nil Connectable parameter") } - return thn.Messenger.ConnectToPeer(connectable.GetConnectableAddress()) + return 
thn.FullArchiveMessenger.ConnectToPeer(connectable.GetFullArchiveConnectableAddress()) +} + +// GetMainConnectableAddress returns a non circuit, non windows default connectable p2p address +func (thn *TestHeartbeatNode) GetMainConnectableAddress() string { + if thn == nil { + return "nil" + } + + return GetConnectableAddress(thn.MainMessenger) } -// GetConnectableAddress returns a non circuit, non windows default connectable p2p address -func (thn *TestHeartbeatNode) GetConnectableAddress() string { +// GetFullArchiveConnectableAddress returns a non circuit, non windows default connectable p2p address of the full archive network +func (thn *TestHeartbeatNode) GetFullArchiveConnectableAddress() string { if thn == nil { return "nil" } - return GetConnectableAddress(thn.Messenger) + return GetConnectableAddress(thn.FullArchiveMessenger) } // MakeDisplayTableForHeartbeatNodes returns a string containing counters for received messages for all provided test nodes @@ -730,9 +816,9 @@ func MakeDisplayTableForHeartbeatNodes(nodes map[uint32][]*TestHeartbeatNode) st for _, n := range nodesList { buffPk, _ := n.NodeKeys.MainKey.Pk.ToByteArray() - peerInfo := n.Messenger.GetConnectedPeersInfo() + peerInfo := n.MainMessenger.GetConnectedPeersInfo() - pid := n.Messenger.ID().Pretty() + pid := n.MainMessenger.ID().Pretty() lineData := display.NewLineData( false, []string{ @@ -742,13 +828,12 @@ func MakeDisplayTableForHeartbeatNodes(nodes map[uint32][]*TestHeartbeatNode) st fmt.Sprintf("%d", n.CountGlobalMessages()), fmt.Sprintf("%d", n.CountIntraShardMessages()), fmt.Sprintf("%d", n.CountCrossShardMessages()), - fmt.Sprintf("%d/%d/%d/%d/%d/%d/%d/%d", - len(n.Messenger.ConnectedPeers()), + fmt.Sprintf("%d/%d/%d/%d/%d/%d/%d", + len(n.MainMessenger.ConnectedPeers()), peerInfo.NumIntraShardValidators, peerInfo.NumCrossShardValidators, peerInfo.NumIntraShardObservers, peerInfo.NumCrossShardObservers, - peerInfo.NumFullHistoryObservers, len(peerInfo.UnknownPeers), len(peerInfo.Seeders), ), @@ -764,14 +849,14 @@ func MakeDisplayTableForHeartbeatNodes(nodes map[uint32][]*TestHeartbeatNode) st } // registerTopicValidator registers a message processor instance on the provided topic -func (thn *TestHeartbeatNode) registerTopicValidator(topic string, processor p2p.MessageProcessor) { - err := thn.Messenger.CreateTopic(topic, true) +func (thn *TestHeartbeatNode) registerTopicValidator(topic string, processor p2p.MessageProcessor, messenger p2p.Messenger) { + err := messenger.CreateTopic(topic, true) if err != nil { fmt.Printf("error while creating topic %s: %s\n", topic, err.Error()) return } - err = thn.Messenger.RegisterMessageProcessor(topic, "test", processor) + err = messenger.RegisterMessageProcessor(topic, "test", processor) if err != nil { fmt.Printf("error while registering topic validator %s: %s\n", topic, err.Error()) return @@ -780,14 +865,14 @@ func (thn *TestHeartbeatNode) registerTopicValidator(topic string, processor p2p // CreateTestInterceptors creates test interceptors that count the number of received messages func (thn *TestHeartbeatNode) CreateTestInterceptors() { - thn.registerTopicValidator(GlobalTopic, thn.Interceptor) + thn.registerTopicValidator(GlobalTopic, thn.Interceptor, thn.MainMessenger) metaIdentifier := ShardTopic + thn.ShardCoordinator.CommunicationIdentifier(core.MetachainShardId) - thn.registerTopicValidator(metaIdentifier, thn.Interceptor) + thn.registerTopicValidator(metaIdentifier, thn.Interceptor, thn.MainMessenger) for i := uint32(0); i < 
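// Illustrative sketch (editor-added, hypothetical helper, not part of the patch): since
// registerTopicValidator now receives the target messenger as a parameter, an interceptor that must
// listen on both networks can simply be registered twice, once per messenger:
func (thn *TestHeartbeatNode) registerOnBothNetworks(topic string, processor p2p.MessageProcessor) {
	thn.registerTopicValidator(topic, processor, thn.MainMessenger)
	thn.registerTopicValidator(topic, processor, thn.FullArchiveMessenger)
}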
thn.ShardCoordinator.NumberOfShards(); i++ { identifier := ShardTopic + thn.ShardCoordinator.CommunicationIdentifier(i) - thn.registerTopicValidator(identifier, thn.Interceptor) + thn.registerTopicValidator(identifier, thn.Interceptor, thn.MainMessenger) } } @@ -831,8 +916,10 @@ func (thn *TestHeartbeatNode) Close() { _ = thn.RequestersContainer.Close() _ = thn.ResolversContainer.Close() _ = thn.ShardSender.Close() - _ = thn.Messenger.Close() - _ = thn.DirectConnectionProcessor.Close() + _ = thn.MainMessenger.Close() + _ = thn.FullArchiveMessenger.Close() + _ = thn.MainDirectConnectionProcessor.Close() + _ = thn.FullArchiveDirectConnectionProcessor.Close() } // IsInterfaceNil returns true if there is no value under the interface diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index 16fe7bf86d7..ebbf0fff697 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -165,12 +165,12 @@ func CreateMessengerWithKadDht(initialAddr string) p2p.Messenger { P2pConfig: createP2PConfig(initialAddresses), SyncTimer: &p2pFactory.LocalSyncTimer{}, PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - NodeOperationMode: p2p.NormalOperation, PeersRatingHandler: &p2pmocks.PeersRatingHandlerStub{}, ConnectionWatcherType: p2p.ConnectionWatcherTypePrint, P2pPrivateKey: mock.NewPrivateKeyMock(), P2pSingleSigner: &mock.SignerMock{}, P2pKeyGenerator: &mock.KeyGenMock{}, + Logger: logger.GetOrCreate("tests/p2p"), } libP2PMes, err := p2pFactory.NewNetworkMessenger(arg) @@ -187,17 +187,12 @@ func CreateMessengerFromConfig(p2pConfig p2pConfig.P2PConfig) p2p.Messenger { P2pConfig: p2pConfig, SyncTimer: &p2pFactory.LocalSyncTimer{}, PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - NodeOperationMode: p2p.NormalOperation, PeersRatingHandler: &p2pmocks.PeersRatingHandlerStub{}, ConnectionWatcherType: p2p.ConnectionWatcherTypePrint, P2pPrivateKey: mock.NewPrivateKeyMock(), P2pSingleSigner: &mock.SignerMock{}, P2pKeyGenerator: &mock.KeyGenMock{}, - } - - if p2pConfig.Sharding.AdditionalConnections.MaxFullHistoryObservers > 0 { - // we deliberately set this, automatically choose full archive node mode - arg.NodeOperationMode = p2p.FullArchiveMode + Logger: logger.GetOrCreate("tests/p2p"), } libP2PMes, err := p2pFactory.NewNetworkMessenger(arg) @@ -207,24 +202,19 @@ func CreateMessengerFromConfig(p2pConfig p2pConfig.P2PConfig) p2p.Messenger { } // CreateMessengerFromConfigWithPeersRatingHandler creates a new libp2p messenger with provided configuration -func CreateMessengerFromConfigWithPeersRatingHandler(p2pConfig p2pConfig.P2PConfig, peersRatingHandler p2p.PeersRatingHandler) p2p.Messenger { +func CreateMessengerFromConfigWithPeersRatingHandler(p2pConfig p2pConfig.P2PConfig, peersRatingHandler p2p.PeersRatingHandler, p2pKey crypto.PrivateKey) p2p.Messenger { arg := p2pFactory.ArgsNetworkMessenger{ Marshaller: TestMarshalizer, ListenAddress: p2p.ListenLocalhostAddrWithIp4AndTcp, P2pConfig: p2pConfig, SyncTimer: &p2pFactory.LocalSyncTimer{}, PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - NodeOperationMode: p2p.NormalOperation, PeersRatingHandler: peersRatingHandler, ConnectionWatcherType: p2p.ConnectionWatcherTypePrint, - P2pPrivateKey: mock.NewPrivateKeyMock(), + P2pPrivateKey: p2pKey, P2pSingleSigner: &mock.SignerMock{}, P2pKeyGenerator: &mock.KeyGenMock{}, - } - - if p2pConfig.Sharding.AdditionalConnections.MaxFullHistoryObservers > 0 { - // we deliberately set this, automatically choose full archive node mode - arg.NodeOperationMode = 
p2p.FullArchiveMode + Logger: logger.GetOrCreate("tests/p2p"), } libP2PMes, err := p2pFactory.NewNetworkMessenger(arg) @@ -256,7 +246,7 @@ func CreateMessengerWithNoDiscovery() p2p.Messenger { } // CreateMessengerWithNoDiscoveryAndPeersRatingHandler creates a new libp2p messenger with no peer discovery -func CreateMessengerWithNoDiscoveryAndPeersRatingHandler(peersRatingHanlder p2p.PeersRatingHandler) p2p.Messenger { +func CreateMessengerWithNoDiscoveryAndPeersRatingHandler(peersRatingHanlder p2p.PeersRatingHandler, p2pKey crypto.PrivateKey) p2p.Messenger { p2pCfg := p2pConfig.P2PConfig{ Node: p2pConfig.NodeConfig{ Port: "0", @@ -269,7 +259,7 @@ func CreateMessengerWithNoDiscoveryAndPeersRatingHandler(peersRatingHanlder p2p. }, } - return CreateMessengerFromConfigWithPeersRatingHandler(p2pCfg, peersRatingHanlder) + return CreateMessengerFromConfigWithPeersRatingHandler(p2pCfg, peersRatingHanlder, p2pKey) } // CreateFixedNetworkOf8Peers assembles a network as following: @@ -896,7 +886,7 @@ func MakeDisplayTable(nodes []*TestProcessorNode) string { fmt.Sprintf("%d", atomic.LoadInt32(&n.CounterMbRecv)), fmt.Sprintf("%d", atomic.LoadInt32(&n.CounterHdrRecv)), fmt.Sprintf("%d", atomic.LoadInt32(&n.CounterMetaRcv)), - fmt.Sprintf("%d", len(n.Messenger.ConnectedPeers())), + fmt.Sprintf("%d", len(n.MainMessenger.ConnectedPeers())), }, ) } @@ -1420,10 +1410,10 @@ func ConnectNodes(nodes []Connectable) { for j := i + 1; j < len(nodes); j++ { src := nodes[i] dst := nodes[j] - err := src.ConnectTo(dst) + err := src.ConnectOnMain(dst) if err != nil { encounteredErrors = append(encounteredErrors, - fmt.Errorf("%w while %s was connecting to %s", err, src.GetConnectableAddress(), dst.GetConnectableAddress())) + fmt.Errorf("%w while %s was connecting to %s", err, src.GetMainConnectableAddress(), dst.GetMainConnectableAddress())) } } } diff --git a/integrationTests/testNetwork.go b/integrationTests/testNetwork.go index 7380db2caea..711a0f7e202 100644 --- a/integrationTests/testNetwork.go +++ b/integrationTests/testNetwork.go @@ -462,7 +462,7 @@ func (net *TestNetwork) initDefaults() { func (net *TestNetwork) closeNodes() { for _, node := range net.Nodes { - err := node.Messenger.Close() + err := node.MainMessenger.Close() net.handleOrBypassError(err) _ = node.VMContainer.Close() } diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 9647323898e..a8465c04d6a 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -254,8 +254,10 @@ type CryptoParams struct { // Connectable defines the operations for a struct to become connectable by other struct // In other words, all instances that implement this interface are able to connect with each other type Connectable interface { - ConnectTo(connectable Connectable) error - GetConnectableAddress() string + ConnectOnMain(connectable Connectable) error + ConnectOnFullArchive(connectable Connectable) error + GetMainConnectableAddress() string + GetFullArchiveConnectableAddress() string IsInterfaceNil() bool } @@ -288,16 +290,20 @@ type ArgTestProcessorNode struct { AppStatusHandler core.AppStatusHandler StatusMetrics external.StatusMetricsHandler WithPeersRatingHandler bool + NodeOperationMode p2p.NodeOperation } // TestProcessorNode represents a container type of class used in integration tests // with all its fields exported type TestProcessorNode struct { - ShardCoordinator sharding.Coordinator - NodesCoordinator nodesCoordinator.NodesCoordinator - PeerShardMapper 
process.PeerShardMapper - NodesSetup sharding.GenesisNodesSetupHandler - Messenger p2p.Messenger + ShardCoordinator sharding.Coordinator + NodesCoordinator nodesCoordinator.NodesCoordinator + MainPeerShardMapper process.PeerShardMapper + FullArchivePeerShardMapper process.PeerShardMapper + NodesSetup sharding.GenesisNodesSetupHandler + MainMessenger p2p.Messenger + FullArchiveMessenger p2p.Messenger + NodeOperationMode p2p.NodeOperation OwnAccount *TestWalletAccount NodeKeys *TestNodeKeys @@ -315,15 +321,16 @@ type TestProcessorNode struct { EconomicsData *economics.TestEconomicsData RatingsData *rating.RatingsData - BlockBlackListHandler process.TimeCacher - HeaderValidator process.HeaderConstructionValidator - BlockTracker process.BlockTracker - InterceptorsContainer process.InterceptorsContainer - ResolversContainer dataRetriever.ResolversContainer - RequestersContainer dataRetriever.RequestersContainer - RequestersFinder dataRetriever.RequestersFinder - RequestHandler process.RequestHandler - WasmVMChangeLocker common.Locker + BlockBlackListHandler process.TimeCacher + HeaderValidator process.HeaderConstructionValidator + BlockTracker process.BlockTracker + MainInterceptorsContainer process.InterceptorsContainer + FullArchiveInterceptorsContainer process.InterceptorsContainer + ResolversContainer dataRetriever.ResolversContainer + RequestersContainer dataRetriever.RequestersContainer + RequestersFinder dataRetriever.RequestersFinder + RequestHandler process.RequestHandler + WasmVMChangeLocker common.Locker InterimProcContainer process.IntermediateProcessorContainer TxProcessor process.TransactionProcessor @@ -356,6 +363,7 @@ type TestProcessorNode struct { EpochStartTrigger TestEpochStartTrigger EpochStartNotifier notifier.EpochStartNotifier + EpochProvider dataRetriever.CurrentNetworkEpochProviderHandler MultiSigner crypto.MultiSigner HeaderSigVerifier process.InterceptedHeaderSigVerifier @@ -441,29 +449,28 @@ func newBaseTestProcessorNode(args ArgTestProcessorNode) *TestProcessorNode { var peersRatingHandler p2p.PeersRatingHandler peersRatingHandler = &p2pmocks.PeersRatingHandlerStub{} - topRatedCache := testscommon.NewCacherMock() - badRatedCache := testscommon.NewCacherMock() + var peersRatingMonitor p2p.PeersRatingMonitor + peersRatingMonitor = &p2pmocks.PeersRatingMonitorStub{} if args.WithPeersRatingHandler { + topRatedCache := testscommon.NewCacherMock() + badRatedCache := testscommon.NewCacherMock() peersRatingHandler, _ = p2pFactory.NewPeersRatingHandler( p2pFactory.ArgPeersRatingHandler{ TopRatedCache: topRatedCache, BadRatedCache: badRatedCache, + Logger: &testscommon.LoggerStub{}, }) - } - - messenger := CreateMessengerWithNoDiscoveryAndPeersRatingHandler(peersRatingHandler) - - var peersRatingMonitor p2p.PeersRatingMonitor - peersRatingMonitor = &p2pmocks.PeersRatingMonitorStub{} - if args.WithPeersRatingHandler { peersRatingMonitor, _ = p2pFactory.NewPeersRatingMonitor( p2pFactory.ArgPeersRatingMonitor{ - TopRatedCache: topRatedCache, - BadRatedCache: badRatedCache, - ConnectionsProvider: messenger, + TopRatedCache: topRatedCache, + BadRatedCache: badRatedCache, }) } + p2pKey := mock.NewPrivateKeyMock() + messenger := CreateMessengerWithNoDiscoveryAndPeersRatingHandler(peersRatingHandler, p2pKey) + fullArchiveMessenger := CreateMessengerWithNoDiscoveryAndPeersRatingHandler(peersRatingHandler, p2pKey) + genericEpochNotifier := forking.NewGenericEpochNotifier() epochsConfig := args.EpochsConfig if epochsConfig == nil { @@ -471,6 +478,11 @@ func 
newBaseTestProcessorNode(args ArgTestProcessorNode) *TestProcessorNode { } enableEpochsHandler, _ := enablers.NewEnableEpochsHandler(*epochsConfig, genericEpochNotifier) + nodeOperationMode := p2p.NormalOperation + if len(args.NodeOperationMode) != 0 { + nodeOperationMode = args.NodeOperationMode + } + if args.RoundsConfig == nil { defaultRoundsConfig := GetDefaultRoundsConfig() args.RoundsConfig = &defaultRoundsConfig @@ -480,31 +492,35 @@ func newBaseTestProcessorNode(args ArgTestProcessorNode) *TestProcessorNode { logsProcessor, _ := transactionLog.NewTxLogProcessor(transactionLog.ArgTxLogProcessor{Marshalizer: TestMarshalizer}) tpn := &TestProcessorNode{ - ShardCoordinator: shardCoordinator, - Messenger: messenger, - NodesCoordinator: nodesCoordinatorInstance, - ChainID: ChainID, - MinTransactionVersion: MinTransactionVersion, - NodesSetup: nodesSetup, - HistoryRepository: &dblookupextMock.HistoryRepositoryStub{}, - EpochNotifier: genericEpochNotifier, - RoundNotifier: genericRoundNotifier, - EnableRoundsHandler: enableRoundsHandler, - EnableEpochsHandler: enableEpochsHandler, - WasmVMChangeLocker: &sync.RWMutex{}, - TransactionLogProcessor: logsProcessor, - Bootstrapper: mock.NewTestBootstrapperMock(), - PeersRatingHandler: peersRatingHandler, - PeerShardMapper: mock.NewNetworkShardingCollectorMock(), - EnableEpochs: *epochsConfig, - UseValidVmBlsSigVerifier: args.WithBLSSigVerifier, - StorageBootstrapper: &mock.StorageBootstrapperMock{}, - BootstrapStorer: &mock.BoostrapStorerMock{}, - RatingsData: args.RatingsData, - EpochStartNotifier: args.EpochStartSubscriber, - GuardedAccountHandler: &guardianMocks.GuardedAccountHandlerStub{}, - AppStatusHandler: appStatusHandler, - PeersRatingMonitor: peersRatingMonitor, + ShardCoordinator: shardCoordinator, + MainMessenger: messenger, + FullArchiveMessenger: fullArchiveMessenger, + NodeOperationMode: nodeOperationMode, + NodesCoordinator: nodesCoordinatorInstance, + ChainID: ChainID, + MinTransactionVersion: MinTransactionVersion, + NodesSetup: nodesSetup, + HistoryRepository: &dblookupextMock.HistoryRepositoryStub{}, + EpochNotifier: genericEpochNotifier, + RoundNotifier: genericRoundNotifier, + EnableRoundsHandler: enableRoundsHandler, + EnableEpochsHandler: enableEpochsHandler, + EpochProvider: &mock.CurrentNetworkEpochProviderStub{}, + WasmVMChangeLocker: &sync.RWMutex{}, + TransactionLogProcessor: logsProcessor, + Bootstrapper: mock.NewTestBootstrapperMock(), + PeersRatingHandler: peersRatingHandler, + MainPeerShardMapper: mock.NewNetworkShardingCollectorMock(), + FullArchivePeerShardMapper: mock.NewNetworkShardingCollectorMock(), + EnableEpochs: *epochsConfig, + UseValidVmBlsSigVerifier: args.WithBLSSigVerifier, + StorageBootstrapper: &mock.StorageBootstrapperMock{}, + BootstrapStorer: &mock.BoostrapStorerMock{}, + RatingsData: args.RatingsData, + EpochStartNotifier: args.EpochStartSubscriber, + GuardedAccountHandler: &guardianMocks.GuardedAccountHandlerStub{}, + AppStatusHandler: appStatusHandler, + PeersRatingMonitor: peersRatingMonitor, } tpn.NodeKeys = args.NodeKeys @@ -555,27 +571,46 @@ func NewTestProcessorNode(args ArgTestProcessorNode) *TestProcessorNode { return tpn } -// ConnectTo will try to initiate a connection to the provided parameter -func (tpn *TestProcessorNode) ConnectTo(connectable Connectable) error { +// ConnectOnMain will try to initiate a connection to the provided parameter on the main messenger +func (tpn *TestProcessorNode) ConnectOnMain(connectable Connectable) error { if check.IfNil(connectable) { return 
fmt.Errorf("trying to connect to a nil Connectable parameter") } - return tpn.Messenger.ConnectToPeer(connectable.GetConnectableAddress()) + return tpn.MainMessenger.ConnectToPeer(connectable.GetMainConnectableAddress()) } -// GetConnectableAddress returns a non circuit, non windows default connectable p2p address -func (tpn *TestProcessorNode) GetConnectableAddress() string { +// ConnectOnFullArchive will try to initiate a connection to the provided parameter on the full archive messenger +func (tpn *TestProcessorNode) ConnectOnFullArchive(connectable Connectable) error { + if check.IfNil(connectable) { + return fmt.Errorf("trying to connect to a nil Connectable parameter") + } + + return tpn.FullArchiveMessenger.ConnectToPeer(connectable.GetFullArchiveConnectableAddress()) +} + +// GetMainConnectableAddress returns a non circuit, non windows default connectable p2p address main network +func (tpn *TestProcessorNode) GetMainConnectableAddress() string { if tpn == nil { return "nil" } - return GetConnectableAddress(tpn.Messenger) + return GetConnectableAddress(tpn.MainMessenger) +} + +// GetFullArchiveConnectableAddress returns a non circuit, non windows default connectable p2p address of the full archive network +func (tpn *TestProcessorNode) GetFullArchiveConnectableAddress() string { + if tpn == nil { + return "nil" + } + + return GetConnectableAddress(tpn.FullArchiveMessenger) } // Close - func (tpn *TestProcessorNode) Close() { - _ = tpn.Messenger.Close() + _ = tpn.MainMessenger.Close() + _ = tpn.FullArchiveMessenger.Close() _ = tpn.VMContainer.Close() } @@ -779,15 +814,15 @@ func (tpn *TestProcessorNode) initTestNodeWithArgs(args ArgTestProcessorNode) { tpn.BroadcastMessenger, _ = sposFactory.GetBroadcastMessenger( TestMarshalizer, TestHasher, - tpn.Messenger, + tpn.MainMessenger, tpn.ShardCoordinator, tpn.OwnAccount.PeerSigHandler, tpn.DataPool.Headers(), - tpn.InterceptorsContainer, + tpn.MainInterceptorsContainer, &testscommon.AlarmSchedulerStub{}, testscommon.NewKeysHandlerSingleSignerMock( tpn.NodeKeys.MainKey.Sk, - tpn.Messenger.ID(), + tpn.MainMessenger.ID(), ), ) @@ -968,15 +1003,15 @@ func (tpn *TestProcessorNode) InitializeProcessors(gasMap map[string]map[string] tpn.BroadcastMessenger, _ = sposFactory.GetBroadcastMessenger( TestMarshalizer, TestHasher, - tpn.Messenger, + tpn.MainMessenger, tpn.ShardCoordinator, tpn.OwnAccount.PeerSigHandler, tpn.DataPool.Headers(), - tpn.InterceptorsContainer, + tpn.MainInterceptorsContainer, &testscommon.AlarmSchedulerStub{}, testscommon.NewKeysHandlerSingleSignerMock( tpn.NodeKeys.MainKey.Sk, - tpn.Messenger.ID(), + tpn.MainMessenger.ID(), ), ) tpn.setGenesisBlock() @@ -1222,7 +1257,8 @@ func (tpn *TestProcessorNode) initInterceptors(heartbeatPk string) { Accounts: tpn.AccntState, ShardCoordinator: tpn.ShardCoordinator, NodesCoordinator: tpn.NodesCoordinator, - Messenger: tpn.Messenger, + MainMessenger: tpn.MainMessenger, + FullArchiveMessenger: tpn.FullArchiveMessenger, Store: tpn.Storage, DataPool: tpn.DataPool, MaxTxNonceDeltaAllowed: maxTxNonceDeltaAllowed, @@ -1242,12 +1278,14 @@ func (tpn *TestProcessorNode) initInterceptors(heartbeatPk string) { PeerSignatureHandler: &processMock.PeerSignatureHandlerStub{}, SignaturesHandler: &processMock.SignaturesHandlerStub{}, HeartbeatExpiryTimespanInSec: 30, - PeerShardMapper: tpn.PeerShardMapper, + MainPeerShardMapper: tpn.MainPeerShardMapper, + FullArchivePeerShardMapper: tpn.FullArchivePeerShardMapper, HardforkTrigger: tpn.HardforkTrigger, + NodeOperationMode: tpn.NodeOperationMode, } 
interceptorContainerFactory, _ := interceptorscontainer.NewMetaInterceptorsContainerFactory(metaInterceptorContainerFactoryArgs) - tpn.InterceptorsContainer, err = interceptorContainerFactory.Create() + tpn.MainInterceptorsContainer, tpn.FullArchiveInterceptorsContainer, err = interceptorContainerFactory.Create() if err != nil { log.Debug("interceptor container factory Create", "error", err.Error()) } @@ -1287,7 +1325,8 @@ func (tpn *TestProcessorNode) initInterceptors(heartbeatPk string) { Accounts: tpn.AccntState, ShardCoordinator: tpn.ShardCoordinator, NodesCoordinator: tpn.NodesCoordinator, - Messenger: tpn.Messenger, + MainMessenger: tpn.MainMessenger, + FullArchiveMessenger: tpn.FullArchiveMessenger, Store: tpn.Storage, DataPool: tpn.DataPool, MaxTxNonceDeltaAllowed: maxTxNonceDeltaAllowed, @@ -1307,12 +1346,14 @@ func (tpn *TestProcessorNode) initInterceptors(heartbeatPk string) { PeerSignatureHandler: &processMock.PeerSignatureHandlerStub{}, SignaturesHandler: &processMock.SignaturesHandlerStub{}, HeartbeatExpiryTimespanInSec: 30, - PeerShardMapper: tpn.PeerShardMapper, + MainPeerShardMapper: tpn.MainPeerShardMapper, + FullArchivePeerShardMapper: tpn.FullArchivePeerShardMapper, HardforkTrigger: tpn.HardforkTrigger, + NodeOperationMode: tpn.NodeOperationMode, } interceptorContainerFactory, _ := interceptorscontainer.NewShardInterceptorsContainerFactory(shardIntereptorContainerFactoryArgs) - tpn.InterceptorsContainer, err = interceptorContainerFactory.Create() + tpn.MainInterceptorsContainer, tpn.FullArchiveInterceptorsContainer, err = interceptorContainerFactory.Create() if err != nil { fmt.Println(err.Error()) } @@ -1350,25 +1391,30 @@ func (tpn *TestProcessorNode) createHardforkTrigger(heartbeatPk string) []byte { func (tpn *TestProcessorNode) initResolvers() { dataPacker, _ := partitioning.NewSimpleDataPacker(TestMarshalizer) - _ = tpn.Messenger.CreateTopic(common.ConsensusTopic+tpn.ShardCoordinator.CommunicationIdentifier(tpn.ShardCoordinator.SelfId()), true) + consensusTopic := common.ConsensusTopic + tpn.ShardCoordinator.CommunicationIdentifier(tpn.ShardCoordinator.SelfId()) + _ = tpn.MainMessenger.CreateTopic(consensusTopic, true) + _ = tpn.FullArchiveMessenger.CreateTopic(consensusTopic, true) payloadValidator, _ := validator.NewPeerAuthenticationPayloadValidator(60) preferredPeersHolder, _ := p2pFactory.NewPeersHolder([]string{}) + fullArchivePreferredPeersHolder, _ := p2pFactory.NewPeersHolder([]string{}) resolverContainerFactory := resolverscontainer.FactoryArgs{ - ShardCoordinator: tpn.ShardCoordinator, - Messenger: tpn.Messenger, - Store: tpn.Storage, - Marshalizer: TestMarshalizer, - DataPools: tpn.DataPool, - Uint64ByteSliceConverter: TestUint64Converter, - DataPacker: dataPacker, - TriesContainer: tpn.TrieContainer, - SizeCheckDelta: 100, - InputAntifloodHandler: &mock.NilAntifloodHandler{}, - OutputAntifloodHandler: &mock.NilAntifloodHandler{}, - NumConcurrentResolvingJobs: 10, - PreferredPeersHolder: preferredPeersHolder, - PayloadValidator: payloadValidator, + ShardCoordinator: tpn.ShardCoordinator, + MainMessenger: tpn.MainMessenger, + FullArchiveMessenger: tpn.FullArchiveMessenger, + Store: tpn.Storage, + Marshalizer: TestMarshalizer, + DataPools: tpn.DataPool, + Uint64ByteSliceConverter: TestUint64Converter, + DataPacker: dataPacker, + TriesContainer: tpn.TrieContainer, + SizeCheckDelta: 100, + InputAntifloodHandler: &mock.NilAntifloodHandler{}, + OutputAntifloodHandler: &mock.NilAntifloodHandler{}, + NumConcurrentResolvingJobs: 10, + MainPreferredPeersHolder: 
preferredPeersHolder, + FullArchivePreferredPeersHolder: fullArchivePreferredPeersHolder, + PayloadValidator: payloadValidator, } var err error @@ -1392,15 +1438,17 @@ func (tpn *TestProcessorNode) initRequesters() { NumTotalPeers: 3, NumFullHistoryPeers: 3, }, - ShardCoordinator: tpn.ShardCoordinator, - Messenger: tpn.Messenger, - Marshaller: TestMarshaller, - Uint64ByteSliceConverter: TestUint64Converter, - OutputAntifloodHandler: &mock.NilAntifloodHandler{}, - CurrentNetworkEpochProvider: &mock.CurrentNetworkEpochProviderStub{}, - PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - PeersRatingHandler: tpn.PeersRatingHandler, - SizeCheckDelta: 0, + ShardCoordinator: tpn.ShardCoordinator, + MainMessenger: tpn.MainMessenger, + FullArchiveMessenger: tpn.FullArchiveMessenger, + Marshaller: TestMarshaller, + Uint64ByteSliceConverter: TestUint64Converter, + OutputAntifloodHandler: &mock.NilAntifloodHandler{}, + CurrentNetworkEpochProvider: tpn.EpochProvider, + MainPreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + FullArchivePreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + PeersRatingHandler: tpn.PeersRatingHandler, + SizeCheckDelta: 0, } if tpn.ShardCoordinator.SelfId() == core.MetachainShardId { @@ -2071,10 +2119,9 @@ func (tpn *TestProcessorNode) initBlockProcessor(stateCheckpointModulus uint) { coreComponents.Uint64ByteSliceConverterField = TestUint64Converter coreComponents.RoundHandlerField = tpn.RoundHandler coreComponents.EnableEpochsHandlerField = tpn.EnableEpochsHandler - coreComponents.EnableRoundsHandlerField = tpn.EnableRoundsHandler coreComponents.EpochNotifierField = tpn.EpochNotifier - coreComponents.RoundNotifierField = tpn.RoundNotifier coreComponents.EconomicsDataField = tpn.EconomicsData + coreComponents.RoundNotifierField = tpn.RoundNotifier dataComponents := GetDefaultDataComponents() dataComponents.Store = tpn.Storage @@ -2377,11 +2424,12 @@ func (tpn *TestProcessorNode) initNode() { processComponents.BlackListHdl = tpn.BlockBlackListHandler processComponents.NodesCoord = tpn.NodesCoordinator processComponents.ShardCoord = tpn.ShardCoordinator - processComponents.IntContainer = tpn.InterceptorsContainer + processComponents.IntContainer = tpn.MainInterceptorsContainer + processComponents.FullArchiveIntContainer = tpn.FullArchiveInterceptorsContainer processComponents.HistoryRepositoryInternal = tpn.HistoryRepository processComponents.WhiteListHandlerInternal = tpn.WhiteListHandler processComponents.WhiteListerVerifiedTxsInternal = tpn.WhiteListerVerifiedTxs - processComponents.TxsSenderHandlerField = createTxsSender(tpn.ShardCoordinator, tpn.Messenger) + processComponents.TxsSenderHandlerField = createTxsSender(tpn.ShardCoordinator, tpn.MainMessenger) processComponents.HardforkTriggerField = tpn.HardforkTrigger cryptoComponents := GetDefaultCryptoComponents() @@ -2413,7 +2461,8 @@ func (tpn *TestProcessorNode) initNode() { stateComponents.AccountsRepo, _ = state.NewAccountsRepository(argsAccountsRepo) networkComponents := GetDefaultNetworkComponents() - networkComponents.Messenger = tpn.Messenger + networkComponents.Messenger = tpn.MainMessenger + networkComponents.FullArchiveNetworkMessengerField = tpn.FullArchiveMessenger networkComponents.PeersRatingHandlerField = tpn.PeersRatingHandler networkComponents.PeersRatingMonitorField = tpn.PeersRatingMonitor @@ -2434,7 +2483,7 @@ func (tpn *TestProcessorNode) initNode() { err = nodeDebugFactory.CreateInterceptedDebugHandler( tpn.Node, - tpn.InterceptorsContainer, + tpn.MainInterceptorsContainer, 
tpn.ResolversContainer, tpn.RequestersFinder, config.InterceptorResolverDebugConfig{ @@ -2959,7 +3008,8 @@ func (tpn *TestProcessorNode) createHeartbeatWithHardforkTrigger() { cryptoComponents.PeerSignHandler = psh networkComponents := GetDefaultNetworkComponents() - networkComponents.Messenger = tpn.Messenger + networkComponents.Messenger = tpn.MainMessenger + networkComponents.FullArchiveNetworkMessengerField = tpn.FullArchiveMessenger networkComponents.InputAntiFlood = &mock.NilAntifloodHandler{} processComponents := GetDefaultProcessComponents() @@ -2970,7 +3020,8 @@ func (tpn *TestProcessorNode) createHeartbeatWithHardforkTrigger() { processComponents.BlackListHdl = tpn.BlockBlackListHandler processComponents.NodesCoord = tpn.NodesCoordinator processComponents.ShardCoord = tpn.ShardCoordinator - processComponents.IntContainer = tpn.InterceptorsContainer + processComponents.IntContainer = tpn.MainInterceptorsContainer + processComponents.FullArchiveIntContainer = tpn.FullArchiveInterceptorsContainer processComponents.ValidatorStatistics = &mock.ValidatorStatisticsProcessorStub{ GetValidatorInfoForRootHashCalled: func(_ []byte) (map[uint32][]*state.ValidatorInfo, error) { return map[uint32][]*state.ValidatorInfo{ @@ -2984,7 +3035,7 @@ func (tpn *TestProcessorNode) createHeartbeatWithHardforkTrigger() { processComponents.WhiteListerVerifiedTxsInternal = tpn.WhiteListerVerifiedTxs processComponents.WhiteListHandlerInternal = tpn.WhiteListHandler processComponents.HistoryRepositoryInternal = tpn.HistoryRepository - processComponents.TxsSenderHandlerField = createTxsSender(tpn.ShardCoordinator, tpn.Messenger) + processComponents.TxsSenderHandlerField = createTxsSender(tpn.ShardCoordinator, tpn.MainMessenger) processComponents.HardforkTriggerField = tpn.HardforkTrigger @@ -3196,7 +3247,8 @@ func GetDefaultProcessComponents() *mock.ProcessComponentsStub { ReqHandler: &testscommon.RequestHandlerStub{}, TxLogsProcess: &mock.TxLogProcessorMock{}, HeaderConstructValidator: &mock.HeaderValidatorStub{}, - PeerMapper: &p2pmocks.NetworkShardingCollectorStub{}, + MainPeerMapper: &p2pmocks.NetworkShardingCollectorStub{}, + FullArchivePeerMapper: &p2pmocks.NetworkShardingCollectorStub{}, FallbackHdrValidator: &testscommon.FallBackHeaderValidatorStub{}, NodeRedundancyHandlerInternal: &mock.RedundancyHandlerStub{ IsRedundancyNodeCalled: func() bool { @@ -3263,12 +3315,15 @@ func GetDefaultStateComponents() *testFactory.StateComponentsMock { // GetDefaultNetworkComponents - func GetDefaultNetworkComponents() *mock.NetworkComponentsStub { return &mock.NetworkComponentsStub{ - Messenger: &p2pmocks.MessengerStub{}, - InputAntiFlood: &mock.P2PAntifloodHandlerStub{}, - OutputAntiFlood: &mock.P2PAntifloodHandlerStub{}, - PeerBlackList: &mock.PeerBlackListCacherStub{}, - PeersRatingHandlerField: &p2pmocks.PeersRatingHandlerStub{}, - PeersRatingMonitorField: &p2pmocks.PeersRatingMonitorStub{}, + Messenger: &p2pmocks.MessengerStub{}, + InputAntiFlood: &mock.P2PAntifloodHandlerStub{}, + OutputAntiFlood: &mock.P2PAntifloodHandlerStub{}, + PeerBlackList: &mock.PeerBlackListCacherStub{}, + PeersRatingHandlerField: &p2pmocks.PeersRatingHandlerStub{}, + PeersRatingMonitorField: &p2pmocks.PeersRatingMonitorStub{}, + FullArchiveNetworkMessengerField: &p2pmocks.MessengerStub{}, + PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + FullArchivePreferredPeersHolder: &p2pmocks.PeersHolderStub{}, } } diff --git a/integrationTests/testSyncNode.go b/integrationTests/testSyncNode.go index d98515fc5b3..ee4d95a0c63 100644 --- 
a/integrationTests/testSyncNode.go +++ b/integrationTests/testSyncNode.go @@ -160,7 +160,7 @@ func (tpn *TestProcessorNode) createShardBootstrapper() (TestBootstrapper, error ShardCoordinator: tpn.ShardCoordinator, Accounts: tpn.AccntState, BlackListHandler: tpn.BlockBlackListHandler, - NetworkWatcher: tpn.Messenger, + NetworkWatcher: tpn.MainMessenger, BootStorer: tpn.BootstrapStorer, StorageBootstrapper: tpn.StorageBootstrapper, EpochHandler: tpn.EpochStartTrigger, @@ -206,7 +206,7 @@ func (tpn *TestProcessorNode) createMetaChainBootstrapper() (TestBootstrapper, e ShardCoordinator: tpn.ShardCoordinator, Accounts: tpn.AccntState, BlackListHandler: tpn.BlockBlackListHandler, - NetworkWatcher: tpn.Messenger, + NetworkWatcher: tpn.MainMessenger, BootStorer: tpn.BootstrapStorer, StorageBootstrapper: tpn.StorageBootstrapper, EpochHandler: tpn.EpochStartTrigger, diff --git a/integrationTests/vm/esdt/nft/esdtNFT/esdtNft_test.go b/integrationTests/vm/esdt/nft/esdtNFT/esdtNft_test.go index 717615301b6..0117b84fd3d 100644 --- a/integrationTests/vm/esdt/nft/esdtNFT/esdtNft_test.go +++ b/integrationTests/vm/esdt/nft/esdtNFT/esdtNft_test.go @@ -779,7 +779,7 @@ func testESDTSemiFungibleTokenTransferRole(t *testing.T, numOfShards int) { defer func() { for _, n := range nodes { - _ = n.Messenger.Close() + _ = n.MainMessenger.Close() } }() @@ -929,7 +929,7 @@ func TestESDTSFTWithEnhancedTransferRole(t *testing.T) { defer func() { for _, n := range nodes { - _ = n.Messenger.Close() + _ = n.MainMessenger.Close() } }() diff --git a/node/metrics/metrics.go b/node/metrics/metrics.go index d584be00004..e862de23c9c 100644 --- a/node/metrics/metrics.go +++ b/node/metrics/metrics.go @@ -69,7 +69,6 @@ func InitBaseMetrics(appStatusHandler core.AppStatusHandler) error { appStatusHandler.SetStringValue(common.MetricP2PIntraShardObservers, initString) appStatusHandler.SetStringValue(common.MetricP2PCrossShardValidators, initString) appStatusHandler.SetStringValue(common.MetricP2PCrossShardObservers, initString) - appStatusHandler.SetStringValue(common.MetricP2PFullHistoryObservers, initString) appStatusHandler.SetStringValue(common.MetricP2PUnknownPeers, initString) appStatusHandler.SetStringValue(common.MetricInflation, initZeroString) diff --git a/node/metrics/metrics_test.go b/node/metrics/metrics_test.go index 381a16100d6..a3a6cd8ea30 100644 --- a/node/metrics/metrics_test.go +++ b/node/metrics/metrics_test.go @@ -52,7 +52,6 @@ func TestInitBaseMetrics(t *testing.T) { common.MetricP2PIntraShardObservers, common.MetricP2PCrossShardValidators, common.MetricP2PCrossShardObservers, - common.MetricP2PFullHistoryObservers, common.MetricP2PUnknownPeers, common.MetricInflation, common.MetricDevRewardsInEpoch, diff --git a/node/mock/factory/networkComponentsMock.go b/node/mock/factory/networkComponentsMock.go index c9dd18d26b6..88acf5468ea 100644 --- a/node/mock/factory/networkComponentsMock.go +++ b/node/mock/factory/networkComponentsMock.go @@ -8,13 +8,15 @@ import ( // NetworkComponentsMock - type NetworkComponentsMock struct { - Messenger p2p.Messenger - InputAntiFlood factory.P2PAntifloodHandler - OutputAntiFlood factory.P2PAntifloodHandler - PeerBlackList process.PeerBlackListCacher - PreferredPeersHolder factory.PreferredPeersHolderHandler - PeersRatingHandlerField p2p.PeersRatingHandler - PeersRatingMonitorField p2p.PeersRatingMonitor + Messenger p2p.Messenger + InputAntiFlood factory.P2PAntifloodHandler + OutputAntiFlood factory.P2PAntifloodHandler + PeerBlackList process.PeerBlackListCacher + PreferredPeersHolder 
factory.PreferredPeersHolderHandler + PeersRatingHandlerField p2p.PeersRatingHandler + PeersRatingMonitorField p2p.PeersRatingMonitor + FullArchiveNetworkMessengerField p2p.Messenger + FullArchivePreferredPeersHolder factory.PreferredPeersHolderHandler } // PubKeyCacher - @@ -77,11 +79,21 @@ func (ncm *NetworkComponentsMock) PeersRatingMonitor() p2p.PeersRatingMonitor { return ncm.PeersRatingMonitorField } +// FullArchiveNetworkMessenger - +func (ncm *NetworkComponentsMock) FullArchiveNetworkMessenger() p2p.Messenger { + return ncm.FullArchiveNetworkMessengerField +} + // String - func (ncm *NetworkComponentsMock) String() string { return "NetworkComponentsMock" } +// FullArchivePreferredPeersHolderHandler - +func (ncm *NetworkComponentsMock) FullArchivePreferredPeersHolderHandler() factory.PreferredPeersHolderHandler { + return ncm.FullArchivePreferredPeersHolder +} + // IsInterfaceNil - func (ncm *NetworkComponentsMock) IsInterfaceNil() bool { return ncm == nil diff --git a/node/node.go b/node/node.go index 6e057a97fde..e02f84be2cb 100644 --- a/node/node.go +++ b/node/node.go @@ -1092,9 +1092,9 @@ func (n *Node) GetPeerInfo(pid string) ([]core.QueryP2PPeerInfo, error) { return peerInfoSlice, nil } -// GetConnectedPeersRatings returns the connected peers ratings -func (n *Node) GetConnectedPeersRatings() string { - return n.networkComponents.PeersRatingMonitor().GetConnectedPeersRatings() +// GetConnectedPeersRatingsOnMainNetwork returns the connected peers ratings on the main network +func (n *Node) GetConnectedPeersRatingsOnMainNetwork() (string, error) { + return n.networkComponents.PeersRatingMonitor().GetConnectedPeersRatings(n.networkComponents.NetworkMessenger()) } // GetEpochStartDataAPI returns epoch start data of a given epoch diff --git a/node/nodeHelper.go b/node/nodeHelper.go index f04ba91bbcb..b1b5a27c816 100644 --- a/node/nodeHelper.go +++ b/node/nodeHelper.go @@ -9,6 +9,7 @@ import ( "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/factory" "github.com/multiversx/mx-chain-go/node/nodeDebugFactory" + "github.com/multiversx/mx-chain-go/p2p" procFactory "github.com/multiversx/mx-chain-go/process/factory" "github.com/multiversx/mx-chain-go/process/throttle/antiflood/blackList" "github.com/multiversx/mx-chain-go/sharding" @@ -23,12 +24,19 @@ func prepareOpenTopics( selfID := shardCoordinator.SelfId() selfShardHeartbeatV2Topic := common.HeartbeatV2Topic + core.CommunicationIdentifierBetweenShards(selfID, selfID) if selfID == core.MetachainShardId { - antiflood.SetTopicsForAll(common.PeerAuthenticationTopic, selfShardHeartbeatV2Topic, common.ConnectionTopic) + antiflood.SetTopicsForAll( + common.PeerAuthenticationTopic, + selfShardHeartbeatV2Topic, + common.ConnectionTopic) return } selfShardTxTopic := procFactory.TransactionTopic + core.CommunicationIdentifierBetweenShards(selfID, selfID) - antiflood.SetTopicsForAll(common.PeerAuthenticationTopic, selfShardHeartbeatV2Topic, common.ConnectionTopic, selfShardTxTopic) + antiflood.SetTopicsForAll( + common.PeerAuthenticationTopic, + selfShardHeartbeatV2Topic, + common.ConnectionTopic, + selfShardTxTopic) } // CreateNode is the node factory @@ -50,16 +58,7 @@ func CreateNode( ) (*Node, error) { prepareOpenTopics(networkComponents.InputAntiFloodHandler(), processComponents.ShardCoordinator()) - peerDenialEvaluator, err := blackList.NewPeerDenialEvaluator( - networkComponents.PeerBlackListHandler(), - networkComponents.PubKeyCacher(), - processComponents.PeerShardMapper(), - ) - if err != nil { - return 
nil, err - } - - err = networkComponents.NetworkMessenger().SetPeerDenialEvaluator(peerDenialEvaluator) + peerDenialEvaluator, err := createAndAttachPeerDenialEvaluators(networkComponents, processComponents) if err != nil { return nil, err } @@ -123,3 +122,38 @@ func CreateNode( return nd, nil } + +func createAndAttachPeerDenialEvaluators( + networkComponents factory.NetworkComponentsHandler, + processComponents factory.ProcessComponentsHandler, +) (p2p.PeerDenialEvaluator, error) { + mainPeerDenialEvaluator, err := blackList.NewPeerDenialEvaluator( + networkComponents.PeerBlackListHandler(), + networkComponents.PubKeyCacher(), + processComponents.PeerShardMapper(), + ) + if err != nil { + return nil, err + } + + err = networkComponents.NetworkMessenger().SetPeerDenialEvaluator(mainPeerDenialEvaluator) + if err != nil { + return nil, err + } + + fullArchivePeerDenialEvaluator, err := blackList.NewPeerDenialEvaluator( + networkComponents.PeerBlackListHandler(), + networkComponents.PubKeyCacher(), + processComponents.FullArchivePeerShardMapper(), + ) + if err != nil { + return nil, err + } + + err = networkComponents.FullArchiveNetworkMessenger().SetPeerDenialEvaluator(fullArchivePeerDenialEvaluator) + if err != nil { + return nil, err + } + + return mainPeerDenialEvaluator, nil +} diff --git a/node/nodeRunner.go b/node/nodeRunner.go index 6918fd6c557..6a1b1c1e117 100644 --- a/node/nodeRunner.go +++ b/node/nodeRunner.go @@ -1106,7 +1106,8 @@ func (nr *nodeRunner) logSessionInformation( configurationPaths.Genesis, configurationPaths.SmartContracts, configurationPaths.Nodes, - configurationPaths.P2p, + configurationPaths.MainP2p, + configurationPaths.FullArchiveP2p, configurationPaths.Preferences, configurationPaths.Ratings, configurationPaths.SystemSC, @@ -1386,7 +1387,8 @@ func (nr *nodeRunner) CreateManagedNetworkComponents( cryptoComponents mainFactory.CryptoComponentsHolder, ) (mainFactory.NetworkComponentsHandler, error) { networkComponentsFactoryArgs := networkComp.NetworkComponentsFactoryArgs{ - P2pConfig: *nr.configs.P2pConfig, + MainP2pConfig: *nr.configs.MainP2pConfig, + FullArchiveP2pConfig: *nr.configs.FullArchiveP2pConfig, MainConfig: *nr.configs.GeneralConfig, RatingsConfig: *nr.configs.RatingsConfig, StatusHandler: statusCoreComponents.AppStatusHandler(), diff --git a/node/node_test.go b/node/node_test.go index 70416168df4..7a86514150b 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -3735,7 +3735,7 @@ func TestNode_ShouldWork(t *testing.T) { pid2 := "pid2" processComponents := getDefaultProcessComponents() - processComponents.PeerMapper = &p2pmocks.NetworkShardingCollectorStub{ + processComponents.MainPeerMapper = &p2pmocks.NetworkShardingCollectorStub{ GetPeerInfoCalled: func(pid core.PeerID) core.P2PPeerInfo { return core.P2PPeerInfo{ PeerType: 0, @@ -5109,7 +5109,8 @@ func getDefaultProcessComponents() *factoryMock.ProcessComponentsMock { ReqHandler: &testscommon.RequestHandlerStub{}, TxLogsProcess: &mock.TxLogProcessorMock{}, HeaderConstructValidator: &mock.HeaderValidatorStub{}, - PeerMapper: &p2pmocks.NetworkShardingCollectorStub{}, + MainPeerMapper: &p2pmocks.NetworkShardingCollectorStub{}, + FullArchivePeerMapper: &p2pmocks.NetworkShardingCollectorStub{}, WhiteListHandlerInternal: &testscommon.WhiteListHandlerStub{}, WhiteListerVerifiedTxsInternal: &testscommon.WhiteListHandlerStub{}, TxsSenderHandlerField: &txsSenderMock.TxsSenderHandlerMock{}, diff --git a/p2p/config/config.go b/p2p/config/config.go index eb2bf95d07c..311a6e64484 100644 --- 
a/p2p/config/config.go +++ b/p2p/config/config.go @@ -13,7 +13,3 @@ type KadDhtPeerDiscoveryConfig = config.KadDhtPeerDiscoveryConfig // ShardingConfig will hold the network sharding config settings type ShardingConfig = config.ShardingConfig - -// AdditionalConnectionsConfig will hold the additional connections that will be open when certain conditions are met -// All these values should be added to the maximum target peer count value -type AdditionalConnectionsConfig = config.AdditionalConnectionsConfig diff --git a/p2p/constants.go b/p2p/constants.go index 4f0807484b7..620339577dc 100644 --- a/p2p/constants.go +++ b/p2p/constants.go @@ -13,6 +13,15 @@ const NormalOperation = p2p.NormalOperation // FullArchiveMode defines the node operation as a full archive mode const FullArchiveMode = p2p.FullArchiveMode +// NetworkType defines the type of the network a messenger is running on +type NetworkType = p2p.NetworkType + +// MainNetwork defines the main network +const MainNetwork NetworkType = "main" + +// FullArchiveNetwork defines the full archive network +const FullArchiveNetwork NetworkType = "full archive" + // ListsSharder is the variant that uses lists const ListsSharder = p2p.ListsSharder diff --git a/p2p/disabled/networkMessenger.go b/p2p/disabled/networkMessenger.go new file mode 100644 index 00000000000..cd64969f476 --- /dev/null +++ b/p2p/disabled/networkMessenger.go @@ -0,0 +1,191 @@ +package disabled + +import ( + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/p2p" +) + +type networkMessenger struct { +} + +// NewNetworkMessenger creates a new disabled Messenger implementation +func NewNetworkMessenger() *networkMessenger { + return &networkMessenger{} +} + +// Close returns nil as it is disabled +func (netMes *networkMessenger) Close() error { + return nil +} + +// CreateTopic returns nil as it is disabled +func (netMes *networkMessenger) CreateTopic(_ string, _ bool) error { + return nil +} + +// HasTopic returns true as it is disabled +func (netMes *networkMessenger) HasTopic(_ string) bool { + return true +} + +// RegisterMessageProcessor returns nil as it is disabled +func (netMes *networkMessenger) RegisterMessageProcessor(_ string, _ string, _ p2p.MessageProcessor) error { + return nil +} + +// UnregisterAllMessageProcessors returns nil as it is disabled +func (netMes *networkMessenger) UnregisterAllMessageProcessors() error { + return nil +} + +// UnregisterMessageProcessor returns nil as it is disabled +func (netMes *networkMessenger) UnregisterMessageProcessor(_ string, _ string) error { + return nil +} + +// Broadcast does nothing as it is disabled +func (netMes *networkMessenger) Broadcast(_ string, _ []byte) { +} + +// BroadcastOnChannel does nothing as it is disabled +func (netMes *networkMessenger) BroadcastOnChannel(_ string, _ string, _ []byte) { +} + +// BroadcastUsingPrivateKey does nothing as it is disabled +func (netMes *networkMessenger) BroadcastUsingPrivateKey(_ string, _ []byte, _ core.PeerID, _ []byte) { +} + +// BroadcastOnChannelUsingPrivateKey does nothing as it is disabled +func (netMes *networkMessenger) BroadcastOnChannelUsingPrivateKey(_ string, _ string, _ []byte, _ core.PeerID, _ []byte) { +} + +// SendToConnectedPeer returns nil as it is disabled +func (netMes *networkMessenger) SendToConnectedPeer(_ string, _ []byte, _ core.PeerID) error { + return nil +} + +// UnJoinAllTopics returns nil as it is disabled +func (netMes *networkMessenger) UnJoinAllTopics() error { + return nil +} + +// Bootstrap 
returns nil as it is disabled +func (netMes *networkMessenger) Bootstrap() error { + return nil +} + +// Peers returns an empty slice as it is disabled +func (netMes *networkMessenger) Peers() []core.PeerID { + return make([]core.PeerID, 0) +} + +// Addresses returns an empty slice as it is disabled +func (netMes *networkMessenger) Addresses() []string { + return make([]string, 0) +} + +// ConnectToPeer returns nil as it is disabled +func (netMes *networkMessenger) ConnectToPeer(_ string) error { + return nil +} + +// IsConnected returns false as it is disabled +func (netMes *networkMessenger) IsConnected(_ core.PeerID) bool { + return false +} + +// ConnectedPeers returns an empty slice as it is disabled +func (netMes *networkMessenger) ConnectedPeers() []core.PeerID { + return make([]core.PeerID, 0) +} + +// ConnectedAddresses returns an empty slice as it is disabled +func (netMes *networkMessenger) ConnectedAddresses() []string { + return make([]string, 0) +} + +// PeerAddresses returns an empty slice as it is disabled +func (netMes *networkMessenger) PeerAddresses(_ core.PeerID) []string { + return make([]string, 0) +} + +// ConnectedPeersOnTopic returns an empty slice as it is disabled +func (netMes *networkMessenger) ConnectedPeersOnTopic(_ string) []core.PeerID { + return make([]core.PeerID, 0) +} + +// SetPeerShardResolver returns nil as it is disabled +func (netMes *networkMessenger) SetPeerShardResolver(_ p2p.PeerShardResolver) error { + return nil +} + +// GetConnectedPeersInfo returns an empty structure as it is disabled +func (netMes *networkMessenger) GetConnectedPeersInfo() *p2p.ConnectedPeersInfo { + return &p2p.ConnectedPeersInfo{} +} + +// WaitForConnections does nothing as it is disabled +func (netMes *networkMessenger) WaitForConnections(_ time.Duration, _ uint32) { +} + +// IsConnectedToTheNetwork returns true as it is disabled +func (netMes *networkMessenger) IsConnectedToTheNetwork() bool { + return true +} + +// ThresholdMinConnectedPeers returns 0 as it is disabled +func (netMes *networkMessenger) ThresholdMinConnectedPeers() int { + return 0 +} + +// SetThresholdMinConnectedPeers returns nil as it is disabled +func (netMes *networkMessenger) SetThresholdMinConnectedPeers(_ int) error { + return nil +} + +// SetPeerDenialEvaluator returns nil as it is disabled +func (netMes *networkMessenger) SetPeerDenialEvaluator(_ p2p.PeerDenialEvaluator) error { + return nil +} + +// ID returns an empty peerID as it is disabled +func (netMes *networkMessenger) ID() core.PeerID { + return "" +} + +// Port returns 0 as it is disabled +func (netMes *networkMessenger) Port() int { + return 0 +} + +// Sign returns an empty slice and nil as it is disabled +func (netMes *networkMessenger) Sign(_ []byte) ([]byte, error) { + return make([]byte, 0), nil +} + +// Verify returns nil as it is disabled +func (netMes *networkMessenger) Verify(_ []byte, _ core.PeerID, _ []byte) error { + return nil +} + +// SignUsingPrivateKey returns an empty slice and nil as it is disabled +func (netMes *networkMessenger) SignUsingPrivateKey(_ []byte, _ []byte) ([]byte, error) { + return make([]byte, 0), nil +} + +// AddPeerTopicNotifier returns nil as it is disabled +func (netMes *networkMessenger) AddPeerTopicNotifier(_ p2p.PeerTopicNotifier) error { + return nil +} + +// ProcessReceivedMessage returns nil as it is disabled +func (netMes *networkMessenger) ProcessReceivedMessage(_ p2p.MessageP2P, _ core.PeerID, _ p2p.MessageHandler) error { + return nil +} + +// IsInterfaceNil returns true if there is no 
value under the interface +func (netMes *networkMessenger) IsInterfaceNil() bool { + return netMes == nil +} diff --git a/p2p/interface.go b/p2p/interface.go index f643852dc32..aeb39a33689 100644 --- a/p2p/interface.go +++ b/p2p/interface.go @@ -26,6 +26,9 @@ type Messenger = p2p.Messenger // MessageP2P defines what a p2p message can do (should return) type MessageP2P = p2p.MessageP2P +// MessageHandler defines the behaviour of a component able to send and process messages +type MessageHandler = p2p.MessageHandler + // ChannelLoadBalancer defines what a load balancer that uses chans should do type ChannelLoadBalancer interface { AddChannel(channel string) error @@ -97,10 +100,7 @@ type PeersRatingHandler interface { } // PeersRatingMonitor represents an entity able to provide peers ratings -type PeersRatingMonitor interface { - GetConnectedPeersRatings() string - IsInterfaceNil() bool -} +type PeersRatingMonitor = p2p.PeersRatingMonitor // PeerTopicNotifier represents an entity able to handle new notifications on a new peer on a topic type PeerTopicNotifier = p2p.PeerTopicNotifier @@ -125,3 +125,9 @@ type P2PKeyConverter interface { ConvertPublicKeyToPeerID(pk crypto.PublicKey) (core.PeerID, error) IsInterfaceNil() bool } + +// Logger defines the behavior of a data logger component +type Logger = p2p.Logger + +// ConnectionsHandler defines the behaviour of a component able to handle connections +type ConnectionsHandler = p2p.ConnectionsHandler diff --git a/process/factory/interceptorscontainer/args.go b/process/factory/interceptorscontainer/args.go index 459812df1f9..ca8b66c61fd 100644 --- a/process/factory/interceptorscontainer/args.go +++ b/process/factory/interceptorscontainer/args.go @@ -4,6 +4,7 @@ import ( crypto "github.com/multiversx/mx-chain-crypto-go" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/heartbeat" + "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" @@ -17,7 +18,8 @@ type CommonInterceptorsContainerFactoryArgs struct { Accounts state.AccountsAdapter ShardCoordinator sharding.Coordinator NodesCoordinator nodesCoordinator.NodesCoordinator - Messenger process.TopicHandler + MainMessenger process.TopicHandler + FullArchiveMessenger process.TopicHandler Store dataRetriever.StorageService DataPool dataRetriever.PoolsHolder MaxTxNonceDeltaAllowed int @@ -37,6 +39,8 @@ type CommonInterceptorsContainerFactoryArgs struct { PeerSignatureHandler crypto.PeerSignatureHandler SignaturesHandler process.SignaturesHandler HeartbeatExpiryTimespanInSec int64 - PeerShardMapper process.PeerShardMapper + MainPeerShardMapper process.PeerShardMapper + FullArchivePeerShardMapper process.PeerShardMapper HardforkTrigger heartbeat.HardforkTrigger + NodeOperationMode p2p.NodeOperation } diff --git a/process/factory/interceptorscontainer/baseInterceptorsContainerFactory.go b/process/factory/interceptorscontainer/baseInterceptorsContainerFactory.go index c66ac5bea6f..cc7061d7158 100644 --- a/process/factory/interceptorscontainer/baseInterceptorsContainerFactory.go +++ b/process/factory/interceptorscontainer/baseInterceptorsContainerFactory.go @@ -1,6 +1,7 @@ package interceptorscontainer import ( + "fmt" "time" "github.com/multiversx/mx-chain-core-go/core" @@ -9,6 +10,7 @@ import ( "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/heartbeat" + 
"github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/dataValidators" "github.com/multiversx/mx-chain-go/process/factory" @@ -18,32 +20,41 @@ import ( "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/storage" ) -const numGoRoutines = 100 -const chunksProcessorRequestInterval = time.Millisecond * 400 -const minTimespanDurationInSec = int64(1) +const ( + numGoRoutines = 100 + chunksProcessorRequestInterval = time.Millisecond * 400 + minTimespanDurationInSec = int64(1) + errorOnMainNetworkString = "on main network" + errorOnFullArchiveNetworkString = "on full archive network" +) type baseInterceptorsContainerFactory struct { - container process.InterceptorsContainer - shardCoordinator sharding.Coordinator - accounts state.AccountsAdapter - store dataRetriever.StorageService - dataPool dataRetriever.PoolsHolder - messenger process.TopicHandler - nodesCoordinator nodesCoordinator.NodesCoordinator - blockBlackList process.TimeCacher - argInterceptorFactory *interceptorFactory.ArgInterceptedDataFactory - globalThrottler process.InterceptorThrottler - maxTxNonceDeltaAllowed int - antifloodHandler process.P2PAntifloodHandler - whiteListHandler process.WhiteListHandler - whiteListerVerifiedTxs process.WhiteListHandler - preferredPeersHolder process.PreferredPeersHolderHandler - hasher hashing.Hasher - requestHandler process.RequestHandler - peerShardMapper process.PeerShardMapper - hardforkTrigger heartbeat.HardforkTrigger + mainContainer process.InterceptorsContainer + fullArchiveContainer process.InterceptorsContainer + shardCoordinator sharding.Coordinator + accounts state.AccountsAdapter + store dataRetriever.StorageService + dataPool dataRetriever.PoolsHolder + mainMessenger process.TopicHandler + fullArchiveMessenger process.TopicHandler + nodesCoordinator nodesCoordinator.NodesCoordinator + blockBlackList process.TimeCacher + argInterceptorFactory *interceptorFactory.ArgInterceptedDataFactory + globalThrottler process.InterceptorThrottler + maxTxNonceDeltaAllowed int + antifloodHandler process.P2PAntifloodHandler + whiteListHandler process.WhiteListHandler + whiteListerVerifiedTxs process.WhiteListHandler + preferredPeersHolder process.PreferredPeersHolderHandler + hasher hashing.Hasher + requestHandler process.RequestHandler + mainPeerShardMapper process.PeerShardMapper + fullArchivePeerShardMapper process.PeerShardMapper + hardforkTrigger heartbeat.HardforkTrigger + nodeOperationMode p2p.NodeOperation } func checkBaseParams( @@ -53,7 +64,8 @@ func checkBaseParams( accounts state.AccountsAdapter, store dataRetriever.StorageService, dataPool dataRetriever.PoolsHolder, - messenger process.TopicHandler, + mainMessenger process.TopicHandler, + fullArchiveMessenger process.TopicHandler, nodesCoordinator nodesCoordinator.NodesCoordinator, blackList process.TimeCacher, antifloodHandler process.P2PAntifloodHandler, @@ -61,7 +73,8 @@ func checkBaseParams( whiteListerVerifiedTxs process.WhiteListHandler, preferredPeersHolder process.PreferredPeersHolderHandler, requestHandler process.RequestHandler, - peerShardMapper process.PeerShardMapper, + mainPeerShardMapper process.PeerShardMapper, + fullArchivePeerShardMapper process.PeerShardMapper, hardforkTrigger heartbeat.HardforkTrigger, ) error { if check.IfNil(coreComponents) { @@ -73,8 +86,11 @@ func checkBaseParams( if 
check.IfNil(shardCoordinator) { return process.ErrNilShardCoordinator } - if check.IfNil(messenger) { - return process.ErrNilMessenger + if check.IfNil(mainMessenger) { + return fmt.Errorf("%w %s", process.ErrNilMessenger, errorOnMainNetworkString) + } + if check.IfNil(fullArchiveMessenger) { + return fmt.Errorf("%w %s", process.ErrNilMessenger, errorOnFullArchiveNetworkString) } if check.IfNil(store) { return process.ErrNilStore @@ -149,8 +165,11 @@ func checkBaseParams( if check.IfNil(requestHandler) { return process.ErrNilRequestHandler } - if check.IfNil(peerShardMapper) { - return process.ErrNilPeerShardMapper + if check.IfNil(mainPeerShardMapper) { + return fmt.Errorf("%w %s", process.ErrNilPeerShardMapper, errorOnMainNetworkString) + } + if check.IfNil(fullArchivePeerShardMapper) { + return fmt.Errorf("%w %s", process.ErrNilPeerShardMapper, errorOnFullArchiveNetworkString) } if check.IfNil(hardforkTrigger) { return process.ErrNilHardforkTrigger @@ -165,12 +184,34 @@ func (bicf *baseInterceptorsContainerFactory) createTopicAndAssignHandler( createChannel bool, ) (process.Interceptor, error) { - err := bicf.messenger.CreateTopic(topic, createChannel) + err := createTopicAndAssignHandlerOnMessenger(topic, interceptor, createChannel, bicf.mainMessenger) if err != nil { return nil, err } - return interceptor, bicf.messenger.RegisterMessageProcessor(topic, common.DefaultInterceptorsIdentifier, interceptor) + if bicf.nodeOperationMode == p2p.FullArchiveMode { + err = createTopicAndAssignHandlerOnMessenger(topic, interceptor, createChannel, bicf.fullArchiveMessenger) + if err != nil { + return nil, err + } + } + + return interceptor, nil +} + +func createTopicAndAssignHandlerOnMessenger( + topic string, + interceptor process.Interceptor, + createChannel bool, + messenger process.TopicHandler, +) error { + + err := messenger.CreateTopic(topic, createChannel) + if err != nil { + return err + } + + return messenger.RegisterMessageProcessor(topic, common.DefaultInterceptorsIdentifier, interceptor) } // ------- Tx interceptors @@ -206,7 +247,7 @@ func (bicf *baseInterceptorsContainerFactory) generateTxInterceptors() error { keys = append(keys, identifierTx) interceptorSlice = append(interceptorSlice, interceptor) - return bicf.container.AddMultiple(keys, interceptorSlice) + return bicf.addInterceptorsToContainers(keys, interceptorSlice) } func (bicf *baseInterceptorsContainerFactory) createOneTxInterceptor(topic string) (process.Interceptor, error) { @@ -255,7 +296,7 @@ func (bicf *baseInterceptorsContainerFactory) createOneTxInterceptor(topic strin Throttler: bicf.globalThrottler, AntifloodHandler: bicf.antifloodHandler, WhiteListRequest: bicf.whiteListHandler, - CurrentPeerId: bicf.messenger.ID(), + CurrentPeerId: bicf.mainMessenger.ID(), PreferredPeersHolder: bicf.preferredPeersHolder, }, ) @@ -298,7 +339,7 @@ func (bicf *baseInterceptorsContainerFactory) createOneUnsignedTxInterceptor(top Throttler: bicf.globalThrottler, AntifloodHandler: bicf.antifloodHandler, WhiteListRequest: bicf.whiteListHandler, - CurrentPeerId: bicf.messenger.ID(), + CurrentPeerId: bicf.mainMessenger.ID(), PreferredPeersHolder: bicf.preferredPeersHolder, }, ) @@ -341,7 +382,7 @@ func (bicf *baseInterceptorsContainerFactory) createOneRewardTxInterceptor(topic Throttler: bicf.globalThrottler, AntifloodHandler: bicf.antifloodHandler, WhiteListRequest: bicf.whiteListHandler, - CurrentPeerId: bicf.messenger.ID(), + CurrentPeerId: bicf.mainMessenger.ID(), PreferredPeersHolder: bicf.preferredPeersHolder, }, ) @@ -383,7 
+424,7 @@ func (bicf *baseInterceptorsContainerFactory) generateHeaderInterceptors() error Throttler: bicf.globalThrottler, AntifloodHandler: bicf.antifloodHandler, WhiteListRequest: bicf.whiteListHandler, - CurrentPeerId: bicf.messenger.ID(), + CurrentPeerId: bicf.mainMessenger.ID(), PreferredPeersHolder: bicf.preferredPeersHolder, }, ) @@ -396,7 +437,7 @@ func (bicf *baseInterceptorsContainerFactory) generateHeaderInterceptors() error return err } - return bicf.container.Add(identifierHdr, interceptor) + return bicf.addInterceptorsToContainers([]string{identifierHdr}, []process.Interceptor{interceptor}) } // ------- MiniBlocks interceptors @@ -431,15 +472,15 @@ func (bicf *baseInterceptorsContainerFactory) generateMiniBlocksInterceptors() e identifierAllShardsMiniBlocks := factory.MiniBlocksTopic + shardC.CommunicationIdentifier(core.AllShardId) - allShardsMiniBlocksInterceptorinterceptor, err := bicf.createOneMiniBlocksInterceptor(identifierAllShardsMiniBlocks) + allShardsMiniBlocksInterceptor, err := bicf.createOneMiniBlocksInterceptor(identifierAllShardsMiniBlocks) if err != nil { return err } keys[noOfShards+1] = identifierAllShardsMiniBlocks - interceptorsSlice[noOfShards+1] = allShardsMiniBlocksInterceptorinterceptor + interceptorsSlice[noOfShards+1] = allShardsMiniBlocksInterceptor - return bicf.container.AddMultiple(keys, interceptorsSlice) + return bicf.addInterceptorsToContainers(keys, interceptorsSlice) } func (bicf *baseInterceptorsContainerFactory) createOneMiniBlocksInterceptor(topic string) (process.Interceptor, error) { @@ -471,7 +512,7 @@ func (bicf *baseInterceptorsContainerFactory) createOneMiniBlocksInterceptor(top Throttler: bicf.globalThrottler, AntifloodHandler: bicf.antifloodHandler, WhiteListRequest: bicf.whiteListHandler, - CurrentPeerId: bicf.messenger.ID(), + CurrentPeerId: bicf.mainMessenger.ID(), PreferredPeersHolder: bicf.preferredPeersHolder, }, ) @@ -510,7 +551,7 @@ func (bicf *baseInterceptorsContainerFactory) generateMetachainHeaderInterceptor Throttler: bicf.globalThrottler, AntifloodHandler: bicf.antifloodHandler, WhiteListRequest: bicf.whiteListHandler, - CurrentPeerId: bicf.messenger.ID(), + CurrentPeerId: bicf.mainMessenger.ID(), PreferredPeersHolder: bicf.preferredPeersHolder, }, ) @@ -523,7 +564,7 @@ func (bicf *baseInterceptorsContainerFactory) generateMetachainHeaderInterceptor return err } - return bicf.container.Add(identifierHdr, interceptor) + return bicf.addInterceptorsToContainers([]string{identifierHdr}, []process.Interceptor{interceptor}) } func (bicf *baseInterceptorsContainerFactory) createOneTrieNodesInterceptor(topic string) (process.Interceptor, error) { @@ -547,7 +588,7 @@ func (bicf *baseInterceptorsContainerFactory) createOneTrieNodesInterceptor(topi Throttler: bicf.globalThrottler, AntifloodHandler: bicf.antifloodHandler, WhiteListRequest: bicf.whiteListHandler, - CurrentPeerId: bicf.messenger.ID(), + CurrentPeerId: bicf.mainMessenger.ID(), PreferredPeersHolder: bicf.preferredPeersHolder, }, ) @@ -604,7 +645,7 @@ func (bicf *baseInterceptorsContainerFactory) generateUnsignedTxsInterceptors() keys = append(keys, identifierScr) interceptorsSlice = append(interceptorsSlice, interceptor) - return bicf.container.AddMultiple(keys, interceptorsSlice) + return bicf.addInterceptorsToContainers(keys, interceptorsSlice) } //------- PeerAuthentication interceptor @@ -615,7 +656,7 @@ func (bicf *baseInterceptorsContainerFactory) generatePeerAuthenticationIntercep internalMarshaller := 
bicf.argInterceptorFactory.CoreComponents.InternalMarshalizer() argProcessor := processor.ArgPeerAuthenticationInterceptorProcessor{ PeerAuthenticationCacher: bicf.dataPool.PeerAuthentications(), - PeerShardMapper: bicf.peerShardMapper, + PeerShardMapper: bicf.mainPeerShardMapper, Marshaller: internalMarshaller, HardforkTrigger: bicf.hardforkTrigger, } @@ -639,19 +680,19 @@ func (bicf *baseInterceptorsContainerFactory) generatePeerAuthenticationIntercep AntifloodHandler: bicf.antifloodHandler, WhiteListRequest: bicf.whiteListHandler, PreferredPeersHolder: bicf.preferredPeersHolder, - CurrentPeerId: bicf.messenger.ID(), + CurrentPeerId: bicf.mainMessenger.ID(), }, ) if err != nil { return err } - interceptor, err := bicf.createTopicAndAssignHandler(identifierPeerAuthentication, mdInterceptor, true) + err = createTopicAndAssignHandlerOnMessenger(identifierPeerAuthentication, mdInterceptor, true, bicf.mainMessenger) if err != nil { return err } - return bicf.container.Add(identifierPeerAuthentication, interceptor) + return bicf.mainContainer.Add(identifierPeerAuthentication, mdInterceptor) } //------- Heartbeat interceptor @@ -660,43 +701,51 @@ func (bicf *baseInterceptorsContainerFactory) generateHeartbeatInterceptor() err shardC := bicf.shardCoordinator identifierHeartbeat := common.HeartbeatV2Topic + shardC.CommunicationIdentifier(shardC.SelfId()) + interceptor, err := bicf.createHeartbeatV2Interceptor(identifierHeartbeat, bicf.dataPool.Heartbeats(), bicf.mainPeerShardMapper) + if err != nil { + return err + } + + return bicf.addInterceptorsToContainers([]string{identifierHeartbeat}, []process.Interceptor{interceptor}) +} + +func (bicf *baseInterceptorsContainerFactory) createHeartbeatV2Interceptor( + identifier string, + heartbeatCacher storage.Cacher, + peerShardMapper process.PeerShardMapper, +) (process.Interceptor, error) { argHeartbeatProcessor := processor.ArgHeartbeatInterceptorProcessor{ - HeartbeatCacher: bicf.dataPool.Heartbeats(), - ShardCoordinator: shardC, - PeerShardMapper: bicf.peerShardMapper, + HeartbeatCacher: heartbeatCacher, + ShardCoordinator: bicf.shardCoordinator, + PeerShardMapper: peerShardMapper, } heartbeatProcessor, err := processor.NewHeartbeatInterceptorProcessor(argHeartbeatProcessor) if err != nil { - return err + return nil, err } heartbeatFactory, err := interceptorFactory.NewInterceptedHeartbeatDataFactory(*bicf.argInterceptorFactory) if err != nil { - return err + return nil, err } - sdInterceptor, err := interceptors.NewSingleDataInterceptor( + interceptor, err := interceptors.NewSingleDataInterceptor( interceptors.ArgSingleDataInterceptor{ - Topic: identifierHeartbeat, + Topic: identifier, DataFactory: heartbeatFactory, Processor: heartbeatProcessor, Throttler: bicf.globalThrottler, AntifloodHandler: bicf.antifloodHandler, WhiteListRequest: bicf.whiteListHandler, PreferredPeersHolder: bicf.preferredPeersHolder, - CurrentPeerId: bicf.messenger.ID(), + CurrentPeerId: bicf.mainMessenger.ID(), }, ) if err != nil { - return err - } - - interceptor, err := bicf.createTopicAndAssignHandler(identifierHeartbeat, sdInterceptor, true) - if err != nil { - return err + return nil, err } - return bicf.container.Add(identifierHeartbeat, interceptor) + return bicf.createTopicAndAssignHandler(identifier, interceptor, true) } // ------- PeerShard interceptor @@ -704,41 +753,48 @@ func (bicf *baseInterceptorsContainerFactory) generateHeartbeatInterceptor() err func (bicf *baseInterceptorsContainerFactory) generatePeerShardInterceptor() error { identifier := 
common.ConnectionTopic - interceptedPeerShardFactory, err := interceptorFactory.NewInterceptedPeerShardFactory(*bicf.argInterceptorFactory) + interceptor, err := bicf.createPeerShardInterceptor(identifier, bicf.mainPeerShardMapper) if err != nil { return err } + return bicf.addInterceptorsToContainers([]string{identifier}, []process.Interceptor{interceptor}) +} + +func (bicf *baseInterceptorsContainerFactory) createPeerShardInterceptor( + identifier string, + peerShardMapper process.PeerShardMapper, +) (process.Interceptor, error) { + interceptedPeerShardFactory, err := interceptorFactory.NewInterceptedPeerShardFactory(*bicf.argInterceptorFactory) + if err != nil { + return nil, err + } + argProcessor := processor.ArgPeerShardInterceptorProcessor{ - PeerShardMapper: bicf.peerShardMapper, + PeerShardMapper: peerShardMapper, } - dciProcessor, err := processor.NewPeerShardInterceptorProcessor(argProcessor) + psiProcessor, err := processor.NewPeerShardInterceptorProcessor(argProcessor) if err != nil { - return err + return nil, err } interceptor, err := interceptors.NewSingleDataInterceptor( interceptors.ArgSingleDataInterceptor{ Topic: identifier, DataFactory: interceptedPeerShardFactory, - Processor: dciProcessor, + Processor: psiProcessor, Throttler: bicf.globalThrottler, AntifloodHandler: bicf.antifloodHandler, WhiteListRequest: bicf.whiteListHandler, - CurrentPeerId: bicf.messenger.ID(), + CurrentPeerId: bicf.mainMessenger.ID(), PreferredPeersHolder: bicf.preferredPeersHolder, }, ) if err != nil { - return err - } - - _, err = bicf.createTopicAndAssignHandler(identifier, interceptor, true) - if err != nil { - return err + return nil, err } - return bicf.container.Add(identifier, interceptor) + return bicf.createTopicAndAssignHandler(identifier, interceptor, true) } func (bicf *baseInterceptorsContainerFactory) generateValidatorInfoInterceptor() error { @@ -769,7 +825,7 @@ func (bicf *baseInterceptorsContainerFactory) generateValidatorInfoInterceptor() AntifloodHandler: bicf.antifloodHandler, WhiteListRequest: bicf.whiteListHandler, PreferredPeersHolder: bicf.preferredPeersHolder, - CurrentPeerId: bicf.messenger.ID(), + CurrentPeerId: bicf.mainMessenger.ID(), }, ) if err != nil { @@ -781,5 +837,18 @@ func (bicf *baseInterceptorsContainerFactory) generateValidatorInfoInterceptor() return err } - return bicf.container.Add(identifier, interceptor) + return bicf.addInterceptorsToContainers([]string{identifier}, []process.Interceptor{interceptor}) +} + +func (bicf *baseInterceptorsContainerFactory) addInterceptorsToContainers(keys []string, interceptors []process.Interceptor) error { + err := bicf.mainContainer.AddMultiple(keys, interceptors) + if err != nil { + return err + } + + if bicf.nodeOperationMode != p2p.FullArchiveMode { + return nil + } + + return bicf.fullArchiveContainer.AddMultiple(keys, interceptors) } diff --git a/process/factory/interceptorscontainer/metaInterceptorsContainerFactory.go b/process/factory/interceptorscontainer/metaInterceptorsContainerFactory.go index b783556254b..38d3e460bce 100644 --- a/process/factory/interceptorscontainer/metaInterceptorsContainerFactory.go +++ b/process/factory/interceptorscontainer/metaInterceptorsContainerFactory.go @@ -31,7 +31,8 @@ func NewMetaInterceptorsContainerFactory( args.Accounts, args.Store, args.DataPool, - args.Messenger, + args.MainMessenger, + args.FullArchiveMessenger, args.NodesCoordinator, args.BlockBlackList, args.AntifloodHandler, @@ -39,7 +40,8 @@ func NewMetaInterceptorsContainerFactory( args.WhiteListerVerifiedTxs, 
args.PreferredPeersHolder, args.RequestHandler, - args.PeerShardMapper, + args.MainPeerShardMapper, + args.FullArchivePeerShardMapper, args.HardforkTrigger, ) if err != nil { @@ -96,29 +98,32 @@ func NewMetaInterceptorsContainerFactory( PeerSignatureHandler: args.PeerSignatureHandler, SignaturesHandler: args.SignaturesHandler, HeartbeatExpiryTimespanInSec: args.HeartbeatExpiryTimespanInSec, - PeerID: args.Messenger.ID(), + PeerID: args.MainMessenger.ID(), } - container := containers.NewInterceptorsContainer() base := &baseInterceptorsContainerFactory{ - container: container, - shardCoordinator: args.ShardCoordinator, - messenger: args.Messenger, - store: args.Store, - dataPool: args.DataPool, - nodesCoordinator: args.NodesCoordinator, - blockBlackList: args.BlockBlackList, - argInterceptorFactory: argInterceptorFactory, - maxTxNonceDeltaAllowed: args.MaxTxNonceDeltaAllowed, - accounts: args.Accounts, - antifloodHandler: args.AntifloodHandler, - whiteListHandler: args.WhiteListHandler, - whiteListerVerifiedTxs: args.WhiteListerVerifiedTxs, - preferredPeersHolder: args.PreferredPeersHolder, - hasher: args.CoreComponents.Hasher(), - requestHandler: args.RequestHandler, - peerShardMapper: args.PeerShardMapper, - hardforkTrigger: args.HardforkTrigger, + mainContainer: containers.NewInterceptorsContainer(), + fullArchiveContainer: containers.NewInterceptorsContainer(), + shardCoordinator: args.ShardCoordinator, + mainMessenger: args.MainMessenger, + fullArchiveMessenger: args.FullArchiveMessenger, + store: args.Store, + dataPool: args.DataPool, + nodesCoordinator: args.NodesCoordinator, + blockBlackList: args.BlockBlackList, + argInterceptorFactory: argInterceptorFactory, + maxTxNonceDeltaAllowed: args.MaxTxNonceDeltaAllowed, + accounts: args.Accounts, + antifloodHandler: args.AntifloodHandler, + whiteListHandler: args.WhiteListHandler, + whiteListerVerifiedTxs: args.WhiteListerVerifiedTxs, + preferredPeersHolder: args.PreferredPeersHolder, + hasher: args.CoreComponents.Hasher(), + requestHandler: args.RequestHandler, + mainPeerShardMapper: args.MainPeerShardMapper, + fullArchivePeerShardMapper: args.FullArchivePeerShardMapper, + hardforkTrigger: args.HardforkTrigger, + nodeOperationMode: args.NodeOperationMode, } icf := &metaInterceptorsContainerFactory{ @@ -134,63 +139,63 @@ func NewMetaInterceptorsContainerFactory( } // Create returns an interceptor container that will hold all interceptors in the system -func (micf *metaInterceptorsContainerFactory) Create() (process.InterceptorsContainer, error) { +func (micf *metaInterceptorsContainerFactory) Create() (process.InterceptorsContainer, process.InterceptorsContainer, error) { err := micf.generateMetachainHeaderInterceptors() if err != nil { - return nil, err + return nil, nil, err } err = micf.generateShardHeaderInterceptors() if err != nil { - return nil, err + return nil, nil, err } err = micf.generateTxInterceptors() if err != nil { - return nil, err + return nil, nil, err } err = micf.generateUnsignedTxsInterceptors() if err != nil { - return nil, err + return nil, nil, err } err = micf.generateRewardTxInterceptors() if err != nil { - return nil, err + return nil, nil, err } err = micf.generateMiniBlocksInterceptors() if err != nil { - return nil, err + return nil, nil, err } err = micf.generateTrieNodesInterceptors() if err != nil { - return nil, err + return nil, nil, err } err = micf.generatePeerAuthenticationInterceptor() if err != nil { - return nil, err + return nil, nil, err } err = micf.generateHeartbeatInterceptor() if err != nil 
{ - return nil, err + return nil, nil, err } err = micf.generatePeerShardInterceptor() if err != nil { - return nil, err + return nil, nil, err } err = micf.generateValidatorInfoInterceptor() if err != nil { - return nil, err + return nil, nil, err } - return micf.container, nil + return micf.mainContainer, micf.fullArchiveContainer, nil } // AddShardTrieNodeInterceptors will add the shard trie node interceptors into the existing container @@ -238,7 +243,7 @@ func (micf *metaInterceptorsContainerFactory) generateShardHeaderInterceptors() interceptorsSlice[int(idx)] = interceptor } - return micf.container.AddMultiple(keys, interceptorsSlice) + return micf.addInterceptorsToContainers(keys, interceptorsSlice) } func (micf *metaInterceptorsContainerFactory) createOneShardHeaderInterceptor(topic string) (process.Interceptor, error) { @@ -264,7 +269,7 @@ func (micf *metaInterceptorsContainerFactory) createOneShardHeaderInterceptor(to Throttler: micf.globalThrottler, AntifloodHandler: micf.antifloodHandler, WhiteListRequest: micf.whiteListHandler, - CurrentPeerId: micf.messenger.ID(), + CurrentPeerId: micf.mainMessenger.ID(), PreferredPeersHolder: micf.preferredPeersHolder, }, ) @@ -297,7 +302,7 @@ func (micf *metaInterceptorsContainerFactory) generateTrieNodesInterceptors() er keys = append(keys, identifierTrieNodes) trieInterceptors = append(trieInterceptors, interceptor) - return micf.container.AddMultiple(keys, trieInterceptors) + return micf.addInterceptorsToContainers(keys, trieInterceptors) } //------- Reward transactions interceptors @@ -321,7 +326,7 @@ func (micf *metaInterceptorsContainerFactory) generateRewardTxInterceptors() err interceptorSlice[int(idx)] = interceptor } - return micf.container.AddMultiple(keys, interceptorSlice) + return micf.addInterceptorsToContainers(keys, interceptorSlice) } // IsInterfaceNil returns true if there is no value under the interface diff --git a/process/factory/interceptorscontainer/metaInterceptorsContainerFactory_test.go b/process/factory/interceptorscontainer/metaInterceptorsContainerFactory_test.go index 5e636622ed3..34e96b201da 100644 --- a/process/factory/interceptorscontainer/metaInterceptorsContainerFactory_test.go +++ b/process/factory/interceptorscontainer/metaInterceptorsContainerFactory_test.go @@ -5,6 +5,7 @@ import ( "strings" "testing" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/process" @@ -143,16 +144,28 @@ func TestNewMetaInterceptorsContainerFactory_NilNodesCoordinatorShouldErr(t *tes assert.Equal(t, process.ErrNilNodesCoordinator, err) } -func TestNewMetaInterceptorsContainerFactory_NilTopicHandlerShouldErr(t *testing.T) { +func TestNewMetaInterceptorsContainerFactory_NilMainTopicHandlerShouldErr(t *testing.T) { t.Parallel() coreComp, cryptoComp := createMockComponentHolders() args := getArgumentsMeta(coreComp, cryptoComp) - args.Messenger = nil + args.MainMessenger = nil icf, err := interceptorscontainer.NewMetaInterceptorsContainerFactory(args) assert.Nil(t, icf) - assert.Equal(t, process.ErrNilMessenger, err) + assert.True(t, errors.Is(err, process.ErrNilMessenger)) +} + +func TestNewMetaInterceptorsContainerFactory_NilFullArchiveTopicHandlerShouldErr(t *testing.T) { + t.Parallel() + + coreComp, cryptoComp := createMockComponentHolders() + args := getArgumentsMeta(coreComp, cryptoComp) + args.FullArchiveMessenger = nil + icf, err := interceptorscontainer.NewMetaInterceptorsContainerFactory(args) + + 
assert.Nil(t, icf) + assert.True(t, errors.Is(err, process.ErrNilMessenger)) } func TestNewMetaInterceptorsContainerFactory_NilStoreShouldErr(t *testing.T) { @@ -420,16 +433,28 @@ func TestNewMetaInterceptorsContainerFactory_NilRequestHandlerShouldErr(t *testi assert.Equal(t, process.ErrNilRequestHandler, err) } -func TestNewMetaInterceptorsContainerFactory_NilPeerShardMapperShouldErr(t *testing.T) { +func TestNewMetaInterceptorsContainerFactory_NilMainPeerShardMapperShouldErr(t *testing.T) { + t.Parallel() + + coreComp, cryptoComp := createMockComponentHolders() + args := getArgumentsMeta(coreComp, cryptoComp) + args.MainPeerShardMapper = nil + icf, err := interceptorscontainer.NewMetaInterceptorsContainerFactory(args) + + assert.Nil(t, icf) + assert.True(t, errors.Is(err, process.ErrNilPeerShardMapper)) +} + +func TestNewMetaInterceptorsContainerFactory_NilFullArchivePeerShardMapperShouldErr(t *testing.T) { t.Parallel() coreComp, cryptoComp := createMockComponentHolders() args := getArgumentsMeta(coreComp, cryptoComp) - args.PeerShardMapper = nil + args.FullArchivePeerShardMapper = nil icf, err := interceptorscontainer.NewMetaInterceptorsContainerFactory(args) assert.Nil(t, icf) - assert.Equal(t, process.ErrNilPeerShardMapper, err) + assert.True(t, errors.Is(err, process.ErrNilPeerShardMapper)) } func TestNewMetaInterceptorsContainerFactory_NilHardforkTriggerShouldErr(t *testing.T) { @@ -470,74 +495,60 @@ func TestNewMetaInterceptorsContainerFactory_ShouldWorkWithSizeCheck(t *testing. // ------- Create -func TestMetaInterceptorsContainerFactory_CreateTopicMetablocksFailsShouldErr(t *testing.T) { +func TestMetaInterceptorsContainerFactory_CreateTopicsAndRegisterFailure(t *testing.T) { t.Parallel() - coreComp, cryptoComp := createMockComponentHolders() - args := getArgumentsMeta(coreComp, cryptoComp) - args.Messenger = createMetaStubTopicHandler(factory.MetachainBlocksTopic, "") - icf, _ := interceptorscontainer.NewMetaInterceptorsContainerFactory(args) + testCreateMetaTopicShouldFailOnAllMessenger(t, "generateMetachainHeaderInterceptors_create", factory.MetachainBlocksTopic, "") + testCreateMetaTopicShouldFailOnAllMessenger(t, "generateMetachainHeaderInterceptors_register", "", factory.MetachainBlocksTopic) - container, err := icf.Create() + testCreateMetaTopicShouldFailOnAllMessenger(t, "generateShardHeaderInterceptors", factory.ShardBlocksTopic, "") - assert.Nil(t, container) - assert.Equal(t, errExpected, err) -} + testCreateMetaTopicShouldFailOnAllMessenger(t, "generateTxInterceptors", factory.TransactionTopic, "") -func TestMetaInterceptorsContainerFactory_CreateTopicShardHeadersForMetachainFailsShouldErr(t *testing.T) { - t.Parallel() + testCreateMetaTopicShouldFailOnAllMessenger(t, "generateUnsignedTxsInterceptors", factory.UnsignedTransactionTopic, "") - coreComp, cryptoComp := createMockComponentHolders() - args := getArgumentsMeta(coreComp, cryptoComp) - args.Messenger = createMetaStubTopicHandler(factory.ShardBlocksTopic, "") - icf, _ := interceptorscontainer.NewMetaInterceptorsContainerFactory(args) + testCreateMetaTopicShouldFailOnAllMessenger(t, "generateRewardTxInterceptors", factory.RewardsTransactionTopic, "") - container, err := icf.Create() + testCreateMetaTopicShouldFailOnAllMessenger(t, "generateMiniBlocksInterceptors", factory.MiniBlocksTopic, "") - assert.Nil(t, container) - assert.Equal(t, errExpected, err) -} + testCreateMetaTopicShouldFailOnAllMessenger(t, "generateTrieNodesInterceptors_validator", factory.ValidatorTrieNodesTopic, "") -func 
TestMetaInterceptorsContainerFactory_CreateRegisterForMetablocksFailsShouldErr(t *testing.T) { - t.Parallel() - - coreComp, cryptoComp := createMockComponentHolders() - args := getArgumentsMeta(coreComp, cryptoComp) - args.Messenger = createMetaStubTopicHandler("", factory.MetachainBlocksTopic) - icf, _ := interceptorscontainer.NewMetaInterceptorsContainerFactory(args) + testCreateMetaTopicShouldFailOnAllMessenger(t, "generateTrieNodesInterceptors_account", factory.AccountTrieNodesTopic, "") - container, err := icf.Create() + testCreateMetaTopicShouldFailOnAllMessenger(t, "generateValidatorInfoInterceptor", common.ValidatorInfoTopic, "") - assert.Nil(t, container) - assert.Equal(t, errExpected, err) -} + testCreateMetaTopicShouldFailOnAllMessenger(t, "generateHeartbeatInterceptor", common.HeartbeatV2Topic, "") -func TestMetaInterceptorsContainerFactory_CreateRegisterShardHeadersForMetachainFailsShouldErr(t *testing.T) { - t.Parallel() + testCreateMetaTopicShouldFailOnAllMessenger(t, "generatePeerShardInterceptor", common.ConnectionTopic, "") - coreComp, cryptoComp := createMockComponentHolders() - args := getArgumentsMeta(coreComp, cryptoComp) - args.Messenger = createMetaStubTopicHandler("", factory.MetachainBlocksTopic) - icf, _ := interceptorscontainer.NewMetaInterceptorsContainerFactory(args) - - container, err := icf.Create() + t.Run("generatePeerAuthenticationInterceptor_main", testCreateMetaTopicShouldFail(common.PeerAuthenticationTopic, "")) +} - assert.Nil(t, container) - assert.Equal(t, errExpected, err) +func testCreateMetaTopicShouldFailOnAllMessenger(t *testing.T, testNamePrefix string, matchStrToErrOnCreate string, matchStrToErrOnRegister string) { + t.Run(testNamePrefix+"main messenger", testCreateMetaTopicShouldFail(matchStrToErrOnCreate, matchStrToErrOnRegister)) + t.Run(testNamePrefix+"full archive messenger", testCreateMetaTopicShouldFail(matchStrToErrOnCreate, matchStrToErrOnRegister)) } -func TestMetaInterceptorsContainerFactory_CreateRegisterTrieNodesFailsShouldErr(t *testing.T) { - t.Parallel() +func testCreateMetaTopicShouldFail(matchStrToErrOnCreate string, matchStrToErrOnRegister string) func(t *testing.T) { + return func(t *testing.T) { + t.Parallel() - coreComp, cryptoComp := createMockComponentHolders() - args := getArgumentsMeta(coreComp, cryptoComp) - args.Messenger = createMetaStubTopicHandler("", factory.AccountTrieNodesTopic) - icf, _ := interceptorscontainer.NewMetaInterceptorsContainerFactory(args) + coreComp, cryptoComp := createMockComponentHolders() + args := getArgumentsMeta(coreComp, cryptoComp) + if strings.Contains(t.Name(), "full_archive") { + args.NodeOperationMode = p2p.FullArchiveMode + args.FullArchiveMessenger = createMetaStubTopicHandler(matchStrToErrOnCreate, matchStrToErrOnRegister) + } else { + args.MainMessenger = createMetaStubTopicHandler(matchStrToErrOnCreate, matchStrToErrOnRegister) + } + icf, _ := interceptorscontainer.NewMetaInterceptorsContainerFactory(args) - container, err := icf.Create() + mainContainer, fullArchiveContainer, err := icf.Create() - assert.Nil(t, container) - assert.Equal(t, errExpected, err) + assert.Nil(t, mainContainer) + assert.Nil(t, fullArchiveContainer) + assert.Equal(t, errExpected, err) + } } func TestMetaInterceptorsContainerFactory_CreateShouldWork(t *testing.T) { @@ -545,77 +556,121 @@ func TestMetaInterceptorsContainerFactory_CreateShouldWork(t *testing.T) { coreComp, cryptoComp := createMockComponentHolders() args := getArgumentsMeta(coreComp, cryptoComp) - args.Messenger = &mock.TopicHandlerStub{
- CreateTopicCalled: func(name string, createChannelForTopic bool) error { - return nil - }, - RegisterMessageProcessorCalled: func(topic string, identifier string, handler p2p.MessageProcessor) error { - return nil - }, - } icf, _ := interceptorscontainer.NewMetaInterceptorsContainerFactory(args) - container, err := icf.Create() + mainContainer, fullArchiveContainer, err := icf.Create() - assert.NotNil(t, container) + assert.NotNil(t, mainContainer) + assert.NotNil(t, fullArchiveContainer) assert.Nil(t, err) } func TestMetaInterceptorsContainerFactory_With4ShardsShouldWork(t *testing.T) { t.Parallel() - noOfShards := 4 - - shardCoordinator := mock.NewMultipleShardsCoordinatorMock() - shardCoordinator.SetNoShards(uint32(noOfShards)) - shardCoordinator.CurrentShard = 1 - - nodesCoordinator := &shardingMocks.NodesCoordinatorMock{ - ShardConsensusSize: 1, - MetaConsensusSize: 1, - NbShards: uint32(noOfShards), - ShardId: 1, - } - - coreComp, cryptoComp := createMockComponentHolders() - args := getArgumentsMeta(coreComp, cryptoComp) - args.ShardCoordinator = shardCoordinator - args.NodesCoordinator = nodesCoordinator - args.Messenger = &mock.TopicHandlerStub{ - CreateTopicCalled: func(name string, createChannelForTopic bool) error { - return nil - }, - RegisterMessageProcessorCalled: func(topic string, identifier string, handler p2p.MessageProcessor) error { - return nil - }, - } - icf, err := interceptorscontainer.NewMetaInterceptorsContainerFactory(args) - require.Nil(t, err) - - container, err := icf.Create() - - numInterceptorsMetablock := 1 - numInterceptorsShardHeadersForMetachain := noOfShards - numInterceptorsTransactionsForMetachain := noOfShards + 1 - numInterceptorsMiniBlocksForMetachain := noOfShards + 1 + 1 - numInterceptorsUnsignedTxsForMetachain := noOfShards + 1 - numInterceptorsRewardsTxsForMetachain := noOfShards - numInterceptorsTrieNodes := 2 - numInterceptorsPeerAuthForMetachain := 1 - numInterceptorsHeartbeatForMetachain := 1 - numInterceptorsShardValidatorInfoForMetachain := 1 - numInterceptorValidatorInfo := 1 - totalInterceptors := numInterceptorsMetablock + numInterceptorsShardHeadersForMetachain + numInterceptorsTrieNodes + - numInterceptorsTransactionsForMetachain + numInterceptorsUnsignedTxsForMetachain + numInterceptorsMiniBlocksForMetachain + - numInterceptorsRewardsTxsForMetachain + numInterceptorsPeerAuthForMetachain + numInterceptorsHeartbeatForMetachain + - numInterceptorsShardValidatorInfoForMetachain + numInterceptorValidatorInfo - - assert.Nil(t, err) - assert.Equal(t, totalInterceptors, container.Len()) - - err = icf.AddShardTrieNodeInterceptors(container) - assert.Nil(t, err) - assert.Equal(t, totalInterceptors+noOfShards, container.Len()) + t.Run("regular mode", func(t *testing.T) { + t.Parallel() + + noOfShards := 4 + + shardCoordinator := mock.NewMultipleShardsCoordinatorMock() + shardCoordinator.SetNoShards(uint32(noOfShards)) + shardCoordinator.CurrentShard = 1 + + nodesCoordinator := &shardingMocks.NodesCoordinatorMock{ + ShardConsensusSize: 1, + MetaConsensusSize: 1, + NbShards: uint32(noOfShards), + ShardId: 1, + } + + coreComp, cryptoComp := createMockComponentHolders() + args := getArgumentsMeta(coreComp, cryptoComp) + args.ShardCoordinator = shardCoordinator + args.NodesCoordinator = nodesCoordinator + icf, err := interceptorscontainer.NewMetaInterceptorsContainerFactory(args) + require.Nil(t, err) + + mainContainer, fullArchiveContainer, err := icf.Create() + + numInterceptorsMetablock := 1 + numInterceptorsShardHeadersForMetachain := 
noOfShards + numInterceptorsTransactionsForMetachain := noOfShards + 1 + numInterceptorsMiniBlocksForMetachain := noOfShards + 1 + 1 + numInterceptorsUnsignedTxsForMetachain := noOfShards + 1 + numInterceptorsRewardsTxsForMetachain := noOfShards + numInterceptorsTrieNodes := 2 + numInterceptorsPeerAuthForMetachain := 1 + numInterceptorsHeartbeatForMetachain := 1 + numInterceptorsShardValidatorInfoForMetachain := 1 + numInterceptorValidatorInfo := 1 + totalInterceptors := numInterceptorsMetablock + numInterceptorsShardHeadersForMetachain + numInterceptorsTrieNodes + + numInterceptorsTransactionsForMetachain + numInterceptorsUnsignedTxsForMetachain + numInterceptorsMiniBlocksForMetachain + + numInterceptorsRewardsTxsForMetachain + numInterceptorsPeerAuthForMetachain + numInterceptorsHeartbeatForMetachain + + numInterceptorsShardValidatorInfoForMetachain + numInterceptorValidatorInfo + + assert.Nil(t, err) + assert.Equal(t, totalInterceptors, mainContainer.Len()) + assert.Equal(t, 0, fullArchiveContainer.Len()) + + err = icf.AddShardTrieNodeInterceptors(mainContainer) + assert.Nil(t, err) + assert.Equal(t, totalInterceptors+noOfShards, mainContainer.Len()) + }) + t.Run("full archive mode", func(t *testing.T) { + t.Parallel() + + noOfShards := 4 + + shardCoordinator := mock.NewMultipleShardsCoordinatorMock() + shardCoordinator.SetNoShards(uint32(noOfShards)) + shardCoordinator.CurrentShard = 1 + nodesCoordinator := &shardingMocks.NodesCoordinatorMock{ + ShardConsensusSize: 1, + MetaConsensusSize: 1, + NbShards: uint32(noOfShards), + ShardId: 1, + } + + coreComp, cryptoComp := createMockComponentHolders() + args := getArgumentsMeta(coreComp, cryptoComp) + args.NodeOperationMode = p2p.FullArchiveMode + args.ShardCoordinator = shardCoordinator + args.NodesCoordinator = nodesCoordinator + + icf, err := interceptorscontainer.NewMetaInterceptorsContainerFactory(args) + require.Nil(t, err) + + mainContainer, fullArchiveContainer, err := icf.Create() + + numInterceptorsMetablock := 1 + numInterceptorsShardHeadersForMetachain := noOfShards + numInterceptorsTransactionsForMetachain := noOfShards + 1 + numInterceptorsMiniBlocksForMetachain := noOfShards + 1 + 1 + numInterceptorsUnsignedTxsForMetachain := noOfShards + 1 + numInterceptorsRewardsTxsForMetachain := noOfShards + numInterceptorsTrieNodes := 2 + numInterceptorsPeerAuthForMetachain := 1 + numInterceptorsHeartbeatForMetachain := 1 + numInterceptorsShardValidatorInfoForMetachain := 1 + numInterceptorValidatorInfo := 1 + totalInterceptors := numInterceptorsMetablock + numInterceptorsShardHeadersForMetachain + numInterceptorsTrieNodes + + numInterceptorsTransactionsForMetachain + numInterceptorsUnsignedTxsForMetachain + numInterceptorsMiniBlocksForMetachain + + numInterceptorsRewardsTxsForMetachain + numInterceptorsPeerAuthForMetachain + numInterceptorsHeartbeatForMetachain + + numInterceptorsShardValidatorInfoForMetachain + numInterceptorValidatorInfo + + assert.Nil(t, err) + assert.Equal(t, totalInterceptors, mainContainer.Len()) + assert.Equal(t, totalInterceptors-1, fullArchiveContainer.Len()) // no peerAuthentication needed + + err = icf.AddShardTrieNodeInterceptors(mainContainer) + assert.Nil(t, err) + assert.Equal(t, totalInterceptors+noOfShards, mainContainer.Len()) + + err = icf.AddShardTrieNodeInterceptors(fullArchiveContainer) + assert.Nil(t, err) + assert.Equal(t, totalInterceptors-1+noOfShards, fullArchiveContainer.Len()) + }) } func getArgumentsMeta( @@ -628,7 +683,8 @@ func getArgumentsMeta( Accounts: &stateMock.AccountsStub{}, 
ShardCoordinator: mock.NewOneShardCoordinatorMock(), NodesCoordinator: shardingMocks.NewNodesCoordinatorMock(), - Messenger: &mock.TopicHandlerStub{}, + MainMessenger: &mock.TopicHandlerStub{}, + FullArchiveMessenger: &mock.TopicHandlerStub{}, Store: createMetaStore(), DataPool: createMetaDataPools(), MaxTxNonceDeltaAllowed: maxTxNonceDeltaAllowed, @@ -647,7 +703,9 @@ func getArgumentsMeta( PeerSignatureHandler: &mock.PeerSignatureHandlerStub{}, SignaturesHandler: &mock.SignaturesHandlerStub{}, HeartbeatExpiryTimespanInSec: 30, - PeerShardMapper: &p2pmocks.NetworkShardingCollectorStub{}, + MainPeerShardMapper: &p2pmocks.NetworkShardingCollectorStub{}, + FullArchivePeerShardMapper: &p2pmocks.NetworkShardingCollectorStub{}, HardforkTrigger: &testscommon.HardforkTriggerStub{}, + NodeOperationMode: p2p.NormalOperation, } } diff --git a/process/factory/interceptorscontainer/shardInterceptorsContainerFactory.go b/process/factory/interceptorscontainer/shardInterceptorsContainerFactory.go index ccd0f9ee981..beef288c54c 100644 --- a/process/factory/interceptorscontainer/shardInterceptorsContainerFactory.go +++ b/process/factory/interceptorscontainer/shardInterceptorsContainerFactory.go @@ -29,7 +29,8 @@ func NewShardInterceptorsContainerFactory( args.Accounts, args.Store, args.DataPool, - args.Messenger, + args.MainMessenger, + args.FullArchiveMessenger, args.NodesCoordinator, args.BlockBlackList, args.AntifloodHandler, @@ -37,7 +38,8 @@ func NewShardInterceptorsContainerFactory( args.WhiteListerVerifiedTxs, args.PreferredPeersHolder, args.RequestHandler, - args.PeerShardMapper, + args.MainPeerShardMapper, + args.FullArchivePeerShardMapper, args.HardforkTrigger, ) if err != nil { @@ -95,29 +97,32 @@ func NewShardInterceptorsContainerFactory( PeerSignatureHandler: args.PeerSignatureHandler, SignaturesHandler: args.SignaturesHandler, HeartbeatExpiryTimespanInSec: args.HeartbeatExpiryTimespanInSec, - PeerID: args.Messenger.ID(), + PeerID: args.MainMessenger.ID(), } - container := containers.NewInterceptorsContainer() base := &baseInterceptorsContainerFactory{ - container: container, - accounts: args.Accounts, - shardCoordinator: args.ShardCoordinator, - messenger: args.Messenger, - store: args.Store, - dataPool: args.DataPool, - nodesCoordinator: args.NodesCoordinator, - argInterceptorFactory: argInterceptorFactory, - blockBlackList: args.BlockBlackList, - maxTxNonceDeltaAllowed: args.MaxTxNonceDeltaAllowed, - antifloodHandler: args.AntifloodHandler, - whiteListHandler: args.WhiteListHandler, - whiteListerVerifiedTxs: args.WhiteListerVerifiedTxs, - preferredPeersHolder: args.PreferredPeersHolder, - hasher: args.CoreComponents.Hasher(), - requestHandler: args.RequestHandler, - peerShardMapper: args.PeerShardMapper, - hardforkTrigger: args.HardforkTrigger, + mainContainer: containers.NewInterceptorsContainer(), + fullArchiveContainer: containers.NewInterceptorsContainer(), + accounts: args.Accounts, + shardCoordinator: args.ShardCoordinator, + mainMessenger: args.MainMessenger, + fullArchiveMessenger: args.FullArchiveMessenger, + store: args.Store, + dataPool: args.DataPool, + nodesCoordinator: args.NodesCoordinator, + argInterceptorFactory: argInterceptorFactory, + blockBlackList: args.BlockBlackList, + maxTxNonceDeltaAllowed: args.MaxTxNonceDeltaAllowed, + antifloodHandler: args.AntifloodHandler, + whiteListHandler: args.WhiteListHandler, + whiteListerVerifiedTxs: args.WhiteListerVerifiedTxs, + preferredPeersHolder: args.PreferredPeersHolder, + hasher: args.CoreComponents.Hasher(), + requestHandler: 
args.RequestHandler, + mainPeerShardMapper: args.MainPeerShardMapper, + fullArchivePeerShardMapper: args.FullArchivePeerShardMapper, + hardforkTrigger: args.HardforkTrigger, + nodeOperationMode: args.NodeOperationMode, } icf := &shardInterceptorsContainerFactory{ @@ -133,63 +138,63 @@ func NewShardInterceptorsContainerFactory( } // Create returns an interceptor container that will hold all interceptors in the system -func (sicf *shardInterceptorsContainerFactory) Create() (process.InterceptorsContainer, error) { +func (sicf *shardInterceptorsContainerFactory) Create() (process.InterceptorsContainer, process.InterceptorsContainer, error) { err := sicf.generateTxInterceptors() if err != nil { - return nil, err + return nil, nil, err } err = sicf.generateUnsignedTxsInterceptors() if err != nil { - return nil, err + return nil, nil, err } err = sicf.generateRewardTxInterceptor() if err != nil { - return nil, err + return nil, nil, err } err = sicf.generateHeaderInterceptors() if err != nil { - return nil, err + return nil, nil, err } err = sicf.generateMiniBlocksInterceptors() if err != nil { - return nil, err + return nil, nil, err } err = sicf.generateMetachainHeaderInterceptors() if err != nil { - return nil, err + return nil, nil, err } err = sicf.generateTrieNodesInterceptors() if err != nil { - return nil, err + return nil, nil, err } err = sicf.generatePeerAuthenticationInterceptor() if err != nil { - return nil, err + return nil, nil, err } err = sicf.generateHeartbeatInterceptor() if err != nil { - return nil, err + return nil, nil, err } err = sicf.generatePeerShardInterceptor() if err != nil { - return nil, err + return nil, nil, err } err = sicf.generateValidatorInfoInterceptor() if err != nil { - return nil, err + return nil, nil, err } - return sicf.container, nil + return sicf.mainContainer, sicf.fullArchiveContainer, nil } func (sicf *shardInterceptorsContainerFactory) generateTrieNodesInterceptors() error { @@ -207,7 +212,7 @@ func (sicf *shardInterceptorsContainerFactory) generateTrieNodesInterceptors() e keys = append(keys, identifierTrieNodes) interceptorsSlice = append(interceptorsSlice, interceptor) - return sicf.container.AddMultiple(keys, interceptorsSlice) + return sicf.addInterceptorsToContainers(keys, interceptorsSlice) } // ------- Reward transactions interceptors @@ -227,7 +232,7 @@ func (sicf *shardInterceptorsContainerFactory) generateRewardTxInterceptor() err keys = append(keys, identifierTx) interceptorSlice = append(interceptorSlice, interceptor) - return sicf.container.AddMultiple(keys, interceptorSlice) + return sicf.addInterceptorsToContainers(keys, interceptorSlice) } // IsInterfaceNil returns true if there is no value under the interface diff --git a/process/factory/interceptorscontainer/shardInterceptorsContainerFactory_test.go b/process/factory/interceptorscontainer/shardInterceptorsContainerFactory_test.go index afc6de41014..4f1ec24c12d 100644 --- a/process/factory/interceptorscontainer/shardInterceptorsContainerFactory_test.go +++ b/process/factory/interceptorscontainer/shardInterceptorsContainerFactory_test.go @@ -1,10 +1,12 @@ package interceptorscontainer_test import ( + "errors" "strings" "testing" "github.com/multiversx/mx-chain-core-go/core/versioning" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/process" @@ -133,16 +135,28 @@ func TestNewShardInterceptorsContainerFactory_NilNodesCoordinatorShouldErr(t *te assert.Equal(t, 
process.ErrNilNodesCoordinator, err) } -func TestNewShardInterceptorsContainerFactory_NilMessengerShouldErr(t *testing.T) { +func TestNewShardInterceptorsContainerFactory_NilMainMessengerShouldErr(t *testing.T) { t.Parallel() coreComp, cryptoComp := createMockComponentHolders() args := getArgumentsShard(coreComp, cryptoComp) - args.Messenger = nil + args.MainMessenger = nil icf, err := interceptorscontainer.NewShardInterceptorsContainerFactory(args) assert.Nil(t, icf) - assert.Equal(t, process.ErrNilMessenger, err) + assert.True(t, errors.Is(err, process.ErrNilMessenger)) +} + +func TestNewShardInterceptorsContainerFactory_NilFullArchiveMessengerShouldErr(t *testing.T) { + t.Parallel() + + coreComp, cryptoComp := createMockComponentHolders() + args := getArgumentsShard(coreComp, cryptoComp) + args.FullArchiveMessenger = nil + icf, err := interceptorscontainer.NewShardInterceptorsContainerFactory(args) + + assert.Nil(t, icf) + assert.True(t, errors.Is(err, process.ErrNilMessenger)) } func TestNewShardInterceptorsContainerFactory_NilStoreShouldErr(t *testing.T) { @@ -379,16 +393,28 @@ func TestNewShardInterceptorsContainerFactory_EmptyEpochStartTriggerShouldErr(t assert.Equal(t, process.ErrNilEpochStartTrigger, err) } -func TestNewShardInterceptorsContainerFactory_NilPeerShardMapperShouldErr(t *testing.T) { +func TestNewShardInterceptorsContainerFactory_NilMainPeerShardMapperShouldErr(t *testing.T) { t.Parallel() coreComp, cryptoComp := createMockComponentHolders() args := getArgumentsShard(coreComp, cryptoComp) - args.PeerShardMapper = nil + args.MainPeerShardMapper = nil icf, err := interceptorscontainer.NewShardInterceptorsContainerFactory(args) assert.Nil(t, icf) - assert.Equal(t, process.ErrNilPeerShardMapper, err) + assert.True(t, errors.Is(err, process.ErrNilPeerShardMapper)) +} + +func TestNewShardInterceptorsContainerFactory_NilFullArchivePeerShardMapperShouldErr(t *testing.T) { + t.Parallel() + + coreComp, cryptoComp := createMockComponentHolders() + args := getArgumentsShard(coreComp, cryptoComp) + args.FullArchivePeerShardMapper = nil + icf, err := interceptorscontainer.NewShardInterceptorsContainerFactory(args) + + assert.Nil(t, icf) + assert.True(t, errors.Is(err, process.ErrNilPeerShardMapper)) } func TestNewShardInterceptorsContainerFactory_NilHardforkTriggerShouldErr(t *testing.T) { @@ -429,130 +455,57 @@ func TestNewShardInterceptorsContainerFactory_ShouldWorkWithSizeCheck(t *testing // ------- Create -func TestShardInterceptorsContainerFactory_CreateTopicCreationTxFailsShouldErr(t *testing.T) { +func TestShardInterceptorsContainerFactory_CreateTopicsAndRegisterFailure(t *testing.T) { t.Parallel() - coreComp, cryptoComp := createMockComponentHolders() - args := getArgumentsShard(coreComp, cryptoComp) - args.Messenger = createShardStubTopicHandler(factory.TransactionTopic, "") - icf, _ := interceptorscontainer.NewShardInterceptorsContainerFactory(args) + testCreateShardTopicShouldFailOnAllMessenger(t, "generateTxInterceptors_create", factory.TransactionTopic, "") + testCreateShardTopicShouldFailOnAllMessenger(t, "generateTxInterceptors_register", "", factory.TransactionTopic) - container, err := icf.Create() + testCreateShardTopicShouldFailOnAllMessenger(t, "generateUnsignedTxsInterceptors", factory.UnsignedTransactionTopic, "") - assert.Nil(t, container) - assert.Equal(t, errExpected, err) -} + testCreateShardTopicShouldFailOnAllMessenger(t, "generateRewardTxInterceptor", factory.RewardsTransactionTopic, "") -func 
TestShardInterceptorsContainerFactory_CreateTopicCreationHdrFailsShouldErr(t *testing.T) { - t.Parallel() + testCreateShardTopicShouldFailOnAllMessenger(t, "generateHeaderInterceptors", factory.ShardBlocksTopic, "") - coreComp, cryptoComp := createMockComponentHolders() - args := getArgumentsShard(coreComp, cryptoComp) - args.Messenger = createShardStubTopicHandler(factory.ShardBlocksTopic, "") - icf, _ := interceptorscontainer.NewShardInterceptorsContainerFactory(args) + testCreateShardTopicShouldFailOnAllMessenger(t, "generateMiniBlocksInterceptors", factory.MiniBlocksTopic, "") - container, err := icf.Create() + testCreateShardTopicShouldFailOnAllMessenger(t, "generateMetachainHeaderInterceptors", factory.MetachainBlocksTopic, "") - assert.Nil(t, container) - assert.Equal(t, errExpected, err) -} + testCreateShardTopicShouldFailOnAllMessenger(t, "generateTrieNodesInterceptors", factory.AccountTrieNodesTopic, "") -func TestShardInterceptorsContainerFactory_CreateTopicCreationMiniBlocksFailsShouldErr(t *testing.T) { - t.Parallel() + testCreateShardTopicShouldFailOnAllMessenger(t, "generateValidatorInfoInterceptor", common.ValidatorInfoTopic, "") - coreComp, cryptoComp := createMockComponentHolders() - args := getArgumentsShard(coreComp, cryptoComp) - args.Messenger = createShardStubTopicHandler(factory.MiniBlocksTopic, "") - icf, _ := interceptorscontainer.NewShardInterceptorsContainerFactory(args) + testCreateShardTopicShouldFailOnAllMessenger(t, "generateHeartbeatInterceptor", common.HeartbeatV2Topic, "") - container, err := icf.Create() + testCreateShardTopicShouldFailOnAllMessenger(t, "generatePeerShardIntercepto", common.ConnectionTopic, "") - assert.Nil(t, container) - assert.Equal(t, errExpected, err) + t.Run("generatePeerAuthenticationInterceptor_main", testCreateShardTopicShouldFail(common.PeerAuthenticationTopic, "")) } - -func TestShardInterceptorsContainerFactory_CreateTopicCreationMetachainHeadersFailsShouldErr(t *testing.T) { - t.Parallel() - - coreComp, cryptoComp := createMockComponentHolders() - args := getArgumentsShard(coreComp, cryptoComp) - args.Messenger = createShardStubTopicHandler(factory.MetachainBlocksTopic, "") - icf, _ := interceptorscontainer.NewShardInterceptorsContainerFactory(args) - - container, err := icf.Create() - - assert.Nil(t, container) - assert.Equal(t, errExpected, err) +func testCreateShardTopicShouldFailOnAllMessenger(t *testing.T, testNamePrefix string, matchStrToErrOnCreate string, matchStrToErrOnRegister string) { + t.Run(testNamePrefix+"main messenger", testCreateShardTopicShouldFail(matchStrToErrOnCreate, matchStrToErrOnRegister)) + t.Run(testNamePrefix+"full archive messenger", testCreateShardTopicShouldFail(matchStrToErrOnCreate, matchStrToErrOnRegister)) } -func TestShardInterceptorsContainerFactory_CreateRegisterTxFailsShouldErr(t *testing.T) { - t.Parallel() - - coreComp, cryptoComp := createMockComponentHolders() - args := getArgumentsShard(coreComp, cryptoComp) - args.Messenger = createShardStubTopicHandler("", factory.TransactionTopic) - icf, _ := interceptorscontainer.NewShardInterceptorsContainerFactory(args) - - container, err := icf.Create() +func testCreateShardTopicShouldFail(matchStrToErrOnCreate string, matchStrToErrOnRegister string) func(t *testing.T) { + return func(t *testing.T) { + t.Parallel() - assert.Nil(t, container) - assert.Equal(t, errExpected, err) -} + coreComp, cryptoComp := createMockComponentHolders() + args := getArgumentsShard(coreComp, cryptoComp) + if strings.Contains(t.Name(), "full_archive") { + 
args.NodeOperationMode = p2p.FullArchiveMode + args.FullArchiveMessenger = createShardStubTopicHandler(matchStrToErrOnCreate, matchStrToErrOnRegister) + } else { + args.MainMessenger = createShardStubTopicHandler(matchStrToErrOnCreate, matchStrToErrOnRegister) + } + icf, _ := interceptorscontainer.NewShardInterceptorsContainerFactory(args) -func TestShardInterceptorsContainerFactory_CreateRegisterHdrFailsShouldErr(t *testing.T) { - t.Parallel() + mainContainer, fullArchiveContainer, err := icf.Create() - coreComp, cryptoComp := createMockComponentHolders() - args := getArgumentsShard(coreComp, cryptoComp) - args.Messenger = createShardStubTopicHandler("", factory.ShardBlocksTopic) - icf, _ := interceptorscontainer.NewShardInterceptorsContainerFactory(args) - - container, err := icf.Create() - - assert.Nil(t, container) - assert.Equal(t, errExpected, err) -} - -func TestShardInterceptorsContainerFactory_CreateRegisterMiniBlocksFailsShouldErr(t *testing.T) { - t.Parallel() - - coreComp, cryptoComp := createMockComponentHolders() - args := getArgumentsShard(coreComp, cryptoComp) - args.Messenger = createShardStubTopicHandler("", factory.MiniBlocksTopic) - icf, _ := interceptorscontainer.NewShardInterceptorsContainerFactory(args) - - container, err := icf.Create() - - assert.Nil(t, container) - assert.Equal(t, errExpected, err) -} - -func TestShardInterceptorsContainerFactory_CreateRegisterMetachainHeadersShouldErr(t *testing.T) { - t.Parallel() - - coreComp, cryptoComp := createMockComponentHolders() - args := getArgumentsShard(coreComp, cryptoComp) - args.Messenger = createShardStubTopicHandler("", factory.MetachainBlocksTopic) - icf, _ := interceptorscontainer.NewShardInterceptorsContainerFactory(args) - - container, err := icf.Create() - - assert.Nil(t, container) - assert.Equal(t, errExpected, err) -} - -func TestShardInterceptorsContainerFactory_CreateRegisterTrieNodesShouldErr(t *testing.T) { - t.Parallel() - - coreComp, cryptoComp := createMockComponentHolders() - args := getArgumentsShard(coreComp, cryptoComp) - args.Messenger = createShardStubTopicHandler("", factory.AccountTrieNodesTopic) - icf, _ := interceptorscontainer.NewShardInterceptorsContainerFactory(args) - - container, err := icf.Create() - - assert.Nil(t, container) - assert.Equal(t, errExpected, err) + assert.Nil(t, mainContainer) + assert.Nil(t, fullArchiveContainer) + assert.Equal(t, errExpected, err) + } } func TestShardInterceptorsContainerFactory_NilSignaturesHandler(t *testing.T) { @@ -596,7 +549,7 @@ func TestShardInterceptorsContainerFactory_CreateShouldWork(t *testing.T) { coreComp, cryptoComp := createMockComponentHolders() args := getArgumentsShard(coreComp, cryptoComp) - args.Messenger = &mock.TopicHandlerStub{ + args.MainMessenger = &mock.TopicHandlerStub{ CreateTopicCalled: func(name string, createChannelForTopic bool) error { return nil }, @@ -608,66 +561,110 @@ func TestShardInterceptorsContainerFactory_CreateShouldWork(t *testing.T) { icf, _ := interceptorscontainer.NewShardInterceptorsContainerFactory(args) - container, err := icf.Create() + mainContainer, fullArchiveContainer, err := icf.Create() - assert.NotNil(t, container) + assert.NotNil(t, mainContainer) + assert.NotNil(t, fullArchiveContainer) assert.Nil(t, err) } func TestShardInterceptorsContainerFactory_With4ShardsShouldWork(t *testing.T) { t.Parallel() - noOfShards := 4 - - shardCoordinator := mock.NewMultipleShardsCoordinatorMock() - shardCoordinator.SetNoShards(uint32(noOfShards)) - shardCoordinator.CurrentShard = 1 - - nodesCoordinator := 
&shardingMocks.NodesCoordinatorMock{ - ShardId: 1, - ShardConsensusSize: 1, - MetaConsensusSize: 1, - NbShards: uint32(noOfShards), - } - - messenger := &mock.TopicHandlerStub{ - CreateTopicCalled: func(name string, createChannelForTopic bool) error { - return nil - }, - RegisterMessageProcessorCalled: func(topic string, identifier string, handler p2p.MessageProcessor) error { - return nil - }, - } - - coreComp, cryptoComp := createMockComponentHolders() - coreComp.AddrPubKeyConv = testscommon.NewPubkeyConverterMock(32) - args := getArgumentsShard(coreComp, cryptoComp) - args.ShardCoordinator = shardCoordinator - args.NodesCoordinator = nodesCoordinator - args.Messenger = messenger - args.PreferredPeersHolder = &p2pmocks.PeersHolderStub{} - - icf, _ := interceptorscontainer.NewShardInterceptorsContainerFactory(args) - - container, err := icf.Create() - - numInterceptorTxs := noOfShards + 1 - numInterceptorsUnsignedTxs := numInterceptorTxs - numInterceptorsRewardTxs := 1 - numInterceptorHeaders := 1 - numInterceptorMiniBlocks := noOfShards + 2 - numInterceptorMetachainHeaders := 1 - numInterceptorTrieNodes := 1 - numInterceptorPeerAuth := 1 - numInterceptorHeartbeat := 1 - numInterceptorsShardValidatorInfo := 1 - numInterceptorValidatorInfo := 1 - totalInterceptors := numInterceptorTxs + numInterceptorsUnsignedTxs + numInterceptorsRewardTxs + - numInterceptorHeaders + numInterceptorMiniBlocks + numInterceptorMetachainHeaders + numInterceptorTrieNodes + - numInterceptorPeerAuth + numInterceptorHeartbeat + numInterceptorsShardValidatorInfo + numInterceptorValidatorInfo - - assert.Nil(t, err) - assert.Equal(t, totalInterceptors, container.Len()) + t.Run("normal mode", func(t *testing.T) { + t.Parallel() + + noOfShards := 4 + + shardCoordinator := mock.NewMultipleShardsCoordinatorMock() + shardCoordinator.SetNoShards(uint32(noOfShards)) + shardCoordinator.CurrentShard = 1 + + nodesCoordinator := &shardingMocks.NodesCoordinatorMock{ + ShardId: 1, + ShardConsensusSize: 1, + MetaConsensusSize: 1, + NbShards: uint32(noOfShards), + } + + coreComp, cryptoComp := createMockComponentHolders() + coreComp.AddrPubKeyConv = testscommon.NewPubkeyConverterMock(32) + args := getArgumentsShard(coreComp, cryptoComp) + args.ShardCoordinator = shardCoordinator + args.NodesCoordinator = nodesCoordinator + args.PreferredPeersHolder = &p2pmocks.PeersHolderStub{} + + icf, _ := interceptorscontainer.NewShardInterceptorsContainerFactory(args) + + mainContainer, fullArchiveContainer, err := icf.Create() + + numInterceptorTxs := noOfShards + 1 + numInterceptorsUnsignedTxs := numInterceptorTxs + numInterceptorsRewardTxs := 1 + numInterceptorHeaders := 1 + numInterceptorMiniBlocks := noOfShards + 2 + numInterceptorMetachainHeaders := 1 + numInterceptorTrieNodes := 1 + numInterceptorPeerAuth := 1 + numInterceptorHeartbeat := 1 + numInterceptorsShardValidatorInfo := 1 + numInterceptorValidatorInfo := 1 + totalInterceptors := numInterceptorTxs + numInterceptorsUnsignedTxs + numInterceptorsRewardTxs + + numInterceptorHeaders + numInterceptorMiniBlocks + numInterceptorMetachainHeaders + numInterceptorTrieNodes + + numInterceptorPeerAuth + numInterceptorHeartbeat + numInterceptorsShardValidatorInfo + numInterceptorValidatorInfo + + assert.Nil(t, err) + assert.Equal(t, totalInterceptors, mainContainer.Len()) + assert.Equal(t, 0, fullArchiveContainer.Len()) + }) + + t.Run("full archive mode", func(t *testing.T) { + t.Parallel() + + noOfShards := 4 + + shardCoordinator := mock.NewMultipleShardsCoordinatorMock() + 
shardCoordinator.SetNoShards(uint32(noOfShards)) + shardCoordinator.CurrentShard = 1 + + nodesCoordinator := &shardingMocks.NodesCoordinatorMock{ + ShardId: 1, + ShardConsensusSize: 1, + MetaConsensusSize: 1, + NbShards: uint32(noOfShards), + } + + coreComp, cryptoComp := createMockComponentHolders() + coreComp.AddrPubKeyConv = testscommon.NewPubkeyConverterMock(32) + args := getArgumentsShard(coreComp, cryptoComp) + args.NodeOperationMode = p2p.FullArchiveMode + args.ShardCoordinator = shardCoordinator + args.NodesCoordinator = nodesCoordinator + args.PreferredPeersHolder = &p2pmocks.PeersHolderStub{} + + icf, _ := interceptorscontainer.NewShardInterceptorsContainerFactory(args) + + mainContainer, fullArchiveContainer, err := icf.Create() + + numInterceptorTxs := noOfShards + 1 + numInterceptorsUnsignedTxs := numInterceptorTxs + numInterceptorsRewardTxs := 1 + numInterceptorHeaders := 1 + numInterceptorMiniBlocks := noOfShards + 2 + numInterceptorMetachainHeaders := 1 + numInterceptorTrieNodes := 1 + numInterceptorPeerAuth := 1 + numInterceptorHeartbeat := 1 + numInterceptorsShardValidatorInfo := 1 + numInterceptorValidatorInfo := 1 + totalInterceptors := numInterceptorTxs + numInterceptorsUnsignedTxs + numInterceptorsRewardTxs + + numInterceptorHeaders + numInterceptorMiniBlocks + numInterceptorMetachainHeaders + numInterceptorTrieNodes + + numInterceptorPeerAuth + numInterceptorHeartbeat + numInterceptorsShardValidatorInfo + numInterceptorValidatorInfo + + assert.Nil(t, err) + assert.Equal(t, totalInterceptors, mainContainer.Len()) + assert.Equal(t, totalInterceptors-1, fullArchiveContainer.Len()) // no peerAuthentication needed + }) } func createMockComponentHolders() (*mock.CoreComponentsMock, *mock.CryptoComponentsMock) { @@ -711,7 +708,8 @@ func getArgumentsShard( Accounts: &stateMock.AccountsStub{}, ShardCoordinator: mock.NewOneShardCoordinatorMock(), NodesCoordinator: shardingMocks.NewNodesCoordinatorMock(), - Messenger: &mock.TopicHandlerStub{}, + MainMessenger: &mock.TopicHandlerStub{}, + FullArchiveMessenger: &mock.TopicHandlerStub{}, Store: createShardStore(), DataPool: createShardDataPools(), MaxTxNonceDeltaAllowed: maxTxNonceDeltaAllowed, @@ -731,7 +729,8 @@ func getArgumentsShard( PeerSignatureHandler: &mock.PeerSignatureHandlerStub{}, SignaturesHandler: &mock.SignaturesHandlerStub{}, HeartbeatExpiryTimespanInSec: 30, - PeerShardMapper: &p2pmocks.NetworkShardingCollectorStub{}, + MainPeerShardMapper: &p2pmocks.NetworkShardingCollectorStub{}, + FullArchivePeerShardMapper: &p2pmocks.NetworkShardingCollectorStub{}, HardforkTrigger: &testscommon.HardforkTriggerStub{}, } } diff --git a/process/interceptors/epochStartMetaBlockInterceptor.go b/process/interceptors/epochStartMetaBlockInterceptor.go index 3dd033d17ec..36bfc121988 100644 --- a/process/interceptors/epochStartMetaBlockInterceptor.go +++ b/process/interceptors/epochStartMetaBlockInterceptor.go @@ -56,7 +56,7 @@ func NewEpochStartMetaBlockInterceptor(args ArgsEpochStartMetaBlockInterceptor) } // ProcessReceivedMessage will handle received messages containing epoch start meta blocks -func (e *epochStartMetaBlockInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID) error { +func (e *epochStartMetaBlockInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID, _ p2p.MessageHandler) error { var epochStartMb block.MetaBlock err := e.marshalizer.Unmarshal(&epochStartMb, message.Data()) if err != nil { diff --git 
a/process/interceptors/epochStartMetaBlockInterceptor_test.go b/process/interceptors/epochStartMetaBlockInterceptor_test.go index 0d62cc89543..6958be19f8c 100644 --- a/process/interceptors/epochStartMetaBlockInterceptor_test.go +++ b/process/interceptors/epochStartMetaBlockInterceptor_test.go @@ -100,7 +100,7 @@ func TestEpochStartMetaBlockInterceptor_ProcessReceivedMessageUnmarshalError(t * require.NotNil(t, esmbi) message := &p2pmocks.P2PMessageMock{DataField: []byte("wrong meta block bytes")} - err := esmbi.ProcessReceivedMessage(message, "") + err := esmbi.ProcessReceivedMessage(message, "", &p2pmocks.MessengerStub{}) require.Error(t, err) } @@ -144,23 +144,23 @@ func TestEpochStartMetaBlockInterceptor_EntireFlowShouldWorkAndSetTheEpoch(t *te wrongMetaBlock := &block.MetaBlock{Epoch: 0} wrongMetaBlockBytes, _ := args.Marshalizer.Marshal(wrongMetaBlock) - err := esmbi.ProcessReceivedMessage(&p2pmocks.P2PMessageMock{DataField: metaBlockBytes}, "peer0") + err := esmbi.ProcessReceivedMessage(&p2pmocks.P2PMessageMock{DataField: metaBlockBytes}, "peer0", &p2pmocks.MessengerStub{}) require.NoError(t, err) require.False(t, wasCalled) - _ = esmbi.ProcessReceivedMessage(&p2pmocks.P2PMessageMock{DataField: metaBlockBytes}, "peer1") + _ = esmbi.ProcessReceivedMessage(&p2pmocks.P2PMessageMock{DataField: metaBlockBytes}, "peer1", &p2pmocks.MessengerStub{}) require.False(t, wasCalled) // send again from peer1 => should not be taken into account - _ = esmbi.ProcessReceivedMessage(&p2pmocks.P2PMessageMock{DataField: metaBlockBytes}, "peer1") + _ = esmbi.ProcessReceivedMessage(&p2pmocks.P2PMessageMock{DataField: metaBlockBytes}, "peer1", &p2pmocks.MessengerStub{}) require.False(t, wasCalled) // send another meta block - _ = esmbi.ProcessReceivedMessage(&p2pmocks.P2PMessageMock{DataField: wrongMetaBlockBytes}, "peer2") + _ = esmbi.ProcessReceivedMessage(&p2pmocks.P2PMessageMock{DataField: wrongMetaBlockBytes}, "peer2", &p2pmocks.MessengerStub{}) require.False(t, wasCalled) // send the last needed metablock from a new peer => should fetch the epoch - _ = esmbi.ProcessReceivedMessage(&p2pmocks.P2PMessageMock{DataField: metaBlockBytes}, "peer3") + _ = esmbi.ProcessReceivedMessage(&p2pmocks.P2PMessageMock{DataField: metaBlockBytes}, "peer3", &p2pmocks.MessengerStub{}) require.True(t, wasCalled) } diff --git a/process/interceptors/multiDataInterceptor.go b/process/interceptors/multiDataInterceptor.go index f785b931fc2..9e0197ea741 100644 --- a/process/interceptors/multiDataInterceptor.go +++ b/process/interceptors/multiDataInterceptor.go @@ -91,7 +91,7 @@ func NewMultiDataInterceptor(arg ArgMultiDataInterceptor) (*MultiDataInterceptor // ProcessReceivedMessage is the callback func from the p2p.Messenger and will be called each time a new message was received // (for the topic this validator was registered to) -func (mdi *MultiDataInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID) error { +func (mdi *MultiDataInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID, _ p2p.MessageHandler) error { err := mdi.preProcessMesage(message, fromConnectedPeer) if err != nil { return err diff --git a/process/interceptors/multiDataInterceptor_test.go b/process/interceptors/multiDataInterceptor_test.go index af8511f1f74..6ca244409b7 100644 --- a/process/interceptors/multiDataInterceptor_test.go +++ b/process/interceptors/multiDataInterceptor_test.go @@ -153,7 +153,7 @@ func TestMultiDataInterceptor_ProcessReceivedMessageNilMessageShouldErr(t *testi 
arg := createMockArgMultiDataInterceptor() mdi, _ := interceptors.NewMultiDataInterceptor(arg) - err := mdi.ProcessReceivedMessage(nil, fromConnectedPeerId) + err := mdi.ProcessReceivedMessage(nil, fromConnectedPeerId, &p2pmocks.MessengerStub{}) assert.Equal(t, process.ErrNilMessage, err) } @@ -188,7 +188,7 @@ func TestMultiDataInterceptor_ProcessReceivedMessageUnmarshalFailsShouldErr(t *t DataField: []byte("data to be processed"), PeerField: originatorPid, } - err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId) + err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) assert.Equal(t, errExpeced, err) assert.True(t, originatorBlackListed) @@ -209,7 +209,7 @@ func TestMultiDataInterceptor_ProcessReceivedMessageUnmarshalReturnsEmptySliceSh msg := &p2pmocks.P2PMessageMock{ DataField: []byte("data to be processed"), } - err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId) + err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) assert.Equal(t, process.ErrNoDataInMessage, err) } @@ -251,7 +251,7 @@ func TestMultiDataInterceptor_ProcessReceivedCreateFailsShouldErr(t *testing.T) DataField: dataField, PeerField: originatorPid, } - err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId) + err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) time.Sleep(time.Second) @@ -301,7 +301,7 @@ func TestMultiDataInterceptor_ProcessReceivedPartiallyCorrectDataShouldErr(t *te msg := &p2pmocks.P2PMessageMock{ DataField: dataField, } - err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId) + err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) time.Sleep(time.Second) @@ -360,7 +360,7 @@ func testProcessReceiveMessageMultiData(t *testing.T, isForCurrentShard bool, ex msg := &p2pmocks.P2PMessageMock{ DataField: dataField, } - err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId) + err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) time.Sleep(time.Second) @@ -401,7 +401,7 @@ func TestMultiDataInterceptor_ProcessReceivedMessageCheckBatchErrors(t *testing. 
msg := &p2pmocks.P2PMessageMock{ DataField: dataField, } - err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId) + err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) time.Sleep(time.Second) @@ -443,7 +443,7 @@ func TestMultiDataInterceptor_ProcessReceivedMessageCheckBatchIsIncomplete(t *te msg := &p2pmocks.P2PMessageMock{ DataField: dataField, } - err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId) + err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) time.Sleep(time.Second) @@ -496,7 +496,7 @@ func TestMultiDataInterceptor_ProcessReceivedMessageCheckBatchIsComplete(t *test msg := &p2pmocks.P2PMessageMock{ DataField: dataField, } - err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId) + err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) time.Sleep(time.Second) @@ -541,7 +541,7 @@ func TestMultiDataInterceptor_ProcessReceivedMessageWhitelistedShouldRetNil(t *t msg := &p2pmocks.P2PMessageMock{ DataField: dataField, } - err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId) + err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) time.Sleep(time.Second) @@ -611,7 +611,7 @@ func processReceivedMessageMultiDataInvalidVersion(t *testing.T, expectedErr err PeerField: originator, } - err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId) + err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) assert.Equal(t, expectedErr, err) assert.True(t, isFromConnectedPeerBlackListed) assert.True(t, isOriginatorBlackListed) @@ -686,7 +686,7 @@ func TestMultiDataInterceptor_ProcessReceivedMessageIsOriginatorNotOkButWhiteLis msg := &p2pmocks.P2PMessageMock{ DataField: dataField, } - err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId) + err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) time.Sleep(time.Second) @@ -699,7 +699,7 @@ func TestMultiDataInterceptor_ProcessReceivedMessageIsOriginatorNotOkButWhiteLis whiteListHandler.IsWhiteListedCalled = func(interceptedData process.InterceptedData) bool { return false } - err = mdi.ProcessReceivedMessage(msg, fromConnectedPeerId) + err = mdi.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) time.Sleep(time.Second) assert.Equal(t, err, errOriginator) diff --git a/process/interceptors/singleDataInterceptor.go b/process/interceptors/singleDataInterceptor.go index 1fee5493cb6..84f3296acd7 100644 --- a/process/interceptors/singleDataInterceptor.go +++ b/process/interceptors/singleDataInterceptor.go @@ -74,7 +74,7 @@ func NewSingleDataInterceptor(arg ArgSingleDataInterceptor) (*SingleDataIntercep // ProcessReceivedMessage is the callback func from the p2p.Messenger and will be called each time a new message was received // (for the topic this validator was registered to) -func (sdi *SingleDataInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID) error { +func (sdi *SingleDataInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID, _ p2p.MessageHandler) error { err := sdi.preProcessMesage(message, fromConnectedPeer) if err != nil { return err diff --git a/process/interceptors/singleDataInterceptor_test.go b/process/interceptors/singleDataInterceptor_test.go index c95a43db238..515c2a8724c 100644 --- a/process/interceptors/singleDataInterceptor_test.go +++ b/process/interceptors/singleDataInterceptor_test.go @@ -164,7 +164,7 @@ func 
TestSingleDataInterceptor_ProcessReceivedMessageNilMessageShouldErr(t *test arg := createMockArgSingleDataInterceptor() sdi, _ := interceptors.NewSingleDataInterceptor(arg) - err := sdi.ProcessReceivedMessage(nil, fromConnectedPeerId) + err := sdi.ProcessReceivedMessage(nil, fromConnectedPeerId, &p2pmocks.MessengerStub{}) assert.Equal(t, process.ErrNilMessage, err) } @@ -198,7 +198,7 @@ func TestSingleDataInterceptor_ProcessReceivedMessageFactoryCreationErrorShouldE DataField: []byte("data to be processed"), PeerField: originatorPid, } - err := sdi.ProcessReceivedMessage(msg, fromConnectedPeerId) + err := sdi.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) assert.Equal(t, errExpected, err) assert.True(t, originatorBlackListed) @@ -250,7 +250,7 @@ func testProcessReceiveMessage(t *testing.T, isForCurrentShard bool, validityErr msg := &p2pmocks.P2PMessageMock{ DataField: []byte("data to be processed"), } - err := sdi.ProcessReceivedMessage(msg, fromConnectedPeerId) + err := sdi.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) time.Sleep(time.Second) @@ -294,7 +294,7 @@ func TestSingleDataInterceptor_ProcessReceivedMessageWhitelistedShouldWork(t *te msg := &p2pmocks.P2PMessageMock{ DataField: []byte("data to be processed"), } - err := sdi.ProcessReceivedMessage(msg, fromConnectedPeerId) + err := sdi.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) time.Sleep(time.Second) @@ -362,7 +362,7 @@ func processReceivedMessageSingleDataInvalidVersion(t *testing.T, expectedErr er DataField: []byte("data to be processed"), PeerField: originator, } - err := sdi.ProcessReceivedMessage(msg, fromConnectedPeerId) + err := sdi.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) assert.Equal(t, expectedErr, err) assert.True(t, isFromConnectedPeerBlackListed) assert.True(t, isOriginatorBlackListed) @@ -407,7 +407,7 @@ func TestSingleDataInterceptor_ProcessReceivedMessageWithOriginator(t *testing.T msg := &p2pmocks.P2PMessageMock{ DataField: []byte("data to be processed"), } - err := sdi.ProcessReceivedMessage(msg, fromConnectedPeerId) + err := sdi.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) time.Sleep(time.Second) @@ -421,7 +421,7 @@ func TestSingleDataInterceptor_ProcessReceivedMessageWithOriginator(t *testing.T return false } - err = sdi.ProcessReceivedMessage(msg, fromConnectedPeerId) + err = sdi.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) time.Sleep(time.Second) diff --git a/process/interface.go b/process/interface.go index 773b9374edd..d052c305899 100644 --- a/process/interface.go +++ b/process/interface.go @@ -398,7 +398,7 @@ type InterceptorsContainer interface { // InterceptorsContainerFactory defines the functionality to create an interceptors container type InterceptorsContainerFactory interface { - Create() (InterceptorsContainer, error) + Create() (InterceptorsContainer, InterceptorsContainer, error) IsInterfaceNil() bool } @@ -543,7 +543,7 @@ type BlockChainHookHandler interface { // Interceptor defines what a data interceptor should do // It should also adhere to the p2p.MessageProcessor interface so it can wire to a p2p.Messenger type Interceptor interface { - ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID) error + ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID, source p2p.MessageHandler) error SetInterceptedDebugHandler(handler InterceptedDebugger) error 
RegisterHandler(handler func(topic string, hash []byte, data interface{})) Close() error diff --git a/process/mock/topicHandlerStub.go b/process/mock/topicHandlerStub.go index 7484a5f677d..7578b383de3 100644 --- a/process/mock/topicHandlerStub.go +++ b/process/mock/topicHandlerStub.go @@ -14,30 +14,39 @@ type TopicHandlerStub struct { } // HasTopic - -func (ths *TopicHandlerStub) HasTopic(name string) bool { - return ths.HasTopicCalled(name) +func (stub *TopicHandlerStub) HasTopic(name string) bool { + if stub.HasTopicCalled != nil { + return stub.HasTopicCalled(name) + } + return false } // CreateTopic - -func (ths *TopicHandlerStub) CreateTopic(name string, createChannelForTopic bool) error { - return ths.CreateTopicCalled(name, createChannelForTopic) +func (stub *TopicHandlerStub) CreateTopic(name string, createChannelForTopic bool) error { + if stub.CreateTopicCalled != nil { + return stub.CreateTopicCalled(name, createChannelForTopic) + } + return nil } // RegisterMessageProcessor - -func (ths *TopicHandlerStub) RegisterMessageProcessor(topic string, identifier string, handler p2p.MessageProcessor) error { - return ths.RegisterMessageProcessorCalled(topic, identifier, handler) +func (stub *TopicHandlerStub) RegisterMessageProcessor(topic string, identifier string, handler p2p.MessageProcessor) error { + if stub.RegisterMessageProcessorCalled != nil { + return stub.RegisterMessageProcessorCalled(topic, identifier, handler) + } + return nil } // ID - -func (ths *TopicHandlerStub) ID() core.PeerID { - if ths.IDCalled != nil { - return ths.IDCalled() +func (stub *TopicHandlerStub) ID() core.PeerID { + if stub.IDCalled != nil { + return stub.IDCalled() } return "peer ID" } // IsInterfaceNil returns true if there is no value under the interface -func (ths *TopicHandlerStub) IsInterfaceNil() bool { - return ths == nil +func (stub *TopicHandlerStub) IsInterfaceNil() bool { + return stub == nil } diff --git a/process/throttle/antiflood/factory/p2pAntifloodAndBlacklistFactory.go b/process/throttle/antiflood/factory/p2pAntifloodAndBlacklistFactory.go index e77e3f03494..bfbf29617c6 100644 --- a/process/throttle/antiflood/factory/p2pAntifloodAndBlacklistFactory.go +++ b/process/throttle/antiflood/factory/p2pAntifloodAndBlacklistFactory.go @@ -8,6 +8,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/config" + antifloodDebug "github.com/multiversx/mx-chain-go/debug/antiflood" "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/throttle/antiflood" @@ -130,6 +131,18 @@ func initP2PAntiFloodComponents( return nil, err } + if mainConfig.Debug.Antiflood.Enabled { + debugger, errDebugger := antifloodDebug.NewAntifloodDebugger(mainConfig.Debug.Antiflood) + if errDebugger != nil { + return nil, errDebugger + } + + err = p2pAntiflood.SetDebugger(debugger) + if err != nil { + return nil, err + } + } + startResettingTopicFloodPreventer(ctx, topicFloodPreventer, topicMaxMessages) startSweepingTimeCaches(ctx, p2pPeerBlackList, publicKeysCache) diff --git a/process/transaction/shardProcess_test.go b/process/transaction/shardProcess_test.go index 435f5bfc02e..b17c99e3f0b 100644 --- a/process/transaction/shardProcess_test.go +++ b/process/transaction/shardProcess_test.go @@ -3258,37 +3258,43 @@ func TestTxProcessor_shouldIncreaseNonce(t *testing.T) { func TestTxProcessor_AddNonExecutableLog(t *testing.T) { t.Parallel() - args := 
createArgsForTxProcessor() - sender := []byte("sender") - relayer := []byte("relayer") - originalTx := &transaction.Transaction{ - SndAddr: relayer, - RcvAddr: sender, - } - originalTxHash, errCalculateHash := core.CalculateHash(args.Marshalizer, args.Hasher, originalTx) - assert.Nil(t, errCalculateHash) - t.Run("not a non-executable error should not record log", func(t *testing.T) { t.Parallel() - argsLocal := args - argsLocal.TxLogsProcessor = &mock.TxLogsProcessorStub{ + args := createArgsForTxProcessor() + sender := []byte("sender") + relayer := []byte("relayer") + originalTx := &transaction.Transaction{ + SndAddr: relayer, + RcvAddr: sender, + } + originalTxHash, err := core.CalculateHash(args.Marshalizer, args.Hasher, originalTx) + assert.Nil(t, err) + args.TxLogsProcessor = &mock.TxLogsProcessorStub{ SaveLogCalled: func(txHash []byte, tx data.TransactionHandler, vmLogs []*vmcommon.LogEntry) error { assert.Fail(t, "should have not called SaveLog") return nil }, } - txProc, _ := txproc.NewTxProcessor(argsLocal) - err := txProc.AddNonExecutableLog(errors.New("random error"), originalTxHash, originalTx) + txProc, _ := txproc.NewTxProcessor(args) + err = txProc.AddNonExecutableLog(errors.New("random error"), originalTxHash, originalTx) assert.Nil(t, err) }) t.Run("is non executable tx error should record log", func(t *testing.T) { t.Parallel() - argsLocal := args + args := createArgsForTxProcessor() + sender := []byte("sender") + relayer := []byte("relayer") + originalTx := &transaction.Transaction{ + SndAddr: relayer, + RcvAddr: sender, + } + originalTxHash, err := core.CalculateHash(args.Marshalizer, args.Hasher, originalTx) + assert.Nil(t, err) numLogsSaved := 0 - argsLocal.TxLogsProcessor = &mock.TxLogsProcessorStub{ + args.TxLogsProcessor = &mock.TxLogsProcessorStub{ SaveLogCalled: func(txHash []byte, tx data.TransactionHandler, vmLogs []*vmcommon.LogEntry) error { assert.Equal(t, originalTxHash, txHash) assert.Equal(t, originalTx, tx) @@ -3304,8 +3310,8 @@ func TestTxProcessor_AddNonExecutableLog(t *testing.T) { }, } - txProc, _ := txproc.NewTxProcessor(argsLocal) - err := txProc.AddNonExecutableLog(process.ErrLowerNonceInTransaction, originalTxHash, originalTx) + txProc, _ := txproc.NewTxProcessor(args) + err = txProc.AddNonExecutableLog(process.ErrLowerNonceInTransaction, originalTxHash, originalTx) assert.Nil(t, err) err = txProc.AddNonExecutableLog(process.ErrHigherNonceInTransaction, originalTxHash, originalTx) diff --git a/storage/disabled/cache.go b/storage/disabled/cache.go new file mode 100644 index 00000000000..685e8046c79 --- /dev/null +++ b/storage/disabled/cache.go @@ -0,0 +1,80 @@ +package disabled + +type cache struct { +} + +// NewCache returns a new disabled Cacher implementation +func NewCache() *cache { + return &cache{} +} + +// Clear does nothing as it is disabled +func (c *cache) Clear() { +} + +// Put returns false as it is disabled +func (c *cache) Put(_ []byte, _ interface{}, _ int) (evicted bool) { + return false +} + +// Get returns nil and false as it is disabled +func (c *cache) Get(_ []byte) (value interface{}, ok bool) { + return nil, false +} + +// Has returns false as it is disabled +func (c *cache) Has(_ []byte) bool { + return false +} + +// Peek returns nil and false as it is disabled +func (c *cache) Peek(_ []byte) (value interface{}, ok bool) { + return nil, false +} + +// HasOrAdd returns false and false as it is disabled +func (c *cache) HasOrAdd(_ []byte, _ interface{}, _ int) (has, added bool) { + return false, false +} + +// Remove does 
nothing as it is disabled +func (c *cache) Remove(_ []byte) { +} + +// Keys returns an empty slice as it is disabled +func (c *cache) Keys() [][]byte { + return make([][]byte, 0) +} + +// Len returns 0 as it is disabled +func (c *cache) Len() int { + return 0 +} + +// SizeInBytesContained returns 0 as it is disabled +func (c *cache) SizeInBytesContained() uint64 { + return 0 +} + +// MaxSize returns 0 as it is disabled +func (c *cache) MaxSize() int { + return 0 +} + +// RegisterHandler does nothing as it is disabled +func (c *cache) RegisterHandler(_ func(key []byte, value interface{}), _ string) { +} + +// UnRegisterHandler does nothing as it is disabled +func (c *cache) UnRegisterHandler(_ string) { +} + +// Close returns nil as it is disabled +func (c *cache) Close() error { + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (c *cache) IsInterfaceNil() bool { + return c == nil +} diff --git a/testscommon/components/components.go b/testscommon/components/components.go index 100c2076959..8b3f87b5268 100644 --- a/testscommon/components/components.go +++ b/testscommon/components/components.go @@ -261,9 +261,6 @@ func GetNetworkFactoryArgs() networkComp.NetworkComponentsFactoryArgs { MaxCrossShardObservers: 10, MaxSeeders: 2, Type: "NilListSharder", - AdditionalConnections: p2pConfig.AdditionalConnectionsConfig{ - MaxFullHistoryObservers: 10, - }, }, } @@ -295,10 +292,11 @@ func GetNetworkFactoryArgs() networkComp.NetworkComponentsFactoryArgs { cryptoCompMock := GetDefaultCryptoComponents() return networkComp.NetworkComponentsFactoryArgs{ - P2pConfig: p2pCfg, - MainConfig: mainConfig, - StatusHandler: appStatusHandler, - Marshalizer: &mock.MarshalizerMock{}, + MainP2pConfig: p2pCfg, + NodeOperationMode: p2p.NormalOperation, + MainConfig: mainConfig, + StatusHandler: appStatusHandler, + Marshalizer: &mock.MarshalizerMock{}, RatingsConfig: config.RatingsConfig{ General: config.General{}, ShardChain: config.ShardChain{}, @@ -312,9 +310,8 @@ func GetNetworkFactoryArgs() networkComp.NetworkComponentsFactoryArgs { UnitValue: 1.0, }, }, - Syncer: &p2pFactory.LocalSyncTimer{}, - NodeOperationMode: p2p.NormalOperation, - CryptoComponents: cryptoCompMock, + Syncer: &p2pFactory.LocalSyncTimer{}, + CryptoComponents: cryptoCompMock, } } diff --git a/testscommon/components/default.go b/testscommon/components/default.go index 009992ac237..c39baf24385 100644 --- a/testscommon/components/default.go +++ b/testscommon/components/default.go @@ -138,7 +138,8 @@ func GetDefaultProcessComponents(shardCoordinator sharding.Coordinator) *mock.Pr ReqHandler: &testscommon.RequestHandlerStub{}, TxLogsProcess: &mock.TxLogProcessorMock{}, HeaderConstructValidator: &mock.HeaderValidatorStub{}, - PeerMapper: &p2pmocks.NetworkShardingCollectorStub{}, + MainPeerMapper: &p2pmocks.NetworkShardingCollectorStub{}, + FullArchivePeerMapper: &p2pmocks.NetworkShardingCollectorStub{}, FallbackHdrValidator: &testscommon.FallBackHeaderValidatorStub{}, NodeRedundancyHandlerInternal: &mock.RedundancyHandlerStub{ IsRedundancyNodeCalled: func() bool { diff --git a/testscommon/interceptorStub.go b/testscommon/interceptorStub.go index db346803d2a..54fc5be30af 100644 --- a/testscommon/interceptorStub.go +++ b/testscommon/interceptorStub.go @@ -15,7 +15,7 @@ type InterceptorStub struct { } // ProcessReceivedMessage - -func (is *InterceptorStub) ProcessReceivedMessage(message p2p.MessageP2P, _ core.PeerID) error { +func (is *InterceptorStub) ProcessReceivedMessage(message p2p.MessageP2P, _ 
core.PeerID, _ p2p.MessageHandler) error { if is.ProcessReceivedMessageCalled != nil { return is.ProcessReceivedMessageCalled(message) } diff --git a/testscommon/p2pmocks/messengerStub.go b/testscommon/p2pmocks/messengerStub.go index a1c2cefd481..51721307898 100644 --- a/testscommon/p2pmocks/messengerStub.go +++ b/testscommon/p2pmocks/messengerStub.go @@ -9,7 +9,6 @@ import ( // MessengerStub - type MessengerStub struct { - ConnectedFullHistoryPeersOnTopicCalled func(topic string) []core.PeerID IDCalled func() core.PeerID CloseCalled func() error CreateTopicCalled func(name string, createChannelForTopic bool) error @@ -45,15 +44,7 @@ type MessengerStub struct { BroadcastUsingPrivateKeyCalled func(topic string, buff []byte, pid core.PeerID, skBytes []byte) BroadcastOnChannelUsingPrivateKeyCalled func(channel string, topic string, buff []byte, pid core.PeerID, skBytes []byte) SignUsingPrivateKeyCalled func(skBytes []byte, payload []byte) ([]byte, error) -} - -// ConnectedFullHistoryPeersOnTopic - -func (ms *MessengerStub) ConnectedFullHistoryPeersOnTopic(topic string) []core.PeerID { - if ms.ConnectedFullHistoryPeersOnTopicCalled != nil { - return ms.ConnectedFullHistoryPeersOnTopicCalled(topic) - } - - return make([]core.PeerID, 0) + ProcessReceivedMessageCalled func(message p2p.MessageP2P, fromConnectedPeer core.PeerID, source p2p.MessageHandler) error } // ID - @@ -361,6 +352,14 @@ func (ms *MessengerStub) SignUsingPrivateKey(skBytes []byte, payload []byte) ([] return make([]byte, 0), nil } +// ProcessReceivedMessage - +func (ms *MessengerStub) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID, source p2p.MessageHandler) error { + if ms.ProcessReceivedMessageCalled != nil { + return ms.ProcessReceivedMessageCalled(message, fromConnectedPeer, source) + } + return nil +} + // IsInterfaceNil returns true if there is no value under the interface func (ms *MessengerStub) IsInterfaceNil() bool { return ms == nil diff --git a/testscommon/p2pmocks/peersRatingMonitorStub.go b/testscommon/p2pmocks/peersRatingMonitorStub.go index 22325a03d05..35e9cc83304 100644 --- a/testscommon/p2pmocks/peersRatingMonitorStub.go +++ b/testscommon/p2pmocks/peersRatingMonitorStub.go @@ -1,16 +1,18 @@ package p2pmocks +import "github.com/multiversx/mx-chain-go/p2p" + // PeersRatingMonitorStub - type PeersRatingMonitorStub struct { - GetConnectedPeersRatingsCalled func() string + GetConnectedPeersRatingsCalled func(connectionsHandler p2p.ConnectionsHandler) (string, error) } // GetConnectedPeersRatings - -func (stub *PeersRatingMonitorStub) GetConnectedPeersRatings() string { +func (stub *PeersRatingMonitorStub) GetConnectedPeersRatings(connectionsHandler p2p.ConnectionsHandler) (string, error) { if stub.GetConnectedPeersRatingsCalled != nil { - return stub.GetConnectedPeersRatingsCalled() + return stub.GetConnectedPeersRatingsCalled(connectionsHandler) } - return "" + return "", nil } // IsInterfaceNil - diff --git a/testscommon/realConfigsHandling.go b/testscommon/realConfigsHandling.go index 2041d9f7375..c69f5471b30 100644 --- a/testscommon/realConfigsHandling.go +++ b/testscommon/realConfigsHandling.go @@ -42,7 +42,10 @@ func CreateTestConfigs(tb testing.TB, originalConfigsPath string) *config.Config prefsConfig, err := common.LoadPreferencesConfig(path.Join(newConfigsPath, "prefs.toml")) require.Nil(tb, err) - p2pConfig, err := common.LoadP2PConfig(path.Join(newConfigsPath, "p2p.toml")) + mainP2PConfig, err := common.LoadP2PConfig(path.Join(newConfigsPath, "p2p.toml")) + require.Nil(tb, 
err) + + fullArchiveP2PConfig, err := common.LoadP2PConfig(path.Join(newConfigsPath, "fullArchiveP2P.toml")) require.Nil(tb, err) externalConfig, err := common.LoadExternalConfig(path.Join(newConfigsPath, "external.toml")) @@ -58,18 +61,21 @@ func CreateTestConfigs(tb testing.TB, originalConfigsPath string) *config.Config require.Nil(tb, err) // make the node pass the network wait constraints - p2pConfig.Node.MinNumPeersToWaitForOnBootstrap = 0 - p2pConfig.Node.ThresholdMinConnectedPeers = 0 + mainP2PConfig.Node.MinNumPeersToWaitForOnBootstrap = 0 + mainP2PConfig.Node.ThresholdMinConnectedPeers = 0 + fullArchiveP2PConfig.Node.MinNumPeersToWaitForOnBootstrap = 0 + fullArchiveP2PConfig.Node.ThresholdMinConnectedPeers = 0 return &config.Configs{ - GeneralConfig: generalConfig, - ApiRoutesConfig: apiConfig, - EconomicsConfig: economicsConfig, - SystemSCConfig: systemSCConfig, - RatingsConfig: ratingsConfig, - PreferencesConfig: prefsConfig, - ExternalConfig: externalConfig, - P2pConfig: p2pConfig, + GeneralConfig: generalConfig, + ApiRoutesConfig: apiConfig, + EconomicsConfig: economicsConfig, + SystemSCConfig: systemSCConfig, + RatingsConfig: ratingsConfig, + PreferencesConfig: prefsConfig, + ExternalConfig: externalConfig, + MainP2pConfig: mainP2PConfig, + FullArchiveP2pConfig: fullArchiveP2PConfig, FlagsConfig: &config.ContextFlagsConfig{ WorkingDir: tempDir, NoKeyProvided: true, diff --git a/update/errors.go b/update/errors.go index 938ae2020ee..8a23b6d2dce 100644 --- a/update/errors.go +++ b/update/errors.go @@ -295,3 +295,6 @@ var ErrNilAddressConverter = errors.New("nil address converter") // ErrNilEnableEpochsHandler signals that a nil enable epochs handler was provided var ErrNilEnableEpochsHandler = errors.New("nil enable epochs handler") + +// ErrNilNetworkComponents signals that a nil network components instance was provided +var ErrNilNetworkComponents = errors.New("nil network components") diff --git a/update/factory/exportHandlerFactory.go b/update/factory/exportHandlerFactory.go index bb80be0101a..7fde050935b 100644 --- a/update/factory/exportHandlerFactory.go +++ b/update/factory/exportHandlerFactory.go @@ -15,6 +15,7 @@ import ( "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/epochStart/notifier" "github.com/multiversx/mx-chain-go/epochStart/shardchain" + mxFactory "github.com/multiversx/mx-chain-go/factory" "github.com/multiversx/mx-chain-go/genesis/process/disabled" "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/process" @@ -37,79 +38,77 @@ var log = logger.GetOrCreate("update/factory") // ArgsExporter is the argument structure to create a new exporter type ArgsExporter struct { - CoreComponents process.CoreComponentsHolder - CryptoComponents process.CryptoComponentsHolder - StatusCoreComponents process.StatusCoreComponentsHolder - HeaderValidator epochStart.HeaderValidator - DataPool dataRetriever.PoolsHolder - StorageService dataRetriever.StorageService - RequestHandler process.RequestHandler - ShardCoordinator sharding.Coordinator - Messenger p2p.Messenger - ActiveAccountsDBs map[state.AccountsDbIdentifier]state.AccountsAdapter - ExistingResolvers dataRetriever.ResolversContainer - ExistingRequesters dataRetriever.RequestersContainer - ExportFolder string - ExportTriesStorageConfig config.StorageConfig - ExportStateStorageConfig config.StorageConfig - ExportStateKeysConfig config.StorageConfig - MaxTrieLevelInMemory uint - WhiteListHandler process.WhiteListHandler - WhiteListerVerifiedTxs 
process.WhiteListHandler - InterceptorsContainer process.InterceptorsContainer - NodesCoordinator nodesCoordinator.NodesCoordinator - HeaderSigVerifier process.InterceptedHeaderSigVerifier - HeaderIntegrityVerifier process.HeaderIntegrityVerifier - ValidityAttester process.ValidityAttester - InputAntifloodHandler process.P2PAntifloodHandler - OutputAntifloodHandler process.P2PAntifloodHandler - RoundHandler process.RoundHandler - PeersRatingHandler dataRetriever.PeersRatingHandler - InterceptorDebugConfig config.InterceptorResolverDebugConfig - MaxHardCapForMissingNodes int - NumConcurrentTrieSyncers int - TrieSyncerVersion int - CheckNodesOnDisk bool + CoreComponents process.CoreComponentsHolder + CryptoComponents process.CryptoComponentsHolder + StatusCoreComponents process.StatusCoreComponentsHolder + NetworkComponents mxFactory.NetworkComponentsHolder + HeaderValidator epochStart.HeaderValidator + DataPool dataRetriever.PoolsHolder + StorageService dataRetriever.StorageService + RequestHandler process.RequestHandler + ShardCoordinator sharding.Coordinator + ActiveAccountsDBs map[state.AccountsDbIdentifier]state.AccountsAdapter + ExistingResolvers dataRetriever.ResolversContainer + ExistingRequesters dataRetriever.RequestersContainer + ExportFolder string + ExportTriesStorageConfig config.StorageConfig + ExportStateStorageConfig config.StorageConfig + ExportStateKeysConfig config.StorageConfig + MaxTrieLevelInMemory uint + WhiteListHandler process.WhiteListHandler + WhiteListerVerifiedTxs process.WhiteListHandler + MainInterceptorsContainer process.InterceptorsContainer + FullArchiveInterceptorsContainer process.InterceptorsContainer + NodesCoordinator nodesCoordinator.NodesCoordinator + HeaderSigVerifier process.InterceptedHeaderSigVerifier + HeaderIntegrityVerifier process.HeaderIntegrityVerifier + ValidityAttester process.ValidityAttester + RoundHandler process.RoundHandler + InterceptorDebugConfig config.InterceptorResolverDebugConfig + MaxHardCapForMissingNodes int + NumConcurrentTrieSyncers int + TrieSyncerVersion int + CheckNodesOnDisk bool + NodeOperationMode p2p.NodeOperation } type exportHandlerFactory struct { - CoreComponents process.CoreComponentsHolder - CryptoComponents process.CryptoComponentsHolder - statusCoreComponents process.StatusCoreComponentsHolder - headerValidator epochStart.HeaderValidator - dataPool dataRetriever.PoolsHolder - storageService dataRetriever.StorageService - requestHandler process.RequestHandler - shardCoordinator sharding.Coordinator - messenger p2p.Messenger - activeAccountsDBs map[state.AccountsDbIdentifier]state.AccountsAdapter - exportFolder string - exportTriesStorageConfig config.StorageConfig - exportStateStorageConfig config.StorageConfig - exportStateKeysConfig config.StorageConfig - maxTrieLevelInMemory uint - whiteListHandler process.WhiteListHandler - whiteListerVerifiedTxs process.WhiteListHandler - interceptorsContainer process.InterceptorsContainer - existingResolvers dataRetriever.ResolversContainer - existingRequesters dataRetriever.RequestersContainer - epochStartTrigger epochStart.TriggerHandler - accounts state.AccountsAdapter - nodesCoordinator nodesCoordinator.NodesCoordinator - headerSigVerifier process.InterceptedHeaderSigVerifier - headerIntegrityVerifier process.HeaderIntegrityVerifier - validityAttester process.ValidityAttester - resolverContainer dataRetriever.ResolversContainer - requestersContainer dataRetriever.RequestersContainer - inputAntifloodHandler process.P2PAntifloodHandler - outputAntifloodHandler 
process.P2PAntifloodHandler - roundHandler process.RoundHandler - peersRatingHandler dataRetriever.PeersRatingHandler - interceptorDebugConfig config.InterceptorResolverDebugConfig - maxHardCapForMissingNodes int - numConcurrentTrieSyncers int - trieSyncerVersion int - checkNodesOnDisk bool + coreComponents process.CoreComponentsHolder + cryptoComponents process.CryptoComponentsHolder + statusCoreComponents process.StatusCoreComponentsHolder + networkComponents mxFactory.NetworkComponentsHolder + headerValidator epochStart.HeaderValidator + dataPool dataRetriever.PoolsHolder + storageService dataRetriever.StorageService + requestHandler process.RequestHandler + shardCoordinator sharding.Coordinator + activeAccountsDBs map[state.AccountsDbIdentifier]state.AccountsAdapter + exportFolder string + exportTriesStorageConfig config.StorageConfig + exportStateStorageConfig config.StorageConfig + exportStateKeysConfig config.StorageConfig + maxTrieLevelInMemory uint + whiteListHandler process.WhiteListHandler + whiteListerVerifiedTxs process.WhiteListHandler + mainInterceptorsContainer process.InterceptorsContainer + fullArchiveInterceptorsContainer process.InterceptorsContainer + existingResolvers dataRetriever.ResolversContainer + existingRequesters dataRetriever.RequestersContainer + epochStartTrigger epochStart.TriggerHandler + accounts state.AccountsAdapter + nodesCoordinator nodesCoordinator.NodesCoordinator + headerSigVerifier process.InterceptedHeaderSigVerifier + headerIntegrityVerifier process.HeaderIntegrityVerifier + validityAttester process.ValidityAttester + resolverContainer dataRetriever.ResolversContainer + requestersContainer dataRetriever.RequestersContainer + roundHandler process.RoundHandler + interceptorDebugConfig config.InterceptorResolverDebugConfig + maxHardCapForMissingNodes int + numConcurrentTrieSyncers int + trieSyncerVersion int + checkNodesOnDisk bool + nodeOperationMode p2p.NodeOperation } // NewExportHandlerFactory creates an exporter factory @@ -120,6 +119,9 @@ func NewExportHandlerFactory(args ArgsExporter) (*exportHandlerFactory, error) { if check.IfNil(args.CoreComponents) { return nil, update.ErrNilCoreComponents } + if check.IfNil(args.NetworkComponents) { + return nil, update.ErrNilNetworkComponents + } if check.IfNil(args.CryptoComponents) { return nil, update.ErrNilCryptoComponents } @@ -144,7 +146,7 @@ func NewExportHandlerFactory(args ArgsExporter) (*exportHandlerFactory, error) { if check.IfNil(args.RequestHandler) { return nil, update.ErrNilRequestHandler } - if check.IfNil(args.Messenger) { + if check.IfNil(args.NetworkComponents) { return nil, update.ErrNilMessenger } if args.ActiveAccountsDBs == nil { @@ -156,8 +158,11 @@ func NewExportHandlerFactory(args ArgsExporter) (*exportHandlerFactory, error) { if check.IfNil(args.WhiteListerVerifiedTxs) { return nil, update.ErrNilWhiteListHandler } - if check.IfNil(args.InterceptorsContainer) { - return nil, update.ErrNilInterceptorsContainer + if check.IfNil(args.MainInterceptorsContainer) { + return nil, fmt.Errorf("%w on main network", update.ErrNilInterceptorsContainer) + } + if check.IfNil(args.FullArchiveInterceptorsContainer) { + return nil, fmt.Errorf("%w on full archive network", update.ErrNilInterceptorsContainer) } if check.IfNil(args.ExistingResolvers) { return nil, update.ErrNilResolverContainer @@ -205,18 +210,9 @@ func NewExportHandlerFactory(args ArgsExporter) (*exportHandlerFactory, error) { if check.IfNil(args.CoreComponents.TxMarshalizer()) { return nil, update.ErrNilMarshalizer } - if 
check.IfNil(args.InputAntifloodHandler) { - return nil, update.ErrNilAntiFloodHandler - } - if check.IfNil(args.OutputAntifloodHandler) { - return nil, update.ErrNilAntiFloodHandler - } if check.IfNil(args.RoundHandler) { return nil, update.ErrNilRoundHandler } - if check.IfNil(args.PeersRatingHandler) { - return nil, update.ErrNilPeersRatingHandler - } if check.IfNil(args.CoreComponents.TxSignHasher()) { return nil, update.ErrNilHasher } @@ -238,40 +234,39 @@ func NewExportHandlerFactory(args ArgsExporter) (*exportHandlerFactory, error) { } e := &exportHandlerFactory{ - CoreComponents: args.CoreComponents, - CryptoComponents: args.CryptoComponents, - headerValidator: args.HeaderValidator, - dataPool: args.DataPool, - storageService: args.StorageService, - requestHandler: args.RequestHandler, - shardCoordinator: args.ShardCoordinator, - messenger: args.Messenger, - activeAccountsDBs: args.ActiveAccountsDBs, - exportFolder: args.ExportFolder, - exportTriesStorageConfig: args.ExportTriesStorageConfig, - exportStateStorageConfig: args.ExportStateStorageConfig, - exportStateKeysConfig: args.ExportStateKeysConfig, - interceptorsContainer: args.InterceptorsContainer, - whiteListHandler: args.WhiteListHandler, - whiteListerVerifiedTxs: args.WhiteListerVerifiedTxs, - existingResolvers: args.ExistingResolvers, - existingRequesters: args.ExistingRequesters, - accounts: args.ActiveAccountsDBs[state.UserAccountsState], - nodesCoordinator: args.NodesCoordinator, - headerSigVerifier: args.HeaderSigVerifier, - headerIntegrityVerifier: args.HeaderIntegrityVerifier, - validityAttester: args.ValidityAttester, - inputAntifloodHandler: args.InputAntifloodHandler, - outputAntifloodHandler: args.OutputAntifloodHandler, - maxTrieLevelInMemory: args.MaxTrieLevelInMemory, - roundHandler: args.RoundHandler, - peersRatingHandler: args.PeersRatingHandler, - interceptorDebugConfig: args.InterceptorDebugConfig, - maxHardCapForMissingNodes: args.MaxHardCapForMissingNodes, - numConcurrentTrieSyncers: args.NumConcurrentTrieSyncers, - trieSyncerVersion: args.TrieSyncerVersion, - checkNodesOnDisk: args.CheckNodesOnDisk, - statusCoreComponents: args.StatusCoreComponents, + coreComponents: args.CoreComponents, + cryptoComponents: args.CryptoComponents, + networkComponents: args.NetworkComponents, + headerValidator: args.HeaderValidator, + dataPool: args.DataPool, + storageService: args.StorageService, + requestHandler: args.RequestHandler, + shardCoordinator: args.ShardCoordinator, + activeAccountsDBs: args.ActiveAccountsDBs, + exportFolder: args.ExportFolder, + exportTriesStorageConfig: args.ExportTriesStorageConfig, + exportStateStorageConfig: args.ExportStateStorageConfig, + exportStateKeysConfig: args.ExportStateKeysConfig, + mainInterceptorsContainer: args.MainInterceptorsContainer, + fullArchiveInterceptorsContainer: args.FullArchiveInterceptorsContainer, + whiteListHandler: args.WhiteListHandler, + whiteListerVerifiedTxs: args.WhiteListerVerifiedTxs, + existingResolvers: args.ExistingResolvers, + existingRequesters: args.ExistingRequesters, + accounts: args.ActiveAccountsDBs[state.UserAccountsState], + nodesCoordinator: args.NodesCoordinator, + headerSigVerifier: args.HeaderSigVerifier, + headerIntegrityVerifier: args.HeaderIntegrityVerifier, + validityAttester: args.ValidityAttester, + maxTrieLevelInMemory: args.MaxTrieLevelInMemory, + roundHandler: args.RoundHandler, + interceptorDebugConfig: args.InterceptorDebugConfig, + maxHardCapForMissingNodes: args.MaxHardCapForMissingNodes, + numConcurrentTrieSyncers: 
args.NumConcurrentTrieSyncers, + trieSyncerVersion: args.TrieSyncerVersion, + checkNodesOnDisk: args.CheckNodesOnDisk, + statusCoreComponents: args.StatusCoreComponents, + nodeOperationMode: args.NodeOperationMode, } return e, nil @@ -300,10 +295,10 @@ func (e *exportHandlerFactory) Create() (update.ExportHandler, error) { return nil, err } argsEpochTrigger := shardchain.ArgsShardEpochStartTrigger{ - Marshalizer: e.CoreComponents.InternalMarshalizer(), - Hasher: e.CoreComponents.Hasher(), + Marshalizer: e.coreComponents.InternalMarshalizer(), + Hasher: e.coreComponents.Hasher(), HeaderValidator: e.headerValidator, - Uint64Converter: e.CoreComponents.Uint64ByteSliceConverter(), + Uint64Converter: e.coreComponents.Uint64ByteSliceConverter(), DataPool: e.dataPool, Storage: e.storageService, RequestHandler: e.requestHandler, @@ -314,7 +309,7 @@ func (e *exportHandlerFactory) Create() (update.ExportHandler, error) { PeerMiniBlocksSyncer: peerMiniBlocksSyncer, RoundHandler: e.roundHandler, AppStatusHandler: e.statusCoreComponents.AppStatusHandler(), - EnableEpochsHandler: e.CoreComponents.EnableEpochsHandler(), + EnableEpochsHandler: e.coreComponents.EnableEpochsHandler(), } epochHandler, err := shardchain.NewEpochStartTrigger(&argsEpochTrigger) if err != nil { @@ -324,11 +319,11 @@ func (e *exportHandlerFactory) Create() (update.ExportHandler, error) { argsDataTrieFactory := ArgsNewDataTrieFactory{ StorageConfig: e.exportTriesStorageConfig, SyncFolder: e.exportFolder, - Marshalizer: e.CoreComponents.InternalMarshalizer(), - Hasher: e.CoreComponents.Hasher(), + Marshalizer: e.coreComponents.InternalMarshalizer(), + Hasher: e.coreComponents.Hasher(), ShardCoordinator: e.shardCoordinator, MaxTrieLevelInMemory: e.maxTrieLevelInMemory, - EnableEpochsHandler: e.CoreComponents.EnableEpochsHandler(), + EnableEpochsHandler: e.coreComponents.EnableEpochsHandler(), } dataTriesContainerFactory, err := NewDataTrieFactory(argsDataTrieFactory) if err != nil { @@ -351,13 +346,14 @@ func (e *exportHandlerFactory) Create() (update.ExportHandler, error) { argsResolvers := ArgsNewResolversContainerFactory{ ShardCoordinator: e.shardCoordinator, - Messenger: e.messenger, - Marshalizer: e.CoreComponents.InternalMarshalizer(), + MainMessenger: e.networkComponents.NetworkMessenger(), + FullArchiveMessenger: e.networkComponents.FullArchiveNetworkMessenger(), + Marshalizer: e.coreComponents.InternalMarshalizer(), DataTrieContainer: dataTries, ExistingResolvers: e.existingResolvers, NumConcurrentResolvingJobs: 100, - InputAntifloodHandler: e.inputAntifloodHandler, - OutputAntifloodHandler: e.outputAntifloodHandler, + InputAntifloodHandler: e.networkComponents.InputAntiFloodHandler(), + OutputAntifloodHandler: e.networkComponents.OutputAntiFloodHandler(), } resolversFactory, err := NewResolversContainerFactory(argsResolvers) if err != nil { @@ -379,11 +375,12 @@ func (e *exportHandlerFactory) Create() (update.ExportHandler, error) { argsRequesters := ArgsRequestersContainerFactory{ ShardCoordinator: e.shardCoordinator, - Messenger: e.messenger, - Marshaller: e.CoreComponents.InternalMarshalizer(), + MainMessenger: e.networkComponents.NetworkMessenger(), + FullArchiveMessenger: e.networkComponents.FullArchiveNetworkMessenger(), + Marshaller: e.coreComponents.InternalMarshalizer(), ExistingRequesters: e.existingRequesters, - OutputAntifloodHandler: e.outputAntifloodHandler, - PeersRatingHandler: e.peersRatingHandler, + OutputAntifloodHandler: e.networkComponents.OutputAntiFloodHandler(), + PeersRatingHandler: 
e.networkComponents.PeersRatingHandler(), } requestersFactory, err := NewRequestersContainerFactory(argsRequesters) if err != nil { @@ -407,8 +404,8 @@ func (e *exportHandlerFactory) Create() (update.ExportHandler, error) { TrieCacher: e.dataPool.TrieNodes(), RequestHandler: e.requestHandler, ShardCoordinator: e.shardCoordinator, - Hasher: e.CoreComponents.Hasher(), - Marshalizer: e.CoreComponents.InternalMarshalizer(), + Hasher: e.coreComponents.Hasher(), + Marshalizer: e.coreComponents.InternalMarshalizer(), TrieStorageManager: trieStorageManager, TimoutGettingTrieNode: common.TimeoutGettingTrieNodesInHardfork, MaxTrieLevelInMemory: e.maxTrieLevelInMemory, @@ -416,8 +413,8 @@ func (e *exportHandlerFactory) Create() (update.ExportHandler, error) { NumConcurrentTrieSyncers: e.numConcurrentTrieSyncers, TrieSyncerVersion: e.trieSyncerVersion, CheckNodesOnDisk: e.checkNodesOnDisk, - AddressPubKeyConverter: e.CoreComponents.AddressPubKeyConverter(), - EnableEpochsHandler: e.CoreComponents.EnableEpochsHandler(), + AddressPubKeyConverter: e.coreComponents.AddressPubKeyConverter(), + EnableEpochsHandler: e.coreComponents.EnableEpochsHandler(), } accountsDBSyncerFactory, err := NewAccountsDBSContainerFactory(argsAccountsSyncers) if err != nil { @@ -431,11 +428,11 @@ func (e *exportHandlerFactory) Create() (update.ExportHandler, error) { argsNewHeadersSync := sync.ArgsNewHeadersSyncHandler{ StorageService: e.storageService, Cache: e.dataPool.Headers(), - Marshalizer: e.CoreComponents.InternalMarshalizer(), - Hasher: e.CoreComponents.Hasher(), + Marshalizer: e.coreComponents.InternalMarshalizer(), + Hasher: e.coreComponents.Hasher(), EpochHandler: epochHandler, RequestHandler: e.requestHandler, - Uint64Converter: e.CoreComponents.Uint64ByteSliceConverter(), + Uint64Converter: e.coreComponents.Uint64ByteSliceConverter(), ShardCoordinator: e.shardCoordinator, } epochStartHeadersSyncer, err := sync.NewHeadersSyncHandler(argsNewHeadersSync) @@ -460,7 +457,7 @@ func (e *exportHandlerFactory) Create() (update.ExportHandler, error) { argsMiniBlockSyncer := sync.ArgsNewPendingMiniBlocksSyncer{ Storage: storer, Cache: e.dataPool.MiniBlocks(), - Marshalizer: e.CoreComponents.InternalMarshalizer(), + Marshalizer: e.coreComponents.InternalMarshalizer(), RequestHandler: e.requestHandler, } epochStartMiniBlocksSyncer, err := sync.NewPendingMiniBlocksSyncer(argsMiniBlockSyncer) @@ -471,7 +468,7 @@ func (e *exportHandlerFactory) Create() (update.ExportHandler, error) { argsPendingTransactions := sync.ArgsNewTransactionsSyncer{ DataPools: e.dataPool, Storages: e.storageService, - Marshaller: e.CoreComponents.InternalMarshalizer(), + Marshaller: e.coreComponents.InternalMarshalizer(), RequestHandler: e.requestHandler, } epochStartTransactionsSyncer, err := sync.NewTransactionsSyncer(argsPendingTransactions) @@ -516,7 +513,7 @@ func (e *exportHandlerFactory) Create() (update.ExportHandler, error) { arg := storing.ArgHardforkStorer{ KeysStore: keysStorer, KeyValue: keysVals, - Marshalizer: e.CoreComponents.InternalMarshalizer(), + Marshalizer: e.coreComponents.InternalMarshalizer(), } hs, err := storing.NewHardforkStorer(arg) if err != nil { @@ -526,13 +523,13 @@ func (e *exportHandlerFactory) Create() (update.ExportHandler, error) { argsExporter := genesis.ArgsNewStateExporter{ ShardCoordinator: e.shardCoordinator, StateSyncer: stateSyncer, - Marshalizer: e.CoreComponents.InternalMarshalizer(), + Marshalizer: e.coreComponents.InternalMarshalizer(), HardforkStorer: hs, - Hasher: e.CoreComponents.Hasher(), + Hasher: 
e.coreComponents.Hasher(), ExportFolder: e.exportFolder, - ValidatorPubKeyConverter: e.CoreComponents.ValidatorPubKeyConverter(), - AddressPubKeyConverter: e.CoreComponents.AddressPubKeyConverter(), - GenesisNodesSetupHandler: e.CoreComponents.GenesisNodesSetup(), + ValidatorPubKeyConverter: e.coreComponents.ValidatorPubKeyConverter(), + AddressPubKeyConverter: e.coreComponents.AddressPubKeyConverter(), + GenesisNodesSetupHandler: e.coreComponents.GenesisNodesSetup(), } exportHandler, err := genesis.NewStateExporter(argsExporter) if err != nil { @@ -545,7 +542,7 @@ func (e *exportHandlerFactory) Create() (update.ExportHandler, error) { return nil, err } - e.interceptorsContainer.Iterate(func(key string, interceptor process.Interceptor) bool { + e.mainInterceptorsContainer.Iterate(func(key string, interceptor process.Interceptor) bool { errNotCritical = interceptor.SetInterceptedDebugHandler(debugger) if errNotCritical != nil { log.Warn("error setting debugger", "interceptor", key, "error", errNotCritical) @@ -568,38 +565,42 @@ func (e *exportHandlerFactory) prepareFolders(folder string) error { func (e *exportHandlerFactory) createInterceptors() error { argsInterceptors := ArgsNewFullSyncInterceptorsContainerFactory{ - CoreComponents: e.CoreComponents, - CryptoComponents: e.CryptoComponents, - Accounts: e.accounts, - ShardCoordinator: e.shardCoordinator, - NodesCoordinator: e.nodesCoordinator, - Messenger: e.messenger, - Store: e.storageService, - DataPool: e.dataPool, - MaxTxNonceDeltaAllowed: math.MaxInt32, - TxFeeHandler: &disabled.FeeHandler{}, - BlockBlackList: cache.NewTimeCache(time.Second), - HeaderSigVerifier: e.headerSigVerifier, - HeaderIntegrityVerifier: e.headerIntegrityVerifier, - SizeCheckDelta: math.MaxUint32, - ValidityAttester: e.validityAttester, - EpochStartTrigger: e.epochStartTrigger, - WhiteListHandler: e.whiteListHandler, - WhiteListerVerifiedTxs: e.whiteListerVerifiedTxs, - InterceptorsContainer: e.interceptorsContainer, - AntifloodHandler: e.inputAntifloodHandler, + CoreComponents: e.coreComponents, + CryptoComponents: e.cryptoComponents, + Accounts: e.accounts, + ShardCoordinator: e.shardCoordinator, + NodesCoordinator: e.nodesCoordinator, + MainMessenger: e.networkComponents.NetworkMessenger(), + FullArchiveMessenger: e.networkComponents.FullArchiveNetworkMessenger(), + Store: e.storageService, + DataPool: e.dataPool, + MaxTxNonceDeltaAllowed: math.MaxInt32, + TxFeeHandler: &disabled.FeeHandler{}, + BlockBlackList: cache.NewTimeCache(time.Second), + HeaderSigVerifier: e.headerSigVerifier, + HeaderIntegrityVerifier: e.headerIntegrityVerifier, + SizeCheckDelta: math.MaxUint32, + ValidityAttester: e.validityAttester, + EpochStartTrigger: e.epochStartTrigger, + WhiteListHandler: e.whiteListHandler, + WhiteListerVerifiedTxs: e.whiteListerVerifiedTxs, + MainInterceptorsContainer: e.mainInterceptorsContainer, + FullArchiveInterceptorsContainer: e.fullArchiveInterceptorsContainer, + AntifloodHandler: e.networkComponents.InputAntiFloodHandler(), + NodeOperationMode: e.nodeOperationMode, } fullSyncInterceptors, err := NewFullSyncInterceptorsContainerFactory(argsInterceptors) if err != nil { return err } - interceptorsContainer, err := fullSyncInterceptors.Create() + mainInterceptorsContainer, fullArchiveInterceptorsContainer, err := fullSyncInterceptors.Create() if err != nil { return err } - e.interceptorsContainer = interceptorsContainer + e.mainInterceptorsContainer = mainInterceptorsContainer + e.fullArchiveInterceptorsContainer = fullArchiveInterceptorsContainer 
return nil } diff --git a/update/factory/fullSyncInterceptors.go b/update/factory/fullSyncInterceptors.go index 545b4114f51..4c88c26945b 100644 --- a/update/factory/fullSyncInterceptors.go +++ b/update/factory/fullSyncInterceptors.go @@ -1,12 +1,15 @@ package factory import ( + "fmt" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/core/throttler" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/dataValidators" "github.com/multiversx/mx-chain-go/process/factory" @@ -27,12 +30,14 @@ const numGoRoutines = 2000 // fullSyncInterceptorsContainerFactory will handle the creation the interceptors container for shards type fullSyncInterceptorsContainerFactory struct { - container process.InterceptorsContainer + mainContainer process.InterceptorsContainer + fullArchiveContainer process.InterceptorsContainer shardCoordinator sharding.Coordinator accounts state.AccountsAdapter store dataRetriever.StorageService dataPool dataRetriever.PoolsHolder - messenger process.TopicHandler + mainMessenger process.TopicHandler + fullArchiveMessenger process.TopicHandler nodesCoordinator nodesCoordinator.NodesCoordinator blockBlackList process.TimeCacher argInterceptorFactory *interceptorFactory.ArgInterceptedDataFactory @@ -43,30 +48,34 @@ type fullSyncInterceptorsContainerFactory struct { whiteListerVerifiedTxs update.WhiteListHandler antifloodHandler process.P2PAntifloodHandler preferredPeersHolder update.PreferredPeersHolderHandler + nodeOperationMode p2p.NodeOperation } // ArgsNewFullSyncInterceptorsContainerFactory holds the arguments needed for fullSyncInterceptorsContainerFactory type ArgsNewFullSyncInterceptorsContainerFactory struct { - CoreComponents process.CoreComponentsHolder - CryptoComponents process.CryptoComponentsHolder - Accounts state.AccountsAdapter - ShardCoordinator sharding.Coordinator - NodesCoordinator nodesCoordinator.NodesCoordinator - Messenger process.TopicHandler - Store dataRetriever.StorageService - DataPool dataRetriever.PoolsHolder - MaxTxNonceDeltaAllowed int - TxFeeHandler process.FeeHandler - BlockBlackList process.TimeCacher - HeaderSigVerifier process.InterceptedHeaderSigVerifier - HeaderIntegrityVerifier process.HeaderIntegrityVerifier - SizeCheckDelta uint32 - ValidityAttester process.ValidityAttester - EpochStartTrigger process.EpochStartTriggerHandler - WhiteListHandler update.WhiteListHandler - WhiteListerVerifiedTxs update.WhiteListHandler - InterceptorsContainer process.InterceptorsContainer - AntifloodHandler process.P2PAntifloodHandler + CoreComponents process.CoreComponentsHolder + CryptoComponents process.CryptoComponentsHolder + Accounts state.AccountsAdapter + ShardCoordinator sharding.Coordinator + NodesCoordinator nodesCoordinator.NodesCoordinator + MainMessenger process.TopicHandler + FullArchiveMessenger process.TopicHandler + Store dataRetriever.StorageService + DataPool dataRetriever.PoolsHolder + MaxTxNonceDeltaAllowed int + TxFeeHandler process.FeeHandler + BlockBlackList process.TimeCacher + HeaderSigVerifier process.InterceptedHeaderSigVerifier + HeaderIntegrityVerifier process.HeaderIntegrityVerifier + SizeCheckDelta uint32 + ValidityAttester process.ValidityAttester + EpochStartTrigger process.EpochStartTriggerHandler + WhiteListHandler 
update.WhiteListHandler + WhiteListerVerifiedTxs update.WhiteListHandler + MainInterceptorsContainer process.InterceptorsContainer + FullArchiveInterceptorsContainer process.InterceptorsContainer + AntifloodHandler process.P2PAntifloodHandler + NodeOperationMode p2p.NodeOperation } // NewFullSyncInterceptorsContainerFactory is responsible for creating a new interceptors factory object @@ -80,7 +89,8 @@ func NewFullSyncInterceptorsContainerFactory( args.Accounts, args.Store, args.DataPool, - args.Messenger, + args.MainMessenger, + args.FullArchiveMessenger, args.NodesCoordinator, args.BlockBlackList, args.WhiteListerVerifiedTxs, @@ -111,8 +121,11 @@ func NewFullSyncInterceptorsContainerFactory( if check.IfNil(args.EpochStartTrigger) { return nil, process.ErrNilEpochStartTrigger } - if check.IfNil(args.InterceptorsContainer) { - return nil, update.ErrNilInterceptorsContainer + if check.IfNil(args.MainInterceptorsContainer) { + return nil, fmt.Errorf("%w on main network", update.ErrNilInterceptorsContainer) + } + if check.IfNil(args.FullArchiveInterceptorsContainer) { + return nil, fmt.Errorf("%w on full archive network", update.ErrNilInterceptorsContainer) } if check.IfNil(args.WhiteListHandler) { return nil, update.ErrNilWhiteListHandler @@ -136,10 +149,12 @@ func NewFullSyncInterceptorsContainerFactory( } icf := &fullSyncInterceptorsContainerFactory{ - container: args.InterceptorsContainer, + mainContainer: args.MainInterceptorsContainer, + fullArchiveContainer: args.FullArchiveInterceptorsContainer, accounts: args.Accounts, shardCoordinator: args.ShardCoordinator, - messenger: args.Messenger, + mainMessenger: args.MainMessenger, + fullArchiveMessenger: args.FullArchiveMessenger, store: args.Store, dataPool: args.DataPool, nodesCoordinator: args.NodesCoordinator, @@ -151,6 +166,7 @@ func NewFullSyncInterceptorsContainerFactory( antifloodHandler: args.AntifloodHandler, //TODO: inject the real peers holder once we have the peers mapping before epoch bootstrap finishes preferredPeersHolder: disabled.NewPreferredPeersHolder(), + nodeOperationMode: args.NodeOperationMode, } icf.globalThrottler, err = throttler.NewNumGoRoutinesThrottler(numGoRoutines) @@ -162,43 +178,43 @@ func NewFullSyncInterceptorsContainerFactory( } // Create returns an interceptor container that will hold all interceptors in the system -func (ficf *fullSyncInterceptorsContainerFactory) Create() (process.InterceptorsContainer, error) { +func (ficf *fullSyncInterceptorsContainerFactory) Create() (process.InterceptorsContainer, process.InterceptorsContainer, error) { err := ficf.generateTxInterceptors() if err != nil { - return nil, err + return nil, nil, err } err = ficf.generateUnsignedTxsInterceptors() if err != nil { - return nil, err + return nil, nil, err } err = ficf.generateRewardTxInterceptors() if err != nil { - return nil, err + return nil, nil, err } err = ficf.generateMiniBlocksInterceptors() if err != nil { - return nil, err + return nil, nil, err } err = ficf.generateMetachainHeaderInterceptors() if err != nil { - return nil, err + return nil, nil, err } err = ficf.generateShardHeaderInterceptors() if err != nil { - return nil, err + return nil, nil, err } err = ficf.generateTrieNodesInterceptors() if err != nil { - return nil, err + return nil, nil, err } - return ficf.container, nil + return ficf.mainContainer, ficf.fullArchiveContainer, nil } func checkBaseParams( @@ -208,7 +224,8 @@ func checkBaseParams( accounts state.AccountsAdapter, store dataRetriever.StorageService, dataPool dataRetriever.PoolsHolder, - 
messenger process.TopicHandler, + mainMessenger process.TopicHandler, + fullArchiveMessenger process.TopicHandler, nodesCoordinator nodesCoordinator.NodesCoordinator, blockBlackList process.TimeCacher, whiteListerVerifiedTxs update.WhiteListHandler, @@ -256,8 +273,11 @@ func checkBaseParams( if check.IfNil(shardCoordinator) { return process.ErrNilShardCoordinator } - if check.IfNil(messenger) { - return process.ErrNilMessenger + if check.IfNil(mainMessenger) { + return fmt.Errorf("%w on main network", process.ErrNilMessenger) + } + if check.IfNil(fullArchiveMessenger) { + return fmt.Errorf("%w on full archive network", process.ErrNilMessenger) } if check.IfNil(store) { return process.ErrNilStore @@ -282,7 +302,7 @@ func checkBaseParams( } func (ficf *fullSyncInterceptorsContainerFactory) checkIfInterceptorExists(identifier string) bool { - _, err := ficf.container.Get(identifier) + _, err := ficf.mainContainer.Get(identifier) return err == nil } @@ -312,7 +332,7 @@ func (ficf *fullSyncInterceptorsContainerFactory) generateShardHeaderInterceptor interceptorsSlice[int(idx)] = interceptor } - return ficf.container.AddMultiple(keys, interceptorsSlice) + return ficf.addInterceptorsToContainers(keys, interceptorsSlice) } func (ficf *fullSyncInterceptorsContainerFactory) createOneShardHeaderInterceptor(topic string) (process.Interceptor, error) { @@ -338,7 +358,7 @@ func (ficf *fullSyncInterceptorsContainerFactory) createOneShardHeaderIntercepto Throttler: ficf.globalThrottler, AntifloodHandler: ficf.antifloodHandler, WhiteListRequest: ficf.whiteListHandler, - CurrentPeerId: ficf.messenger.ID(), + CurrentPeerId: ficf.mainMessenger.ID(), }, ) if err != nil { @@ -382,7 +402,7 @@ func (ficf *fullSyncInterceptorsContainerFactory) generateUnsignedTxsInterceptor interceptorsSlice[numShards] = interceptor } - return ficf.container.AddMultiple(keys, interceptorsSlice) + return ficf.addInterceptorsToContainers(keys, interceptorsSlice) } func (ficf *fullSyncInterceptorsContainerFactory) generateTrieNodesInterceptors() error { @@ -428,7 +448,7 @@ func (ficf *fullSyncInterceptorsContainerFactory) generateTrieNodesInterceptors( trieInterceptors = append(trieInterceptors, interceptor) } - return ficf.container.AddMultiple(keys, trieInterceptors) + return ficf.addInterceptorsToContainers(keys, trieInterceptors) } func (ficf *fullSyncInterceptorsContainerFactory) createTopicAndAssignHandler( @@ -437,12 +457,34 @@ func (ficf *fullSyncInterceptorsContainerFactory) createTopicAndAssignHandler( createChannel bool, ) (process.Interceptor, error) { - err := ficf.messenger.CreateTopic(topic, createChannel) + err := createTopicAndAssignHandlerOnMessenger(topic, interceptor, createChannel, ficf.mainMessenger) if err != nil { return nil, err } - return interceptor, ficf.messenger.RegisterMessageProcessor(topic, common.HardforkInterceptorsIdentifier, interceptor) + if ficf.nodeOperationMode == p2p.FullArchiveMode { + err = createTopicAndAssignHandlerOnMessenger(topic, interceptor, createChannel, ficf.fullArchiveMessenger) + if err != nil { + return nil, err + } + } + + return interceptor, nil +} + +func createTopicAndAssignHandlerOnMessenger( + topic string, + interceptor process.Interceptor, + createChannel bool, + messenger process.TopicHandler, +) error { + + err := messenger.CreateTopic(topic, createChannel) + if err != nil { + return err + } + + return messenger.RegisterMessageProcessor(topic, common.HardforkInterceptorsIdentifier, interceptor) } func (ficf *fullSyncInterceptorsContainerFactory) 
generateTxInterceptors() error { @@ -480,7 +522,7 @@ func (ficf *fullSyncInterceptorsContainerFactory) generateTxInterceptors() error interceptorSlice = append(interceptorSlice, interceptor) } - return ficf.container.AddMultiple(keys, interceptorSlice) + return ficf.addInterceptorsToContainers(keys, interceptorSlice) } func (ficf *fullSyncInterceptorsContainerFactory) createOneTxInterceptor(topic string) (process.Interceptor, error) { @@ -519,7 +561,7 @@ func (ficf *fullSyncInterceptorsContainerFactory) createOneTxInterceptor(topic s Throttler: ficf.globalThrottler, AntifloodHandler: ficf.antifloodHandler, WhiteListRequest: ficf.whiteListHandler, - CurrentPeerId: ficf.messenger.ID(), + CurrentPeerId: ficf.mainMessenger.ID(), PreferredPeersHolder: ficf.preferredPeersHolder, }, ) @@ -554,7 +596,7 @@ func (ficf *fullSyncInterceptorsContainerFactory) createOneUnsignedTxInterceptor Throttler: ficf.globalThrottler, AntifloodHandler: ficf.antifloodHandler, WhiteListRequest: ficf.whiteListHandler, - CurrentPeerId: ficf.messenger.ID(), + CurrentPeerId: ficf.mainMessenger.ID(), PreferredPeersHolder: ficf.preferredPeersHolder, }, ) @@ -589,7 +631,7 @@ func (ficf *fullSyncInterceptorsContainerFactory) createOneRewardTxInterceptor(t Throttler: ficf.globalThrottler, AntifloodHandler: ficf.antifloodHandler, WhiteListRequest: ficf.whiteListHandler, - CurrentPeerId: ficf.messenger.ID(), + CurrentPeerId: ficf.mainMessenger.ID(), PreferredPeersHolder: ficf.preferredPeersHolder, }, ) @@ -632,7 +674,7 @@ func (ficf *fullSyncInterceptorsContainerFactory) generateMiniBlocksInterceptors interceptorsSlice[numShards] = interceptor } - return ficf.container.AddMultiple(keys, interceptorsSlice) + return ficf.addInterceptorsToContainers(keys, interceptorsSlice) } func (ficf *fullSyncInterceptorsContainerFactory) createOneMiniBlocksInterceptor(topic string) (process.Interceptor, error) { @@ -661,7 +703,7 @@ func (ficf *fullSyncInterceptorsContainerFactory) createOneMiniBlocksInterceptor Throttler: ficf.globalThrottler, AntifloodHandler: ficf.antifloodHandler, WhiteListRequest: ficf.whiteListHandler, - CurrentPeerId: ficf.messenger.ID(), + CurrentPeerId: ficf.mainMessenger.ID(), PreferredPeersHolder: ficf.preferredPeersHolder, }, ) @@ -701,7 +743,7 @@ func (ficf *fullSyncInterceptorsContainerFactory) generateMetachainHeaderInterce Throttler: ficf.globalThrottler, AntifloodHandler: ficf.antifloodHandler, WhiteListRequest: ficf.whiteListHandler, - CurrentPeerId: ficf.messenger.ID(), + CurrentPeerId: ficf.mainMessenger.ID(), PreferredPeersHolder: ficf.preferredPeersHolder, }, ) @@ -714,7 +756,7 @@ func (ficf *fullSyncInterceptorsContainerFactory) generateMetachainHeaderInterce return err } - return ficf.container.Add(identifierHdr, interceptor) + return ficf.addInterceptorsToContainers([]string{identifierHdr}, []process.Interceptor{interceptor}) } func (ficf *fullSyncInterceptorsContainerFactory) createOneTrieNodesInterceptor(topic string) (process.Interceptor, error) { @@ -737,7 +779,7 @@ func (ficf *fullSyncInterceptorsContainerFactory) createOneTrieNodesInterceptor( Throttler: ficf.globalThrottler, AntifloodHandler: ficf.antifloodHandler, WhiteListRequest: ficf.whiteListHandler, - CurrentPeerId: ficf.messenger.ID(), + CurrentPeerId: ficf.mainMessenger.ID(), PreferredPeersHolder: ficf.preferredPeersHolder, }, ) @@ -775,7 +817,20 @@ func (ficf *fullSyncInterceptorsContainerFactory) generateRewardTxInterceptors() interceptorSlice[int(idx)] = interceptor } - return ficf.container.AddMultiple(keys, interceptorSlice) + return 
ficf.addInterceptorsToContainers(keys, interceptorSlice) +} + +func (ficf *fullSyncInterceptorsContainerFactory) addInterceptorsToContainers(keys []string, interceptors []process.Interceptor) error { + err := ficf.mainContainer.AddMultiple(keys, interceptors) + if err != nil { + return err + } + + if ficf.nodeOperationMode != p2p.FullArchiveMode { + return nil + } + + return ficf.fullArchiveContainer.AddMultiple(keys, interceptors) } // IsInterfaceNil returns true if there is no value under the interface diff --git a/update/factory/fullSyncRequestersContainerFactory.go b/update/factory/fullSyncRequestersContainerFactory.go index 237c43ad192..cce6ee407d7 100644 --- a/update/factory/fullSyncRequestersContainerFactory.go +++ b/update/factory/fullSyncRequestersContainerFactory.go @@ -1,6 +1,8 @@ package factory import ( + "fmt" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/core/random" @@ -11,6 +13,7 @@ import ( "github.com/multiversx/mx-chain-go/dataRetriever/requestHandlers/requesters" "github.com/multiversx/mx-chain-go/dataRetriever/topicSender" "github.com/multiversx/mx-chain-go/epochStart/bootstrap/disabled" + "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/process/factory" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/update" @@ -24,7 +27,8 @@ const ( type requestersContainerFactory struct { shardCoordinator sharding.Coordinator - messenger dataRetriever.TopicMessageHandler + mainMessenger p2p.Messenger + fullArchiveMessenger p2p.Messenger marshaller marshal.Marshalizer intRandomizer dataRetriever.IntRandomizer container dataRetriever.RequestersContainer @@ -35,7 +39,8 @@ type requestersContainerFactory struct { // ArgsRequestersContainerFactory defines the arguments for the requestersContainerFactory constructor type ArgsRequestersContainerFactory struct { ShardCoordinator sharding.Coordinator - Messenger dataRetriever.TopicMessageHandler + MainMessenger p2p.Messenger + FullArchiveMessenger p2p.Messenger Marshaller marshal.Marshalizer ExistingRequesters dataRetriever.RequestersContainer OutputAntifloodHandler dataRetriever.P2PAntifloodHandler @@ -47,8 +52,11 @@ func NewRequestersContainerFactory(args ArgsRequestersContainerFactory) (*reques if check.IfNil(args.ShardCoordinator) { return nil, update.ErrNilShardCoordinator } - if check.IfNil(args.Messenger) { - return nil, update.ErrNilMessenger + if check.IfNil(args.MainMessenger) { + return nil, fmt.Errorf("%w on main network", update.ErrNilMessenger) + } + if check.IfNil(args.FullArchiveMessenger) { + return nil, fmt.Errorf("%w on full archive network", update.ErrNilMessenger) } if check.IfNil(args.Marshaller) { return nil, update.ErrNilMarshalizer @@ -65,7 +73,8 @@ func NewRequestersContainerFactory(args ArgsRequestersContainerFactory) (*reques return &requestersContainerFactory{ shardCoordinator: args.ShardCoordinator, - messenger: args.Messenger, + mainMessenger: args.MainMessenger, + fullArchiveMessenger: args.FullArchiveMessenger, marshaller: args.Marshaller, intRandomizer: &random.ConcurrentSafeIntRandomizer{}, container: args.ExistingRequesters, @@ -145,7 +154,7 @@ func (rcf *requestersContainerFactory) createTrieNodesRequester(baseTopic string targetConsensusTopic := common.ConsensusTopic + targetShardCoordinator.CommunicationIdentifier(targetShardID) peerListCreator, err := topicsender.NewDiffPeerListCreator( - rcf.messenger, + rcf.mainMessenger, baseTopic, 
diff --git a/update/factory/fullSyncRequestersContainerFactory.go b/update/factory/fullSyncRequestersContainerFactory.go
index 237c43ad192..cce6ee407d7 100644
--- a/update/factory/fullSyncRequestersContainerFactory.go
+++ b/update/factory/fullSyncRequestersContainerFactory.go
@@ -1,6 +1,8 @@
 package factory
 
 import (
+	"fmt"
+
 	"github.com/multiversx/mx-chain-core-go/core"
 	"github.com/multiversx/mx-chain-core-go/core/check"
 	"github.com/multiversx/mx-chain-core-go/core/random"
@@ -11,6 +13,7 @@ import (
 	"github.com/multiversx/mx-chain-go/dataRetriever/requestHandlers/requesters"
 	"github.com/multiversx/mx-chain-go/dataRetriever/topicSender"
 	"github.com/multiversx/mx-chain-go/epochStart/bootstrap/disabled"
+	"github.com/multiversx/mx-chain-go/p2p"
 	"github.com/multiversx/mx-chain-go/process/factory"
 	"github.com/multiversx/mx-chain-go/sharding"
 	"github.com/multiversx/mx-chain-go/update"
@@ -24,7 +27,8 @@ const (
 
 type requestersContainerFactory struct {
 	shardCoordinator       sharding.Coordinator
-	messenger              dataRetriever.TopicMessageHandler
+	mainMessenger          p2p.Messenger
+	fullArchiveMessenger   p2p.Messenger
 	marshaller             marshal.Marshalizer
 	intRandomizer          dataRetriever.IntRandomizer
 	container              dataRetriever.RequestersContainer
@@ -35,7 +39,8 @@ type requestersContainerFactory struct {
 // ArgsRequestersContainerFactory defines the arguments for the requestersContainerFactory constructor
 type ArgsRequestersContainerFactory struct {
 	ShardCoordinator       sharding.Coordinator
-	Messenger              dataRetriever.TopicMessageHandler
+	MainMessenger          p2p.Messenger
+	FullArchiveMessenger   p2p.Messenger
 	Marshaller             marshal.Marshalizer
 	ExistingRequesters     dataRetriever.RequestersContainer
 	OutputAntifloodHandler dataRetriever.P2PAntifloodHandler
@@ -47,8 +52,11 @@ func NewRequestersContainerFactory(args ArgsRequestersContainerFactory) (*reques
 	if check.IfNil(args.ShardCoordinator) {
 		return nil, update.ErrNilShardCoordinator
 	}
-	if check.IfNil(args.Messenger) {
-		return nil, update.ErrNilMessenger
+	if check.IfNil(args.MainMessenger) {
+		return nil, fmt.Errorf("%w on main network", update.ErrNilMessenger)
+	}
+	if check.IfNil(args.FullArchiveMessenger) {
+		return nil, fmt.Errorf("%w on full archive network", update.ErrNilMessenger)
 	}
 	if check.IfNil(args.Marshaller) {
 		return nil, update.ErrNilMarshalizer
@@ -65,7 +73,8 @@ func NewRequestersContainerFactory(args ArgsRequestersContainerFactory) (*reques
 
 	return &requestersContainerFactory{
 		shardCoordinator:       args.ShardCoordinator,
-		messenger:              args.Messenger,
+		mainMessenger:          args.MainMessenger,
+		fullArchiveMessenger:   args.FullArchiveMessenger,
 		marshaller:             args.Marshaller,
 		intRandomizer:          &random.ConcurrentSafeIntRandomizer{},
 		container:              args.ExistingRequesters,
@@ -145,7 +154,7 @@ func (rcf *requestersContainerFactory) createTrieNodesRequester(baseTopic string
 	targetConsensusTopic := common.ConsensusTopic + targetShardCoordinator.CommunicationIdentifier(targetShardID)
 
 	peerListCreator, err := topicsender.NewDiffPeerListCreator(
-		rcf.messenger,
+		rcf.mainMessenger,
 		baseTopic,
 		targetConsensusTopic,
 		resolverscontainer.EmptyExcludePeersOnTopic,
@@ -156,11 +165,13 @@ func (rcf *requestersContainerFactory) createTrieNodesRequester(baseTopic string
 
 	arg := topicsender.ArgTopicRequestSender{
 		ArgBaseTopicSender: topicsender.ArgBaseTopicSender{
-			Messenger:            rcf.messenger,
-			TopicName:            baseTopic,
-			OutputAntiflooder:    rcf.outputAntifloodHandler,
-			PreferredPeersHolder: disabled.NewPreferredPeersHolder(),
-			TargetShardId:        defaultTargetShardID,
+			MainMessenger:                   rcf.mainMessenger,
+			FullArchiveMessenger:            rcf.fullArchiveMessenger,
+			TopicName:                       baseTopic,
+			OutputAntiflooder:               rcf.outputAntifloodHandler,
+			MainPreferredPeersHolder:        disabled.NewPreferredPeersHolder(),
+			FullArchivePreferredPeersHolder: disabled.NewPreferredPeersHolder(),
+			TargetShardId:                   defaultTargetShardID,
 		},
 		Marshaller: rcf.marshaller,
 		Randomizer: rcf.intRandomizer,
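Both hardfork factories replace the single `Messenger` argument with a `MainMessenger`/`FullArchiveMessenger` pair and wrap the existing `update.ErrNilMessenger` sentinel with the network it refers to. Wrapping with `%w` keeps `errors.Is` checks working while making the failure message say exactly which argument was nil. A minimal sketch of that validation pattern follows; the local sentinel and plain nil checks are simplifications (the real code uses `update.ErrNilMessenger` and `check.IfNil` on `p2p.Messenger` values).

```go
package main

import (
	"errors"
	"fmt"
)

// errNilMessenger is a local stand-in for update.ErrNilMessenger.
var errNilMessenger = errors.New("nil messenger")

// checkMessengers mirrors the validation added to both factories: one sentinel
// error, wrapped with the network it refers to, so callers can keep testing
// with errors.Is while the message identifies the nil argument.
func checkMessengers(mainMessenger, fullArchiveMessenger interface{}) error {
	if mainMessenger == nil {
		return fmt.Errorf("%w on main network", errNilMessenger)
	}
	if fullArchiveMessenger == nil {
		return fmt.Errorf("%w on full archive network", errNilMessenger)
	}
	return nil
}

func main() {
	err := checkMessengers(nil, struct{}{})
	fmt.Println(err)                             // nil messenger on main network
	fmt.Println(errors.Is(err, errNilMessenger)) // true
}
```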
diff --git a/update/factory/fullSyncResolversContainerFactory.go b/update/factory/fullSyncResolversContainerFactory.go
index 41214051282..e2227993ef9 100644
--- a/update/factory/fullSyncResolversContainerFactory.go
+++ b/update/factory/fullSyncResolversContainerFactory.go
@@ -1,6 +1,8 @@
 package factory
 
 import (
+	"fmt"
+
 	"github.com/multiversx/mx-chain-core-go/core"
 	"github.com/multiversx/mx-chain-core-go/core/check"
 	"github.com/multiversx/mx-chain-core-go/core/throttler"
@@ -10,6 +12,7 @@ import (
 	"github.com/multiversx/mx-chain-go/dataRetriever/resolvers"
 	"github.com/multiversx/mx-chain-go/dataRetriever/topicSender"
 	"github.com/multiversx/mx-chain-go/epochStart/bootstrap/disabled"
+	"github.com/multiversx/mx-chain-go/p2p"
 	"github.com/multiversx/mx-chain-go/process/factory"
 	"github.com/multiversx/mx-chain-go/sharding"
 	"github.com/multiversx/mx-chain-go/update"
@@ -20,7 +23,8 @@ const defaultTargetShardID = uint32(0)
 
 type resolversContainerFactory struct {
 	shardCoordinator       sharding.Coordinator
-	messenger              dataRetriever.TopicMessageHandler
+	mainMessenger          p2p.Messenger
+	fullArchiveMessenger   p2p.Messenger
 	marshalizer            marshal.Marshalizer
 	dataTrieContainer      common.TriesHolder
 	container              dataRetriever.ResolversContainer
@@ -32,7 +36,8 @@ type resolversContainerFactory struct {
 // ArgsNewResolversContainerFactory defines the arguments for the resolversContainerFactory constructor
 type ArgsNewResolversContainerFactory struct {
 	ShardCoordinator       sharding.Coordinator
-	Messenger              dataRetriever.TopicMessageHandler
+	MainMessenger          p2p.Messenger
+	FullArchiveMessenger   p2p.Messenger
 	Marshalizer            marshal.Marshalizer
 	DataTrieContainer      common.TriesHolder
 	ExistingResolvers      dataRetriever.ResolversContainer
@@ -46,8 +51,11 @@ func NewResolversContainerFactory(args ArgsNewResolversContainerFactory) (*resol
 	if check.IfNil(args.ShardCoordinator) {
 		return nil, update.ErrNilShardCoordinator
 	}
-	if check.IfNil(args.Messenger) {
-		return nil, update.ErrNilMessenger
+	if check.IfNil(args.MainMessenger) {
+		return nil, fmt.Errorf("%w on main network", update.ErrNilMessenger)
+	}
+	if check.IfNil(args.FullArchiveMessenger) {
+		return nil, fmt.Errorf("%w on full archive network", update.ErrNilMessenger)
 	}
 	if check.IfNil(args.Marshalizer) {
 		return nil, update.ErrNilMarshalizer
@@ -65,7 +73,8 @@ func NewResolversContainerFactory(args ArgsNewResolversContainerFactory) (*resol
 	}
 	return &resolversContainerFactory{
 		shardCoordinator:       args.ShardCoordinator,
-		messenger:              args.Messenger,
+		mainMessenger:          args.MainMessenger,
+		fullArchiveMessenger:   args.FullArchiveMessenger,
 		marshalizer:            args.Marshalizer,
 		dataTrieContainer:      args.DataTrieContainer,
 		container:              args.ExistingResolvers,
@@ -143,11 +152,13 @@ func (rcf *resolversContainerFactory) createTrieNodesResolver(baseTopic string,
 
 	arg := topicsender.ArgTopicResolverSender{
 		ArgBaseTopicSender: topicsender.ArgBaseTopicSender{
-			Messenger:            rcf.messenger,
-			TopicName:            baseTopic,
-			OutputAntiflooder:    rcf.outputAntifloodHandler,
-			PreferredPeersHolder: disabled.NewPreferredPeersHolder(),
-			TargetShardId:        defaultTargetShardID,
+			MainMessenger:                   rcf.mainMessenger,
+			FullArchiveMessenger:            rcf.fullArchiveMessenger,
+			TopicName:                       baseTopic,
+			OutputAntiflooder:               rcf.outputAntifloodHandler,
+			MainPreferredPeersHolder:        disabled.NewPreferredPeersHolder(),
+			FullArchivePreferredPeersHolder: disabled.NewPreferredPeersHolder(),
+			TargetShardId:                   defaultTargetShardID,
 		},
 	}
 	resolverSender, err := topicsender.NewTopicResolverSender(arg)
@@ -170,7 +181,12 @@ func (rcf *resolversContainerFactory) createTrieNodesResolver(baseTopic string,
 		return nil, err
 	}
 
-	err = rcf.messenger.RegisterMessageProcessor(resolver.RequestTopic(), common.HardforkResolversIdentifier, resolver)
+	err = rcf.mainMessenger.RegisterMessageProcessor(resolver.RequestTopic(), common.HardforkResolversIdentifier, resolver)
+	if err != nil {
+		return nil, err
+	}
+
+	err = rcf.fullArchiveMessenger.RegisterMessageProcessor(resolver.RequestTopic(), common.HardforkResolversIdentifier, resolver)
 	if err != nil {
 		return nil, err
 	}
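Besides the dual sender configuration, the resolvers factory now registers each trie-node resolver as a message processor on both networks, bailing out if the main network registration fails. A self-contained sketch of that flow is below; the `messenger` interface, stub implementation, topic and identifier values are illustrative stand-ins (the real code uses `p2p.Messenger`, `resolver.RequestTopic()` and `common.HardforkResolversIdentifier`).

```go
package main

import "fmt"

// messenger models only the method this flow relies on; the real
// p2p.Messenger interface is considerably larger.
type messenger interface {
	RegisterMessageProcessor(topic string, identifier string, processor interface{}) error
}

// stubMessenger prints registrations and can be told to fail, so the flow is
// observable without a real network stack.
type stubMessenger struct {
	name string
	fail bool
}

func (s *stubMessenger) RegisterMessageProcessor(topic string, identifier string, _ interface{}) error {
	if s.fail {
		return fmt.Errorf("%s network: cannot register %s/%s", s.name, topic, identifier)
	}
	fmt.Printf("%s network: registered %s/%s\n", s.name, topic, identifier)
	return nil
}

// registerOnBothNetworks mirrors the flow in createTrieNodesResolver: register
// on the main messenger first and, only if that succeeds, on the full archive
// messenger as well.
func registerOnBothNetworks(mainM, fullArchiveM messenger, topic, identifier string, processor interface{}) error {
	err := mainM.RegisterMessageProcessor(topic, identifier, processor)
	if err != nil {
		return err
	}

	return fullArchiveM.RegisterMessageProcessor(topic, identifier, processor)
}

func main() {
	mainM := &stubMessenger{name: "main"}
	archiveM := &stubMessenger{name: "full archive"}

	// Illustrative topic/identifier values only.
	err := registerOnBothNetworks(mainM, archiveM, "trieNodes_0_REQUEST", "hardfork", struct{}{})
	if err != nil {
		fmt.Println("registration failed:", err)
	}
}
```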