From a6c967f165e5ddaf16a52d10887975b429b15abe Mon Sep 17 00:00:00 2001 From: omer mishael Date: Sun, 12 Feb 2023 18:34:04 +0200 Subject: [PATCH 001/123] provider state tracker work in progress --- .../reliabilitymanager/reliability_manager.go | 98 ++++++++++++++++++- .../statetracker/consumer_state_tracker.go | 15 +-- protocol/statetracker/epoch_updater.go | 47 +++++++++ .../finalization_consensus_updater.go | 3 +- protocol/statetracker/pairing_updater.go | 10 +- .../statetracker/provider_state_tracker.go | 40 ++++++-- protocol/statetracker/state_query.go | 81 +++++++++++++-- protocol/statetracker/state_tracker.go | 4 - protocol/statetracker/tx_sender.go | 13 +++ protocol/statetracker/vote_updater.go | 41 ++++++++ 10 files changed, 319 insertions(+), 33 deletions(-) create mode 100644 protocol/statetracker/epoch_updater.go create mode 100644 protocol/statetracker/vote_updater.go diff --git a/protocol/rpcprovider/reliabilitymanager/reliability_manager.go b/protocol/rpcprovider/reliabilitymanager/reliability_manager.go index f0ef65e2a9..1339815a30 100644 --- a/protocol/rpcprovider/reliabilitymanager/reliability_manager.go +++ b/protocol/rpcprovider/reliabilitymanager/reliability_manager.go @@ -1,6 +1,13 @@ package reliabilitymanager -import "github.com/lavanet/lava/protocol/chaintracker" +import ( + "strconv" + "strings" + + "github.com/lavanet/lava/protocol/chaintracker" + "github.com/lavanet/lava/utils" + terderminttypes "github.com/tendermint/tendermint/abci/types" +) type ReliabilityManager struct { chainTracker *chaintracker.ChainTracker @@ -19,3 +26,92 @@ func NewReliabilityManager(chainTracker *chaintracker.ChainTracker) *Reliability rm.chainTracker = chainTracker return rm } + +type VoteParams struct { + CloseVote bool + ChainID string + ApiURL string + RequestData []byte + RequestBlock uint64 + Voters []string + ConnectionType string + ApiInterface string + VoteDeadline uint64 + VoteID string +} + +func (vp *VoteParams) GetCloseVote() bool { + if vp == nil { + // default returns false + return false + } + return vp.CloseVote +} + +func BuildVoteParamsFromDetectionEvent(event terderminttypes.Event) (*VoteParams, error) { + attributes := map[string]string{} + for _, attribute := range event.Attributes { + attributes[string(attribute.Key)] = string(attribute.Value) + } + voteID, ok := attributes["voteID"] + if !ok { + return nil, utils.LavaFormatError("failed building BuildVoteParamsFromRevealEvent", nil, &attributes) + } + chainID, ok := attributes["chainID"] + if !ok { + return nil, utils.LavaFormatError("failed building BuildVoteParamsFromRevealEvent", nil, &attributes) + } + apiURL, ok := attributes["apiURL"] + if !ok { + return nil, utils.LavaFormatError("failed building BuildVoteParamsFromRevealEvent", nil, &attributes) + } + requestData_str, ok := attributes["requestData"] + if !ok { + return nil, utils.LavaFormatError("failed building BuildVoteParamsFromRevealEvent", nil, &attributes) + } + requestData := []byte(requestData_str) + + connectionType, ok := attributes["connectionType"] + if !ok { + return nil, utils.LavaFormatError("failed building BuildVoteParamsFromRevealEvent", nil, &attributes) + } + apiInterface, ok := attributes["apiInterface"] + if !ok { + return nil, utils.LavaFormatError("failed building BuildVoteParamsFromRevealEvent", nil, &attributes) + } + num_str, ok := attributes["requestBlock"] + if !ok { + return nil, utils.LavaFormatError("failed building BuildVoteParamsFromRevealEvent", nil, &attributes) + } + requestBlock, err := strconv.ParseUint(num_str, 10, 
64) + if err != nil { + return nil, utils.LavaFormatError("vote requested block could not be parsed", err, &map[string]string{"requested block": num_str, "voteID": voteID}) + + } + num_str, ok = attributes["voteDeadline"] + if !ok { + return nil, utils.LavaFormatError("failed building BuildVoteParamsFromRevealEvent", nil, &attributes) + } + voteDeadline, err := strconv.ParseUint(num_str, 10, 64) + if err != nil { + return nil, utils.LavaFormatError("vote deadline could not be parsed", err, &map[string]string{"deadline": num_str, "voteID": voteID}) + } + voters_st, ok := attributes["voters"] + if !ok { + return nil, utils.LavaFormatError("failed building BuildVoteParamsFromRevealEvent", nil, &attributes) + } + voters := strings.Split(voters_st, ",") + voteParams := &VoteParams{ + ChainID: chainID, + ApiURL: apiURL, + RequestData: requestData, + RequestBlock: requestBlock, + Voters: voters, + CloseVote: false, + ConnectionType: connectionType, + ApiInterface: apiInterface, + VoteDeadline: voteDeadline, + VoteID: voteID, + } + return voteParams, nil +} diff --git a/protocol/statetracker/consumer_state_tracker.go b/protocol/statetracker/consumer_state_tracker.go index ee304b68a0..6935b4af06 100644 --- a/protocol/statetracker/consumer_state_tracker.go +++ b/protocol/statetracker/consumer_state_tracker.go @@ -6,7 +6,6 @@ import ( "github.com/cosmos/cosmos-sdk/client" "github.com/cosmos/cosmos-sdk/client/tx" - sdk "github.com/cosmos/cosmos-sdk/types" "github.com/lavanet/lava/protocol/chainlib" "github.com/lavanet/lava/protocol/chaintracker" "github.com/lavanet/lava/protocol/lavaprotocol" @@ -18,9 +17,8 @@ import ( // ConsumerStateTracker CSTis a class for tracking consumer data from the lava blockchain, such as epoch changes. // it allows also to query specific data form the blockchain and acts as a single place to send transactions type ConsumerStateTracker struct { - consumerAddress sdk.AccAddress - stateQuery *ConsumerStateQuery - txSender *ConsumerTxSender + stateQuery *ConsumerStateQuery + txSender *ConsumerTxSender *StateTracker } @@ -39,17 +37,20 @@ func NewConsumerStateTracker(ctx context.Context, txFactory tx.Factory, clientCt func (cst *ConsumerStateTracker) RegisterConsumerSessionManagerForPairingUpdates(ctx context.Context, consumerSessionManager *lavasession.ConsumerSessionManager) { // register this CSM to get the updated pairing list when a new epoch starts - pairingUpdater := NewPairingUpdater(cst.consumerAddress, cst.stateQuery) + pairingUpdater := NewPairingUpdater(cst.stateQuery) pairingUpdaterRaw := cst.StateTracker.RegisterForUpdates(ctx, pairingUpdater) pairingUpdater, ok := pairingUpdaterRaw.(*PairingUpdater) if !ok { utils.LavaFormatFatal("invalid updater type returned from RegisterForUpdates", nil, &map[string]string{"updater": fmt.Sprintf("%+v", pairingUpdaterRaw)}) } - pairingUpdater.RegisterPairing(ctx, consumerSessionManager) + err := pairingUpdater.RegisterPairing(ctx, consumerSessionManager) + if err != nil { + utils.LavaFormatError("failed registering for pairing updates", err, &map[string]string{"data": fmt.Sprintf("%+v", consumerSessionManager.RPCEndpoint())}) + } } func (cst *ConsumerStateTracker) RegisterFinalizationConsensusForUpdates(ctx context.Context, finalizationConsensus *lavaprotocol.FinalizationConsensus) { - finalizationConsensusUpdater := NewFinalizationConsensusUpdater(cst.consumerAddress, cst.stateQuery) + finalizationConsensusUpdater := NewFinalizationConsensusUpdater(cst.stateQuery) finalizationConsensusUpdaterRaw := 
cst.StateTracker.RegisterForUpdates(ctx, finalizationConsensusUpdater) finalizationConsensusUpdater, ok := finalizationConsensusUpdaterRaw.(*FinalizationConsensusUpdater) if !ok { diff --git a/protocol/statetracker/epoch_updater.go b/protocol/statetracker/epoch_updater.go new file mode 100644 index 0000000000..5eb4cfd29d --- /dev/null +++ b/protocol/statetracker/epoch_updater.go @@ -0,0 +1,47 @@ +package statetracker + +import ( + "golang.org/x/net/context" +) + +const ( + CallbackKeyForEpochUpdate = "epoch-update" +) + +type EpochUpdatable interface { + UpdateEpoch(epoch uint64) +} + +type EpochUpdater struct { + epochUpdatables []*EpochUpdatable + currentEpoch uint64 + stateQuery *ProviderStateQuery +} + +func NewEpochUpdater(stateQuery *ProviderStateQuery) *EpochUpdater { + return &EpochUpdater{epochUpdatables: []*EpochUpdatable{}, stateQuery: stateQuery} +} + +func (eu *EpochUpdater) RegisterEpochUpdatable(ctx context.Context, epochUpdatable *EpochUpdatable) { + eu.epochUpdatables = append(eu.epochUpdatables, epochUpdatable) + return +} + +func (eu *EpochUpdater) UpdaterKey() string { + return CallbackKeyForEpochUpdate +} + +func (eu *EpochUpdater) Update(latestBlock int64) { + ctx := context.Background() + currentEpoch, err := eu.stateQuery.CurrentEpochStart(ctx) + if err != nil { + return // failed to get the current epoch + } + if currentEpoch <= eu.currentEpoch { + return // still the same epoch + } + eu.currentEpoch = currentEpoch + for _, epochUpdatable := range eu.epochUpdatables { + (*epochUpdatable).UpdateEpoch(currentEpoch) + } +} diff --git a/protocol/statetracker/finalization_consensus_updater.go b/protocol/statetracker/finalization_consensus_updater.go index ca5c7a8dca..3a2b0b0010 100644 --- a/protocol/statetracker/finalization_consensus_updater.go +++ b/protocol/statetracker/finalization_consensus_updater.go @@ -4,7 +4,6 @@ import ( "context" "strconv" - sdk "github.com/cosmos/cosmos-sdk/types" "github.com/lavanet/lava/protocol/lavaprotocol" "github.com/lavanet/lava/utils" ) @@ -19,7 +18,7 @@ type FinalizationConsensusUpdater struct { stateQuery *ConsumerStateQuery } -func NewFinalizationConsensusUpdater(consumerAddress sdk.AccAddress, stateQuery *ConsumerStateQuery) *FinalizationConsensusUpdater { +func NewFinalizationConsensusUpdater(stateQuery *ConsumerStateQuery) *FinalizationConsensusUpdater { return &FinalizationConsensusUpdater{registeredFinalizationConsensuses: []*lavaprotocol.FinalizationConsensus{}, stateQuery: stateQuery} } diff --git a/protocol/statetracker/pairing_updater.go b/protocol/statetracker/pairing_updater.go index 2ffd8c6221..495109b250 100644 --- a/protocol/statetracker/pairing_updater.go +++ b/protocol/statetracker/pairing_updater.go @@ -4,7 +4,6 @@ import ( "fmt" "strconv" - sdk "github.com/cosmos/cosmos-sdk/types" "github.com/lavanet/lava/protocol/lavasession" "github.com/lavanet/lava/utils" epochstoragetypes "github.com/lavanet/lava/x/epochstorage/types" @@ -21,7 +20,7 @@ type PairingUpdater struct { stateQuery *ConsumerStateQuery } -func NewPairingUpdater(consumerAddress sdk.AccAddress, stateQuery *ConsumerStateQuery) *PairingUpdater { +func NewPairingUpdater(stateQuery *ConsumerStateQuery) *PairingUpdater { return &PairingUpdater{consumerSessionManagersMap: map[string][]*lavasession.ConsumerSessionManager{}, stateQuery: stateQuery} } @@ -32,7 +31,10 @@ func (pu *PairingUpdater) RegisterPairing(ctx context.Context, consumerSessionMa return err } pu.updateConsummerSessionManager(ctx, pairingList, consumerSessionManager, epoch) - 
pu.nextBlockForUpdate = nextBlockForUpdate // make sure we don't update twice when launching. + if nextBlockForUpdate > pu.nextBlockForUpdate { + // make sure we don't update twice, this updates pu.nextBlockForUpdate + pu.Update(int64(nextBlockForUpdate)) + } consumerSessionsManagersList, ok := pu.consumerSessionManagersMap[chainID] if !ok { pu.consumerSessionManagersMap[chainID] = []*lavasession.ConsumerSessionManager{consumerSessionManager} @@ -70,7 +72,7 @@ func (pu *PairingUpdater) Update(latestBlock int64) { } } } - nextBlockForUpdateMin := uint64(0) + nextBlockForUpdateMin := uint64(latestBlock) // in case the list is empty for idx, blockToUpdate := range nextBlockForUpdateList { if idx == 0 || blockToUpdate < nextBlockForUpdateMin { nextBlockForUpdateMin = blockToUpdate diff --git a/protocol/statetracker/provider_state_tracker.go b/protocol/statetracker/provider_state_tracker.go index 76ca54cee1..b7b8961c11 100644 --- a/protocol/statetracker/provider_state_tracker.go +++ b/protocol/statetracker/provider_state_tracker.go @@ -2,35 +2,57 @@ package statetracker import ( "context" + "fmt" "github.com/cosmos/cosmos-sdk/client" "github.com/cosmos/cosmos-sdk/client/tx" "github.com/lavanet/lava/protocol/chainlib" + "github.com/lavanet/lava/protocol/chaintracker" "github.com/lavanet/lava/protocol/rpcprovider/reliabilitymanager" + "github.com/lavanet/lava/utils" pairingtypes "github.com/lavanet/lava/x/pairing/types" ) // ProviderStateTracker PST is a class for tracking provider data from the lava blockchain, such as epoch changes. // it allows also to query specific data form the blockchain and acts as a single place to send transactions type ProviderStateTracker struct { - // TODO: embed stateTracker + stateQuery *ProviderStateQuery + txSender *ProviderTxSender + *StateTracker } -func (pst *ProviderStateTracker) New(ctx context.Context, txFactory tx.Factory, clientCtx client.Context) (ret *ProviderStateTracker, err error) { - // set up StateQuery - // Spin up chain tracker on the lava node, its address is in the --node flag (or its default), on new block call to newLavaBlock - // use StateQuery to get the lava spec and spin up the chain tracker with the right params - // set up txSender the same way +func NewProviderStateTracker(ctx context.Context, txFactory tx.Factory, clientCtx client.Context, chainFetcher chaintracker.ChainFetcher) (ret *ProviderStateTracker, err error) { + stateTrackerBase, err := NewStateTracker(ctx, txFactory, clientCtx, chainFetcher) + if err != nil { + return nil, err + } + txSender, err := NewProviderTxSender(ctx, clientCtx, txFactory) + if err != nil { + return nil, err + } + pst := &ProviderStateTracker{StateTracker: stateTrackerBase, stateQuery: NewProviderStateQuery(ctx, clientCtx), txSender: txSender} return pst, nil } -func (pst *ProviderStateTracker) RegisterForEpochUpdates(ctx context.Context, epochUpdatable EpochUpdatable) { +func (pst *ProviderStateTracker) RegisterForEpochUpdates(ctx context.Context, epochUpdatable *EpochUpdatable) { // create an epoch updater // add epoch updater to the updater map + epochUpdater := NewEpochUpdater(pst.stateQuery) + epochUpdaterRaw := pst.StateTracker.RegisterForUpdates(ctx, epochUpdater) + epochUpdater, ok := epochUpdaterRaw.(*EpochUpdater) + if !ok { + utils.LavaFormatFatal("invalid updater type returned from RegisterForUpdates", nil, &map[string]string{"updater": fmt.Sprintf("%+v", epochUpdaterRaw)}) + } + epochUpdater.RegisterEpochUpdatable(ctx, epochUpdatable) } -func (pst *ProviderStateTracker) 
RegisterChainParserForSpecUpdates(ctx context.Context, chainParser chainlib.ChainParser) { - // can be moved to base class +func (pst *ProviderStateTracker) RegisterChainParserForSpecUpdates(ctx context.Context, chainParser chainlib.ChainParser, chainID string) error { + spec, err := pst.stateQuery.GetSpec(ctx, chainID) + if err != nil { + return err + } + chainParser.SetSpec(*spec) + return nil } func (pst *ProviderStateTracker) RegisterReliabilityManagerForVoteUpdates(ctx context.Context, reliabilityManager *reliabilitymanager.ReliabilityManager) { diff --git a/protocol/statetracker/state_query.go b/protocol/statetracker/state_query.go index 08d497d714..71e8d5aa25 100644 --- a/protocol/statetracker/state_query.go +++ b/protocol/statetracker/state_query.go @@ -5,7 +5,9 @@ import ( "strconv" "github.com/cosmos/cosmos-sdk/client" + reliabilitymanager "github.com/lavanet/lava/protocol/rpcprovider/reliabilitymanager" "github.com/lavanet/lava/utils" + conflicttypes "github.com/lavanet/lava/x/conflict/types" epochstoragetypes "github.com/lavanet/lava/x/epochstorage/types" pairingtypes "github.com/lavanet/lava/x/pairing/types" spectypes "github.com/lavanet/lava/x/spec/types" @@ -25,6 +27,16 @@ func NewStateQuery(ctx context.Context, clientCtx client.Context) *StateQuery { return sq } +func (csq *StateQuery) GetSpec(ctx context.Context, chainID string) (*spectypes.Spec, error) { + spec, err := csq.SpecQueryClient.Spec(ctx, &spectypes.QueryGetSpecRequest{ + ChainID: chainID, + }) + if err != nil { + return nil, utils.LavaFormatError("Failed Querying spec for chain", err, &map[string]string{"ChainID": chainID}) + } + return &spec.Spec, nil +} + type ConsumerStateQuery struct { StateQuery clientCtx client.Context @@ -74,12 +86,69 @@ func (csq *ConsumerStateQuery) GetMaxCUForUser(ctx context.Context, chainID stri return UserEntryRes.GetMaxCU(), nil } -func (csq *ConsumerStateQuery) GetSpec(ctx context.Context, chainID string) (*spectypes.Spec, error) { - spec, err := csq.SpecQueryClient.Spec(ctx, &spectypes.QueryGetSpecRequest{ - ChainID: chainID, - }) +type ProviderStateQuery struct { + StateQuery + clientCtx client.Context +} + +func NewProviderStateQuery(ctx context.Context, clientCtx client.Context) *ProviderStateQuery { + csq := &ProviderStateQuery{StateQuery: *NewStateQuery(ctx, clientCtx), clientCtx: clientCtx} + return csq +} + +func (psq *ProviderStateQuery) CurrentEpochStart(ctx context.Context) (uint64, error) { + epochDetails, err := psq.EpochStorageQueryClient.EpochDetails(ctx, &epochstoragetypes.QueryGetEpochDetailsRequest{}) if err != nil { - return nil, utils.LavaFormatError("Failed Querying spec for chain", err, &map[string]string{"ChainID": chainID}) + return 0, utils.LavaFormatError("Failed Querying EpochDetails", err, nil) } - return &spec.Spec, nil + details := epochDetails.GetEpochDetails() + return details.StartBlock, nil + +} + +func (psq *ProviderStateQuery) VoteEvents(ctx context.Context, latestBlock int64) (votes []*reliabilitymanager.VoteParams, err error) { + blockResults, err := psq.clientCtx.Client.BlockResults(ctx, &latestBlock) + if err != nil { + return nil, err + } + transactionResults := blockResults.TxsResults + for _, tx := range transactionResults { + events := tx.Events + for _, event := range events { + if event.Type == utils.EventPrefix+conflicttypes.ConflictVoteDetectionEventName { + vote, err := reliabilitymanager.BuildVoteParamsFromDetectionEvent(event) + if err != nil { + return nil, err + } + votes = append(votes, vote) + } + } + } + + beginBlockEvents 
:= blockResults.BeginBlockEvents + for _, event := range beginBlockEvents { + if event.Type == utils.EventPrefix+conflicttypes.ConflictVoteRevealEventName { + // eventToListen := utils.EventPrefix + conflicttypes.ConflictVoteRevealEventName + // if votesList, ok := e.Events[eventToListen+".voteID"]; ok { + // for idx, voteID := range votesList { + // num_str := e.Events[eventToListen+".voteDeadline"][idx] + // voteDeadline, err := strconv.ParseUint(num_str, 10, 64) + // if err != nil { + // utils.LavaFormatError("parsing vote deadline", err, &map[string]string{"VoteDeadline": num_str}) + // continue + // } + // go s.voteInitiationCb(ctx, voteID, voteDeadline, nil) + // } + // } + + // eventToListen = utils.EventPrefix + conflicttypes.ConflictVoteResolvedEventName + // if votesList, ok := e.Events[eventToListen+".voteID"]; ok { + // for _, voteID := range votesList { + // voteParams := &VoteParams{CloseVote: true} + // go s.voteInitiationCb(ctx, voteID, 0, voteParams) + // } + // } + } + } + return } diff --git a/protocol/statetracker/state_tracker.go b/protocol/statetracker/state_tracker.go index 7b631e781d..589f9b5493 100644 --- a/protocol/statetracker/state_tracker.go +++ b/protocol/statetracker/state_tracker.go @@ -63,7 +63,3 @@ func (cst *StateTracker) RegisterForUpdates(ctx context.Context, updater Updater } return existingUpdater } - -type EpochUpdatable interface { - UpdateEpoch(epoch uint64) -} diff --git a/protocol/statetracker/tx_sender.go b/protocol/statetracker/tx_sender.go index 9345a6463d..3dca55a14f 100644 --- a/protocol/statetracker/tx_sender.go +++ b/protocol/statetracker/tx_sender.go @@ -104,3 +104,16 @@ func (ts *ConsumerTxSender) TxConflictDetection(ctx context.Context, finalizatio } return nil } + +type ProviderTxSender struct { + *TxSender +} + +func NewProviderTxSender(ctx context.Context, clientCtx client.Context, txFactory tx.Factory) (ret *ProviderTxSender, err error) { + txSender, err := NewTxSender(ctx, clientCtx, txFactory) + if err != nil { + return nil, err + } + ts := &ProviderTxSender{TxSender: txSender} + return ts, nil +} diff --git a/protocol/statetracker/vote_updater.go b/protocol/statetracker/vote_updater.go new file mode 100644 index 0000000000..1083ea361b --- /dev/null +++ b/protocol/statetracker/vote_updater.go @@ -0,0 +1,41 @@ +package statetracker + +import ( + "github.com/lavanet/lava/protocol/lavasession" + "golang.org/x/net/context" +) + +const ( + CallbackKeyForVoteUpdate = "vote-update" +) + +type VoteUpdatable interface { + VoteCommit() + VoteReveal() +} + +type VoteUpdater struct { + voteUpdatables map[string]*VoteUpdatable + stateQuery *ProviderStateQuery +} + +func NewVoteUpdater(stateQuery *ProviderStateQuery) *VoteUpdater { + return &VoteUpdater{voteUpdatables: map[string]*VoteUpdatable{}, stateQuery: stateQuery} +} + +func (vu *VoteUpdater) RegisterVoteUpdatable(ctx context.Context, voteUpdatable *VoteUpdatable, endpoint lavasession.RPCEndpoint) { + vu.voteUpdatables[endpoint.Key()] = voteUpdatable + return +} + +func (vu *VoteUpdater) UpdaterKey() string { + return CallbackKeyForVoteUpdate +} + +func (vu *VoteUpdater) Update(latestBlock int64) { + ctx := context.Background() + _, err := vu.stateQuery.VoteEvents(ctx, latestBlock) + if err != nil { + return + } +} From b8fe4cc57ca7247285fa8580b1e58fa2571e2898 Mon Sep 17 00:00:00 2001 From: omer mishael Date: Mon, 13 Feb 2023 19:44:31 +0200 Subject: [PATCH 002/123] reliability manager added vote handler --- .../rpcprovider/reliabilitymanager/errors.go | 9 + 
.../reliabilitymanager/reliability_manager.go | 156 +++++++++++++++++- protocol/rpcprovider/rpcprovider.go | 17 +- protocol/statetracker/epoch_updater.go | 4 +- .../statetracker/provider_state_tracker.go | 9 +- protocol/statetracker/state_query.go | 42 ++--- protocol/statetracker/vote_updater.go | 11 +- 7 files changed, 210 insertions(+), 38 deletions(-) create mode 100644 protocol/rpcprovider/reliabilitymanager/errors.go diff --git a/protocol/rpcprovider/reliabilitymanager/errors.go b/protocol/rpcprovider/reliabilitymanager/errors.go new file mode 100644 index 0000000000..5fe437d437 --- /dev/null +++ b/protocol/rpcprovider/reliabilitymanager/errors.go @@ -0,0 +1,9 @@ +package reliabilitymanager + +import ( + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +var ( + NoVoteDeadline = sdkerrors.New("Not Connected Error", 800, "No Connection To grpc server") +) diff --git a/protocol/rpcprovider/reliabilitymanager/reliability_manager.go b/protocol/rpcprovider/reliabilitymanager/reliability_manager.go index 1339815a30..fd86a8d193 100644 --- a/protocol/rpcprovider/reliabilitymanager/reliability_manager.go +++ b/protocol/rpcprovider/reliabilitymanager/reliability_manager.go @@ -1,16 +1,129 @@ package reliabilitymanager import ( + "context" + "fmt" + "math/rand" "strconv" "strings" + "sync" + "github.com/lavanet/lava/protocol/chainlib" "github.com/lavanet/lava/protocol/chaintracker" + "github.com/lavanet/lava/relayer/sigs" "github.com/lavanet/lava/utils" + conflicttypes "github.com/lavanet/lava/x/conflict/types" terderminttypes "github.com/tendermint/tendermint/abci/types" + "golang.org/x/exp/slices" ) +const ( + DetectionVoteType = 0 + RevealVoteType = 1 + CloseVoteType = 2 +) + +type TxSender interface { + SendVoteReveal(voteID string, vote *VoteData) + SendVoteCommitment(voteID string, vote *VoteData) +} + type ReliabilityManager struct { - chainTracker *chaintracker.ChainTracker + chainTracker *chaintracker.ChainTracker + votes_mutex sync.Mutex + votes map[string]*VoteData + txSender TxSender + publicAddress string + chainProxy chainlib.ChainProxy + chainParser chainlib.ChainParser +} + +func (rm *ReliabilityManager) VoteHandler(voteParams *VoteParams, nodeHeight uint64) { + + // got a vote event, handle the cases here + voteID := voteParams.VoteID + voteDeadline := voteParams.VoteDeadline + if !voteParams.GetCloseVote() { + // meaning we dont close a vote, so we should check stuff + if voteDeadline < nodeHeight { + // its too late to vote + utils.LavaFormatError("Vote Event received but it's too late to vote", nil, + &map[string]string{"deadline": strconv.FormatUint(voteDeadline, 10), "nodeHeight": strconv.FormatUint(nodeHeight, 10)}) + return + } + } + rm.votes_mutex.Lock() + defer rm.votes_mutex.Unlock() + vote, ok := rm.votes[voteID] + if ok { + // we have an existing vote with this ID + if voteParams.ParamsType == CloseVoteType { + if voteParams.GetCloseVote() { + // we are closing the vote, so its okay we have this voteID + utils.LavaFormatInfo("Received Vote termination event for vote, cleared entry", + &map[string]string{"voteID": voteID}) + delete(rm.votes, voteID) + return + } + // expected to start a new vote but found an existing one + utils.LavaFormatError("new vote Request for vote had existing entry", nil, + &map[string]string{"voteParams": fmt.Sprintf("%+v", voteParams), "voteID": voteID, "voteData": fmt.Sprintf("%+v", vote)}) + return + } + utils.LavaFormatInfo(" Received Vote Reveal for vote, sending Reveal for result", + &map[string]string{"voteID": voteID, 
"voteData": fmt.Sprintf("%+v", vote)}) + rm.txSender.SendVoteReveal(voteID, vote) + return + } else { + // new vote + if voteParams == nil { + utils.LavaFormatError("vote commit Request didn't have a vote entry", nil, + &map[string]string{"voteID": voteID}) + return + } + if voteParams.GetCloseVote() { + utils.LavaFormatError("vote closing received but didn't have a vote entry", nil, + &map[string]string{"voteID": voteID}) + return + } + if voteParams.ParamsType != DetectionVoteType { + utils.LavaFormatError("new voteID without DetectionVoteType", nil, + &map[string]string{"voteParams": fmt.Sprintf("%v", voteParams)}) + return + } + // try to find this provider in the jury + found := slices.Contains(voteParams.Voters, rm.publicAddress) + if !found { + utils.LavaFormatInfo("new vote initiated but not for this provider to vote", nil) + // this is a new vote but not for us + return + } + // we need to send a commit, first we need to use the chainProxy and get the response + // TODO: implement code that verified the requested block is finalized and if its not waits and tries again + ctx := context.Background() + chainMessage, err := rm.chainParser.ParseMsg(voteParams.ApiURL, voteParams.RequestData, voteParams.ConnectionType) + if err != nil { + utils.LavaFormatError("vote Request did not pass the api check on chain proxy", err, + &map[string]string{"voteID": voteID, "chainID": voteParams.ChainID}) + return + } + reply, _, _, err := rm.chainProxy.SendNodeMsg(ctx, nil, chainMessage) + + if err != nil { + utils.LavaFormatError("vote relay send has failed", err, + &map[string]string{"ApiURL": voteParams.ApiURL, "RequestData": string(voteParams.RequestData)}) + return + } + nonce := rand.Int63() + replyDataHash := sigs.HashMsg(reply.Data) + commitHash := conflicttypes.CommitVoteData(nonce, replyDataHash) + + vote = &VoteData{RelayDataHash: replyDataHash, Nonce: nonce, CommitHash: commitHash} + rm.votes[voteID] = vote + utils.LavaFormatInfo("Received Vote start, sending commitment for result", &map[string]string{"voteID": voteID, "voteData": fmt.Sprintf("%+v", vote)}) + rm.txSender.SendVoteCommitment(voteID, vote) + return + } } func (rm *ReliabilityManager) GetLatestBlockData(fromBlock int64, toBlock int64, specificBlock int64) (latestBlock int64, requestedHashes []*chaintracker.BlockStore, err error) { @@ -21,12 +134,25 @@ func (rm *ReliabilityManager) GetLatestBlockNum() int64 { return rm.chainTracker.GetLatestBlockNum() } -func NewReliabilityManager(chainTracker *chaintracker.ChainTracker) *ReliabilityManager { - rm := &ReliabilityManager{} - rm.chainTracker = chainTracker +func NewReliabilityManager(chainTracker *chaintracker.ChainTracker, txSender TxSender, publicAddress string, chainProxy chainlib.ChainProxy, chainParser chainlib.ChainParser) *ReliabilityManager { + rm := &ReliabilityManager{ + votes: map[string]*VoteData{}, + txSender: txSender, + publicAddress: publicAddress, + chainTracker: chainTracker, + chainProxy: chainProxy, + chainParser: chainParser, + } + return rm } +type VoteData struct { + RelayDataHash []byte + Nonce int64 + CommitHash []byte +} + type VoteParams struct { CloseVote bool ChainID string @@ -38,6 +164,7 @@ type VoteParams struct { ApiInterface string VoteDeadline uint64 VoteID string + ParamsType uint } func (vp *VoteParams) GetCloseVote() bool { @@ -48,6 +175,26 @@ func (vp *VoteParams) GetCloseVote() bool { return vp.CloseVote } +func BuildBaseVoteDataFromEvent(event terderminttypes.Event) (voteID string, voteDeadline uint64, err error) { + attributes := 
map[string]string{} + for _, attribute := range event.Attributes { + attributes[string(attribute.Key)] = string(attribute.Value) + } + voteID, ok := attributes["voteID"] + if !ok { + return "", 0, utils.LavaFormatError("failed building BuildVoteParamsFromRevealEvent", nil, &attributes) + } + num_str, ok := attributes["voteDeadline"] + if !ok { + return voteID, 0, utils.LavaFormatError("no attribute deadline", NoVoteDeadline, nil) + } + voteDeadline, err = strconv.ParseUint(num_str, 10, 64) + if err != nil { + return "", 0, utils.LavaFormatError("vote deadline could not be parsed", err, &map[string]string{"deadline": num_str, "voteID": voteID}) + } + return voteID, voteDeadline, nil +} + func BuildVoteParamsFromDetectionEvent(event terderminttypes.Event) (*VoteParams, error) { attributes := map[string]string{} for _, attribute := range event.Attributes { @@ -112,6 +259,7 @@ func BuildVoteParamsFromDetectionEvent(event terderminttypes.Event) (*VoteParams ApiInterface: apiInterface, VoteDeadline: voteDeadline, VoteID: voteID, + ParamsType: DetectionVoteType, } return voteParams, nil } diff --git a/protocol/rpcprovider/rpcprovider.go b/protocol/rpcprovider/rpcprovider.go index 706a3a005b..9cd0fc943b 100644 --- a/protocol/rpcprovider/rpcprovider.go +++ b/protocol/rpcprovider/rpcprovider.go @@ -34,11 +34,13 @@ var ( ) type ProviderStateTrackerInf interface { - RegisterChainParserForSpecUpdates(ctx context.Context, chainParser chainlib.ChainParser) + RegisterChainParserForSpecUpdates(ctx context.Context, chainParser chainlib.ChainParser, chainID string) error RegisterReliabilityManagerForVoteUpdates(ctx context.Context, reliabilityManager *reliabilitymanager.ReliabilityManager) RegisterForEpochUpdates(ctx context.Context, epochUpdatable statetracker.EpochUpdatable) QueryVerifyPairing(ctx context.Context, consumer string, blockHeight uint64) TxRelayPayment(ctx context.Context, relayRequests []*pairingtypes.RelayRequest) + SendVoteReveal(voteID string, vote *reliabilitymanager.VoteData) + SendVoteCommitment(voteID string, vote *reliabilitymanager.VoteData) } type RPCProvider struct { @@ -48,14 +50,15 @@ type RPCProvider struct { func (rpcp *RPCProvider) Start(ctx context.Context, txFactory tx.Factory, clientCtx client.Context, rpcProviderEndpoints []*lavasession.RPCProviderEndpoint, cache *performance.Cache, parallelConnections uint) (err error) { // single state tracker - providerStateTracker := statetracker.ProviderStateTracker{} - rpcp.providerStateTracker, err = providerStateTracker.New(ctx, txFactory, clientCtx) + lavaChainFetcher := chainlib.NewLavaChainFetcher(ctx, clientCtx) + providerStateTracker, err := statetracker.NewProviderStateTracker(ctx, txFactory, clientCtx, lavaChainFetcher) if err != nil { return err } + rpcp.providerStateTracker = providerStateTracker rpcp.rpcProviderServers = make(map[string]*RPCProviderServer, len(rpcProviderEndpoints)) // single reward server - rewardServer := rewardserver.NewRewardServer(&providerStateTracker) + rewardServer := rewardserver.NewRewardServer(providerStateTracker) keyName, err := sigs.GetKeyName(clientCtx) if err != nil { @@ -75,14 +78,14 @@ func (rpcp *RPCProvider) Start(ctx context.Context, txFactory tx.Factory, client utils.LavaFormatInfo("RPCProvider pubkey: "+addr.String(), nil) utils.LavaFormatInfo("RPCProvider setting up endpoints", &map[string]string{"length": strconv.Itoa(len(rpcProviderEndpoints))}) for _, rpcProviderEndpoint := range rpcProviderEndpoints { - providerSessionManager := 
lavasession.NewProviderSessionManager(rpcProviderEndpoint, &providerStateTracker) + providerSessionManager := lavasession.NewProviderSessionManager(rpcProviderEndpoint, providerStateTracker) key := rpcProviderEndpoint.Key() rpcp.providerStateTracker.RegisterForEpochUpdates(ctx, providerSessionManager) chainParser, err := chainlib.NewChainParser(rpcProviderEndpoint.ApiInterface) if err != nil { return err } - providerStateTracker.RegisterChainParserForSpecUpdates(ctx, chainParser) + providerStateTracker.RegisterChainParserForSpecUpdates(ctx, chainParser, rpcProviderEndpoint.ChainID) chainProxy, err := chainlib.GetChainProxy(ctx, parallelConnections, rpcProviderEndpoint) if err != nil { @@ -101,7 +104,7 @@ func (rpcp *RPCProvider) Start(ctx context.Context, txFactory tx.Factory, client if err != nil { utils.LavaFormatFatal("failed creating chain tracker", err, &map[string]string{"chainTrackerConfig": fmt.Sprintf("%+v", chainTrackerConfig)}) } - reliabilityManager := reliabilitymanager.NewReliabilityManager(chainTracker) + reliabilityManager := reliabilitymanager.NewReliabilityManager(chainTracker, providerStateTracker, addr.String(), chainProxy, chainParser) providerStateTracker.RegisterReliabilityManagerForVoteUpdates(ctx, reliabilityManager) rpcp.rpcProviderServers[key] = &RPCProviderServer{} diff --git a/protocol/statetracker/epoch_updater.go b/protocol/statetracker/epoch_updater.go index 5eb4cfd29d..79acd09ce9 100644 --- a/protocol/statetracker/epoch_updater.go +++ b/protocol/statetracker/epoch_updater.go @@ -22,8 +22,8 @@ func NewEpochUpdater(stateQuery *ProviderStateQuery) *EpochUpdater { return &EpochUpdater{epochUpdatables: []*EpochUpdatable{}, stateQuery: stateQuery} } -func (eu *EpochUpdater) RegisterEpochUpdatable(ctx context.Context, epochUpdatable *EpochUpdatable) { - eu.epochUpdatables = append(eu.epochUpdatables, epochUpdatable) +func (eu *EpochUpdater) RegisterEpochUpdatable(ctx context.Context, epochUpdatable EpochUpdatable) { + eu.epochUpdatables = append(eu.epochUpdatables, &epochUpdatable) return } diff --git a/protocol/statetracker/provider_state_tracker.go b/protocol/statetracker/provider_state_tracker.go index b7b8961c11..1d653d6230 100644 --- a/protocol/statetracker/provider_state_tracker.go +++ b/protocol/statetracker/provider_state_tracker.go @@ -34,7 +34,7 @@ func NewProviderStateTracker(ctx context.Context, txFactory tx.Factory, clientCt return pst, nil } -func (pst *ProviderStateTracker) RegisterForEpochUpdates(ctx context.Context, epochUpdatable *EpochUpdatable) { +func (pst *ProviderStateTracker) RegisterForEpochUpdates(ctx context.Context, epochUpdatable EpochUpdatable) { // create an epoch updater // add epoch updater to the updater map epochUpdater := NewEpochUpdater(pst.stateQuery) @@ -66,3 +66,10 @@ func (pst *ProviderStateTracker) QueryVerifyPairing(ctx context.Context, consume func (pst *ProviderStateTracker) TxRelayPayment(ctx context.Context, relayRequests []*pairingtypes.RelayRequest) { // TODO: implement } + +func (pst *ProviderStateTracker) SendVoteReveal(voteID string, vote *reliabilitymanager.VoteData) { + +} +func (pst *ProviderStateTracker) SendVoteCommitment(voteID string, vote *reliabilitymanager.VoteData) { + +} diff --git a/protocol/statetracker/state_query.go b/protocol/statetracker/state_query.go index 71e8d5aa25..3e8de7445f 100644 --- a/protocol/statetracker/state_query.go +++ b/protocol/statetracker/state_query.go @@ -2,6 +2,7 @@ package statetracker import ( "context" + "fmt" "strconv" "github.com/cosmos/cosmos-sdk/client" @@ -118,8 
+119,9 @@ func (psq *ProviderStateQuery) VoteEvents(ctx context.Context, latestBlock int64 if event.Type == utils.EventPrefix+conflicttypes.ConflictVoteDetectionEventName { vote, err := reliabilitymanager.BuildVoteParamsFromDetectionEvent(event) if err != nil { - return nil, err + return nil, utils.LavaFormatError("failed conflict_vote_detection_event parsing", err, &map[string]string{"event": fmt.Sprintf("%v", event)}) } + utils.LavaFormatDebug("conflict_vote_detection_event", &map[string]string{"voteID": vote.VoteID}) votes = append(votes, vote) } } @@ -128,26 +130,24 @@ func (psq *ProviderStateQuery) VoteEvents(ctx context.Context, latestBlock int64 beginBlockEvents := blockResults.BeginBlockEvents for _, event := range beginBlockEvents { if event.Type == utils.EventPrefix+conflicttypes.ConflictVoteRevealEventName { - // eventToListen := utils.EventPrefix + conflicttypes.ConflictVoteRevealEventName - // if votesList, ok := e.Events[eventToListen+".voteID"]; ok { - // for idx, voteID := range votesList { - // num_str := e.Events[eventToListen+".voteDeadline"][idx] - // voteDeadline, err := strconv.ParseUint(num_str, 10, 64) - // if err != nil { - // utils.LavaFormatError("parsing vote deadline", err, &map[string]string{"VoteDeadline": num_str}) - // continue - // } - // go s.voteInitiationCb(ctx, voteID, voteDeadline, nil) - // } - // } - - // eventToListen = utils.EventPrefix + conflicttypes.ConflictVoteResolvedEventName - // if votesList, ok := e.Events[eventToListen+".voteID"]; ok { - // for _, voteID := range votesList { - // voteParams := &VoteParams{CloseVote: true} - // go s.voteInitiationCb(ctx, voteID, 0, voteParams) - // } - // } + voteID, voteDeadline, err := reliabilitymanager.BuildBaseVoteDataFromEvent(event) + if err != nil { + return nil, utils.LavaFormatError("failed conflict_vote_reveal_event parsing", err, &map[string]string{"event": fmt.Sprintf("%v", event)}) + } + vote_reveal := &reliabilitymanager.VoteParams{VoteID: voteID, VoteDeadline: voteDeadline, ParamsType: reliabilitymanager.RevealVoteType} + utils.LavaFormatDebug("conflict_vote_reveal_event", &map[string]string{"voteID": voteID}) + votes = append(votes, vote_reveal) + } + if event.Type == utils.EventPrefix+conflicttypes.ConflictVoteResolvedEventName { + voteID, _, err := reliabilitymanager.BuildBaseVoteDataFromEvent(event) + if err != nil { + if !reliabilitymanager.NoVoteDeadline.Is(err) { + return nil, utils.LavaFormatError("failed conflict_vote_resolved_event parsing", err, &map[string]string{"event": fmt.Sprintf("%v", event)}) + } + } + vote_resolved := &reliabilitymanager.VoteParams{VoteID: voteID, VoteDeadline: 0, ParamsType: reliabilitymanager.CloseVoteType, CloseVote: true} + votes = append(votes, vote_resolved) + utils.LavaFormatDebug("conflict_vote_resolved_event", &map[string]string{"voteID": voteID}) } } return diff --git a/protocol/statetracker/vote_updater.go b/protocol/statetracker/vote_updater.go index 1083ea361b..9cd3ad803c 100644 --- a/protocol/statetracker/vote_updater.go +++ b/protocol/statetracker/vote_updater.go @@ -2,6 +2,7 @@ package statetracker import ( "github.com/lavanet/lava/protocol/lavasession" + "github.com/lavanet/lava/protocol/rpcprovider/reliabilitymanager" "golang.org/x/net/context" ) @@ -10,8 +11,7 @@ const ( ) type VoteUpdatable interface { - VoteCommit() - VoteReveal() + VoteHandler(*reliabilitymanager.VoteParams) } type VoteUpdater struct { @@ -34,8 +34,13 @@ func (vu *VoteUpdater) UpdaterKey() string { func (vu *VoteUpdater) Update(latestBlock int64) { ctx := 
context.Background() - _, err := vu.stateQuery.VoteEvents(ctx, latestBlock) + votes, err := vu.stateQuery.VoteEvents(ctx, latestBlock) if err != nil { return } + for _, vote := range votes { + endpoint := lavasession.RPCEndpoint{ChainID: vote.ChainID, ApiInterface: vote.ApiInterface} + updatable := vu.voteUpdatables[endpoint.Key()] + (*updatable).VoteHandler(vote) + } } From e810eba83c2212c4f02d491e2bfe270040a1ab64 Mon Sep 17 00:00:00 2001 From: omer mishael Date: Tue, 14 Feb 2023 09:50:15 +0200 Subject: [PATCH 003/123] finished implementing vote updater and it's registration and transactions --- protocol/chaintracker/chain_tracker.go | 8 ++++++-- .../reliabilitymanager/reliability_manager.go | 4 ++-- protocol/rpcprovider/rpcprovider.go | 8 ++++---- .../statetracker/provider_state_tracker.go | 20 +++++++++++++------ protocol/statetracker/tx_sender.go | 19 ++++++++++++++++++ protocol/statetracker/vote_updater.go | 4 ++-- 6 files changed, 47 insertions(+), 16 deletions(-) diff --git a/protocol/chaintracker/chain_tracker.go b/protocol/chaintracker/chain_tracker.go index 3d51169e10..27b5ae8feb 100644 --- a/protocol/chaintracker/chain_tracker.go +++ b/protocol/chaintracker/chain_tracker.go @@ -230,11 +230,15 @@ func (cs *ChainTracker) fetchAllPreviousBlocksIfNecessary(ctx context.Context) ( } if gotNewBlock || forked { utils.LavaFormatDebug("ChainTracker should update state", &map[string]string{"gotNewBlock": fmt.Sprintf("%t", gotNewBlock), "forked": fmt.Sprintf("%t", forked), "newLatestBlock": strconv.FormatInt(newLatestBlock, 10), "currentBlock": strconv.FormatInt(cs.GetLatestBlockNum(), 10)}) - // TODO: if we didn't fork theres really no need to refetch + + prev_latest := cs.GetLatestBlockNum() cs.fetchAllPreviousBlocks(ctx, newLatestBlock) if gotNewBlock { if cs.newLatestCallback != nil { - cs.newLatestCallback(newLatestBlock) + for i := prev_latest + 1; i <= newLatestBlock; i++ { + // on catch up of several blocks we don't want to miss any callbacks + cs.newLatestCallback(i) + } } } if forked { diff --git a/protocol/rpcprovider/reliabilitymanager/reliability_manager.go b/protocol/rpcprovider/reliabilitymanager/reliability_manager.go index fd86a8d193..2a4045198b 100644 --- a/protocol/rpcprovider/reliabilitymanager/reliability_manager.go +++ b/protocol/rpcprovider/reliabilitymanager/reliability_manager.go @@ -24,8 +24,8 @@ const ( ) type TxSender interface { - SendVoteReveal(voteID string, vote *VoteData) - SendVoteCommitment(voteID string, vote *VoteData) + SendVoteReveal(voteID string, vote *VoteData) error + SendVoteCommitment(voteID string, vote *VoteData) error } type ReliabilityManager struct { diff --git a/protocol/rpcprovider/rpcprovider.go b/protocol/rpcprovider/rpcprovider.go index 9cd0fc943b..ba7695c861 100644 --- a/protocol/rpcprovider/rpcprovider.go +++ b/protocol/rpcprovider/rpcprovider.go @@ -35,12 +35,12 @@ var ( type ProviderStateTrackerInf interface { RegisterChainParserForSpecUpdates(ctx context.Context, chainParser chainlib.ChainParser, chainID string) error - RegisterReliabilityManagerForVoteUpdates(ctx context.Context, reliabilityManager *reliabilitymanager.ReliabilityManager) + RegisterReliabilityManagerForVoteUpdates(ctx context.Context, voteUpdatable statetracker.VoteUpdatable, endpointP *lavasession.RPCProviderEndpoint) RegisterForEpochUpdates(ctx context.Context, epochUpdatable statetracker.EpochUpdatable) QueryVerifyPairing(ctx context.Context, consumer string, blockHeight uint64) TxRelayPayment(ctx context.Context, relayRequests []*pairingtypes.RelayRequest) - 
SendVoteReveal(voteID string, vote *reliabilitymanager.VoteData) - SendVoteCommitment(voteID string, vote *reliabilitymanager.VoteData) + SendVoteReveal(voteID string, vote *reliabilitymanager.VoteData) error + SendVoteCommitment(voteID string, vote *reliabilitymanager.VoteData) error } type RPCProvider struct { @@ -105,7 +105,7 @@ func (rpcp *RPCProvider) Start(ctx context.Context, txFactory tx.Factory, client utils.LavaFormatFatal("failed creating chain tracker", err, &map[string]string{"chainTrackerConfig": fmt.Sprintf("%+v", chainTrackerConfig)}) } reliabilityManager := reliabilitymanager.NewReliabilityManager(chainTracker, providerStateTracker, addr.String(), chainProxy, chainParser) - providerStateTracker.RegisterReliabilityManagerForVoteUpdates(ctx, reliabilityManager) + providerStateTracker.RegisterReliabilityManagerForVoteUpdates(ctx, reliabilityManager, rpcProviderEndpoint) rpcp.rpcProviderServers[key] = &RPCProviderServer{} utils.LavaFormatInfo("RPCProvider Listening", &map[string]string{"endpoints": lavasession.PrintRPCProviderEndpoint(rpcProviderEndpoint)}) diff --git a/protocol/statetracker/provider_state_tracker.go b/protocol/statetracker/provider_state_tracker.go index 1d653d6230..cbeccece20 100644 --- a/protocol/statetracker/provider_state_tracker.go +++ b/protocol/statetracker/provider_state_tracker.go @@ -8,6 +8,7 @@ import ( "github.com/cosmos/cosmos-sdk/client/tx" "github.com/lavanet/lava/protocol/chainlib" "github.com/lavanet/lava/protocol/chaintracker" + "github.com/lavanet/lava/protocol/lavasession" "github.com/lavanet/lava/protocol/rpcprovider/reliabilitymanager" "github.com/lavanet/lava/utils" pairingtypes "github.com/lavanet/lava/x/pairing/types" @@ -55,8 +56,15 @@ func (pst *ProviderStateTracker) RegisterChainParserForSpecUpdates(ctx context.C return nil } -func (pst *ProviderStateTracker) RegisterReliabilityManagerForVoteUpdates(ctx context.Context, reliabilityManager *reliabilitymanager.ReliabilityManager) { - // TODO: change to an interface instead of reliabilitymanager.ReliabilityManager +func (pst *ProviderStateTracker) RegisterReliabilityManagerForVoteUpdates(ctx context.Context, voteUpdatable VoteUpdatable, endpointP *lavasession.RPCProviderEndpoint) { + voteUpdater := NewVoteUpdater(pst.stateQuery) + voteUpdaterRaw := pst.StateTracker.RegisterForUpdates(ctx, voteUpdater) + voteUpdater, ok := voteUpdaterRaw.(*VoteUpdater) + if !ok { + utils.LavaFormatFatal("invalid updater type returned from RegisterForUpdates", nil, &map[string]string{"updater": fmt.Sprintf("%+v", voteUpdaterRaw)}) + } + endpoint := lavasession.RPCEndpoint{ChainID: endpointP.ChainID, ApiInterface: endpointP.ApiInterface} + voteUpdater.RegisterVoteUpdatable(ctx, &voteUpdatable, endpoint) } func (pst *ProviderStateTracker) QueryVerifyPairing(ctx context.Context, consumer string, blockHeight uint64) { @@ -67,9 +75,9 @@ func (pst *ProviderStateTracker) TxRelayPayment(ctx context.Context, relayReques // TODO: implement } -func (pst *ProviderStateTracker) SendVoteReveal(voteID string, vote *reliabilitymanager.VoteData) { - +func (pst *ProviderStateTracker) SendVoteReveal(voteID string, vote *reliabilitymanager.VoteData) error { + return pst.txSender.SendVoteReveal(voteID, vote) } -func (pst *ProviderStateTracker) SendVoteCommitment(voteID string, vote *reliabilitymanager.VoteData) { - +func (pst *ProviderStateTracker) SendVoteCommitment(voteID string, vote *reliabilitymanager.VoteData) error { + return pst.txSender.SendVoteCommitment(voteID, vote) } diff --git 
a/protocol/statetracker/tx_sender.go b/protocol/statetracker/tx_sender.go index 3dca55a14f..8eb09a6144 100644 --- a/protocol/statetracker/tx_sender.go +++ b/protocol/statetracker/tx_sender.go @@ -6,6 +6,7 @@ import ( "github.com/cosmos/cosmos-sdk/client" "github.com/cosmos/cosmos-sdk/client/tx" sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/lavanet/lava/protocol/rpcprovider/reliabilitymanager" "github.com/lavanet/lava/utils" conflicttypes "github.com/lavanet/lava/x/conflict/types" ) @@ -117,3 +118,21 @@ func NewProviderTxSender(ctx context.Context, clientCtx client.Context, txFactor ts := &ProviderTxSender{TxSender: txSender} return ts, nil } + +func (pts *ProviderTxSender) SendVoteReveal(voteID string, vote *reliabilitymanager.VoteData) error { + msg := conflicttypes.NewMsgConflictVoteReveal(pts.clientCtx.FromAddress.String(), voteID, vote.Nonce, vote.RelayDataHash) + err := pts.SimulateAndBroadCastTxWithRetryOnSeqMismatch(msg) + if err != nil { + return utils.LavaFormatError("SendVoteReveal - SimulateAndBroadCastTx Failed", err, nil) + } + return nil +} + +func (pts *ProviderTxSender) SendVoteCommitment(voteID string, vote *reliabilitymanager.VoteData) error { + msg := conflicttypes.NewMsgConflictVoteCommit(pts.clientCtx.FromAddress.String(), voteID, vote.CommitHash) + err := pts.SimulateAndBroadCastTxWithRetryOnSeqMismatch(msg) + if err != nil { + return utils.LavaFormatError("SendVoteCommitment - SimulateAndBroadCastTx Failed", err, nil) + } + return nil +} diff --git a/protocol/statetracker/vote_updater.go b/protocol/statetracker/vote_updater.go index 9cd3ad803c..1f467b375e 100644 --- a/protocol/statetracker/vote_updater.go +++ b/protocol/statetracker/vote_updater.go @@ -11,7 +11,7 @@ const ( ) type VoteUpdatable interface { - VoteHandler(*reliabilitymanager.VoteParams) + VoteHandler(*reliabilitymanager.VoteParams, uint64) } type VoteUpdater struct { @@ -41,6 +41,6 @@ func (vu *VoteUpdater) Update(latestBlock int64) { for _, vote := range votes { endpoint := lavasession.RPCEndpoint{ChainID: vote.ChainID, ApiInterface: vote.ApiInterface} updatable := vu.voteUpdatables[endpoint.Key()] - (*updatable).VoteHandler(vote) + (*updatable).VoteHandler(vote, uint64(latestBlock)) } } From 527072f412d5d6b14db9ea4372dd409b7c603ce8 Mon Sep 17 00:00:00 2001 From: omer mishael Date: Tue, 14 Feb 2023 11:32:30 +0200 Subject: [PATCH 004/123] added apiInterface to relay.pb and implemented provider listener --- go.mod | 2 + go.sum | 4 + proto/pairing/relay.proto | 1 + protocol/lavaprotocol/request_builder.go | 9 +- protocol/lavasession/provider_types.go | 4 +- protocol/rpcconsumer/rpcconsumer_server.go | 2 +- protocol/rpcprovider/provider_listener.go | 150 ++++++++++++++++++ protocol/rpcprovider/rpcprovider.go | 26 +++- protocol/rpcprovider/rpcprovider_server.go | 8 + protocol/statetracker/epoch_updater.go | 1 - protocol/statetracker/vote_updater.go | 1 - x/conflict/keeper/conflict.go | 4 +- x/conflict/keeper/msg_server_detection.go | 1 + x/pairing/keeper/msg_server_relay_payment.go | 6 + x/pairing/types/relay.pb.go | 156 ++++++++++++------- 15 files changed, 311 insertions(+), 64 deletions(-) create mode 100644 protocol/rpcprovider/provider_listener.go diff --git a/go.mod b/go.mod index f73193a0d2..1fce80309e 100644 --- a/go.mod +++ b/go.mod @@ -47,6 +47,8 @@ require ( github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect github.com/ghodss/yaml v1.0.0 // indirect github.com/gogo/googleapis v1.4.0 // indirect + github.com/golang/glog v1.0.0 // indirect + 
github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.0 // indirect github.com/pelletier/go-toml/v2 v2.0.5 // indirect golang.org/x/mod v0.7.0 // indirect golang.org/x/tools v0.2.0 // indirect diff --git a/go.sum b/go.sum index 3950e1518a..faf7fd1f5b 100644 --- a/go.sum +++ b/go.sum @@ -654,6 +654,8 @@ github.com/golang-jwt/jwt/v4 v4.3.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzw github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -778,6 +780,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.0 h1:1JYBfzqrWPcCclBwxFCPAou9n+q86mfnu7NAeHfte7A= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.0/go.mod h1:YDZoGHuwE+ov0c8smSH49WLF3F2LaWnYYuDVd+EWrc0= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= github.com/gtank/merlin v0.1.1-0.20191105220539-8318aed1a79f/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= diff --git a/proto/pairing/relay.proto b/proto/pairing/relay.proto index 697b85b345..37c20ba8ff 100644 --- a/proto/pairing/relay.proto +++ b/proto/pairing/relay.proto @@ -26,6 +26,7 @@ message RelayRequest { VRFData DataReliability = 12; QualityOfServiceReport QoSReport = 13; bytes unresponsive_providers = 14; + string apiInterface = 15; } message RelayReply { diff --git a/protocol/lavaprotocol/request_builder.go b/protocol/lavaprotocol/request_builder.go index bdb7ff0d33..87d0205253 100644 --- a/protocol/lavaprotocol/request_builder.go +++ b/protocol/lavaprotocol/request_builder.go @@ -29,8 +29,9 @@ type RelayRequestCommonData struct { ChainID string `protobuf:"bytes,1,opt,name=chainID,proto3" json:"chainID,omitempty"` ConnectionType string `protobuf:"bytes,2,opt,name=connection_type,json=connectionType,proto3" json:"connection_type,omitempty"` ApiUrl string `protobuf:"bytes,3,opt,name=api_url,json=apiUrl,proto3" json:"api_url,omitempty"` - Data []byte `protobuf:"bytes,6,opt,name=data,proto3" json:"data,omitempty"` - RequestBlock int64 `protobuf:"varint,11,opt,name=request_block,json=requestBlock,proto3" json:"request_block,omitempty"` + Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` + RequestBlock int64 `protobuf:"varint,5,opt,name=request_block,json=requestBlock,proto3" 
json:"request_block,omitempty"` + ApiInterface string `protobuf:"bytes,6,opt,name=apiInterface,proto3" json:"apiInterface,omitempty"` } type RelayResult struct { @@ -41,13 +42,14 @@ type RelayResult struct { Finalized bool } -func NewRelayRequestCommonData(chainID string, connectionType string, apiUrl string, data []byte, requestBlock int64) RelayRequestCommonData { +func NewRelayRequestCommonData(chainID string, connectionType string, apiUrl string, data []byte, requestBlock int64, apiInterface string) RelayRequestCommonData { return RelayRequestCommonData{ ChainID: chainID, ConnectionType: connectionType, ApiUrl: apiUrl, Data: data, RequestBlock: requestBlock, + ApiInterface: apiInterface, } } @@ -66,6 +68,7 @@ func ConstructRelayRequest(ctx context.Context, privKey *btcec.PrivateKey, chain QoSReport: consumerSession.QoSInfo.LastQoSReport, DataReliability: nil, UnresponsiveProviders: reportedProviders, + ApiInterface: relayRequestCommonData.ApiInterface, } sig, err := sigs.SignRelay(privKey, *relayRequest) if err != nil { diff --git a/protocol/lavasession/provider_types.go b/protocol/lavasession/provider_types.go index b875b1eaa4..274c06b908 100644 --- a/protocol/lavasession/provider_types.go +++ b/protocol/lavasession/provider_types.go @@ -24,8 +24,8 @@ type ProviderSessionsEpochData struct { } type RPCProviderEndpoint struct { - NetworkAddress string `yaml:"network-address,omitempty" json:"network-address,omitempty" mapstructure:"network-address"` // IP:PORT - ChainID string `yaml:"chain-id,omitempty" json:"chain-id,omitempty" mapstructure:"chain-id"` // spec chain identifier + NetworkAddress string `yaml:"network-address,omitempty" json:"network-address,omitempty" mapstructure:"network-address,omitempty"` // IP:PORT + ChainID string `yaml:"chain-id,omitempty" json:"chain-id,omitempty" mapstructure:"chain-id"` // spec chain identifier ApiInterface string `yaml:"api-interface,omitempty" json:"api-interface,omitempty" mapstructure:"api-interface"` Geolocation uint64 `yaml:"geolocation,omitempty" json:"geolocation,omitempty" mapstructure:"geolocation"` NodeUrl string `yaml:"node-url,omitempty" json:"node-url,omitempty" mapstructure:"node-url"` diff --git a/protocol/rpcconsumer/rpcconsumer_server.go b/protocol/rpcconsumer/rpcconsumer_server.go index fb8fa4f369..1fdd6f355d 100644 --- a/protocol/rpcconsumer/rpcconsumer_server.go +++ b/protocol/rpcconsumer/rpcconsumer_server.go @@ -99,7 +99,7 @@ func (rpccs *RPCConsumerServer) SendRelay( unwantedProviders := map[string]struct{}{} // do this in a loop with retry attempts, configurable via a flag, limited by the number of providers in CSM - relayRequestCommonData := lavaprotocol.NewRelayRequestCommonData(rpccs.listenEndpoint.ChainID, connectionType, url, []byte(req), chainMessage.RequestedBlock()) + relayRequestCommonData := lavaprotocol.NewRelayRequestCommonData(rpccs.listenEndpoint.ChainID, connectionType, url, []byte(req), chainMessage.RequestedBlock(), rpccs.listenEndpoint.ApiInterface) relayResults := []*lavaprotocol.RelayResult{} relayErrors := []error{} diff --git a/protocol/rpcprovider/provider_listener.go b/protocol/rpcprovider/provider_listener.go new file mode 100644 index 0000000000..5ae86e8604 --- /dev/null +++ b/protocol/rpcprovider/provider_listener.go @@ -0,0 +1,150 @@ +package rpcprovider + +import ( + "context" + "os" + "os/signal" + "strings" + "sync" + + "errors" + "net" + "net/http" + "time" + + "github.com/lavanet/lava/protocol/lavasession" + + "github.com/improbable-eng/grpc-web/go/grpcweb" + "golang.org/x/net/http2" + 
"golang.org/x/net/http2/h2c" + + "github.com/lavanet/lava/utils" + pairingtypes "github.com/lavanet/lava/x/pairing/types" + grpc "google.golang.org/grpc" +) + +type ProviderListener struct { + networkAddress string + relayServer *relayServer +} + +func (pl *ProviderListener) Key() string { + return pl.networkAddress +} + +func (pl *ProviderListener) RegisterReceiver(existingReceiver RelayReceiver, endpoint *lavasession.RPCProviderEndpoint) error { + listen_endpoint := lavasession.RPCEndpoint{ChainID: endpoint.ChainID, ApiInterface: endpoint.ApiInterface} + pl.relayServer.lock.Lock() + defer pl.relayServer.lock.Unlock() + _, ok := pl.relayServer.relayReceivers[listen_endpoint.Key()] + if ok { + // there was already a receiver defined + return utils.LavaFormatError("double_receiver_setup receiver already defined on this address with the same chainID and apiInterface", nil, &map[string]string{"chainID": endpoint.ChainID, "apiInterface": endpoint.ApiInterface}) + } + pl.relayServer.relayReceivers[listen_endpoint.Key()] = existingReceiver + utils.LavaFormatInfo("Provider Listening on Address", &map[string]string{"chainID": endpoint.ChainID, "apiInterface": endpoint.ApiInterface, "Address": endpoint.NetworkAddress}) + return nil +} + +func NewProviderListener(ctx context.Context, networkAddress string) *ProviderListener { + pl := &ProviderListener{networkAddress: networkAddress} + ctx, cancel := context.WithCancel(ctx) + signalChan := make(chan os.Signal, 1) + signal.Notify(signalChan, os.Interrupt) + defer func() { + signal.Stop(signalChan) + cancel() + }() + // GRPC + lis, err := net.Listen("tcp", networkAddress) + if err != nil { + utils.LavaFormatFatal("provider failure setting up listener", err, &map[string]string{"listenAddr": networkAddress}) + } + grpcServer := grpc.NewServer() + + wrappedServer := grpcweb.WrapServer(grpcServer) + handler := func(resp http.ResponseWriter, req *http.Request) { + // Set CORS headers + resp.Header().Set("Access-Control-Allow-Origin", "*") + resp.Header().Set("Access-Control-Allow-Headers", "Content-Type,x-grpc-web") + + wrappedServer.ServeHTTP(resp, req) + } + + httpServer := http.Server{ + Handler: h2c.NewHandler(http.HandlerFunc(handler), &http2.Server{}), + } + + go func() { + select { + case <-ctx.Done(): + utils.LavaFormatInfo("Provider Server ctx.Done", nil) + case <-signalChan: + utils.LavaFormatInfo("Provider Server signalChan", nil) + } + + shutdownCtx, shutdownRelease := context.WithTimeout(context.Background(), 10*time.Second) + defer shutdownRelease() + + if err := httpServer.Shutdown(shutdownCtx); err != nil { + utils.LavaFormatFatal("Provider failed to shutdown", err, &map[string]string{}) + } + }() + + relayServer := &relayServer{relayReceivers: map[string]RelayReceiver{}} + pl.relayServer = relayServer + go func() { + pairingtypes.RegisterRelayerServer(grpcServer, relayServer) + + if err := httpServer.Serve(lis); !errors.Is(err, http.ErrServerClosed) { + utils.LavaFormatFatal("provider failed to serve", err, &map[string]string{"Address": lis.Addr().String()}) + } + + }() + + return pl +} + +type relayServer struct { + pairingtypes.UnimplementedRelayerServer + relayReceivers map[string]RelayReceiver + lock sync.RWMutex +} + +type RelayReceiver interface { + Relay(ctx context.Context, request *pairingtypes.RelayRequest) (*pairingtypes.RelayReply, error) + RelaySubscribe(request *pairingtypes.RelayRequest, srv pairingtypes.Relayer_RelaySubscribeServer) error +} + +func (rs *relayServer) Relay(ctx context.Context, request 
*pairingtypes.RelayRequest) (*pairingtypes.RelayReply, error) { + relayReceiver, err := rs.findReceiver(request) + if err != nil { + return nil, err + } + return relayReceiver.Relay(ctx, request) +} + +func (rs *relayServer) RelaySubscribe(request *pairingtypes.RelayRequest, srv pairingtypes.Relayer_RelaySubscribeServer) error { + relayReceiver, err := rs.findReceiver(request) + if err != nil { + return err + } + return relayReceiver.RelaySubscribe(request, srv) +} + +func (rs *relayServer) findReceiver(request *pairingtypes.RelayRequest) (RelayReceiver, error) { + apiInterface := request.ApiInterface + chainID := request.ChainID + endpoint := lavasession.RPCEndpoint{ChainID: chainID, ApiInterface: apiInterface} + rs.lock.RLock() + defer rs.lock.RUnlock() + relayReceiver, ok := rs.relayReceivers[endpoint.Key()] + if !ok { + keys := make([]string, 0, len(rs.relayReceivers)) + for k := range rs.relayReceivers { + keys = append(keys, k) + } + return nil, utils.LavaFormatError("got called with unhandled relay receiver", nil, &map[string]string{"requested_receiver": endpoint.Key(), "handled_receivers": strings.Join(keys, ",")}) + } + return relayReceiver, nil +} diff --git a/protocol/rpcprovider/rpcprovider.go b/protocol/rpcprovider/rpcprovider.go index ba7695c861..33804456f8 100644 --- a/protocol/rpcprovider/rpcprovider.go +++ b/protocol/rpcprovider/rpcprovider.go @@ -46,6 +46,7 @@ type ProviderStateTrackerInf interface { type RPCProvider struct { providerStateTracker ProviderStateTrackerInf rpcProviderServers map[string]*RPCProviderServer + rpcProviderListeners map[string]*ProviderListener } func (rpcp *RPCProvider) Start(ctx context.Context, txFactory tx.Factory, clientCtx client.Context, rpcProviderEndpoints []*lavasession.RPCProviderEndpoint, cache *performance.Cache, parallelConnections uint) (err error) { @@ -107,9 +108,28 @@ func (rpcp *RPCProvider) Start(ctx context.Context, txFactory tx.Factory, client reliabilityManager := reliabilitymanager.NewReliabilityManager(chainTracker, providerStateTracker, addr.String(), chainProxy, chainParser) providerStateTracker.RegisterReliabilityManagerForVoteUpdates(ctx, reliabilityManager, rpcProviderEndpoint) - rpcp.rpcProviderServers[key] = &RPCProviderServer{} - utils.LavaFormatInfo("RPCProvider Listening", &map[string]string{"endpoints": lavasession.PrintRPCProviderEndpoint(rpcProviderEndpoint)}) - rpcp.rpcProviderServers[key].ServeRPCRequests(ctx, rpcProviderEndpoint, chainParser, rewardServer, providerSessionManager, reliabilityManager, privKey, cache, chainProxy) + rpcProviderServer := &RPCProviderServer{} + rpcp.rpcProviderServers[key] = rpcProviderServer + rpcProviderServer.ServeRPCRequests(ctx, rpcProviderEndpoint, chainParser, rewardServer, providerSessionManager, reliabilityManager, privKey, cache, chainProxy) + + // set up grpc listener + var listener *ProviderListener + if rpcProviderEndpoint.NetworkAddress == "" && len(rpcp.rpcProviderListeners) > 0 { + // handle case only one network address was defined + for _, listener_p := range rpcp.rpcProviderListeners { + listener = listener_p + listener.RegisterReceiver(rpcProviderServer, rpcProviderEndpoint) + break + } + } else { + var ok bool + listener, ok = rpcp.rpcProviderListeners[rpcProviderEndpoint.NetworkAddress] + if !ok { + listener = NewProviderListener(ctx, rpcProviderEndpoint.NetworkAddress) + rpcp.rpcProviderListeners[listener.Key()] = listener + } + listener.RegisterReceiver(rpcProviderServer, rpcProviderEndpoint) + } } signalChan := make(chan os.Signal, 1) diff --git 
a/protocol/rpcprovider/rpcprovider_server.go b/protocol/rpcprovider/rpcprovider_server.go index 82c429e1de..99147a3fdd 100644 --- a/protocol/rpcprovider/rpcprovider_server.go +++ b/protocol/rpcprovider/rpcprovider_server.go @@ -2,12 +2,14 @@ package rpcprovider import ( "context" + "fmt" "github.com/btcsuite/btcd/btcec" "github.com/lavanet/lava/protocol/chainlib" "github.com/lavanet/lava/protocol/chaintracker" "github.com/lavanet/lava/protocol/lavasession" "github.com/lavanet/lava/relayer/performance" + pairingtypes "github.com/lavanet/lava/x/pairing/types" ) type RPCProviderServer struct{} @@ -43,3 +45,9 @@ func (rpcps *RPCProviderServer) ServeRPCRequests( // send the proof to reward server // finalize the session } +func (rpcps *RPCProviderServer) Relay(ctx context.Context, request *pairingtypes.RelayRequest) (*pairingtypes.RelayReply, error) { + return nil, fmt.Errorf("not implemented") +} +func (rpcps *RPCProviderServer) RelaySubscribe(request *pairingtypes.RelayRequest, srv pairingtypes.Relayer_RelaySubscribeServer) error { + return fmt.Errorf("not implemented") +} diff --git a/protocol/statetracker/epoch_updater.go b/protocol/statetracker/epoch_updater.go index 79acd09ce9..b1c1b10191 100644 --- a/protocol/statetracker/epoch_updater.go +++ b/protocol/statetracker/epoch_updater.go @@ -24,7 +24,6 @@ func NewEpochUpdater(stateQuery *ProviderStateQuery) *EpochUpdater { func (eu *EpochUpdater) RegisterEpochUpdatable(ctx context.Context, epochUpdatable EpochUpdatable) { eu.epochUpdatables = append(eu.epochUpdatables, &epochUpdatable) - return } func (eu *EpochUpdater) UpdaterKey() string { diff --git a/protocol/statetracker/vote_updater.go b/protocol/statetracker/vote_updater.go index 1f467b375e..321c510fc6 100644 --- a/protocol/statetracker/vote_updater.go +++ b/protocol/statetracker/vote_updater.go @@ -25,7 +25,6 @@ func NewVoteUpdater(stateQuery *ProviderStateQuery) *VoteUpdater { func (vu *VoteUpdater) RegisterVoteUpdatable(ctx context.Context, voteUpdatable *VoteUpdatable, endpoint lavasession.RPCEndpoint) { vu.voteUpdatables[endpoint.Key()] = voteUpdatable - return } func (vu *VoteUpdater) UpdaterKey() string { diff --git a/x/conflict/keeper/conflict.go b/x/conflict/keeper/conflict.go index e0b21df36d..21a589c59e 100644 --- a/x/conflict/keeper/conflict.go +++ b/x/conflict/keeper/conflict.go @@ -39,7 +39,9 @@ func (k Keeper) ValidateResponseConflict(ctx sdk.Context, conflictData *types.Re if conflictData.ConflictRelayData0.Request.RequestBlock != conflictData.ConflictRelayData1.Request.RequestBlock { return fmt.Errorf("mismatching request parameters between providers %d, %d", conflictData.ConflictRelayData0.Request.RequestBlock, conflictData.ConflictRelayData1.Request.RequestBlock) } - + if conflictData.ConflictRelayData0.Request.ApiInterface != conflictData.ConflictRelayData1.Request.ApiInterface { + return fmt.Errorf("mismatching request parameters between providers %s, %s", conflictData.ConflictRelayData0.Request.ApiInterface, conflictData.ConflictRelayData1.Request.ApiInterface) + } // 1.5 validate params epochStart, _, err := k.epochstorageKeeper.GetEpochStartForBlock(ctx, uint64(block)) if err != nil { diff --git a/x/conflict/keeper/msg_server_detection.go b/x/conflict/keeper/msg_server_detection.go index 7018269f22..d83d9868c6 100644 --- a/x/conflict/keeper/msg_server_detection.go +++ b/x/conflict/keeper/msg_server_detection.go @@ -95,6 +95,7 @@ func (k msgServer) Detection(goCtx context.Context, msg *types.MsgDetection) (*t eventData["requestBlock"] = 
strconv.FormatUint(conflictVote.RequestBlock, 10) eventData["voteDeadline"] = strconv.FormatUint(conflictVote.VoteDeadline, 10) eventData["voters"] = strings.Join(voters, ",") + eventData["apiInterface"] = msg.ResponseConflict.ConflictRelayData0.Request.ApiInterface utils.LogLavaEvent(ctx, logger, types.ConflictVoteDetectionEventName, eventData, "Simulation: Got a new valid conflict detection from consumer, starting new vote") return &types.MsgDetectionResponse{}, nil diff --git a/x/pairing/keeper/msg_server_relay_payment.go b/x/pairing/keeper/msg_server_relay_payment.go index c45cc02460..d697ac676d 100644 --- a/x/pairing/keeper/msg_server_relay_payment.go +++ b/x/pairing/keeper/msg_server_relay_payment.go @@ -80,6 +80,12 @@ func (k msgServer) RelayPayment(goCtx context.Context, msg *types.MsgRelayPaymen return errorLogAndFormat("relay_payment_epoch_start", details, "problem getting epoch start") } + expectedInterfaces := k.specKeeper.GetExpectedInterfacesForSpec(ctx, relay.ChainID) + if !expectedInterfaces[relay.ApiInterface] { + details := map[string]string{"expectedInterfaces": fmt.Sprintf("%+v", expectedInterfaces), "apiInterface": relay.ApiInterface} + return errorLogAndFormat("relay_payment_apiInterface", details, "unexpected api interface") + } + payReliability := false // validate data reliability if relay.DataReliability != nil { diff --git a/x/pairing/types/relay.pb.go b/x/pairing/types/relay.pb.go index d600d01695..cdd1fce482 100644 --- a/x/pairing/types/relay.pb.go +++ b/x/pairing/types/relay.pb.go @@ -44,6 +44,7 @@ type RelayRequest struct { DataReliability *VRFData `protobuf:"bytes,12,opt,name=DataReliability,proto3" json:"DataReliability,omitempty"` QoSReport *QualityOfServiceReport `protobuf:"bytes,13,opt,name=QoSReport,proto3" json:"QoSReport,omitempty"` UnresponsiveProviders []byte `protobuf:"bytes,14,opt,name=unresponsive_providers,json=unresponsiveProviders,proto3" json:"unresponsive_providers,omitempty"` + ApiInterface string `protobuf:"bytes,15,opt,name=apiInterface,proto3" json:"apiInterface,omitempty"` } func (m *RelayRequest) Reset() { *m = RelayRequest{} } @@ -177,6 +178,13 @@ func (m *RelayRequest) GetUnresponsiveProviders() []byte { return nil } +func (m *RelayRequest) GetApiInterface() string { + if m != nil { + return m.ApiInterface + } + return "" +} + type RelayReply struct { Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` Sig []byte `protobuf:"bytes,2,opt,name=sig,proto3" json:"sig,omitempty"` @@ -402,58 +410,59 @@ func init() { func init() { proto.RegisterFile("pairing/relay.proto", fileDescriptor_10cd1bfeb9978acf) } var fileDescriptor_10cd1bfeb9978acf = []byte{ - // 802 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0xcd, 0x8e, 0x1b, 0x45, - 0x10, 0xf6, 0xec, 0x7a, 0xfd, 0x53, 0xf6, 0x3a, 0xa8, 0xb3, 0x9b, 0x8c, 0x16, 0xe2, 0x35, 0x83, - 0x94, 0xf8, 0x00, 0x36, 0x0a, 0x82, 0x03, 0x12, 0x12, 0x58, 0x0b, 0x24, 0x08, 0x91, 0x6c, 0x1b, - 0x72, 0xd8, 0xcb, 0xa8, 0x3d, 0x6e, 0x8f, 0x5b, 0x69, 0x4f, 0x4f, 0xba, 0x67, 0x46, 0x0c, 0x4f, - 0x81, 0xc4, 0x9b, 0x70, 0xe0, 0x01, 0x38, 0xe5, 0x98, 0x23, 0xe2, 0xb0, 0x8a, 0x76, 0xdf, 0x80, - 0x27, 0x40, 0x5d, 0x33, 0x63, 0x9b, 0xc4, 0x42, 0x8a, 0x94, 0xd3, 0x74, 0x7d, 0x55, 0xf5, 0x95, - 0xeb, 0xab, 0xea, 0x36, 0xdc, 0x8c, 0x99, 0xd0, 0x22, 0x0a, 0xc7, 0x9a, 0x4b, 0x96, 0x8f, 0x62, - 0xad, 0x12, 0x45, 0x8e, 0x24, 0xcb, 0x58, 0xc4, 0x93, 0x91, 0xfd, 0x8e, 0xca, 0x88, 0x93, 0xa3, - 0x50, 0x85, 0x0a, 0x03, 0xc6, 0xf6, 0x54, 0xc4, 
0x7a, 0xbf, 0xd5, 0xa1, 0x4b, 0x6d, 0x2e, 0xe5, - 0xcf, 0x52, 0x6e, 0x12, 0xe2, 0x42, 0x33, 0x58, 0x32, 0x11, 0x3d, 0x3c, 0x73, 0x9d, 0x81, 0x33, - 0x6c, 0xd3, 0xca, 0x24, 0xf7, 0xe0, 0x46, 0xa0, 0xa2, 0x88, 0x07, 0x89, 0x50, 0x91, 0x9f, 0xe4, - 0x31, 0x77, 0xf7, 0x30, 0xa2, 0xb7, 0x81, 0x7f, 0xcc, 0x63, 0x4e, 0x6e, 0x43, 0x93, 0xc5, 0xc2, - 0x4f, 0xb5, 0x74, 0xf7, 0x31, 0xa0, 0xc1, 0x62, 0xf1, 0x93, 0x96, 0xe4, 0x0e, 0x80, 0xe1, 0xc6, - 0xd8, 0x74, 0x31, 0x77, 0xeb, 0x03, 0x67, 0x58, 0xa7, 0xed, 0x12, 0x79, 0x38, 0x27, 0xc7, 0xd0, - 0x08, 0x52, 0xdf, 0xa4, 0x2b, 0xf7, 0x00, 0x5d, 0x07, 0x41, 0x3a, 0x4d, 0x57, 0x84, 0x40, 0x7d, - 0xce, 0x12, 0xe6, 0x36, 0x06, 0xce, 0xb0, 0x4b, 0xf1, 0x4c, 0xde, 0x81, 0x7d, 0x23, 0x42, 0xb7, - 0x89, 0x90, 0x3d, 0x92, 0x13, 0x68, 0xc5, 0x5a, 0x65, 0x62, 0xce, 0xb5, 0xdb, 0xc2, 0xaa, 0x6b, - 0x9b, 0xbc, 0x0f, 0xdd, 0x99, 0x54, 0xc1, 0x53, 0x7f, 0xc9, 0x45, 0xb8, 0x4c, 0xdc, 0xf6, 0xc0, - 0x19, 0xee, 0xd3, 0x0e, 0x62, 0x0f, 0x10, 0x22, 0xef, 0x42, 0x1b, 0x25, 0xf4, 0xa3, 0x74, 0xe5, - 0x02, 0x96, 0x6f, 0x21, 0xf0, 0x43, 0xba, 0x22, 0x1f, 0xc0, 0xa1, 0x2e, 0xe4, 0xf1, 0x31, 0xc7, - 0xed, 0x20, 0x41, 0xb7, 0x04, 0x27, 0x16, 0x23, 0xdf, 0xc2, 0x8d, 0x33, 0x96, 0x30, 0xca, 0xa5, - 0x60, 0x33, 0x21, 0x45, 0x92, 0xbb, 0xdd, 0x81, 0x33, 0xec, 0xdc, 0xbf, 0x33, 0xda, 0x35, 0x8f, - 0xd1, 0x13, 0xfa, 0x0d, 0xc6, 0xbf, 0x9a, 0x45, 0xbe, 0x83, 0xf6, 0xb9, 0x9a, 0x52, 0x1e, 0x2b, - 0x9d, 0xb8, 0x87, 0x48, 0xf1, 0xe1, 0x6e, 0x8a, 0xf3, 0x94, 0xd9, 0x8c, 0x47, 0x8b, 0x29, 0xd7, - 0x99, 0x08, 0x78, 0x91, 0x43, 0x37, 0xe9, 0xe4, 0x53, 0xb8, 0x95, 0x46, 0x9a, 0x9b, 0x58, 0x45, - 0x46, 0x64, 0xdc, 0xaf, 0x24, 0x31, 0x6e, 0x0f, 0xa5, 0x3b, 0xde, 0xf6, 0x3e, 0xae, 0x9c, 0xde, - 0x9f, 0x0e, 0x40, 0xb9, 0x15, 0xb1, 0xcc, 0xd7, 0x13, 0x70, 0x5e, 0x9f, 0xc0, 0xde, 0x66, 0x02, - 0x47, 0x70, 0x10, 0xa9, 0x28, 0xe0, 0x38, 0xf4, 0x43, 0x5a, 0x18, 0x56, 0x7b, 0xc9, 0x92, 0x8d, - 0x74, 0xf5, 0x42, 0xfb, 0x02, 0x2b, 0x94, 0xfb, 0x0c, 0x6e, 0x2f, 0x44, 0xc4, 0xa4, 0xf8, 0x85, - 0xcf, 0x8b, 0x28, 0xe3, 0x2f, 0x99, 0x59, 0x72, 0x83, 0x8b, 0xd0, 0xa5, 0xc7, 0x6b, 0x37, 0x26, - 0x98, 0x07, 0xe8, 0xc4, 0x75, 0x12, 0x61, 0x99, 0x51, 0xae, 0x47, 0xdb, 0x88, 0xb0, 0x08, 0xf2, - 0x5e, 0x3a, 0xd0, 0x2c, 0x45, 0x26, 0x77, 0xa1, 0x37, 0x17, 0x8b, 0x05, 0xd7, 0x3c, 0x4a, 0x04, - 0x4b, 0x94, 0xc6, 0x5e, 0x5a, 0xf4, 0x15, 0xd4, 0xae, 0x41, 0xa6, 0x17, 0x7e, 0xc6, 0x64, 0xca, - 0xcb, 0xde, 0x5a, 0x99, 0x5e, 0x3c, 0xb1, 0x76, 0xe5, 0x8c, 0xb5, 0x52, 0x0b, 0x6c, 0xb2, 0x70, - 0x3e, 0xb6, 0xb6, 0xed, 0xb3, 0x12, 0xd7, 0xb7, 0xc2, 0xd4, 0xd1, 0xdf, 0xa9, 0xb0, 0xa9, 0x08, - 0xc9, 0x00, 0x3a, 0x4c, 0x4a, 0xfb, 0x7b, 0x6c, 0x03, 0x65, 0x6f, 0xdb, 0x10, 0x79, 0x0f, 0xda, - 0xcf, 0x52, 0xae, 0x73, 0xf4, 0x97, 0x0d, 0xad, 0x81, 0xd7, 0x97, 0xde, 0xfb, 0x7d, 0x0f, 0x6e, - 0xed, 0x5e, 0x02, 0x72, 0x01, 0x4d, 0xab, 0x71, 0x14, 0xe4, 0xc5, 0x3d, 0x9e, 0x7c, 0xf9, 0xfc, - 0xf2, 0xb4, 0xf6, 0xf7, 0xe5, 0xe9, 0xdd, 0x50, 0x24, 0xcb, 0x74, 0x36, 0x0a, 0xd4, 0x6a, 0x1c, - 0x28, 0xb3, 0x52, 0xa6, 0xfc, 0x7c, 0x64, 0xe6, 0x4f, 0xc7, 0xf6, 0x5a, 0x9b, 0xd1, 0x19, 0x0f, - 0xfe, 0xb9, 0x3c, 0xed, 0xe5, 0x6c, 0x25, 0x3f, 0xf7, 0xbe, 0x2f, 0x68, 0x3c, 0x5a, 0x11, 0x12, - 0x01, 0x5d, 0x96, 0x31, 0x21, 0xab, 0x3d, 0xc7, 0x67, 0x60, 0xf2, 0xf5, 0x1b, 0x17, 0xb8, 0x59, - 0x14, 0xd8, 0xe6, 0xf2, 0xe8, 0x7f, 0xa8, 0xc9, 0x39, 0xd4, 0x4d, 0x1e, 0x05, 0xc5, 0x43, 0x32, - 0xf9, 0xe2, 0x8d, 0x4b, 0x74, 0x8a, 0x12, 0x96, 0xc3, 0xa3, 0x48, 0x75, 0xff, 0x0f, 0x07, 0x9a, - 0xb8, 0xdc, 0x5c, 0x93, 0x47, 0x70, 0x80, 0x47, 0xe2, 0xed, 0xbe, 0x61, 
0xdb, 0x4f, 0xe3, 0xc9, - 0xe0, 0x7f, 0x63, 0x62, 0x99, 0x7b, 0x35, 0x72, 0x01, 0x3d, 0xb4, 0xa7, 0xe9, 0xcc, 0x04, 0x5a, - 0xcc, 0xf8, 0xdb, 0x62, 0xfe, 0xd8, 0x99, 0x7c, 0xf5, 0xfc, 0xaa, 0xef, 0xbc, 0xb8, 0xea, 0x3b, - 0x2f, 0xaf, 0xfa, 0xce, 0xaf, 0xd7, 0xfd, 0xda, 0x8b, 0xeb, 0x7e, 0xed, 0xaf, 0xeb, 0x7e, 0xed, - 0xe2, 0xde, 0x96, 0x1e, 0x25, 0x13, 0x7e, 0xc7, 0x3f, 0x8f, 0xab, 0x3f, 0x08, 0x14, 0x65, 0xd6, - 0xc0, 0x57, 0xff, 0x93, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x30, 0x0a, 0xad, 0xe0, 0x38, 0x06, - 0x00, 0x00, + // 819 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0x41, 0x6f, 0x1b, 0x45, + 0x14, 0xf6, 0x26, 0x4e, 0x6c, 0x3f, 0x3b, 0x0e, 0x9a, 0x26, 0xed, 0x2a, 0x50, 0xc7, 0x2c, 0x52, + 0xeb, 0x03, 0xd8, 0xa8, 0x08, 0x0e, 0x48, 0x48, 0x60, 0x05, 0x68, 0x10, 0xa2, 0xcd, 0x18, 0x7a, + 0xc8, 0x65, 0x35, 0x5e, 0x8f, 0xd7, 0xa3, 0x8e, 0x67, 0xb6, 0x33, 0xbb, 0x16, 0xcb, 0xaf, 0xe0, + 0xb7, 0x70, 0x80, 0x3b, 0xa7, 0x1e, 0x7b, 0x44, 0x1c, 0xa2, 0x2a, 0xf9, 0x07, 0xfc, 0x02, 0x34, + 0x6f, 0x77, 0x13, 0xb7, 0xb5, 0x90, 0x2a, 0x71, 0xda, 0x79, 0xdf, 0x7b, 0xef, 0x7b, 0x7e, 0xdf, + 0x7b, 0x33, 0x86, 0x5b, 0x09, 0x13, 0x46, 0xa8, 0x78, 0x64, 0xb8, 0x64, 0xf9, 0x30, 0x31, 0x3a, + 0xd5, 0xe4, 0x40, 0xb2, 0x15, 0x53, 0x3c, 0x1d, 0xba, 0xef, 0xb0, 0x8c, 0x38, 0x3a, 0x88, 0x75, + 0xac, 0x31, 0x60, 0xe4, 0x4e, 0x45, 0x6c, 0xf0, 0x47, 0x1d, 0x3a, 0xd4, 0xe5, 0x52, 0xfe, 0x2c, + 0xe3, 0x36, 0x25, 0x3e, 0x34, 0xa2, 0x05, 0x13, 0xea, 0xf4, 0xc4, 0xf7, 0xfa, 0xde, 0xa0, 0x45, + 0x2b, 0x93, 0xdc, 0x87, 0xfd, 0x48, 0x2b, 0xc5, 0xa3, 0x54, 0x68, 0x15, 0xa6, 0x79, 0xc2, 0xfd, + 0x2d, 0x8c, 0xe8, 0xde, 0xc0, 0x3f, 0xe6, 0x09, 0x27, 0x77, 0xa0, 0xc1, 0x12, 0x11, 0x66, 0x46, + 0xfa, 0xdb, 0x18, 0xb0, 0xcb, 0x12, 0xf1, 0x93, 0x91, 0xe4, 0x2e, 0x80, 0xe5, 0xd6, 0xba, 0x74, + 0x31, 0xf3, 0xeb, 0x7d, 0x6f, 0x50, 0xa7, 0xad, 0x12, 0x39, 0x9d, 0x91, 0x43, 0xd8, 0x8d, 0xb2, + 0xd0, 0x66, 0x4b, 0x7f, 0x07, 0x5d, 0x3b, 0x51, 0x36, 0xc9, 0x96, 0x84, 0x40, 0x7d, 0xc6, 0x52, + 0xe6, 0xef, 0xf6, 0xbd, 0x41, 0x87, 0xe2, 0x99, 0xbc, 0x03, 0xdb, 0x56, 0xc4, 0x7e, 0x03, 0x21, + 0x77, 0x24, 0x47, 0xd0, 0x4c, 0x8c, 0x5e, 0x89, 0x19, 0x37, 0x7e, 0x13, 0xab, 0x5e, 0xdb, 0xe4, + 0x7d, 0xe8, 0x4c, 0xa5, 0x8e, 0x9e, 0x86, 0x0b, 0x2e, 0xe2, 0x45, 0xea, 0xb7, 0xfa, 0xde, 0x60, + 0x9b, 0xb6, 0x11, 0x7b, 0x88, 0x10, 0x79, 0x17, 0x5a, 0x28, 0x61, 0xa8, 0xb2, 0xa5, 0x0f, 0x58, + 0xbe, 0x89, 0xc0, 0x0f, 0xd9, 0x92, 0x7c, 0x00, 0x7b, 0xa6, 0x90, 0x27, 0xc4, 0x1c, 0xbf, 0x8d, + 0x04, 0x9d, 0x12, 0x1c, 0x3b, 0x8c, 0x7c, 0x0b, 0xfb, 0x27, 0x2c, 0x65, 0x94, 0x4b, 0xc1, 0xa6, + 0x42, 0x8a, 0x34, 0xf7, 0x3b, 0x7d, 0x6f, 0xd0, 0x7e, 0x70, 0x77, 0xb8, 0x69, 0x1e, 0xc3, 0x27, + 0xf4, 0x1b, 0x8c, 0x7f, 0x3d, 0x8b, 0x7c, 0x07, 0xad, 0x33, 0x3d, 0xa1, 0x3c, 0xd1, 0x26, 0xf5, + 0xf7, 0x90, 0xe2, 0xc3, 0xcd, 0x14, 0x67, 0x19, 0x73, 0x19, 0x8f, 0xe6, 0x13, 0x6e, 0x56, 0x22, + 0xe2, 0x45, 0x0e, 0xbd, 0x49, 0x27, 0x9f, 0xc2, 0xed, 0x4c, 0x19, 0x6e, 0x13, 0xad, 0xac, 0x58, + 0xf1, 0xb0, 0x92, 0xc4, 0xfa, 0x5d, 0x94, 0xee, 0x70, 0xdd, 0xfb, 0xb8, 0x72, 0x92, 0x00, 0x3a, + 0x2c, 0x11, 0xa7, 0x2a, 0xe5, 0x66, 0xce, 0x22, 0xee, 0xef, 0xa3, 0xa0, 0xaf, 0x60, 0xc1, 0x9f, + 0x1e, 0x40, 0xb9, 0x39, 0x89, 0xcc, 0xaf, 0xa7, 0xe4, 0xbd, 0x39, 0xa5, 0xad, 0x9b, 0x29, 0x1d, + 0xc0, 0x8e, 0xd2, 0x2a, 0xe2, 0xb8, 0x18, 0x7b, 0xb4, 0x30, 0xdc, 0x7c, 0x24, 0x4b, 0x6f, 0xe4, + 0xad, 0x17, 0xf3, 0x29, 0xb0, 0x42, 0xdd, 0xcf, 0xe0, 0xce, 0x5c, 0x28, 0x26, 0xc5, 0x2f, 0x7c, + 0x56, 0x44, 0xd9, 0x70, 0xc1, 0xec, 
0x82, 0x5b, 0x5c, 0x96, 0x0e, 0x3d, 0xbc, 0x76, 0x63, 0x82, + 0x7d, 0x88, 0x4e, 0x5c, 0x39, 0x11, 0x97, 0x19, 0xe5, 0x0a, 0xb5, 0xac, 0x88, 0x8b, 0xa0, 0xe0, + 0xa5, 0x07, 0x8d, 0x72, 0x10, 0xe4, 0x1e, 0x74, 0x67, 0x62, 0x3e, 0xe7, 0x86, 0xab, 0x54, 0xb0, + 0x54, 0x1b, 0xec, 0xa5, 0x49, 0x5f, 0x43, 0xdd, 0xaa, 0xac, 0xcc, 0x3c, 0x5c, 0x31, 0x99, 0xf1, + 0xb2, 0xb7, 0xe6, 0xca, 0xcc, 0x9f, 0x38, 0xbb, 0x72, 0x26, 0x46, 0xeb, 0x39, 0x36, 0x59, 0x38, + 0x1f, 0x3b, 0xdb, 0xf5, 0x59, 0x0d, 0x20, 0x74, 0xc2, 0xd4, 0xd1, 0xdf, 0xae, 0xb0, 0x89, 0x88, + 0x49, 0x1f, 0xda, 0x4c, 0x4a, 0xf7, 0x7b, 0x5c, 0x03, 0x65, 0x6f, 0xeb, 0x10, 0x79, 0x0f, 0x5a, + 0xcf, 0x32, 0x6e, 0x72, 0xf4, 0x97, 0x0d, 0x5d, 0x03, 0x6f, 0x5e, 0x8c, 0xe0, 0xb7, 0x2d, 0xb8, + 0xbd, 0x79, 0x51, 0xc8, 0x39, 0x34, 0x9c, 0xc6, 0x2a, 0xca, 0x8b, 0xbb, 0x3e, 0xfe, 0xf2, 0xf9, + 0xc5, 0x71, 0xed, 0xef, 0x8b, 0xe3, 0x7b, 0xb1, 0x48, 0x17, 0xd9, 0x74, 0x18, 0xe9, 0xe5, 0x28, + 0xd2, 0x76, 0xa9, 0x6d, 0xf9, 0xf9, 0xc8, 0xce, 0x9e, 0x8e, 0xdc, 0xd5, 0xb7, 0xc3, 0x13, 0x1e, + 0xfd, 0x73, 0x71, 0xdc, 0xcd, 0xd9, 0x52, 0x7e, 0x1e, 0x7c, 0x5f, 0xd0, 0x04, 0xb4, 0x22, 0x24, + 0x02, 0x3a, 0x6c, 0xc5, 0x84, 0xac, 0xee, 0x02, 0x3e, 0x15, 0xe3, 0xaf, 0xdf, 0xba, 0xc0, 0xad, + 0xa2, 0xc0, 0x3a, 0x57, 0x40, 0x5f, 0xa1, 0x26, 0x67, 0x50, 0xb7, 0xb9, 0x8a, 0x8a, 0xc7, 0x66, + 0xfc, 0xc5, 0x5b, 0x97, 0x68, 0x17, 0x25, 0x1c, 0x47, 0x40, 0x91, 0xea, 0xc1, 0xef, 0x1e, 0x34, + 0x70, 0xb9, 0xb9, 0x21, 0x8f, 0x60, 0x07, 0x8f, 0x24, 0xd8, 0x7c, 0x0b, 0xd7, 0x9f, 0xcf, 0xa3, + 0xfe, 0x7f, 0xc6, 0x24, 0x32, 0x0f, 0x6a, 0xe4, 0x1c, 0xba, 0x68, 0x4f, 0xb2, 0xa9, 0x8d, 0x8c, + 0x98, 0xf2, 0xff, 0x8b, 0xf9, 0x63, 0x6f, 0xfc, 0xd5, 0xf3, 0xcb, 0x9e, 0xf7, 0xe2, 0xb2, 0xe7, + 0xbd, 0xbc, 0xec, 0x79, 0xbf, 0x5e, 0xf5, 0x6a, 0x2f, 0xae, 0x7a, 0xb5, 0xbf, 0xae, 0x7a, 0xb5, + 0xf3, 0xfb, 0x6b, 0x7a, 0x94, 0x4c, 0xf8, 0x1d, 0xfd, 0x3c, 0xaa, 0xfe, 0x44, 0x50, 0x94, 0xe9, + 0x2e, 0xfe, 0x33, 0x7c, 0xf2, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x28, 0x1d, 0x4a, 0xdd, 0x5c, + 0x06, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
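The generated marshaler in the next hunk writes the new ApiInterface field with the single tag byte 0x7a. As a small standalone check of that constant (illustrative only, not part of the generated file): a protobuf wire-format tag is (field_number << 3) | wire_type, and ApiInterface is declared as field 15 with wire type 2 (length-delimited string).

package main

import "fmt"

func main() {
	const fieldNumber = 15            // ApiInterface, per the struct tag "bytes,15,opt,name=apiInterface"
	const wireTypeLengthDelimited = 2 // strings and bytes use the length-delimited wire type
	tag := fieldNumber<<3 | wireTypeLengthDelimited
	fmt.Printf("0x%x\n", tag) // prints 0x7a, matching dAtA[i] = 0x7a in the marshal hunk
}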
@@ -620,6 +629,13 @@ func (m *RelayRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.ApiInterface) > 0 { + i -= len(m.ApiInterface) + copy(dAtA[i:], m.ApiInterface) + i = encodeVarintRelay(dAtA, i, uint64(len(m.ApiInterface))) + i-- + dAtA[i] = 0x7a + } if len(m.UnresponsiveProviders) > 0 { i -= len(m.UnresponsiveProviders) copy(dAtA[i:], m.UnresponsiveProviders) @@ -978,6 +994,10 @@ func (m *RelayRequest) Size() (n int) { if l > 0 { n += 1 + l + sovRelay(uint64(l)) } + l = len(m.ApiInterface) + if l > 0 { + n += 1 + l + sovRelay(uint64(l)) + } return n } @@ -1495,6 +1515,38 @@ func (m *RelayRequest) Unmarshal(dAtA []byte) error { m.UnresponsiveProviders = []byte{} } iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ApiInterface", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRelay + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRelay + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRelay + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ApiInterface = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRelay(dAtA[iNdEx:]) From bb37a8a38cb2d046143066e000da3721e483de2b Mon Sep 17 00:00:00 2001 From: omer mishael Date: Tue, 14 Feb 2023 15:54:14 +0200 Subject: [PATCH 005/123] continue work rpcprovider --- protocol/lavaprotocol/response_builder.go | 9 + .../lavasession/provider_session_manager.go | 19 +- .../rpcprovider/rewardserver/reward_server.go | 4 + protocol/rpcprovider/rpcprovider.go | 6 +- protocol/rpcprovider/rpcprovider_server.go | 232 +++++++++++++++++- .../statetracker/provider_state_tracker.go | 18 +- protocol/statetracker/state_query.go | 59 ++++- 7 files changed, 329 insertions(+), 18 deletions(-) diff --git a/protocol/lavaprotocol/response_builder.go b/protocol/lavaprotocol/response_builder.go index 054b02b819..7731b28e67 100644 --- a/protocol/lavaprotocol/response_builder.go +++ b/protocol/lavaprotocol/response_builder.go @@ -12,8 +12,17 @@ import ( conflicttypes "github.com/lavanet/lava/x/conflict/types" pairingtypes "github.com/lavanet/lava/x/pairing/types" spectypes "github.com/lavanet/lava/x/spec/types" + tenderbytes "github.com/tendermint/tendermint/libs/bytes" ) +func ExtractSignerAddress(in *pairingtypes.RelayRequest) (tenderbytes.HexBytes, error) { + pubKey, err := sigs.RecoverPubKeyFromRelay(*in) + if err != nil { + return nil, err + } + return pubKey.Address(), nil +} + func VerifyRelayReply(reply *pairingtypes.RelayReply, relayRequest *pairingtypes.RelayRequest, addr string) error { serverKey, err := sigs.RecoverPubKeyFromRelayReply(reply, relayRequest) if err != nil { diff --git a/protocol/lavasession/provider_session_manager.go b/protocol/lavasession/provider_session_manager.go index 8d2cd45ada..1541c00b66 100644 --- a/protocol/lavasession/provider_session_manager.go +++ b/protocol/lavasession/provider_session_manager.go @@ -27,8 +27,9 @@ func (psm *ProviderSessionManager) atomicReadBlockedEpoch() (epoch uint64) { return atomic.LoadUint64(&psm.blockedEpoch) } -func (psm *ProviderSessionManager) IsValidEpoch(epoch uint64) bool { - return epoch > psm.atomicReadBlockedEpoch() +func (psm *ProviderSessionManager) 
IsValidEpoch(epoch uint64) (valid bool, thresholdEpoch uint64) { + threshold := psm.atomicReadBlockedEpoch() + return epoch > threshold, threshold } // Check if consumer exists and is not blocked, if all is valid return the ProviderSessionsWithConsumer pointer @@ -40,8 +41,9 @@ func (psm *ProviderSessionManager) IsActiveConsumer(epoch uint64, address string return true, nil // no error } -func (psm *ProviderSessionManager) GetSession(address string, id uint64, epoch uint64, relayNum uint64, sessionId uint64) (*SingleProviderSession, error) { - if psm.IsValidEpoch(epoch) { // fast checking to see if epoch is even relevant +func (psm *ProviderSessionManager) GetSession(address string, epoch uint64, relayNum uint64, sessionId uint64) (*SingleProviderSession, error) { + valid, _ := psm.IsValidEpoch(epoch) + if valid { // fast checking to see if epoch is even relevant utils.LavaFormatError("GetSession", InvalidEpochError, &map[string]string{"RequestedEpoch": strconv.FormatUint(epoch, 10)}) return nil, InvalidEpochError } @@ -53,7 +55,7 @@ func (psm *ProviderSessionManager) GetSession(address string, id uint64, epoch u var singleProviderSession *SingleProviderSession if activeConsumer { singleProviderSession, err = psm.getSessionFromAnActiveConsumer(epoch, address, sessionId) // after getting session verify relayNum etc.. - } else if relayNum == 0 { + } else if relayNum == 1 { // if no session found, we need to create and validate few things: pairing, // return here and call a different function. // in this function @@ -86,7 +88,8 @@ func (psm *ProviderSessionManager) createNewSingleProviderSession(providerSessio func (psm *ProviderSessionManager) getActiveConsumer(epoch uint64, address string) (singleProviderSession *ProviderSessionsWithConsumer, err error) { psm.lock.RLock() defer psm.lock.RUnlock() - if psm.IsValidEpoch(epoch) { // checking again because we are now locked and epoch cant change now. + valid, _ := psm.IsValidEpoch(epoch) + if valid { // checking again because we are now locked and epoch cant change now. 
utils.LavaFormatError("getActiveConsumer", InvalidEpochError, &map[string]string{"RequestedEpoch": strconv.FormatUint(epoch, 10)}) return nil, InvalidEpochError } @@ -124,8 +127,8 @@ func (psm *ProviderSessionManager) ReportConsumer() (address string, epoch uint6 return "", 0, nil } -func (psm *ProviderSessionManager) GetDataReliabilitySession(address string, epoch uint64) (err error) { - return nil +func (psm *ProviderSessionManager) GetDataReliabilitySession(address string, epoch uint64) (*SingleProviderSession, error) { + return nil, fmt.Errorf("not implemented") } func (psm *ProviderSessionManager) OnSessionFailure() (epoch uint64, err error) { diff --git a/protocol/rpcprovider/rewardserver/reward_server.go b/protocol/rpcprovider/rewardserver/reward_server.go index 98efbc7b68..0ab0cfbd60 100644 --- a/protocol/rpcprovider/rewardserver/reward_server.go +++ b/protocol/rpcprovider/rewardserver/reward_server.go @@ -21,6 +21,10 @@ func (rws *RewardServer) SendNewProof(ctx context.Context, singleProviderSession // write to a channel the epoch } +func (rws *RewardServer) SendNewDataReliabilityProof(ctx context.Context, dataReliability *pairingtypes.VRFData, epoch uint64, consumerAddr string) { + +} + func NewRewardServer(rewardsTxSender RewardsTxSender) *RewardServer { // rws := &RewardServer{} diff --git a/protocol/rpcprovider/rpcprovider.go b/protocol/rpcprovider/rpcprovider.go index 33804456f8..b59df6fd70 100644 --- a/protocol/rpcprovider/rpcprovider.go +++ b/protocol/rpcprovider/rpcprovider.go @@ -41,6 +41,10 @@ type ProviderStateTrackerInf interface { TxRelayPayment(ctx context.Context, relayRequests []*pairingtypes.RelayRequest) SendVoteReveal(voteID string, vote *reliabilitymanager.VoteData) error SendVoteCommitment(voteID string, vote *reliabilitymanager.VoteData) error + LatestBlock() int64 + GetVrfPkAndMaxCuForUser(ctx context.Context, consumerAddress string, chainID string, epocu uint64) (vrfPk *utils.VrfPubKey, maxCu uint64, err error) + VerifyPairing(ctx context.Context, consumerAddress string, providerAddress string, epoch uint64, chainID string) (valid bool, index int64, err error) + GetProvidersCountForConsumer(ctx context.Context, consumerAddress string, epoch uint64, chainID string) (uint32, error) } type RPCProvider struct { @@ -110,7 +114,7 @@ func (rpcp *RPCProvider) Start(ctx context.Context, txFactory tx.Factory, client rpcProviderServer := &RPCProviderServer{} rpcp.rpcProviderServers[key] = rpcProviderServer - rpcProviderServer.ServeRPCRequests(ctx, rpcProviderEndpoint, chainParser, rewardServer, providerSessionManager, reliabilityManager, privKey, cache, chainProxy) + rpcProviderServer.ServeRPCRequests(ctx, rpcProviderEndpoint, chainParser, rewardServer, providerSessionManager, reliabilityManager, privKey, cache, chainProxy, providerStateTracker, addr) // set up grpc listener var listener *ProviderListener diff --git a/protocol/rpcprovider/rpcprovider_server.go b/protocol/rpcprovider/rpcprovider_server.go index 99147a3fdd..4142515f4f 100644 --- a/protocol/rpcprovider/rpcprovider_server.go +++ b/protocol/rpcprovider/rpcprovider_server.go @@ -1,18 +1,36 @@ package rpcprovider import ( + "bytes" "context" + "encoding/json" "fmt" + "strconv" "github.com/btcsuite/btcd/btcec" + sdk "github.com/cosmos/cosmos-sdk/types" "github.com/lavanet/lava/protocol/chainlib" "github.com/lavanet/lava/protocol/chaintracker" + "github.com/lavanet/lava/protocol/lavaprotocol" "github.com/lavanet/lava/protocol/lavasession" "github.com/lavanet/lava/relayer/performance" + 
"github.com/lavanet/lava/relayer/sigs" + "github.com/lavanet/lava/utils" pairingtypes "github.com/lavanet/lava/x/pairing/types" ) -type RPCProviderServer struct{} +type RPCProviderServer struct { + cache *performance.Cache + chainProxy chainlib.ChainProxy + privKey *btcec.PrivateKey + reliabilityManager ReliabilityManagerInf + providerSessionManager *lavasession.ProviderSessionManager + rewardServer RewardServerInf + chainParser chainlib.ChainParser + rpcProviderEndpoint *lavasession.RPCProviderEndpoint + stateTracker StateTrackerInf + providerAddress sdk.AccAddress +} type ReliabilityManagerInf interface { GetLatestBlockData(fromBlock int64, toBlock int64, specificBlock int64) (latestBlock int64, requestedHashes []*chaintracker.BlockStore, err error) @@ -21,6 +39,14 @@ type ReliabilityManagerInf interface { type RewardServerInf interface { SendNewProof(ctx context.Context, singleProviderSession *lavasession.SingleProviderSession, epoch uint64, consumerAddr string) + SendNewDataReliabilityProof(ctx context.Context, dataReliability *pairingtypes.VRFData, epoch uint64, consumerAddr string) +} + +type StateTrackerInf interface { + LatestBlock() int64 + GetVrfPkAndMaxCuForUser(ctx context.Context, consumerAddress string, chainID string, epocu uint64) (vrfPk *utils.VrfPubKey, maxCu uint64, err error) + VerifyPairing(ctx context.Context, consumerAddress string, providerAddress string, epoch uint64, chainID string) (valid bool, index int64, err error) + GetProvidersCountForConsumer(ctx context.Context, consumerAddress string, epoch uint64, chainID string) (uint32, error) } func (rpcps *RPCProviderServer) ServeRPCRequests( @@ -31,8 +57,21 @@ func (rpcps *RPCProviderServer) ServeRPCRequests( reliabilityManager ReliabilityManagerInf, privKey *btcec.PrivateKey, cache *performance.Cache, chainProxy chainlib.ChainProxy, + stateTracker StateTrackerInf, + providerAddress sdk.AccAddress, ) { - // spin up a grpc listener + rpcps.cache = cache + rpcps.chainProxy = chainProxy + rpcps.privKey = privKey + rpcps.providerSessionManager = providerSessionManager + rpcps.reliabilityManager = reliabilityManager + rpcps.rewardServer = rewardServer + rpcps.chainParser = chainParser + rpcps.rpcProviderEndpoint = rpcProviderEndpoint + rpcps.stateTracker = stateTracker + rpcps.providerAddress = providerAddress +} +func (rpcps *RPCProviderServer) Relay(ctx context.Context, request *pairingtypes.RelayRequest) (*pairingtypes.RelayReply, error) { // verify the relay metadata is valid (epoch, signature) // verify the consumer is authorised // create/bring a session @@ -44,10 +83,195 @@ func (rpcps *RPCProviderServer) ServeRPCRequests( // sign the response // send the proof to reward server // finalize the session -} -func (rpcps *RPCProviderServer) Relay(ctx context.Context, request *pairingtypes.RelayRequest) (*pairingtypes.RelayReply, error) { + utils.LavaFormatDebug("Provider got relay request", &map[string]string{ + "request.SessionId": strconv.FormatUint(request.SessionId, 10), + "request.relayNumber": strconv.FormatUint(request.RelayNum, 10), + "request.cu": strconv.FormatUint(request.CuSum, 10), + }) + // relaySession, consumerAddress, err := rpcps.initRelay(ctx, request) + // if err != nil { + // return nil, rpcps.handleRelayErrorStatus(err) + // } + + // reply, err := rpcps.TryRelay(ctx, request, userAddr, nodeMsg) + // if err != nil && request.DataReliability == nil { // we ignore data reliability because its not checking/adding cu/relaynum. + // // failed to send relay. we need to adjust session state. 
cuSum and relayNumber. + // relayFailureError := s.onRelayFailure(userSessions, relaySession, nodeMsg) + // if relayFailureError != nil { + // err = sdkerrors.Wrapf(relayFailureError, "On relay failure: "+err.Error()) + // } + // utils.LavaFormatError("TryRelay Failed", err, &map[string]string{ + // "request.SessionId": strconv.FormatUint(request.SessionId, 10), + // "request.userAddr": userAddr.String(), + // }) + // } else { + // utils.LavaFormatDebug("Provider Finished Relay Successfully", &map[string]string{ + // "request.SessionId": strconv.FormatUint(request.SessionId, 10), + // "request.relayNumber": strconv.FormatUint(request.RelayNum, 10), + // }) + // } + // return reply, s.handleRelayErrorStatus(err) return nil, fmt.Errorf("not implemented") } func (rpcps *RPCProviderServer) RelaySubscribe(request *pairingtypes.RelayRequest, srv pairingtypes.Relayer_RelaySubscribeServer) error { return fmt.Errorf("not implemented") } + +// verifies basic relay fields, and gets a provider session +func (rpcps *RPCProviderServer) initRelay(ctx context.Context, request *pairingtypes.RelayRequest) (singleProviderSession *lavasession.SingleProviderSession, extractedConsumerAddress sdk.AccAddress, err error) { + valid, thresholdEpoch := rpcps.providerSessionManager.IsValidEpoch(uint64(request.BlockHeight)) + if !valid { + return nil, nil, utils.LavaFormatError("user reported invalid lava block height", nil, &map[string]string{ + "current lava block": strconv.FormatInt(rpcps.stateTracker.LatestBlock(), 10), + "requested lava block": strconv.FormatInt(request.BlockHeight, 10), + "threshold": strconv.FormatUint(thresholdEpoch, 10), + }) + } + + // Checks + err = rpcps.verifyRelayRequestMetaData(request) + if err != nil { + return nil, nil, utils.LavaFormatError("did not pass relay validation", err, nil) + } + consumerBytes, err := lavaprotocol.ExtractSignerAddress(request) + if err != nil { + return nil, nil, utils.LavaFormatError("extract signer address from relay", err, nil) + } + extractedConsumerAddress, err = sdk.AccAddressFromHex(consumerBytes.String()) + if err != nil { + return nil, nil, utils.LavaFormatError("get relay consumer address", err, nil) + } + + if request.DataReliability == nil { + //regular session + singleProviderSession, err = rpcps.providerSessionManager.GetSession(extractedConsumerAddress.String(), uint64(request.BlockHeight), request.RelayNum, request.SessionId) + if err != nil { + return nil, nil, utils.LavaFormatError("failed to get a provider session", err, &map[string]string{"sessionID": strconv.FormatUint(request.SessionId, 10), "consumer": extractedConsumerAddress.String(), "relayNum": strconv.FormatUint(request.RelayNum, 10)}) + } + return singleProviderSession, extractedConsumerAddress, nil + } + + // handle data reliability session verifications + err = rpcps.verifyDataReliabilityRelayRequest(ctx, request, extractedConsumerAddress) + if err != nil { + return nil, nil, utils.LavaFormatError("failed data reliability validation", err, nil) + } + dataReliabilitySingleProviderSession, err := rpcps.providerSessionManager.GetDataReliabilitySession(extractedConsumerAddress.String(), uint64(request.BlockHeight)) + if err != nil { + return nil, nil, utils.LavaFormatError("failed to get a provider data reliability session", err, &map[string]string{"sessionID": strconv.FormatUint(request.SessionId, 10), "consumer": extractedConsumerAddress.String(), "epoch": strconv.FormatInt(request.BlockHeight, 10)}) + } + return dataReliabilitySingleProviderSession, extractedConsumerAddress, nil 
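initRelay gates every incoming relay on IsValidEpoch, which now returns both the validity flag and the blocked-epoch threshold; a request is serviced only when its reported lava block is strictly newer than that threshold, and rejected otherwise. A minimal standalone sketch of that gate follows (the function name, threshold value and messages are local to the sketch, not the package API):

package main

import "fmt"

// isValidEpoch mirrors the (valid, threshold) return shape used above: an epoch
// is serviceable only if it is strictly newer than the blocked epoch.
func isValidEpoch(epoch uint64, blockedEpoch uint64) (valid bool, threshold uint64) {
	return epoch > blockedEpoch, blockedEpoch
}

func main() {
	const blocked = 100
	for _, epoch := range []uint64{90, 100, 101} {
		valid, threshold := isValidEpoch(epoch, blocked)
		if !valid {
			fmt.Printf("reject relay: epoch %d is not newer than blocked threshold %d\n", epoch, threshold)
			continue
		}
		fmt.Printf("serve relay for epoch %d\n", epoch)
	}
}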
+} + +func (rpcps *RPCProviderServer) verifyRelayRequestMetaData(request *pairingtypes.RelayRequest) error { + providerAddress := rpcps.providerAddress.String() + if request.Provider != providerAddress { + return utils.LavaFormatError("request had the wrong provider", nil, &map[string]string{"providerAddress": providerAddress, "request_provider": request.Provider}) + } + if request.ChainID != rpcps.rpcProviderEndpoint.ChainID { + return utils.LavaFormatError("request had the wrong chainID", nil, &map[string]string{"request_chainID": request.ChainID, "chainID": rpcps.rpcProviderEndpoint.ChainID}) + } + return nil +} + +func (rpcps *RPCProviderServer) verifyDataReliabilityRelayRequest(ctx context.Context, request *pairingtypes.RelayRequest, consumerAddress sdk.AccAddress) error { + + if request.RelayNum > lavasession.DataReliabilitySessionId { + return utils.LavaFormatError("request's relay num is larger than the data reliability session ID", nil, &map[string]string{"relayNum": strconv.FormatUint(request.RelayNum, 10), "DataReliabilitySessionId": strconv.Itoa(lavasession.DataReliabilitySessionId)}) + } + if request.CuSum != lavasession.DataReliabilityCuSum { + return utils.LavaFormatError("request's CU sum is not equal to the data reliability CU sum", nil, &map[string]string{"cuSum": strconv.FormatUint(request.CuSum, 10), "DataReliabilityCuSum": strconv.Itoa(lavasession.DataReliabilityCuSum)}) + } + vrf_pk, _, err := rpcps.stateTracker.GetVrfPkAndMaxCuForUser(ctx, consumerAddress.String(), request.ChainID, uint64(request.BlockHeight)) + if err != nil { + return utils.LavaFormatError("failed to get vrfpk and maxCURes for data reliability!", err, &map[string]string{ + "userAddr": consumerAddress.String(), + }) + } + + // data reliability is not session dependant, its always sent with sessionID 0 and if not we don't care + if vrf_pk == nil { + return utils.LavaFormatError("dataReliability Triggered with vrf_pk == nil", nil, + &map[string]string{"requested epoch": strconv.FormatInt(request.BlockHeight, 10), "userAddr": consumerAddress.String()}) + } + // verify the providerSig is indeed a signature by a valid provider on this query + valid, index, err := rpcps.VerifyReliabilityAddressSigning(ctx, consumerAddress, request) + if err != nil { + return utils.LavaFormatError("VerifyReliabilityAddressSigning invalid", err, + &map[string]string{"requested epoch": strconv.FormatInt(request.BlockHeight, 10), "userAddr": consumerAddress.String(), "dataReliability": fmt.Sprintf("%v", request.DataReliability)}) + } + if !valid { + return utils.LavaFormatError("invalid DataReliability Provider signing", nil, + &map[string]string{"requested epoch": strconv.FormatInt(request.BlockHeight, 10), "userAddr": consumerAddress.String(), "dataReliability": fmt.Sprintf("%v", request.DataReliability)}) + } + // verify data reliability fields correspond to the right vrf + valid = utils.VerifyVrfProof(request, *vrf_pk, uint64(request.BlockHeight)) + if !valid { + return utils.LavaFormatError("invalid DataReliability fields, VRF wasn't verified with provided proof", nil, + &map[string]string{"requested epoch": strconv.FormatInt(request.BlockHeight, 10), "userAddr": consumerAddress.String(), "dataReliability": fmt.Sprintf("%v", request.DataReliability)}) + } + _, dataReliabilityThreshold := rpcps.chainParser.DataReliabilityParams() + providersCount, err := rpcps.stateTracker.GetProvidersCountForConsumer(ctx, consumerAddress.String(), uint64(request.BlockHeight), request.ChainID) + if err != nil { + return 
utils.LavaFormatError("VerifyReliabilityAddressSigning failed fetching providers count for consumer", err, &map[string]string{"chainID": request.ChainID, "consumer": consumerAddress.String(), "epoch": strconv.FormatInt(request.BlockHeight, 10)}) + } + vrfIndex, vrfErr := utils.GetIndexForVrf(request.DataReliability.VrfValue, providersCount, dataReliabilityThreshold) + if vrfErr != nil { + dataReliabilityMarshalled, err := json.Marshal(request.DataReliability) + if err != nil { + dataReliabilityMarshalled = []byte{} + } + return utils.LavaFormatError("Provider identified vrf value in data reliability request does not meet threshold", vrfErr, + &map[string]string{ + "requested epoch": strconv.FormatInt(request.BlockHeight, 10), "userAddr": consumerAddress.String(), + "dataReliability": string(dataReliabilityMarshalled), "relayEpochStart": strconv.FormatInt(request.BlockHeight, 10), + "vrfIndex": strconv.FormatInt(vrfIndex, 10), + "self Index": strconv.FormatInt(index, 10), + }) + } + if index != vrfIndex { + dataReliabilityMarshalled, err := json.Marshal(request.DataReliability) + if err != nil { + dataReliabilityMarshalled = []byte{} + } + return utils.LavaFormatError("Provider identified invalid vrfIndex in data reliability request, the given index and self index are different", nil, + &map[string]string{ + "requested epoch": strconv.FormatInt(request.BlockHeight, 10), "userAddr": consumerAddress.String(), + "dataReliability": string(dataReliabilityMarshalled), "relayEpochStart": strconv.FormatInt(request.BlockHeight, 10), + "vrfIndex": strconv.FormatInt(vrfIndex, 10), + "self Index": strconv.FormatInt(index, 10), + }) + } + utils.LavaFormatInfo("Simulation: server got valid DataReliability request", nil) + return nil +} + +func (rpcps *RPCProviderServer) VerifyReliabilityAddressSigning(ctx context.Context, consumer sdk.AccAddress, request *pairingtypes.RelayRequest) (valid bool, index int64, err error) { + queryHash := utils.CalculateQueryHash(*request) + if !bytes.Equal(queryHash, request.DataReliability.QueryHash) { + return false, 0, utils.LavaFormatError("query hash mismatch on data reliability message", nil, + &map[string]string{"queryHash": string(queryHash), "request QueryHash": string(request.DataReliability.QueryHash)}) + } + + // validate consumer signing on VRF data + valid, err = sigs.ValidateSignerOnVRFData(consumer, *request.DataReliability) + if err != nil { + return false, 0, utils.LavaFormatError("failed to Validate Signer On VRF Data", err, + &map[string]string{"consumer": consumer.String(), "request.DataReliability": fmt.Sprintf("%v", request.DataReliability)}) + } + if !valid { + return false, 0, nil + } + // validate provider signing on query data + pubKey, err := sigs.RecoverProviderPubKeyFromVrfDataAndQuery(request) + if err != nil { + return false, 0, utils.LavaFormatError("failed to Recover Provider PubKey From Vrf Data And Query", err, + &map[string]string{"consumer": consumer.String(), "request": fmt.Sprintf("%v", request)}) + } + providerAccAddress, err := sdk.AccAddressFromHex(pubKey.Address().String()) // consumer signer + if err != nil { + return false, 0, utils.LavaFormatError("failed converting signer to address", err, + &map[string]string{"consumer": consumer.String(), "PubKey": pubKey.Address().String()}) + } + return rpcps.stateTracker.VerifyPairing(ctx, consumer.String(), providerAccAddress.String(), uint64(request.BlockHeight), request.ChainID) // return if this pairing is authorised +} diff --git a/protocol/statetracker/provider_state_tracker.go 
b/protocol/statetracker/provider_state_tracker.go index cbeccece20..52bd57d091 100644 --- a/protocol/statetracker/provider_state_tracker.go +++ b/protocol/statetracker/provider_state_tracker.go @@ -36,8 +36,6 @@ func NewProviderStateTracker(ctx context.Context, txFactory tx.Factory, clientCt } func (pst *ProviderStateTracker) RegisterForEpochUpdates(ctx context.Context, epochUpdatable EpochUpdatable) { - // create an epoch updater - // add epoch updater to the updater map epochUpdater := NewEpochUpdater(pst.stateQuery) epochUpdaterRaw := pst.StateTracker.RegisterForUpdates(ctx, epochUpdater) epochUpdater, ok := epochUpdaterRaw.(*EpochUpdater) @@ -81,3 +79,19 @@ func (pst *ProviderStateTracker) SendVoteReveal(voteID string, vote *reliability func (pst *ProviderStateTracker) SendVoteCommitment(voteID string, vote *reliabilitymanager.VoteData) error { return pst.txSender.SendVoteCommitment(voteID, vote) } + +func (pst *ProviderStateTracker) LatestBlock() int64 { + return pst.StateTracker.chainTracker.GetLatestBlockNum() +} + +func (pst *ProviderStateTracker) GetVrfPkAndMaxCuForUser(ctx context.Context, consumerAddress string, chainID string, epoch uint64) (vrfPk *utils.VrfPubKey, maxCu uint64, err error) { + return pst.stateQuery.GetVrfPkAndMaxCuForUser(ctx, consumerAddress, chainID, epoch) +} + +func (pst *ProviderStateTracker) VerifyPairing(ctx context.Context, consumerAddress string, providerAddress string, epoch uint64, chainID string) (valid bool, index int64, err error) { + return pst.stateQuery.VerifyPairing(ctx, consumerAddress, providerAddress, epoch, chainID) +} + +func (pst *ProviderStateTracker) GetProvidersCountForConsumer(ctx context.Context, consumerAddress string, epoch uint64, chainID string) (uint32, error) { + return pst.stateQuery.GetProvidersCountForConsumer(ctx, consumerAddress, epoch, chainID) +} diff --git a/protocol/statetracker/state_query.go b/protocol/statetracker/state_query.go index 3e8de7445f..d196e3449f 100644 --- a/protocol/statetracker/state_query.go +++ b/protocol/statetracker/state_query.go @@ -41,7 +41,7 @@ func (csq *StateQuery) GetSpec(ctx context.Context, chainID string) (*spectypes. 
type ConsumerStateQuery struct { StateQuery clientCtx client.Context - cachedPairings map[string]*pairingtypes.QueryGetPairingResponse + cachedPairings map[string]*pairingtypes.QueryGetPairingResponse // TODO: replace this with TTL so we don't keep entries forever } func NewConsumerStateQuery(ctx context.Context, clientCtx client.Context) *ConsumerStateQuery { @@ -51,7 +51,7 @@ func NewConsumerStateQuery(ctx context.Context, clientCtx client.Context) *Consu func (csq *ConsumerStateQuery) GetPairing(ctx context.Context, chainID string, latestBlock int64) (pairingList []epochstoragetypes.StakeEntry, epoch uint64, nextBlockForUpdate uint64, errRet error) { if chainID == "" { - // the caller doesn;t care which so just return the first + // the caller doesn't care which so just return the first for key := range csq.cachedPairings { chainID = key } @@ -89,7 +89,9 @@ func (csq *ConsumerStateQuery) GetMaxCUForUser(ctx context.Context, chainID stri type ProviderStateQuery struct { StateQuery - clientCtx client.Context + clientCtx client.Context + cachedPairings map[string]*pairingtypes.QueryVerifyPairingResponse // TODO: replace this with TTL so we don't keep entries forever + cachedEntries map[string]*pairingtypes.QueryUserEntryResponse // TODO: replace this with TTL so we don't keep entries forever } func NewProviderStateQuery(ctx context.Context, clientCtx client.Context) *ProviderStateQuery { @@ -97,6 +99,28 @@ func NewProviderStateQuery(ctx context.Context, clientCtx client.Context) *Provi return csq } +func (psq *ProviderStateQuery) GetVrfPkAndMaxCuForUser(ctx context.Context, consumerAddress string, chainID string, epoch uint64) (vrfPk *utils.VrfPubKey, maxCu uint64, err error) { + key := psq.entryKey(consumerAddress, chainID, epoch, "") + UserEntryRes, ok := psq.cachedEntries[key] + if !ok { + UserEntryRes, err = psq.PairingQueryClient.UserEntry(ctx, &pairingtypes.QueryUserEntryRequest{ChainID: chainID, Address: consumerAddress, Block: epoch}) + if err != nil { + return nil, 0, utils.LavaFormatError("StakeEntry querying for consumer failed", err, &map[string]string{"chainID": chainID, "address": consumerAddress, "block": strconv.FormatUint(epoch, 10)}) + } + psq.cachedEntries[key] = UserEntryRes + } + vrfPk = &utils.VrfPubKey{} + vrfPk, err = vrfPk.DecodeFromBech32(UserEntryRes.GetConsumer().Vrfpk) + if err != nil { + err = utils.LavaFormatError("decoding vrfpk from bech32", err, &map[string]string{"chainID": chainID, "address": consumerAddress, "block": strconv.FormatUint(epoch, 10), "UserEntryRes": fmt.Sprintf("%v", UserEntryRes)}) + } + return vrfPk, UserEntryRes.GetMaxCU(), err +} + +func (psq *ProviderStateQuery) entryKey(consumerAddress string, chainID string, epoch uint64, providerAddress string) string { + return consumerAddress + chainID + strconv.FormatUint(epoch, 10) + providerAddress +} + func (psq *ProviderStateQuery) CurrentEpochStart(ctx context.Context) (uint64, error) { epochDetails, err := psq.EpochStorageQueryClient.EpochDetails(ctx, &epochstoragetypes.QueryGetEpochDetailsRequest{}) if err != nil { @@ -152,3 +176,32 @@ func (psq *ProviderStateQuery) VoteEvents(ctx context.Context, latestBlock int64 } return } + +func (psq *ProviderStateQuery) VerifyPairing(ctx context.Context, consumerAddress string, providerAddress string, epoch uint64, chainID string) (valid bool, index int64, err error) { + key := psq.entryKey(consumerAddress, chainID, epoch, providerAddress) + verifyResponse, ok := psq.cachedPairings[key] + if !ok { + verifyResponse, err = 
psq.PairingQueryClient.VerifyPairing(context.Background(), &pairingtypes.QueryVerifyPairingRequest{ + ChainID: chainID, + Client: consumerAddress, + Provider: providerAddress, + Block: epoch, + }) + if err != nil { + return false, 0, err + } + psq.cachedPairings[key] = verifyResponse + } + if !verifyResponse.Valid { + return false, 0, utils.LavaFormatError("invalid self pairing with consumer", nil, &map[string]string{"provider": providerAddress, "consumer address": consumerAddress, "epoch": strconv.FormatUint(epoch, 10)}) + } + return verifyResponse.Valid, verifyResponse.GetIndex(), nil +} + +func (psq *ProviderStateQuery) GetProvidersCountForConsumer(ctx context.Context, consumerAddress string, epoch uint64, chainID string) (uint32, error) { + res, err := psq.PairingQueryClient.Params(ctx, &pairingtypes.QueryParamsRequest{}) + if err != nil { + return 0, err + } + return uint32(res.GetParams().ServicersToPairCount), nil +} From a3e1ae01a6a71e3994a0046db3fff43eb1c05cba Mon Sep 17 00:00:00 2001 From: omer mishael Date: Wed, 15 Feb 2023 11:15:00 +0200 Subject: [PATCH 006/123] added single provider preparation function --- protocol/lavasession/provider_types.go | 17 ++++++++++---- protocol/rpcprovider/rpcprovider_server.go | 26 +++++++++++++++++----- 2 files changed, 34 insertions(+), 9 deletions(-) diff --git a/protocol/lavasession/provider_types.go b/protocol/lavasession/provider_types.go index 274c06b908..50dc1ed5f4 100644 --- a/protocol/lavasession/provider_types.go +++ b/protocol/lavasession/provider_types.go @@ -65,6 +65,7 @@ func (pswc *ProviderSessionsWithConsumer) readBlockListedAtomic() { type SingleProviderSession struct { userSessionsParent *ProviderSessionsWithConsumer CuSum uint64 + LatestRelayCu uint64 UniqueIdentifier uint64 Lock sync.RWMutex Proof *pairingtypes.RelayRequest // saves last relay request of a session as proof @@ -72,12 +73,20 @@ type SingleProviderSession struct { PairingEpoch uint64 } -func (r *SingleProviderSession) GetPairingEpoch() uint64 { - return atomic.LoadUint64(&r.PairingEpoch) +func (sps *SingleProviderSession) GetPairingEpoch() uint64 { + return atomic.LoadUint64(&sps.PairingEpoch) } -func (r *SingleProviderSession) SetPairingEpoch(epoch uint64) { - atomic.StoreUint64(&r.PairingEpoch, epoch) +func (sps *SingleProviderSession) SetPairingEpoch(epoch uint64) { + atomic.StoreUint64(&sps.PairingEpoch, epoch) +} + +func (sps *SingleProviderSession) PrepareSessionForUsage(cu uint64) error { + // verify locked + // verify total cu in the parent (atomic read) + // set LatestRelayCu (verify it's 0) + // add to parent with atomic - make sure there is no race to corrupt the total cu in the parent + return fmt.Errorf("not implemented") } func (pswc *ProviderSessionsWithConsumer) GetExistingSession(sessionId uint64) (session *SingleProviderSession, err error) { diff --git a/protocol/rpcprovider/rpcprovider_server.go b/protocol/rpcprovider/rpcprovider_server.go index 4142515f4f..84e0cb4d9d 100644 --- a/protocol/rpcprovider/rpcprovider_server.go +++ b/protocol/rpcprovider/rpcprovider_server.go @@ -9,6 +9,7 @@ import ( "github.com/btcsuite/btcd/btcec" sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/gogo/status" "github.com/lavanet/lava/protocol/chainlib" "github.com/lavanet/lava/protocol/chaintracker" "github.com/lavanet/lava/protocol/lavaprotocol" @@ -17,6 +18,7 @@ import ( "github.com/lavanet/lava/relayer/sigs" "github.com/lavanet/lava/utils" pairingtypes "github.com/lavanet/lava/x/pairing/types" + "google.golang.org/grpc/codes" ) type RPCProviderServer 
struct { @@ -88,11 +90,15 @@ func (rpcps *RPCProviderServer) Relay(ctx context.Context, request *pairingtypes "request.relayNumber": strconv.FormatUint(request.RelayNum, 10), "request.cu": strconv.FormatUint(request.CuSum, 10), }) - // relaySession, consumerAddress, err := rpcps.initRelay(ctx, request) - // if err != nil { - // return nil, rpcps.handleRelayErrorStatus(err) - // } - + relaySession, _, err := rpcps.initRelay(ctx, request) + if err != nil { + return nil, rpcps.handleRelayErrorStatus(err) + } + relayCU := uint64(1) //TODO: parse from relay + err = relaySession.PrepareSessionForUsage(relayCU) + if err != nil { + return nil, rpcps.handleRelayErrorStatus(err) + } // reply, err := rpcps.TryRelay(ctx, request, userAddr, nodeMsg) // if err != nil && request.DataReliability == nil { // we ignore data reliability because its not checking/adding cu/relaynum. // // failed to send relay. we need to adjust session state. cuSum and relayNumber. @@ -275,3 +281,13 @@ func (rpcps *RPCProviderServer) VerifyReliabilityAddressSigning(ctx context.Cont } return rpcps.stateTracker.VerifyPairing(ctx, consumer.String(), providerAccAddress.String(), uint64(request.BlockHeight), request.ChainID) // return if this pairing is authorised } + +func (rpcps *RPCProviderServer) handleRelayErrorStatus(err error) error { + if err == nil { + return nil + } + if lavasession.SessionOutOfSyncError.Is(err) { + err = status.Error(codes.Code(lavasession.SessionOutOfSyncError.ABCICode()), err.Error()) + } + return err +} From 54bdd34e43cba21da3ab14851bfd68a75113e68f Mon Sep 17 00:00:00 2001 From: omer mishael Date: Wed, 15 Feb 2023 11:22:04 +0200 Subject: [PATCH 007/123] added verification for proof --- protocol/lavasession/provider_types.go | 3 ++- protocol/rpcprovider/rpcprovider_server.go | 17 ++++++++++++----- 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/protocol/lavasession/provider_types.go b/protocol/lavasession/provider_types.go index 50dc1ed5f4..afc4ab1ce1 100644 --- a/protocol/lavasession/provider_types.go +++ b/protocol/lavasession/provider_types.go @@ -81,9 +81,10 @@ func (sps *SingleProviderSession) SetPairingEpoch(epoch uint64) { atomic.StoreUint64(&sps.PairingEpoch, epoch) } -func (sps *SingleProviderSession) PrepareSessionForUsage(cu uint64) error { +func (sps *SingleProviderSession) PrepareSessionForUsage(currentCU uint64, relayRequestTotalCU uint64) error { // verify locked // verify total cu in the parent (atomic read) + // verify the proof is right according to relay cu, last proof CU and current proof CU: CuSum + currentCU = relayRequestTotalCU // set LatestRelayCu (verify it's 0) // add to parent with atomic - make sure there is no race to corrupt the total cu in the parent return fmt.Errorf("not implemented") diff --git a/protocol/rpcprovider/rpcprovider_server.go b/protocol/rpcprovider/rpcprovider_server.go index 84e0cb4d9d..22b3b19af9 100644 --- a/protocol/rpcprovider/rpcprovider_server.go +++ b/protocol/rpcprovider/rpcprovider_server.go @@ -94,8 +94,13 @@ func (rpcps *RPCProviderServer) Relay(ctx context.Context, request *pairingtypes if err != nil { return nil, rpcps.handleRelayErrorStatus(err) } - relayCU := uint64(1) //TODO: parse from relay - err = relaySession.PrepareSessionForUsage(relayCU) + // parse the message to extract the cu and chainMessage for sending it + chainMessage, err := rpcps.chainParser.ParseMsg(request.ApiUrl, request.Data, request.ConnectionType) + if err != nil { + return nil, rpcps.handleRelayErrorStatus(err) + } + relayCU := 
chainMessage.GetServiceApi().ComputeUnits + err = relaySession.PrepareSessionForUsage(relayCU, request.CuSum) if err != nil { return nil, rpcps.handleRelayErrorStatus(err) } @@ -134,11 +139,12 @@ func (rpcps *RPCProviderServer) initRelay(ctx context.Context, request *pairingt }) } - // Checks + // Check data err = rpcps.verifyRelayRequestMetaData(request) if err != nil { return nil, nil, utils.LavaFormatError("did not pass relay validation", err, nil) } + // check signature consumerBytes, err := lavaprotocol.ExtractSignerAddress(request) if err != nil { return nil, nil, utils.LavaFormatError("extract signer address from relay", err, nil) @@ -148,8 +154,9 @@ func (rpcps *RPCProviderServer) initRelay(ctx context.Context, request *pairingt return nil, nil, utils.LavaFormatError("get relay consumer address", err, nil) } + // handle non data reliability relays if request.DataReliability == nil { - //regular session + // regular session, verifies pairing epoch and relay number singleProviderSession, err = rpcps.providerSessionManager.GetSession(extractedConsumerAddress.String(), uint64(request.BlockHeight), request.RelayNum, request.SessionId) if err != nil { return nil, nil, utils.LavaFormatError("failed to get a provider session", err, &map[string]string{"sessionID": strconv.FormatUint(request.SessionId, 10), "consumer": extractedConsumerAddress.String(), "relayNum": strconv.FormatUint(request.RelayNum, 10)}) @@ -157,7 +164,7 @@ func (rpcps *RPCProviderServer) initRelay(ctx context.Context, request *pairingt return singleProviderSession, extractedConsumerAddress, nil } - // handle data reliability session verifications + // data reliability session verifications err = rpcps.verifyDataReliabilityRelayRequest(ctx, request, extractedConsumerAddress) if err != nil { return nil, nil, utils.LavaFormatError("failed data reliability validation", err, nil) From 4afa16f58f928eeec8a745a04c7c1ce5fb0ff02a Mon Sep 17 00:00:00 2001 From: omer mishael Date: Wed, 15 Feb 2023 13:10:03 +0200 Subject: [PATCH 008/123] added tryRelay --- protocol/lavaprotocol/response_builder.go | 25 ++++ .../lavasession/provider_session_manager.go | 8 ++ protocol/rpcprovider/rpcprovider_server.go | 126 +++++++++++++++++- 3 files changed, 157 insertions(+), 2 deletions(-) diff --git a/protocol/lavaprotocol/response_builder.go b/protocol/lavaprotocol/response_builder.go index 7731b28e67..2c85be1af8 100644 --- a/protocol/lavaprotocol/response_builder.go +++ b/protocol/lavaprotocol/response_builder.go @@ -6,6 +6,7 @@ import ( "sort" "strconv" + btcSecp256k1 "github.com/btcsuite/btcd/btcec" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/lavanet/lava/relayer/sigs" "github.com/lavanet/lava/utils" @@ -15,6 +16,30 @@ import ( tenderbytes "github.com/tendermint/tendermint/libs/bytes" ) +func SignRelayResponse(consumerAddress sdk.AccAddress, request pairingtypes.RelayRequest, pkey *btcSecp256k1.PrivateKey, reply *pairingtypes.RelayReply, signDataReliability bool) (*pairingtypes.RelayReply, error) { + // request is a copy of the original request, but won't modify it + // update relay request requestedBlock to the provided one in case it was arbitrary + UpdateRequestedBlock(&request, reply) + // Update signature, + sig, err := sigs.SignRelayResponse(pkey, reply, &request) + if err != nil { + return nil, utils.LavaFormatError("failed signing relay response", err, + &map[string]string{"request": fmt.Sprintf("%v", request), "reply": fmt.Sprintf("%v", reply)}) + } + reply.Sig = sig + + if signDataReliability { + // update sig blocks 
signature + sigBlocks, err := sigs.SignResponseFinalizationData(pkey, reply, &request, consumerAddress) + if err != nil { + return nil, utils.LavaFormatError("failed signing finalization data", err, + &map[string]string{"request": fmt.Sprintf("%v", request), "reply": fmt.Sprintf("%v", reply), "userAddr": consumerAddress.String()}) + } + reply.SigBlocks = sigBlocks + } + return reply, nil +} + func ExtractSignerAddress(in *pairingtypes.RelayRequest) (tenderbytes.HexBytes, error) { pubKey, err := sigs.RecoverPubKeyFromRelay(*in) if err != nil { diff --git a/protocol/lavasession/provider_session_manager.go b/protocol/lavasession/provider_session_manager.go index 1541c00b66..f0df6697a8 100644 --- a/protocol/lavasession/provider_session_manager.go +++ b/protocol/lavasession/provider_session_manager.go @@ -6,6 +6,7 @@ import ( "sync" "sync/atomic" + sdk "github.com/cosmos/cosmos-sdk/types" "github.com/lavanet/lava/utils" ) @@ -146,6 +147,13 @@ func (psm *ProviderSessionManager) RPCProviderEndpoint() *RPCProviderEndpoint { func (psm *ProviderSessionManager) UpdateEpoch(epoch uint64) { // update the epoch to limit consumer usage } +func (psm *ProviderSessionManager) ProcessUnsubscribeEthereum(subscriptionID string, consumerAddress sdk.AccAddress) error { + return fmt.Errorf("not implemented") +} + +func (psm *ProviderSessionManager) ProcessUnsubscribeTendermint(apiName string, subscriptionID string, consumerAddress sdk.AccAddress) error { + return fmt.Errorf("not implemented") +} // Returning a new provider session manager func NewProviderSessionManager(rpcProviderEndpoint *RPCProviderEndpoint, stateQuery StateQuery) *ProviderSessionManager { diff --git a/protocol/rpcprovider/rpcprovider_server.go b/protocol/rpcprovider/rpcprovider_server.go index 22b3b19af9..3694fef8f8 100644 --- a/protocol/rpcprovider/rpcprovider_server.go +++ b/protocol/rpcprovider/rpcprovider_server.go @@ -6,11 +6,13 @@ import ( "encoding/json" "fmt" "strconv" + "strings" "github.com/btcsuite/btcd/btcec" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/gogo/status" "github.com/lavanet/lava/protocol/chainlib" + "github.com/lavanet/lava/protocol/chainlib/chainproxy" "github.com/lavanet/lava/protocol/chaintracker" "github.com/lavanet/lava/protocol/lavaprotocol" "github.com/lavanet/lava/protocol/lavasession" @@ -18,6 +20,7 @@ import ( "github.com/lavanet/lava/relayer/sigs" "github.com/lavanet/lava/utils" pairingtypes "github.com/lavanet/lava/x/pairing/types" + spectypes "github.com/lavanet/lava/x/spec/types" "google.golang.org/grpc/codes" ) @@ -90,7 +93,7 @@ func (rpcps *RPCProviderServer) Relay(ctx context.Context, request *pairingtypes "request.relayNumber": strconv.FormatUint(request.RelayNum, 10), "request.cu": strconv.FormatUint(request.CuSum, 10), }) - relaySession, _, err := rpcps.initRelay(ctx, request) + relaySession, consumerAddress, err := rpcps.initRelay(ctx, request) if err != nil { return nil, rpcps.handleRelayErrorStatus(err) } @@ -104,7 +107,7 @@ func (rpcps *RPCProviderServer) Relay(ctx context.Context, request *pairingtypes if err != nil { return nil, rpcps.handleRelayErrorStatus(err) } - // reply, err := rpcps.TryRelay(ctx, request, userAddr, nodeMsg) + _, err = rpcps.TryRelay(ctx, request, consumerAddress, chainMessage) // if err != nil && request.DataReliability == nil { // we ignore data reliability because its not checking/adding cu/relaynum. // // failed to send relay. we need to adjust session state. cuSum and relayNumber. 
// relayFailureError := s.onRelayFailure(userSessions, relaySession, nodeMsg) @@ -298,3 +301,122 @@ func (rpcps *RPCProviderServer) handleRelayErrorStatus(err error) error { } return err } + +func (rpcps *RPCProviderServer) TryRelay(ctx context.Context, request *pairingtypes.RelayRequest, consumerAddr sdk.AccAddress, chainMsg chainlib.ChainMessage) (*pairingtypes.RelayReply, error) { + // Send + var reqMsg *chainproxy.JsonrpcMessage + var reqParams interface{} + switch msg := chainMsg.GetRPCMessage().(type) { + case *chainproxy.JsonrpcMessage: + reqMsg = msg + reqParams = reqMsg.Params + default: + reqMsg = nil + } + latestBlock := int64(0) + finalizedBlockHashes := map[int64]interface{}{} + var requestedBlockHash []byte = nil + finalized := false + dataReliabilityEnabled, _ := rpcps.chainParser.DataReliabilityParams() + if dataReliabilityEnabled { + // Add latest block and finalization data + var err error + _, _, blockDistanceToFinalization, blocksInFinalizationData := rpcps.chainParser.ChainBlockStats() + fromBlock := spectypes.LATEST_BLOCK - int64(blockDistanceToFinalization) - int64(blocksInFinalizationData) + toBlock := spectypes.LATEST_BLOCK - int64(blockDistanceToFinalization) + latestBlock, requestedHashes, err := rpcps.reliabilityManager.GetLatestBlockData(fromBlock, toBlock, request.RequestBlock) + if err != nil { + return nil, utils.LavaFormatError("Could not guarantee data reliability", err, &map[string]string{"requestedBlock": strconv.FormatInt(request.RequestBlock, 10), "latestBlock": strconv.FormatInt(latestBlock, 10)}) + } + request.RequestBlock = lavaprotocol.ReplaceRequestedBlock(request.RequestBlock, latestBlock) + for _, block := range requestedHashes { + if block.Block == request.RequestBlock { + requestedBlockHash = []byte(block.Hash) + } else { + finalizedBlockHashes[block.Block] = block.Hash + } + } + if requestedBlockHash == nil { + // avoid using cache, but can still service + utils.LavaFormatWarning("no hash data for requested block", nil, &map[string]string{"requestedBlock": strconv.FormatInt(request.RequestBlock, 10), "latestBlock": strconv.FormatInt(latestBlock, 10)}) + } + + if request.RequestBlock > latestBlock { + // consumer asked for a block that is newer than our state tracker, we cant sign this for DR + return nil, utils.LavaFormatError("Requested a block that is too new", err, &map[string]string{"requestedBlock": strconv.FormatInt(request.RequestBlock, 10), "latestBlock": strconv.FormatInt(latestBlock, 10)}) + } + + finalized = spectypes.IsFinalizedBlock(request.RequestBlock, latestBlock, blockDistanceToFinalization) + } + cache := rpcps.cache + // TODO: handle cache on fork for dataReliability = false + var reply *pairingtypes.RelayReply = nil + var err error = nil + if requestedBlockHash != nil || finalized { + reply, err = cache.GetEntry(ctx, request, rpcps.rpcProviderEndpoint.ApiInterface, requestedBlockHash, rpcps.rpcProviderEndpoint.ChainID, finalized) + } + if err != nil || reply == nil { + if err != nil && performance.NotConnectedError.Is(err) { + utils.LavaFormatWarning("cache not connected", err, nil) + } + // cache miss or invalid + reply, _, _, err = rpcps.chainProxy.SendNodeMsg(ctx, nil, chainMsg) + if err != nil { + return nil, utils.LavaFormatError("Sending chainMsg failed", err, nil) + } + if requestedBlockHash != nil || finalized { + err := cache.SetEntry(ctx, request, rpcps.rpcProviderEndpoint.ApiInterface, requestedBlockHash, rpcps.rpcProviderEndpoint.ChainID, consumerAddr.String(), reply, finalized) + if err != nil && 
!performance.NotInitialisedError.Is(err) { + utils.LavaFormatWarning("error updating cache with new entry", err, nil) + } + } + } + + apiName := chainMsg.GetServiceApi().Name + if reqMsg != nil && strings.Contains(apiName, "unsubscribe") { + err := rpcps.processUnsubscribe(apiName, consumerAddr, reqParams) + if err != nil { + return nil, err + } + } + // TODO: verify that the consumer still listens, if it took to much time to get the response we cant update the CU. + + jsonStr, err := json.Marshal(finalizedBlockHashes) + if err != nil { + return nil, utils.LavaFormatError("failed unmarshaling finalizedBlockHashes", err, + &map[string]string{"finalizedBlockHashes": fmt.Sprintf("%v", finalizedBlockHashes)}) + } + + reply.FinalizedBlocksHashes = jsonStr + reply.LatestBlock = latestBlock + + reply, err = lavaprotocol.SignRelayResponse(consumerAddr, *request, rpcps.privKey, reply, dataReliabilityEnabled) + if err != nil { + return nil, err + } + + // return reply to user + return reply, nil +} + +func (rpcps *RPCProviderServer) processUnsubscribe(apiName string, consumerAddr sdk.AccAddress, reqParams interface{}) error { + switch p := reqParams.(type) { + case []interface{}: + subscriptionID, ok := p[0].(string) + if !ok { + return fmt.Errorf("processUnsubscribe - p[0].(string) - type assertion failed, type:" + fmt.Sprintf("%s", p[0])) + } + return rpcps.providerSessionManager.ProcessUnsubscribeEthereum(subscriptionID, consumerAddr) + case map[string]interface{}: + subscriptionID := "" + if apiName == "unsubscribe" { + var ok bool + subscriptionID, ok = p["query"].(string) + if !ok { + return fmt.Errorf("processUnsubscribe - p['query'].(string) - type assertion failed, type:" + fmt.Sprintf("%s", p["query"])) + } + } + return rpcps.providerSessionManager.ProcessUnsubscribeTendermint(apiName, subscriptionID, consumerAddr) + } + return nil +} From 93bac0e155dc7a4c19d3f3778295c794e1988cb0 Mon Sep 17 00:00:00 2001 From: omer mishael Date: Wed, 15 Feb 2023 13:16:50 +0200 Subject: [PATCH 009/123] added onSessionFailure usage --- .../lavasession/provider_session_manager.go | 4 +- protocol/rpcprovider/rpcprovider_server.go | 38 +++++++++---------- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/protocol/lavasession/provider_session_manager.go b/protocol/lavasession/provider_session_manager.go index f0df6697a8..347b88fdf5 100644 --- a/protocol/lavasession/provider_session_manager.go +++ b/protocol/lavasession/provider_session_manager.go @@ -132,8 +132,8 @@ func (psm *ProviderSessionManager) GetDataReliabilitySession(address string, epo return nil, fmt.Errorf("not implemented") } -func (psm *ProviderSessionManager) OnSessionFailure() (epoch uint64, err error) { - return 0, nil +func (psm *ProviderSessionManager) OnSessionFailure(singleProviderSession *SingleProviderSession) (err error) { + return nil } func (psm *ProviderSessionManager) OnSessionDone(proof string) (epoch uint64, err error) { diff --git a/protocol/rpcprovider/rpcprovider_server.go b/protocol/rpcprovider/rpcprovider_server.go index 3694fef8f8..dbc4178cca 100644 --- a/protocol/rpcprovider/rpcprovider_server.go +++ b/protocol/rpcprovider/rpcprovider_server.go @@ -10,6 +10,7 @@ import ( "github.com/btcsuite/btcd/btcec" sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" "github.com/gogo/status" "github.com/lavanet/lava/protocol/chainlib" "github.com/lavanet/lava/protocol/chainlib/chainproxy" @@ -107,25 +108,24 @@ func (rpcps *RPCProviderServer) Relay(ctx context.Context, request 
*pairingtypes if err != nil { return nil, rpcps.handleRelayErrorStatus(err) } - _, err = rpcps.TryRelay(ctx, request, consumerAddress, chainMessage) - // if err != nil && request.DataReliability == nil { // we ignore data reliability because its not checking/adding cu/relaynum. - // // failed to send relay. we need to adjust session state. cuSum and relayNumber. - // relayFailureError := s.onRelayFailure(userSessions, relaySession, nodeMsg) - // if relayFailureError != nil { - // err = sdkerrors.Wrapf(relayFailureError, "On relay failure: "+err.Error()) - // } - // utils.LavaFormatError("TryRelay Failed", err, &map[string]string{ - // "request.SessionId": strconv.FormatUint(request.SessionId, 10), - // "request.userAddr": userAddr.String(), - // }) - // } else { - // utils.LavaFormatDebug("Provider Finished Relay Successfully", &map[string]string{ - // "request.SessionId": strconv.FormatUint(request.SessionId, 10), - // "request.relayNumber": strconv.FormatUint(request.RelayNum, 10), - // }) - // } - // return reply, s.handleRelayErrorStatus(err) - return nil, fmt.Errorf("not implemented") + reply, err := rpcps.TryRelay(ctx, request, consumerAddress, chainMessage) + if err != nil && request.DataReliability == nil { // we ignore data reliability because its not checking/adding cu/relaynum. + // failed to send relay. we need to adjust session state. cuSum and relayNumber. + relayFailureError := rpcps.providerSessionManager.OnSessionFailure(relaySession) + if relayFailureError != nil { + err = sdkerrors.Wrapf(relayFailureError, "On relay failure: "+err.Error()) + } + // utils.LavaFormatError("TryRelay Failed", err, &map[string]string{ + // "request.SessionId": strconv.FormatUint(request.SessionId, 10), + // "request.userAddr": userAddr.String(), + // }) + } else { + utils.LavaFormatDebug("Provider Finished Relay Successfully", &map[string]string{ + "request.SessionId": strconv.FormatUint(request.SessionId, 10), + "request.relayNumber": strconv.FormatUint(request.RelayNum, 10), + }) + } + return reply, rpcps.handleRelayErrorStatus(err) } func (rpcps *RPCProviderServer) RelaySubscribe(request *pairingtypes.RelayRequest, srv pairingtypes.Relayer_RelaySubscribeServer) error { return fmt.Errorf("not implemented") From 372c69ba1a105238f127170718dbfb56eef8077b Mon Sep 17 00:00:00 2001 From: omer mishael Date: Wed, 15 Feb 2023 14:46:13 +0200 Subject: [PATCH 010/123] added onSessionDone --- .../lavasession/provider_session_manager.go | 4 ++-- protocol/rpcprovider/rpcprovider_server.go | 21 ++++++++++++------- 2 files changed, 15 insertions(+), 10 deletions(-) diff --git a/protocol/lavasession/provider_session_manager.go b/protocol/lavasession/provider_session_manager.go index 347b88fdf5..f7f5c1c1d0 100644 --- a/protocol/lavasession/provider_session_manager.go +++ b/protocol/lavasession/provider_session_manager.go @@ -136,8 +136,8 @@ func (psm *ProviderSessionManager) OnSessionFailure(singleProviderSession *Singl return nil } -func (psm *ProviderSessionManager) OnSessionDone(proof string) (epoch uint64, err error) { - return 0, nil +func (psm *ProviderSessionManager) OnSessionDone(singleProviderSession *SingleProviderSession) (err error) { + return nil } func (psm *ProviderSessionManager) RPCProviderEndpoint() *RPCProviderEndpoint { diff --git a/protocol/rpcprovider/rpcprovider_server.go b/protocol/rpcprovider/rpcprovider_server.go index dbc4178cca..9c24e4c10c 100644 --- a/protocol/rpcprovider/rpcprovider_server.go +++ b/protocol/rpcprovider/rpcprovider_server.go @@ -115,15 +115,20 @@ func (rpcps 
*RPCProviderServer) Relay(ctx context.Context, request *pairingtypes if relayFailureError != nil { err = sdkerrors.Wrapf(relayFailureError, "On relay failure: "+err.Error()) } - // utils.LavaFormatError("TryRelay Failed", err, &map[string]string{ - // "request.SessionId": strconv.FormatUint(request.SessionId, 10), - // "request.userAddr": userAddr.String(), - // }) - } else { - utils.LavaFormatDebug("Provider Finished Relay Successfully", &map[string]string{ - "request.SessionId": strconv.FormatUint(request.SessionId, 10), - "request.relayNumber": strconv.FormatUint(request.RelayNum, 10), + utils.LavaFormatError("TryRelay Failed", err, &map[string]string{ + "request.SessionId": strconv.FormatUint(request.SessionId, 10), + "request.userAddr": consumerAddress.String(), }) + } else { + relayError := rpcps.providerSessionManager.OnSessionDone(relaySession) + if relayError != nil { + err = sdkerrors.Wrapf(relayError, "OnSession Done failure: "+err.Error()) + } else { + utils.LavaFormatDebug("Provider Finished Relay Successfully", &map[string]string{ + "request.SessionId": strconv.FormatUint(request.SessionId, 10), + "request.relayNumber": strconv.FormatUint(request.RelayNum, 10), + }) + } } return reply, rpcps.handleRelayErrorStatus(err) } From 916ecc9afb44880b5579ad03476f40d6732ca804 Mon Sep 17 00:00:00 2001 From: omer mishael Date: Wed, 15 Feb 2023 14:51:36 +0200 Subject: [PATCH 011/123] sync --- protocol/lavasession/provider_session_manager.go | 2 ++ protocol/rpcprovider/rpcprovider_server.go | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/protocol/lavasession/provider_session_manager.go b/protocol/lavasession/provider_session_manager.go index f7f5c1c1d0..4fd1c7e8ce 100644 --- a/protocol/lavasession/provider_session_manager.go +++ b/protocol/lavasession/provider_session_manager.go @@ -133,10 +133,12 @@ func (psm *ProviderSessionManager) GetDataReliabilitySession(address string, epo } func (psm *ProviderSessionManager) OnSessionFailure(singleProviderSession *SingleProviderSession) (err error) { + // need to handle dataReliability session failure separately return nil } func (psm *ProviderSessionManager) OnSessionDone(singleProviderSession *SingleProviderSession) (err error) { + // need to handle dataReliability session separately return nil } diff --git a/protocol/rpcprovider/rpcprovider_server.go b/protocol/rpcprovider/rpcprovider_server.go index 9c24e4c10c..62901a880f 100644 --- a/protocol/rpcprovider/rpcprovider_server.go +++ b/protocol/rpcprovider/rpcprovider_server.go @@ -109,7 +109,7 @@ func (rpcps *RPCProviderServer) Relay(ctx context.Context, request *pairingtypes return nil, rpcps.handleRelayErrorStatus(err) } reply, err := rpcps.TryRelay(ctx, request, consumerAddress, chainMessage) - if err != nil && request.DataReliability == nil { // we ignore data reliability because its not checking/adding cu/relaynum. + if err != nil { // failed to send relay. we need to adjust session state. cuSum and relayNumber. 
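// OnSessionFailure is still a stub here (it only returns nil); a hedged sketch of the
// rollback it is expected to perform, using the SingleProviderSession fields, with the
// exact bookkeeping being an assumption:
//   relaySession.CuSum -= relaySession.LatestRelayCu // undo the CU prepared for this relay
//   relaySession.LatestRelayCu = 0
//   relaySession.RelayNum -= 1                       // the relay was never served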
relayFailureError := rpcps.providerSessionManager.OnSessionFailure(relaySession) if relayFailureError != nil { @@ -124,6 +124,7 @@ func (rpcps *RPCProviderServer) Relay(ctx context.Context, request *pairingtypes if relayError != nil { err = sdkerrors.Wrapf(relayError, "OnSession Done failure: "+err.Error()) } else { + utils.LavaFormatDebug("Provider Finished Relay Successfully", &map[string]string{ "request.SessionId": strconv.FormatUint(request.SessionId, 10), "request.relayNumber": strconv.FormatUint(request.RelayNum, 10), From 880562715305d0a3b2c7650cb159cc2d632649ee Mon Sep 17 00:00:00 2001 From: omer mishael Date: Wed, 15 Feb 2023 14:55:45 +0200 Subject: [PATCH 012/123] changed onSessionDone interface --- protocol/lavasession/provider_session_manager.go | 4 +++- protocol/rpcprovider/rpcprovider_server.go | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/protocol/lavasession/provider_session_manager.go b/protocol/lavasession/provider_session_manager.go index 4fd1c7e8ce..b0f7d628a9 100644 --- a/protocol/lavasession/provider_session_manager.go +++ b/protocol/lavasession/provider_session_manager.go @@ -8,6 +8,7 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" "github.com/lavanet/lava/utils" + pairingtypes "github.com/lavanet/lava/x/pairing/types" ) type ProviderSessionManager struct { @@ -137,8 +138,9 @@ func (psm *ProviderSessionManager) OnSessionFailure(singleProviderSession *Singl return nil } -func (psm *ProviderSessionManager) OnSessionDone(singleProviderSession *SingleProviderSession) (err error) { +func (psm *ProviderSessionManager) OnSessionDone(singleProviderSession *SingleProviderSession, request *pairingtypes.RelayRequest) (err error) { // need to handle dataReliability session separately + // store the request as proof return nil } diff --git a/protocol/rpcprovider/rpcprovider_server.go b/protocol/rpcprovider/rpcprovider_server.go index 62901a880f..674f1387c3 100644 --- a/protocol/rpcprovider/rpcprovider_server.go +++ b/protocol/rpcprovider/rpcprovider_server.go @@ -120,7 +120,7 @@ func (rpcps *RPCProviderServer) Relay(ctx context.Context, request *pairingtypes "request.userAddr": consumerAddress.String(), }) } else { - relayError := rpcps.providerSessionManager.OnSessionDone(relaySession) + relayError := rpcps.providerSessionManager.OnSessionDone(relaySession, request) if relayError != nil { err = sdkerrors.Wrapf(relayError, "OnSession Done failure: "+err.Error()) } else { From afe4a2b0b79ff426621caedad167b3e03cf80448 Mon Sep 17 00:00:00 2001 From: omer mishael Date: Wed, 15 Feb 2023 16:16:32 +0200 Subject: [PATCH 013/123] handle proof sending --- .../rpcprovider/rewardserver/reward_server.go | 3 +-- protocol/rpcprovider/rpcprovider_server.go | 20 +++++++++++++------ 2 files changed, 15 insertions(+), 8 deletions(-) diff --git a/protocol/rpcprovider/rewardserver/reward_server.go b/protocol/rpcprovider/rewardserver/reward_server.go index 0ab0cfbd60..281a39b475 100644 --- a/protocol/rpcprovider/rewardserver/reward_server.go +++ b/protocol/rpcprovider/rewardserver/reward_server.go @@ -3,7 +3,6 @@ package rewardserver import ( "context" - "github.com/lavanet/lava/protocol/lavasession" pairingtypes "github.com/lavanet/lava/x/pairing/types" ) @@ -15,7 +14,7 @@ type RewardsTxSender interface { TxRelayPayment(ctx context.Context, relayRequests []*pairingtypes.RelayRequest) } -func (rws *RewardServer) SendNewProof(ctx context.Context, singleProviderSession *lavasession.SingleProviderSession, epoch uint64, consumerAddr string) { +func (rws *RewardServer) 
SendNewProof(ctx context.Context, proof *pairingtypes.RelayRequest, epoch uint64, consumerAddr string) { // TODO: implement // get the proof for this consumer for this epoch for this session, update the latest proof // write to a channel the epoch diff --git a/protocol/rpcprovider/rpcprovider_server.go b/protocol/rpcprovider/rpcprovider_server.go index 674f1387c3..a30e8510da 100644 --- a/protocol/rpcprovider/rpcprovider_server.go +++ b/protocol/rpcprovider/rpcprovider_server.go @@ -44,7 +44,7 @@ type ReliabilityManagerInf interface { } type RewardServerInf interface { - SendNewProof(ctx context.Context, singleProviderSession *lavasession.SingleProviderSession, epoch uint64, consumerAddr string) + SendNewProof(ctx context.Context, proof *pairingtypes.RelayRequest, epoch uint64, consumerAddr string) SendNewDataReliabilityProof(ctx context.Context, dataReliability *pairingtypes.VRFData, epoch uint64, consumerAddr string) } @@ -124,11 +124,19 @@ func (rpcps *RPCProviderServer) Relay(ctx context.Context, request *pairingtypes if relayError != nil { err = sdkerrors.Wrapf(relayError, "OnSession Done failure: "+err.Error()) } else { - - utils.LavaFormatDebug("Provider Finished Relay Successfully", &map[string]string{ - "request.SessionId": strconv.FormatUint(request.SessionId, 10), - "request.relayNumber": strconv.FormatUint(request.RelayNum, 10), - }) + if request.DataReliability == nil { + rpcps.rewardServer.SendNewProof(ctx, request, relaySession.PairingEpoch, consumerAddress.String()) + utils.LavaFormatDebug("Provider Finished Relay Successfully", &map[string]string{ + "request.SessionId": strconv.FormatUint(request.SessionId, 10), + "request.relayNumber": strconv.FormatUint(request.RelayNum, 10), + }) + } else { + rpcps.rewardServer.SendNewDataReliabilityProof(ctx, request.DataReliability, relaySession.PairingEpoch, consumerAddress.String()) + utils.LavaFormatDebug("Provider Finished DataReliability Relay Successfully", &map[string]string{ + "request.SessionId": strconv.FormatUint(request.SessionId, 10), + "request.relayNumber": strconv.FormatUint(request.RelayNum, 10), + }) + } } } return reply, rpcps.handleRelayErrorStatus(err) From efa881f61f1eef80b74267c0b144f390e5523557 Mon Sep 17 00:00:00 2001 From: omer mishael Date: Wed, 15 Feb 2023 17:52:27 +0200 Subject: [PATCH 014/123] fix reference --- protocol/rpcprovider/rpcprovider_server.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/protocol/rpcprovider/rpcprovider_server.go b/protocol/rpcprovider/rpcprovider_server.go index a30e8510da..1460a207a3 100644 --- a/protocol/rpcprovider/rpcprovider_server.go +++ b/protocol/rpcprovider/rpcprovider_server.go @@ -13,7 +13,7 @@ import ( sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" "github.com/gogo/status" "github.com/lavanet/lava/protocol/chainlib" - "github.com/lavanet/lava/protocol/chainlib/chainproxy" + "github.com/lavanet/lava/protocol/chainlib/chainproxy/rpcInterfaceMessages" "github.com/lavanet/lava/protocol/chaintracker" "github.com/lavanet/lava/protocol/lavaprotocol" "github.com/lavanet/lava/protocol/lavasession" @@ -318,10 +318,10 @@ func (rpcps *RPCProviderServer) handleRelayErrorStatus(err error) error { func (rpcps *RPCProviderServer) TryRelay(ctx context.Context, request *pairingtypes.RelayRequest, consumerAddr sdk.AccAddress, chainMsg chainlib.ChainMessage) (*pairingtypes.RelayReply, error) { // Send - var reqMsg *chainproxy.JsonrpcMessage + var reqMsg *rpcInterfaceMessages.JsonrpcMessage var reqParams interface{} switch msg := 
chainMsg.GetRPCMessage().(type) { - case *chainproxy.JsonrpcMessage: + case *rpcInterfaceMessages.JsonrpcMessage: reqMsg = msg reqParams = reqMsg.Params default: From 57d66b68c3245961469c3db6b67d3c0b61f39f4d Mon Sep 17 00:00:00 2001 From: Ran Mishael Date: Wed, 15 Feb 2023 16:55:16 +0100 Subject: [PATCH 015/123] WIP psm --- protocol/lavasession/errors.go | 3 +- .../lavasession/provider_session_manager.go | 33 ++++++++++--------- protocol/lavasession/provider_types.go | 2 +- 3 files changed, 21 insertions(+), 17 deletions(-) diff --git a/protocol/lavasession/errors.go b/protocol/lavasession/errors.go index cc96dbb90c..f8e3b23378 100644 --- a/protocol/lavasession/errors.go +++ b/protocol/lavasession/errors.go @@ -31,5 +31,6 @@ var ( // Provider Side Errors InvalidEpochError = sdkerrors.New("InvalidEpoch Error", 881, "Requested Epoch Is Too Old") NewSessionWithRelayNumError = sdkerrors.New("NewSessionWithRelayNum Error", 882, "Requested Session With Relay Number Is Invalid") ConsumerIsBlockListed = sdkerrors.New("ConsumerIsBlockListed Error", 883, "This Consumer Is Blocked.") - ConsumerNotActive = sdkerrors.New("ConsumerNotActive Error", 884, "This Consumer Is Not Active.") + ConsumerNotRegisteredYet = sdkerrors.New("ConsumerNotActive Error", 884, "This Consumer Is Not Currently In The Pool.") + SessionDoesNotExist = sdkerrors.New("SessionDoesNotExist Error", 885, "This Session Id Does Not Exist.") ) diff --git a/protocol/lavasession/provider_session_manager.go b/protocol/lavasession/provider_session_manager.go index 347b88fdf5..f3a218ab42 100644 --- a/protocol/lavasession/provider_session_manager.go +++ b/protocol/lavasession/provider_session_manager.go @@ -34,12 +34,16 @@ func (psm *ProviderSessionManager) IsValidEpoch(epoch uint64) (valid bool, thres } // Check if consumer exists and is not blocked, if all is valid return the ProviderSessionsWithConsumer pointer -func (psm *ProviderSessionManager) IsActiveConsumer(epoch uint64, address string) (active bool, err error) { - _, err = psm.getActiveConsumer(epoch, address) +func (psm *ProviderSessionManager) IsActiveConsumer(epoch uint64, address string) (isActive bool, providerSessionWithConsumer *ProviderSessionsWithConsumer, err error) { + providerSessionWithConsumer, err = psm.getActiveConsumer(epoch, address) if err != nil { - return false, nil + if ConsumerNotRegisteredYet.Is(err) { + // consumer is not registered, but its not an error. + return false, nil, nil + } + return false, nil, err } - return true, nil // no error + return true, providerSessionWithConsumer, nil // no error } func (psm *ProviderSessionManager) GetSession(address string, epoch uint64, relayNum uint64, sessionId uint64) (*SingleProviderSession, error) { @@ -49,13 +53,13 @@ func (psm *ProviderSessionManager) GetSession(address string, epoch uint64, rela return nil, InvalidEpochError } - activeConsumer, err := psm.IsActiveConsumer(epoch, address) + activeConsumer, providerSessionWithConsumer, err := psm.IsActiveConsumer(epoch, address) if err != nil { return nil, err } var singleProviderSession *SingleProviderSession if activeConsumer { - singleProviderSession, err = psm.getSessionFromAnActiveConsumer(epoch, address, sessionId) // after getting session verify relayNum etc.. + singleProviderSession, err = psm.getSessionFromAnActiveConsumer(providerSessionWithConsumer, sessionId) // after getting session verify relayNum etc.. } else if relayNum == 1 { // if no session found, we need to create and validate few things: pairing, // return here and call a different function. 
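The hunks above and below split consumer lookup from session retrieval on the provider side. A minimal sketch of the caller flow this refactor is building toward, assuming the ConsumerNotRegisteredYet recovery path and the RegisterProviderSessionWithConsumer / GetSession signatures introduced a couple of patches later in this series; treat the names as illustrative rather than final:

func getOrRegisterSession(psm *ProviderSessionManager, consumerAddress string, epoch uint64, sessionId uint64) (*SingleProviderSession, error) {
	// sketch only, not part of this patch
	session, err := psm.GetSession(consumerAddress, epoch, sessionId)
	if err != nil {
		if !ConsumerNotRegisteredYet.Is(err) {
			return nil, err
		}
		// first relay from this consumer in this epoch: create its entry, then the session
		session, err = psm.RegisterProviderSessionWithConsumer(consumerAddress, epoch, sessionId)
		if err != nil {
			return nil, err
		}
	}
	// the returned session is locked; the caller still runs PrepareSessionForUsage and later
	// OnSessionDone or OnSessionFailure
	return session, nil
}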
@@ -63,8 +67,8 @@ func (psm *ProviderSessionManager) GetSession(address string, epoch uint64, rela singleProviderSession, err = psm.getNewSession(epoch, address) // after getting session verify relayNum etc.. } else { - utils.LavaFormatError("GetSession", NewSessionWithRelayNumError, &map[string]string{"RequestedEpoch": strconv.FormatUint(epoch, 10)}) - return nil, NewSessionWithRelayNumError + return nil, utils.LavaFormatError("GetSession Error, Consumer is not active and relayNum != 1", + NewSessionWithRelayNumError, &map[string]string{"RequestedEpoch": strconv.FormatUint(epoch, 10), "relayNum": strconv.FormatUint(relayNum, 10)}) } if err != nil { @@ -74,6 +78,7 @@ func (psm *ProviderSessionManager) GetSession(address string, epoch uint64, rela // validate later relayNum etc.. + // singleProviderSession Is locked at this point (currently it isnt dont forget to add locks where needed) return singleProviderSession, nil } @@ -86,7 +91,7 @@ func (psm *ProviderSessionManager) createNewSingleProviderSession(providerSessio return nil, fmt.Errorf("not implemented") } -func (psm *ProviderSessionManager) getActiveConsumer(epoch uint64, address string) (singleProviderSession *ProviderSessionsWithConsumer, err error) { +func (psm *ProviderSessionManager) getActiveConsumer(epoch uint64, address string) (providerSessionWithConsumer *ProviderSessionsWithConsumer, err error) { psm.lock.RLock() defer psm.lock.RUnlock() valid, _ := psm.IsValidEpoch(epoch) @@ -104,17 +109,15 @@ func (psm *ProviderSessionManager) getActiveConsumer(epoch uint64, address strin return providerSessionWithConsumer, nil // no error } } - return nil, ConsumerNotActive + return nil, ConsumerNotRegisteredYet } -func (psm *ProviderSessionManager) getSessionFromAnActiveConsumer(epoch uint64, address string, sessionId uint64) (singleProviderSession *SingleProviderSession, err error) { - providerSessionWithConsumer, err := psm.getActiveConsumer(epoch, address) - if err != nil { - return nil, err - } +func (psm *ProviderSessionManager) getSessionFromAnActiveConsumer(providerSessionWithConsumer *ProviderSessionsWithConsumer, sessionId uint64) (singleProviderSession *SingleProviderSession, err error) { session, err := providerSessionWithConsumer.GetExistingSession(sessionId) if err == nil { return session, nil + } else if SessionDoesNotExist.Is(err) { + utils.LavaFormatFatal("GetExistingSession Unexpected Error", err, nil) } // if we don't have a session we need to create a new one. 
return psm.createNewSingleProviderSession(providerSessionWithConsumer, sessionId) diff --git a/protocol/lavasession/provider_types.go b/protocol/lavasession/provider_types.go index afc4ab1ce1..c621f05ab8 100644 --- a/protocol/lavasession/provider_types.go +++ b/protocol/lavasession/provider_types.go @@ -96,7 +96,7 @@ func (pswc *ProviderSessionsWithConsumer) GetExistingSession(sessionId uint64) ( if session, ok := pswc.Sessions[sessionId]; ok { return session, nil } - return nil, fmt.Errorf("session does not exist") + return nil, SessionDoesNotExist } type StateQuery interface { From 33bd93fac1560f85cea9703b3a72abbdfdfad996 Mon Sep 17 00:00:00 2001 From: omer mishael Date: Thu, 16 Feb 2023 11:19:08 +0200 Subject: [PATCH 016/123] added subscribe support --- .../lavasession/provider_session_manager.go | 26 ++++++ protocol/lavasession/provider_types.go | 7 ++ protocol/rpcprovider/rpcprovider_server.go | 85 ++++++++++++++++++- 3 files changed, 116 insertions(+), 2 deletions(-) diff --git a/protocol/lavasession/provider_session_manager.go b/protocol/lavasession/provider_session_manager.go index 6bac7b2cfe..20ad961dbe 100644 --- a/protocol/lavasession/provider_session_manager.go +++ b/protocol/lavasession/provider_session_manager.go @@ -162,6 +162,32 @@ func (psm *ProviderSessionManager) ProcessUnsubscribeTendermint(apiName string, return fmt.Errorf("not implemented") } +func (psm *ProviderSessionManager) NewSubscription(consumerAddress string, epoch uint64, subscription *RPCSubscription) error { + // return an error if subscriptionID exists + // original code: + // userSessions.Lock.Lock() + // if _, ok := userSessions.Subs[subscriptionID]; ok { + // return utils.LavaFormatError("SubscriptiodID: "+subscriptionID+"exists", nil, nil) + // } + // userSessions.Subs[subscriptionID] = &subscription{ + // id: subscriptionID, + // sub: clientSub, + // subscribeRepliesChan: subscribeRepliesChan, + // } + // userSessions.Lock.Unlock() + return fmt.Errorf("not implemented") +} + +func (psm *ProviderSessionManager) SubscriptionFailure(consumerAddress string, epoch uint64, subscriptionID string) { + // original code + // userSessions.Lock.Lock() + // if sub, ok := userSessions.Subs[subscriptionID]; ok { + // sub.disconnect() + // delete(userSessions.Subs, subscriptionID) + // } + // userSessions.Lock.Unlock() +} + // Returning a new provider session manager func NewProviderSessionManager(rpcProviderEndpoint *RPCProviderEndpoint, stateQuery StateQuery) *ProviderSessionManager { return &ProviderSessionManager{rpcProviderEndpoint: rpcProviderEndpoint, stateQuery: stateQuery} diff --git a/protocol/lavasession/provider_types.go b/protocol/lavasession/provider_types.go index f815707bd8..5e11dccb26 100644 --- a/protocol/lavasession/provider_types.go +++ b/protocol/lavasession/provider_types.go @@ -6,6 +6,7 @@ import ( "sync" "sync/atomic" + "github.com/lavanet/lava/protocol/chainlib/chainproxy/rpcclient" "github.com/lavanet/lava/utils" pairingtypes "github.com/lavanet/lava/x/pairing/types" ) @@ -31,6 +32,12 @@ type RPCProviderEndpoint struct { NodeUrl []string `yaml:"node-url,omitempty" json:"node-url,omitempty" mapstructure:"node-url"` } +type RPCSubscription struct { + Id string + Sub *rpcclient.ClientSubscription + SubscribeRepliesChan chan interface{} +} + func (rpcpe *RPCProviderEndpoint) Key() string { return rpcpe.ChainID + rpcpe.ApiInterface } diff --git a/protocol/rpcprovider/rpcprovider_server.go b/protocol/rpcprovider/rpcprovider_server.go index 1460a207a3..02f77cd728 100644 --- 
a/protocol/rpcprovider/rpcprovider_server.go +++ b/protocol/rpcprovider/rpcprovider_server.go @@ -14,6 +14,7 @@ import ( "github.com/gogo/status" "github.com/lavanet/lava/protocol/chainlib" "github.com/lavanet/lava/protocol/chainlib/chainproxy/rpcInterfaceMessages" + "github.com/lavanet/lava/protocol/chainlib/chainproxy/rpcclient" "github.com/lavanet/lava/protocol/chaintracker" "github.com/lavanet/lava/protocol/lavaprotocol" "github.com/lavanet/lava/protocol/lavasession" @@ -125,7 +126,7 @@ func (rpcps *RPCProviderServer) Relay(ctx context.Context, request *pairingtypes err = sdkerrors.Wrapf(relayError, "OnSession Done failure: "+err.Error()) } else { if request.DataReliability == nil { - rpcps.rewardServer.SendNewProof(ctx, request, relaySession.PairingEpoch, consumerAddress.String()) + rpcps.rewardServer.SendNewProof(ctx, request.ShallowCopy(), relaySession.PairingEpoch, consumerAddress.String()) utils.LavaFormatDebug("Provider Finished Relay Successfully", &map[string]string{ "request.SessionId": strconv.FormatUint(request.SessionId, 10), "request.relayNumber": strconv.FormatUint(request.RelayNum, 10), @@ -142,7 +143,87 @@ func (rpcps *RPCProviderServer) Relay(ctx context.Context, request *pairingtypes return reply, rpcps.handleRelayErrorStatus(err) } func (rpcps *RPCProviderServer) RelaySubscribe(request *pairingtypes.RelayRequest, srv pairingtypes.Relayer_RelaySubscribeServer) error { - return fmt.Errorf("not implemented") + utils.LavaFormatDebug("Provider got relay subscribe request", &map[string]string{ + "request.SessionId": strconv.FormatUint(request.SessionId, 10), + "request.relayNumber": strconv.FormatUint(request.RelayNum, 10), + "request.cu": strconv.FormatUint(request.CuSum, 10), + }) + ctx := context.Background() + relaySession, consumerAddress, err := rpcps.initRelay(ctx, request) + if err != nil { + return rpcps.handleRelayErrorStatus(err) + } + // parse the message to extract the cu and chainMessage for sending it + chainMessage, err := rpcps.chainParser.ParseMsg(request.ApiUrl, request.Data, request.ConnectionType) + if err != nil { + return rpcps.handleRelayErrorStatus(err) + } + relayCU := chainMessage.GetServiceApi().ComputeUnits + err = relaySession.PrepareSessionForUsage(relayCU, request.CuSum) + if err != nil { + return rpcps.handleRelayErrorStatus(err) + } + err = rpcps.TryRelaySubscribe(ctx, request, srv, chainMessage, consumerAddress) // this function does not return until subscription ends + return err +} + +func (rpcps *RPCProviderServer) TryRelaySubscribe(ctx context.Context, request *pairingtypes.RelayRequest, srv pairingtypes.Relayer_RelaySubscribeServer, chainMessage chainlib.ChainMessage, consumerAddress sdk.AccAddress) error { + var reply *pairingtypes.RelayReply + var clientSub *rpcclient.ClientSubscription + var subscriptionID string + subscribeRepliesChan := make(chan interface{}) + reply, subscriptionID, clientSub, err := rpcps.chainProxy.SendNodeMsg(ctx, subscribeRepliesChan, chainMessage) + if err != nil { + return utils.LavaFormatError("Subscription failed", err, nil) + } + subscription := &lavasession.RPCSubscription{ + Id: subscriptionID, + Sub: clientSub, + SubscribeRepliesChan: subscribeRepliesChan, + } + err = rpcps.providerSessionManager.NewSubscription(consumerAddress.String(), uint64(request.BlockHeight), subscription) + if err != nil { + return err + } + err = srv.Send(reply) // this reply contains the RPC ID + if err != nil { + utils.LavaFormatError("Error getting RPC ID", err, nil) + } + + for { + select { + case <-clientSub.Err(): + 
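// the node side closed the subscription (clientSub reported an error); drop it from the
// provider session manager below and return so the consumer's gRPC stream ends as well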
utils.LavaFormatError("client sub", err, nil) + // delete this connection from the subs map + rpcps.providerSessionManager.SubscriptionFailure(consumerAddress.String(), uint64(request.BlockHeight), subscriptionID) + return err + case subscribeReply := <-subscribeRepliesChan: + data, err := json.Marshal(subscribeReply) + if err != nil { + utils.LavaFormatError("client sub unmarshal", err, nil) + rpcps.providerSessionManager.SubscriptionFailure(consumerAddress.String(), uint64(request.BlockHeight), subscriptionID) + return err + } + + err = srv.Send( + &pairingtypes.RelayReply{ + Data: data, + }, + ) + if err != nil { + // usually triggered when client closes connection + if strings.Contains(err.Error(), "Canceled desc = context canceled") { + err = utils.LavaFormatWarning("Client closed connection", err, nil) + } else { + err = utils.LavaFormatError("srv.Send", err, nil) + } + rpcps.providerSessionManager.SubscriptionFailure(consumerAddress.String(), uint64(request.BlockHeight), subscriptionID) + return err + } + + utils.LavaFormatDebug("Sending data", &map[string]string{"data": string(data)}) + } + } } // verifies basic relay fields, and gets a provider session From 27edfdd4cab0aad8dd1e66220517581d9b841b02 Mon Sep 17 00:00:00 2001 From: Ran Mishael Date: Thu, 16 Feb 2023 10:25:42 +0100 Subject: [PATCH 017/123] WIP psm --- .../chainlib/chainproxy/connector_test.go | 14 +- protocol/lavasession/errors.go | 13 +- .../lavasession/provider_session_manager.go | 104 ++++++++------ .../provider_session_manager_test.go | 16 +++ protocol/lavasession/provider_types.go | 129 ++++++++++++++---- protocol/rpcprovider/rpcprovider_server.go | 17 ++- 6 files changed, 215 insertions(+), 78 deletions(-) diff --git a/protocol/chainlib/chainproxy/connector_test.go b/protocol/chainlib/chainproxy/connector_test.go index 5a89590381..4e51e273a2 100644 --- a/protocol/chainlib/chainproxy/connector_test.go +++ b/protocol/chainlib/chainproxy/connector_test.go @@ -63,7 +63,11 @@ func TestConnector(t *testing.T) { defer listener.Close() ctx := context.Background() conn := NewConnector(ctx, numberOfClients, listenerAddressTcp) - time.Sleep(5 * time.Second) // sleep for 5 seconds so all connections will be created asynchronously + for { // wait for the routine to finish connecting + if len(conn.freeClients) == numberOfClients { + break + } + } require.Equal(t, len(conn.freeClients), numberOfClients) increasedClients := numberOfClients * 2 // increase to double the number of clients rpcList := make([]*rpcclient.Client, increasedClients) @@ -85,7 +89,11 @@ func TestConnectorGrpc(t *testing.T) { defer server.Stop() ctx := context.Background() conn := NewGRPCConnector(ctx, numberOfClients, listenerAddress) - time.Sleep(5 * time.Second) // sleep for 5 seconds so all connections will be created asynchronously + for { // wait for the routine to finish connecting + if len(conn.freeClients) == numberOfClients { + break + } + } require.Equal(t, len(conn.freeClients), numberOfClients) increasedClients := numberOfClients * 2 // increase to double the number of clients rpcList := make([]*grpc.ClientConn, increasedClients) @@ -94,7 +102,7 @@ func TestConnectorGrpc(t *testing.T) { require.Nil(t, err) rpcList[i] = rpc } - require.Equal(t, int(conn.usedClients), increasedClients) // checking we have used clients + require.Equal(t, increasedClients, int(conn.usedClients)) // checking we have used clients for i := 0; i < increasedClients; i++ { conn.ReturnRpc(rpcList[i]) } diff --git a/protocol/lavasession/errors.go 
b/protocol/lavasession/errors.go index f8e3b23378..9265ce8aa0 100644 --- a/protocol/lavasession/errors.go +++ b/protocol/lavasession/errors.go @@ -28,9 +28,12 @@ var ( // Consumer Side Errors ) var ( // Provider Side Errors - InvalidEpochError = sdkerrors.New("InvalidEpoch Error", 881, "Requested Epoch Is Too Old") - NewSessionWithRelayNumError = sdkerrors.New("NewSessionWithRelayNum Error", 882, "Requested Session With Relay Number Is Invalid") - ConsumerIsBlockListed = sdkerrors.New("ConsumerIsBlockListed Error", 883, "This Consumer Is Blocked.") - ConsumerNotRegisteredYet = sdkerrors.New("ConsumerNotActive Error", 884, "This Consumer Is Not Currently In The Pool.") - SessionDoesNotExist = sdkerrors.New("SessionDoesNotExist Error", 885, "This Session Id Does Not Exist.") + InvalidEpochError = sdkerrors.New("InvalidEpoch Error", 881, "Requested Epoch Is Too Old") + NewSessionWithRelayNumError = sdkerrors.New("NewSessionWithRelayNum Error", 882, "Requested Session With Relay Number Is Invalid") + ConsumerIsBlockListed = sdkerrors.New("ConsumerIsBlockListed Error", 883, "This Consumer Is Blocked.") + ConsumerNotRegisteredYet = sdkerrors.New("ConsumerNotActive Error", 884, "This Consumer Is Not Currently In The Pool.") + SessionDoesNotExist = sdkerrors.New("SessionDoesNotExist Error", 885, "This Session Id Does Not Exist.") + MaximumCULimitReachedByConsumer = sdkerrors.New("MaximumCULimitReachedByConsumer Error", 886, "Consumer reached maximum cu limit") + ProviderConsumerCuMisMatch = sdkerrors.New("ProviderConsumerCuMisMatch Error", 887, "Provider and Consumer disagree on total cu for session") + RelayNumberMismatch = sdkerrors.New("RelayNumberMismatch Error", 887, "Provider and Consumer disagree on relay number for session") ) diff --git a/protocol/lavasession/provider_session_manager.go b/protocol/lavasession/provider_session_manager.go index 6bac7b2cfe..8f033070f4 100644 --- a/protocol/lavasession/provider_session_manager.go +++ b/protocol/lavasession/provider_session_manager.go @@ -35,61 +35,81 @@ func (psm *ProviderSessionManager) IsValidEpoch(epoch uint64) (valid bool, thres } // Check if consumer exists and is not blocked, if all is valid return the ProviderSessionsWithConsumer pointer -func (psm *ProviderSessionManager) IsActiveConsumer(epoch uint64, address string) (isActive bool, providerSessionWithConsumer *ProviderSessionsWithConsumer, err error) { +func (psm *ProviderSessionManager) IsActiveConsumer(epoch uint64, address string) (providerSessionWithConsumer *ProviderSessionsWithConsumer, err error) { providerSessionWithConsumer, err = psm.getActiveConsumer(epoch, address) if err != nil { - if ConsumerNotRegisteredYet.Is(err) { - // consumer is not registered, but its not an error. - return false, nil, nil - } - return false, nil, err + return nil, err } - return true, providerSessionWithConsumer, nil // no error + return providerSessionWithConsumer, nil // no error } -func (psm *ProviderSessionManager) GetSession(address string, epoch uint64, relayNum uint64, sessionId uint64) (*SingleProviderSession, error) { +func (psm *ProviderSessionManager) getSingleSessionFromProviderSessionWithConsumer(providerSessionWithConsumer *ProviderSessionsWithConsumer, sessionId uint64, epoch uint64) (*SingleProviderSession, error) { + // TODO:: we can validate here if consumer is blocked with atomicWriteBlockedEpoch + // before getting any sessions. 
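// a hedged sketch of that blocked-consumer check, reusing the existing atomicReadBlockedEpoch
// helper; the blockListedConsumer value of 1 follows the comment in provider_types.go and is
// otherwise an assumption:
//   if providerSessionWithConsumer.atomicReadBlockedEpoch() == 1 { // blockListedConsumer
//       return nil, ConsumerIsBlockListed
//   }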
+ singleProviderSession, err := psm.getSessionFromAnActiveConsumer(providerSessionWithConsumer, sessionId, epoch) // after getting session verify relayNum etc.. + if err != nil { + return nil, utils.LavaFormatError("getSessionFromAnActiveConsumer Failure", err, &map[string]string{"RequestedEpoch": strconv.FormatUint(epoch, 10), "sessionId": strconv.FormatUint(sessionId, 10)}) + } + // singleProviderSession is locked at this point. + return singleProviderSession, err +} + +func (psm *ProviderSessionManager) GetSession(address string, epoch uint64, sessionId uint64) (*SingleProviderSession, error) { valid, _ := psm.IsValidEpoch(epoch) if valid { // fast checking to see if epoch is even relevant utils.LavaFormatError("GetSession", InvalidEpochError, &map[string]string{"RequestedEpoch": strconv.FormatUint(epoch, 10)}) return nil, InvalidEpochError } - activeConsumer, providerSessionWithConsumer, err := psm.IsActiveConsumer(epoch, address) + providerSessionWithConsumer, err := psm.IsActiveConsumer(epoch, address) if err != nil { return nil, err } - var singleProviderSession *SingleProviderSession - if activeConsumer { - singleProviderSession, err = psm.getSessionFromAnActiveConsumer(providerSessionWithConsumer, sessionId) // after getting session verify relayNum etc.. - } else if relayNum == 1 { - // if no session found, we need to create and validate few things: pairing, - // return here and call a different function. - // in this function - - singleProviderSession, err = psm.getNewSession(epoch, address) // after getting session verify relayNum etc.. - } else { - return nil, utils.LavaFormatError("GetSession Error, Consumer is not active and relayNum != 1", - NewSessionWithRelayNumError, &map[string]string{"RequestedEpoch": strconv.FormatUint(epoch, 10), "relayNum": strconv.FormatUint(relayNum, 10)}) - } - if err != nil { - utils.LavaFormatError("GetSession Failure", err, &map[string]string{"RequestedEpoch": strconv.FormatUint(epoch, 10)}) - return nil, err - } + return psm.getSingleSessionFromProviderSessionWithConsumer(providerSessionWithConsumer, sessionId, epoch) +} - // validate later relayNum etc.. +func (psm *ProviderSessionManager) registerNewSession(address string, epoch uint64, sessionId uint64) (*ProviderSessionsWithConsumer, error) { + psm.lock.Lock() + defer psm.lock.Unlock() - // singleProviderSession Is locked at this point (currently it isnt dont forget to add locks where needed) - return singleProviderSession, nil -} + valid, _ := psm.IsValidEpoch(epoch) + if valid { // checking again because we are now locked and epoch cant change now. 
+ utils.LavaFormatError("getActiveConsumer", InvalidEpochError, &map[string]string{"RequestedEpoch": strconv.FormatUint(epoch, 10)}) + return nil, InvalidEpochError + } -func (psm *ProviderSessionManager) createNewSingleProviderSession(providerSessionWithConsumer *ProviderSessionsWithConsumer, sessionId uint64) (singleProviderSession *SingleProviderSession, err error) { - providerSessionWithConsumer.Lock.Lock() - defer providerSessionWithConsumer.Lock.Unlock() + mapOfProviderSessionsWithConsumer, foundEpochInMap := psm.sessionsWithAllConsumers[epoch] + if !foundEpochInMap { + mapOfProviderSessionsWithConsumer = make(map[string]*ProviderSessionsWithConsumer) + psm.sessionsWithAllConsumers[epoch] = mapOfProviderSessionsWithConsumer + } - // TODO: create a new single provider session + providerSessionWithConsumer, foundAddressInMap := mapOfProviderSessionsWithConsumer[address] + if !foundAddressInMap { + providerSessionWithConsumer = &ProviderSessionsWithConsumer{ + consumer: address, + epochData: &ProviderSessionsEpochData{}, // TODO add here all the epoch data get from user + } + mapOfProviderSessionsWithConsumer[address] = providerSessionWithConsumer + } + return providerSessionWithConsumer, nil +} - return nil, fmt.Errorf("not implemented") +// TODO add vrfPk and Max compute units. +func (psm *ProviderSessionManager) RegisterProviderSessionWithConsumer(address string, epoch uint64, sessionId uint64) (*SingleProviderSession, error) { + providerSessionWithConsumer, err := psm.IsActiveConsumer(epoch, address) + if err != nil { + if ConsumerNotRegisteredYet.Is(err) { + providerSessionWithConsumer, err = psm.registerNewSession(address, epoch, sessionId) + if err != nil { + return nil, utils.LavaFormatError("RegisterProviderSessionWithConsumer Failed to registerNewSession", err, nil) + } + } else { + return nil, utils.LavaFormatError("RegisterProviderSessionWithConsumer Failed", err, nil) + } + } + return psm.getSingleSessionFromProviderSessionWithConsumer(providerSessionWithConsumer, sessionId, epoch) } func (psm *ProviderSessionManager) getActiveConsumer(epoch uint64, address string) (providerSessionWithConsumer *ProviderSessionsWithConsumer, err error) { @@ -113,19 +133,17 @@ func (psm *ProviderSessionManager) getActiveConsumer(epoch uint64, address strin return nil, ConsumerNotRegisteredYet } -func (psm *ProviderSessionManager) getSessionFromAnActiveConsumer(providerSessionWithConsumer *ProviderSessionsWithConsumer, sessionId uint64) (singleProviderSession *SingleProviderSession, err error) { +func (psm *ProviderSessionManager) getSessionFromAnActiveConsumer(providerSessionWithConsumer *ProviderSessionsWithConsumer, sessionId uint64, epoch uint64) (singleProviderSession *SingleProviderSession, err error) { session, err := providerSessionWithConsumer.GetExistingSession(sessionId) if err == nil { return session, nil } else if SessionDoesNotExist.Is(err) { + // if we don't have a session we need to create a new one. + return providerSessionWithConsumer.createNewSingleProviderSession(sessionId, epoch) + } else { utils.LavaFormatFatal("GetExistingSession Unexpected Error", err, nil) + return nil, err } - // if we don't have a session we need to create a new one. 
- return psm.createNewSingleProviderSession(providerSessionWithConsumer, sessionId) -} - -func (psm *ProviderSessionManager) getNewSession(epoch uint64, address string) (singleProviderSession *SingleProviderSession, err error) { - return } func (psm *ProviderSessionManager) ReportConsumer() (address string, epoch uint64, err error) { @@ -153,7 +171,9 @@ func (psm *ProviderSessionManager) RPCProviderEndpoint() *RPCProviderEndpoint { func (psm *ProviderSessionManager) UpdateEpoch(epoch uint64) { // update the epoch to limit consumer usage + // when updating the blocked epoch, we also need to clean old epochs from the map. sessionsWithAllConsumers } + func (psm *ProviderSessionManager) ProcessUnsubscribeEthereum(subscriptionID string, consumerAddress sdk.AccAddress) error { return fmt.Errorf("not implemented") } diff --git a/protocol/lavasession/provider_session_manager_test.go b/protocol/lavasession/provider_session_manager_test.go index 1af6369b01..3277aeb7a0 100644 --- a/protocol/lavasession/provider_session_manager_test.go +++ b/protocol/lavasession/provider_session_manager_test.go @@ -1 +1,17 @@ package lavasession + +import ( + "sync/atomic" + "testing" + + "github.com/stretchr/testify/require" +) + +// Test the basic functionality of the consumerSessionManager +func TestHappyFlowPSM(t *testing.T) { + var a uint64 = 5 + res_a := atomic.CompareAndSwapUint64(&a, 5, 7) + require.True(t, res_a) + res_b := atomic.CompareAndSwapUint64(&a, 5, 7) + require.False(t, res_b) +} diff --git a/protocol/lavasession/provider_types.go b/protocol/lavasession/provider_types.go index f815707bd8..1a7e777bd5 100644 --- a/protocol/lavasession/provider_types.go +++ b/protocol/lavasession/provider_types.go @@ -2,7 +2,7 @@ package lavasession import ( "context" - "fmt" + "strconv" "sync" "sync/atomic" @@ -49,8 +49,18 @@ type ProviderSessionsWithConsumer struct { Lock sync.RWMutex } -// reads cs.BlockedEpoch atomically -func (pswc *ProviderSessionsWithConsumer) atomicWriteBlockedEpoch(blockStatus uint32) { +type SingleProviderSession struct { + userSessionsParent *ProviderSessionsWithConsumer + CuSum uint64 + LatestRelayCu uint64 + SessionID uint64 + lock sync.RWMutex + RelayNum uint64 + PairingEpoch uint64 +} + +// reads cs.BlockedEpoch atomically, notBlockListedConsumer = 0, blockListedConsumer = 1 +func (pswc *ProviderSessionsWithConsumer) atomicWriteBlockedEpoch(blockStatus uint32) { // rename to blocked consumer not blocked epoch atomic.StoreUint32(&pswc.isBlockListed, blockStatus) } @@ -59,46 +69,117 @@ func (pswc *ProviderSessionsWithConsumer) atomicReadBlockedEpoch() (blockStatus return atomic.LoadUint32(&pswc.isBlockListed) } -func (pswc *ProviderSessionsWithConsumer) readBlockListedAtomic() { +func (pswc *ProviderSessionsWithConsumer) atomicReadMaxComputeUnits() (maxComputeUnits uint64) { + return atomic.LoadUint64(&pswc.epochData.MaxComputeUnits) } -type SingleProviderSession struct { - userSessionsParent *ProviderSessionsWithConsumer - CuSum uint64 - LatestRelayCu uint64 - UniqueIdentifier uint64 - Lock sync.RWMutex - Proof *pairingtypes.RelayRequest // saves last relay request of a session as proof - RelayNum uint64 - PairingEpoch uint64 +func (pswc *ProviderSessionsWithConsumer) atomicReadUsedComputeUnits() (usedComputeUnits uint64) { + return atomic.LoadUint64(&pswc.epochData.UsedComputeUnits) } -func (sps *SingleProviderSession) GetPairingEpoch() uint64 { - return atomic.LoadUint64(&sps.PairingEpoch) +func (pswc *ProviderSessionsWithConsumer) atomicWriteMaxComputeUnits(maxComputeUnits uint64) { + 
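// plain atomic store; MaxComputeUnits is expected to be written once when the consumer is
// registered for the epoch (see the TODO in registerNewSession) and only read afterwards via
// atomicReadMaxComputeUnits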
atomic.StoreUint64(&pswc.epochData.MaxComputeUnits, maxComputeUnits) } -func (sps *SingleProviderSession) SetPairingEpoch(epoch uint64) { - atomic.StoreUint64(&sps.PairingEpoch, epoch) +func (pswc *ProviderSessionsWithConsumer) atomicCompareAndWriteUsedComputeUnits(newUsed uint64, knownUsed uint64) bool { + return atomic.CompareAndSwapUint64(&pswc.epochData.UsedComputeUnits, knownUsed, newUsed) } -func (sps *SingleProviderSession) PrepareSessionForUsage(currentCU uint64, relayRequestTotalCU uint64) error { - // verify locked - // verify total cu in the parent (atomic read) - // verify the proof is right according to relay cu, last proof CU and current proof CU: CuSum + currentCU = relayRequestTotalCU - // set LatestRelayCu (verify it's 0) - // add to parent with atomic - make sure there is no race to corrupt the total cu in the parent - return fmt.Errorf("not implemented") +func (pswc *ProviderSessionsWithConsumer) createNewSingleProviderSession(sessionId uint64, epoch uint64) (session *SingleProviderSession, err error) { + session = &SingleProviderSession{ + userSessionsParent: pswc, + SessionID: sessionId, + PairingEpoch: epoch, + } + session.lock.Lock() + return session, nil } func (pswc *ProviderSessionsWithConsumer) GetExistingSession(sessionId uint64) (session *SingleProviderSession, err error) { pswc.Lock.RLock() defer pswc.Lock.RUnlock() if session, ok := pswc.Sessions[sessionId]; ok { + session.lock.Lock() return session, nil } return nil, SessionDoesNotExist } +func (sps *SingleProviderSession) GetPairingEpoch() uint64 { + return atomic.LoadUint64(&sps.PairingEpoch) +} + +func (sps *SingleProviderSession) SetPairingEpoch(epoch uint64) { + atomic.StoreUint64(&sps.PairingEpoch, epoch) +} + +// Verify the SingleProviderSession is locked when getting to this function, if its not locked throw an error +func (sps *SingleProviderSession) verifyLock() error { + if sps.lock.TryLock() { // verify. + // if we managed to lock throw an error for misuse. + defer sps.lock.Unlock() + return LockMisUseDetectedError + } + return nil +} + +func (sps *SingleProviderSession) PrepareSessionForUsage(currentCU uint64, relayRequestTotalCU uint64, relayNumber uint64) error { + err := sps.verifyLock() // sps is locked + if err != nil { + return utils.LavaFormatError("sps.verifyLock() failed in PrepareSessionForUsage", err, nil) + } + + if sps.RelayNum+1 != relayNumber { + sps.lock.Unlock() // unlock on error + return utils.LavaFormatError("Maximum cu exceeded PrepareSessionForUsage", MaximumCULimitReachedByConsumer, &map[string]string{ + "relayNumber": strconv.FormatUint(relayNumber, 10), + "sps.RelayNum": strconv.FormatUint(sps.RelayNum+1, 10), + }) + } + + maxCu := sps.userSessionsParent.atomicReadMaxComputeUnits() + if relayRequestTotalCU < sps.CuSum+currentCU { + sps.lock.Unlock() // unlock on error + return utils.LavaFormatError("CU mismatch PrepareSessionForUsage, Provider and consumer disagree on CuSum", ProviderConsumerCuMisMatch, &map[string]string{ + "relayRequestTotalCU": strconv.FormatUint(relayRequestTotalCU, 10), + "sps.CuSum": strconv.FormatUint(sps.CuSum, 10), + "currentCU": strconv.FormatUint(currentCU, 10), + }) + } + + // this must happen first, as we also validate and add the used cu to parent here + err = sps.validateAndAddUsedCU(currentCU, maxCu) + if err != nil { + sps.lock.Unlock() // unlock on error + return err + } + // finished validating, can add all info. + sps.LatestRelayCu = currentCU // 1. update latest + sps.CuSum = relayRequestTotalCU // 2. 
update CuSum, if consumer wants to pay more, let it + sps.RelayNum = sps.RelayNum + 1 + + return nil +} + +func (sps *SingleProviderSession) validateAndAddUsedCU(currentCU uint64, maxCu uint64) error { + for { + usedCu := sps.userSessionsParent.atomicReadUsedComputeUnits() // check used cu now + if usedCu+currentCU > maxCu { + return utils.LavaFormatError("Maximum cu exceeded PrepareSessionForUsage", MaximumCULimitReachedByConsumer, &map[string]string{ + "usedCu": strconv.FormatUint(usedCu, 10), + "currentCU": strconv.FormatUint(currentCU, 10), + "maxCu": strconv.FormatUint(maxCu, 10), + }) + } + // compare usedCu + current cu vs usedCu, if swap succeeds, return otherwise try again + // this can happen when multiple sessions are adding their cu at the same time. + // comparing and adding is protecting against race conditions as the parent is not locked. + if sps.userSessionsParent.atomicCompareAndWriteUsedComputeUnits(usedCu+currentCU, usedCu) { + return nil + } + } +} + type StateQuery interface { QueryVerifyPairing(ctx context.Context, consumer string, blockHeight uint64) } diff --git a/protocol/rpcprovider/rpcprovider_server.go b/protocol/rpcprovider/rpcprovider_server.go index 1460a207a3..4fb9ccc04f 100644 --- a/protocol/rpcprovider/rpcprovider_server.go +++ b/protocol/rpcprovider/rpcprovider_server.go @@ -104,8 +104,8 @@ func (rpcps *RPCProviderServer) Relay(ctx context.Context, request *pairingtypes return nil, rpcps.handleRelayErrorStatus(err) } relayCU := chainMessage.GetServiceApi().ComputeUnits - err = relaySession.PrepareSessionForUsage(relayCU, request.CuSum) - if err != nil { + err = relaySession.PrepareSessionForUsage(relayCU, request.CuSum, request.RelayNum) + if err != nil { // TODO: any error here we need to convert to session out of sync error and return that to the user return nil, rpcps.handleRelayErrorStatus(err) } reply, err := rpcps.TryRelay(ctx, request, consumerAddress, chainMessage) @@ -174,9 +174,18 @@ func (rpcps *RPCProviderServer) initRelay(ctx context.Context, request *pairingt // handle non data reliability relays if request.DataReliability == nil { // regular session, verifies pairing epoch and relay number - singleProviderSession, err = rpcps.providerSessionManager.GetSession(extractedConsumerAddress.String(), uint64(request.BlockHeight), request.RelayNum, request.SessionId) + singleProviderSession, err = rpcps.providerSessionManager.GetSession(extractedConsumerAddress.String(), uint64(request.BlockHeight), request.SessionId) if err != nil { - return nil, nil, utils.LavaFormatError("failed to get a provider session", err, &map[string]string{"sessionID": strconv.FormatUint(request.SessionId, 10), "consumer": extractedConsumerAddress.String(), "relayNum": strconv.FormatUint(request.RelayNum, 10)}) + if lavasession.ConsumerNotRegisteredYet.Is(err) { + // TODO:: validate consumer address get max cu and vrf data and transfer register. 
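The compare-and-swap loop in validateAndAddUsedCU above is the heart of the lock-free CU accounting: the parent ProviderSessionsWithConsumer is deliberately not locked, so concurrent sessions race to add their CU and simply retry when the swap fails. A condensed, standalone sketch of that pattern follows; the names (addUsedCUWithCap, errMaxCUExceeded) are illustrative only, not the repository's API.

package main

import (
	"errors"
	"fmt"
	"sync"
	"sync/atomic"
)

var errMaxCUExceeded = errors.New("maximum compute units exceeded")

// addUsedCUWithCap atomically adds delta to *used unless the result would exceed maxCU.
// Like validateAndAddUsedCU: load the current value, validate, then CAS(old -> old+delta);
// if another goroutine changed the counter in between, reload and try again.
func addUsedCUWithCap(used *uint64, delta uint64, maxCU uint64) error {
	for {
		current := atomic.LoadUint64(used)
		if current+delta > maxCU {
			return errMaxCUExceeded
		}
		if atomic.CompareAndSwapUint64(used, current, current+delta) {
			return nil
		}
	}
}

func main() {
	var used, rejected uint64
	wg := sync.WaitGroup{}
	for i := 0; i < 100; i++ { // 100 concurrent 10-CU relays against a 500-CU cap
		wg.Add(1)
		go func() {
			defer wg.Done()
			if err := addUsedCUWithCap(&used, 10, 500); err != nil {
				atomic.AddUint64(&rejected, 1)
			}
		}()
	}
	wg.Wait()
	fmt.Println("used:", used, "rejected:", rejected) // used: 500 rejected: 50
}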
+ + singleProviderSession, err = rpcps.providerSessionManager.RegisterProviderSessionWithConsumer(extractedConsumerAddress.String(), uint64(request.BlockHeight), request.SessionId) + if err != nil { + return nil, nil, utils.LavaFormatError("failed to RegisterProviderSessionWithConsumer", err, &map[string]string{"sessionID": strconv.FormatUint(request.SessionId, 10), "consumer": extractedConsumerAddress.String(), "relayNum": strconv.FormatUint(request.RelayNum, 10)}) + } + } else { + return nil, nil, utils.LavaFormatError("failed to get a provider session", err, &map[string]string{"sessionID": strconv.FormatUint(request.SessionId, 10), "consumer": extractedConsumerAddress.String(), "relayNum": strconv.FormatUint(request.RelayNum, 10)}) + } } return singleProviderSession, extractedConsumerAddress, nil } From 4b2b7b179ab921735b1ef3eff27bd4f745c21aee Mon Sep 17 00:00:00 2001 From: Ran Mishael Date: Thu, 16 Feb 2023 10:31:23 +0100 Subject: [PATCH 018/123] WIP --- .../lavasession/provider_session_manager.go | 2 +- protocol/lavasession/provider_types.go | 18 +++++++++--------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/protocol/lavasession/provider_session_manager.go b/protocol/lavasession/provider_session_manager.go index 80f0860348..4bbc9e8a1e 100644 --- a/protocol/lavasession/provider_session_manager.go +++ b/protocol/lavasession/provider_session_manager.go @@ -54,7 +54,7 @@ func (psm *ProviderSessionManager) getSingleSessionFromProviderSessionWithConsum return singleProviderSession, err } -func (psm *ProviderSessionManager) GetSession(address string, epoch uint64, sessionId uint64) (*SingleProviderSession, error) { +func (psm *ProviderSessionManager) GetSession(address string, epoch uint64, sessionId uint64, relayNumber uint64) (*SingleProviderSession, error) { valid, _ := psm.IsValidEpoch(epoch) if valid { // fast checking to see if epoch is even relevant utils.LavaFormatError("GetSession", InvalidEpochError, &map[string]string{"RequestedEpoch": strconv.FormatUint(epoch, 10)}) diff --git a/protocol/lavasession/provider_types.go b/protocol/lavasession/provider_types.go index 96e915d6ab..c8bb5d387b 100644 --- a/protocol/lavasession/provider_types.go +++ b/protocol/lavasession/provider_types.go @@ -130,19 +130,19 @@ func (sps *SingleProviderSession) verifyLock() error { return nil } -func (sps *SingleProviderSession) PrepareSessionForUsage(currentCU uint64, relayRequestTotalCU uint64, relayNumber uint64) error { +func (sps *SingleProviderSession) PrepareSessionForUsage(currentCU uint64, relayRequestTotalCU uint64) error { err := sps.verifyLock() // sps is locked if err != nil { return utils.LavaFormatError("sps.verifyLock() failed in PrepareSessionForUsage", err, nil) } - if sps.RelayNum+1 != relayNumber { - sps.lock.Unlock() // unlock on error - return utils.LavaFormatError("Maximum cu exceeded PrepareSessionForUsage", MaximumCULimitReachedByConsumer, &map[string]string{ - "relayNumber": strconv.FormatUint(relayNumber, 10), - "sps.RelayNum": strconv.FormatUint(sps.RelayNum+1, 10), - }) - } + // if sps.RelayNum+1 != relayNumber { + // sps.lock.Unlock() // unlock on error + // return utils.LavaFormatError("Maximum cu exceeded PrepareSessionForUsage", MaximumCULimitReachedByConsumer, &map[string]string{ + // "relayNumber": strconv.FormatUint(relayNumber, 10), + // "sps.RelayNum": strconv.FormatUint(sps.RelayNum+1, 10), + // }) + // } maxCu := sps.userSessionsParent.atomicReadMaxComputeUnits() if relayRequestTotalCU < sps.CuSum+currentCU { @@ -163,7 +163,7 @@ func (sps 
*SingleProviderSession) PrepareSessionForUsage(currentCU uint64, relay // finished validating, can add all info. sps.LatestRelayCu = currentCU // 1. update latest sps.CuSum = relayRequestTotalCU // 2. update CuSum, if consumer wants to pay more, let it - sps.RelayNum = sps.RelayNum + 1 + // sps.RelayNum = sps.RelayNum + 1 return nil } From a215cc91298b29a5849c2df1b3e69fae70b6bd9b Mon Sep 17 00:00:00 2001 From: Ran Mishael Date: Thu, 16 Feb 2023 10:32:03 +0100 Subject: [PATCH 019/123] WIP --- protocol/rpcprovider/rpcprovider_server.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/protocol/rpcprovider/rpcprovider_server.go b/protocol/rpcprovider/rpcprovider_server.go index 1e2add7217..277d487adb 100644 --- a/protocol/rpcprovider/rpcprovider_server.go +++ b/protocol/rpcprovider/rpcprovider_server.go @@ -105,7 +105,7 @@ func (rpcps *RPCProviderServer) Relay(ctx context.Context, request *pairingtypes return nil, rpcps.handleRelayErrorStatus(err) } relayCU := chainMessage.GetServiceApi().ComputeUnits - err = relaySession.PrepareSessionForUsage(relayCU, request.CuSum, request.RelayNum) + err = relaySession.PrepareSessionForUsage(relayCU, request.CuSum) if err != nil { // TODO: any error here we need to convert to session out of sync error and return that to the user return nil, rpcps.handleRelayErrorStatus(err) } @@ -255,7 +255,7 @@ func (rpcps *RPCProviderServer) initRelay(ctx context.Context, request *pairingt // handle non data reliability relays if request.DataReliability == nil { // regular session, verifies pairing epoch and relay number - singleProviderSession, err = rpcps.providerSessionManager.GetSession(extractedConsumerAddress.String(), uint64(request.BlockHeight), request.SessionId) + singleProviderSession, err = rpcps.providerSessionManager.GetSession(extractedConsumerAddress.String(), uint64(request.BlockHeight), request.SessionId, request.RelayNum) if err != nil { if lavasession.ConsumerNotRegisteredYet.Is(err) { // TODO:: validate consumer address get max cu and vrf data and transfer register. 
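The GetSession flow in the hunks above leans on sentinel errors: the provider first tries to fetch an existing session and only registers the consumer when the specific ConsumerNotRegisteredYet error comes back; any other error aborts the relay. A minimal stand-in for that shape using the standard errors package (the repository registers its errors through cosmos-sdk's sdkerrors, so the types and names below are illustrative):

package main

import (
	"errors"
	"fmt"
)

// errConsumerNotRegistered stands in for lavasession.ConsumerNotRegisteredYet.
var errConsumerNotRegistered = errors.New("consumer not registered yet")

type session struct{ id uint64 }

type sessionManager struct{ sessions map[uint64]*session }

func (m *sessionManager) getSession(id uint64) (*session, error) {
	if s, ok := m.sessions[id]; ok {
		return s, nil
	}
	return nil, fmt.Errorf("getSession %d: %w", id, errConsumerNotRegistered)
}

func (m *sessionManager) registerSession(id uint64) (*session, error) {
	s := &session{id: id}
	m.sessions[id] = s
	return s, nil
}

// getOrRegister mirrors the branch structure around GetSession: the "not registered"
// sentinel triggers registration, every other error is returned to the caller.
func (m *sessionManager) getOrRegister(id uint64) (*session, error) {
	s, err := m.getSession(id)
	if err == nil {
		return s, nil
	}
	if !errors.Is(err, errConsumerNotRegistered) {
		return nil, err
	}
	return m.registerSession(id)
}

func main() {
	m := &sessionManager{sessions: map[uint64]*session{}}
	s, _ := m.getOrRegister(7) // miss, so the consumer gets registered
	fmt.Println("session:", s.id)
}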
From 0c2bdc92322ce5f04e36438de6ef8e00343620e9 Mon Sep 17 00:00:00 2001 From: omer mishael Date: Thu, 16 Feb 2023 11:59:53 +0200 Subject: [PATCH 020/123] continued subscription code --- .../rpcprovider/rewardserver/reward_server.go | 8 ++ protocol/rpcprovider/rpcprovider_server.go | 91 +++++++++++-------- 2 files changed, 62 insertions(+), 37 deletions(-) diff --git a/protocol/rpcprovider/rewardserver/reward_server.go b/protocol/rpcprovider/rewardserver/reward_server.go index 281a39b475..d739cc8214 100644 --- a/protocol/rpcprovider/rewardserver/reward_server.go +++ b/protocol/rpcprovider/rewardserver/reward_server.go @@ -30,3 +30,11 @@ func NewRewardServer(rewardsTxSender RewardsTxSender) *RewardServer { rws.rewardsTxSender = rewardsTxSender return rws } + +func SubscribeStarted(consumer string, epoch uint64, subscribeID string) { + // hold off reward claims for subscription while this is still active +} + +func SubscribeEnded(consumer string, epoch uint64, subscribeID string) { + // can collect now +} diff --git a/protocol/rpcprovider/rpcprovider_server.go b/protocol/rpcprovider/rpcprovider_server.go index 277d487adb..2cccff9a6f 100644 --- a/protocol/rpcprovider/rpcprovider_server.go +++ b/protocol/rpcprovider/rpcprovider_server.go @@ -47,6 +47,8 @@ type ReliabilityManagerInf interface { type RewardServerInf interface { SendNewProof(ctx context.Context, proof *pairingtypes.RelayRequest, epoch uint64, consumerAddr string) SendNewDataReliabilityProof(ctx context.Context, dataReliability *pairingtypes.VRFData, epoch uint64, consumerAddr string) + SubscribeStarted(consumer string, epoch uint64, subscribeID string) + SubscribeEnded(consumer string, epoch uint64, subscribeID string) } type StateTrackerInf interface { @@ -163,18 +165,25 @@ func (rpcps *RPCProviderServer) RelaySubscribe(request *pairingtypes.RelayReques if err != nil { return rpcps.handleRelayErrorStatus(err) } - err = rpcps.TryRelaySubscribe(ctx, request, srv, chainMessage, consumerAddress) // this function does not return until subscription ends + subscribed, err := rpcps.TryRelaySubscribe(ctx, request, srv, chainMessage, consumerAddress) // this function does not return until subscription ends + if subscribed { + // meaning we created a subscription and used it for at least a message + relayError := rpcps.providerSessionManager.OnSessionDone(relaySession, request) // TODO: when we pay as u go on subscription this will need to change + if relayError != nil { + err = sdkerrors.Wrapf(relayError, "OnSession Done failure: "+err.Error()) + } + } return err } -func (rpcps *RPCProviderServer) TryRelaySubscribe(ctx context.Context, request *pairingtypes.RelayRequest, srv pairingtypes.Relayer_RelaySubscribeServer, chainMessage chainlib.ChainMessage, consumerAddress sdk.AccAddress) error { +func (rpcps *RPCProviderServer) TryRelaySubscribe(ctx context.Context, request *pairingtypes.RelayRequest, srv pairingtypes.Relayer_RelaySubscribeServer, chainMessage chainlib.ChainMessage, consumerAddress sdk.AccAddress) (subscribed bool, errRet error) { var reply *pairingtypes.RelayReply var clientSub *rpcclient.ClientSubscription var subscriptionID string subscribeRepliesChan := make(chan interface{}) reply, subscriptionID, clientSub, err := rpcps.chainProxy.SendNodeMsg(ctx, subscribeRepliesChan, chainMessage) if err != nil { - return utils.LavaFormatError("Subscription failed", err, nil) + return false, utils.LavaFormatError("Subscription failed", err, nil) } subscription := &lavasession.RPCSubscription{ Id: subscriptionID, @@ -183,47 +192,55 @@ 
func (rpcps *RPCProviderServer) TryRelaySubscribe(ctx context.Context, request * } err = rpcps.providerSessionManager.NewSubscription(consumerAddress.String(), uint64(request.BlockHeight), subscription) if err != nil { - return err + return false, err } - err = srv.Send(reply) // this reply contains the RPC ID - if err != nil { - utils.LavaFormatError("Error getting RPC ID", err, nil) - } - - for { - select { - case <-clientSub.Err(): - utils.LavaFormatError("client sub", err, nil) - // delete this connection from the subs map - rpcps.providerSessionManager.SubscriptionFailure(consumerAddress.String(), uint64(request.BlockHeight), subscriptionID) - return err - case subscribeReply := <-subscribeRepliesChan: - data, err := json.Marshal(subscribeReply) - if err != nil { - utils.LavaFormatError("client sub unmarshal", err, nil) - rpcps.providerSessionManager.SubscriptionFailure(consumerAddress.String(), uint64(request.BlockHeight), subscriptionID) - return err - } + rpcps.rewardServer.SubscribeStarted(consumerAddress.String(), uint64(request.BlockHeight), subscriptionID) + processSubscribeMessages := func() (subscribed bool, errRet error) { + err = srv.Send(reply) // this reply contains the RPC ID + if err != nil { + utils.LavaFormatError("Error getting RPC ID", err, nil) + } else { + subscribed = true + } + + for { + select { + case <-clientSub.Err(): + utils.LavaFormatError("client sub", err, nil) + // delete this connection from the subs map + + return subscribed, err + case subscribeReply := <-subscribeRepliesChan: + data, err := json.Marshal(subscribeReply) + if err != nil { + return subscribed, utils.LavaFormatError("client sub unmarshal", err, nil) + } - err = srv.Send( - &pairingtypes.RelayReply{ - Data: data, - }, - ) - if err != nil { - // usually triggered when client closes connection - if strings.Contains(err.Error(), "Canceled desc = context canceled") { - err = utils.LavaFormatWarning("Client closed connection", err, nil) + err = srv.Send( + &pairingtypes.RelayReply{ + Data: data, + }, + ) + if err != nil { + // usually triggered when client closes connection + if strings.Contains(err.Error(), "Canceled desc = context canceled") { + err = utils.LavaFormatWarning("Client closed connection", err, nil) + } else { + err = utils.LavaFormatError("srv.Send", err, nil) + } + return subscribed, err } else { - err = utils.LavaFormatError("srv.Send", err, nil) + subscribed = true } - rpcps.providerSessionManager.SubscriptionFailure(consumerAddress.String(), uint64(request.BlockHeight), subscriptionID) - return err - } - utils.LavaFormatDebug("Sending data", &map[string]string{"data": string(data)}) + utils.LavaFormatDebug("Sending data", &map[string]string{"data": string(data)}) + } } } + subscribed, errRet = processSubscribeMessages() + rpcps.providerSessionManager.SubscriptionFailure(consumerAddress.String(), uint64(request.BlockHeight), subscriptionID) + rpcps.rewardServer.SubscribeEnded(consumerAddress.String(), uint64(request.BlockHeight), subscriptionID) + return } // verifies basic relay fields, and gets a provider session From 42718520f1f9cd5569673f899c42cd8bf69fcebd Mon Sep 17 00:00:00 2001 From: omer mishael Date: Thu, 16 Feb 2023 12:20:49 +0200 Subject: [PATCH 021/123] finished provide side subscribe --- protocol/lavasession/errors.go | 3 ++- protocol/rpcprovider/rpcprovider_server.go | 23 ++++++++++++++++++++-- 2 files changed, 23 insertions(+), 3 deletions(-) diff --git a/protocol/lavasession/errors.go b/protocol/lavasession/errors.go index 9265ce8aa0..e1679a6567 100644 --- 
a/protocol/lavasession/errors.go +++ b/protocol/lavasession/errors.go @@ -35,5 +35,6 @@ var ( // Provider Side Errors SessionDoesNotExist = sdkerrors.New("SessionDoesNotExist Error", 885, "This Session Id Does Not Exist.") MaximumCULimitReachedByConsumer = sdkerrors.New("MaximumCULimitReachedByConsumer Error", 886, "Consumer reached maximum cu limit") ProviderConsumerCuMisMatch = sdkerrors.New("ProviderConsumerCuMisMatch Error", 887, "Provider and Consumer disagree on total cu for session") - RelayNumberMismatch = sdkerrors.New("RelayNumberMismatch Error", 887, "Provider and Consumer disagree on relay number for session") + RelayNumberMismatch = sdkerrors.New("RelayNumberMismatch Error", 888, "Provider and Consumer disagree on relay number for session") + SubscriptionInitiationError = sdkerrors.New("SubscriptionInitiationError Error", 889, "provider failed initiating subscription") ) diff --git a/protocol/rpcprovider/rpcprovider_server.go b/protocol/rpcprovider/rpcprovider_server.go index 2cccff9a6f..1e1c5604ab 100644 --- a/protocol/rpcprovider/rpcprovider_server.go +++ b/protocol/rpcprovider/rpcprovider_server.go @@ -118,7 +118,7 @@ func (rpcps *RPCProviderServer) Relay(ctx context.Context, request *pairingtypes if relayFailureError != nil { err = sdkerrors.Wrapf(relayFailureError, "On relay failure: "+err.Error()) } - utils.LavaFormatError("TryRelay Failed", err, &map[string]string{ + err = utils.LavaFormatError("TryRelay Failed", err, &map[string]string{ "request.SessionId": strconv.FormatUint(request.SessionId, 10), "request.userAddr": consumerAddress.String(), }) @@ -145,6 +145,9 @@ func (rpcps *RPCProviderServer) Relay(ctx context.Context, request *pairingtypes return reply, rpcps.handleRelayErrorStatus(err) } func (rpcps *RPCProviderServer) RelaySubscribe(request *pairingtypes.RelayRequest, srv pairingtypes.Relayer_RelaySubscribeServer) error { + if request.DataReliability != nil { + return utils.LavaFormatError("subscribe data reliability not supported", nil, nil) + } utils.LavaFormatDebug("Provider got relay subscribe request", &map[string]string{ "request.SessionId": strconv.FormatUint(request.SessionId, 10), "request.relayNumber": strconv.FormatUint(request.RelayNum, 10), @@ -171,6 +174,22 @@ func (rpcps *RPCProviderServer) RelaySubscribe(request *pairingtypes.RelayReques relayError := rpcps.providerSessionManager.OnSessionDone(relaySession, request) // TODO: when we pay as u go on subscription this will need to change if relayError != nil { err = sdkerrors.Wrapf(relayError, "OnSession Done failure: "+err.Error()) + } else { + rpcps.rewardServer.SendNewProof(ctx, request.ShallowCopy(), relaySession.PairingEpoch, consumerAddress.String()) + utils.LavaFormatDebug("Provider finished subscribing", &map[string]string{ + "request.SessionId": strconv.FormatUint(request.SessionId, 10), + "request.relayNumber": strconv.FormatUint(request.RelayNum, 10), + "request.cu": strconv.FormatUint(request.CuSum, 10), + "termination": err.Error(), + }) + } + } else { + // we didn't even manage to subscribe + relayFailureError := rpcps.providerSessionManager.OnSessionFailure(relaySession) + if relayFailureError != nil { + err = utils.LavaFormatError("failed subscribing", lavasession.SubscriptionInitiationError, &map[string]string{"onSessionFailureError": relayFailureError.Error()}) + } else { + err = utils.LavaFormatError("failed subscribing", lavasession.SubscriptionInitiationError, nil) } } return err @@ -240,7 +259,7 @@ func (rpcps *RPCProviderServer) TryRelaySubscribe(ctx context.Context, request 
* subscribed, errRet = processSubscribeMessages() rpcps.providerSessionManager.SubscriptionFailure(consumerAddress.String(), uint64(request.BlockHeight), subscriptionID) rpcps.rewardServer.SubscribeEnded(consumerAddress.String(), uint64(request.BlockHeight), subscriptionID) - return + return subscribed, errRet } // verifies basic relay fields, and gets a provider session From 30d26d494a6f246d03b157b90866fec91f617093 Mon Sep 17 00:00:00 2001 From: omer mishael Date: Thu, 16 Feb 2023 12:29:53 +0200 Subject: [PATCH 022/123] handle subscription failure in consumer --- protocol/rpcconsumer/rpcconsumer_server.go | 10 ++++++---- protocol/rpcprovider/rewardserver/reward_server.go | 4 ++-- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/protocol/rpcconsumer/rpcconsumer_server.go b/protocol/rpcconsumer/rpcconsumer_server.go index 4527c87d88..438448cd88 100644 --- a/protocol/rpcconsumer/rpcconsumer_server.go +++ b/protocol/rpcconsumer/rpcconsumer_server.go @@ -290,11 +290,13 @@ func (rpccs *RPCConsumerServer) relaySubscriptionInner(ctx context.Context, endp replyServer, err := endpointClient.RelaySubscribe(ctx, relayResult.Request) // relayLatency := time.Since(relaySentTime) // TODO: use subscription QoS if err != nil { - errReport := rpccs.consumerSessionManager.OnSessionFailure(singleConsumerSession, err) - if errReport != nil { - return relayResult, utils.LavaFormatError("subscribe relay failed onSessionFailure errored", errReport, &map[string]string{"original error": err.Error()}) + if lavasession.SubscriptionInitiationError.Is(err) { + errReport := rpccs.consumerSessionManager.OnSessionFailure(singleConsumerSession, err) + if errReport != nil { + return relayResult, utils.LavaFormatError("subscribe relay failed onSessionFailure errored", errReport, &map[string]string{"original error": err.Error()}) + } + return relayResult, err } - return relayResult, err } relayResult.ReplyServer = &replyServer err = rpccs.consumerSessionManager.OnSessionDoneIncreaseRelayAndCu(singleConsumerSession) diff --git a/protocol/rpcprovider/rewardserver/reward_server.go b/protocol/rpcprovider/rewardserver/reward_server.go index d739cc8214..72ec671be6 100644 --- a/protocol/rpcprovider/rewardserver/reward_server.go +++ b/protocol/rpcprovider/rewardserver/reward_server.go @@ -31,10 +31,10 @@ func NewRewardServer(rewardsTxSender RewardsTxSender) *RewardServer { return rws } -func SubscribeStarted(consumer string, epoch uint64, subscribeID string) { +func (rws *RewardServer) SubscribeStarted(consumer string, epoch uint64, subscribeID string) { // hold off reward claims for subscription while this is still active } -func SubscribeEnded(consumer string, epoch uint64, subscribeID string) { +func (rws *RewardServer) SubscribeEnded(consumer string, epoch uint64, subscribeID string) { // can collect now } From 3b6f47eb350f60913a118be8d3009ce4ee32e0be Mon Sep 17 00:00:00 2001 From: omer mishael Date: Thu, 16 Feb 2023 12:34:22 +0200 Subject: [PATCH 023/123] handle subscribe success without an error on provider side --- protocol/rpcconsumer/rpcconsumer_server.go | 10 ++++------ protocol/rpcprovider/rpcprovider_server.go | 1 + 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/protocol/rpcconsumer/rpcconsumer_server.go b/protocol/rpcconsumer/rpcconsumer_server.go index 438448cd88..4527c87d88 100644 --- a/protocol/rpcconsumer/rpcconsumer_server.go +++ b/protocol/rpcconsumer/rpcconsumer_server.go @@ -290,13 +290,11 @@ func (rpccs *RPCConsumerServer) relaySubscriptionInner(ctx context.Context, endp replyServer, 
err := endpointClient.RelaySubscribe(ctx, relayResult.Request) // relayLatency := time.Since(relaySentTime) // TODO: use subscription QoS if err != nil { - if lavasession.SubscriptionInitiationError.Is(err) { - errReport := rpccs.consumerSessionManager.OnSessionFailure(singleConsumerSession, err) - if errReport != nil { - return relayResult, utils.LavaFormatError("subscribe relay failed onSessionFailure errored", errReport, &map[string]string{"original error": err.Error()}) - } - return relayResult, err + errReport := rpccs.consumerSessionManager.OnSessionFailure(singleConsumerSession, err) + if errReport != nil { + return relayResult, utils.LavaFormatError("subscribe relay failed onSessionFailure errored", errReport, &map[string]string{"original error": err.Error()}) } + return relayResult, err } relayResult.ReplyServer = &replyServer err = rpccs.consumerSessionManager.OnSessionDoneIncreaseRelayAndCu(singleConsumerSession) diff --git a/protocol/rpcprovider/rpcprovider_server.go b/protocol/rpcprovider/rpcprovider_server.go index 1e1c5604ab..9344998ae1 100644 --- a/protocol/rpcprovider/rpcprovider_server.go +++ b/protocol/rpcprovider/rpcprovider_server.go @@ -182,6 +182,7 @@ func (rpcps *RPCProviderServer) RelaySubscribe(request *pairingtypes.RelayReques "request.cu": strconv.FormatUint(request.CuSum, 10), "termination": err.Error(), }) + err = nil // we don't want to return an error here } } else { // we didn't even manage to subscribe From e9244895729bd353fe7fb2b5c7099e7d80b9dfee Mon Sep 17 00:00:00 2001 From: omer mishael Date: Thu, 16 Feb 2023 12:37:12 +0200 Subject: [PATCH 024/123] handle error status on relaySubscribe --- protocol/rpcprovider/rpcprovider_server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/protocol/rpcprovider/rpcprovider_server.go b/protocol/rpcprovider/rpcprovider_server.go index 9344998ae1..35bbdb12e0 100644 --- a/protocol/rpcprovider/rpcprovider_server.go +++ b/protocol/rpcprovider/rpcprovider_server.go @@ -193,7 +193,7 @@ func (rpcps *RPCProviderServer) RelaySubscribe(request *pairingtypes.RelayReques err = utils.LavaFormatError("failed subscribing", lavasession.SubscriptionInitiationError, nil) } } - return err + return rpcps.handleRelayErrorStatus(err) } func (rpcps *RPCProviderServer) TryRelaySubscribe(ctx context.Context, request *pairingtypes.RelayRequest, srv pairingtypes.Relayer_RelaySubscribeServer, chainMessage chainlib.ChainMessage, consumerAddress sdk.AccAddress) (subscribed bool, errRet error) { From 15eed874f158af224bad06aa9b694d503ed4e03e Mon Sep 17 00:00:00 2001 From: omer mishael Date: Thu, 16 Feb 2023 14:53:58 +0200 Subject: [PATCH 025/123] added cache to statequery --- config/rpcprovider.yml | 13 ++++ protocol/rpcconsumer/rpcconsumer_server.go | 3 + protocol/statetracker/state_query.go | 85 ++++++++++++++++------ 3 files changed, 77 insertions(+), 24 deletions(-) create mode 100644 config/rpcprovider.yml diff --git a/config/rpcprovider.yml b/config/rpcprovider.yml new file mode 100644 index 0000000000..b5b69efe1b --- /dev/null +++ b/config/rpcprovider.yml @@ -0,0 +1,13 @@ +endpoints: + - chain-id: COS3 + api-interface: tendermintrpc + network-address: 127.0.0.1:3333 + node-url: ["wss://tendermint-websocket.xyz","https://tendermint-https.xyz"] + - chain-id: COS3 + api-interface: rest + network-address: 127.0.0.1:3333 + node-url: ["https://tendermint-rest.xyz"] + - chain-id: ETH1 + api-interface: jsonrpc + network-address: 127.0.0.1:3333 + node-url: ["wss://ethereum-websocket.xyz"] \ No newline at end of file diff --git 
a/protocol/rpcconsumer/rpcconsumer_server.go b/protocol/rpcconsumer/rpcconsumer_server.go index 4527c87d88..5ddd4d6d6e 100644 --- a/protocol/rpcconsumer/rpcconsumer_server.go +++ b/protocol/rpcconsumer/rpcconsumer_server.go @@ -296,6 +296,9 @@ func (rpccs *RPCConsumerServer) relaySubscriptionInner(ctx context.Context, endp } return relayResult, err } + // TODO: need to check that if provider fails and returns error, this is reflected here and we run onSessionDone + // my thoughts are that this fails if the grpc fails not if the provider fails, and if the provider returns an error this is reflected by the Recv function on the chainListener calling us here + // and this is too late relayResult.ReplyServer = &replyServer err = rpccs.consumerSessionManager.OnSessionDoneIncreaseRelayAndCu(singleConsumerSession) return relayResult, err diff --git a/protocol/statetracker/state_query.go b/protocol/statetracker/state_query.go index d196e3449f..3a8801d29c 100644 --- a/protocol/statetracker/state_query.go +++ b/protocol/statetracker/state_query.go @@ -4,8 +4,10 @@ import ( "context" "fmt" "strconv" + "time" "github.com/cosmos/cosmos-sdk/client" + "github.com/dgraph-io/ristretto" reliabilitymanager "github.com/lavanet/lava/protocol/rpcprovider/reliabilitymanager" "github.com/lavanet/lava/utils" conflicttypes "github.com/lavanet/lava/x/conflict/types" @@ -14,10 +16,20 @@ import ( spectypes "github.com/lavanet/lava/x/spec/types" ) +const ( + CacheMaxCost = 10 * 1024 * 1024 // 10M cost + CacheNumCounters = 100000 // expect 10K items + DefaultTimeToLiveExpiration = 30 * time.Minute + PairingRespKey = "pairing-resp" + VerifyPairingRespKey = "verify-pairing-resp" + VrfPkAndMaxCuResponseKey = "vrf-and-max-cu-resp" +) + type StateQuery struct { SpecQueryClient spectypes.QueryClient PairingQueryClient pairingtypes.QueryClient EpochStorageQueryClient epochstoragetypes.QueryClient + ResponsesCache *ristretto.Cache } func NewStateQuery(ctx context.Context, clientCtx client.Context) *StateQuery { @@ -25,6 +37,11 @@ func NewStateQuery(ctx context.Context, clientCtx client.Context) *StateQuery { sq.SpecQueryClient = spectypes.NewQueryClient(clientCtx) sq.PairingQueryClient = pairingtypes.NewQueryClient(clientCtx) sq.EpochStorageQueryClient = epochstoragetypes.NewQueryClient(clientCtx) + cache, err := ristretto.NewCache(&ristretto.Config{NumCounters: CacheNumCounters, MaxCost: CacheMaxCost, BufferItems: 64}) + if err != nil { + utils.LavaFormatFatal("failed setting up cache for queries", err, nil) + } + sq.ResponsesCache = cache return sq } @@ -40,30 +57,34 @@ func (csq *StateQuery) GetSpec(ctx context.Context, chainID string) (*spectypes. 
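The ConsumerStateQuery and ProviderStateQuery hunks that follow all read through the ristretto cache that NewStateQuery creates above: Get returns an untyped interface{} plus a found flag, so every caller type-asserts the cached value and falls back to a chain query on a miss. A compact, hypothetical demo of that behaviour, reusing the same sizing constants (pairingResponse is a stand-in type; ristretto applies Set/SetWithTTL asynchronously, hence the Wait call):

package main

import (
	"fmt"
	"time"

	"github.com/dgraph-io/ristretto"
)

type pairingResponse struct{ currentEpoch uint64 }

func main() {
	// same sizing as state_query.go: ~10K expected items, 10M total cost
	cache, err := ristretto.NewCache(&ristretto.Config{
		NumCounters: 100000,
		MaxCost:     10 * 1024 * 1024,
		BufferItems: 64,
	})
	if err != nil {
		panic(err)
	}

	cache.SetWithTTL("pairing-resp"+"LAV1", &pairingResponse{currentEpoch: 42}, 1, 30*time.Minute)
	cache.Wait() // writes are buffered; flush them so the Get below can observe the entry

	cachedInterface, found := cache.Get("pairing-resp" + "LAV1")
	if !found || cachedInterface == nil {
		fmt.Println("cache miss, would query the chain here")
		return
	}
	cachedResp, ok := cachedInterface.(*pairingResponse) // Get returns interface{}, so callers must assert
	if !ok {
		fmt.Println("unexpected cached type, would re-query and overwrite")
		return
	}
	fmt.Println("cached epoch:", cachedResp.currentEpoch)
}

The 30-minute TTL in the demo matches DefaultTimeToLiveExpiration, which is what replaces the old never-expiring cachedPairings and cachedEntries maps removed in this commit.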
type ConsumerStateQuery struct { StateQuery - clientCtx client.Context - cachedPairings map[string]*pairingtypes.QueryGetPairingResponse // TODO: replace this with TTL so we don't keep entries forever + clientCtx client.Context + lastChainID string } func NewConsumerStateQuery(ctx context.Context, clientCtx client.Context) *ConsumerStateQuery { - csq := &ConsumerStateQuery{StateQuery: *NewStateQuery(ctx, clientCtx), clientCtx: clientCtx, cachedPairings: map[string]*pairingtypes.QueryGetPairingResponse{}} + csq := &ConsumerStateQuery{StateQuery: *NewStateQuery(ctx, clientCtx), clientCtx: clientCtx, lastChainID: ""} return csq } func (csq *ConsumerStateQuery) GetPairing(ctx context.Context, chainID string, latestBlock int64) (pairingList []epochstoragetypes.StakeEntry, epoch uint64, nextBlockForUpdate uint64, errRet error) { if chainID == "" { - // the caller doesn't care which so just return the first - for key := range csq.cachedPairings { - chainID = key + if csq.lastChainID != "" { + chainID = csq.lastChainID } if chainID == "" { chainID = "LAV1" - utils.LavaFormatWarning("failed to run get pairing as there is no cached entry for empty chainID call, using default chainID", nil, &map[string]string{"chainID": chainID}) + utils.LavaFormatWarning("failed to run get pairing as there is no entry for empty chainID call, using default chainID", nil, &map[string]string{"chainID": chainID}) } } - if cachedResp, ok := csq.cachedPairings[chainID]; ok { - if cachedResp.BlockOfNextPairing > uint64(latestBlock) { - return cachedResp.Providers, cachedResp.CurrentEpoch, cachedResp.BlockOfNextPairing, nil + cachedInterface, found := csq.ResponsesCache.Get(PairingRespKey + chainID) + if found && cachedInterface != nil { + if cachedResp, ok := cachedInterface.(*pairingtypes.QueryGetPairingResponse); ok { + if cachedResp.BlockOfNextPairing > uint64(latestBlock) { + return cachedResp.Providers, cachedResp.CurrentEpoch, cachedResp.BlockOfNextPairing, nil + } else { + utils.LavaFormatError("invalid cache entry - failed casting response", nil, &map[string]string{"castingType": "*pairingtypes.QueryGetPairingResponse", "type": fmt.Sprintf("%t", cachedInterface)}) + } } } @@ -74,7 +95,8 @@ func (csq *ConsumerStateQuery) GetPairing(ctx context.Context, chainID string, l if err != nil { return nil, 0, 0, utils.LavaFormatError("Failed in get pairing query", err, &map[string]string{}) } - csq.cachedPairings[chainID] = pairingResp + csq.lastChainID = chainID + csq.ResponsesCache.SetWithTTL(PairingRespKey+chainID, pairingResp, 1, DefaultTimeToLiveExpiration) return pairingResp.Providers, pairingResp.CurrentEpoch, pairingResp.BlockOfNextPairing, nil } @@ -89,9 +111,7 @@ func (csq *ConsumerStateQuery) GetMaxCUForUser(ctx context.Context, chainID stri type ProviderStateQuery struct { StateQuery - clientCtx client.Context - cachedPairings map[string]*pairingtypes.QueryVerifyPairingResponse // TODO: replace this with TTL so we don't keep entries forever - cachedEntries map[string]*pairingtypes.QueryUserEntryResponse // TODO: replace this with TTL so we don't keep entries forever + clientCtx client.Context } func NewProviderStateQuery(ctx context.Context, clientCtx client.Context) *ProviderStateQuery { @@ -101,20 +121,28 @@ func NewProviderStateQuery(ctx context.Context, clientCtx client.Context) *Provi func (psq *ProviderStateQuery) GetVrfPkAndMaxCuForUser(ctx context.Context, consumerAddress string, chainID string, epoch uint64) (vrfPk *utils.VrfPubKey, maxCu uint64, err error) { key := psq.entryKey(consumerAddress, chainID, 
epoch, "") - UserEntryRes, ok := psq.cachedEntries[key] - if !ok { - UserEntryRes, err = psq.PairingQueryClient.UserEntry(ctx, &pairingtypes.QueryUserEntryRequest{ChainID: chainID, Address: consumerAddress, Block: epoch}) + cachedInterface, found := psq.ResponsesCache.Get(VrfPkAndMaxCuResponseKey + key) + var userEntryRes *pairingtypes.QueryUserEntryResponse = nil + if found && cachedInterface != nil { + if cachedResp, ok := cachedInterface.(*pairingtypes.QueryUserEntryResponse); ok { + userEntryRes = cachedResp + } else { + utils.LavaFormatError("invalid cache entry - failed casting response", nil, &map[string]string{"castingType": "*pairingtypes.QueryUserEntryResponse", "type": fmt.Sprintf("%t", cachedInterface)}) + } + } + if userEntryRes == nil { + userEntryRes, err = psq.PairingQueryClient.UserEntry(ctx, &pairingtypes.QueryUserEntryRequest{ChainID: chainID, Address: consumerAddress, Block: epoch}) if err != nil { return nil, 0, utils.LavaFormatError("StakeEntry querying for consumer failed", err, &map[string]string{"chainID": chainID, "address": consumerAddress, "block": strconv.FormatUint(epoch, 10)}) } - psq.cachedEntries[key] = UserEntryRes + psq.ResponsesCache.SetWithTTL(VrfPkAndMaxCuResponseKey+key, userEntryRes, 1, DefaultTimeToLiveExpiration) } vrfPk = &utils.VrfPubKey{} - vrfPk, err = vrfPk.DecodeFromBech32(UserEntryRes.GetConsumer().Vrfpk) + vrfPk, err = vrfPk.DecodeFromBech32(userEntryRes.GetConsumer().Vrfpk) if err != nil { - err = utils.LavaFormatError("decoding vrfpk from bech32", err, &map[string]string{"chainID": chainID, "address": consumerAddress, "block": strconv.FormatUint(epoch, 10), "UserEntryRes": fmt.Sprintf("%v", UserEntryRes)}) + err = utils.LavaFormatError("decoding vrfpk from bech32", err, &map[string]string{"chainID": chainID, "address": consumerAddress, "block": strconv.FormatUint(epoch, 10), "UserEntryRes": fmt.Sprintf("%v", userEntryRes)}) } - return vrfPk, UserEntryRes.GetMaxCU(), err + return vrfPk, userEntryRes.GetMaxCU(), err } func (psq *ProviderStateQuery) entryKey(consumerAddress string, chainID string, epoch uint64, providerAddress string) string { @@ -179,8 +207,17 @@ func (psq *ProviderStateQuery) VoteEvents(ctx context.Context, latestBlock int64 func (psq *ProviderStateQuery) VerifyPairing(ctx context.Context, consumerAddress string, providerAddress string, epoch uint64, chainID string) (valid bool, index int64, err error) { key := psq.entryKey(consumerAddress, chainID, epoch, providerAddress) - verifyResponse, ok := psq.cachedPairings[key] - if !ok { + + cachedInterface, found := psq.ResponsesCache.Get(VerifyPairingRespKey + key) + var verifyResponse *pairingtypes.QueryVerifyPairingResponse = nil + if found && cachedInterface != nil { + if cachedResp, ok := cachedInterface.(*pairingtypes.QueryVerifyPairingResponse); ok { + verifyResponse = cachedResp + } else { + utils.LavaFormatError("invalid cache entry - failed casting response", nil, &map[string]string{"castingType": "*pairingtypes.QueryVerifyPairingResponse", "type": fmt.Sprintf("%t", cachedInterface)}) + } + } + if verifyResponse == nil { verifyResponse, err = psq.PairingQueryClient.VerifyPairing(context.Background(), &pairingtypes.QueryVerifyPairingRequest{ ChainID: chainID, Client: consumerAddress, @@ -190,7 +227,7 @@ func (psq *ProviderStateQuery) VerifyPairing(ctx context.Context, consumerAddres if err != nil { return false, 0, err } - psq.cachedPairings[key] = verifyResponse + psq.ResponsesCache.SetWithTTL(VerifyPairingRespKey+key, verifyResponse, 1, DefaultTimeToLiveExpiration) } 
if !verifyResponse.Valid { return false, 0, utils.LavaFormatError("invalid self pairing with consumer", nil, &map[string]string{"provider": providerAddress, "consumer address": consumerAddress, "epoch": strconv.FormatUint(epoch, 10)}) From dd8587fe843d41b24d841fc70068f2faf3e548ae Mon Sep 17 00:00:00 2001 From: omer mishael Date: Thu, 16 Feb 2023 14:54:11 +0200 Subject: [PATCH 026/123] added deps --- go.mod | 2 +- go.sum | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 3ef05e74dd..ddb9d7727f 100644 --- a/go.mod +++ b/go.mod @@ -89,7 +89,7 @@ require ( github.com/deckarep/golang-set v1.8.0 github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f // indirect github.com/dgraph-io/badger/v2 v2.2007.2 // indirect - github.com/dgraph-io/ristretto v0.0.3 // indirect + github.com/dgraph-io/ristretto v0.1.1 // indirect github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect github.com/docker/docker v20.10.19+incompatible // indirect github.com/docker/go-units v0.5.0 // indirect diff --git a/go.sum b/go.sum index 295f8fbd0d..2c2f58f070 100644 --- a/go.sum +++ b/go.sum @@ -441,6 +441,8 @@ github.com/dgraph-io/badger/v2 v2.2007.2/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDm github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= github.com/dgraph-io/ristretto v0.0.3 h1:jh22xisGBjrEVnRZ1DVTpBVQm0Xndu8sMl0CWDzSIBI= github.com/dgraph-io/ristretto v0.0.3/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= +github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= @@ -1796,6 +1798,7 @@ golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= From bcbb90038f1c16b6e640702f52baa2b8af402264 Mon Sep 17 00:00:00 2001 From: omer mishael Date: Thu, 16 Feb 2023 15:08:55 +0200 Subject: [PATCH 027/123] added retry on txSender sequence mismatch --- protocol/common/tx_parsing.go | 41 +++++++++++++++ protocol/statetracker/tx_sender.go | 80 ++++++++++++++++++++++++++---- 2 files changed, 110 insertions(+), 11 deletions(-) create mode 100644 protocol/common/tx_parsing.go diff --git a/protocol/common/tx_parsing.go b/protocol/common/tx_parsing.go new file mode 100644 index 0000000000..cf8a50e35c --- /dev/null +++ b/protocol/common/tx_parsing.go @@ -0,0 +1,41 @@ +package common + +import ( + "regexp" + "strconv" + "strings" + + 
"github.com/lavanet/lava/utils" +) + +// extract requested sequence number from tx error. +func FindSequenceNumber(sequence string) (int, error) { + re := regexp.MustCompile(`expected (\d+), got (\d+)`) + match := re.FindStringSubmatch(sequence) + if match == nil || len(match) < 2 { + return 0, utils.LavaFormatWarning("Failed to parse sequence number from error", nil, &map[string]string{"sequence": sequence}) + } + return strconv.Atoi(match[1]) // atoi return 0 upon error, so it will be ok when sequenceNumberParsed uses it +} + +func ParseTransactionResult(transactionResult string) (string, int) { + transactionResult = strings.ReplaceAll(transactionResult, ": ", ":") + transactionResults := strings.Split(transactionResult, "\n") + summarizedResult := "" + for _, str := range transactionResults { + if strings.Contains(str, "raw_log:") || strings.Contains(str, "txhash:") || strings.Contains(str, "code:") { + summarizedResult = summarizedResult + str + ", " + } + } + + re := regexp.MustCompile(`code:(\d+)`) // extracting code from transaction result (in format code:%d) + match := re.FindStringSubmatch(transactionResult) + if match == nil || len(match) < 2 { + return summarizedResult, 1 // not zero + } + retCode, err := strconv.Atoi(match[1]) // extract return code. + if err != nil { + return summarizedResult, 1 // not zero + } + return summarizedResult, retCode +} diff --git a/protocol/statetracker/tx_sender.go b/protocol/statetracker/tx_sender.go index 8eb09a6144..e976ab80af 100644 --- a/protocol/statetracker/tx_sender.go +++ b/protocol/statetracker/tx_sender.go @@ -1,19 +1,25 @@ package statetracker import ( + "bytes" "context" + "fmt" + "strconv" + "strings" "github.com/cosmos/cosmos-sdk/client" "github.com/cosmos/cosmos-sdk/client/tx" sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/lavanet/lava/protocol/common" "github.com/lavanet/lava/protocol/rpcprovider/reliabilitymanager" "github.com/lavanet/lava/utils" conflicttypes "github.com/lavanet/lava/x/conflict/types" ) const ( - defaultGasPrice = "0.000000001ulava" - defaultGasAdjustment = 1.5 + defaultGasPrice = "0.000000001ulava" + defaultGasAdjustment = 1.5 + RETRY_INCORRECT_SEQUENCE = 5 ) type TxSender struct { @@ -29,28 +35,80 @@ func NewTxSender(ctx context.Context, clientCtx client.Context, txFactory tx.Fac } func (ts *TxSender) SimulateAndBroadCastTxWithRetryOnSeqMismatch(msg sdk.Msg) error { - txf := ts.txFactory.WithGasPrices(defaultGasPrice) - txf = txf.WithGasAdjustment(defaultGasAdjustment) + txfactory := ts.txFactory.WithGasPrices(defaultGasPrice) + txfactory = txfactory.WithGasAdjustment(defaultGasAdjustment) if err := msg.ValidateBasic(); err != nil { return err } clientCtx := ts.clientCtx - txf, err := ts.prepareFactory(txf) + txfactory, err := ts.prepareFactory(txfactory) if err != nil { return err } - _, gasUsed, err := tx.CalculateGas(clientCtx, txf, msg) + _, gasUsed, err := tx.CalculateGas(clientCtx, txfactory, msg) if err != nil { return err } - txf = txf.WithGas(gasUsed) - - err = tx.GenerateOrBroadcastTxWithFactory(clientCtx, txf, msg) - if err != nil { - return err + txfactory = txfactory.WithGas(gasUsed) + myWriter := bytes.Buffer{} + hasSequenceError := false + success := false + idx := -1 + sequenceNumberParsed := 0 + summarizedTransactionResult := "" + for ; idx < RETRY_INCORRECT_SEQUENCE && !success; idx++ { + if hasSequenceError { // a retry + // if sequence number error happened it means that we already sent a tx this block. 
+ // we need to wait a block for the tx to be approved, + // only then we can ask for a new sequence number continue and try again. + var seq uint64 + if sequenceNumberParsed != 0 { + utils.LavaFormatInfo("Sequence Number extracted from transaction error, retrying", &map[string]string{"sequence": strconv.Itoa(sequenceNumberParsed)}) + seq = uint64(sequenceNumberParsed) + } else { + var err error + _, seq, err = clientCtx.AccountRetriever.GetAccountNumberSequence(clientCtx, clientCtx.GetFromAddress()) + if err != nil { + utils.LavaFormatError("failed to get correct sequence number for account, give up", err, nil) + break // give up + } + } + txfactory = txfactory.WithSequence(seq) + myWriter.Reset() + utils.LavaFormatInfo("Retrying with sequence number:", &map[string]string{ + "SeqNum": strconv.FormatUint(seq, 10), + }) + } + var transactionResult string + err = tx.GenerateOrBroadcastTxWithFactory(clientCtx, txfactory, msg) + if err != nil { + utils.LavaFormatWarning("Sending CheckProfitabilityAndBroadCastTx failed", err, &map[string]string{ + "msg": fmt.Sprintf("%+v", msg), + }) + transactionResult = err.Error() // incase we got an error the tx result is basically the error + } else { + transactionResult = myWriter.String() + } + var returnCode int + summarizedTransactionResult, returnCode = common.ParseTransactionResult(transactionResult) + + if returnCode == 0 { // if we get some other code which isn't 0 then keep retrying + success = true + } else if strings.Contains(transactionResult, "account sequence") { + hasSequenceError = true + sequenceNumberParsed, err = common.FindSequenceNumber(transactionResult) + if err != nil { + utils.LavaFormatWarning("Failed findSequenceNumber", err, &map[string]string{"sequence": transactionResult}) + } + summarizedTransactionResult = transactionResult + } + } + if !success { + return utils.LavaFormatError(fmt.Sprintf("failed sending transaction %s", summarizedTransactionResult), nil, nil) } + utils.LavaFormatInfo(fmt.Sprintf("succeeded sending transaction %s", summarizedTransactionResult), nil) return nil } From a1988b1a984f5c76257399dfad4de08be06fe14f Mon Sep 17 00:00:00 2001 From: omer mishael Date: Thu, 16 Feb 2023 15:24:31 +0200 Subject: [PATCH 028/123] move common code to a method --- protocol/rpcprovider/rpcprovider_server.go | 46 +++++++++++----------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/protocol/rpcprovider/rpcprovider_server.go b/protocol/rpcprovider/rpcprovider_server.go index 35bbdb12e0..47bf9fa20a 100644 --- a/protocol/rpcprovider/rpcprovider_server.go +++ b/protocol/rpcprovider/rpcprovider_server.go @@ -97,20 +97,10 @@ func (rpcps *RPCProviderServer) Relay(ctx context.Context, request *pairingtypes "request.relayNumber": strconv.FormatUint(request.RelayNum, 10), "request.cu": strconv.FormatUint(request.CuSum, 10), }) - relaySession, consumerAddress, err := rpcps.initRelay(ctx, request) + relaySession, consumerAddress, chainMessage, err := rpcps.initRelay(ctx, request) if err != nil { return nil, rpcps.handleRelayErrorStatus(err) } - // parse the message to extract the cu and chainMessage for sending it - chainMessage, err := rpcps.chainParser.ParseMsg(request.ApiUrl, request.Data, request.ConnectionType) - if err != nil { - return nil, rpcps.handleRelayErrorStatus(err) - } - relayCU := chainMessage.GetServiceApi().ComputeUnits - err = relaySession.PrepareSessionForUsage(relayCU, request.CuSum) - if err != nil { // TODO: any error here we need to convert to session out of sync error and return that to the user - 
return nil, rpcps.handleRelayErrorStatus(err) - } reply, err := rpcps.TryRelay(ctx, request, consumerAddress, chainMessage) if err != nil { // failed to send relay. we need to adjust session state. cuSum and relayNumber. @@ -144,6 +134,26 @@ func (rpcps *RPCProviderServer) Relay(ctx context.Context, request *pairingtypes } return reply, rpcps.handleRelayErrorStatus(err) } + +func (rpcps *RPCProviderServer) initRelay(ctx context.Context, request *pairingtypes.RelayRequest) (relaySession *lavasession.SingleProviderSession, consumerAddress sdk.AccAddress, chainMessage chainlib.ChainMessage, err error) { + relaySession, consumerAddress, err = rpcps.verifyRelaySession(ctx, request) + if err != nil { + return nil, nil, nil, err + } + // parse the message to extract the cu and chainMessage for sending it + chainMessage, err = rpcps.chainParser.ParseMsg(request.ApiUrl, request.Data, request.ConnectionType) + if err != nil { + return nil, nil, nil, err + } + relayCU := chainMessage.GetServiceApi().ComputeUnits + err = relaySession.PrepareSessionForUsage(relayCU, request.CuSum) + if err != nil { + // TODO: any error here we need to convert to session out of sync error and return that to the user + return nil, nil, nil, err + } + return relaySession, consumerAddress, chainMessage, nil +} + func (rpcps *RPCProviderServer) RelaySubscribe(request *pairingtypes.RelayRequest, srv pairingtypes.Relayer_RelaySubscribeServer) error { if request.DataReliability != nil { return utils.LavaFormatError("subscribe data reliability not supported", nil, nil) @@ -154,17 +164,7 @@ func (rpcps *RPCProviderServer) RelaySubscribe(request *pairingtypes.RelayReques "request.cu": strconv.FormatUint(request.CuSum, 10), }) ctx := context.Background() - relaySession, consumerAddress, err := rpcps.initRelay(ctx, request) - if err != nil { - return rpcps.handleRelayErrorStatus(err) - } - // parse the message to extract the cu and chainMessage for sending it - chainMessage, err := rpcps.chainParser.ParseMsg(request.ApiUrl, request.Data, request.ConnectionType) - if err != nil { - return rpcps.handleRelayErrorStatus(err) - } - relayCU := chainMessage.GetServiceApi().ComputeUnits - err = relaySession.PrepareSessionForUsage(relayCU, request.CuSum) + relaySession, consumerAddress, chainMessage, err := rpcps.initRelay(ctx, request) if err != nil { return rpcps.handleRelayErrorStatus(err) } @@ -264,7 +264,7 @@ func (rpcps *RPCProviderServer) TryRelaySubscribe(ctx context.Context, request * } // verifies basic relay fields, and gets a provider session -func (rpcps *RPCProviderServer) initRelay(ctx context.Context, request *pairingtypes.RelayRequest) (singleProviderSession *lavasession.SingleProviderSession, extractedConsumerAddress sdk.AccAddress, err error) { +func (rpcps *RPCProviderServer) verifyRelaySession(ctx context.Context, request *pairingtypes.RelayRequest) (singleProviderSession *lavasession.SingleProviderSession, extractedConsumerAddress sdk.AccAddress, err error) { valid, thresholdEpoch := rpcps.providerSessionManager.IsValidEpoch(uint64(request.BlockHeight)) if !valid { return nil, nil, utils.LavaFormatError("user reported invalid lava block height", nil, &map[string]string{ From 81c24a37dd790546e3e35f2ea537794789376510 Mon Sep 17 00:00:00 2001 From: omer mishael Date: Thu, 16 Feb 2023 15:45:32 +0200 Subject: [PATCH 029/123] added option for reward server to reject a proof when it has an updated value --- protocol/lavasession/provider_session_manager.go | 6 ++++++ protocol/rpcprovider/rewardserver/reward_server.go | 3 ++- 
protocol/rpcprovider/rpcprovider_server.go | 14 +++++++++++--- 3 files changed, 19 insertions(+), 4 deletions(-) diff --git a/protocol/lavasession/provider_session_manager.go b/protocol/lavasession/provider_session_manager.go index 4bbc9e8a1e..8360d136c9 100644 --- a/protocol/lavasession/provider_session_manager.go +++ b/protocol/lavasession/provider_session_manager.go @@ -208,6 +208,12 @@ func (psm *ProviderSessionManager) SubscriptionFailure(consumerAddress string, e // userSessions.Lock.Unlock() } +// called when the reward server has information on a higher cu proof and usage and this providerSessionsManager needs to sync up on it +func (psm *ProviderSessionManager) UpdateSessionCU(consumerAddress string, epoch uint64, sessionID uint64, storedCU uint64) error { + // load the session and update the CU inside + return fmt.Errorf("not implemented") +} + // Returning a new provider session manager func NewProviderSessionManager(rpcProviderEndpoint *RPCProviderEndpoint, stateQuery StateQuery) *ProviderSessionManager { return &ProviderSessionManager{rpcProviderEndpoint: rpcProviderEndpoint, stateQuery: stateQuery} diff --git a/protocol/rpcprovider/rewardserver/reward_server.go b/protocol/rpcprovider/rewardserver/reward_server.go index 72ec671be6..38f441d24e 100644 --- a/protocol/rpcprovider/rewardserver/reward_server.go +++ b/protocol/rpcprovider/rewardserver/reward_server.go @@ -14,10 +14,11 @@ type RewardsTxSender interface { TxRelayPayment(ctx context.Context, relayRequests []*pairingtypes.RelayRequest) } -func (rws *RewardServer) SendNewProof(ctx context.Context, proof *pairingtypes.RelayRequest, epoch uint64, consumerAddr string) { +func (rws *RewardServer) SendNewProof(ctx context.Context, proof *pairingtypes.RelayRequest, epoch uint64, consumerAddr string) (existingCU uint64, updatedWithProof bool) { // TODO: implement // get the proof for this consumer for this epoch for this session, update the latest proof // write to a channel the epoch + return 0, false } func (rws *RewardServer) SendNewDataReliabilityProof(ctx context.Context, dataReliability *pairingtypes.VRFData, epoch uint64, consumerAddr string) { diff --git a/protocol/rpcprovider/rpcprovider_server.go b/protocol/rpcprovider/rpcprovider_server.go index 47bf9fa20a..23135333b2 100644 --- a/protocol/rpcprovider/rpcprovider_server.go +++ b/protocol/rpcprovider/rpcprovider_server.go @@ -45,7 +45,7 @@ type ReliabilityManagerInf interface { } type RewardServerInf interface { - SendNewProof(ctx context.Context, proof *pairingtypes.RelayRequest, epoch uint64, consumerAddr string) + SendNewProof(ctx context.Context, proof *pairingtypes.RelayRequest, epoch uint64, consumerAddr string) (existingCU uint64, updatedWithProof bool) SendNewDataReliabilityProof(ctx context.Context, dataReliability *pairingtypes.VRFData, epoch uint64, consumerAddr string) SubscribeStarted(consumer string, epoch uint64, subscribeID string) SubscribeEnded(consumer string, epoch uint64, subscribeID string) @@ -118,7 +118,11 @@ func (rpcps *RPCProviderServer) Relay(ctx context.Context, request *pairingtypes err = sdkerrors.Wrapf(relayError, "OnSession Done failure: "+err.Error()) } else { if request.DataReliability == nil { - rpcps.rewardServer.SendNewProof(ctx, request.ShallowCopy(), relaySession.PairingEpoch, consumerAddress.String()) + epoch := relaySession.PairingEpoch + storedCU, updatedWithProof := rpcps.rewardServer.SendNewProof(ctx, request.ShallowCopy(), epoch, consumerAddress.String()) + if !updatedWithProof { + 
rpcps.providerSessionManager.UpdateSessionCU(consumerAddress.String(), epoch, request.SessionId, storedCU) + } utils.LavaFormatDebug("Provider Finished Relay Successfully", &map[string]string{ "request.SessionId": strconv.FormatUint(request.SessionId, 10), "request.relayNumber": strconv.FormatUint(request.RelayNum, 10), @@ -175,7 +179,11 @@ func (rpcps *RPCProviderServer) RelaySubscribe(request *pairingtypes.RelayReques if relayError != nil { err = sdkerrors.Wrapf(relayError, "OnSession Done failure: "+err.Error()) } else { - rpcps.rewardServer.SendNewProof(ctx, request.ShallowCopy(), relaySession.PairingEpoch, consumerAddress.String()) + epoch := relaySession.PairingEpoch + storedCU, updatedWithProof := rpcps.rewardServer.SendNewProof(ctx, request.ShallowCopy(), epoch, consumerAddress.String()) + if !updatedWithProof { + rpcps.providerSessionManager.UpdateSessionCU(consumerAddress.String(), epoch, request.SessionId, storedCU) + } utils.LavaFormatDebug("Provider finished subscribing", &map[string]string{ "request.SessionId": strconv.FormatUint(request.SessionId, 10), "request.relayNumber": strconv.FormatUint(request.RelayNum, 10), From 65e2095e9bf2d5aa1088f10f22d1f26e0e32e38b Mon Sep 17 00:00:00 2001 From: omer mishael Date: Thu, 16 Feb 2023 15:57:55 +0200 Subject: [PATCH 030/123] wrap common code in a method --- protocol/rpcprovider/rpcprovider_server.go | 25 +++++++++++++++------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/protocol/rpcprovider/rpcprovider_server.go b/protocol/rpcprovider/rpcprovider_server.go index 23135333b2..1c3839b7c5 100644 --- a/protocol/rpcprovider/rpcprovider_server.go +++ b/protocol/rpcprovider/rpcprovider_server.go @@ -118,10 +118,9 @@ func (rpcps *RPCProviderServer) Relay(ctx context.Context, request *pairingtypes err = sdkerrors.Wrapf(relayError, "OnSession Done failure: "+err.Error()) } else { if request.DataReliability == nil { - epoch := relaySession.PairingEpoch - storedCU, updatedWithProof := rpcps.rewardServer.SendNewProof(ctx, request.ShallowCopy(), epoch, consumerAddress.String()) - if !updatedWithProof { - rpcps.providerSessionManager.UpdateSessionCU(consumerAddress.String(), epoch, request.SessionId, storedCU) + err = rpcps.SendProof(ctx, relaySession, request, consumerAddress) + if err != nil { + return nil, err } utils.LavaFormatDebug("Provider Finished Relay Successfully", &map[string]string{ "request.SessionId": strconv.FormatUint(request.SessionId, 10), @@ -179,10 +178,9 @@ func (rpcps *RPCProviderServer) RelaySubscribe(request *pairingtypes.RelayReques if relayError != nil { err = sdkerrors.Wrapf(relayError, "OnSession Done failure: "+err.Error()) } else { - epoch := relaySession.PairingEpoch - storedCU, updatedWithProof := rpcps.rewardServer.SendNewProof(ctx, request.ShallowCopy(), epoch, consumerAddress.String()) - if !updatedWithProof { - rpcps.providerSessionManager.UpdateSessionCU(consumerAddress.String(), epoch, request.SessionId, storedCU) + err = rpcps.SendProof(ctx, relaySession, request, consumerAddress) + if err != nil { + return err } utils.LavaFormatDebug("Provider finished subscribing", &map[string]string{ "request.SessionId": strconv.FormatUint(request.SessionId, 10), @@ -204,6 +202,17 @@ func (rpcps *RPCProviderServer) RelaySubscribe(request *pairingtypes.RelayReques return rpcps.handleRelayErrorStatus(err) } +func (rpcps *RPCProviderServer) SendProof(ctx context.Context, relaySession *lavasession.SingleProviderSession, request *pairingtypes.RelayRequest, consumerAddress sdk.AccAddress) error { + epoch := 
relaySession.PairingEpoch + storedCU, updatedWithProof := rpcps.rewardServer.SendNewProof(ctx, request.ShallowCopy(), epoch, consumerAddress.String()) + if !updatedWithProof && storedCU > request.CuSum { + rpcps.providerSessionManager.UpdateSessionCU(consumerAddress.String(), epoch, request.SessionId, storedCU) + err := utils.LavaFormatError("Cu in relay smaller than existing proof", lavasession.ProviderConsumerCuMisMatch, &map[string]string{"existing_proof_cu": strconv.FormatUint(storedCU, 10)}) + return rpcps.handleRelayErrorStatus(err) + } + return nil +} + func (rpcps *RPCProviderServer) TryRelaySubscribe(ctx context.Context, request *pairingtypes.RelayRequest, srv pairingtypes.Relayer_RelaySubscribeServer, chainMessage chainlib.ChainMessage, consumerAddress sdk.AccAddress) (subscribed bool, errRet error) { var reply *pairingtypes.RelayReply var clientSub *rpcclient.ClientSubscription From 9bb8ede9e87bf732c8b71b291a28bc7aea90a947 Mon Sep 17 00:00:00 2001 From: omer mishael Date: Thu, 16 Feb 2023 18:14:35 +0200 Subject: [PATCH 031/123] reward server work --- .../lavasession/provider_session_manager.go | 5 +- protocol/lavasession/provider_types.go | 5 - .../rpcprovider/rewardserver/reward_server.go | 138 ++++++++++++++++-- protocol/rpcprovider/rpcprovider.go | 6 +- protocol/rpcprovider/rpcprovider_server.go | 20 +-- .../statetracker/provider_state_tracker.go | 8 +- protocol/statetracker/state_query.go | 8 + 7 files changed, 152 insertions(+), 38 deletions(-) diff --git a/protocol/lavasession/provider_session_manager.go b/protocol/lavasession/provider_session_manager.go index 8360d136c9..811e3b5cd8 100644 --- a/protocol/lavasession/provider_session_manager.go +++ b/protocol/lavasession/provider_session_manager.go @@ -16,7 +16,6 @@ type ProviderSessionManager struct { lock sync.RWMutex blockedEpoch uint64 // requests from this epoch are blocked rpcProviderEndpoint *RPCProviderEndpoint - stateQuery StateQuery } // reads cs.BlockedEpoch atomically @@ -215,6 +214,6 @@ func (psm *ProviderSessionManager) UpdateSessionCU(consumerAddress string, epoch } // Returning a new provider session manager -func NewProviderSessionManager(rpcProviderEndpoint *RPCProviderEndpoint, stateQuery StateQuery) *ProviderSessionManager { - return &ProviderSessionManager{rpcProviderEndpoint: rpcProviderEndpoint, stateQuery: stateQuery} +func NewProviderSessionManager(rpcProviderEndpoint *RPCProviderEndpoint) *ProviderSessionManager { + return &ProviderSessionManager{rpcProviderEndpoint: rpcProviderEndpoint} } diff --git a/protocol/lavasession/provider_types.go b/protocol/lavasession/provider_types.go index c8bb5d387b..d586f18e05 100644 --- a/protocol/lavasession/provider_types.go +++ b/protocol/lavasession/provider_types.go @@ -1,7 +1,6 @@ package lavasession import ( - "context" "strconv" "sync" "sync/atomic" @@ -186,7 +185,3 @@ func (sps *SingleProviderSession) validateAndAddUsedCU(currentCU uint64, maxCu u } } } - -type StateQuery interface { - QueryVerifyPairing(ctx context.Context, consumer string, blockHeight uint64) -} diff --git a/protocol/rpcprovider/rewardserver/reward_server.go b/protocol/rpcprovider/rewardserver/reward_server.go index 38f441d24e..4cb6e23181 100644 --- a/protocol/rpcprovider/rewardserver/reward_server.go +++ b/protocol/rpcprovider/rewardserver/reward_server.go @@ -2,34 +2,143 @@ package rewardserver import ( "context" + "strconv" + "sync" + "github.com/lavanet/lava/utils" pairingtypes "github.com/lavanet/lava/x/pairing/types" ) +const ( + StaleEpochDistance = 2 +) + +type ConsumerRewards 
struct { + epoch uint64 + consumer string + proofs map[uint64]*pairingtypes.RelayRequest // key is sessionID + dataReliabilityProofs []*pairingtypes.VRFData +} + +func (csrw *ConsumerRewards) PrepareRewardsForClaim() (retProofs []*pairingtypes.RelayRequest, errRet error) { + for _, proof := range csrw.proofs { + retProofs = append(retProofs, proof) + } + dataReliabilityProofs := len(csrw.dataReliabilityProofs) + if len(retProofs) > 0 && dataReliabilityProofs > 0 { + for idx := range retProofs { + if idx > dataReliabilityProofs-1 { + break + } + retProofs[idx].DataReliability = csrw.dataReliabilityProofs[idx] + } + } + return +} + +type EpochRewards struct { + epoch uint64 + consumerRewards map[string]*ConsumerRewards // key is consumer +} + type RewardServer struct { rewardsTxSender RewardsTxSender + lock sync.RWMutex + rewards map[uint64]*EpochRewards } type RewardsTxSender interface { TxRelayPayment(ctx context.Context, relayRequests []*pairingtypes.RelayRequest) + GetEpochSize(ctx context.Context) (uint64, error) } func (rws *RewardServer) SendNewProof(ctx context.Context, proof *pairingtypes.RelayRequest, epoch uint64, consumerAddr string) (existingCU uint64, updatedWithProof bool) { - // TODO: implement - // get the proof for this consumer for this epoch for this session, update the latest proof - // write to a channel the epoch - return 0, false + rws.lock.Lock() // assuming 99% of the time we will need to write the new entry so there's no use in doing the read lock first to check stuff + defer rws.lock.Unlock() + epochRewards, ok := rws.rewards[epoch] + if !ok { + proofs := map[uint64]*pairingtypes.RelayRequest{proof.SessionId: proof} + consumerRewardsMap := map[string]*ConsumerRewards{consumerAddr: {epoch: epoch, consumer: consumerAddr, proofs: proofs, dataReliabilityProofs: []*pairingtypes.VRFData{}}} + rws.rewards[epoch] = &EpochRewards{epoch: epoch, consumerRewards: consumerRewardsMap} + return 0, true + } + consumerRewards, ok := epochRewards.consumerRewards[consumerAddr] + if !ok { + proofs := map[uint64]*pairingtypes.RelayRequest{proof.SessionId: proof} + consumerRewards := &ConsumerRewards{epoch: epoch, consumer: consumerAddr, proofs: proofs, dataReliabilityProofs: []*pairingtypes.VRFData{}} + epochRewards.consumerRewards[consumerAddr] = consumerRewards + return 0, true + } + relayProof, ok := consumerRewards.proofs[proof.SessionId] + if !ok { + consumerRewards.proofs[proof.SessionId] = proof + return 0, true + } + cuSumStored := relayProof.CuSum + if cuSumStored >= proof.CuSum { + return cuSumStored, false + } + consumerRewards.proofs[proof.SessionId] = proof + return 0, true } -func (rws *RewardServer) SendNewDataReliabilityProof(ctx context.Context, dataReliability *pairingtypes.VRFData, epoch uint64, consumerAddr string) { +func (rws *RewardServer) SendNewDataReliabilityProof(ctx context.Context, dataReliability *pairingtypes.VRFData, epoch uint64, consumerAddr string) (updatedWithProof bool) { + rws.lock.Lock() // assuming 99% of the time we will need to write the new entry so there's no use in doing the read lock first to check stuff + defer rws.lock.Unlock() + epochRewards, ok := rws.rewards[epoch] + if !ok { + consumerRewardsMap := map[string]*ConsumerRewards{consumerAddr: {epoch: epoch, consumer: consumerAddr, proofs: map[uint64]*pairingtypes.RelayRequest{}, dataReliabilityProofs: []*pairingtypes.VRFData{dataReliability}}} + rws.rewards[epoch] = &EpochRewards{epoch: epoch, consumerRewards: consumerRewardsMap} + return true + } + consumerRewards, ok := 
epochRewards.consumerRewards[consumerAddr] + if !ok { + consumerRewards := &ConsumerRewards{epoch: epoch, consumer: consumerAddr, proofs: map[uint64]*pairingtypes.RelayRequest{}, dataReliabilityProofs: []*pairingtypes.VRFData{dataReliability}} + epochRewards.consumerRewards[consumerAddr] = consumerRewards + return true + } + if len(consumerRewards.dataReliabilityProofs) == 0 { + consumerRewards.dataReliabilityProofs = []*pairingtypes.VRFData{dataReliability} + return true + } + return false // currently support only one per epoch +} +func (rws *RewardServer) UpdateEpoch(epoch uint64) { + ctx := context.Background() + rws.gatherRewardsForClaim(ctx, epoch) } -func NewRewardServer(rewardsTxSender RewardsTxSender) *RewardServer { - // - rws := &RewardServer{} - rws.rewardsTxSender = rewardsTxSender - return rws +func (rws *RewardServer) gatherRewardsForClaim(ctx context.Context, current_epoch uint64) (rewardsForClaim []*pairingtypes.RelayRequest, errRet error) { + rws.lock.Lock() + defer rws.lock.Unlock() + epochSize, err := rws.rewardsTxSender.GetEpochSize(ctx) + if err != nil { + return nil, err + } + if epochSize*StaleEpochDistance > current_epoch { + return nil, utils.LavaFormatError("current epoch too low", nil, &map[string]string{"current epoch": strconv.FormatUint(current_epoch, 10)}) + } + target_epoch_to_claim_rewards := current_epoch - epochSize*StaleEpochDistance + for epoch, epochRewards := range rws.rewards { + if epoch >= uint64(target_epoch_to_claim_rewards) { + continue + } + + for consumerAddr, rewards := range epochRewards.consumerRewards { + claimables, err := rewards.PrepareRewardsForClaim() + if err != nil { + // can't claim this now + continue + } + rewardsForClaim = append(rewardsForClaim, claimables...) + delete(epochRewards.consumerRewards, consumerAddr) + } + if len(epochRewards.consumerRewards) == 0 { + delete(rws.rewards, epoch) + } + } + return } func (rws *RewardServer) SubscribeStarted(consumer string, epoch uint64, subscribeID string) { @@ -39,3 +148,12 @@ func (rws *RewardServer) SubscribeStarted(consumer string, epoch uint64, subscri func (rws *RewardServer) SubscribeEnded(consumer string, epoch uint64, subscribeID string) { // can collect now } + +func NewRewardServer(rewardsTxSender RewardsTxSender) *RewardServer { + // + rws := &RewardServer{} + rws.rewardsTxSender = rewardsTxSender + // TODO: load this from persistency + rws.rewards = map[uint64]*EpochRewards{} + return rws +} diff --git a/protocol/rpcprovider/rpcprovider.go b/protocol/rpcprovider/rpcprovider.go index 64ab373970..5959ce4de5 100644 --- a/protocol/rpcprovider/rpcprovider.go +++ b/protocol/rpcprovider/rpcprovider.go @@ -37,7 +37,6 @@ type ProviderStateTrackerInf interface { RegisterChainParserForSpecUpdates(ctx context.Context, chainParser chainlib.ChainParser, chainID string) error RegisterReliabilityManagerForVoteUpdates(ctx context.Context, voteUpdatable statetracker.VoteUpdatable, endpointP *lavasession.RPCProviderEndpoint) RegisterForEpochUpdates(ctx context.Context, epochUpdatable statetracker.EpochUpdatable) - QueryVerifyPairing(ctx context.Context, consumer string, blockHeight uint64) TxRelayPayment(ctx context.Context, relayRequests []*pairingtypes.RelayRequest) SendVoteReveal(voteID string, vote *reliabilitymanager.VoteData) error SendVoteCommitment(voteID string, vote *reliabilitymanager.VoteData) error @@ -45,6 +44,7 @@ type ProviderStateTrackerInf interface { GetVrfPkAndMaxCuForUser(ctx context.Context, consumerAddress string, chainID string, epocu uint64) (vrfPk 
*utils.VrfPubKey, maxCu uint64, err error) VerifyPairing(ctx context.Context, consumerAddress string, providerAddress string, epoch uint64, chainID string) (valid bool, index int64, err error) GetProvidersCountForConsumer(ctx context.Context, consumerAddress string, epoch uint64, chainID string) (uint32, error) + GetEpochSize(ctx context.Context) (uint64, error) } type RPCProvider struct { @@ -64,7 +64,7 @@ func (rpcp *RPCProvider) Start(ctx context.Context, txFactory tx.Factory, client rpcp.rpcProviderServers = make(map[string]*RPCProviderServer, len(rpcProviderEndpoints)) // single reward server rewardServer := rewardserver.NewRewardServer(providerStateTracker) - + rpcp.providerStateTracker.RegisterForEpochUpdates(ctx, rewardServer) keyName, err := sigs.GetKeyName(clientCtx) if err != nil { utils.LavaFormatFatal("failed getting key name from clientCtx", err, nil) @@ -83,7 +83,7 @@ func (rpcp *RPCProvider) Start(ctx context.Context, txFactory tx.Factory, client utils.LavaFormatInfo("RPCProvider pubkey: "+addr.String(), nil) utils.LavaFormatInfo("RPCProvider setting up endpoints", &map[string]string{"length": strconv.Itoa(len(rpcProviderEndpoints))}) for _, rpcProviderEndpoint := range rpcProviderEndpoints { - providerSessionManager := lavasession.NewProviderSessionManager(rpcProviderEndpoint, providerStateTracker) + providerSessionManager := lavasession.NewProviderSessionManager(rpcProviderEndpoint) key := rpcProviderEndpoint.Key() rpcp.providerStateTracker.RegisterForEpochUpdates(ctx, providerSessionManager) chainParser, err := chainlib.NewChainParser(rpcProviderEndpoint.ApiInterface) diff --git a/protocol/rpcprovider/rpcprovider_server.go b/protocol/rpcprovider/rpcprovider_server.go index 1c3839b7c5..72b02e292b 100644 --- a/protocol/rpcprovider/rpcprovider_server.go +++ b/protocol/rpcprovider/rpcprovider_server.go @@ -46,7 +46,7 @@ type ReliabilityManagerInf interface { type RewardServerInf interface { SendNewProof(ctx context.Context, proof *pairingtypes.RelayRequest, epoch uint64, consumerAddr string) (existingCU uint64, updatedWithProof bool) - SendNewDataReliabilityProof(ctx context.Context, dataReliability *pairingtypes.VRFData, epoch uint64, consumerAddr string) + SendNewDataReliabilityProof(ctx context.Context, dataReliability *pairingtypes.VRFData, epoch uint64, consumerAddr string) (updatedWithProof bool) SubscribeStarted(consumer string, epoch uint64, subscribeID string) SubscribeEnded(consumer string, epoch uint64, subscribeID string) } @@ -80,18 +80,9 @@ func (rpcps *RPCProviderServer) ServeRPCRequests( rpcps.stateTracker = stateTracker rpcps.providerAddress = providerAddress } + +// function used to handle relay requests from a consumer, it is called by a provider_listener by calling RegisterReceiver func (rpcps *RPCProviderServer) Relay(ctx context.Context, request *pairingtypes.RelayRequest) (*pairingtypes.RelayReply, error) { - // verify the relay metadata is valid (epoch, signature) - // verify the consumer is authorised - // create/bring a session - // verify the relay data is valid (cu, chainParser, requested block) - // check cache hit - // send the relay to the node using chainProxy - // set cache entry (async) - // attach data reliability finalization data - // sign the response - // send the proof to reward server - // finalize the session utils.LavaFormatDebug("Provider got relay request", &map[string]string{ "request.SessionId": strconv.FormatUint(request.SessionId, 10), "request.relayNumber": strconv.FormatUint(request.RelayNum, 10), @@ -127,7 +118,10 @@ func 
(rpcps *RPCProviderServer) Relay(ctx context.Context, request *pairingtypes "request.relayNumber": strconv.FormatUint(request.RelayNum, 10), }) } else { - rpcps.rewardServer.SendNewDataReliabilityProof(ctx, request.DataReliability, relaySession.PairingEpoch, consumerAddress.String()) + updated := rpcps.rewardServer.SendNewDataReliabilityProof(ctx, request.DataReliability, relaySession.PairingEpoch, consumerAddress.String()) + if !updated { + return nil, utils.LavaFormatError("existing data reliability proof", lavasession.DataReliabilityAlreadySentThisEpochError, nil) + } utils.LavaFormatDebug("Provider Finished DataReliability Relay Successfully", &map[string]string{ "request.SessionId": strconv.FormatUint(request.SessionId, 10), "request.relayNumber": strconv.FormatUint(request.RelayNum, 10), diff --git a/protocol/statetracker/provider_state_tracker.go b/protocol/statetracker/provider_state_tracker.go index 52bd57d091..15be0c2c65 100644 --- a/protocol/statetracker/provider_state_tracker.go +++ b/protocol/statetracker/provider_state_tracker.go @@ -65,10 +65,6 @@ func (pst *ProviderStateTracker) RegisterReliabilityManagerForVoteUpdates(ctx co voteUpdater.RegisterVoteUpdatable(ctx, &voteUpdatable, endpoint) } -func (pst *ProviderStateTracker) QueryVerifyPairing(ctx context.Context, consumer string, blockHeight uint64) { - // TODO: implement -} - func (pst *ProviderStateTracker) TxRelayPayment(ctx context.Context, relayRequests []*pairingtypes.RelayRequest) { // TODO: implement } @@ -95,3 +91,7 @@ func (pst *ProviderStateTracker) VerifyPairing(ctx context.Context, consumerAddr func (pst *ProviderStateTracker) GetProvidersCountForConsumer(ctx context.Context, consumerAddress string, epoch uint64, chainID string) (uint32, error) { return pst.stateQuery.GetProvidersCountForConsumer(ctx, consumerAddress, epoch, chainID) } + +func (pst *ProviderStateTracker) GetEpochSize(ctx context.Context) (uint64, error) { + return pst.stateQuery.GetEpochSize(ctx) +} diff --git a/protocol/statetracker/state_query.go b/protocol/statetracker/state_query.go index 3a8801d29c..86ecb244a9 100644 --- a/protocol/statetracker/state_query.go +++ b/protocol/statetracker/state_query.go @@ -242,3 +242,11 @@ func (psq *ProviderStateQuery) GetProvidersCountForConsumer(ctx context.Context, } return uint32(res.GetParams().ServicersToPairCount), nil } + +func (psq *ProviderStateQuery) GetEpochSize(ctx context.Context) (uint64, error) { + res, err := psq.EpochStorageQueryClient.Params(ctx, &epochstoragetypes.QueryParamsRequest{}) + if err != nil { + return 0, err + } + return res.Params.EpochBlocks, nil +} From 6a2ecbbe080338bf7674c5f2f09721e7d98ba92b Mon Sep 17 00:00:00 2001 From: Ran Mishael Date: Thu, 16 Feb 2023 17:28:31 +0100 Subject: [PATCH 032/123] relaynumber fixed --- docs/static/openapi.yml | 2 ++ go.mod | 2 -- go.sum | 4 ---- protocol/lavasession/provider_session_manager.go | 13 +++++++++---- protocol/lavasession/provider_types.go | 10 +--------- protocol/rpcprovider/rpcprovider_server.go | 2 +- 6 files changed, 13 insertions(+), 20 deletions(-) diff --git a/docs/static/openapi.yml b/docs/static/openapi.yml index 165ddf2ee3..9a2ba2e312 100644 --- a/docs/static/openapi.yml +++ b/docs/static/openapi.yml @@ -53152,6 +53152,8 @@ definitions: unresponsive_providers: type: string format: byte + apiInterface: + type: string lavanet.lava.pairing.VRFData: type: object properties: diff --git a/go.mod b/go.mod index 3ef05e74dd..f09dbce7f5 100644 --- a/go.mod +++ b/go.mod @@ -48,8 +48,6 @@ require ( 
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect github.com/ghodss/yaml v1.0.0 // indirect github.com/gogo/googleapis v1.4.0 // indirect - github.com/golang/glog v1.0.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.0 // indirect github.com/pelletier/go-toml/v2 v2.0.5 // indirect golang.org/x/mod v0.7.0 // indirect golang.org/x/tools v0.2.0 // indirect diff --git a/go.sum b/go.sum index 295f8fbd0d..7f6f98aa15 100644 --- a/go.sum +++ b/go.sum @@ -656,8 +656,6 @@ github.com/golang-jwt/jwt/v4 v4.3.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzw github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -782,8 +780,6 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.0 h1:1JYBfzqrWPcCclBwxFCPAou9n+q86mfnu7NAeHfte7A= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.0/go.mod h1:YDZoGHuwE+ov0c8smSH49WLF3F2LaWnYYuDVd+EWrc0= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= github.com/gtank/merlin v0.1.1-0.20191105220539-8318aed1a79f/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= diff --git a/protocol/lavasession/provider_session_manager.go b/protocol/lavasession/provider_session_manager.go index 4bbc9e8a1e..33969561d2 100644 --- a/protocol/lavasession/provider_session_manager.go +++ b/protocol/lavasession/provider_session_manager.go @@ -43,13 +43,17 @@ func (psm *ProviderSessionManager) IsActiveConsumer(epoch uint64, address string return providerSessionWithConsumer, nil // no error } -func (psm *ProviderSessionManager) getSingleSessionFromProviderSessionWithConsumer(providerSessionWithConsumer *ProviderSessionsWithConsumer, sessionId uint64, epoch uint64) (*SingleProviderSession, error) { +func (psm *ProviderSessionManager) getSingleSessionFromProviderSessionWithConsumer(providerSessionWithConsumer *ProviderSessionsWithConsumer, sessionId uint64, epoch uint64, relayNumber uint64) (*SingleProviderSession, error) { // TODO:: we can validate here if consumer is blocked with atomicWriteBlockedEpoch // before getting any sessions. singleProviderSession, err := psm.getSessionFromAnActiveConsumer(providerSessionWithConsumer, sessionId, epoch) // after getting session verify relayNum etc.. 
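A standalone sketch of the relay-number condition introduced just below, assuming the provider only advances its counter inside PrepareSessionForUsage; the helper name is illustrative and not part of this patch:

func relayNumberTooFarAhead(localRelayNum uint64, requestedRelayNum uint64) bool {
	// the provider bumps RelayNum once per prepared relay, so an incoming request
	// may be at most one ahead of the locally stored counter before the session
	// is treated as out of sync
	return localRelayNum+1 < requestedRelayNum
}
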
if err != nil { return nil, utils.LavaFormatError("getSessionFromAnActiveConsumer Failure", err, &map[string]string{"RequestedEpoch": strconv.FormatUint(epoch, 10), "sessionId": strconv.FormatUint(sessionId, 10)}) } + + if singleProviderSession.RelayNum+1 < relayNumber { // validate relay number here, but add only in PrepareSessionForUsage + return nil, utils.LavaFormatError("singleProviderSession.RelayNum mismatch, session out of sync", SessionOutOfSyncError, &map[string]string{"singleProviderSession.RelayNum": strconv.FormatUint(singleProviderSession.RelayNum+1, 10), "request.relayNumber": strconv.FormatUint(relayNumber, 10)}) + } // singleProviderSession is locked at this point. return singleProviderSession, err } @@ -66,7 +70,7 @@ func (psm *ProviderSessionManager) GetSession(address string, epoch uint64, sess return nil, err } - return psm.getSingleSessionFromProviderSessionWithConsumer(providerSessionWithConsumer, sessionId, epoch) + return psm.getSingleSessionFromProviderSessionWithConsumer(providerSessionWithConsumer, sessionId, epoch, relayNumber) } func (psm *ProviderSessionManager) registerNewSession(address string, epoch uint64, sessionId uint64) (*ProviderSessionsWithConsumer, error) { @@ -97,7 +101,7 @@ func (psm *ProviderSessionManager) registerNewSession(address string, epoch uint } // TODO add vrfPk and Max compute units. -func (psm *ProviderSessionManager) RegisterProviderSessionWithConsumer(address string, epoch uint64, sessionId uint64) (*SingleProviderSession, error) { +func (psm *ProviderSessionManager) RegisterProviderSessionWithConsumer(address string, epoch uint64, sessionId uint64, relayNumber uint64) (*SingleProviderSession, error) { providerSessionWithConsumer, err := psm.IsActiveConsumer(epoch, address) if err != nil { if ConsumerNotRegisteredYet.Is(err) { @@ -109,7 +113,8 @@ func (psm *ProviderSessionManager) RegisterProviderSessionWithConsumer(address s return nil, utils.LavaFormatError("RegisterProviderSessionWithConsumer Failed", err, nil) } } - return psm.getSingleSessionFromProviderSessionWithConsumer(providerSessionWithConsumer, sessionId, epoch) + // validate relay number?? == 1 + return psm.getSingleSessionFromProviderSessionWithConsumer(providerSessionWithConsumer, sessionId, epoch, relayNumber) } func (psm *ProviderSessionManager) getActiveConsumer(epoch uint64, address string) (providerSessionWithConsumer *ProviderSessionsWithConsumer, err error) { diff --git a/protocol/lavasession/provider_types.go b/protocol/lavasession/provider_types.go index c8bb5d387b..837c26a9c4 100644 --- a/protocol/lavasession/provider_types.go +++ b/protocol/lavasession/provider_types.go @@ -136,14 +136,6 @@ func (sps *SingleProviderSession) PrepareSessionForUsage(currentCU uint64, relay return utils.LavaFormatError("sps.verifyLock() failed in PrepareSessionForUsage", err, nil) } - // if sps.RelayNum+1 != relayNumber { - // sps.lock.Unlock() // unlock on error - // return utils.LavaFormatError("Maximum cu exceeded PrepareSessionForUsage", MaximumCULimitReachedByConsumer, &map[string]string{ - // "relayNumber": strconv.FormatUint(relayNumber, 10), - // "sps.RelayNum": strconv.FormatUint(sps.RelayNum+1, 10), - // }) - // } - maxCu := sps.userSessionsParent.atomicReadMaxComputeUnits() if relayRequestTotalCU < sps.CuSum+currentCU { sps.lock.Unlock() // unlock on error @@ -163,7 +155,7 @@ func (sps *SingleProviderSession) PrepareSessionForUsage(currentCU uint64, relay // finished validating, can add all info. sps.LatestRelayCu = currentCU // 1. 
update latest sps.CuSum = relayRequestTotalCU // 2. update CuSum, if consumer wants to pay more, let it - // sps.RelayNum = sps.RelayNum + 1 + sps.RelayNum = sps.RelayNum + 1 return nil } diff --git a/protocol/rpcprovider/rpcprovider_server.go b/protocol/rpcprovider/rpcprovider_server.go index 35bbdb12e0..97a3a47aa7 100644 --- a/protocol/rpcprovider/rpcprovider_server.go +++ b/protocol/rpcprovider/rpcprovider_server.go @@ -297,7 +297,7 @@ func (rpcps *RPCProviderServer) initRelay(ctx context.Context, request *pairingt if lavasession.ConsumerNotRegisteredYet.Is(err) { // TODO:: validate consumer address get max cu and vrf data and transfer register. - singleProviderSession, err = rpcps.providerSessionManager.RegisterProviderSessionWithConsumer(extractedConsumerAddress.String(), uint64(request.BlockHeight), request.SessionId) + singleProviderSession, err = rpcps.providerSessionManager.RegisterProviderSessionWithConsumer(extractedConsumerAddress.String(), uint64(request.BlockHeight), request.SessionId, request.RelayNum) if err != nil { return nil, nil, utils.LavaFormatError("failed to RegisterProviderSessionWithConsumer", err, &map[string]string{"sessionID": strconv.FormatUint(request.SessionId, 10), "consumer": extractedConsumerAddress.String(), "relayNum": strconv.FormatUint(request.RelayNum, 10)}) } From f21bfe97f87d5a6cae4dfac101a8d29b55b1da56 Mon Sep 17 00:00:00 2001 From: omer mishael Date: Thu, 16 Feb 2023 18:37:36 +0200 Subject: [PATCH 033/123] go mod tidy --- go.mod | 3 ++- go.sum | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index e46e563aa1..ebc219172a 100644 --- a/go.mod +++ b/go.mod @@ -48,6 +48,7 @@ require ( github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect github.com/ghodss/yaml v1.0.0 // indirect github.com/gogo/googleapis v1.4.0 // indirect + github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect github.com/pelletier/go-toml/v2 v2.0.5 // indirect golang.org/x/mod v0.7.0 // indirect golang.org/x/tools v0.2.0 // indirect @@ -87,7 +88,7 @@ require ( github.com/deckarep/golang-set v1.8.0 github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f // indirect github.com/dgraph-io/badger/v2 v2.2007.2 // indirect - github.com/dgraph-io/ristretto v0.1.1 // indirect + github.com/dgraph-io/ristretto v0.1.1 github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect github.com/docker/docker v20.10.19+incompatible // indirect github.com/docker/go-units v0.5.0 // indirect diff --git a/go.sum b/go.sum index 8de3c6f721..d8b3cf1cd5 100644 --- a/go.sum +++ b/go.sum @@ -439,7 +439,6 @@ github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f/go.mod h1:xH/i4TFM github.com/dgraph-io/badger/v2 v2.2007.2 h1:EjjK0KqwaFMlPin1ajhP943VPENHJdEz1KLIegjaI3k= github.com/dgraph-io/badger/v2 v2.2007.2/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE= github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= -github.com/dgraph-io/ristretto v0.0.3 h1:jh22xisGBjrEVnRZ1DVTpBVQm0Xndu8sMl0CWDzSIBI= github.com/dgraph-io/ristretto v0.0.3/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= @@ -657,6 +656,7 @@ github.com/golang-jwt/jwt/v4 v4.3.0 h1:kHL1vqdqWNfATmA0FNMdmZNMyZI1U6O31X4rlIPoB github.com/golang-jwt/jwt/v4 v4.3.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= 
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= From 04418342243862d06d5bfcc09c8f2d58a0026fad Mon Sep 17 00:00:00 2001 From: Ran Mishael Date: Thu, 16 Feb 2023 19:22:51 +0100 Subject: [PATCH 034/123] finished normal flow of get session. --- go.mod | 3 +- go.sum | 2 +- .../lavasession/provider_session_manager.go | 14 +++--- protocol/lavasession/provider_types.go | 4 +- protocol/rpcprovider/rpcprovider_server.go | 45 ++++++++++++------- 5 files changed, 43 insertions(+), 25 deletions(-) diff --git a/go.mod b/go.mod index e46e563aa1..ebc219172a 100644 --- a/go.mod +++ b/go.mod @@ -48,6 +48,7 @@ require ( github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect github.com/ghodss/yaml v1.0.0 // indirect github.com/gogo/googleapis v1.4.0 // indirect + github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect github.com/pelletier/go-toml/v2 v2.0.5 // indirect golang.org/x/mod v0.7.0 // indirect golang.org/x/tools v0.2.0 // indirect @@ -87,7 +88,7 @@ require ( github.com/deckarep/golang-set v1.8.0 github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f // indirect github.com/dgraph-io/badger/v2 v2.2007.2 // indirect - github.com/dgraph-io/ristretto v0.1.1 // indirect + github.com/dgraph-io/ristretto v0.1.1 github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect github.com/docker/docker v20.10.19+incompatible // indirect github.com/docker/go-units v0.5.0 // indirect diff --git a/go.sum b/go.sum index 8de3c6f721..d8b3cf1cd5 100644 --- a/go.sum +++ b/go.sum @@ -439,7 +439,6 @@ github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f/go.mod h1:xH/i4TFM github.com/dgraph-io/badger/v2 v2.2007.2 h1:EjjK0KqwaFMlPin1ajhP943VPENHJdEz1KLIegjaI3k= github.com/dgraph-io/badger/v2 v2.2007.2/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE= github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= -github.com/dgraph-io/ristretto v0.0.3 h1:jh22xisGBjrEVnRZ1DVTpBVQm0Xndu8sMl0CWDzSIBI= github.com/dgraph-io/ristretto v0.0.3/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= @@ -657,6 +656,7 @@ github.com/golang-jwt/jwt/v4 v4.3.0 h1:kHL1vqdqWNfATmA0FNMdmZNMyZI1U6O31X4rlIPoB github.com/golang-jwt/jwt/v4 v4.3.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod 
h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= diff --git a/protocol/lavasession/provider_session_manager.go b/protocol/lavasession/provider_session_manager.go index dd650776e7..ed6c3e5caa 100644 --- a/protocol/lavasession/provider_session_manager.go +++ b/protocol/lavasession/provider_session_manager.go @@ -72,7 +72,7 @@ func (psm *ProviderSessionManager) GetSession(address string, epoch uint64, sess return psm.getSingleSessionFromProviderSessionWithConsumer(providerSessionWithConsumer, sessionId, epoch, relayNumber) } -func (psm *ProviderSessionManager) registerNewSession(address string, epoch uint64, sessionId uint64) (*ProviderSessionsWithConsumer, error) { +func (psm *ProviderSessionManager) registerNewSession(address string, epoch uint64, sessionId uint64, vrfPk *utils.VrfPubKey, maxCuForConsumer uint64) (*ProviderSessionsWithConsumer, error) { psm.lock.Lock() defer psm.lock.Unlock() @@ -91,8 +91,11 @@ func (psm *ProviderSessionManager) registerNewSession(address string, epoch uint providerSessionWithConsumer, foundAddressInMap := mapOfProviderSessionsWithConsumer[address] if !foundAddressInMap { providerSessionWithConsumer = &ProviderSessionsWithConsumer{ - consumer: address, - epochData: &ProviderSessionsEpochData{}, // TODO add here all the epoch data get from user + consumer: address, + epochData: &ProviderSessionsEpochData{ + VrfPk: vrfPk, + MaxComputeUnits: maxCuForConsumer, + }, } mapOfProviderSessionsWithConsumer[address] = providerSessionWithConsumer } @@ -100,11 +103,11 @@ func (psm *ProviderSessionManager) registerNewSession(address string, epoch uint } // TODO add vrfPk and Max compute units. -func (psm *ProviderSessionManager) RegisterProviderSessionWithConsumer(address string, epoch uint64, sessionId uint64, relayNumber uint64) (*SingleProviderSession, error) { +func (psm *ProviderSessionManager) RegisterProviderSessionWithConsumer(address string, epoch uint64, sessionId uint64, relayNumber uint64, vrfPk *utils.VrfPubKey, maxCuForConsumer uint64) (*SingleProviderSession, error) { providerSessionWithConsumer, err := psm.IsActiveConsumer(epoch, address) if err != nil { if ConsumerNotRegisteredYet.Is(err) { - providerSessionWithConsumer, err = psm.registerNewSession(address, epoch, sessionId) + providerSessionWithConsumer, err = psm.registerNewSession(address, epoch, sessionId, vrfPk, maxCuForConsumer) if err != nil { return nil, utils.LavaFormatError("RegisterProviderSessionWithConsumer Failed to registerNewSession", err, nil) } @@ -112,7 +115,6 @@ func (psm *ProviderSessionManager) RegisterProviderSessionWithConsumer(address s return nil, utils.LavaFormatError("RegisterProviderSessionWithConsumer Failed", err, nil) } } - // validate relay number?? 
== 1 return psm.getSingleSessionFromProviderSessionWithConsumer(providerSessionWithConsumer, sessionId, epoch, relayNumber) } diff --git a/protocol/lavasession/provider_types.go b/protocol/lavasession/provider_types.go index 2e88230565..7efb915768 100644 --- a/protocol/lavasession/provider_types.go +++ b/protocol/lavasession/provider_types.go @@ -20,7 +20,7 @@ type ProviderSessionsEpochData struct { UsedComputeUnits uint64 MaxComputeUnits uint64 DataReliability *pairingtypes.VRFData - VrfPk utils.VrfPubKey + VrfPk *utils.VrfPubKey } type RPCProviderEndpoint struct { @@ -154,7 +154,7 @@ func (sps *SingleProviderSession) PrepareSessionForUsage(currentCU uint64, relay // finished validating, can add all info. sps.LatestRelayCu = currentCU // 1. update latest sps.CuSum = relayRequestTotalCU // 2. update CuSum, if consumer wants to pay more, let it - sps.RelayNum = sps.RelayNum + 1 + sps.RelayNum = sps.RelayNum + 1 // 3. update RelayNum, we already verified relayNum is valid in GetSession. return nil } diff --git a/protocol/rpcprovider/rpcprovider_server.go b/protocol/rpcprovider/rpcprovider_server.go index eb4b026255..195aa30253 100644 --- a/protocol/rpcprovider/rpcprovider_server.go +++ b/protocol/rpcprovider/rpcprovider_server.go @@ -302,21 +302,7 @@ func (rpcps *RPCProviderServer) verifyRelaySession(ctx context.Context, request // handle non data reliability relays if request.DataReliability == nil { - // regular session, verifies pairing epoch and relay number - singleProviderSession, err = rpcps.providerSessionManager.GetSession(extractedConsumerAddress.String(), uint64(request.BlockHeight), request.SessionId, request.RelayNum) - if err != nil { - if lavasession.ConsumerNotRegisteredYet.Is(err) { - // TODO:: validate consumer address get max cu and vrf data and transfer register. 
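The removed TODO above is what the new getSingleProviderSession flow below takes care of. A minimal sketch of that ordering against a hypothetical narrowed interface; pairingState, handleUnregisteredConsumer and the simplified signatures are assumptions for illustration only, while the underlying calls are the state tracker methods shown in this patch:

package sketch

import (
	"context"
	"fmt"
)

// hypothetical narrow view of the provider state tracker, signatures simplified
type pairingState interface {
	VerifyPairing(ctx context.Context, consumer string, provider string, epoch uint64, chainID string) (valid bool, index int64, err error)
	GetVrfPkAndMaxCuForUser(ctx context.Context, consumer string, chainID string, epoch uint64) (maxCu uint64, err error)
}

// order matters: prove the pairing first, then fetch the per-consumer limits,
// and only then let the caller create local session state for the consumer
func handleUnregisteredConsumer(ctx context.Context, st pairingState, consumer string, provider string, chainID string, epoch uint64) (maxCu uint64, err error) {
	valid, _, err := st.VerifyPairing(ctx, consumer, provider, epoch, chainID)
	if err != nil {
		return 0, err
	}
	if !valid {
		return 0, fmt.Errorf("consumer %s is not paired with provider %s in epoch %d", consumer, provider, epoch)
	}
	maxCu, err = st.GetVrfPkAndMaxCuForUser(ctx, consumer, chainID, epoch)
	if err != nil {
		return 0, err
	}
	// the real flow also receives the vrf public key here and passes both to
	// RegisterProviderSessionWithConsumer
	return maxCu, nil
}
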
- - singleProviderSession, err = rpcps.providerSessionManager.RegisterProviderSessionWithConsumer(extractedConsumerAddress.String(), uint64(request.BlockHeight), request.SessionId, request.RelayNum) - if err != nil { - return nil, nil, utils.LavaFormatError("failed to RegisterProviderSessionWithConsumer", err, &map[string]string{"sessionID": strconv.FormatUint(request.SessionId, 10), "consumer": extractedConsumerAddress.String(), "relayNum": strconv.FormatUint(request.RelayNum, 10)}) - } - } else { - return nil, nil, utils.LavaFormatError("failed to get a provider session", err, &map[string]string{"sessionID": strconv.FormatUint(request.SessionId, 10), "consumer": extractedConsumerAddress.String(), "relayNum": strconv.FormatUint(request.RelayNum, 10)}) - } - } - return singleProviderSession, extractedConsumerAddress, nil + return rpcps.getSingleProviderSessionFlow(ctx, singleProviderSession, request, extractedConsumerAddress) } // data reliability session verifications @@ -331,6 +317,35 @@ func (rpcps *RPCProviderServer) verifyRelaySession(ctx context.Context, request return dataReliabilitySingleProviderSession, extractedConsumerAddress, nil } +func (rpcps *RPCProviderServer) getSingleProviderSessionFlow(ctx context.Context, singleProviderSession *lavasession.SingleProviderSession, request *pairingtypes.RelayRequest, extractedConsumerAddress sdk.AccAddress) (*lavasession.SingleProviderSession, sdk.AccAddress, error) { + consumerAddressString := extractedConsumerAddress.String() + // regular session, verifies pairing epoch and relay number + singleProviderSession, err := rpcps.providerSessionManager.GetSession(consumerAddressString, uint64(request.BlockHeight), request.SessionId, request.RelayNum) + if err != nil { + if lavasession.ConsumerNotRegisteredYet.Is(err) { + valid, _, verifyPairingError := rpcps.stateTracker.VerifyPairing(ctx, consumerAddressString, rpcps.providerAddress.String(), uint64(request.BlockHeight), request.ChainID) + if verifyPairingError != nil { + return nil, nil, utils.LavaFormatError("Failed to VerifyPairing after ConsumerNotRegisteredYet", verifyPairingError, &map[string]string{"sessionID": strconv.FormatUint(request.SessionId, 10), "consumer": consumerAddressString, "provider": rpcps.providerAddress.String(), "relayNum": strconv.FormatUint(request.RelayNum, 10)}) + } + if !valid { + return nil, nil, utils.LavaFormatError("VerifyPairing, this consumer address is not valid with this provider", nil, &map[string]string{"epoch": strconv.FormatInt(request.BlockHeight, 10), "sessionID": strconv.FormatUint(request.SessionId, 10), "consumer": consumerAddressString, "provider": rpcps.providerAddress.String(), "relayNum": strconv.FormatUint(request.RelayNum, 10)}) + } + vrfPk, maxCuForConsumer, getVrfAndMaxCuError := rpcps.stateTracker.GetVrfPkAndMaxCuForUser(ctx, consumerAddressString, request.ChainID, uint64(request.BlockHeight)) + if getVrfAndMaxCuError != nil { + return nil, nil, utils.LavaFormatError("ConsumerNotRegisteredYet: GetVrfPkAndMaxCuForUser failed", getVrfAndMaxCuError, &map[string]string{"epoch": strconv.FormatInt(request.BlockHeight, 10), "sessionID": strconv.FormatUint(request.SessionId, 10), "consumer": consumerAddressString, "provider": rpcps.providerAddress.String(), "relayNum": strconv.FormatUint(request.RelayNum, 10)}) + } + // After validating the consumer we can register it with provider session manager. 
+ singleProviderSession, err = rpcps.providerSessionManager.RegisterProviderSessionWithConsumer(consumerAddressString, uint64(request.BlockHeight), request.SessionId, request.RelayNum, vrfPk, maxCuForConsumer) + if err != nil { + return nil, nil, utils.LavaFormatError("Failed to RegisterProviderSessionWithConsumer", err, &map[string]string{"sessionID": strconv.FormatUint(request.SessionId, 10), "consumer": consumerAddressString, "relayNum": strconv.FormatUint(request.RelayNum, 10)}) + } + } else { + return nil, nil, utils.LavaFormatError("Failed to get a provider session", err, &map[string]string{"sessionID": strconv.FormatUint(request.SessionId, 10), "consumer": consumerAddressString, "relayNum": strconv.FormatUint(request.RelayNum, 10)}) + } + } + return singleProviderSession, extractedConsumerAddress, nil +} + func (rpcps *RPCProviderServer) verifyRelayRequestMetaData(request *pairingtypes.RelayRequest) error { providerAddress := rpcps.providerAddress.String() if request.Provider != providerAddress { From e276d3520f02e9b1c832662fd25bd95764e5b23f Mon Sep 17 00:00:00 2001 From: Ran Mishael Date: Thu, 16 Feb 2023 19:23:01 +0100 Subject: [PATCH 035/123] finished normal flow of get session. --- protocol/rpcprovider/rpcprovider_server.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/protocol/rpcprovider/rpcprovider_server.go b/protocol/rpcprovider/rpcprovider_server.go index 195aa30253..f971209620 100644 --- a/protocol/rpcprovider/rpcprovider_server.go +++ b/protocol/rpcprovider/rpcprovider_server.go @@ -302,7 +302,7 @@ func (rpcps *RPCProviderServer) verifyRelaySession(ctx context.Context, request // handle non data reliability relays if request.DataReliability == nil { - return rpcps.getSingleProviderSessionFlow(ctx, singleProviderSession, request, extractedConsumerAddress) + return rpcps.getSingleProviderSession(ctx, singleProviderSession, request, extractedConsumerAddress) } // data reliability session verifications @@ -317,7 +317,7 @@ func (rpcps *RPCProviderServer) verifyRelaySession(ctx context.Context, request return dataReliabilitySingleProviderSession, extractedConsumerAddress, nil } -func (rpcps *RPCProviderServer) getSingleProviderSessionFlow(ctx context.Context, singleProviderSession *lavasession.SingleProviderSession, request *pairingtypes.RelayRequest, extractedConsumerAddress sdk.AccAddress) (*lavasession.SingleProviderSession, sdk.AccAddress, error) { +func (rpcps *RPCProviderServer) getSingleProviderSession(ctx context.Context, singleProviderSession *lavasession.SingleProviderSession, request *pairingtypes.RelayRequest, extractedConsumerAddress sdk.AccAddress) (*lavasession.SingleProviderSession, sdk.AccAddress, error) { consumerAddressString := extractedConsumerAddress.String() // regular session, verifies pairing epoch and relay number singleProviderSession, err := rpcps.providerSessionManager.GetSession(consumerAddressString, uint64(request.BlockHeight), request.SessionId, request.RelayNum) From 622c770c2c3dff0b18c43127b4829acf2e731e8c Mon Sep 17 00:00:00 2001 From: Ran Mishael Date: Thu, 16 Feb 2023 19:30:04 +0100 Subject: [PATCH 036/123] adding sync loss handling --- protocol/rpcprovider/rpcprovider_server.go | 24 ++++++++++++---------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/protocol/rpcprovider/rpcprovider_server.go b/protocol/rpcprovider/rpcprovider_server.go index f971209620..a1d739097c 100644 --- a/protocol/rpcprovider/rpcprovider_server.go +++ b/protocol/rpcprovider/rpcprovider_server.go @@ -145,8 +145,10 @@ func 
(rpcps *RPCProviderServer) initRelay(ctx context.Context, request *pairingt relayCU := chainMessage.GetServiceApi().ComputeUnits err = relaySession.PrepareSessionForUsage(relayCU, request.CuSum) if err != nil { - // TODO: any error here we need to convert to session out of sync error and return that to the user - return nil, nil, nil, err + // If PrepareSessionForUsage, session lose sync. + // We then wrap the error with the SessionOutOfSyncError that has a unique error code. + // The consumer knows the session lost sync using the code and will create a new session. + return nil, nil, nil, utils.LavaFormatError("Session Out of sync", lavasession.SessionOutOfSyncError, &map[string]string{"PrepareSessionForUsage_Error": err.Error()}) } return relaySession, consumerAddress, chainMessage, nil } @@ -302,7 +304,8 @@ func (rpcps *RPCProviderServer) verifyRelaySession(ctx context.Context, request // handle non data reliability relays if request.DataReliability == nil { - return rpcps.getSingleProviderSession(ctx, singleProviderSession, request, extractedConsumerAddress) + singleProviderSession, err = rpcps.getSingleProviderSession(ctx, singleProviderSession, request, extractedConsumerAddress.String()) + return singleProviderSession, extractedConsumerAddress, err } // data reliability session verifications @@ -317,33 +320,32 @@ func (rpcps *RPCProviderServer) verifyRelaySession(ctx context.Context, request return dataReliabilitySingleProviderSession, extractedConsumerAddress, nil } -func (rpcps *RPCProviderServer) getSingleProviderSession(ctx context.Context, singleProviderSession *lavasession.SingleProviderSession, request *pairingtypes.RelayRequest, extractedConsumerAddress sdk.AccAddress) (*lavasession.SingleProviderSession, sdk.AccAddress, error) { - consumerAddressString := extractedConsumerAddress.String() +func (rpcps *RPCProviderServer) getSingleProviderSession(ctx context.Context, singleProviderSession *lavasession.SingleProviderSession, request *pairingtypes.RelayRequest, consumerAddressString string) (*lavasession.SingleProviderSession, error) { // regular session, verifies pairing epoch and relay number singleProviderSession, err := rpcps.providerSessionManager.GetSession(consumerAddressString, uint64(request.BlockHeight), request.SessionId, request.RelayNum) if err != nil { if lavasession.ConsumerNotRegisteredYet.Is(err) { valid, _, verifyPairingError := rpcps.stateTracker.VerifyPairing(ctx, consumerAddressString, rpcps.providerAddress.String(), uint64(request.BlockHeight), request.ChainID) if verifyPairingError != nil { - return nil, nil, utils.LavaFormatError("Failed to VerifyPairing after ConsumerNotRegisteredYet", verifyPairingError, &map[string]string{"sessionID": strconv.FormatUint(request.SessionId, 10), "consumer": consumerAddressString, "provider": rpcps.providerAddress.String(), "relayNum": strconv.FormatUint(request.RelayNum, 10)}) + return nil, utils.LavaFormatError("Failed to VerifyPairing after ConsumerNotRegisteredYet", verifyPairingError, &map[string]string{"sessionID": strconv.FormatUint(request.SessionId, 10), "consumer": consumerAddressString, "provider": rpcps.providerAddress.String(), "relayNum": strconv.FormatUint(request.RelayNum, 10)}) } if !valid { - return nil, nil, utils.LavaFormatError("VerifyPairing, this consumer address is not valid with this provider", nil, &map[string]string{"epoch": strconv.FormatInt(request.BlockHeight, 10), "sessionID": strconv.FormatUint(request.SessionId, 10), "consumer": consumerAddressString, "provider": 
rpcps.providerAddress.String(), "relayNum": strconv.FormatUint(request.RelayNum, 10)}) + return nil, utils.LavaFormatError("VerifyPairing, this consumer address is not valid with this provider", nil, &map[string]string{"epoch": strconv.FormatInt(request.BlockHeight, 10), "sessionID": strconv.FormatUint(request.SessionId, 10), "consumer": consumerAddressString, "provider": rpcps.providerAddress.String(), "relayNum": strconv.FormatUint(request.RelayNum, 10)}) } vrfPk, maxCuForConsumer, getVrfAndMaxCuError := rpcps.stateTracker.GetVrfPkAndMaxCuForUser(ctx, consumerAddressString, request.ChainID, uint64(request.BlockHeight)) if getVrfAndMaxCuError != nil { - return nil, nil, utils.LavaFormatError("ConsumerNotRegisteredYet: GetVrfPkAndMaxCuForUser failed", getVrfAndMaxCuError, &map[string]string{"epoch": strconv.FormatInt(request.BlockHeight, 10), "sessionID": strconv.FormatUint(request.SessionId, 10), "consumer": consumerAddressString, "provider": rpcps.providerAddress.String(), "relayNum": strconv.FormatUint(request.RelayNum, 10)}) + return nil, utils.LavaFormatError("ConsumerNotRegisteredYet: GetVrfPkAndMaxCuForUser failed", getVrfAndMaxCuError, &map[string]string{"epoch": strconv.FormatInt(request.BlockHeight, 10), "sessionID": strconv.FormatUint(request.SessionId, 10), "consumer": consumerAddressString, "provider": rpcps.providerAddress.String(), "relayNum": strconv.FormatUint(request.RelayNum, 10)}) } // After validating the consumer we can register it with provider session manager. singleProviderSession, err = rpcps.providerSessionManager.RegisterProviderSessionWithConsumer(consumerAddressString, uint64(request.BlockHeight), request.SessionId, request.RelayNum, vrfPk, maxCuForConsumer) if err != nil { - return nil, nil, utils.LavaFormatError("Failed to RegisterProviderSessionWithConsumer", err, &map[string]string{"sessionID": strconv.FormatUint(request.SessionId, 10), "consumer": consumerAddressString, "relayNum": strconv.FormatUint(request.RelayNum, 10)}) + return nil, utils.LavaFormatError("Failed to RegisterProviderSessionWithConsumer", err, &map[string]string{"sessionID": strconv.FormatUint(request.SessionId, 10), "consumer": consumerAddressString, "relayNum": strconv.FormatUint(request.RelayNum, 10)}) } } else { - return nil, nil, utils.LavaFormatError("Failed to get a provider session", err, &map[string]string{"sessionID": strconv.FormatUint(request.SessionId, 10), "consumer": consumerAddressString, "relayNum": strconv.FormatUint(request.RelayNum, 10)}) + return nil, utils.LavaFormatError("Failed to get a provider session", err, &map[string]string{"sessionID": strconv.FormatUint(request.SessionId, 10), "consumer": consumerAddressString, "relayNum": strconv.FormatUint(request.RelayNum, 10)}) } } - return singleProviderSession, extractedConsumerAddress, nil + return singleProviderSession, nil } func (rpcps *RPCProviderServer) verifyRelayRequestMetaData(request *pairingtypes.RelayRequest) error { From 3db63c008c9568a098a66723c375b8cd1f11d587 Mon Sep 17 00:00:00 2001 From: Ran Mishael Date: Thu, 16 Feb 2023 19:33:36 +0100 Subject: [PATCH 037/123] adding consumer blocked handling --- protocol/lavasession/provider_session_manager.go | 6 ++++-- protocol/lavasession/provider_types.go | 8 ++++---- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/protocol/lavasession/provider_session_manager.go b/protocol/lavasession/provider_session_manager.go index ed6c3e5caa..1587b83a4d 100644 --- a/protocol/lavasession/provider_session_manager.go +++ 
b/protocol/lavasession/provider_session_manager.go @@ -43,7 +43,9 @@ func (psm *ProviderSessionManager) IsActiveConsumer(epoch uint64, address string } func (psm *ProviderSessionManager) getSingleSessionFromProviderSessionWithConsumer(providerSessionWithConsumer *ProviderSessionsWithConsumer, sessionId uint64, epoch uint64, relayNumber uint64) (*SingleProviderSession, error) { - // TODO:: we can validate here if consumer is blocked with atomicWriteBlockedEpoch + if providerSessionWithConsumer.atomicReadConsumerBlocked() != notBlockListedConsumer { + return nil, utils.LavaFormatError("This consumer address is blocked.", nil, &map[string]string{"RequestedEpoch": strconv.FormatUint(epoch, 10), "consumer": providerSessionWithConsumer.consumer}) + } // before getting any sessions. singleProviderSession, err := psm.getSessionFromAnActiveConsumer(providerSessionWithConsumer, sessionId, epoch) // after getting session verify relayNum etc.. if err != nil { @@ -128,7 +130,7 @@ func (psm *ProviderSessionManager) getActiveConsumer(epoch uint64, address strin } if mapOfProviderSessionsWithConsumer, ok := psm.sessionsWithAllConsumers[epoch]; ok { if providerSessionWithConsumer, ok := mapOfProviderSessionsWithConsumer[address]; ok { - if providerSessionWithConsumer.atomicReadBlockedEpoch() == blockListedConsumer { // we atomic read block listed so we dont need to lock the provider. (double lock is always a bad idea.) + if providerSessionWithConsumer.atomicReadConsumerBlocked() == blockListedConsumer { // we atomic read block listed so we dont need to lock the provider. (double lock is always a bad idea.) // consumer is blocked. utils.LavaFormatWarning("getActiveConsumer", ConsumerIsBlockListed, &map[string]string{"RequestedEpoch": strconv.FormatUint(epoch, 10), "ConsumerAddress": address}) return nil, ConsumerIsBlockListed diff --git a/protocol/lavasession/provider_types.go b/protocol/lavasession/provider_types.go index 7efb915768..caec23e33e 100644 --- a/protocol/lavasession/provider_types.go +++ b/protocol/lavasession/provider_types.go @@ -65,13 +65,13 @@ type SingleProviderSession struct { PairingEpoch uint64 } -// reads cs.BlockedEpoch atomically, notBlockListedConsumer = 0, blockListedConsumer = 1 -func (pswc *ProviderSessionsWithConsumer) atomicWriteBlockedEpoch(blockStatus uint32) { // rename to blocked consumer not blocked epoch +// reads cs.BlockedEpoch atomically, notBlockListedConsumer = 0, blockListedConsumer = 1 +func (pswc *ProviderSessionsWithConsumer) atomicWriteConsumerBlocked(blockStatus uint32) { // rename to blocked consumer not blocked epoch atomic.StoreUint32(&pswc.isBlockListed, blockStatus) } -// reads cs.BlockedEpoch atomically -func (pswc *ProviderSessionsWithConsumer) atomicReadBlockedEpoch() (blockStatus uint32) { +// reads cs.BlockedEpoch atomically to determine if the consumer is blocked notBlockListedConsumer = 0, blockListedConsumer = 1 +func (pswc *ProviderSessionsWithConsumer) atomicReadConsumerBlocked() (blockStatus uint32) { return atomic.LoadUint32(&pswc.isBlockListed) } From b87ea56d0af317da325bd4e7deb610cfadc7043a Mon Sep 17 00:00:00 2001 From: Ran Mishael Date: Thu, 16 Feb 2023 19:39:11 +0100 Subject: [PATCH 038/123] adding on session done functionality --- protocol/lavasession/provider_session_manager.go | 12 ++++++++---- protocol/lavasession/provider_types.go | 4 ++-- protocol/rpcprovider/rpcprovider_server.go | 4 ++-- 3 files changed, 12 insertions(+), 8 deletions(-) diff --git a/protocol/lavasession/provider_session_manager.go 
b/protocol/lavasession/provider_session_manager.go index 1587b83a4d..5396340c45 100644 --- a/protocol/lavasession/provider_session_manager.go +++ b/protocol/lavasession/provider_session_manager.go @@ -8,7 +8,6 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" "github.com/lavanet/lava/utils" - pairingtypes "github.com/lavanet/lava/x/pairing/types" ) type ProviderSessionManager struct { @@ -167,9 +166,14 @@ func (psm *ProviderSessionManager) OnSessionFailure(singleProviderSession *Singl return nil } -func (psm *ProviderSessionManager) OnSessionDone(singleProviderSession *SingleProviderSession, request *pairingtypes.RelayRequest) (err error) { - // need to handle dataReliability session separately - // store the request as proof +// OnSessionDone unlocks the session gracefully, this happens when session finished successfully +func (psm *ProviderSessionManager) OnSessionDone(singleProviderSession *SingleProviderSession) (err error) { + err = singleProviderSession.VerifyLock() + if err != nil { + return err + } + singleProviderSession.lock.Unlock() + // session finished successfully return nil } diff --git a/protocol/lavasession/provider_types.go b/protocol/lavasession/provider_types.go index caec23e33e..4d00c376a2 100644 --- a/protocol/lavasession/provider_types.go +++ b/protocol/lavasession/provider_types.go @@ -120,7 +120,7 @@ func (sps *SingleProviderSession) SetPairingEpoch(epoch uint64) { } // Verify the SingleProviderSession is locked when getting to this function, if its not locked throw an error -func (sps *SingleProviderSession) verifyLock() error { +func (sps *SingleProviderSession) VerifyLock() error { if sps.lock.TryLock() { // verify. // if we managed to lock throw an error for misuse. defer sps.lock.Unlock() @@ -130,7 +130,7 @@ func (sps *SingleProviderSession) verifyLock() error { } func (sps *SingleProviderSession) PrepareSessionForUsage(currentCU uint64, relayRequestTotalCU uint64) error { - err := sps.verifyLock() // sps is locked + err := sps.VerifyLock() // sps is locked if err != nil { return utils.LavaFormatError("sps.verifyLock() failed in PrepareSessionForUsage", err, nil) } diff --git a/protocol/rpcprovider/rpcprovider_server.go b/protocol/rpcprovider/rpcprovider_server.go index a1d739097c..01504cdccb 100644 --- a/protocol/rpcprovider/rpcprovider_server.go +++ b/protocol/rpcprovider/rpcprovider_server.go @@ -104,7 +104,7 @@ func (rpcps *RPCProviderServer) Relay(ctx context.Context, request *pairingtypes "request.userAddr": consumerAddress.String(), }) } else { - relayError := rpcps.providerSessionManager.OnSessionDone(relaySession, request) + relayError := rpcps.providerSessionManager.OnSessionDone(relaySession) if relayError != nil { err = sdkerrors.Wrapf(relayError, "OnSession Done failure: "+err.Error()) } else { @@ -170,7 +170,7 @@ func (rpcps *RPCProviderServer) RelaySubscribe(request *pairingtypes.RelayReques subscribed, err := rpcps.TryRelaySubscribe(ctx, request, srv, chainMessage, consumerAddress) // this function does not return until subscription ends if subscribed { // meaning we created a subscription and used it for at least a message - relayError := rpcps.providerSessionManager.OnSessionDone(relaySession, request) // TODO: when we pay as u go on subscription this will need to change + relayError := rpcps.providerSessionManager.OnSessionDone(relaySession) // TODO: when we pay as u go on subscription this will need to change if relayError != nil { err = sdkerrors.Wrapf(relayError, "OnSession Done failure: "+err.Error()) } else { From 
50561792cad980802a2f0750e0edf8e68b64d8b9 Mon Sep 17 00:00:00 2001 From: omer mishael Date: Thu, 16 Feb 2023 21:44:47 +0200 Subject: [PATCH 039/123] added rewards claims and tracking --- .../rpcprovider/rewardserver/reward_server.go | 227 +++++++++++++++++- protocol/rpcprovider/rpcprovider.go | 5 +- protocol/statetracker/payment_updater.go | 44 ++++ .../statetracker/provider_state_tracker.go | 19 +- protocol/statetracker/state_query.go | 31 +++ protocol/statetracker/tx_sender.go | 58 ++++- 6 files changed, 368 insertions(+), 16 deletions(-) create mode 100644 protocol/statetracker/payment_updater.go diff --git a/protocol/rpcprovider/rewardserver/reward_server.go b/protocol/rpcprovider/rewardserver/reward_server.go index 4cb6e23181..6f45b98006 100644 --- a/protocol/rpcprovider/rewardserver/reward_server.go +++ b/protocol/rpcprovider/rewardserver/reward_server.go @@ -2,17 +2,33 @@ package rewardserver import ( "context" + "fmt" + "math/rand" "strconv" "sync" + "sync/atomic" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/lavanet/lava/protocol/lavaprotocol" "github.com/lavanet/lava/utils" pairingtypes "github.com/lavanet/lava/x/pairing/types" + terderminttypes "github.com/tendermint/tendermint/abci/types" ) const ( StaleEpochDistance = 2 ) +type PaymentRequest struct { + CU uint64 + BlockHeightDeadline int64 + Amount sdk.Coin + Client sdk.AccAddress + UniqueIdentifier uint64 + Description string + ChainID string +} + type ConsumerRewards struct { epoch uint64 consumer string @@ -24,6 +40,7 @@ func (csrw *ConsumerRewards) PrepareRewardsForClaim() (retProofs []*pairingtypes for _, proof := range csrw.proofs { retProofs = append(retProofs, proof) } + // add data reliability proofs dataReliabilityProofs := len(csrw.dataReliabilityProofs) if len(retProofs) > 0 && dataReliabilityProofs > 0 { for idx := range retProofs { @@ -42,14 +59,19 @@ type EpochRewards struct { } type RewardServer struct { - rewardsTxSender RewardsTxSender - lock sync.RWMutex - rewards map[uint64]*EpochRewards + rewardsTxSender RewardsTxSender + lock sync.RWMutex + rewards map[uint64]*EpochRewards + serverID uint64 + expectedPayments []PaymentRequest + totalCUServiced uint64 + totalCUPaid uint64 } type RewardsTxSender interface { - TxRelayPayment(ctx context.Context, relayRequests []*pairingtypes.RelayRequest) + TxRelayPayment(ctx context.Context, relayRequests []*pairingtypes.RelayRequest, description string) error GetEpochSize(ctx context.Context) (uint64, error) + EarliestBlockInMemory(ctx context.Context) (uint64, error) } func (rws *RewardServer) SendNewProof(ctx context.Context, proof *pairingtypes.RelayRequest, epoch uint64, consumerAddr string) (existingCU uint64, updatedWithProof bool) { @@ -106,7 +128,103 @@ func (rws *RewardServer) SendNewDataReliabilityProof(ctx context.Context, dataRe func (rws *RewardServer) UpdateEpoch(epoch uint64) { ctx := context.Background() - rws.gatherRewardsForClaim(ctx, epoch) + _ = rws.sendRewardsClaim(ctx, epoch) + _, _ = rws.identifyMissingPayments(ctx) +} + +func (rws *RewardServer) sendRewardsClaim(ctx context.Context, epoch uint64) error { + rewardsToClaim, err := rws.gatherRewardsForClaim(ctx, epoch) + if err != nil { + return err + } + for _, relay := range rewardsToClaim { + consumerBytes, err := lavaprotocol.ExtractSignerAddress(relay) + if err != nil { + utils.LavaFormatError("invalid consumer address extraction from relay", err, &map[string]string{"relay": fmt.Sprintf("%+v", relay)}) + continue + } + consumerAddr, err := sdk.AccAddressFromHex(consumerBytes.String()) + 
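// the consumer recovered from the relay signature is stored as the expected payer below,
// so RemoveExpectedPayment can later match the on-chain payment by CU, client, session id and chain id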
if err != nil { + utils.LavaFormatError("invalid consumer address extraction from relay", err, &map[string]string{"relay": fmt.Sprintf("%+v", relay), "consumerBytes": consumerBytes.String()}) + continue + } + expectedPay := PaymentRequest{ChainID: relay.ChainID, CU: relay.CuSum, BlockHeightDeadline: relay.BlockHeight, Amount: sdk.Coin{}, Client: consumerAddr, UniqueIdentifier: relay.SessionId, Description: strconv.FormatUint(rws.serverID, 10)} + rws.addExpectedPayment(expectedPay) + rws.updateCUServiced(relay.CuSum) + } + err = rws.rewardsTxSender.TxRelayPayment(ctx, rewardsToClaim, strconv.FormatUint(rws.serverID, 10)) + if err != nil { + return utils.LavaFormatError("failed sending rewards claim", err, nil) + } + return nil +} + +func (rws *RewardServer) identifyMissingPayments(ctx context.Context) (missingPayments bool, err error) { + lastBlockInMemory, err := rws.rewardsTxSender.EarliestBlockInMemory(ctx) + if err != nil { + return + } + rws.lock.Lock() + defer rws.lock.Unlock() + + var updatedExpectedPayments []PaymentRequest + + for idx, expectedPay := range rws.expectedPayments { + // Exclude and log missing payments + if uint64(expectedPay.BlockHeightDeadline) < lastBlockInMemory { + utils.LavaFormatError("Identified Missing Payment", nil, + &map[string]string{ + "expectedPay.CU": strconv.FormatUint(expectedPay.CU, 10), + "expectedPay.BlockHeightDeadline": strconv.FormatInt(expectedPay.BlockHeightDeadline, 10), + "lastBlockInMemory": strconv.FormatUint(lastBlockInMemory, 10), + }) + missingPayments = true + continue + } + + // Include others + updatedExpectedPayments = append(updatedExpectedPayments, rws.expectedPayments[idx]) + } + + // Update expectedPayment + rws.expectedPayments = updatedExpectedPayments + + // can be modified in this race window, so we double-check + + utils.LavaFormatInfo("Service report", &map[string]string{ + "total CU serviced": strconv.FormatUint(rws.cUServiced(), 10), + "total CU that got paid": strconv.FormatUint(rws.paidCU(), 10), + }) + return +} + +func (rws *RewardServer) cUServiced() uint64 { + return atomic.LoadUint64(&rws.totalCUServiced) +} + +func (rws *RewardServer) paidCU() uint64 { + return atomic.LoadUint64(&rws.totalCUPaid) +} + +func (rws *RewardServer) addExpectedPayment(expectedPay PaymentRequest) { + rws.lock.Lock() // this can be a separate lock, if we have performance issues + defer rws.lock.Unlock() + rws.expectedPayments = append(rws.expectedPayments, expectedPay) +} + +func (rws *RewardServer) RemoveExpectedPayment(paidCUToFInd uint64, expectedClient sdk.AccAddress, blockHeight int64, uniqueID uint64, chainID string) bool { + rws.lock.Lock() // this can be a separate lock, if we have performance issues + defer rws.lock.Unlock() + for idx, expectedPayment := range rws.expectedPayments { + // TODO: make sure the payment is not too far from expected block, expectedPayment.BlockHeightDeadline == blockHeight + if expectedPayment.CU == paidCUToFInd && expectedPayment.Client.Equals(expectedClient) && uniqueID == expectedPayment.UniqueIdentifier && chainID == expectedPayment.ChainID { + // found payment for expected payment + rws.expectedPayments[idx] = rws.expectedPayments[len(rws.expectedPayments)-1] // replace the element at delete index with the last one + rws.expectedPayments = rws.expectedPayments[:len(rws.expectedPayments)-1] // remove last element + return true + } + } + return false } func (rws *RewardServer) gatherRewardsForClaim(ctx context.Context, current_epoch uint64) (rewardsForClaim []*pairingtypes.RelayRequest, errRet 
error) { @@ -142,18 +260,111 @@ func (rws *RewardServer) gatherRewardsForClaim(ctx context.Context, current_epoc } func (rws *RewardServer) SubscribeStarted(consumer string, epoch uint64, subscribeID string) { - // hold off reward claims for subscription while this is still active + // TODO: hold off reward claims for subscription while this is still active } func (rws *RewardServer) SubscribeEnded(consumer string, epoch uint64, subscribeID string) { - // can collect now + // TODO: can collect now +} + +func (rws *RewardServer) updateCUServiced(cu uint64) { + rws.lock.Lock() + defer rws.lock.Unlock() + currentCU := atomic.LoadUint64(&rws.totalCUServiced) + atomic.StoreUint64(&rws.totalCUServiced, currentCU+cu) +} + +func (rws *RewardServer) updateCUPaid(cu uint64) { + rws.lock.Lock() + defer rws.lock.Unlock() + currentCU := atomic.LoadUint64(&rws.totalCUPaid) + atomic.StoreUint64(&rws.totalCUPaid, currentCU+cu) +} + +func (rws *RewardServer) Description() string { + return strconv.FormatUint(rws.serverID, 10) +} + +func (rws *RewardServer) PaymentHandler(payment *PaymentRequest) { + serverID, err := strconv.ParseUint(payment.Description, 10, 64) + if err != nil { + utils.LavaFormatError("failed parsing description as server id", err, &map[string]string{"description": payment.Description}) + return + } + if serverID == rws.serverID { + rws.updateCUPaid(payment.CU) + removedPayment := rws.RemoveExpectedPayment(payment.CU, payment.Client, payment.BlockHeightDeadline, payment.UniqueIdentifier, payment.ChainID) + if !removedPayment { + utils.LavaFormatWarning("tried removing payment that wasn;t expected", nil, &map[string]string{"payment": fmt.Sprintf("%+v", payment)}) + } + } } func NewRewardServer(rewardsTxSender RewardsTxSender) *RewardServer { // - rws := &RewardServer{} + rws := &RewardServer{totalCUServiced: 0, totalCUPaid: 0} + rws.serverID = uint64(rand.Int63()) rws.rewardsTxSender = rewardsTxSender + rws.expectedPayments = []PaymentRequest{} // TODO: load this from persistency rws.rewards = map[uint64]*EpochRewards{} return rws } + +func BuildPaymentFromRelayPaymentEvent(event terderminttypes.Event, block int64) (*PaymentRequest, error) { + attributes := map[string]string{} + for _, attribute := range event.Attributes { + attributes[string(attribute.Key)] = string(attribute.Value) + } + chainID, ok := attributes["chainID"] + if !ok { + return nil, utils.LavaFormatError("failed building PaymentRequest from relay_payment event", nil, &attributes) + } + mint, ok := attributes["Mint"] + if !ok { + return nil, utils.LavaFormatError("failed building PaymentRequest from relay_payment event", nil, &attributes) + } + mintedCoins, err := sdk.ParseCoinNormalized(mint) + if err != nil { + return nil, err + } + cu_str, ok := attributes["CU"] + if !ok { + return nil, utils.LavaFormatError("failed building PaymentRequest from relay_payment event", nil, &attributes) + } + cu, err := strconv.ParseUint(cu_str, 10, 64) + if err != nil { + return nil, err + } + consumer, ok := attributes["client"] + if !ok { + return nil, utils.LavaFormatError("failed building PaymentRequest from relay_payment event", nil, &attributes) + } + consumerAddr, err := sdk.AccAddressFromBech32(consumer) + if err != nil { + return nil, err + } + + uniqueIdentifier, ok := attributes["uniqueIdentifier"] + if !ok { + return nil, utils.LavaFormatError("failed building PaymentRequest from relay_payment event", nil, &attributes) + } + uniqueID, err := strconv.ParseUint(uniqueIdentifier, 10, 64) + if err != nil { + return nil, err + } + 
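// the descriptionString attribute carries the serverID that sendRewardsClaim attached to the claim,
// which lets PaymentHandler skip payments that belong to a different reward server instance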
description, ok := attributes["descriptionString"] + if !ok { + return nil, utils.LavaFormatError("failed building PaymentRequest from relay_payment event", nil, &attributes) + } + payment := &PaymentRequest{ + CU: cu, + BlockHeightDeadline: block, + Amount: mintedCoins, + Client: consumerAddr, + Description: description, + UniqueIdentifier: uniqueID, + ChainID: chainID, + } + return payment, nil +} diff --git a/protocol/rpcprovider/rpcprovider.go b/protocol/rpcprovider/rpcprovider.go index 5959ce4de5..7602ff51cc 100644 --- a/protocol/rpcprovider/rpcprovider.go +++ b/protocol/rpcprovider/rpcprovider.go @@ -37,7 +37,7 @@ type ProviderStateTrackerInf interface { RegisterChainParserForSpecUpdates(ctx context.Context, chainParser chainlib.ChainParser, chainID string) error RegisterReliabilityManagerForVoteUpdates(ctx context.Context, voteUpdatable statetracker.VoteUpdatable, endpointP *lavasession.RPCProviderEndpoint) RegisterForEpochUpdates(ctx context.Context, epochUpdatable statetracker.EpochUpdatable) - TxRelayPayment(ctx context.Context, relayRequests []*pairingtypes.RelayRequest) + TxRelayPayment(ctx context.Context, relayRequests []*pairingtypes.RelayRequest, description string) error SendVoteReveal(voteID string, vote *reliabilitymanager.VoteData) error SendVoteCommitment(voteID string, vote *reliabilitymanager.VoteData) error LatestBlock() int64 @@ -45,6 +45,8 @@ type ProviderStateTrackerInf interface { VerifyPairing(ctx context.Context, consumerAddress string, providerAddress string, epoch uint64, chainID string) (valid bool, index int64, err error) GetProvidersCountForConsumer(ctx context.Context, consumerAddress string, epoch uint64, chainID string) (uint32, error) GetEpochSize(ctx context.Context) (uint64, error) + EarliestBlockInMemory(ctx context.Context) (uint64, error) + RegisterPaymentUpdatableForPayments(ctx context.Context, paymentUpdatable statetracker.PaymentUpdatable) } type RPCProvider struct { @@ -65,6 +67,7 @@ func (rpcp *RPCProvider) Start(ctx context.Context, txFactory tx.Factory, client // single reward server rewardServer := rewardserver.NewRewardServer(providerStateTracker) rpcp.providerStateTracker.RegisterForEpochUpdates(ctx, rewardServer) + rpcp.providerStateTracker.RegisterPaymentUpdatableForPayments(ctx, rewardServer) keyName, err := sigs.GetKeyName(clientCtx) if err != nil { utils.LavaFormatFatal("failed getting key name from clientCtx", err, nil) diff --git a/protocol/statetracker/payment_updater.go b/protocol/statetracker/payment_updater.go new file mode 100644 index 0000000000..04d427e7fb --- /dev/null +++ b/protocol/statetracker/payment_updater.go @@ -0,0 +1,44 @@ +package statetracker + +import ( + "github.com/lavanet/lava/protocol/rpcprovider/rewardserver" + "golang.org/x/net/context" +) + +const ( + CallbackKeyForPaymentUpdate = "payment-update" +) + +type PaymentUpdatable interface { + PaymentHandler(*rewardserver.PaymentRequest) + Description() string +} + +type PaymentUpdater struct { + paymentUpdatables map[string]*PaymentUpdatable + stateQuery *ProviderStateQuery +} + +func NewPaymentUpdater(stateQuery *ProviderStateQuery) *PaymentUpdater { + return &PaymentUpdater{paymentUpdatables: map[string]*PaymentUpdatable{}, stateQuery: stateQuery} +} + +func (pu *PaymentUpdater) RegisterPaymentUpdatable(ctx context.Context, paymentUpdatable *PaymentUpdatable) { + pu.paymentUpdatables[(*paymentUpdatable).Description()] = paymentUpdatable +} + +func (pu *PaymentUpdater) UpdaterKey() string { + return CallbackKeyForPaymentUpdate +} + +func (pu 
*PaymentUpdater) Update(latestBlock int64) { + ctx := context.Background() + payments, err := pu.stateQuery.PaymentEvents(ctx, latestBlock) + if err != nil { + return + } + for _, payment := range payments { + updatable := pu.paymentUpdatables[payment.Description] + (*updatable).PaymentHandler(payment) + } +} diff --git a/protocol/statetracker/provider_state_tracker.go b/protocol/statetracker/provider_state_tracker.go index 15be0c2c65..e995cc8825 100644 --- a/protocol/statetracker/provider_state_tracker.go +++ b/protocol/statetracker/provider_state_tracker.go @@ -65,8 +65,19 @@ func (pst *ProviderStateTracker) RegisterReliabilityManagerForVoteUpdates(ctx co voteUpdater.RegisterVoteUpdatable(ctx, &voteUpdatable, endpoint) } -func (pst *ProviderStateTracker) TxRelayPayment(ctx context.Context, relayRequests []*pairingtypes.RelayRequest) { - // TODO: implement +func (pst *ProviderStateTracker) RegisterPaymentUpdatableForPayments(ctx context.Context, paymentUpdatable PaymentUpdatable) { + payemntUpdater := NewPaymentUpdater(pst.stateQuery) + payemntUpdaterRaw := pst.StateTracker.RegisterForUpdates(ctx, payemntUpdater) + payemntUpdater, ok := payemntUpdaterRaw.(*PaymentUpdater) + if !ok { + utils.LavaFormatFatal("invalid updater type returned from RegisterForUpdates", nil, &map[string]string{"updater": fmt.Sprintf("%+v", payemntUpdaterRaw)}) + } + + payemntUpdater.RegisterPaymentUpdatable(ctx, &paymentUpdatable) +} + +func (pst *ProviderStateTracker) TxRelayPayment(ctx context.Context, relayRequests []*pairingtypes.RelayRequest, description string) error { + return pst.txSender.TxRelayPayment(ctx, relayRequests, description) } func (pst *ProviderStateTracker) SendVoteReveal(voteID string, vote *reliabilitymanager.VoteData) error { @@ -95,3 +106,7 @@ func (pst *ProviderStateTracker) GetProvidersCountForConsumer(ctx context.Contex func (pst *ProviderStateTracker) GetEpochSize(ctx context.Context) (uint64, error) { return pst.stateQuery.GetEpochSize(ctx) } + +func (pst *ProviderStateTracker) EarliestBlockInMemory(ctx context.Context) (uint64, error) { + return pst.stateQuery.EarliestBlockInMemory(ctx) +} diff --git a/protocol/statetracker/state_query.go b/protocol/statetracker/state_query.go index 86ecb244a9..aed1c05416 100644 --- a/protocol/statetracker/state_query.go +++ b/protocol/statetracker/state_query.go @@ -9,6 +9,7 @@ import ( "github.com/cosmos/cosmos-sdk/client" "github.com/dgraph-io/ristretto" reliabilitymanager "github.com/lavanet/lava/protocol/rpcprovider/reliabilitymanager" + "github.com/lavanet/lava/protocol/rpcprovider/rewardserver" "github.com/lavanet/lava/utils" conflicttypes "github.com/lavanet/lava/x/conflict/types" epochstoragetypes "github.com/lavanet/lava/x/epochstorage/types" @@ -159,6 +160,28 @@ func (psq *ProviderStateQuery) CurrentEpochStart(ctx context.Context) (uint64, e } +func (psq *ProviderStateQuery) PaymentEvents(ctx context.Context, latestBlock int64) (payments []*rewardserver.PaymentRequest, err error) { + blockResults, err := psq.clientCtx.Client.BlockResults(ctx, &latestBlock) + if err != nil { + return nil, err + } + transactionResults := blockResults.TxsResults + for _, tx := range transactionResults { + events := tx.Events + for _, event := range events { + if event.Type == "lava_relay_payment" { + payment, err := rewardserver.BuildPaymentFromRelayPaymentEvent(event, latestBlock) + if err != nil { + return nil, utils.LavaFormatError("failed relay_payment_event parsing", err, &map[string]string{"event": fmt.Sprintf("%v", event)}) + } + 
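// every payment parsed here is handed back through PaymentUpdater.Update, which dispatches it by
// description to the matching registered updatable (a description with no registered updatable
// yields a nil entry in that map lookup)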
utils.LavaFormatDebug("relay_payment_event", &map[string]string{"payment": fmt.Sprintf("%+v", payment)}) + payments = append(payments, payment) + } + } + } + return payments, nil +} + func (psq *ProviderStateQuery) VoteEvents(ctx context.Context, latestBlock int64) (votes []*reliabilitymanager.VoteParams, err error) { blockResults, err := psq.clientCtx.Client.BlockResults(ctx, &latestBlock) if err != nil { @@ -250,3 +273,11 @@ func (psq *ProviderStateQuery) GetEpochSize(ctx context.Context) (uint64, error) } return res.Params.EpochBlocks, nil } + +func (psq *ProviderStateQuery) EarliestBlockInMemory(ctx context.Context) (uint64, error) { + res, err := psq.EpochStorageQueryClient.EpochDetails(ctx, &epochstoragetypes.QueryGetEpochDetailsRequest{}) + if err != nil { + return 0, err + } + return res.EpochDetails.EarliestStart, nil +} diff --git a/protocol/statetracker/tx_sender.go b/protocol/statetracker/tx_sender.go index e976ab80af..ba73d791fd 100644 --- a/protocol/statetracker/tx_sender.go +++ b/protocol/statetracker/tx_sender.go @@ -10,10 +10,12 @@ import ( "github.com/cosmos/cosmos-sdk/client" "github.com/cosmos/cosmos-sdk/client/tx" sdk "github.com/cosmos/cosmos-sdk/types" + typestx "github.com/cosmos/cosmos-sdk/types/tx" "github.com/lavanet/lava/protocol/common" "github.com/lavanet/lava/protocol/rpcprovider/reliabilitymanager" "github.com/lavanet/lava/utils" conflicttypes "github.com/lavanet/lava/x/conflict/types" + pairingtypes "github.com/lavanet/lava/x/pairing/types" ) const ( @@ -34,7 +36,37 @@ func NewTxSender(ctx context.Context, clientCtx client.Context, txFactory tx.Fac return ts, nil } -func (ts *TxSender) SimulateAndBroadCastTxWithRetryOnSeqMismatch(msg sdk.Msg) error { +func (ts *TxSender) checkProfitability(simResult *typestx.SimulateResponse, gasUsed uint64, txFactory tx.Factory) error { + txEvents := simResult.GetResult().Events + lavaReward := sdk.NewCoin("ulava", sdk.NewInt(0)) + for _, txEvent := range txEvents { + if txEvent.Type == "lava_relay_payment" { + for _, attribute := range txEvent.Attributes { + if string(attribute.Key) == "BasePay" { + lavaRewardTemp, err := sdk.ParseCoinNormalized(string(attribute.Value)) + if err != nil { + return utils.LavaFormatError("failed parsing simulation result", nil, &map[string]string{"attribute": string(attribute.Value)}) + } + lavaReward = lavaReward.Add(lavaRewardTemp) + break + } + } + } + } + + txFactory = txFactory.WithGas(gasUsed) + + gasFee := txFactory.GasPrices()[0] + gasFee.Amount = gasFee.Amount.MulInt64(int64(gasUsed)) + lavaRewardDec := sdk.NewDecCoinFromCoin(lavaReward) + + if gasFee.IsGTE(lavaRewardDec) { + return utils.LavaFormatError("lava_relay_payment claim is not profitable", nil, &map[string]string{"gasFee": gasFee.String(), "lava_reward:": lavaRewardDec.String()}) + } + return nil +} + +func (ts *TxSender) SimulateAndBroadCastTxWithRetryOnSeqMismatch(msg sdk.Msg, checkProfitability bool) error { txfactory := ts.txFactory.WithGasPrices(defaultGasPrice) txfactory = txfactory.WithGasAdjustment(defaultGasAdjustment) if err := msg.ValidateBasic(); err != nil { @@ -46,11 +78,18 @@ func (ts *TxSender) SimulateAndBroadCastTxWithRetryOnSeqMismatch(msg sdk.Msg) er return err } - _, gasUsed, err := tx.CalculateGas(clientCtx, txfactory, msg) + simResult, gasUsed, err := tx.CalculateGas(clientCtx, txfactory, msg) if err != nil { return err } + if checkProfitability { + err := ts.checkProfitability(simResult, gasUsed, txfactory) + if err != nil { + return err + } + } + txfactory = txfactory.WithGas(gasUsed) myWriter := 
bytes.Buffer{} hasSequenceError := false @@ -157,7 +196,7 @@ func (ts *ConsumerTxSender) TxConflictDetection(ctx context.Context, finalizatio // TODO: retry logic for sequence number mismatch // TODO: make sure we are not spamming the same conflicts, previous code only detecs relay by relay, it has no state tracking wether it reported already msg := conflicttypes.NewMsgDetection(ts.clientCtx.FromAddress.String(), finalizationConflict, responseConflict, sameProviderConflict) - err := ts.SimulateAndBroadCastTxWithRetryOnSeqMismatch(msg) + err := ts.SimulateAndBroadCastTxWithRetryOnSeqMismatch(msg, false) if err != nil { return utils.LavaFormatError("discrepancyChecker - SimulateAndBroadCastTx Failed", err, nil) } @@ -177,9 +216,18 @@ func NewProviderTxSender(ctx context.Context, clientCtx client.Context, txFactor return ts, nil } +func (pts *ProviderTxSender) TxRelayPayment(ctx context.Context, relayRequests []*pairingtypes.RelayRequest, description string) error { + msg := pairingtypes.NewMsgRelayPayment(pts.clientCtx.FromAddress.String(), relayRequests, description) + err := pts.SimulateAndBroadCastTxWithRetryOnSeqMismatch(msg, true) + if err != nil { + return utils.LavaFormatError("relay_payment - sending Tx Failed", err, nil) + } + return nil +} + func (pts *ProviderTxSender) SendVoteReveal(voteID string, vote *reliabilitymanager.VoteData) error { msg := conflicttypes.NewMsgConflictVoteReveal(pts.clientCtx.FromAddress.String(), voteID, vote.Nonce, vote.RelayDataHash) - err := pts.SimulateAndBroadCastTxWithRetryOnSeqMismatch(msg) + err := pts.SimulateAndBroadCastTxWithRetryOnSeqMismatch(msg, false) if err != nil { return utils.LavaFormatError("SendVoteReveal - SimulateAndBroadCastTx Failed", err, nil) } @@ -188,7 +236,7 @@ func (pts *ProviderTxSender) SendVoteReveal(voteID string, vote *reliabilitymana func (pts *ProviderTxSender) SendVoteCommitment(voteID string, vote *reliabilitymanager.VoteData) error { msg := conflicttypes.NewMsgConflictVoteCommit(pts.clientCtx.FromAddress.String(), voteID, vote.CommitHash) - err := pts.SimulateAndBroadCastTxWithRetryOnSeqMismatch(msg) + err := pts.SimulateAndBroadCastTxWithRetryOnSeqMismatch(msg, false) if err != nil { return utils.LavaFormatError("SendVoteCommitment - SimulateAndBroadCastTx Failed", err, nil) } From e7a87584dc1cf5b1204eb48d36a513a72c836f61 Mon Sep 17 00:00:00 2001 From: omer mishael Date: Thu, 16 Feb 2023 21:50:09 +0200 Subject: [PATCH 040/123] bug fix --- protocol/rpcprovider/rpcprovider_server.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/protocol/rpcprovider/rpcprovider_server.go b/protocol/rpcprovider/rpcprovider_server.go index 01504cdccb..5b71ebad96 100644 --- a/protocol/rpcprovider/rpcprovider_server.go +++ b/protocol/rpcprovider/rpcprovider_server.go @@ -304,7 +304,7 @@ func (rpcps *RPCProviderServer) verifyRelaySession(ctx context.Context, request // handle non data reliability relays if request.DataReliability == nil { - singleProviderSession, err = rpcps.getSingleProviderSession(ctx, singleProviderSession, request, extractedConsumerAddress.String()) + singleProviderSession, err = rpcps.getSingleProviderSession(ctx, request, extractedConsumerAddress.String()) return singleProviderSession, extractedConsumerAddress, err } @@ -320,7 +320,7 @@ func (rpcps *RPCProviderServer) verifyRelaySession(ctx context.Context, request return dataReliabilitySingleProviderSession, extractedConsumerAddress, nil } -func (rpcps *RPCProviderServer) getSingleProviderSession(ctx context.Context, singleProviderSession 
*lavasession.SingleProviderSession, request *pairingtypes.RelayRequest, consumerAddressString string) (*lavasession.SingleProviderSession, error) { +func (rpcps *RPCProviderServer) getSingleProviderSession(ctx context.Context, request *pairingtypes.RelayRequest, consumerAddressString string) (*lavasession.SingleProviderSession, error) { // regular session, verifies pairing epoch and relay number singleProviderSession, err := rpcps.providerSessionManager.GetSession(consumerAddressString, uint64(request.BlockHeight), request.SessionId, request.RelayNum) if err != nil { From 1e125323cd159698685f91550f4a13a675a1dab0 Mon Sep 17 00:00:00 2001 From: omer mishael Date: Fri, 17 Feb 2023 00:51:27 +0200 Subject: [PATCH 041/123] refactored chainproxy into parsableChainProxy so we can implement chain fetcher with the existing classes --- protocol/chainlib/chainlib.go | 64 +++++++++++++++++++++++++----- protocol/chainlib/grpc.go | 4 +- protocol/chainlib/jsonRPC.go | 4 +- protocol/chainlib/rest.go | 4 +- protocol/chainlib/tendermintRPC.go | 8 ++-- 5 files changed, 64 insertions(+), 20 deletions(-) diff --git a/protocol/chainlib/chainlib.go b/protocol/chainlib/chainlib.go index 8f4f285059..d5aa95a331 100644 --- a/protocol/chainlib/chainlib.go +++ b/protocol/chainlib/chainlib.go @@ -63,6 +63,26 @@ type ChainMessage interface { GetRPCMessage() parser.RPCInput } +type ParsableChainMessage interface { + GetServiceApi() *spectypes.ServiceApi + GetInterface() *spectypes.ApiInterface + GetRPCMessage() parser.RPCInput + SetParsingData(interface{}) +} + +// in case no parsing data is needed +type DefaultParsableChainMessage struct { + ChainMessage +} + +func (dpcm DefaultParsableChainMessage) SetParsingData(interface{}) { + // just to implement interface - do nothing +} + +func NewDefaultParsableChainMessage(chainMessage ChainMessage) ParsableChainMessage { + return DefaultParsableChainMessage{ChainMessage: chainMessage} +} + type RelaySender interface { SendRelay( ctx context.Context, @@ -82,18 +102,42 @@ type ChainProxy interface { SendNodeMsg(ctx context.Context, ch chan interface{}, chainMessage ChainMessage) (relayReply *pairingtypes.RelayReply, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) // has to be thread safe, reuse code within ParseMsg as common functionality } +type DefaultChainProxy struct { + ParsableChainProxy +} + +func (dcp *DefaultChainProxy) SendNodeMsg(ctx context.Context, ch chan interface{}, chainMessage ChainMessage) (relayReply *pairingtypes.RelayReply, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) { + return dcp.ParsableChainProxy.SendNodeMsg(ctx, ch, NewDefaultParsableChainMessage(chainMessage)) +} + +func DefaultChainProxyFromParsable(parsableChainProxy ParsableChainProxy) *DefaultChainProxy { + dcp := &DefaultChainProxy{ParsableChainProxy: parsableChainProxy} + return dcp +} + +type ParsableChainProxy interface { + SendNodeMsg(ctx context.Context, ch chan interface{}, parsableChainMessage ParsableChainMessage) (relayReply *pairingtypes.RelayReply, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) // has to be thread safe, reuse code within ParseMsg as common functionality +} + func GetChainProxy(ctx context.Context, nConns uint, rpcProviderEndpoint *lavasession.RPCProviderEndpoint, averageBlockTime time.Duration) (ChainProxy, error) { - switch rpcProviderEndpoint.ApiInterface { - case spectypes.APIInterfaceJsonRPC: - return NewJrpcChainProxy(ctx, nConns, rpcProviderEndpoint, 
averageBlockTime) - case spectypes.APIInterfaceTendermintRPC: - return NewtendermintRpcChainProxy(ctx, nConns, rpcProviderEndpoint, averageBlockTime) - case spectypes.APIInterfaceRest: - return NewRestChainProxy(ctx, nConns, rpcProviderEndpoint, averageBlockTime) - case spectypes.APIInterfaceGrpc: - return NewGrpcChainProxy(ctx, nConns, rpcProviderEndpoint, averageBlockTime) + createParsableChainProxy := func() (ParsableChainProxy, error) { + switch rpcProviderEndpoint.ApiInterface { + case spectypes.APIInterfaceJsonRPC: + return NewJrpcChainProxy(ctx, nConns, rpcProviderEndpoint, averageBlockTime) + case spectypes.APIInterfaceTendermintRPC: + return NewtendermintRpcChainProxy(ctx, nConns, rpcProviderEndpoint, averageBlockTime) + case spectypes.APIInterfaceRest: + return NewRestChainProxy(ctx, nConns, rpcProviderEndpoint, averageBlockTime) + case spectypes.APIInterfaceGrpc: + return NewGrpcChainProxy(ctx, nConns, rpcProviderEndpoint, averageBlockTime) + } + return nil, fmt.Errorf("chain proxy for apiInterface (%s) not found", rpcProviderEndpoint.ApiInterface) + } + parsableChainProxy, err := createParsableChainProxy() + if err != nil { + return nil, err } - return nil, fmt.Errorf("chain proxy for apiInterface (%s) not found", rpcProviderEndpoint.ApiInterface) + return DefaultChainProxyFromParsable(parsableChainProxy), nil } func LocalNodeTimePerCu(cu uint64) time.Duration { diff --git a/protocol/chainlib/grpc.go b/protocol/chainlib/grpc.go index 1b1ab8d454..a5b4aed13f 100644 --- a/protocol/chainlib/grpc.go +++ b/protocol/chainlib/grpc.go @@ -226,7 +226,7 @@ type GrpcChainProxy struct { conn *chainproxy.GRPCConnector } -func NewGrpcChainProxy(ctx context.Context, nConns uint, rpcProviderEndpoint *lavasession.RPCProviderEndpoint, averageBlockTime time.Duration) (ChainProxy, error) { +func NewGrpcChainProxy(ctx context.Context, nConns uint, rpcProviderEndpoint *lavasession.RPCProviderEndpoint, averageBlockTime time.Duration) (ParsableChainProxy, error) { if len(rpcProviderEndpoint.NodeUrl) == 0 { return nil, utils.LavaFormatError("rpcProviderEndpoint.NodeUrl list is empty missing node url", nil, &map[string]string{"chainID": rpcProviderEndpoint.ChainID, "ApiInterface": rpcProviderEndpoint.ApiInterface}) } @@ -240,7 +240,7 @@ func NewGrpcChainProxy(ctx context.Context, nConns uint, rpcProviderEndpoint *la return cp, nil } -func (cp *GrpcChainProxy) SendNodeMsg(ctx context.Context, ch chan interface{}, chainMessage ChainMessage) (relayReply *pairingtypes.RelayReply, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) { +func (cp *GrpcChainProxy) SendNodeMsg(ctx context.Context, ch chan interface{}, chainMessage ParsableChainMessage) (relayReply *pairingtypes.RelayReply, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) { if ch != nil { return nil, "", nil, utils.LavaFormatError("Subscribe is not allowed on rest", nil, nil) } diff --git a/protocol/chainlib/jsonRPC.go b/protocol/chainlib/jsonRPC.go index b0b68c197f..d3c829eea9 100644 --- a/protocol/chainlib/jsonRPC.go +++ b/protocol/chainlib/jsonRPC.go @@ -327,7 +327,7 @@ type JrpcChainProxy struct { conn *chainproxy.Connector } -func NewJrpcChainProxy(ctx context.Context, nConns uint, rpcProviderEndpoint *lavasession.RPCProviderEndpoint, averageBlockTime time.Duration) (ChainProxy, error) { +func NewJrpcChainProxy(ctx context.Context, nConns uint, rpcProviderEndpoint *lavasession.RPCProviderEndpoint, averageBlockTime time.Duration) (ParsableChainProxy, error) { if 
len(rpcProviderEndpoint.NodeUrl) == 0 { return nil, utils.LavaFormatError("rpcProviderEndpoint.NodeUrl list is empty missing node url", nil, &map[string]string{"chainID": rpcProviderEndpoint.ChainID, "ApiInterface": rpcProviderEndpoint.ApiInterface}) } @@ -348,7 +348,7 @@ func (cp *JrpcChainProxy) start(ctx context.Context, nConns uint, nodeUrl string return nil } -func (cp *JrpcChainProxy) SendNodeMsg(ctx context.Context, ch chan interface{}, chainMessage ChainMessage) (relayReply *pairingtypes.RelayReply, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) { +func (cp *JrpcChainProxy) SendNodeMsg(ctx context.Context, ch chan interface{}, chainMessage ParsableChainMessage) (relayReply *pairingtypes.RelayReply, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) { // Get node rpc, err := cp.conn.GetRpc(ctx, true) if err != nil { diff --git a/protocol/chainlib/rest.go b/protocol/chainlib/rest.go index d024d7033e..8cb0919303 100644 --- a/protocol/chainlib/rest.go +++ b/protocol/chainlib/rest.go @@ -276,7 +276,7 @@ type RestChainProxy struct { nodeUrl string } -func NewRestChainProxy(ctx context.Context, nConns uint, rpcProviderEndpoint *lavasession.RPCProviderEndpoint, averageBlockTime time.Duration) (ChainProxy, error) { +func NewRestChainProxy(ctx context.Context, nConns uint, rpcProviderEndpoint *lavasession.RPCProviderEndpoint, averageBlockTime time.Duration) (ParsableChainProxy, error) { if len(rpcProviderEndpoint.NodeUrl) == 0 { return nil, utils.LavaFormatError("rpcProviderEndpoint.NodeUrl list is empty missing node url", nil, &map[string]string{"chainID": rpcProviderEndpoint.ChainID, "ApiInterface": rpcProviderEndpoint.ApiInterface}) } @@ -287,7 +287,7 @@ func NewRestChainProxy(ctx context.Context, nConns uint, rpcProviderEndpoint *la return rcp, nil } -func (rcp *RestChainProxy) SendNodeMsg(ctx context.Context, ch chan interface{}, chainMessage ChainMessage) (relayReply *pairingtypes.RelayReply, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) { +func (rcp *RestChainProxy) SendNodeMsg(ctx context.Context, ch chan interface{}, chainMessage ParsableChainMessage) (relayReply *pairingtypes.RelayReply, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) { if ch != nil { return nil, "", nil, utils.LavaFormatError("Subscribe is not allowed on rest", nil, nil) } diff --git a/protocol/chainlib/tendermintRPC.go b/protocol/chainlib/tendermintRPC.go index 3d0224f69a..f2e07b36f0 100644 --- a/protocol/chainlib/tendermintRPC.go +++ b/protocol/chainlib/tendermintRPC.go @@ -400,7 +400,7 @@ type tendermintRpcChainProxy struct { httpNodeUrl string } -func NewtendermintRpcChainProxy(ctx context.Context, nConns uint, rpcProviderEndpoint *lavasession.RPCProviderEndpoint, averageBlockTime time.Duration) (ChainProxy, error) { +func NewtendermintRpcChainProxy(ctx context.Context, nConns uint, rpcProviderEndpoint *lavasession.RPCProviderEndpoint, averageBlockTime time.Duration) (ParsableChainProxy, error) { var httpUrl string var websocketUrl string if len(rpcProviderEndpoint.NodeUrl) == 0 { @@ -414,7 +414,7 @@ func NewtendermintRpcChainProxy(ctx context.Context, nConns uint, rpcProviderEnd return cp, cp.start(ctx, nConns, websocketUrl) } -func (cp *tendermintRpcChainProxy) SendNodeMsg(ctx context.Context, ch chan interface{}, chainMessage ChainMessage) (relayReply *pairingtypes.RelayReply, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) { +func (cp 
*tendermintRpcChainProxy) SendNodeMsg(ctx context.Context, ch chan interface{}, chainMessage ParsableChainMessage) (relayReply *pairingtypes.RelayReply, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) { rpc, err := cp.conn.GetRpc(ctx, true) if err != nil { return nil, "", nil, err @@ -433,7 +433,7 @@ func (cp *tendermintRpcChainProxy) SendNodeMsg(ctx context.Context, ch chan inte return cp.SendRPC(ctx, &nodeMessage, ch, chainMessage) } -func (cp *tendermintRpcChainProxy) SendURI(ctx context.Context, nodeMessage *rpcInterfaceMessages.TendermintrpcMessage, ch chan interface{}, chainMessage ChainMessage) (relayReply *pairingtypes.RelayReply, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) { +func (cp *tendermintRpcChainProxy) SendURI(ctx context.Context, nodeMessage *rpcInterfaceMessages.TendermintrpcMessage, ch chan interface{}, chainMessage ParsableChainMessage) (relayReply *pairingtypes.RelayReply, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) { // check if the input channel is not nil if ch != nil { // return an error if the channel is not nil @@ -489,7 +489,7 @@ func (cp *tendermintRpcChainProxy) SendURI(ctx context.Context, nodeMessage *rpc } // SendRPC sends Tendermint HTTP or WebSockets call -func (cp *tendermintRpcChainProxy) SendRPC(ctx context.Context, nodeMessage *rpcInterfaceMessages.TendermintrpcMessage, ch chan interface{}, chainMessage ChainMessage) (relayReply *pairingtypes.RelayReply, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) { +func (cp *tendermintRpcChainProxy) SendRPC(ctx context.Context, nodeMessage *rpcInterfaceMessages.TendermintrpcMessage, ch chan interface{}, chainMessage ParsableChainMessage) (relayReply *pairingtypes.RelayReply, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) { // Get rpc connection from the connection pool rpc, err := cp.conn.GetRpc(ctx, true) if err != nil { From edb2d69accf029e2d0fee40ac3b2b64b53edd78c Mon Sep 17 00:00:00 2001 From: omer mishael Date: Fri, 17 Feb 2023 01:32:32 +0200 Subject: [PATCH 042/123] revert, parsableChainMessage isn't needed --- protocol/chainlib/chainlib.go | 64 +++++------------------------- protocol/chainlib/common.go | 9 ++--- protocol/chainlib/grpc.go | 4 +- protocol/chainlib/jsonRPC.go | 4 +- protocol/chainlib/rest.go | 4 +- protocol/chainlib/tendermintRPC.go | 8 ++-- 6 files changed, 24 insertions(+), 69 deletions(-) diff --git a/protocol/chainlib/chainlib.go b/protocol/chainlib/chainlib.go index d5aa95a331..8f4f285059 100644 --- a/protocol/chainlib/chainlib.go +++ b/protocol/chainlib/chainlib.go @@ -63,26 +63,6 @@ type ChainMessage interface { GetRPCMessage() parser.RPCInput } -type ParsableChainMessage interface { - GetServiceApi() *spectypes.ServiceApi - GetInterface() *spectypes.ApiInterface - GetRPCMessage() parser.RPCInput - SetParsingData(interface{}) -} - -// in case no parsing data is needed -type DefaultParsableChainMessage struct { - ChainMessage -} - -func (dpcm DefaultParsableChainMessage) SetParsingData(interface{}) { - // just to implement interface - do nothing -} - -func NewDefaultParsableChainMessage(chainMessage ChainMessage) ParsableChainMessage { - return DefaultParsableChainMessage{ChainMessage: chainMessage} -} - type RelaySender interface { SendRelay( ctx context.Context, @@ -102,42 +82,18 @@ type ChainProxy interface { SendNodeMsg(ctx context.Context, ch chan interface{}, chainMessage ChainMessage) (relayReply 
*pairingtypes.RelayReply, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) // has to be thread safe, reuse code within ParseMsg as common functionality } -type DefaultChainProxy struct { - ParsableChainProxy -} - -func (dcp *DefaultChainProxy) SendNodeMsg(ctx context.Context, ch chan interface{}, chainMessage ChainMessage) (relayReply *pairingtypes.RelayReply, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) { - return dcp.ParsableChainProxy.SendNodeMsg(ctx, ch, NewDefaultParsableChainMessage(chainMessage)) -} - -func DefaultChainProxyFromParsable(parsableChainProxy ParsableChainProxy) *DefaultChainProxy { - dcp := &DefaultChainProxy{ParsableChainProxy: parsableChainProxy} - return dcp -} - -type ParsableChainProxy interface { - SendNodeMsg(ctx context.Context, ch chan interface{}, parsableChainMessage ParsableChainMessage) (relayReply *pairingtypes.RelayReply, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) // has to be thread safe, reuse code within ParseMsg as common functionality -} - func GetChainProxy(ctx context.Context, nConns uint, rpcProviderEndpoint *lavasession.RPCProviderEndpoint, averageBlockTime time.Duration) (ChainProxy, error) { - createParsableChainProxy := func() (ParsableChainProxy, error) { - switch rpcProviderEndpoint.ApiInterface { - case spectypes.APIInterfaceJsonRPC: - return NewJrpcChainProxy(ctx, nConns, rpcProviderEndpoint, averageBlockTime) - case spectypes.APIInterfaceTendermintRPC: - return NewtendermintRpcChainProxy(ctx, nConns, rpcProviderEndpoint, averageBlockTime) - case spectypes.APIInterfaceRest: - return NewRestChainProxy(ctx, nConns, rpcProviderEndpoint, averageBlockTime) - case spectypes.APIInterfaceGrpc: - return NewGrpcChainProxy(ctx, nConns, rpcProviderEndpoint, averageBlockTime) - } - return nil, fmt.Errorf("chain proxy for apiInterface (%s) not found", rpcProviderEndpoint.ApiInterface) - } - parsableChainProxy, err := createParsableChainProxy() - if err != nil { - return nil, err + switch rpcProviderEndpoint.ApiInterface { + case spectypes.APIInterfaceJsonRPC: + return NewJrpcChainProxy(ctx, nConns, rpcProviderEndpoint, averageBlockTime) + case spectypes.APIInterfaceTendermintRPC: + return NewtendermintRpcChainProxy(ctx, nConns, rpcProviderEndpoint, averageBlockTime) + case spectypes.APIInterfaceRest: + return NewRestChainProxy(ctx, nConns, rpcProviderEndpoint, averageBlockTime) + case spectypes.APIInterfaceGrpc: + return NewGrpcChainProxy(ctx, nConns, rpcProviderEndpoint, averageBlockTime) } - return DefaultChainProxyFromParsable(parsableChainProxy), nil + return nil, fmt.Errorf("chain proxy for apiInterface (%s) not found", rpcProviderEndpoint.ApiInterface) } func LocalNodeTimePerCu(cu uint64) time.Duration { diff --git a/protocol/chainlib/common.go b/protocol/chainlib/common.go index b7df53e5cb..49c823b633 100644 --- a/protocol/chainlib/common.go +++ b/protocol/chainlib/common.go @@ -20,11 +20,10 @@ const ( ) type parsedMessage struct { - serviceApi *spectypes.ServiceApi - apiInterface *spectypes.ApiInterface - averageBlockTime int64 - requestedBlock int64 - msg interface{} + serviceApi *spectypes.ServiceApi + apiInterface *spectypes.ApiInterface + requestedBlock int64 + msg interface{} } type BaseChainProxy struct { diff --git a/protocol/chainlib/grpc.go b/protocol/chainlib/grpc.go index a5b4aed13f..1b1ab8d454 100644 --- a/protocol/chainlib/grpc.go +++ b/protocol/chainlib/grpc.go @@ -226,7 +226,7 @@ type GrpcChainProxy struct { conn 
*chainproxy.GRPCConnector } -func NewGrpcChainProxy(ctx context.Context, nConns uint, rpcProviderEndpoint *lavasession.RPCProviderEndpoint, averageBlockTime time.Duration) (ParsableChainProxy, error) { +func NewGrpcChainProxy(ctx context.Context, nConns uint, rpcProviderEndpoint *lavasession.RPCProviderEndpoint, averageBlockTime time.Duration) (ChainProxy, error) { if len(rpcProviderEndpoint.NodeUrl) == 0 { return nil, utils.LavaFormatError("rpcProviderEndpoint.NodeUrl list is empty missing node url", nil, &map[string]string{"chainID": rpcProviderEndpoint.ChainID, "ApiInterface": rpcProviderEndpoint.ApiInterface}) } @@ -240,7 +240,7 @@ func NewGrpcChainProxy(ctx context.Context, nConns uint, rpcProviderEndpoint *la return cp, nil } -func (cp *GrpcChainProxy) SendNodeMsg(ctx context.Context, ch chan interface{}, chainMessage ParsableChainMessage) (relayReply *pairingtypes.RelayReply, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) { +func (cp *GrpcChainProxy) SendNodeMsg(ctx context.Context, ch chan interface{}, chainMessage ChainMessage) (relayReply *pairingtypes.RelayReply, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) { if ch != nil { return nil, "", nil, utils.LavaFormatError("Subscribe is not allowed on rest", nil, nil) } diff --git a/protocol/chainlib/jsonRPC.go b/protocol/chainlib/jsonRPC.go index d3c829eea9..b0b68c197f 100644 --- a/protocol/chainlib/jsonRPC.go +++ b/protocol/chainlib/jsonRPC.go @@ -327,7 +327,7 @@ type JrpcChainProxy struct { conn *chainproxy.Connector } -func NewJrpcChainProxy(ctx context.Context, nConns uint, rpcProviderEndpoint *lavasession.RPCProviderEndpoint, averageBlockTime time.Duration) (ParsableChainProxy, error) { +func NewJrpcChainProxy(ctx context.Context, nConns uint, rpcProviderEndpoint *lavasession.RPCProviderEndpoint, averageBlockTime time.Duration) (ChainProxy, error) { if len(rpcProviderEndpoint.NodeUrl) == 0 { return nil, utils.LavaFormatError("rpcProviderEndpoint.NodeUrl list is empty missing node url", nil, &map[string]string{"chainID": rpcProviderEndpoint.ChainID, "ApiInterface": rpcProviderEndpoint.ApiInterface}) } @@ -348,7 +348,7 @@ func (cp *JrpcChainProxy) start(ctx context.Context, nConns uint, nodeUrl string return nil } -func (cp *JrpcChainProxy) SendNodeMsg(ctx context.Context, ch chan interface{}, chainMessage ParsableChainMessage) (relayReply *pairingtypes.RelayReply, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) { +func (cp *JrpcChainProxy) SendNodeMsg(ctx context.Context, ch chan interface{}, chainMessage ChainMessage) (relayReply *pairingtypes.RelayReply, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) { // Get node rpc, err := cp.conn.GetRpc(ctx, true) if err != nil { diff --git a/protocol/chainlib/rest.go b/protocol/chainlib/rest.go index 8cb0919303..d024d7033e 100644 --- a/protocol/chainlib/rest.go +++ b/protocol/chainlib/rest.go @@ -276,7 +276,7 @@ type RestChainProxy struct { nodeUrl string } -func NewRestChainProxy(ctx context.Context, nConns uint, rpcProviderEndpoint *lavasession.RPCProviderEndpoint, averageBlockTime time.Duration) (ParsableChainProxy, error) { +func NewRestChainProxy(ctx context.Context, nConns uint, rpcProviderEndpoint *lavasession.RPCProviderEndpoint, averageBlockTime time.Duration) (ChainProxy, error) { if len(rpcProviderEndpoint.NodeUrl) == 0 { return nil, utils.LavaFormatError("rpcProviderEndpoint.NodeUrl list is empty missing node url", nil, 
&map[string]string{"chainID": rpcProviderEndpoint.ChainID, "ApiInterface": rpcProviderEndpoint.ApiInterface}) } @@ -287,7 +287,7 @@ func NewRestChainProxy(ctx context.Context, nConns uint, rpcProviderEndpoint *la return rcp, nil } -func (rcp *RestChainProxy) SendNodeMsg(ctx context.Context, ch chan interface{}, chainMessage ParsableChainMessage) (relayReply *pairingtypes.RelayReply, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) { +func (rcp *RestChainProxy) SendNodeMsg(ctx context.Context, ch chan interface{}, chainMessage ChainMessage) (relayReply *pairingtypes.RelayReply, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) { if ch != nil { return nil, "", nil, utils.LavaFormatError("Subscribe is not allowed on rest", nil, nil) } diff --git a/protocol/chainlib/tendermintRPC.go b/protocol/chainlib/tendermintRPC.go index f2e07b36f0..3d0224f69a 100644 --- a/protocol/chainlib/tendermintRPC.go +++ b/protocol/chainlib/tendermintRPC.go @@ -400,7 +400,7 @@ type tendermintRpcChainProxy struct { httpNodeUrl string } -func NewtendermintRpcChainProxy(ctx context.Context, nConns uint, rpcProviderEndpoint *lavasession.RPCProviderEndpoint, averageBlockTime time.Duration) (ParsableChainProxy, error) { +func NewtendermintRpcChainProxy(ctx context.Context, nConns uint, rpcProviderEndpoint *lavasession.RPCProviderEndpoint, averageBlockTime time.Duration) (ChainProxy, error) { var httpUrl string var websocketUrl string if len(rpcProviderEndpoint.NodeUrl) == 0 { @@ -414,7 +414,7 @@ func NewtendermintRpcChainProxy(ctx context.Context, nConns uint, rpcProviderEnd return cp, cp.start(ctx, nConns, websocketUrl) } -func (cp *tendermintRpcChainProxy) SendNodeMsg(ctx context.Context, ch chan interface{}, chainMessage ParsableChainMessage) (relayReply *pairingtypes.RelayReply, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) { +func (cp *tendermintRpcChainProxy) SendNodeMsg(ctx context.Context, ch chan interface{}, chainMessage ChainMessage) (relayReply *pairingtypes.RelayReply, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) { rpc, err := cp.conn.GetRpc(ctx, true) if err != nil { return nil, "", nil, err @@ -433,7 +433,7 @@ func (cp *tendermintRpcChainProxy) SendNodeMsg(ctx context.Context, ch chan inte return cp.SendRPC(ctx, &nodeMessage, ch, chainMessage) } -func (cp *tendermintRpcChainProxy) SendURI(ctx context.Context, nodeMessage *rpcInterfaceMessages.TendermintrpcMessage, ch chan interface{}, chainMessage ParsableChainMessage) (relayReply *pairingtypes.RelayReply, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) { +func (cp *tendermintRpcChainProxy) SendURI(ctx context.Context, nodeMessage *rpcInterfaceMessages.TendermintrpcMessage, ch chan interface{}, chainMessage ChainMessage) (relayReply *pairingtypes.RelayReply, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) { // check if the input channel is not nil if ch != nil { // return an error if the channel is not nil @@ -489,7 +489,7 @@ func (cp *tendermintRpcChainProxy) SendURI(ctx context.Context, nodeMessage *rpc } // SendRPC sends Tendermint HTTP or WebSockets call -func (cp *tendermintRpcChainProxy) SendRPC(ctx context.Context, nodeMessage *rpcInterfaceMessages.TendermintrpcMessage, ch chan interface{}, chainMessage ParsableChainMessage) (relayReply *pairingtypes.RelayReply, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) { 
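With this revert in place a provider-side caller is back to the single ChainProxy / ChainMessage pair. As a rough usage sketch only (the helper name sendOnce and its arguments are illustrative, assumed to come from the provider config and ChainParser.ParseMsg, and are not part of this patch):

// illustrative helper, not part of the patch: one non-subscription relay through ChainProxy
func sendOnce(ctx context.Context, nConns uint, endpoint *lavasession.RPCProviderEndpoint, averageBlockTime time.Duration, chainMessage chainlib.ChainMessage) ([]byte, error) {
	chainProxy, err := chainlib.GetChainProxy(ctx, nConns, endpoint, averageBlockTime)
	if err != nil {
		return nil, err
	}
	// a nil channel means a regular relay rather than a subscription
	reply, _, _, err := chainProxy.SendNodeMsg(ctx, nil, chainMessage)
	if err != nil {
		return nil, err
	}
	return reply.Data, nil
}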
+func (cp *tendermintRpcChainProxy) SendRPC(ctx context.Context, nodeMessage *rpcInterfaceMessages.TendermintrpcMessage, ch chan interface{}, chainMessage ChainMessage) (relayReply *pairingtypes.RelayReply, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) { // Get rpc connection from the connection pool rpc, err := cp.conn.GetRpc(ctx, true) if err != nil { From 283b139ae6613e1ba47c219e041c59f764bfcc45 Mon Sep 17 00:00:00 2001 From: omer mishael Date: Fri, 17 Feb 2023 04:53:40 +0200 Subject: [PATCH 043/123] continued work on chainFetcher WIP --- protocol/chainlib/chain_fetcher.go | 61 +++++++++++++++++-- protocol/chainlib/chainlib.go | 9 ++- protocol/chainlib/chainproxy/common.go | 28 +++++++++ .../rpcInterfaceMessages/grpcMessage.go | 52 ++++++++++++++-- protocol/chainlib/common.go | 24 ++++++++ protocol/chainlib/grpc.go | 24 ++++---- protocol/chainlib/jsonRPC.go | 6 +- protocol/chainlib/rest.go | 15 ++--- protocol/chainlib/tendermintRPC.go | 10 +-- protocol/rpcprovider/rpcprovider.go | 2 +- 10 files changed, 192 insertions(+), 39 deletions(-) diff --git a/protocol/chainlib/chain_fetcher.go b/protocol/chainlib/chain_fetcher.go index 84a4207bdb..158ee66b42 100644 --- a/protocol/chainlib/chain_fetcher.go +++ b/protocol/chainlib/chain_fetcher.go @@ -5,6 +5,11 @@ import ( "fmt" "github.com/cosmos/cosmos-sdk/client" + "github.com/lavanet/lava/protocol/chainlib/chainproxy" + "github.com/lavanet/lava/protocol/lavasession" + "github.com/lavanet/lava/relayer/parser" + "github.com/lavanet/lava/utils" + spectypes "github.com/lavanet/lava/x/spec/types" ) const ( @@ -12,20 +17,66 @@ const ( ) type ChainFetcher struct { - chainProxy ChainProxy + endpoint *lavasession.RPCProviderEndpoint + chainProxy ChainProxy + chainParser ChainParser } func (cf *ChainFetcher) FetchLatestBlockNum(ctx context.Context) (int64, error) { + + serviceApi, ok := cf.chainParser.GetSpecApiByTag(spectypes.GET_BLOCKNUM) + if !ok { + return spectypes.NOT_APPLICABLE, utils.LavaFormatError(spectypes.GET_BLOCKNUM+" tag function not found", nil, &map[string]string{"chainID": cf.endpoint.ChainID, "APIInterface": cf.endpoint.ApiInterface}) + } + CraftChainMessage(serviceApi, cf.endpoint) return 0, fmt.Errorf("not implemented") } func (cf *ChainFetcher) FetchBlockHashByNum(ctx context.Context, blockNum int64) (string, error) { - return "", fmt.Errorf("not implemented") + serviceApi, ok := cf.chainParser.GetSpecApiByTag(spectypes.GET_BLOCK_BY_NUM) + if !ok { + return "", utils.LavaFormatError(spectypes.GET_BLOCK_BY_NUM+" tag function not found", nil, &map[string]string{"chainID": cf.endpoint.ChainID, "APIInterface": cf.endpoint.ApiInterface}) + } + if serviceApi.GetParsing().FunctionTemplate == "" { + return "", utils.LavaFormatError(spectypes.GET_BLOCK_BY_NUM+" missing function template", nil, &map[string]string{"chainID": cf.endpoint.ChainID, "APIInterface": cf.endpoint.ApiInterface}) + } + path := serviceApi.Name + data := []byte(fmt.Sprintf(serviceApi.GetParsing().FunctionTemplate, blockNum)) + chainMessage, err := cf.chainParser.ParseMsg(path, data, "") + if err != nil { + return "", utils.LavaFormatError(spectypes.GET_BLOCK_BY_NUM+" failed parseMsg on function template", err, &map[string]string{"chainID": cf.endpoint.ChainID, "APIInterface": cf.endpoint.ApiInterface}) + } + reply, _, _, err := cf.chainProxy.SendNodeMsg(ctx, nil, chainMessage) + if err != nil { + return "", utils.LavaFormatError(spectypes.GET_BLOCK_BY_NUM+" failed sending chainMessage", err, &map[string]string{"chainID": 
cf.endpoint.ChainID, "APIInterface": cf.endpoint.ApiInterface}) + } + var parserInput parser.RPCInput + respData := reply.Data + if customParsingMessage, ok := chainMessage.(chainproxy.CustomParsingMessage); ok { + parserInput, err = customParsingMessage.NewParsableRPCInput(respData) + if err != nil { + return "", utils.LavaFormatError(spectypes.GET_BLOCK_BY_NUM+" failed creating NewParsableRPCInput from CustomParsingMessage", err, &map[string]string{"chainID": cf.endpoint.ChainID, "APIInterface": cf.endpoint.ApiInterface}) + } + } else { + parserInput = chainproxy.DefaultParsableRPCInput(respData) + } + + blockData, err := parser.ParseMessageResponse(parserInput, serviceApi.Parsing.ResultParsing) + if err != nil { + return "", err + } + + // blockData is an interface array with the parsed result in index 0. + // we know to expect a string result for a hash. + ret, ok := blockData[spectypes.DEFAULT_PARSED_RESULT_INDEX].(string) + if !ok { + return "", utils.LavaFormatError("Failed to Convert blockData[spectypes.DEFAULT_PARSED_RESULT_INDEX].(string)", nil, &map[string]string{"blockData": fmt.Sprintf("%v", blockData[spectypes.DEFAULT_PARSED_RESULT_INDEX])}) + } + return ret, nil } -func NewChainFetcher(ctx context.Context, chainProxy ChainProxy /*here needs some more params, maybe chainParser*/) *ChainFetcher { - // save here the information needed to fetch the latest block and it's hash - cf := &ChainFetcher{chainProxy: chainProxy} +func NewChainFetcher(ctx context.Context, chainProxy ChainProxy, chainParser ChainParser, endpoint *lavasession.RPCProviderEndpoint) *ChainFetcher { + cf := &ChainFetcher{chainProxy: chainProxy, chainParser: chainParser, endpoint: endpoint} return cf } diff --git a/protocol/chainlib/chainlib.go b/protocol/chainlib/chainlib.go index 8f4f285059..c8a48b240a 100644 --- a/protocol/chainlib/chainlib.go +++ b/protocol/chainlib/chainlib.go @@ -54,12 +54,17 @@ type ChainParser interface { SetSpec(spec spectypes.Spec) DataReliabilityParams() (enabled bool, dataReliabilityThreshold uint32) ChainBlockStats() (allowedBlockLagForQosSync int64, averageBlockTime time.Duration, blockDistanceForFinalizedData uint32, blocksInFinalizationProof uint32) + GetSpecApiByTag(tag string) (specApi spectypes.ServiceApi, existed bool) } type ChainMessage interface { + RequestedBlock() int64 + ChainMessageForSend +} + +type ChainMessageForSend interface { GetServiceApi() *spectypes.ServiceApi GetInterface() *spectypes.ApiInterface - RequestedBlock() int64 GetRPCMessage() parser.RPCInput } @@ -79,7 +84,7 @@ type ChainListener interface { } type ChainProxy interface { - SendNodeMsg(ctx context.Context, ch chan interface{}, chainMessage ChainMessage) (relayReply *pairingtypes.RelayReply, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) // has to be thread safe, reuse code within ParseMsg as common functionality + SendNodeMsg(ctx context.Context, ch chan interface{}, chainMessage ChainMessageForSend) (relayReply *pairingtypes.RelayReply, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) // has to be thread safe, reuse code within ParseMsg as common functionality } func GetChainProxy(ctx context.Context, nConns uint, rpcProviderEndpoint *lavasession.RPCProviderEndpoint, averageBlockTime time.Duration) (ChainProxy, error) { diff --git a/protocol/chainlib/chainproxy/common.go b/protocol/chainlib/chainproxy/common.go index 51f3e12411..28894682fe 100644 --- a/protocol/chainlib/chainproxy/common.go +++ 
b/protocol/chainlib/chainproxy/common.go @@ -1,6 +1,34 @@ package chainproxy +import ( + "encoding/json" + + "github.com/lavanet/lava/relayer/parser" +) + const ( LavaErrorCode = 555 InternalErrorString = "Internal Error" ) + +type CustomParsingMessage interface { + NewParsableRPCInput(input json.RawMessage) (parser.RPCInput, error) +} + +type DefaultRPCInput struct { + Result json.RawMessage +} + +func (dri DefaultRPCInput) GetParams() interface{} { + return nil +} +func (dri DefaultRPCInput) GetResult() json.RawMessage { + return dri.Result +} +func (dri DefaultRPCInput) ParseBlock(inp string) (int64, error) { + return parser.ParseDefaultBlockParameter(inp) +} + +func DefaultParsableRPCInput(input json.RawMessage) parser.RPCInput { + return DefaultRPCInput{Result: input} +} diff --git a/protocol/chainlib/chainproxy/rpcInterfaceMessages/grpcMessage.go b/protocol/chainlib/chainproxy/rpcInterfaceMessages/grpcMessage.go index c1cb520951..824162233d 100644 --- a/protocol/chainlib/chainproxy/rpcInterfaceMessages/grpcMessage.go +++ b/protocol/chainlib/chainproxy/rpcInterfaceMessages/grpcMessage.go @@ -7,7 +7,9 @@ import ( "github.com/fullstorydev/grpcurl" "github.com/gogo/status" + "github.com/golang/protobuf/proto" "github.com/jhump/protoreflect/desc" + "github.com/jhump/protoreflect/dynamic" "github.com/jhump/protoreflect/grpcreflect" "github.com/lavanet/lava/relayer/parser" "github.com/lavanet/lava/utils" @@ -15,23 +17,47 @@ import ( ) type GrpcMessage struct { - Msg []byte - Path string + Msg []byte + Path string + methodDesc *desc.MethodDescriptor + formatter grpcurl.Formatter } // GetParams will be deprecated after we remove old client // Currently needed because of parser.RPCInput interface -func (cp GrpcMessage) GetParams() interface{} { +func (gm GrpcMessage) GetParams() interface{} { return nil } // GetResult will be deprecated after we remove old client // Currently needed because of parser.RPCInput interface -func (cp GrpcMessage) GetResult() json.RawMessage { +func (gm GrpcMessage) GetResult() json.RawMessage { return nil } -func (cp GrpcMessage) ParseBlock(inp string) (int64, error) { +func (gm GrpcMessage) NewParsableRPCInput(input json.RawMessage) (parser.RPCInput, error) { + msgFactory := dynamic.NewMessageFactoryWithDefaults() + if gm.methodDesc == nil { + return nil, utils.LavaFormatError("fdoes not have a methodDescriptor set in grpcMessage", nil, nil) + } + msg := msgFactory.NewMessage(gm.methodDesc.GetOutputType()) + if err := proto.Unmarshal(input, msg); err != nil { + return nil, utils.LavaFormatError("failed to unmarshal GetResult", err, nil) + } + + formattedInput, err := gm.formatter(msg) + if err != nil { + return nil, utils.LavaFormatError("m.formatter(msg)", err, nil) + } + return ParsableRPCInput{Result: []byte(formattedInput)}, nil +} + +func (gm *GrpcMessage) SetParsingData(methodDesc *desc.MethodDescriptor, formatter grpcurl.Formatter) { + gm.formatter = formatter + gm.methodDesc = methodDesc +} + +func (gm GrpcMessage) ParseBlock(inp string) (int64, error) { return parser.ParseDefaultBlockParameter(inp) } @@ -96,3 +122,19 @@ func ParseSymbol(svcAndMethod string) (string, string) { } return svcAndMethod[:pos], svcAndMethod[pos+1:] } + +type ParsableRPCInput struct { + Result json.RawMessage +} + +func (pri ParsableRPCInput) ParseBlock(inp string) (int64, error) { + return parser.ParseDefaultBlockParameter(inp) +} + +func (pri ParsableRPCInput) GetParams() interface{} { + return nil +} + +func (pri ParsableRPCInput) GetResult() json.RawMessage { + return 
pri.Result +} diff --git a/protocol/chainlib/common.go b/protocol/chainlib/common.go index 49c823b633..2a995f585c 100644 --- a/protocol/chainlib/common.go +++ b/protocol/chainlib/common.go @@ -6,10 +6,12 @@ import ( "net/url" "regexp" "strings" + "sync" "time" "github.com/gofiber/fiber/v2" "github.com/gofiber/websocket/v2" + "github.com/lavanet/lava/protocol/lavasession" "github.com/lavanet/lava/relayer/parser" "github.com/lavanet/lava/utils" spectypes "github.com/lavanet/lava/x/spec/types" @@ -19,6 +21,23 @@ const ( ContextUserValueKeyDappID = "dappID" ) +type BaseChainParser struct { + taggedApis map[string]spectypes.ServiceApi + rwLock sync.RWMutex +} + +func (bcp *BaseChainParser) SetTaggedApis(taggedApis map[string]spectypes.ServiceApi) { + bcp.taggedApis = taggedApis +} + +func (bcp *BaseChainParser) GetSpecApiByTag(tag string) (spectypes.ServiceApi, bool) { + bcp.rwLock.RLock() + defer bcp.rwLock.RUnlock() + + val, ok := bcp.taggedApis[tag] + return val, ok +} + type parsedMessage struct { serviceApi *spectypes.ServiceApi apiInterface *spectypes.ApiInterface @@ -176,3 +195,8 @@ func verifyTendermintEndpoint(endpoints []string) (websocketEndpoint string, htt } return websocketEndpoint, httpEndpoint } + +func CraftChainMessage(serviceApi spectypes.ServiceApi, endpoint *lavasession.RPCProviderEndpoint) ChainMessageForSend { + // TODO: implement + return nil +} diff --git a/protocol/chainlib/grpc.go b/protocol/chainlib/grpc.go index 1b1ab8d454..42ac9bef8b 100644 --- a/protocol/chainlib/grpc.go +++ b/protocol/chainlib/grpc.go @@ -35,7 +35,7 @@ type GrpcChainParser struct { spec spectypes.Spec rwLock sync.RWMutex serverApis map[string]spectypes.ServiceApi - taggedApis map[string]spectypes.ServiceApi + BaseChainParser } // NewGrpcChainParser creates a new instance of GrpcChainParser @@ -73,11 +73,12 @@ func (apip *GrpcChainParser) ParseMsg(url string, data []byte, connectionType st Path: url, } - // TODO why we don't have requested block here? 
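The BaseChainParser introduced above centralizes the tag-to-ServiceApi lookup behind a read/write mutex, so every interface-specific parser (grpc, jsonRPC, rest, tendermintRPC) can embed it and share one GetSpecApiByTag implementation. The following is a minimal standalone sketch of that pattern, not the real lava types: ServiceApi here is a reduced placeholder for spectypes.ServiceApi, the "getBlockNumber" tag and method name are illustrative only, and unlike the snippet above the setter also takes the write lock so a concurrent SetSpec cannot race the readers.

    package main

    import (
        "fmt"
        "sync"
    )

    // ServiceApi is a stand-in for spectypes.ServiceApi, reduced for this sketch.
    type ServiceApi struct{ Name string }

    // baseParser mirrors the embedded-BaseChainParser idea: one RWMutex-guarded
    // tag -> api map shared by every concrete parser that embeds it.
    type baseParser struct {
        taggedApis map[string]ServiceApi
        rwLock     sync.RWMutex
    }

    func (bp *baseParser) SetTaggedApis(taggedApis map[string]ServiceApi) {
        bp.rwLock.Lock()
        defer bp.rwLock.Unlock()
        bp.taggedApis = taggedApis
    }

    func (bp *baseParser) GetSpecApiByTag(tag string) (ServiceApi, bool) {
        bp.rwLock.RLock()
        defer bp.rwLock.RUnlock()
        api, ok := bp.taggedApis[tag]
        return api, ok
    }

    func main() {
        bp := &baseParser{}
        bp.SetTaggedApis(map[string]ServiceApi{"getBlockNumber": {Name: "eth_blockNumber"}})
        if api, ok := bp.GetSpecApiByTag("getBlockNumber"); ok {
            fmt.Println("tagged api:", api.Name) // a fetcher would craft its message from this api
        }
    }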
+ // TODO: fix requested block nodeMsg := &parsedMessage{ - serviceApi: serviceApi, - apiInterface: apiInterface, - msg: grpcMessage, + serviceApi: serviceApi, + apiInterface: apiInterface, + msg: grpcMessage, + requestedBlock: spectypes.NOT_APPLICABLE, } return nodeMsg, nil } @@ -126,7 +127,7 @@ func (apip *GrpcChainParser) SetSpec(spec spectypes.Spec) { // Set the spec field of the JsonRPCChainParser object apip.spec = spec apip.serverApis = serverApis - apip.taggedApis = taggedApis + apip.BaseChainParser.SetTaggedApis(taggedApis) } // DataReliabilityParams returns data reliability params from spec (spec.enabled and spec.dataReliabilityThreshold) @@ -240,7 +241,7 @@ func NewGrpcChainProxy(ctx context.Context, nConns uint, rpcProviderEndpoint *la return cp, nil } -func (cp *GrpcChainProxy) SendNodeMsg(ctx context.Context, ch chan interface{}, chainMessage ChainMessage) (relayReply *pairingtypes.RelayReply, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) { +func (cp *GrpcChainProxy) SendNodeMsg(ctx context.Context, ch chan interface{}, chainMessage ChainMessageForSend) (relayReply *pairingtypes.RelayReply, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) { if ch != nil { return nil, "", nil, utils.LavaFormatError("Subscribe is not allowed on rest", nil, nil) } @@ -289,9 +290,7 @@ func (cp *GrpcChainProxy) SendNodeMsg(ctx context.Context, ch chan interface{}, formatMessage = true } - // nodeMessage.MethodDesc = methodDescriptor // TODO: this is useful for parsing the response - // rp, formatter, err := grpcurl.RequestParserAndFormatter(grpcurl.FormatJSON, descriptorSource, reader, grpcurl.FormatOptions{ - rp, _, err := grpcurl.RequestParserAndFormatter(grpcurl.FormatJSON, descriptorSource, reader, grpcurl.FormatOptions{ + rp, formatter, err := grpcurl.RequestParserAndFormatter(grpcurl.FormatJSON, descriptorSource, reader, grpcurl.FormatOptions{ EmitJSONDefaultFields: false, IncludeTextSeparator: false, AllowUnknownFields: true, @@ -299,7 +298,10 @@ func (cp *GrpcChainProxy) SendNodeMsg(ctx context.Context, ch chan interface{}, if err != nil { return nil, "", nil, utils.LavaFormatError("Failed to create formatter", err, nil) } - // nm.formatter = formatter + + // used when parsing the grpc result + nodeMessage.SetParsingData(methodDescriptor, formatter) + if formatMessage { err = rp.Next(msg) if err != nil { diff --git a/protocol/chainlib/jsonRPC.go b/protocol/chainlib/jsonRPC.go index b0b68c197f..2891c1696d 100644 --- a/protocol/chainlib/jsonRPC.go +++ b/protocol/chainlib/jsonRPC.go @@ -30,7 +30,7 @@ type JsonRPCChainParser struct { spec spectypes.Spec rwLock sync.RWMutex serverApis map[string]spectypes.ServiceApi - taggedApis map[string]spectypes.ServiceApi + BaseChainParser } // NewJrpcChainParser creates a new instance of JsonRPCChainParser @@ -100,7 +100,7 @@ func (apip *JsonRPCChainParser) SetSpec(spec spectypes.Spec) { // Set the spec field of the JsonRPCChainParser object apip.spec = spec apip.serverApis = serverApis - apip.taggedApis = taggedApis + apip.BaseChainParser.SetTaggedApis(taggedApis) } // getSupportedApi fetches service api from spec by name @@ -348,7 +348,7 @@ func (cp *JrpcChainProxy) start(ctx context.Context, nConns uint, nodeUrl string return nil } -func (cp *JrpcChainProxy) SendNodeMsg(ctx context.Context, ch chan interface{}, chainMessage ChainMessage) (relayReply *pairingtypes.RelayReply, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) { +func (cp *JrpcChainProxy) 
SendNodeMsg(ctx context.Context, ch chan interface{}, chainMessage ChainMessageForSend) (relayReply *pairingtypes.RelayReply, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) { // Get node rpc, err := cp.conn.GetRpc(ctx, true) if err != nil { diff --git a/protocol/chainlib/rest.go b/protocol/chainlib/rest.go index d024d7033e..a7c33125d0 100644 --- a/protocol/chainlib/rest.go +++ b/protocol/chainlib/rest.go @@ -29,7 +29,7 @@ type RestChainParser struct { spec spectypes.Spec rwLock sync.RWMutex serverApis map[string]spectypes.ServiceApi - taggedApis map[string]spectypes.ServiceApi + BaseChainParser } // NewRestChainParser creates a new instance of RestChainParser @@ -67,11 +67,12 @@ func (apip *RestChainParser) ParseMsg(url string, data []byte, connectionType st Path: url, } - // TODO why we don't have requested block here? + // TODO fix requested block nodeMsg := &parsedMessage{ - serviceApi: serviceApi, - apiInterface: apiInterface, - msg: restMessage, + serviceApi: serviceApi, + apiInterface: apiInterface, + msg: restMessage, + requestedBlock: spectypes.NOT_APPLICABLE, } return nodeMsg, nil } @@ -120,7 +121,7 @@ func (apip *RestChainParser) SetSpec(spec spectypes.Spec) { // Set the spec field of the RestChainParser object apip.spec = spec apip.serverApis = serverApis - apip.taggedApis = taggedApis + apip.BaseChainParser.SetTaggedApis(taggedApis) } // DataReliabilityParams returns data reliability params from spec (spec.enabled and spec.dataReliabilityThreshold) @@ -287,7 +288,7 @@ func NewRestChainProxy(ctx context.Context, nConns uint, rpcProviderEndpoint *la return rcp, nil } -func (rcp *RestChainProxy) SendNodeMsg(ctx context.Context, ch chan interface{}, chainMessage ChainMessage) (relayReply *pairingtypes.RelayReply, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) { +func (rcp *RestChainProxy) SendNodeMsg(ctx context.Context, ch chan interface{}, chainMessage ChainMessageForSend) (relayReply *pairingtypes.RelayReply, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) { if ch != nil { return nil, "", nil, utils.LavaFormatError("Subscribe is not allowed on rest", nil, nil) } diff --git a/protocol/chainlib/tendermintRPC.go b/protocol/chainlib/tendermintRPC.go index 3d0224f69a..5295e2a1ce 100644 --- a/protocol/chainlib/tendermintRPC.go +++ b/protocol/chainlib/tendermintRPC.go @@ -29,7 +29,7 @@ type TendermintChainParser struct { spec spectypes.Spec rwLock sync.RWMutex serverApis map[string]spectypes.ServiceApi - taggedApis map[string]spectypes.ServiceApi + BaseChainParser } // NewTendermintRpcChainParser creates a new instance of TendermintChainParser @@ -173,7 +173,7 @@ func (apip *TendermintChainParser) SetSpec(spec spectypes.Spec) { // Set the spec field of the TendermintChainParser object apip.spec = spec apip.serverApis = serverApis - apip.taggedApis = taggedApis + apip.BaseChainParser.SetTaggedApis(taggedApis) } // DataReliabilityParams returns data reliability params from spec (spec.enabled and spec.dataReliabilityThreshold) @@ -414,7 +414,7 @@ func NewtendermintRpcChainProxy(ctx context.Context, nConns uint, rpcProviderEnd return cp, cp.start(ctx, nConns, websocketUrl) } -func (cp *tendermintRpcChainProxy) SendNodeMsg(ctx context.Context, ch chan interface{}, chainMessage ChainMessage) (relayReply *pairingtypes.RelayReply, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) { +func (cp *tendermintRpcChainProxy) SendNodeMsg(ctx context.Context, ch chan 
interface{}, chainMessage ChainMessageForSend) (relayReply *pairingtypes.RelayReply, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) { rpc, err := cp.conn.GetRpc(ctx, true) if err != nil { return nil, "", nil, err @@ -433,7 +433,7 @@ func (cp *tendermintRpcChainProxy) SendNodeMsg(ctx context.Context, ch chan inte return cp.SendRPC(ctx, &nodeMessage, ch, chainMessage) } -func (cp *tendermintRpcChainProxy) SendURI(ctx context.Context, nodeMessage *rpcInterfaceMessages.TendermintrpcMessage, ch chan interface{}, chainMessage ChainMessage) (relayReply *pairingtypes.RelayReply, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) { +func (cp *tendermintRpcChainProxy) SendURI(ctx context.Context, nodeMessage *rpcInterfaceMessages.TendermintrpcMessage, ch chan interface{}, chainMessage ChainMessageForSend) (relayReply *pairingtypes.RelayReply, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) { // check if the input channel is not nil if ch != nil { // return an error if the channel is not nil @@ -489,7 +489,7 @@ func (cp *tendermintRpcChainProxy) SendURI(ctx context.Context, nodeMessage *rpc } // SendRPC sends Tendermint HTTP or WebSockets call -func (cp *tendermintRpcChainProxy) SendRPC(ctx context.Context, nodeMessage *rpcInterfaceMessages.TendermintrpcMessage, ch chan interface{}, chainMessage ChainMessage) (relayReply *pairingtypes.RelayReply, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) { +func (cp *tendermintRpcChainProxy) SendRPC(ctx context.Context, nodeMessage *rpcInterfaceMessages.TendermintrpcMessage, ch chan interface{}, chainMessage ChainMessageForSend) (relayReply *pairingtypes.RelayReply, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) { // Get rpc connection from the connection pool rpc, err := cp.conn.GetRpc(ctx, true) if err != nil { diff --git a/protocol/rpcprovider/rpcprovider.go b/protocol/rpcprovider/rpcprovider.go index 7602ff51cc..8e6dc71132 100644 --- a/protocol/rpcprovider/rpcprovider.go +++ b/protocol/rpcprovider/rpcprovider.go @@ -107,7 +107,7 @@ func (rpcp *RPCProvider) Start(ctx context.Context, txFactory tx.Factory, client AverageBlockTime: averageBlockTime, ServerBlockMemory: ChainTrackerDefaultMemory + blocksToSaveChainTracker, } - chainFetcher := chainlib.NewChainFetcher(ctx, chainProxy) + chainFetcher := chainlib.NewChainFetcher(ctx, chainProxy, chainParser, rpcProviderEndpoint) chainTracker, err := chaintracker.New(ctx, chainFetcher, chainTrackerConfig) if err != nil { utils.LavaFormatFatal("failed creating chain tracker", err, &map[string]string{"chainTrackerConfig": fmt.Sprintf("%+v", chainTrackerConfig)}) From aa2931cacc5db8e5ef1177e850362888c5c83f29 Mon Sep 17 00:00:00 2001 From: Ran Mishael Date: Tue, 21 Feb 2023 17:00:00 +0100 Subject: [PATCH 044/123] WIP psm methods almost done --- protocol/lavasession/errors.go | 4 +- .../lavasession/provider_session_manager.go | 75 ++++++++++++------- protocol/lavasession/provider_types.go | 36 +++++++++ protocol/rpcprovider/rpcprovider.go | 7 +- .../statetracker/provider_state_tracker.go | 4 + protocol/statetracker/state_query.go | 8 ++ 6 files changed, 106 insertions(+), 28 deletions(-) diff --git a/protocol/lavasession/errors.go b/protocol/lavasession/errors.go index e1679a6567..59b2a30917 100644 --- a/protocol/lavasession/errors.go +++ b/protocol/lavasession/errors.go @@ -36,5 +36,7 @@ var ( // Provider Side Errors MaximumCULimitReachedByConsumer = 
sdkerrors.New("MaximumCULimitReachedByConsumer Error", 886, "Consumer reached maximum cu limit") ProviderConsumerCuMisMatch = sdkerrors.New("ProviderConsumerCuMisMatch Error", 887, "Provider and Consumer disagree on total cu for session") RelayNumberMismatch = sdkerrors.New("RelayNumberMismatch Error", 888, "Provider and Consumer disagree on relay number for session") - SubscriptionInitiationError = sdkerrors.New("SubscriptionInitiationError Error", 889, "provider failed initiating subscription") + SubscriptionInitiationError = sdkerrors.New("SubscriptionInitiationError Error", 889, "Provider failed initiating subscription") + EpochIsNotRegisteredError = sdkerrors.New("EpochIsNotRegisteredError Error", 890, "Epoch is not registered in provider session manager") + ConsumerIsNotRegisteredError = sdkerrors.New("ConsumerIsNotRegisteredError Error", 891, "Consumer is not registered in provider session manager") ) diff --git a/protocol/lavasession/provider_session_manager.go b/protocol/lavasession/provider_session_manager.go index 5396340c45..fd57a5cf64 100644 --- a/protocol/lavasession/provider_session_manager.go +++ b/protocol/lavasession/provider_session_manager.go @@ -11,25 +11,26 @@ import ( ) type ProviderSessionManager struct { - sessionsWithAllConsumers map[uint64]map[string]*ProviderSessionsWithConsumer // first key is epochs, second key is a consumer address - lock sync.RWMutex - blockedEpoch uint64 // requests from this epoch are blocked - rpcProviderEndpoint *RPCProviderEndpoint + sessionsWithAllConsumers map[uint64]map[string]*ProviderSessionsWithConsumer // first key is epochs, second key is a consumer address + lock sync.RWMutex + blockedEpochHeight uint64 // requests from this epoch are blocked + rpcProviderEndpoint *RPCProviderEndpoint + numberOfBlocksKeptInMemory uint64 // sessionsWithAllConsumers with epochs older than ((latest epoch) - numberOfBlocksKeptInMemory) are deleted. 
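The ProviderSessionManager struct above keys its sessions twice, first by epoch and then by consumer address, and blockedEpochHeight marks the height at or below which requests are refused. A rough standalone sketch of how a registration against that shape behaves, using a placeholder consumerSessions type and invented addresses rather than the real lavasession types:

    package main

    import "fmt"

    // consumerSessions stands in for *ProviderSessionsWithConsumer in this sketch.
    type consumerSessions struct{ usedComputeUnits uint64 }

    func main() {
        // first key is the epoch, second key is the consumer address,
        // mirroring sessionsWithAllConsumers above.
        sessions := map[uint64]map[string]*consumerSessions{}
        blockedEpochHeight := uint64(100) // requests from epochs at or below this height are rejected

        register := func(epoch uint64, consumer string) {
            if epoch <= blockedEpochHeight { // same rule as IsValidEpoch: valid only when epoch > blockedEpochHeight
                fmt.Println("rejected, epoch too old:", epoch)
                return
            }
            if _, ok := sessions[epoch]; !ok {
                sessions[epoch] = map[string]*consumerSessions{}
            }
            sessions[epoch][consumer] = &consumerSessions{}
            fmt.Println("registered", consumer, "for epoch", epoch)
        }

        register(120, "consumer-1")
        register(80, "consumer-1") // too old, rejected
    }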
} // reads cs.BlockedEpoch atomically func (psm *ProviderSessionManager) atomicWriteBlockedEpoch(epoch uint64) { - atomic.StoreUint64(&psm.blockedEpoch, epoch) + atomic.StoreUint64(&psm.blockedEpochHeight, epoch) } // reads cs.BlockedEpoch atomically func (psm *ProviderSessionManager) atomicReadBlockedEpoch() (epoch uint64) { - return atomic.LoadUint64(&psm.blockedEpoch) + return atomic.LoadUint64(&psm.blockedEpochHeight) } -func (psm *ProviderSessionManager) IsValidEpoch(epoch uint64) (valid bool, thresholdEpoch uint64) { - threshold := psm.atomicReadBlockedEpoch() - return epoch > threshold, threshold +func (psm *ProviderSessionManager) IsValidEpoch(epoch uint64) (valid bool, blockedEpochHeight uint64) { + blockedEpochHeight = psm.atomicReadBlockedEpoch() + return epoch > blockedEpochHeight, blockedEpochHeight } // Check if consumer exists and is not blocked, if all is valid return the ProviderSessionsWithConsumer pointer @@ -154,27 +155,21 @@ } func (psm *ProviderSessionManager) getSessionFromAnActiveConsumer(providerSessio } func (psm *ProviderSessionManager) ReportConsumer() (address string, epoch uint64, err error) { - return "", 0, nil + return "", 0, nil // TBD } func (psm *ProviderSessionManager) GetDataReliabilitySession(address string, epoch uint64) (*SingleProviderSession, error) { return nil, fmt.Errorf("not implemented") } +// OnSessionFailure unlocks the session gracefully, this happens when the session finished with an error func (psm *ProviderSessionManager) OnSessionFailure(singleProviderSession *SingleProviderSession) (err error) { - // need to handle dataReliability session failure separately - return nil + return singleProviderSession.onSessionFailure() } // OnSessionDone unlocks the session gracefully, this happens when session finished successfully func (psm *ProviderSessionManager) OnSessionDone(singleProviderSession *SingleProviderSession) (err error) { - err = singleProviderSession.VerifyLock() - if err != nil { - return err - } - singleProviderSession.lock.Unlock() - // session finished successfully - return nil + return singleProviderSession.onSessionDone() } func (psm *ProviderSessionManager) RPCProviderEndpoint() *RPCProviderEndpoint { @@ -182,8 +177,20 @@ } func (psm *ProviderSessionManager) UpdateEpoch(epoch uint64) { - // update the epoch to limit consumer usage - // when updating the blocked epoch, we also need to clean old epochs from the map. sessionsWithAllConsumers + psm.lock.Lock() + defer psm.lock.Unlock() + psm.blockedEpochHeight = epoch - psm.numberOfBlocksKeptInMemory + newMap := make(map[uint64]map[string]*ProviderSessionsWithConsumer) + // in order to avoid running over the map twice (1. mark, 2. delete), a better technique is to copy and filter + // the still valid epochs into a new map in a single pass + for epochStored, value := range psm.sessionsWithAllConsumers { + if epochStored < psm.blockedEpochHeight { // check if key is skipped. 
+ continue + } + // if epochStored is ok, copy the value into the new map + newMap[epochStored] = value + } + psm.sessionsWithAllConsumers = newMap } func (psm *ProviderSessionManager) ProcessUnsubscribeEthereum(subscriptionID string, consumerAddress sdk.AccAddress) error { @@ -220,13 +227,29 @@ func (psm *ProviderSessionManager) SubscriptionFailure(consumerAddress string, e // userSessions.Lock.Unlock() } -// called when the reward server has information on a higher cu proof and usage and this providerSessionsManager needs to sync up on it -func (psm *ProviderSessionManager) UpdateSessionCU(consumerAddress string, epoch uint64, sessionID uint64, storedCU uint64) error { +// Called when the reward server has information on a higher cu proof and usage and this providerSessionsManager needs to sync up on it +func (psm *ProviderSessionManager) UpdateSessionCU(consumerAddress string, epoch uint64, sessionID uint64, newCU uint64) error { // load the session and update the CU inside - return fmt.Errorf("not implemented") + psm.lock.Lock() + defer psm.lock.Unlock() + providerSessionWithConsumerList, ok := psm.sessionsWithAllConsumers[epoch] + if !ok { + return utils.LavaFormatError("UpdateSessionCU Failed", EpochIsNotRegisteredError, &map[string]string{"epoch": strconv.FormatUint(epoch, 10)}) + } + providerSessionWithConsumer, foundConsumer := providerSessionWithConsumerList[consumerAddress] + if !foundConsumer { + return utils.LavaFormatError("UpdateSessionCU Failed", ConsumerIsNotRegisteredError, &map[string]string{"epoch": strconv.FormatUint(epoch, 10), "consumer": consumerAddress}) + } + + usedCu := providerSessionWithConsumer.atomicReadUsedComputeUnits() // check used cu now + if usedCu < newCU { + // if newCU proof is higher than current state, update. 
+ providerSessionWithConsumer.atomicWriteUsedComputeUnits(newCU) + } + return nil } // Returning a new provider session manager -func NewProviderSessionManager(rpcProviderEndpoint *RPCProviderEndpoint) *ProviderSessionManager { - return &ProviderSessionManager{rpcProviderEndpoint: rpcProviderEndpoint} +func NewProviderSessionManager(rpcProviderEndpoint *RPCProviderEndpoint, numberOfBlocksKeptInMemory uint64) *ProviderSessionManager { + return &ProviderSessionManager{rpcProviderEndpoint: rpcProviderEndpoint, numberOfBlocksKeptInMemory: numberOfBlocksKeptInMemory} } diff --git a/protocol/lavasession/provider_types.go b/protocol/lavasession/provider_types.go index 4d00c376a2..cc2c213b9d 100644 --- a/protocol/lavasession/provider_types.go +++ b/protocol/lavasession/provider_types.go @@ -83,6 +83,10 @@ func (pswc *ProviderSessionsWithConsumer) atomicReadUsedComputeUnits() (usedComp return atomic.LoadUint64(&pswc.epochData.UsedComputeUnits) } +func (pswc *ProviderSessionsWithConsumer) atomicWriteUsedComputeUnits(cu uint64) { + atomic.StoreUint64(&pswc.epochData.UsedComputeUnits, cu) +} + func (pswc *ProviderSessionsWithConsumer) atomicWriteMaxComputeUnits(maxComputeUnits uint64) { atomic.StoreUint64(&pswc.epochData.MaxComputeUnits, maxComputeUnits) } @@ -177,3 +181,35 @@ func (sps *SingleProviderSession) validateAndAddUsedCU(currentCU uint64, maxCu u } } } + +func (sps *SingleProviderSession) validateAndSubUsedCU(currentCU uint64) error { + for { + usedCu := sps.userSessionsParent.atomicReadUsedComputeUnits() // check used cu now + if sps.userSessionsParent.atomicCompareAndWriteUsedComputeUnits(usedCu-currentCU, usedCu) { // decrease the amount of used cu from the known value + return nil + } + } +} + +func (sps *SingleProviderSession) onSessionFailure() error { + err := sps.VerifyLock() // sps is locked + if err != nil { + return utils.LavaFormatError("sps.verifyLock() failed in onSessionFailure", err, nil) + } + sps.CuSum = sps.CuSum - sps.LatestRelayCu + sps.RelayNum = sps.RelayNum - 1 + sps.validateAndSubUsedCU(sps.LatestRelayCu) + sps.LatestRelayCu = 0 + sps.lock.Unlock() + return nil +} + +func (sps *SingleProviderSession) onSessionDone() error { + err := sps.VerifyLock() // sps is locked + if err != nil { + return utils.LavaFormatError("sps.verifyLock() failed in onSessionDone", err, nil) + } + sps.LatestRelayCu = 0 // reset the cu, we can also verify its 0 when loading. 
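onSessionFailure above returns the failed relay's CU through validateAndSubUsedCU, which keeps retrying atomicCompareAndWriteUsedComputeUnits until the swap succeeds. A minimal sketch of that lock-free retry pattern on a bare uint64 counter; the real code keeps the counter on the parent ProviderSessionsWithConsumer and, like this sketch, assumes the subtracted amount never exceeds the current value.

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    // subUsedCU keeps retrying a compare-and-swap until no other goroutine
    // modified the counter between the read and the write.
    func subUsedCU(usedCU *uint64, relayCU uint64) {
        for {
            current := atomic.LoadUint64(usedCU) // read the known value
            if atomic.CompareAndSwapUint64(usedCU, current, current-relayCU) {
                return // swap succeeded, the decrement is applied exactly once
            }
            // lost the race, re-read and retry
        }
    }

    func main() {
        used := uint64(50)
        subUsedCU(&used, 10)                     // a failed relay gives its 10 CU back
        fmt.Println("used compute units:", used) // 40
    }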
+ sps.lock.Unlock() + return nil +} diff --git a/protocol/rpcprovider/rpcprovider.go b/protocol/rpcprovider/rpcprovider.go index 7602ff51cc..8141405e76 100644 --- a/protocol/rpcprovider/rpcprovider.go +++ b/protocol/rpcprovider/rpcprovider.go @@ -47,6 +47,7 @@ type ProviderStateTrackerInf interface { GetEpochSize(ctx context.Context) (uint64, error) EarliestBlockInMemory(ctx context.Context) (uint64, error) RegisterPaymentUpdatableForPayments(ctx context.Context, paymentUpdatable statetracker.PaymentUpdatable) + GetRecommendedEpochNumToCollectPayment(ctx context.Context) (uint64, error) } type RPCProvider struct { @@ -85,8 +86,12 @@ func (rpcp *RPCProvider) Start(ctx context.Context, txFactory tx.Factory, client } utils.LavaFormatInfo("RPCProvider pubkey: "+addr.String(), nil) utils.LavaFormatInfo("RPCProvider setting up endpoints", &map[string]string{"length": strconv.Itoa(len(rpcProviderEndpoints))}) + recommendedEpochNumToCollectPayment, err := rpcp.providerStateTracker.GetRecommendedEpochNumToCollectPayment(ctx) + if err != nil { + utils.LavaFormatFatal("Failed fetching epoch size in RPCProvider Start", err, nil) + } for _, rpcProviderEndpoint := range rpcProviderEndpoints { - providerSessionManager := lavasession.NewProviderSessionManager(rpcProviderEndpoint) + providerSessionManager := lavasession.NewProviderSessionManager(rpcProviderEndpoint, recommendedEpochNumToCollectPayment) key := rpcProviderEndpoint.Key() rpcp.providerStateTracker.RegisterForEpochUpdates(ctx, providerSessionManager) chainParser, err := chainlib.NewChainParser(rpcProviderEndpoint.ApiInterface) diff --git a/protocol/statetracker/provider_state_tracker.go b/protocol/statetracker/provider_state_tracker.go index e995cc8825..ee0302b9c1 100644 --- a/protocol/statetracker/provider_state_tracker.go +++ b/protocol/statetracker/provider_state_tracker.go @@ -110,3 +110,7 @@ func (pst *ProviderStateTracker) GetEpochSize(ctx context.Context) (uint64, erro func (pst *ProviderStateTracker) EarliestBlockInMemory(ctx context.Context) (uint64, error) { return pst.stateQuery.EarliestBlockInMemory(ctx) } + +func (pst *ProviderStateTracker) GetRecommendedEpochNumToCollectPayment(ctx context.Context) (uint64, error) { + return pst.stateQuery.GetRecommendedEpochNumToCollectPayment(ctx) +} diff --git a/protocol/statetracker/state_query.go b/protocol/statetracker/state_query.go index aed1c05416..0da2571be3 100644 --- a/protocol/statetracker/state_query.go +++ b/protocol/statetracker/state_query.go @@ -281,3 +281,11 @@ func (psq *ProviderStateQuery) EarliestBlockInMemory(ctx context.Context) (uint6 } return res.EpochDetails.EarliestStart, nil } + +func (psq *ProviderStateQuery) GetRecommendedEpochNumToCollectPayment(ctx context.Context) (uint64, error) { + res, err := psq.PairingQueryClient.Params(ctx, &pairingtypes.QueryParamsRequest{}) + if err != nil { + return 0, err + } + return res.GetParams().RecommendedEpochNumToCollectPayment, nil +} From 8b99a0a618217eb7a16aa0cce5d8973e7c2cd0eb Mon Sep 17 00:00:00 2001 From: omer mishael Date: Tue, 21 Feb 2023 18:29:21 +0200 Subject: [PATCH 045/123] finished implementing chainFetcher --- protocol/chainlib/chain_fetcher.go | 2 +- protocol/chainlib/chainlib.go | 1 + protocol/chainlib/common.go | 6 ++---- protocol/chainlib/grpc.go | 17 +++++++++++++++-- protocol/chainlib/jsonRPC.go | 17 ++++++++++++++++- protocol/chainlib/rest.go | 17 +++++++++++++++-- protocol/chainlib/tendermintRPC.go | 17 ++++++++++++++++- 7 files changed, 66 insertions(+), 11 deletions(-) diff --git 
a/protocol/chainlib/chain_fetcher.go b/protocol/chainlib/chain_fetcher.go index 158ee66b42..fae81ae87d 100644 --- a/protocol/chainlib/chain_fetcher.go +++ b/protocol/chainlib/chain_fetcher.go @@ -28,7 +28,7 @@ func (cf *ChainFetcher) FetchLatestBlockNum(ctx context.Context) (int64, error) if !ok { return spectypes.NOT_APPLICABLE, utils.LavaFormatError(spectypes.GET_BLOCKNUM+" tag function not found", nil, &map[string]string{"chainID": cf.endpoint.ChainID, "APIInterface": cf.endpoint.ApiInterface}) } - CraftChainMessage(serviceApi, cf.endpoint) + CraftChainMessage(serviceApi, cf.chainParser) return 0, fmt.Errorf("not implemented") } diff --git a/protocol/chainlib/chainlib.go b/protocol/chainlib/chainlib.go index c8a48b240a..1519870676 100644 --- a/protocol/chainlib/chainlib.go +++ b/protocol/chainlib/chainlib.go @@ -55,6 +55,7 @@ type ChainParser interface { DataReliabilityParams() (enabled bool, dataReliabilityThreshold uint32) ChainBlockStats() (allowedBlockLagForQosSync int64, averageBlockTime time.Duration, blockDistanceForFinalizedData uint32, blocksInFinalizationProof uint32) GetSpecApiByTag(tag string) (specApi spectypes.ServiceApi, existed bool) + CraftMessage(serviceApi spectypes.ServiceApi) ChainMessageForSend } type ChainMessage interface { diff --git a/protocol/chainlib/common.go b/protocol/chainlib/common.go index 2a995f585c..9507664070 100644 --- a/protocol/chainlib/common.go +++ b/protocol/chainlib/common.go @@ -11,7 +11,6 @@ import ( "github.com/gofiber/fiber/v2" "github.com/gofiber/websocket/v2" - "github.com/lavanet/lava/protocol/lavasession" "github.com/lavanet/lava/relayer/parser" "github.com/lavanet/lava/utils" spectypes "github.com/lavanet/lava/x/spec/types" @@ -196,7 +195,6 @@ func verifyTendermintEndpoint(endpoints []string) (websocketEndpoint string, htt return websocketEndpoint, httpEndpoint } -func CraftChainMessage(serviceApi spectypes.ServiceApi, endpoint *lavasession.RPCProviderEndpoint) ChainMessageForSend { - // TODO: implement - return nil +func CraftChainMessage(serviceApi spectypes.ServiceApi, chainParser ChainParser) ChainMessageForSend { + return chainParser.CraftMessage(serviceApi) } diff --git a/protocol/chainlib/grpc.go b/protocol/chainlib/grpc.go index 42ac9bef8b..55ff08337a 100644 --- a/protocol/chainlib/grpc.go +++ b/protocol/chainlib/grpc.go @@ -43,6 +43,14 @@ func NewGrpcChainParser() (chainParser *GrpcChainParser, err error) { return &GrpcChainParser{}, nil } +func (apip *GrpcChainParser) CraftMessage(serviceApi spectypes.ServiceApi) ChainMessageForSend { + grpcMessage := rpcInterfaceMessages.GrpcMessage{ + Msg: nil, + Path: serviceApi.GetName(), + } + return apip.newMethod(&serviceApi, &serviceApi.ApiInterfaces[0], spectypes.NOT_APPLICABLE, grpcMessage) +} + // ParseMsg parses message data into chain message object func (apip *GrpcChainParser) ParseMsg(url string, data []byte, connectionType string) (ChainMessage, error) { // Guard that the GrpcChainParser instance exists @@ -74,13 +82,18 @@ func (apip *GrpcChainParser) ParseMsg(url string, data []byte, connectionType st } // TODO: fix requested block + nodeMsg := apip.newMethod(serviceApi, apiInterface, spectypes.NOT_APPLICABLE, grpcMessage) + return nodeMsg, nil +} + +func (*GrpcChainParser) newMethod(serviceApi *spectypes.ServiceApi, apiInterface *spectypes.ApiInterface, requestedBlock int64, grpcMessage rpcInterfaceMessages.GrpcMessage) *parsedMessage { nodeMsg := &parsedMessage{ serviceApi: serviceApi, apiInterface: apiInterface, msg: grpcMessage, - requestedBlock: 
spectypes.NOT_APPLICABLE, + requestedBlock: requestedBlock, } - return nodeMsg, nil + return nodeMsg } // getSupportedApi fetches service api from spec by name diff --git a/protocol/chainlib/jsonRPC.go b/protocol/chainlib/jsonRPC.go index 2891c1696d..8157905cfe 100644 --- a/protocol/chainlib/jsonRPC.go +++ b/protocol/chainlib/jsonRPC.go @@ -38,6 +38,16 @@ func NewJrpcChainParser() (chainParser *JsonRPCChainParser, err error) { return &JsonRPCChainParser{}, nil } +func (apip *JsonRPCChainParser) CraftMessage(serviceApi spectypes.ServiceApi) ChainMessageForSend { + msg := &rpcInterfaceMessages.JsonrpcMessage{ + Version: "2.0", + ID: []byte("1"), + Method: serviceApi.GetName(), + Params: nil, + } + return apip.newChainMessage(&serviceApi, &serviceApi.ApiInterfaces[0], spectypes.NOT_APPLICABLE, msg) +} + // ParseMsg parses message data into chain message object func (apip *JsonRPCChainParser) ParseMsg(url string, data []byte, connectionType string) (ChainMessage, error) { // Guard that the JsonRPCChainParser instance exists @@ -74,13 +84,18 @@ func (apip *JsonRPCChainParser) ParseMsg(url string, data []byte, connectionType return nil, err } + nodeMsg := apip.newChainMessage(serviceApi, apiInterface, requestedBlock, msg) + return nodeMsg, nil +} + +func (*JsonRPCChainParser) newChainMessage(serviceApi *spectypes.ServiceApi, apiInterface *spectypes.ApiInterface, requestedBlock int64, msg *rpcInterfaceMessages.JsonrpcMessage) *parsedMessage { nodeMsg := &parsedMessage{ serviceApi: serviceApi, apiInterface: apiInterface, requestedBlock: requestedBlock, msg: msg, } - return nodeMsg, nil + return nodeMsg } // SetSpec sets the spec for the JsonRPCChainParser diff --git a/protocol/chainlib/rest.go b/protocol/chainlib/rest.go index a7c33125d0..90adfc6287 100644 --- a/protocol/chainlib/rest.go +++ b/protocol/chainlib/rest.go @@ -37,6 +37,14 @@ func NewRestChainParser() (chainParser *RestChainParser, err error) { return &RestChainParser{}, nil } +func (apip *RestChainParser) CraftMessage(serviceApi spectypes.ServiceApi) ChainMessageForSend { + restMessage := rpcInterfaceMessages.RestMessage{ + Msg: nil, + Path: serviceApi.GetName(), + } + return apip.newChainMessage(&serviceApi, &serviceApi.ApiInterfaces[0], spectypes.NOT_APPLICABLE, restMessage) +} + // ParseMsg parses message data into chain message object func (apip *RestChainParser) ParseMsg(url string, data []byte, connectionType string) (ChainMessage, error) { // Guard that the RestChainParser instance exists @@ -68,13 +76,18 @@ func (apip *RestChainParser) ParseMsg(url string, data []byte, connectionType st } // TODO fix requested block + nodeMsg := apip.newChainMessage(serviceApi, apiInterface, spectypes.NOT_APPLICABLE, restMessage) + return nodeMsg, nil +} + +func (*RestChainParser) newChainMessage(serviceApi *spectypes.ServiceApi, apiInterface *spectypes.ApiInterface, requestBlock int64, restMessage rpcInterfaceMessages.RestMessage) *parsedMessage { nodeMsg := &parsedMessage{ serviceApi: serviceApi, apiInterface: apiInterface, msg: restMessage, - requestedBlock: spectypes.NOT_APPLICABLE, + requestedBlock: requestBlock, } - return nodeMsg, nil + return nodeMsg } // getSupportedApi fetches service api from spec by name diff --git a/protocol/chainlib/tendermintRPC.go b/protocol/chainlib/tendermintRPC.go index 5295e2a1ce..9110bfe934 100644 --- a/protocol/chainlib/tendermintRPC.go +++ b/protocol/chainlib/tendermintRPC.go @@ -37,6 +37,16 @@ func NewTendermintRpcChainParser() (chainParser *TendermintChainParser, err erro return 
&TendermintChainParser{}, nil } +func (apip *TendermintChainParser) CraftMessage(serviceApi spectypes.ServiceApi) ChainMessageForSend { + msg := rpcInterfaceMessages.JsonrpcMessage{ + Version: "2.0", + ID: []byte("1"), + Method: serviceApi.GetName(), + Params: nil, + } + return apip.newChainMessage(&serviceApi, &serviceApi.ApiInterfaces[0], spectypes.NOT_APPLICABLE, msg) +} + // ParseMsg parses message data into chain message object func (apip *TendermintChainParser) ParseMsg(url string, data []byte, connectionType string) (ChainMessage, error) { // Guard that the TendermintChainParser instance exists @@ -120,13 +130,18 @@ func (apip *TendermintChainParser) ParseMsg(url string, data []byte, connectionT return nil, err } + nodeMsg := apip.newChainMessage(serviceApi, apiInterface, requestedBlock, msg) + return nodeMsg, nil +} + +func (*TendermintChainParser) newChainMessage(serviceApi *spectypes.ServiceApi, apiInterface *spectypes.ApiInterface, requestedBlock int64, msg rpcInterfaceMessages.JsonrpcMessage) ChainMessage { nodeMsg := &parsedMessage{ serviceApi: serviceApi, apiInterface: apiInterface, requestedBlock: requestedBlock, msg: msg, } - return nodeMsg, nil + return nodeMsg } // getSupportedApi fetches service api from spec by name From 43d1ff0b2a7e5e24d3d9e9a7af697cb2971bacc8 Mon Sep 17 00:00:00 2001 From: Ran Mishael Date: Tue, 21 Feb 2023 17:31:14 +0100 Subject: [PATCH 046/123] adding recommendedEpochNumToCollectPayment --- .../rpcprovider/rewardserver/reward_server.go | 25 +++++++++++++------ protocol/rpcprovider/rpcprovider.go | 19 ++++++++++---- 2 files changed, 32 insertions(+), 12 deletions(-) diff --git a/protocol/rpcprovider/rewardserver/reward_server.go b/protocol/rpcprovider/rewardserver/reward_server.go index 6f45b98006..7175e8a5ce 100644 --- a/protocol/rpcprovider/rewardserver/reward_server.go +++ b/protocol/rpcprovider/rewardserver/reward_server.go @@ -15,10 +15,6 @@ import ( terderminttypes "github.com/tendermint/tendermint/abci/types" ) -const ( - StaleEpochDistance = 2 -) - type PaymentRequest struct { CU uint64 BlockHeightDeadline int64 @@ -71,6 +67,7 @@ type RewardServer struct { type RewardsTxSender interface { TxRelayPayment(ctx context.Context, relayRequests []*pairingtypes.RelayRequest, description string) error GetEpochSize(ctx context.Context) (uint64, error) + GetRecommendedEpochNumToCollectPayment(ctx context.Context) (uint64, error) EarliestBlockInMemory(ctx context.Context) (uint64, error) } @@ -227,17 +224,31 @@ func (rws *RewardServer) RemoveExpectedPayment(paidCUToFInd uint64, expectedClie return false } +// returns how long to wait until asking for payments for each epoch. 
+func (rws *RewardServer) getEpochSizeWithRecommendedPaymentDelay(ctx context.Context) (uint64, error) { + epochSize, err := rws.rewardsTxSender.GetEpochSize(ctx) + if err != nil { + return 0, err + } + recommendedEpochNumToCollectPayment, err := rws.rewardsTxSender.GetRecommendedEpochNumToCollectPayment(ctx) + if err != nil { + return 0, err + } + return recommendedEpochNumToCollectPayment * epochSize, nil +} + func (rws *RewardServer) gatherRewardsForClaim(ctx context.Context, current_epoch uint64) (rewardsForClaim []*pairingtypes.RelayRequest, errRet error) { rws.lock.Lock() defer rws.lock.Unlock() - epochSize, err := rws.rewardsTxSender.GetEpochSize(ctx) + epochSizeWithRecommendedPaymentDelay, err := rws.getEpochSizeWithRecommendedPaymentDelay(ctx) if err != nil { return nil, err } - if epochSize*StaleEpochDistance > current_epoch { + + if epochSizeWithRecommendedPaymentDelay > current_epoch { return nil, utils.LavaFormatError("current epoch too low", nil, &map[string]string{"current epoch": strconv.FormatUint(current_epoch, 10)}) } - target_epoch_to_claim_rewards := current_epoch - epochSize*StaleEpochDistance + target_epoch_to_claim_rewards := current_epoch - epochSizeWithRecommendedPaymentDelay for epoch, epochRewards := range rws.rewards { if epoch >= uint64(target_epoch_to_claim_rewards) { continue diff --git a/protocol/rpcprovider/rpcprovider.go b/protocol/rpcprovider/rpcprovider.go index 8767b0ed50..da7ef3156f 100644 --- a/protocol/rpcprovider/rpcprovider.go +++ b/protocol/rpcprovider/rpcprovider.go @@ -56,6 +56,18 @@ type RPCProvider struct { rpcProviderListeners map[string]*ProviderListener } +func (rpcp *RPCProvider) getEpochSizeForPSMBlockMemory(ctx context.Context) uint64 { + epochSize, err := rpcp.providerStateTracker.GetEpochSize(ctx) + if err != nil { + utils.LavaFormatFatal("Failed fetching GetEpochSize in RPCProvider Start", err, nil) + } + recommendedEpochNumToCollectPayment, err := rpcp.providerStateTracker.GetRecommendedEpochNumToCollectPayment(ctx) + if err != nil { + utils.LavaFormatFatal("Failed fetching GetRecommendedEpochNumToCollectPayment in RPCProvider Start", err, nil) + } + return recommendedEpochNumToCollectPayment * epochSize +} + func (rpcp *RPCProvider) Start(ctx context.Context, txFactory tx.Factory, clientCtx client.Context, rpcProviderEndpoints []*lavasession.RPCProviderEndpoint, cache *performance.Cache, parallelConnections uint) (err error) { // single state tracker lavaChainFetcher := chainlib.NewLavaChainFetcher(ctx, clientCtx) @@ -86,12 +98,9 @@ func (rpcp *RPCProvider) Start(ctx context.Context, txFactory tx.Factory, client } utils.LavaFormatInfo("RPCProvider pubkey: "+addr.String(), nil) utils.LavaFormatInfo("RPCProvider setting up endpoints", &map[string]string{"length": strconv.Itoa(len(rpcProviderEndpoints))}) - recommendedEpochNumToCollectPayment, err := rpcp.providerStateTracker.GetRecommendedEpochNumToCollectPayment(ctx) - if err != nil { - utils.LavaFormatFatal("Failed fetching epoch size in RPCProvider Start", err, nil) - } + blockMemorySize := rpcp.getEpochSizeForPSMBlockMemory(ctx) // get the number of blocks to keep in PSM. 
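The blockMemorySize fetched above is handed to the provider session manager as the number of blocks an epoch stays usable; UpdateEpoch then prunes anything older by rebuilding the map rather than deleting in place. A compact sketch of that copy-and-filter pruning, with an int counter standing in for the real per-consumer session map and all numbers invented for illustration:

    package main

    import "fmt"

    func main() {
        blockMemorySize := uint64(60) // e.g. epochSize * recommendedEpochNumToCollectPayment
        latestEpoch := uint64(200)

        blockedEpochHeight := uint64(0)
        if latestEpoch > blockMemorySize {
            blockedEpochHeight = latestEpoch - blockMemorySize // 140
        }

        // epoch -> number of tracked consumer sessions (stand-in for the nested map)
        sessions := map[uint64]int{80: 2, 140: 1, 160: 3, 200: 5}

        // copy-and-filter: keep only epochs that are still valid (epoch > blockedEpochHeight)
        pruned := make(map[uint64]int, len(sessions))
        for epoch, count := range sessions {
            if epoch <= blockedEpochHeight {
                continue // too old, dropped together with its sessions
            }
            pruned[epoch] = count
        }
        sessions = pruned
        fmt.Println("epochs still tracked:", len(sessions)) // epochs 160 and 200 remain
    }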
for _, rpcProviderEndpoint := range rpcProviderEndpoints { - providerSessionManager := lavasession.NewProviderSessionManager(rpcProviderEndpoint, recommendedEpochNumToCollectPayment) + providerSessionManager := lavasession.NewProviderSessionManager(rpcProviderEndpoint, blockMemorySize) key := rpcProviderEndpoint.Key() rpcp.providerStateTracker.RegisterForEpochUpdates(ctx, providerSessionManager) chainParser, err := chainlib.NewChainParser(rpcProviderEndpoint.ApiInterface) From 6c2bd2da31fbe52138bed29c176744d5a9b957fb Mon Sep 17 00:00:00 2001 From: Ran Mishael Date: Tue, 21 Feb 2023 17:34:12 +0100 Subject: [PATCH 047/123] adding recommendedEpochNumToCollectPayment --- protocol/rpcprovider/rewardserver/reward_server.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/protocol/rpcprovider/rewardserver/reward_server.go b/protocol/rpcprovider/rewardserver/reward_server.go index 7175e8a5ce..8e1c5c5dc6 100644 --- a/protocol/rpcprovider/rewardserver/reward_server.go +++ b/protocol/rpcprovider/rewardserver/reward_server.go @@ -228,11 +228,11 @@ func (rws *RewardServer) RemoveExpectedPayment(paidCUToFInd uint64, expectedClie func (rws *RewardServer) getEpochSizeWithRecommendedPaymentDelay(ctx context.Context) (uint64, error) { epochSize, err := rws.rewardsTxSender.GetEpochSize(ctx) if err != nil { - return 0, err + return 0, utils.LavaFormatError("Failed fetching rws.rewardsTxSender.GetEpochSize(ctx)", err, nil) } recommendedEpochNumToCollectPayment, err := rws.rewardsTxSender.GetRecommendedEpochNumToCollectPayment(ctx) if err != nil { - return 0, err + return 0, utils.LavaFormatError("Failed fetching rws.rewardsTxSender.GetRecommendedEpochNumToCollectPayment(ctx)", err, nil) } return recommendedEpochNumToCollectPayment * epochSize, nil } From 500f2ae72a957d78a66fafee458cdc7b0036064e Mon Sep 17 00:00:00 2001 From: Ran Mishael Date: Tue, 21 Feb 2023 17:45:34 +0100 Subject: [PATCH 048/123] adjusting code in state_query --- .../rpcprovider/rewardserver/reward_server.go | 18 ++---------------- protocol/rpcprovider/rpcprovider.go | 18 +++++------------- .../statetracker/provider_state_tracker.go | 4 ++++ protocol/statetracker/state_query.go | 12 ++++++++++++ 4 files changed, 23 insertions(+), 29 deletions(-) diff --git a/protocol/rpcprovider/rewardserver/reward_server.go b/protocol/rpcprovider/rewardserver/reward_server.go index 8e1c5c5dc6..d8fd567cce 100644 --- a/protocol/rpcprovider/rewardserver/reward_server.go +++ b/protocol/rpcprovider/rewardserver/reward_server.go @@ -66,8 +66,7 @@ type RewardServer struct { type RewardsTxSender interface { TxRelayPayment(ctx context.Context, relayRequests []*pairingtypes.RelayRequest, description string) error - GetEpochSize(ctx context.Context) (uint64, error) - GetRecommendedEpochNumToCollectPayment(ctx context.Context) (uint64, error) + GetEpochSizeMultipliedByRecommendedEpochNumToCollectPayment(ctx context.Context) (uint64, error) EarliestBlockInMemory(ctx context.Context) (uint64, error) } @@ -224,23 +223,10 @@ func (rws *RewardServer) RemoveExpectedPayment(paidCUToFInd uint64, expectedClie return false } -// returns how long to wait until asking for payments for each epoch. 
-func (rws *RewardServer) getEpochSizeWithRecommendedPaymentDelay(ctx context.Context) (uint64, error) { - epochSize, err := rws.rewardsTxSender.GetEpochSize(ctx) - if err != nil { - return 0, utils.LavaFormatError("Failed fetching rws.rewardsTxSender.GetEpochSize(ctx)", err, nil) - } - recommendedEpochNumToCollectPayment, err := rws.rewardsTxSender.GetRecommendedEpochNumToCollectPayment(ctx) - if err != nil { - return 0, utils.LavaFormatError("Failed fetching rws.rewardsTxSender.GetRecommendedEpochNumToCollectPayment(ctx)", err, nil) - } - return recommendedEpochNumToCollectPayment * epochSize, nil -} - func (rws *RewardServer) gatherRewardsForClaim(ctx context.Context, current_epoch uint64) (rewardsForClaim []*pairingtypes.RelayRequest, errRet error) { rws.lock.Lock() defer rws.lock.Unlock() - epochSizeWithRecommendedPaymentDelay, err := rws.getEpochSizeWithRecommendedPaymentDelay(ctx) + epochSizeWithRecommendedPaymentDelay, err := rws.rewardsTxSender.GetEpochSizeMultipliedByRecommendedEpochNumToCollectPayment(ctx) if err != nil { return nil, err } diff --git a/protocol/rpcprovider/rpcprovider.go b/protocol/rpcprovider/rpcprovider.go index da7ef3156f..e8c28c336c 100644 --- a/protocol/rpcprovider/rpcprovider.go +++ b/protocol/rpcprovider/rpcprovider.go @@ -48,6 +48,7 @@ type ProviderStateTrackerInf interface { EarliestBlockInMemory(ctx context.Context) (uint64, error) RegisterPaymentUpdatableForPayments(ctx context.Context, paymentUpdatable statetracker.PaymentUpdatable) GetRecommendedEpochNumToCollectPayment(ctx context.Context) (uint64, error) + GetEpochSizeMultipliedByRecommendedEpochNumToCollectPayment(ctx context.Context) (uint64, error) } type RPCProvider struct { @@ -56,18 +57,6 @@ type RPCProvider struct { rpcProviderListeners map[string]*ProviderListener } -func (rpcp *RPCProvider) getEpochSizeForPSMBlockMemory(ctx context.Context) uint64 { - epochSize, err := rpcp.providerStateTracker.GetEpochSize(ctx) - if err != nil { - utils.LavaFormatFatal("Failed fetching GetEpochSize in RPCProvider Start", err, nil) - } - recommendedEpochNumToCollectPayment, err := rpcp.providerStateTracker.GetRecommendedEpochNumToCollectPayment(ctx) - if err != nil { - utils.LavaFormatFatal("Failed fetching GetRecommendedEpochNumToCollectPayment in RPCProvider Start", err, nil) - } - return recommendedEpochNumToCollectPayment * epochSize -} - func (rpcp *RPCProvider) Start(ctx context.Context, txFactory tx.Factory, clientCtx client.Context, rpcProviderEndpoints []*lavasession.RPCProviderEndpoint, cache *performance.Cache, parallelConnections uint) (err error) { // single state tracker lavaChainFetcher := chainlib.NewLavaChainFetcher(ctx, clientCtx) @@ -98,7 +87,10 @@ func (rpcp *RPCProvider) Start(ctx context.Context, txFactory tx.Factory, client } utils.LavaFormatInfo("RPCProvider pubkey: "+addr.String(), nil) utils.LavaFormatInfo("RPCProvider setting up endpoints", &map[string]string{"length": strconv.Itoa(len(rpcProviderEndpoints))}) - blockMemorySize := rpcp.getEpochSizeForPSMBlockMemory(ctx) // get the number of blocks to keep in PSM. + blockMemorySize, err := rpcp.providerStateTracker.GetEpochSizeMultipliedByRecommendedEpochNumToCollectPayment(ctx) // get the number of blocks to keep in PSM. 
+ if err != nil { + utils.LavaFormatFatal("Failed fetching GetEpochSizeMultipliedByRecommendedEpochNumToCollectPayment in RPCProvider Start", err, nil) + } for _, rpcProviderEndpoint := range rpcProviderEndpoints { providerSessionManager := lavasession.NewProviderSessionManager(rpcProviderEndpoint, blockMemorySize) key := rpcProviderEndpoint.Key() diff --git a/protocol/statetracker/provider_state_tracker.go b/protocol/statetracker/provider_state_tracker.go index ee0302b9c1..845740272b 100644 --- a/protocol/statetracker/provider_state_tracker.go +++ b/protocol/statetracker/provider_state_tracker.go @@ -114,3 +114,7 @@ func (pst *ProviderStateTracker) EarliestBlockInMemory(ctx context.Context) (uin func (pst *ProviderStateTracker) GetRecommendedEpochNumToCollectPayment(ctx context.Context) (uint64, error) { return pst.stateQuery.GetRecommendedEpochNumToCollectPayment(ctx) } + +func (pst *ProviderStateTracker) GetEpochSizeMultipliedByRecommendedEpochNumToCollectPayment(ctx context.Context) (uint64, error) { + return pst.stateQuery.GetEpochSizeMultipliedByRecommendedEpochNumToCollectPayment(ctx) +} diff --git a/protocol/statetracker/state_query.go b/protocol/statetracker/state_query.go index 0da2571be3..833bb35dc6 100644 --- a/protocol/statetracker/state_query.go +++ b/protocol/statetracker/state_query.go @@ -289,3 +289,15 @@ func (psq *ProviderStateQuery) GetRecommendedEpochNumToCollectPayment(ctx contex } return res.GetParams().RecommendedEpochNumToCollectPayment, nil } + +func (psq *ProviderStateQuery) GetEpochSizeMultipliedByRecommendedEpochNumToCollectPayment(ctx context.Context) (uint64, error) { + epochSize, err := psq.GetEpochSize(ctx) + if err != nil { + return 0, err + } + recommendedEpochNumToCollectPayment, err := psq.GetRecommendedEpochNumToCollectPayment(ctx) + if err != nil { + return 0, err + } + return epochSize * recommendedEpochNumToCollectPayment, nil +} From 8e0d68a2b6a8ba6f3c439176177dc47a0136d6e7 Mon Sep 17 00:00:00 2001 From: Ran Mishael Date: Tue, 21 Feb 2023 17:46:17 +0100 Subject: [PATCH 049/123] indicative error in reward server --- protocol/rpcprovider/rewardserver/reward_server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/protocol/rpcprovider/rewardserver/reward_server.go b/protocol/rpcprovider/rewardserver/reward_server.go index d8fd567cce..fe5c574850 100644 --- a/protocol/rpcprovider/rewardserver/reward_server.go +++ b/protocol/rpcprovider/rewardserver/reward_server.go @@ -228,7 +228,7 @@ func (rws *RewardServer) gatherRewardsForClaim(ctx context.Context, current_epoc defer rws.lock.Unlock() epochSizeWithRecommendedPaymentDelay, err := rws.rewardsTxSender.GetEpochSizeMultipliedByRecommendedEpochNumToCollectPayment(ctx) if err != nil { - return nil, err + return nil, utils.LavaFormatError("gatherRewardsForClaim failed to GetEpochSizeMultipliedByRecommendedEpochNumToCollectPayment", err, nil) } if epochSizeWithRecommendedPaymentDelay > current_epoch { From e424455aa1d295b9e9fbbcda338bc7b576fb8307 Mon Sep 17 00:00:00 2001 From: omer mishael Date: Tue, 21 Feb 2023 18:47:33 +0200 Subject: [PATCH 050/123] finished missing chain fetcher block parsing --- protocol/chainlib/chain_fetcher.go | 49 ++++++++++++++++++++++-------- 1 file changed, 37 insertions(+), 12 deletions(-) diff --git a/protocol/chainlib/chain_fetcher.go b/protocol/chainlib/chain_fetcher.go index fae81ae87d..95ca79eca0 100644 --- a/protocol/chainlib/chain_fetcher.go +++ b/protocol/chainlib/chain_fetcher.go @@ -3,12 +3,14 @@ package chainlib import ( "context" "fmt" + "strings" 
"github.com/cosmos/cosmos-sdk/client" "github.com/lavanet/lava/protocol/chainlib/chainproxy" "github.com/lavanet/lava/protocol/lavasession" "github.com/lavanet/lava/relayer/parser" "github.com/lavanet/lava/utils" + "github.com/lavanet/lava/x/pairing/types" spectypes "github.com/lavanet/lava/x/spec/types" ) @@ -23,13 +25,28 @@ type ChainFetcher struct { } func (cf *ChainFetcher) FetchLatestBlockNum(ctx context.Context) (int64, error) { - serviceApi, ok := cf.chainParser.GetSpecApiByTag(spectypes.GET_BLOCKNUM) if !ok { return spectypes.NOT_APPLICABLE, utils.LavaFormatError(spectypes.GET_BLOCKNUM+" tag function not found", nil, &map[string]string{"chainID": cf.endpoint.ChainID, "APIInterface": cf.endpoint.ApiInterface}) } - CraftChainMessage(serviceApi, cf.chainParser) - return 0, fmt.Errorf("not implemented") + chainMessage := CraftChainMessage(serviceApi, cf.chainParser) + reply, _, _, err := cf.chainProxy.SendNodeMsg(ctx, nil, chainMessage) + if err != nil { + return spectypes.NOT_APPLICABLE, utils.LavaFormatError(spectypes.GET_BLOCKNUM+" failed sending chainMessage", err, &map[string]string{"chainID": cf.endpoint.ChainID, "APIInterface": cf.endpoint.ApiInterface}) + } + parserInput, err := cf.formatResponseForParsing(reply, chainMessage) + if err != nil { + return spectypes.NOT_APPLICABLE, err + } + blockNum, err := parser.ParseBlockFromReply(parserInput, serviceApi.Parsing.ResultParsing) + if err != nil { + return spectypes.NOT_APPLICABLE, utils.LavaFormatError("Failed To Parse FetchLatestBlockNum", err, &map[string]string{ + "nodeUrl": strings.Join(cf.endpoint.NodeUrl, ","), + "Method": serviceApi.GetName(), + "Response": string(reply.Data), + }) + } + return blockNum, nil } func (cf *ChainFetcher) FetchBlockHashByNum(ctx context.Context, blockNum int64) (string, error) { @@ -50,15 +67,9 @@ func (cf *ChainFetcher) FetchBlockHashByNum(ctx context.Context, blockNum int64) if err != nil { return "", utils.LavaFormatError(spectypes.GET_BLOCK_BY_NUM+" failed sending chainMessage", err, &map[string]string{"chainID": cf.endpoint.ChainID, "APIInterface": cf.endpoint.ApiInterface}) } - var parserInput parser.RPCInput - respData := reply.Data - if customParsingMessage, ok := chainMessage.(chainproxy.CustomParsingMessage); ok { - parserInput, err = customParsingMessage.NewParsableRPCInput(respData) - if err != nil { - return "", utils.LavaFormatError(spectypes.GET_BLOCK_BY_NUM+" failed creating NewParsableRPCInput from CustomParsingMessage", err, &map[string]string{"chainID": cf.endpoint.ChainID, "APIInterface": cf.endpoint.ApiInterface}) - } - } else { - parserInput = chainproxy.DefaultParsableRPCInput(respData) + parserInput, err := cf.formatResponseForParsing(reply, chainMessage) + if err != nil { + return "", err } blockData, err := parser.ParseMessageResponse(parserInput, serviceApi.Parsing.ResultParsing) @@ -75,6 +86,20 @@ func (cf *ChainFetcher) FetchBlockHashByNum(ctx context.Context, blockNum int64) return ret, nil } +func (cf *ChainFetcher) formatResponseForParsing(reply *types.RelayReply, chainMessage ChainMessageForSend) (parsable parser.RPCInput, err error) { + var parserInput parser.RPCInput + respData := reply.Data + if customParsingMessage, ok := chainMessage.(chainproxy.CustomParsingMessage); ok { + parserInput, err = customParsingMessage.NewParsableRPCInput(respData) + if err != nil { + return nil, utils.LavaFormatError(spectypes.GET_BLOCK_BY_NUM+" failed creating NewParsableRPCInput from CustomParsingMessage", err, &map[string]string{"chainID": cf.endpoint.ChainID, 
"APIInterface": cf.endpoint.ApiInterface}) + } + } else { + parserInput = chainproxy.DefaultParsableRPCInput(respData) + } + return parserInput, nil +} + func NewChainFetcher(ctx context.Context, chainProxy ChainProxy, chainParser ChainParser, endpoint *lavasession.RPCProviderEndpoint) *ChainFetcher { cf := &ChainFetcher{chainProxy: chainProxy, chainParser: chainParser, endpoint: endpoint} return cf From 341680ba7e278b5e3cdf43c7a00e381a2692a4da Mon Sep 17 00:00:00 2001 From: Ran Mishael Date: Tue, 21 Feb 2023 18:24:55 +0100 Subject: [PATCH 051/123] fixing reward server provider session manager reward sync --- .../lavasession/provider_session_manager.go | 34 +++++++++++++------ .../rpcprovider/rewardserver/reward_server.go | 14 ++++---- 2 files changed, 32 insertions(+), 16 deletions(-) diff --git a/protocol/lavasession/provider_session_manager.go b/protocol/lavasession/provider_session_manager.go index fd57a5cf64..04a9cb8b23 100644 --- a/protocol/lavasession/provider_session_manager.go +++ b/protocol/lavasession/provider_session_manager.go @@ -11,11 +11,11 @@ import ( ) type ProviderSessionManager struct { - sessionsWithAllConsumers map[uint64]map[string]*ProviderSessionsWithConsumer // first key is epochs, second key is a consumer address - lock sync.RWMutex - blockedEpochHeight uint64 // requests from this epoch are blocked - rpcProviderEndpoint *RPCProviderEndpoint - numberOfBlocksKeptInMemory uint64 // sessionsWithAllConsumers with epochs older than ((latest epoch) - numberOfBlocksKeptInMemory) are deleted. + sessionsWithAllConsumers map[uint64]map[string]*ProviderSessionsWithConsumer // first key is epochs, second key is a consumer address + lock sync.RWMutex + blockedEpochHeight uint64 // requests from this epoch are blocked + rpcProviderEndpoint *RPCProviderEndpoint + blockDistanceForEpochValidity uint64 // sessionsWithAllConsumers with epochs older than ((latest epoch) - numberOfBlocksKeptInMemory) are deleted. } // reads cs.BlockedEpoch atomically @@ -179,12 +179,17 @@ func (psm *ProviderSessionManager) RPCProviderEndpoint() *RPCProviderEndpoint { func (psm *ProviderSessionManager) UpdateEpoch(epoch uint64) { psm.lock.Lock() defer psm.lock.Unlock() - psm.blockedEpochHeight = epoch - psm.numberOfBlocksKeptInMemory + if epoch > psm.blockDistanceForEpochValidity { + psm.blockedEpochHeight = epoch - psm.blockDistanceForEpochValidity + } else { + psm.blockedEpochHeight = 0 + } newMap := make(map[uint64]map[string]*ProviderSessionsWithConsumer) // In order to avoid running over the map twice, (1. mark 2. delete.) better technique is to copy and filter // which has better O(n) vs O(2n) for epochStored, value := range psm.sessionsWithAllConsumers { - if epochStored < psm.blockedEpochHeight { // check if key is skipped. + if !IsEpochValidForUse(epochStored, psm.blockedEpochHeight) { + // epoch is not valid so we dont keep its key in the new map continue } // if epochStored is ok, copy the value into the new map @@ -232,11 +237,16 @@ func (psm *ProviderSessionManager) UpdateSessionCU(consumerAddress string, epoch // load the session and update the CU inside psm.lock.Lock() defer psm.lock.Unlock() - providerSessionWithConsumerList, ok := psm.sessionsWithAllConsumers[epoch] + valid, _ := psm.IsValidEpoch(epoch) + if valid { // checking again because we are now locked and epoch cant change now. 
+ return utils.LavaFormatError("UpdateSessionCU", InvalidEpochError, &map[string]string{"RequestedEpoch": strconv.FormatUint(epoch, 10)}) + } + + providerSessionsWithConsumerMap, ok := psm.sessionsWithAllConsumers[epoch] if !ok { return utils.LavaFormatError("UpdateSessionCU Failed", EpochIsNotRegisteredError, &map[string]string{"epoch": strconv.FormatUint(epoch, 10)}) } - providerSessionWithConsumer, foundConsumer := providerSessionWithConsumerList[consumerAddress] + providerSessionWithConsumer, foundConsumer := providerSessionsWithConsumerMap[consumerAddress] if !foundConsumer { return utils.LavaFormatError("UpdateSessionCU Failed", ConsumerIsNotRegisteredError, &map[string]string{"epoch": strconv.FormatUint(epoch, 10), "consumer": consumerAddress}) } @@ -251,5 +261,9 @@ func (psm *ProviderSessionManager) UpdateSessionCU(consumerAddress string, epoch // Returning a new provider session manager func NewProviderSessionManager(rpcProviderEndpoint *RPCProviderEndpoint, numberOfBlocksKeptInMemory uint64) *ProviderSessionManager { - return &ProviderSessionManager{rpcProviderEndpoint: rpcProviderEndpoint, numberOfBlocksKeptInMemory: numberOfBlocksKeptInMemory} + return &ProviderSessionManager{rpcProviderEndpoint: rpcProviderEndpoint, blockDistanceForEpochValidity: numberOfBlocksKeptInMemory} +} + +func IsEpochValidForUse(targetEpoch uint64, blockedEpochHeight uint64) bool { + return targetEpoch > blockedEpochHeight } diff --git a/protocol/rpcprovider/rewardserver/reward_server.go b/protocol/rpcprovider/rewardserver/reward_server.go index fe5c574850..39f3cfaa94 100644 --- a/protocol/rpcprovider/rewardserver/reward_server.go +++ b/protocol/rpcprovider/rewardserver/reward_server.go @@ -10,6 +10,7 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" "github.com/lavanet/lava/protocol/lavaprotocol" + "github.com/lavanet/lava/protocol/lavasession" "github.com/lavanet/lava/utils" pairingtypes "github.com/lavanet/lava/x/pairing/types" terderminttypes "github.com/tendermint/tendermint/abci/types" @@ -223,20 +224,21 @@ func (rws *RewardServer) RemoveExpectedPayment(paidCUToFInd uint64, expectedClie return false } -func (rws *RewardServer) gatherRewardsForClaim(ctx context.Context, current_epoch uint64) (rewardsForClaim []*pairingtypes.RelayRequest, errRet error) { +func (rws *RewardServer) gatherRewardsForClaim(ctx context.Context, currentEpoch uint64) (rewardsForClaim []*pairingtypes.RelayRequest, errRet error) { rws.lock.Lock() defer rws.lock.Unlock() - epochSizeWithRecommendedPaymentDelay, err := rws.rewardsTxSender.GetEpochSizeMultipliedByRecommendedEpochNumToCollectPayment(ctx) + blockDistanceForEpochValidity, err := rws.rewardsTxSender.GetEpochSizeMultipliedByRecommendedEpochNumToCollectPayment(ctx) if err != nil { return nil, utils.LavaFormatError("gatherRewardsForClaim failed to GetEpochSizeMultipliedByRecommendedEpochNumToCollectPayment", err, nil) } - if epochSizeWithRecommendedPaymentDelay > current_epoch { - return nil, utils.LavaFormatError("current epoch too low", nil, &map[string]string{"current epoch": strconv.FormatUint(current_epoch, 10)}) + if blockDistanceForEpochValidity > currentEpoch { + return nil, utils.LavaFormatError("current epoch too low", nil, &map[string]string{"current epoch": strconv.FormatUint(currentEpoch, 10)}) } - target_epoch_to_claim_rewards := current_epoch - epochSizeWithRecommendedPaymentDelay + activeEpochThreshold := currentEpoch - blockDistanceForEpochValidity for epoch, epochRewards := range rws.rewards { - if epoch >= uint64(target_epoch_to_claim_rewards) { + 
if lavasession.IsEpochValidForUse(epoch, uint64(activeEpochThreshold)) { + // Epoch is still active so we don't claim the rewards yet. continue } From 4554affea871c9ea1215f4b0c424e6efb73f7629 Mon Sep 17 00:00:00 2001 From: omer mishael Date: Wed, 22 Feb 2023 08:31:45 +0200 Subject: [PATCH 052/123] tendermintrpc chain_fetcher now works --- .gitignore | 1 + cmd/lavad/main.go | 11 ++++++----- go.mod | 2 +- go.sum | 4 ++-- protocol/chainlib/grpc.go | 2 +- protocol/chainlib/tendermintRPC.go | 12 +++++++----- protocol/common/conf.go | 12 +++++++++--- 7 files changed, 27 insertions(+), 17 deletions(-) diff --git a/.gitignore b/.gitignore index 903b5a2fd1..cbbdc45511 100644 --- a/.gitignore +++ b/.gitignore @@ -26,3 +26,4 @@ testutil/debugging/ # Misc scripts/vars/ +rpcprovider.yml \ No newline at end of file diff --git a/cmd/lavad/main.go b/cmd/lavad/main.go index d3f6a50cbb..e444a06cf8 100644 --- a/cmd/lavad/main.go +++ b/cmd/lavad/main.go @@ -313,7 +313,7 @@ rpcconsumer 127.0.0.1:3333 COS3 tendermintrpc 127.0.0.1:3334 COS3 rest `, } cmdRPCProvider := &cobra.Command{ - Use: "rpcprovider [config-file] | { {listen-ip:listen-port spec-chain-id api-interface node-url} ... }", + Use: `rpcprovider [config-file] | { {listen-ip:listen-port spec-chain-id api-interface "comma-separated-node-urls"} ... }`, Short: `rpcprovider sets up a server to listen for rpc-consumers requests from the lava protocol send them to a configured node and respond with the reply`, Long: `rpcprovider sets up a server to listen for rpc-consumers requests from the lava protocol send them to a configured node and respond with the reply all configs should be located in` + app.DefaultNodeHome + "/config or the local running directory" + ` @@ -323,8 +323,9 @@ rpcconsumer 127.0.0.1:3333 COS3 tendermintrpc 127.0.0.1:3334 COS3 rest `, Example: `required flags: --geolocation 1 --from alice optional: --save-conf rpcprovider -rpcprovider rpcprovider_conf -rpcprovider 127.0.0.1:3333 COS3 tendermintrpc https://www.node-path.com:80 127.0.0.1:3334 COS3 rest https://www.node-path.com:1317 `, +rpcprovider rpcprovider_conf.yml +rpcprovider 127.0.0.1:3333 ETH1 jsonrpc wss://www.eth-node.com:80 +rpcprovider 127.0.0.1:3333 COS3 tendermintrpc "wss://www.node-path.com:80,https://www.node-path.com:80" 127.0.0.1:3333 COS3 rest https://www.node-path.com:1317 `, Args: func(cmd *cobra.Command, args []string) error { // Optionally run one of the validators provided by cobra if err := cobra.RangeArgs(0, 1)(cmd, args); err == nil { @@ -362,13 +363,13 @@ rpcprovider 127.0.0.1:3333 COS3 tendermintrpc https://www.node-path.com:80 127.0 if err != nil { return utils.LavaFormatError("failed reading flag", err, &map[string]string{"flag_name": common.SaveConfigFlagName}) } + viper.MergeConfigMap(viper_endpoints.AllSettings()) if save_config { - viper.MergeConfigMap(viper_endpoints.AllSettings()) err := viper.SafeWriteConfigAs(DefaultRPCProviderFileName) if err != nil { utils.LavaFormatInfo("did not create new config file, if it's desired remove the config file", &map[string]string{"file_name": DefaultRPCProviderFileName, "error": err.Error()}) } else { - utils.LavaFormatInfo("created new config file", &map[string]string{"file_name": DefaultRPCProviderFileName + ".yml"}) + utils.LavaFormatInfo("created new config file", &map[string]string{"file_name": DefaultRPCProviderFileName}) } } } else { diff --git a/go.mod b/go.mod index 3ca5104658..30e73c1d05 100644 --- a/go.mod +++ b/go.mod @@ -19,7 +19,7 @@ require ( github.com/stretchr/testify v1.8.1 
github.com/tendermint/tendermint v0.34.23 github.com/tendermint/tm-db v0.6.7 - google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc + google.golang.org/genproto v0.0.0-20230221151758-ace64dc21148 google.golang.org/grpc v1.53.0 google.golang.org/protobuf v1.28.2-0.20220831092852-f930b1dc76e8 gopkg.in/yaml.v2 v2.4.0 diff --git a/go.sum b/go.sum index efb4a71696..f1d446db8b 100644 --- a/go.sum +++ b/go.sum @@ -1990,8 +1990,8 @@ google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210126160654-44e461bb6506/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc h1:ijGwO+0vL2hJt5gaygqP2j6PfflOBrRot0IczKbmtio= -google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230221151758-ace64dc21148 h1:muK+gVBJBfFb4SejshDBlN2/UgxCCOKH9Y34ljqEGOc= +google.golang.org/genproto v0.0.0-20230221151758-ace64dc21148/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= diff --git a/protocol/chainlib/grpc.go b/protocol/chainlib/grpc.go index d76235467e..69026c2389 100644 --- a/protocol/chainlib/grpc.go +++ b/protocol/chainlib/grpc.go @@ -266,7 +266,7 @@ func (cp *GrpcChainProxy) SendNodeMsg(ctx context.Context, ch chan interface{}, rpcInputMessage := chainMessage.GetRPCMessage() nodeMessage, ok := rpcInputMessage.(rpcInterfaceMessages.GrpcMessage) if !ok { - return nil, "", nil, utils.LavaFormatError("invalid message type in jsonrpc failed to cast RPCInput from chainMessage", nil, &map[string]string{"rpcMessage": fmt.Sprintf("%+v", rpcInputMessage)}) + return nil, "", nil, utils.LavaFormatError("invalid message type in grpc failed to cast RPCInput from chainMessage", nil, &map[string]string{"rpcMessage": fmt.Sprintf("%+v", rpcInputMessage)}) } relayTimeout := LocalNodeTimePerCu(chainMessage.GetServiceApi().ComputeUnits) // check if this API is hanging (waiting for block confirmation) diff --git a/protocol/chainlib/tendermintRPC.go b/protocol/chainlib/tendermintRPC.go index c40f6f42aa..503949f0ff 100644 --- a/protocol/chainlib/tendermintRPC.go +++ b/protocol/chainlib/tendermintRPC.go @@ -44,7 +44,8 @@ func (apip *TendermintChainParser) CraftMessage(serviceApi spectypes.ServiceApi) Method: serviceApi.GetName(), Params: nil, } - return apip.newChainMessage(&serviceApi, &serviceApi.ApiInterfaces[0], spectypes.NOT_APPLICABLE, msg) + tenderMsg := rpcInterfaceMessages.TendermintrpcMessage{JsonrpcMessage: msg, Path: serviceApi.GetName()} + return apip.newChainMessage(&serviceApi, &serviceApi.ApiInterfaces[0], spectypes.NOT_APPLICABLE, tenderMsg) } // ParseMsg parses message data into chain message object @@ -122,12 +123,12 @@ func (apip *TendermintChainParser) ParseMsg(url string, data []byte, connectionT if err != nil { return nil, err } - - nodeMsg := apip.newChainMessage(serviceApi, apiInterface, requestedBlock, msg) + tenderMsg := 
rpcInterfaceMessages.TendermintrpcMessage{JsonrpcMessage: msg, Path: url} + nodeMsg := apip.newChainMessage(serviceApi, apiInterface, requestedBlock, tenderMsg) return nodeMsg, nil } -func (*TendermintChainParser) newChainMessage(serviceApi *spectypes.ServiceApi, apiInterface *spectypes.ApiInterface, requestedBlock int64, msg rpcInterfaceMessages.JsonrpcMessage) ChainMessage { +func (*TendermintChainParser) newChainMessage(serviceApi *spectypes.ServiceApi, apiInterface *spectypes.ApiInterface, requestedBlock int64, msg rpcInterfaceMessages.TendermintrpcMessage) ChainMessage { nodeMsg := &parsedMessage{ serviceApi: serviceApi, apiInterface: apiInterface, @@ -433,7 +434,8 @@ func (cp *tendermintRpcChainProxy) SendNodeMsg(ctx context.Context, ch chan inte rpcInputMessage := chainMessage.GetRPCMessage() nodeMessage, ok := rpcInputMessage.(rpcInterfaceMessages.TendermintrpcMessage) if !ok { - return nil, "", nil, utils.LavaFormatError("invalid message type in jsonrpc failed to cast RPCInput from chainMessage", nil, &map[string]string{"rpcMessage": fmt.Sprintf("%+v", rpcInputMessage)}) + _, ok := rpcInputMessage.(*rpcInterfaceMessages.TendermintrpcMessage) + return nil, "", nil, utils.LavaFormatError("invalid message type in tendermintrpc failed to cast RPCInput from chainMessage", nil, &map[string]string{"rpcMessage": fmt.Sprintf("%+v", rpcInputMessage), "ptrCast": fmt.Sprintf("%t", ok)}) } if nodeMessage.Path != "" { return cp.SendURI(ctx, &nodeMessage, ch, chainMessage) diff --git a/protocol/common/conf.go b/protocol/common/conf.go index ebd64283ae..038c6e8af5 100644 --- a/protocol/common/conf.go +++ b/protocol/common/conf.go @@ -2,6 +2,7 @@ package common import ( "fmt" + "strings" "github.com/spf13/viper" ) @@ -17,14 +18,19 @@ func ParseEndpointArgs(endpoint_strings []string, yaml_config_properties []strin if len(endpoint_strings)%numFieldsInConfig != 0 { return nil, fmt.Errorf("invalid endpoint_strings length %d, needs to divide by %d without residue", len(endpoint_strings), numFieldsInConfig) } - endpoints := []map[string]string{} + endpoints := []map[string]interface{}{} for idx := 0; idx < len(endpoint_strings); idx += numFieldsInConfig { - toAdd := map[string]string{} + toAdd := map[string]interface{}{} for inner_idx := 0; inner_idx < numFieldsInConfig; inner_idx++ { - toAdd[yaml_config_properties[inner_idx]] = endpoint_strings[idx+inner_idx] + if strings.Contains(endpoint_strings[idx+inner_idx], ",") { + toAdd[yaml_config_properties[inner_idx]] = strings.Split(endpoint_strings[idx+inner_idx], ",") + } else { + toAdd[yaml_config_properties[inner_idx]] = endpoint_strings[idx+inner_idx] + } } endpoints = append(endpoints, toAdd) } + viper_endpoints.Set(endpointsConfigName, endpoints) return } From 25e406f7b733037f6c0caf22908a2a485bca1afa Mon Sep 17 00:00:00 2001 From: omer mishael Date: Wed, 22 Feb 2023 10:59:50 +0200 Subject: [PATCH 053/123] added support for rest chain fetcher --- protocol/chainlib/chain_fetcher.go | 3 +-- protocol/chainlib/rest.go | 17 +++++++++++++---- protocol/chainlib/tendermintRPC.go | 10 +++++++--- 3 files changed, 21 insertions(+), 9 deletions(-) diff --git a/protocol/chainlib/chain_fetcher.go b/protocol/chainlib/chain_fetcher.go index 6e6e076875..43bd1185db 100644 --- a/protocol/chainlib/chain_fetcher.go +++ b/protocol/chainlib/chain_fetcher.go @@ -61,9 +61,8 @@ func (cf *ChainFetcher) FetchBlockHashByNum(ctx context.Context, blockNum int64) if serviceApi.GetParsing().FunctionTemplate == "" { return "", utils.LavaFormatError(spectypes.GET_BLOCK_BY_NUM+" missing 
function template", nil, &map[string]string{"chainID": cf.endpoint.ChainID, "APIInterface": cf.endpoint.ApiInterface}) } - path := serviceApi.Name data := []byte(fmt.Sprintf(serviceApi.GetParsing().FunctionTemplate, blockNum)) - chainMessage, err := cf.chainParser.ParseMsg(path, data, serviceApi.ApiInterfaces[0].Type) + chainMessage, err := cf.chainParser.ParseMsg(string(data), data, serviceApi.ApiInterfaces[0].Type) if err != nil { return "", utils.LavaFormatError(spectypes.GET_BLOCK_BY_NUM+" failed parseMsg on function template", err, &map[string]string{"chainID": cf.endpoint.ChainID, "APIInterface": cf.endpoint.ApiInterface}) } diff --git a/protocol/chainlib/rest.go b/protocol/chainlib/rest.go index 58899231a6..e739c4d214 100644 --- a/protocol/chainlib/rest.go +++ b/protocol/chainlib/rest.go @@ -68,6 +68,19 @@ func (apip *RestChainParser) ParseMsg(url string, data []byte, connectionType st Msg: data, Path: url, } + if connectionType == http.MethodGet { + if string(data) == url { // happens on chain fetcher where we send the formatted string on both data and msg for cross api interface compatibility + restMessage = rpcInterfaceMessages.RestMessage{ + Msg: nil, + Path: url, + } + } else { // support for optional params, our listener puts them inside Msg data + restMessage = rpcInterfaceMessages.RestMessage{ + Msg: nil, + Path: url + string(data), + } + } + } // TODO fix requested block nodeMsg := apip.newChainMessage(serviceApi, apiInterface, spectypes.NOT_APPLICABLE, restMessage) @@ -318,10 +331,6 @@ func (rcp *RestChainProxy) SendNodeMsg(ctx context.Context, ch chan interface{}, msgBuffer := bytes.NewBuffer(nodeMessage.Msg) url := rcp.nodeUrl + nodeMessage.Path - // Only get calls uses query params the rest uses the body - if connectionTypeSlected == http.MethodGet { - url += string(nodeMessage.Msg) - } relayTimeout := LocalNodeTimePerCu(chainMessage.GetServiceApi().ComputeUnits) // check if this API is hanging (waiting for block confirmation) diff --git a/protocol/chainlib/tendermintRPC.go b/protocol/chainlib/tendermintRPC.go index 503949f0ff..9f379d3e36 100644 --- a/protocol/chainlib/tendermintRPC.go +++ b/protocol/chainlib/tendermintRPC.go @@ -58,7 +58,8 @@ func (apip *TendermintChainParser) ParseMsg(url string, data []byte, connectionT // connectionType is currently only used in rest api // Unmarshal request var msg rpcInterfaceMessages.JsonrpcMessage - if string(data) != "" { + isJsonrpc := string(data) != "" + if isJsonrpc { // Fetch pointer to message and error msgPtr, err := rpcInterfaceMessages.ParseJsonRPCMsg(data) if err != nil { @@ -114,7 +115,7 @@ func (apip *TendermintChainParser) ParseMsg(url string, data []byte, connectionT // Check if custom block parser exists in the api interface // Use custom block parser only for URI calls - if apiInterface.GetOverwriteBlockParsing() != nil && url != "" { + if apiInterface.GetOverwriteBlockParsing() != nil && !isJsonrpc { blockParser = *apiInterface.GetOverwriteBlockParsing() } @@ -123,7 +124,10 @@ func (apip *TendermintChainParser) ParseMsg(url string, data []byte, connectionT if err != nil { return nil, err } - tenderMsg := rpcInterfaceMessages.TendermintrpcMessage{JsonrpcMessage: msg, Path: url} + tenderMsg := rpcInterfaceMessages.TendermintrpcMessage{JsonrpcMessage: msg, Path: ""} + if !isJsonrpc { + tenderMsg.Path = url // add path + } nodeMsg := apip.newChainMessage(serviceApi, apiInterface, requestedBlock, tenderMsg) return nodeMsg, nil } From 8b867adb6c82386578d7798a345c384c98aa4c11 Mon Sep 17 00:00:00 2001 From: omer 
mishael Date: Tue, 21 Feb 2023 23:32:33 +0200 Subject: [PATCH 054/123] Fixed context shutdown issues, fixed bugs --- cmd/lavad/main.go | 46 +++++++++++-------- config/rpcprovider.yml | 10 ++-- go.mod | 1 - go.sum | 4 +- protocol/chainlib/chain_fetcher.go | 13 +++++- .../chainproxy/rpcInterfaceMessages/common.go | 23 ++++++++++ .../rpcInterfaceMessages/grpcMessage.go | 16 ------- .../rpcInterfaceMessages/jsonRPCmessage.go | 10 ++++ protocol/chainlib/common.go | 19 +++++--- protocol/chainlib/common_test.go | 5 -- protocol/chainlib/grpc.go | 17 +++---- protocol/chainlib/jsonRPC.go | 16 ++----- protocol/chainlib/rest.go | 8 +--- protocol/chainlib/tendermintRPC.go | 9 +--- protocol/chaintracker/chain_tracker.go | 20 +++++--- protocol/chaintracker/chain_tracker_test.go | 5 ++ protocol/common/conf.go | 30 ++++++++++++ protocol/lavasession/common.go | 12 ----- protocol/lavasession/consumer_types.go | 5 ++ protocol/lavasession/provider_types.go | 5 ++ protocol/rpcconsumer/rpcconsumer.go | 28 ++--------- protocol/rpcprovider/provider_listener.go | 44 ++++++------------ .../rpcprovider/rewardserver/reward_server.go | 10 ++-- protocol/rpcprovider/rpcprovider.go | 37 +++++++++++---- rpcconsumer.yml | 35 -------------- 25 files changed, 216 insertions(+), 212 deletions(-) create mode 100644 protocol/chainlib/chainproxy/rpcInterfaceMessages/common.go create mode 100644 protocol/common/conf.go delete mode 100644 rpcconsumer.yml diff --git a/cmd/lavad/main.go b/cmd/lavad/main.go index 23a57afd55..d3f6a50cbb 100644 --- a/cmd/lavad/main.go +++ b/cmd/lavad/main.go @@ -18,6 +18,7 @@ import ( "github.com/cosmos/cosmos-sdk/version" "github.com/ignite-hq/cli/ignite/pkg/cosmoscmd" "github.com/lavanet/lava/app" + "github.com/lavanet/lava/protocol/common" "github.com/lavanet/lava/protocol/lavasession" "github.com/lavanet/lava/protocol/rpcconsumer" "github.com/lavanet/lava/protocol/rpcprovider" @@ -199,16 +200,16 @@ func main() { if one argument is passed, its assumed the config file name `, Example: `required flags: --geolocation 1 --from alice - rpcconsumer - rpcconsumer rpcconsumer_conf - rpcconsumer 127.0.0.1:3333 COS3 tendermintrpc 127.0.0.1:3334 COS3 rest `, +rpcconsumer +rpcconsumer rpcconsumer_conf +rpcconsumer 127.0.0.1:3333 COS3 tendermintrpc 127.0.0.1:3334 COS3 rest `, Args: func(cmd *cobra.Command, args []string) error { // Optionally run one of the validators provided by cobra if err := cobra.RangeArgs(0, 1)(cmd, args); err == nil { // zero or one argument is allowed return nil } - if len(args)%rpcconsumer.NumFieldsInConfig != 0 { + if len(args)%len(rpcconsumer.Yaml_config_properties) != 0 { return fmt.Errorf("invalid number of arguments, either its a single config file or repeated groups of 3 IP:PORT chain-id api-interface") } return nil @@ -232,7 +233,7 @@ func main() { var endpoints_strings []string var viper_endpoints *viper.Viper if len(args) > 1 { - viper_endpoints, err = rpcconsumer.ParseEndpointArgs(args, rpcconsumer.Yaml_config_properties, rpcconsumer.EndpointsConfigName) + viper_endpoints, err = common.ParseEndpointArgs(args, rpcconsumer.Yaml_config_properties, common.EndpointsConfigName) if err != nil { return utils.LavaFormatError("invalid endpoints arguments", err, &map[string]string{"endpoint_strings": strings.Join(args, "")}) } @@ -320,17 +321,18 @@ func main() { if one argument is passed, its assumed the config file name `, Example: `required flags: --geolocation 1 --from alice - rpcprovider - rpcprovider rpcprovider_conf - rpcprovider 127.0.0.1:3333 COS3 tendermintrpc 
https://www.node-path.com:80 127.0.0.1:3334 COS3 rest https://www.node-path.com:1317 `, +optional: --save-conf +rpcprovider +rpcprovider rpcprovider_conf +rpcprovider 127.0.0.1:3333 COS3 tendermintrpc https://www.node-path.com:80 127.0.0.1:3334 COS3 rest https://www.node-path.com:1317 `, Args: func(cmd *cobra.Command, args []string) error { // Optionally run one of the validators provided by cobra if err := cobra.RangeArgs(0, 1)(cmd, args); err == nil { // zero or one argument is allowed return nil } - if len(args)%rpcprovider.NumFieldsInConfig != 0 { - return fmt.Errorf("invalid number of arguments, either its a single config file or repeated groups of 3 IP:PORT chain-id api-interface") + if len(args)%len(rpcprovider.Yaml_config_properties) != 0 { + return fmt.Errorf("invalid number of arguments, either its a single config file or repeated groups of 4 IP:PORT chain-id api-interface [node_url,node_url_2]") } return nil }, @@ -352,16 +354,22 @@ func main() { var endpoints_strings []string var viper_endpoints *viper.Viper if len(args) > 1 { - viper_endpoints, err = rpcconsumer.ParseEndpointArgs(args, rpcprovider.Yaml_config_properties, rpcprovider.EndpointsConfigName) + viper_endpoints, err = common.ParseEndpointArgs(args, rpcprovider.Yaml_config_properties, common.EndpointsConfigName) if err != nil { return utils.LavaFormatError("invalid endpoints arguments", err, &map[string]string{"endpoint_strings": strings.Join(args, "")}) } - viper.MergeConfigMap(viper_endpoints.AllSettings()) - err := viper.SafeWriteConfigAs(DefaultRPCProviderFileName) + save_config, err := cmd.Flags().GetBool(common.SaveConfigFlagName) if err != nil { - utils.LavaFormatInfo("did not create new config file, if it's desired remove the config file", &map[string]string{"file_name": viper.ConfigFileUsed()}) - } else { - utils.LavaFormatInfo("created new config file", &map[string]string{"file_name": DefaultRPCProviderFileName + ".yml"}) + return utils.LavaFormatError("failed reading flag", err, &map[string]string{"flag_name": common.SaveConfigFlagName}) + } + if save_config { + viper.MergeConfigMap(viper_endpoints.AllSettings()) + err := viper.SafeWriteConfigAs(DefaultRPCProviderFileName) + if err != nil { + utils.LavaFormatInfo("did not create new config file, if it's desired remove the config file", &map[string]string{"file_name": DefaultRPCProviderFileName, "error": err.Error()}) + } else { + utils.LavaFormatInfo("created new config file", &map[string]string{"file_name": DefaultRPCProviderFileName + ".yml"}) + } } } else { err = viper.ReadInConfig() @@ -406,7 +414,7 @@ func main() { return utils.LavaFormatError("failed to start pprof HTTP server", err, nil) } } - rpcProvider := rpcprovider.RPCProvider{} + utils.LavaFormatInfo("lavad Binary Version: "+version.Version, nil) rand.Seed(time.Now().UnixNano()) var cache *performance.Cache = nil @@ -425,6 +433,7 @@ func main() { if err != nil { utils.LavaFormatFatal("error fetching chainproxy.ParallelConnectionsFlag", err, nil) } + rpcProvider := rpcprovider.RPCProvider{} err = rpcProvider.Start(ctx, txFactory, clientCtx, rpcProviderEndpoints, cache, numberOfNodeParallelConnections) return err }, @@ -475,13 +484,14 @@ func main() { // RPCProvider command flags flags.AddTxFlagsToCmd(cmdRPCProvider) cmdRPCProvider.MarkFlagRequired(flags.FlagFrom) + cmdRPCProvider.Flags().Bool(common.SaveConfigFlagName, false, "save cmd args to a config file") cmdRPCProvider.Flags().String(flags.FlagChainID, app.Name, "network chain id") cmdRPCProvider.Flags().Uint64(sentry.GeolocationFlag, 0, 
"geolocation to run from") cmdRPCProvider.MarkFlagRequired(sentry.GeolocationFlag) cmdRPCProvider.Flags().String(performance.PprofAddressFlagName, "", "pprof server address, used for code profiling") cmdRPCProvider.Flags().String(performance.CacheFlagName, "", "address for a cache server to improve performance") cmdRPCProvider.Flags().Uint(chainproxy.ParallelConnectionsFlag, chainproxy.NumberOfParallelConnections, "parallel connections") - // rootCmd.AddCommand(cmdRPCProvider) // TODO: DISABLE COMMAND SO IT'S NOT EXPOSED ON MAIN YET + rootCmd.AddCommand(cmdRPCProvider) // TODO: DISABLE COMMAND SO IT'S NOT EXPOSED ON MAIN YET if err := svrcmd.Execute(rootCmd, app.DefaultNodeHome); err != nil { os.Exit(1) diff --git a/config/rpcprovider.yml b/config/rpcprovider.yml index b5b69efe1b..598c04fed2 100644 --- a/config/rpcprovider.yml +++ b/config/rpcprovider.yml @@ -1,13 +1,17 @@ endpoints: - chain-id: COS3 api-interface: tendermintrpc - network-address: 127.0.0.1:3333 + network-address: 127.0.0.1:2241 node-url: ["wss://tendermint-websocket.xyz","https://tendermint-https.xyz"] + - chain-id: COS3 + api-interface: grpc + network-address: 127.0.0.1:2234 + node-url: ["https://cosmos-grpc.xyz"] - chain-id: COS3 api-interface: rest - network-address: 127.0.0.1:3333 + network-address: 127.0.0.1:2231 node-url: ["https://tendermint-rest.xyz"] - chain-id: ETH1 api-interface: jsonrpc - network-address: 127.0.0.1:3333 + network-address: 127.0.0.1:2221 node-url: ["wss://ethereum-websocket.xyz"] \ No newline at end of file diff --git a/go.mod b/go.mod index a2797a75c8..3ca5104658 100644 --- a/go.mod +++ b/go.mod @@ -49,7 +49,6 @@ require ( github.com/ghodss/yaml v1.0.0 // indirect github.com/gogo/googleapis v1.4.0 // indirect github.com/golang/glog v1.0.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.0 // indirect github.com/pelletier/go-toml/v2 v2.0.5 // indirect golang.org/x/mod v0.7.0 // indirect golang.org/x/tools v0.2.0 // indirect diff --git a/go.sum b/go.sum index 0cde574a04..efb4a71696 100644 --- a/go.sum +++ b/go.sum @@ -657,7 +657,6 @@ github.com/golang-jwt/jwt/v4 v4.3.0 h1:kHL1vqdqWNfATmA0FNMdmZNMyZI1U6O31X4rlIPoB github.com/golang-jwt/jwt/v4 v4.3.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= @@ -785,8 +784,6 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.0 h1:1JYBfzqrWPcCclBwxFCPAou9n+q86mfnu7NAeHfte7A= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.0/go.mod h1:YDZoGHuwE+ov0c8smSH49WLF3F2LaWnYYuDVd+EWrc0= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c 
h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= github.com/gtank/merlin v0.1.1-0.20191105220539-8318aed1a79f/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= @@ -1799,6 +1796,7 @@ golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= diff --git a/protocol/chainlib/chain_fetcher.go b/protocol/chainlib/chain_fetcher.go index 95ca79eca0..6e6e076875 100644 --- a/protocol/chainlib/chain_fetcher.go +++ b/protocol/chainlib/chain_fetcher.go @@ -24,6 +24,10 @@ type ChainFetcher struct { chainParser ChainParser } +func (cf *ChainFetcher) FetchEndpoint() lavasession.RPCProviderEndpoint { + return *cf.endpoint +} + func (cf *ChainFetcher) FetchLatestBlockNum(ctx context.Context) (int64, error) { serviceApi, ok := cf.chainParser.GetSpecApiByTag(spectypes.GET_BLOCKNUM) if !ok { @@ -59,7 +63,7 @@ func (cf *ChainFetcher) FetchBlockHashByNum(ctx context.Context, blockNum int64) } path := serviceApi.Name data := []byte(fmt.Sprintf(serviceApi.GetParsing().FunctionTemplate, blockNum)) - chainMessage, err := cf.chainParser.ParseMsg(path, data, "") + chainMessage, err := cf.chainParser.ParseMsg(path, data, serviceApi.ApiInterfaces[0].Type) if err != nil { return "", utils.LavaFormatError(spectypes.GET_BLOCK_BY_NUM+" failed parseMsg on function template", err, &map[string]string{"chainID": cf.endpoint.ChainID, "APIInterface": cf.endpoint.ApiInterface}) } @@ -89,7 +93,8 @@ func (cf *ChainFetcher) FetchBlockHashByNum(ctx context.Context, blockNum int64) func (cf *ChainFetcher) formatResponseForParsing(reply *types.RelayReply, chainMessage ChainMessageForSend) (parsable parser.RPCInput, err error) { var parserInput parser.RPCInput respData := reply.Data - if customParsingMessage, ok := chainMessage.(chainproxy.CustomParsingMessage); ok { + rpcMessage := chainMessage.GetRPCMessage() + if customParsingMessage, ok := rpcMessage.(chainproxy.CustomParsingMessage); ok { parserInput, err = customParsingMessage.NewParsableRPCInput(respData) if err != nil { return nil, utils.LavaFormatError(spectypes.GET_BLOCK_BY_NUM+" failed creating NewParsableRPCInput from CustomParsingMessage", err, &map[string]string{"chainID": cf.endpoint.ChainID, "APIInterface": cf.endpoint.ApiInterface}) @@ -109,6 +114,10 @@ type LavaChainFetcher struct { clientCtx client.Context } +func (lcf *LavaChainFetcher) FetchEndpoint() lavasession.RPCProviderEndpoint { + return lavasession.RPCProviderEndpoint{NodeUrl: []string{lcf.clientCtx.NodeURI}, ChainID: "Lava-node", ApiInterface: "tendermintrpc"} +} + func (lcf *LavaChainFetcher) FetchLatestBlockNum(ctx context.Context) (int64, error) { resultStatus, err := lcf.clientCtx.Client.Status(ctx) if err != nil { diff --git 
a/protocol/chainlib/chainproxy/rpcInterfaceMessages/common.go b/protocol/chainlib/chainproxy/rpcInterfaceMessages/common.go new file mode 100644 index 0000000000..8e82548173 --- /dev/null +++ b/protocol/chainlib/chainproxy/rpcInterfaceMessages/common.go @@ -0,0 +1,23 @@ +package rpcInterfaceMessages + +import ( + "encoding/json" + + "github.com/lavanet/lava/relayer/parser" +) + +type ParsableRPCInput struct { + Result json.RawMessage +} + +func (pri ParsableRPCInput) ParseBlock(inp string) (int64, error) { + return parser.ParseDefaultBlockParameter(inp) +} + +func (pri ParsableRPCInput) GetParams() interface{} { + return nil +} + +func (pri ParsableRPCInput) GetResult() json.RawMessage { + return pri.Result +} diff --git a/protocol/chainlib/chainproxy/rpcInterfaceMessages/grpcMessage.go b/protocol/chainlib/chainproxy/rpcInterfaceMessages/grpcMessage.go index 824162233d..902915f133 100644 --- a/protocol/chainlib/chainproxy/rpcInterfaceMessages/grpcMessage.go +++ b/protocol/chainlib/chainproxy/rpcInterfaceMessages/grpcMessage.go @@ -122,19 +122,3 @@ func ParseSymbol(svcAndMethod string) (string, string) { } return svcAndMethod[:pos], svcAndMethod[pos+1:] } - -type ParsableRPCInput struct { - Result json.RawMessage -} - -func (pri ParsableRPCInput) ParseBlock(inp string) (int64, error) { - return parser.ParseDefaultBlockParameter(inp) -} - -func (pri ParsableRPCInput) GetParams() interface{} { - return nil -} - -func (pri ParsableRPCInput) GetResult() json.RawMessage { - return pri.Result -} diff --git a/protocol/chainlib/chainproxy/rpcInterfaceMessages/jsonRPCmessage.go b/protocol/chainlib/chainproxy/rpcInterfaceMessages/jsonRPCmessage.go index 6f6c128356..6cda5b06b2 100644 --- a/protocol/chainlib/chainproxy/rpcInterfaceMessages/jsonRPCmessage.go +++ b/protocol/chainlib/chainproxy/rpcInterfaceMessages/jsonRPCmessage.go @@ -6,6 +6,7 @@ import ( sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" "github.com/lavanet/lava/protocol/chainlib/chainproxy/rpcclient" "github.com/lavanet/lava/relayer/parser" + "github.com/lavanet/lava/utils" ) var ErrFailedToConvertMessage = sdkerrors.New("RPC error", 1000, "failed to convert a message") @@ -40,6 +41,15 @@ func ConvertJsonRPCMsg(rpcMsg *rpcclient.JsonrpcMessage) (*JsonrpcMessage, error return msg, nil } +func (gm JsonrpcMessage) NewParsableRPCInput(input json.RawMessage) (parser.RPCInput, error) { + msg := &JsonrpcMessage{} + err := json.Unmarshal(input, msg) + if err != nil { + return nil, utils.LavaFormatError("failed unmarshaling JsonrpcMessage", err, &map[string]string{"input": string(input)}) + } + return ParsableRPCInput{Result: msg.Result}, nil +} + func (cp JsonrpcMessage) GetParams() interface{} { return cp.Params } diff --git a/protocol/chainlib/common.go b/protocol/chainlib/common.go index 39c0b2f966..1ede0e16d9 100644 --- a/protocol/chainlib/common.go +++ b/protocol/chainlib/common.go @@ -42,7 +42,7 @@ type parsedMessage struct { serviceApi *spectypes.ServiceApi apiInterface *spectypes.ApiInterface requestedBlock int64 - msg interface{} + msg parser.RPCInput } type BaseChainProxy struct { @@ -62,11 +62,7 @@ func (pm parsedMessage) RequestedBlock() int64 { } func (pm parsedMessage) GetRPCMessage() parser.RPCInput { - rpcInput, ok := pm.msg.(parser.RPCInput) - if !ok { - return nil - } - return rpcInput + return pm.msg } func extractDappIDFromFiberContext(c *fiber.Ctx) (dappID string) { @@ -199,6 +195,17 @@ func verifyTendermintEndpoint(endpoints []string) (websocketEndpoint string, htt return websocketEndpoint, httpEndpoint } +func 
GetApiInterfaceFromServiceApi(serviceApi *spectypes.ServiceApi, connectionType string) *spectypes.ApiInterface { + var apiInterface *spectypes.ApiInterface = nil + for i := range serviceApi.ApiInterfaces { + if serviceApi.ApiInterfaces[i].Type == connectionType { + apiInterface = &serviceApi.ApiInterfaces[i] + break + } + } + return apiInterface +} + func CraftChainMessage(serviceApi spectypes.ServiceApi, chainParser ChainParser) ChainMessageForSend { return chainParser.CraftMessage(serviceApi) } diff --git a/protocol/chainlib/common_test.go b/protocol/chainlib/common_test.go index f8057890cb..dd6c1aa880 100644 --- a/protocol/chainlib/common_test.go +++ b/protocol/chainlib/common_test.go @@ -299,11 +299,6 @@ func TestParsedMessage_GetRPCMessage(t *testing.T) { msg: rpcInput, } assert.Equal(t, rpcInput, pm.GetRPCMessage()) - - pm = parsedMessage{ - msg: 123, - } - assert.Nil(t, pm.GetRPCMessage()) } type mockRPCInput struct{} diff --git a/protocol/chainlib/grpc.go b/protocol/chainlib/grpc.go index 15f0bdd424..d76235467e 100644 --- a/protocol/chainlib/grpc.go +++ b/protocol/chainlib/grpc.go @@ -5,7 +5,6 @@ import ( "context" "errors" "fmt" - "google.golang.org/grpc/metadata" "io" "net" "net/http" @@ -13,6 +12,8 @@ import ( "sync" "time" + "google.golang.org/grpc/metadata" + "github.com/fullstorydev/grpcurl" "github.com/golang/protobuf/proto" "github.com/jhump/protoreflect/desc" @@ -49,7 +50,7 @@ func (apip *GrpcChainParser) CraftMessage(serviceApi spectypes.ServiceApi) Chain Msg: nil, Path: serviceApi.GetName(), } - return apip.newMethod(&serviceApi, &serviceApi.ApiInterfaces[0], spectypes.NOT_APPLICABLE, grpcMessage) + return apip.newChainMessage(&serviceApi, &serviceApi.ApiInterfaces[0], spectypes.NOT_APPLICABLE, grpcMessage) } // ParseMsg parses message data into chain message object @@ -65,13 +66,7 @@ func (apip *GrpcChainParser) ParseMsg(url string, data []byte, connectionType st return nil, utils.LavaFormatError("failed to getSupportedApi gRPC", err, nil) } - var apiInterface *spectypes.ApiInterface = nil - for i := range serviceApi.ApiInterfaces { - if serviceApi.ApiInterfaces[i].Type == connectionType { - apiInterface = &serviceApi.ApiInterfaces[i] - break - } - } + apiInterface := GetApiInterfaceFromServiceApi(serviceApi, connectionType) if apiInterface == nil { return nil, fmt.Errorf("could not find the interface %s in the service %s", connectionType, serviceApi.Name) } @@ -83,11 +78,11 @@ func (apip *GrpcChainParser) ParseMsg(url string, data []byte, connectionType st } // TODO: fix requested block - nodeMsg := apip.newMethod(serviceApi, apiInterface, spectypes.NOT_APPLICABLE, grpcMessage) + nodeMsg := apip.newChainMessage(serviceApi, apiInterface, spectypes.NOT_APPLICABLE, grpcMessage) return nodeMsg, nil } -func (*GrpcChainParser) newMethod(serviceApi *spectypes.ServiceApi, apiInterface *spectypes.ApiInterface, requestedBlock int64, grpcMessage rpcInterfaceMessages.GrpcMessage) *parsedMessage { +func (*GrpcChainParser) newChainMessage(serviceApi *spectypes.ServiceApi, apiInterface *spectypes.ApiInterface, requestedBlock int64, grpcMessage rpcInterfaceMessages.GrpcMessage) *parsedMessage { nodeMsg := &parsedMessage{ serviceApi: serviceApi, apiInterface: apiInterface, diff --git a/protocol/chainlib/jsonRPC.go b/protocol/chainlib/jsonRPC.go index 3b77fe8283..194fc5d201 100644 --- a/protocol/chainlib/jsonRPC.go +++ b/protocol/chainlib/jsonRPC.go @@ -39,7 +39,7 @@ func NewJrpcChainParser() (chainParser *JsonRPCChainParser, err error) { } func (apip *JsonRPCChainParser) 
CraftMessage(serviceApi spectypes.ServiceApi) ChainMessageForSend { - msg := &rpcInterfaceMessages.JsonrpcMessage{ + msg := rpcInterfaceMessages.JsonrpcMessage{ Version: "2.0", ID: []byte("1"), Method: serviceApi.GetName(), @@ -48,7 +48,7 @@ func (apip *JsonRPCChainParser) CraftMessage(serviceApi spectypes.ServiceApi) Ch return apip.newChainMessage(&serviceApi, &serviceApi.ApiInterfaces[0], spectypes.NOT_APPLICABLE, msg) } -// ParseMsg parses message data into chain message object +// this func parses message data into chain message object func (apip *JsonRPCChainParser) ParseMsg(url string, data []byte, connectionType string) (ChainMessage, error) { // Guard that the JsonRPCChainParser instance exists if apip == nil { @@ -68,13 +68,7 @@ func (apip *JsonRPCChainParser) ParseMsg(url string, data []byte, connectionType return nil, utils.LavaFormatError("getSupportedApi failed", err, &map[string]string{"method": msg.Method}) } - var apiInterface *spectypes.ApiInterface = nil - for i := range serviceApi.ApiInterfaces { - if serviceApi.ApiInterfaces[i].Type == connectionType { - apiInterface = &serviceApi.ApiInterfaces[i] - break - } - } + apiInterface := GetApiInterfaceFromServiceApi(serviceApi, connectionType) if apiInterface == nil { return nil, fmt.Errorf("could not find the interface %s in the service %s", connectionType, serviceApi.Name) } @@ -84,11 +78,11 @@ func (apip *JsonRPCChainParser) ParseMsg(url string, data []byte, connectionType return nil, err } - nodeMsg := apip.newChainMessage(serviceApi, apiInterface, requestedBlock, msg) + nodeMsg := apip.newChainMessage(serviceApi, apiInterface, requestedBlock, *msg) return nodeMsg, nil } -func (*JsonRPCChainParser) newChainMessage(serviceApi *spectypes.ServiceApi, apiInterface *spectypes.ApiInterface, requestedBlock int64, msg *rpcInterfaceMessages.JsonrpcMessage) *parsedMessage { +func (*JsonRPCChainParser) newChainMessage(serviceApi *spectypes.ServiceApi, apiInterface *spectypes.ApiInterface, requestedBlock int64, msg rpcInterfaceMessages.JsonrpcMessage) *parsedMessage { nodeMsg := &parsedMessage{ serviceApi: serviceApi, apiInterface: apiInterface, diff --git a/protocol/chainlib/rest.go b/protocol/chainlib/rest.go index c549bb6aa4..58899231a6 100644 --- a/protocol/chainlib/rest.go +++ b/protocol/chainlib/rest.go @@ -58,13 +58,7 @@ func (apip *RestChainParser) ParseMsg(url string, data []byte, connectionType st return nil, err } - var apiInterface *spectypes.ApiInterface = nil - for i := range serviceApi.ApiInterfaces { - if serviceApi.ApiInterfaces[i].Type == connectionType { - apiInterface = &serviceApi.ApiInterfaces[i] - break - } - } + apiInterface := GetApiInterfaceFromServiceApi(serviceApi, connectionType) if apiInterface == nil { return nil, fmt.Errorf("could not find the interface %s in the service %s", connectionType, serviceApi.Name) } diff --git a/protocol/chainlib/tendermintRPC.go b/protocol/chainlib/tendermintRPC.go index 4b45613963..c40f6f42aa 100644 --- a/protocol/chainlib/tendermintRPC.go +++ b/protocol/chainlib/tendermintRPC.go @@ -106,14 +106,7 @@ func (apip *TendermintChainParser) ParseMsg(url string, data []byte, connectionT // Extract default block parser blockParser := serviceApi.BlockParsing - // Find matched api interface by connection type - var apiInterface *spectypes.ApiInterface = nil - for i := range serviceApi.ApiInterfaces { - if serviceApi.ApiInterfaces[i].Type == connectionType { - apiInterface = &serviceApi.ApiInterfaces[i] - break - } - } + apiInterface := GetApiInterfaceFromServiceApi(serviceApi, 
connectionType) if apiInterface == nil { return nil, fmt.Errorf("could not find the interface %s in the service %s", connectionType, serviceApi.Name) } diff --git a/protocol/chaintracker/chain_tracker.go b/protocol/chaintracker/chain_tracker.go index 27b5ae8feb..813c30e5f5 100644 --- a/protocol/chaintracker/chain_tracker.go +++ b/protocol/chaintracker/chain_tracker.go @@ -14,6 +14,7 @@ import ( "time" "github.com/improbable-eng/grpc-web/go/grpcweb" + "github.com/lavanet/lava/protocol/lavasession" "github.com/lavanet/lava/utils" "golang.org/x/net/http2" "golang.org/x/net/http2/h2c" @@ -23,6 +24,7 @@ import ( type ChainFetcher interface { FetchLatestBlockNum(ctx context.Context) (int64, error) FetchBlockHashByNum(ctx context.Context, blockNum int64) (string, error) + FetchEndpoint() lavasession.RPCProviderEndpoint } type ChainTracker struct { @@ -35,6 +37,7 @@ type ChainTracker struct { newLatestCallback func(int64) // a function to be called when a new block is detected serverBlockMemory uint64 quit chan bool + endpoint lavasession.RPCProviderEndpoint } // this function returns block hashes of the blocks: [from block - to block) non inclusive. an additional specific block hash can be provided. order is sorted ascending @@ -119,16 +122,17 @@ func (cs *ChainTracker) fetchAllPreviousBlocks(ctx context.Context, latestBlock blockNumToFetch := latestBlock - idx // reading the blocks from the newest to oldest newHashForBlock, err := cs.fetchBlockHashByNum(ctx, blockNumToFetch) if err != nil { - return utils.LavaFormatError("could not get block data in Chain Tracker", err, &map[string]string{"block": strconv.FormatInt(blockNumToFetch, 10)}) + return utils.LavaFormatError("could not get block data in Chain Tracker", err, &map[string]string{"block": strconv.FormatInt(blockNumToFetch, 10), "ChainID": cs.endpoint.ChainID, "ApiInterface": cs.endpoint.ApiInterface}) } var foundOverlap bool foundOverlap, blocksQueueStartIndex, blocksQueueEndIndex, newQueueStartIndex = cs.hashesOverlapIndexes(readIndexDiff, idx, blockNumToFetch, newHashForBlock) if foundOverlap { - utils.LavaFormatDebug("Chain Tracker read a block Hash, and it existed, stopping fetch", &map[string]string{"block": strconv.FormatInt(blockNumToFetch, 10), "hash": newHashForBlock, "KeptBlocks": strconv.FormatInt(blocksQueueEndIndex-blocksQueueStartIndex, 10)}) + utils.LavaFormatDebug("Chain Tracker read a block Hash, and it existed, stopping fetch", &map[string]string{"block": strconv.FormatInt(blockNumToFetch, 10), "hash": newHashForBlock, "KeptBlocks": strconv.FormatInt(blocksQueueEndIndex-blocksQueueStartIndex, 10), "ChainID": cs.endpoint.ChainID, "ApiInterface": cs.endpoint.ApiInterface}) break } // there is no existing hash for this block - utils.LavaFormatDebug("Chain Tracker read a new block hash", &map[string]string{"block": strconv.FormatInt(blockNumToFetch, 10), "newHash": newHashForBlock}) + // this is very spammy + // utils.LavaFormatDebug("Chain Tracker read a new block hash", &map[string]string{"block": strconv.FormatInt(blockNumToFetch, 10), "newHash": newHashForBlock, "ChainID": cs.endpoint.ChainID, "ApiInterface": cs.endpoint.ApiInterface}) newBlocksQueue[int64(cs.blocksToSave)-1-idx] = BlockStore{Block: blockNumToFetch, Hash: newHashForBlock} } cs.blockQueueMu.RUnlock() @@ -148,9 +152,8 @@ func (cs *ChainTracker) fetchAllPreviousBlocks(ctx context.Context, latestBlock cs.blockQueueMu.Unlock() if blocksQueueLen < cs.blocksToSave { return utils.LavaFormatError("fetchAllPreviousBlocks didn't save enough blocks in Chain Tracker", 
nil, &map[string]string{"blocksQueueLen": strconv.FormatUint(blocksQueueLen, 10)}) - } else { - utils.LavaFormatInfo("Chain Tracker Updated latest block", &map[string]string{"block": strconv.FormatInt(latestBlock, 10), "latestHash": latestHash, "blocksQueueLen": strconv.FormatUint(blocksQueueLen, 10), "blocksQueried": strconv.FormatInt(blocksCopied, 10)}) } + utils.LavaFormatDebug("Chain Tracker Updated block hashes", &map[string]string{"latest_block": strconv.FormatInt(latestBlock, 10), "latestHash": latestHash, "blocksQueueLen": strconv.FormatUint(blocksQueueLen, 10), "blocksQueried": strconv.FormatInt(int64(int64(cs.blocksToSave)-blocksCopied), 10), "blocksKept": strconv.FormatInt(blocksCopied, 10), "ChainID": cs.endpoint.ChainID, "ApiInterface": cs.endpoint.ApiInterface}) return nil } @@ -229,8 +232,7 @@ func (cs *ChainTracker) fetchAllPreviousBlocksIfNecessary(ctx context.Context) ( return utils.LavaFormatError("could not fetchLatestBlock Hash in ChainTracker", err, &map[string]string{"block": strconv.FormatInt(newLatestBlock, 10)}) } if gotNewBlock || forked { - utils.LavaFormatDebug("ChainTracker should update state", &map[string]string{"gotNewBlock": fmt.Sprintf("%t", gotNewBlock), "forked": fmt.Sprintf("%t", forked), "newLatestBlock": strconv.FormatInt(newLatestBlock, 10), "currentBlock": strconv.FormatInt(cs.GetLatestBlockNum(), 10)}) - + // utils.LavaFormatDebug("ChainTracker should update state", &map[string]string{"gotNewBlock": fmt.Sprintf("%t", gotNewBlock), "forked": fmt.Sprintf("%t", forked), "newLatestBlock": strconv.FormatInt(newLatestBlock, 10), "currentBlock": strconv.FormatInt(cs.GetLatestBlockNum(), 10)}) prev_latest := cs.GetLatestBlockNum() cs.fetchAllPreviousBlocks(ctx, newLatestBlock) if gotNewBlock { @@ -345,6 +347,10 @@ func New(ctx context.Context, chainFetcher ChainFetcher, config ChainTrackerConf return nil, err } chainTracker = &ChainTracker{forkCallback: config.ForkCallback, newLatestCallback: config.NewLatestCallback, blocksToSave: config.BlocksToSave, chainFetcher: chainFetcher, latestBlockNum: 0, serverBlockMemory: config.ServerBlockMemory} + if chainFetcher == nil { + return nil, utils.LavaFormatError("can't start chainTracker with nil chainFetcher argument", nil, nil) + } + chainTracker.endpoint = chainFetcher.FetchEndpoint() err = chainTracker.start(ctx, config.AverageBlockTime) if err != nil { return nil, err diff --git a/protocol/chaintracker/chain_tracker_test.go b/protocol/chaintracker/chain_tracker_test.go index dd22cf7e0d..5b7c6a69b6 100644 --- a/protocol/chaintracker/chain_tracker_test.go +++ b/protocol/chaintracker/chain_tracker_test.go @@ -9,6 +9,7 @@ import ( "time" chaintracker "github.com/lavanet/lava/protocol/chaintracker" + "github.com/lavanet/lava/protocol/lavasession" "github.com/lavanet/lava/utils" spectypes "github.com/lavanet/lava/x/spec/types" "github.com/stretchr/testify/require" @@ -28,6 +29,10 @@ type MockChainFetcher struct { fork string } +func (mcf *MockChainFetcher) FetchEndpoint() lavasession.RPCProviderEndpoint { + return lavasession.RPCProviderEndpoint{} +} + func (mcf *MockChainFetcher) FetchLatestBlockNum(ctx context.Context) (int64, error) { mcf.mutex.Lock() defer mcf.mutex.Unlock() diff --git a/protocol/common/conf.go b/protocol/common/conf.go new file mode 100644 index 0000000000..ebd64283ae --- /dev/null +++ b/protocol/common/conf.go @@ -0,0 +1,30 @@ +package common + +import ( + "fmt" + + "github.com/spf13/viper" +) + +const ( + EndpointsConfigName = "endpoints" + SaveConfigFlagName = "save-conf" +) + +func 
ParseEndpointArgs(endpoint_strings []string, yaml_config_properties []string, endpointsConfigName string) (viper_endpoints *viper.Viper, err error) { + numFieldsInConfig := len(yaml_config_properties) + viper_endpoints = viper.New() + if len(endpoint_strings)%numFieldsInConfig != 0 { + return nil, fmt.Errorf("invalid endpoint_strings length %d, needs to divide by %d without residue", len(endpoint_strings), numFieldsInConfig) + } + endpoints := []map[string]string{} + for idx := 0; idx < len(endpoint_strings); idx += numFieldsInConfig { + toAdd := map[string]string{} + for inner_idx := 0; inner_idx < numFieldsInConfig; inner_idx++ { + toAdd[yaml_config_properties[inner_idx]] = endpoint_strings[idx+inner_idx] + } + endpoints = append(endpoints, toAdd) + } + viper_endpoints.Set(endpointsConfigName, endpoints) + return +} diff --git a/protocol/lavasession/common.go b/protocol/lavasession/common.go index f6ebda6877..6035074d8f 100644 --- a/protocol/lavasession/common.go +++ b/protocol/lavasession/common.go @@ -1,8 +1,6 @@ package lavasession import ( - "strconv" - "strings" "time" sdk "github.com/cosmos/cosmos-sdk/types" @@ -29,13 +27,3 @@ const ( StaleEpochDistance = 3 // relays done 3 epochs back are ready to be rewarded ) - -func PrintRPCEndpoint(endpoint *RPCEndpoint) (retStr string) { - retStr = endpoint.ChainID + ":" + endpoint.ApiInterface + " Network Address:" + endpoint.NetworkAddress + " Geolocation:" + strconv.FormatUint(endpoint.Geolocation, 10) - return -} - -func PrintRPCProviderEndpoint(endpoint *RPCProviderEndpoint) (retStr string) { - retStr = endpoint.ChainID + ":" + endpoint.ApiInterface + " Network Address:" + endpoint.NetworkAddress + "Node: " + strings.Join(endpoint.NodeUrl, ", ") + " Geolocation:" + strconv.FormatUint(endpoint.Geolocation, 10) - return -} diff --git a/protocol/lavasession/consumer_types.go b/protocol/lavasession/consumer_types.go index 407aa7253a..004b6e1f41 100644 --- a/protocol/lavasession/consumer_types.go +++ b/protocol/lavasession/consumer_types.go @@ -66,6 +66,11 @@ type RPCEndpoint struct { Geolocation uint64 `yaml:"geolocation,omitempty" json:"geolocation,omitempty" mapstructure:"geolocation"` } +func (endpoint *RPCEndpoint) String() (retStr string) { + retStr = endpoint.ChainID + ":" + endpoint.ApiInterface + " Network Address:" + endpoint.NetworkAddress + " Geolocation:" + strconv.FormatUint(endpoint.Geolocation, 10) + return +} + func (rpce *RPCEndpoint) New(address string, chainID string, apiInterface string, geolocation uint64) *RPCEndpoint { // TODO: validate correct url address rpce.NetworkAddress = address diff --git a/protocol/lavasession/provider_types.go b/protocol/lavasession/provider_types.go index cc2c213b9d..45e47c6f85 100644 --- a/protocol/lavasession/provider_types.go +++ b/protocol/lavasession/provider_types.go @@ -2,6 +2,7 @@ package lavasession import ( "strconv" + "strings" "sync" "sync/atomic" @@ -31,6 +32,10 @@ type RPCProviderEndpoint struct { NodeUrl []string `yaml:"node-url,omitempty" json:"node-url,omitempty" mapstructure:"node-url"` } +func (endpoint *RPCProviderEndpoint) String() (retStr string) { + return endpoint.ChainID + ":" + endpoint.ApiInterface + " Network Address:" + endpoint.NetworkAddress + "Node: " + strings.Join(endpoint.NodeUrl, ", ") + " Geolocation:" + strconv.FormatUint(endpoint.Geolocation, 10) +} + type RPCSubscription struct { Id string Sub *rpcclient.ClientSubscription diff --git a/protocol/rpcconsumer/rpcconsumer.go b/protocol/rpcconsumer/rpcconsumer.go index 90ad72450e..a5b4851ba0 100644 --- 
a/protocol/rpcconsumer/rpcconsumer.go +++ b/protocol/rpcconsumer/rpcconsumer.go @@ -12,6 +12,7 @@ import ( "github.com/cosmos/cosmos-sdk/client/tx" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/lavanet/lava/protocol/chainlib" + "github.com/lavanet/lava/protocol/common" "github.com/lavanet/lava/protocol/lavaprotocol" "github.com/lavanet/lava/protocol/lavasession" "github.com/lavanet/lava/protocol/statetracker" @@ -22,13 +23,8 @@ import ( "github.com/spf13/viper" ) -const ( - EndpointsConfigName = "endpoints" -) - var ( Yaml_config_properties = []string{"network-address", "chain-id", "api-interface"} - NumFieldsInConfig = len(Yaml_config_properties) ) type ConsumerStateTrackerInf interface { @@ -86,7 +82,7 @@ func (rpcc *RPCConsumer) Start(ctx context.Context, txFactory tx.Factory, client finalizationConsensus := &lavaprotocol.FinalizationConsensus{} consumerStateTracker.RegisterFinalizationConsensusForUpdates(ctx, finalizationConsensus) rpcc.rpcConsumerServers[key] = &RPCConsumerServer{} - utils.LavaFormatInfo("RPCConsumer Listening", &map[string]string{"endpoints": lavasession.PrintRPCEndpoint(rpcEndpoint)}) + utils.LavaFormatInfo("RPCConsumer Listening", &map[string]string{"endpoints": rpcEndpoint.String()}) rpcc.rpcConsumerServers[key].ServeRPCRequests(ctx, rpcEndpoint, rpcc.consumerStateTracker, chainParser, finalizationConsensus, consumerSessionManager, requiredResponses, privKey, vrf_sk, cache) } @@ -96,26 +92,8 @@ func (rpcc *RPCConsumer) Start(ctx context.Context, txFactory tx.Factory, client return nil } -func ParseEndpointArgs(endpoint_strings []string, yaml_config_properties []string, endpointsConfigName string) (viper_endpoints *viper.Viper, err error) { - numFieldsInConfig := len(yaml_config_properties) - viper_endpoints = viper.New() - if len(endpoint_strings)%numFieldsInConfig != 0 { - return nil, fmt.Errorf("invalid endpoint_strings length %d, needs to divide by %d without residue", len(endpoint_strings), NumFieldsInConfig) - } - endpoints := []map[string]string{} - for idx := 0; idx < len(endpoint_strings); idx += numFieldsInConfig { - toAdd := map[string]string{} - for inner_idx := 0; inner_idx < numFieldsInConfig; inner_idx++ { - toAdd[yaml_config_properties[inner_idx]] = endpoint_strings[idx+inner_idx] - } - endpoints = append(endpoints, toAdd) - } - viper_endpoints.Set(endpointsConfigName, endpoints) - return -} - func ParseEndpoints(viper_endpoints *viper.Viper, geolocation uint64) (endpoints []*lavasession.RPCEndpoint, err error) { - err = viper_endpoints.UnmarshalKey(EndpointsConfigName, &endpoints) + err = viper_endpoints.UnmarshalKey(common.EndpointsConfigName, &endpoints) if err != nil { utils.LavaFormatFatal("could not unmarshal endpoints", err, &map[string]string{"viper_endpoints": fmt.Sprintf("%v", viper_endpoints.AllSettings())}) } diff --git a/protocol/rpcprovider/provider_listener.go b/protocol/rpcprovider/provider_listener.go index 5ae86e8604..5059661e7f 100644 --- a/protocol/rpcprovider/provider_listener.go +++ b/protocol/rpcprovider/provider_listener.go @@ -2,15 +2,12 @@ package rpcprovider import ( "context" - "os" - "os/signal" "strings" "sync" "errors" "net" "net/http" - "time" "github.com/lavanet/lava/protocol/lavasession" @@ -26,6 +23,7 @@ import ( type ProviderListener struct { networkAddress string relayServer *relayServer + httpServer http.Server } func (pl *ProviderListener) Key() string { @@ -46,15 +44,16 @@ func (pl *ProviderListener) RegisterReceiver(existingReceiver RelayReceiver, end return nil } +func (pl *ProviderListener) 
Shutdown(shutdownCtx context.Context) error { + if err := pl.httpServer.Shutdown(shutdownCtx); err != nil { + utils.LavaFormatFatal("Provider failed to shutdown", err, nil) + } + return nil +} + func NewProviderListener(ctx context.Context, networkAddress string) *ProviderListener { pl := &ProviderListener{networkAddress: networkAddress} - ctx, cancel := context.WithCancel(ctx) - signalChan := make(chan os.Signal, 1) - signal.Notify(signalChan, os.Interrupt) - defer func() { - signal.Stop(signalChan) - cancel() - }() + // GRPC lis, err := net.Listen("tcp", networkAddress) if err != nil { @@ -74,34 +73,17 @@ func NewProviderListener(ctx context.Context, networkAddress string) *ProviderLi httpServer := http.Server{ Handler: h2c.NewHandler(http.HandlerFunc(handler), &http2.Server{}), } - - go func() { - select { - case <-ctx.Done(): - utils.LavaFormatInfo("Provider Server ctx.Done", nil) - case <-signalChan: - utils.LavaFormatInfo("Provider Server signalChan", nil) - } - - shutdownCtx, shutdownRelease := context.WithTimeout(context.Background(), 10*time.Second) - defer shutdownRelease() - - if err := httpServer.Shutdown(shutdownCtx); err != nil { - utils.LavaFormatFatal("Provider failed to shutdown", err, &map[string]string{}) - } - }() - + pl.httpServer = httpServer relayServer := &relayServer{relayReceivers: map[string]RelayReceiver{}} pl.relayServer = relayServer + pairingtypes.RegisterRelayerServer(grpcServer, relayServer) go func() { - pairingtypes.RegisterRelayerServer(grpcServer, relayServer) - + utils.LavaFormatInfo("New provider listener active", &map[string]string{"address": networkAddress}) if err := httpServer.Serve(lis); !errors.Is(err, http.ErrServerClosed) { utils.LavaFormatFatal("provider failed to serve", err, &map[string]string{"Address": lis.Addr().String()}) } - + utils.LavaFormatInfo("listener closed server", &map[string]string{"address": networkAddress}) }() - return pl } diff --git a/protocol/rpcprovider/rewardserver/reward_server.go b/protocol/rpcprovider/rewardserver/reward_server.go index 39f3cfaa94..f17e9091fe 100644 --- a/protocol/rpcprovider/rewardserver/reward_server.go +++ b/protocol/rpcprovider/rewardserver/reward_server.go @@ -149,9 +149,13 @@ func (rws *RewardServer) sendRewardsClaim(ctx context.Context, epoch uint64) err rws.addExpectedPayment(expectedPay) rws.updateCUServiced(relay.CuSum) } - err = rws.rewardsTxSender.TxRelayPayment(ctx, rewardsToClaim, strconv.FormatUint(rws.serverID, 10)) - if err != nil { - return utils.LavaFormatError("failed sending rewards claim", err, nil) + if len(rewardsToClaim) > 0 { + err = rws.rewardsTxSender.TxRelayPayment(ctx, rewardsToClaim, strconv.FormatUint(rws.serverID, 10)) + if err != nil { + return utils.LavaFormatError("failed sending rewards claim", err, nil) + } + } else { + utils.LavaFormatDebug("no rewards to claim", nil) } return nil } diff --git a/protocol/rpcprovider/rpcprovider.go b/protocol/rpcprovider/rpcprovider.go index e8c28c336c..7d03f73109 100644 --- a/protocol/rpcprovider/rpcprovider.go +++ b/protocol/rpcprovider/rpcprovider.go @@ -6,12 +6,14 @@ import ( "os" "os/signal" "strconv" + "time" "github.com/cosmos/cosmos-sdk/client" "github.com/cosmos/cosmos-sdk/client/tx" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/lavanet/lava/protocol/chainlib" "github.com/lavanet/lava/protocol/chaintracker" + "github.com/lavanet/lava/protocol/common" "github.com/lavanet/lava/protocol/lavasession" "github.com/lavanet/lava/protocol/rpcprovider/reliabilitymanager" 
"github.com/lavanet/lava/protocol/rpcprovider/rewardserver" @@ -24,13 +26,11 @@ import ( ) const ( - EndpointsConfigName = "endpoints" ChainTrackerDefaultMemory = 100 ) var ( Yaml_config_properties = []string{"network-address", "chain-id", "api-interface", "node-url"} - NumFieldsInConfig = len(Yaml_config_properties) ) type ProviderStateTrackerInf interface { @@ -58,6 +58,15 @@ type RPCProvider struct { } func (rpcp *RPCProvider) Start(ctx context.Context, txFactory tx.Factory, clientCtx client.Context, rpcProviderEndpoints []*lavasession.RPCProviderEndpoint, cache *performance.Cache, parallelConnections uint) (err error) { + ctx, cancel := context.WithCancel(ctx) + signalChan := make(chan os.Signal, 1) + signal.Notify(signalChan, os.Interrupt) + defer func() { + signal.Stop(signalChan) + cancel() + }() + rpcp.rpcProviderServers = make(map[string]*RPCProviderServer) + rpcp.rpcProviderListeners = make(map[string]*ProviderListener) // single state tracker lavaChainFetcher := chainlib.NewLavaChainFetcher(ctx, clientCtx) providerStateTracker, err := statetracker.NewProviderStateTracker(ctx, txFactory, clientCtx, lavaChainFetcher) @@ -135,7 +144,6 @@ func (rpcp *RPCProvider) Start(ctx context.Context, txFactory tx.Factory, client // handle case only one network address was defined for _, listener_p := range rpcp.rpcProviderListeners { listener = listener_p - listener.RegisterReceiver(rpcProviderServer, rpcProviderEndpoint) break } } else { @@ -145,18 +153,31 @@ func (rpcp *RPCProvider) Start(ctx context.Context, txFactory tx.Factory, client listener = NewProviderListener(ctx, rpcProviderEndpoint.NetworkAddress) rpcp.rpcProviderListeners[listener.Key()] = listener } - listener.RegisterReceiver(rpcProviderServer, rpcProviderEndpoint) } + if listener == nil { + utils.LavaFormatFatal("listener not defined, cant register RPCProviderServer", nil, &map[string]string{"RPCProviderEndpoint": rpcProviderEndpoint.String()}) + } + listener.RegisterReceiver(rpcProviderServer, rpcProviderEndpoint) + } + + select { + case <-ctx.Done(): + utils.LavaFormatInfo("Provider Server ctx.Done", nil) + case <-signalChan: + utils.LavaFormatInfo("Provider Server signalChan", nil) + } + + for _, listener := range rpcp.rpcProviderListeners { + shutdownCtx, shutdownRelease := context.WithTimeout(context.Background(), 10*time.Second) + listener.Shutdown(shutdownCtx) + defer shutdownRelease() } - signalChan := make(chan os.Signal, 1) - signal.Notify(signalChan, os.Interrupt) - <-signalChan return nil } func ParseEndpoints(viper_endpoints *viper.Viper, geolocation uint64) (endpoints []*lavasession.RPCProviderEndpoint, err error) { - err = viper_endpoints.UnmarshalKey(EndpointsConfigName, &endpoints) + err = viper_endpoints.UnmarshalKey(common.EndpointsConfigName, &endpoints) if err != nil { utils.LavaFormatFatal("could not unmarshal endpoints", err, &map[string]string{"viper_endpoints": fmt.Sprintf("%v", viper_endpoints.AllSettings())}) } diff --git a/rpcconsumer.yml b/rpcconsumer.yml deleted file mode 100644 index 90c58ff84b..0000000000 --- a/rpcconsumer.yml +++ /dev/null @@ -1,35 +0,0 @@ -account-number: "0" -broadcast-mode: sync -cache-be: "" -chain-id: lava -dry-run: false -endpoints: - - api-interface: tendermintrpc - chain-id: COS3 - network-address: 127.0.0.1:3333 -fee-account: "" -fees: "" -from: user1 -gas: "" -gas-adjustment: "1" -gas-prices: "" -generate-only: false -geolocation: "1" -help: false -home: /home/user -keyring-backend: os -keyring-dir: "" -ledger: false -log_format: plain -log_level: info -node: 
tcp://localhost:26657 -note: "" -offline: false -output: json -pprof-address: "" -secure: false -sequence: "0" -sign-mode: "" -timeout-height: "0" -trace: false -"yes": false From 8745a23c1265be9efd0c37ab08fd31c1fb363f38 Mon Sep 17 00:00:00 2001 From: Ran Mishael Date: Thu, 23 Feb 2023 13:46:37 +0100 Subject: [PATCH 055/123] adding data reliability --- protocol/lavasession/common.go | 1 + .../lavasession/provider_session_manager.go | 76 +++++++++++++++---- protocol/lavasession/provider_types.go | 3 - protocol/rpcprovider/rpcprovider_server.go | 10 +-- 4 files changed, 64 insertions(+), 26 deletions(-) diff --git a/protocol/lavasession/common.go b/protocol/lavasession/common.go index 6035074d8f..4a401c7905 100644 --- a/protocol/lavasession/common.go +++ b/protocol/lavasession/common.go @@ -14,6 +14,7 @@ const ( MaximumNumberOfFailuresAllowedPerConsumerSession = 3 RelayNumberIncrement = 1 DataReliabilitySessionId = 0 // data reliability session id is 0. we can change to more sessions later if needed. + DataReliabilityRelayNumber = 1 DataReliabilityCuSum = 0 GeolocationFlag = "geolocation" ) diff --git a/protocol/lavasession/provider_session_manager.go b/protocol/lavasession/provider_session_manager.go index 04a9cb8b23..f496632892 100644 --- a/protocol/lavasession/provider_session_manager.go +++ b/protocol/lavasession/provider_session_manager.go @@ -11,11 +11,12 @@ import ( ) type ProviderSessionManager struct { - sessionsWithAllConsumers map[uint64]map[string]*ProviderSessionsWithConsumer // first key is epochs, second key is a consumer address - lock sync.RWMutex - blockedEpochHeight uint64 // requests from this epoch are blocked - rpcProviderEndpoint *RPCProviderEndpoint - blockDistanceForEpochValidity uint64 // sessionsWithAllConsumers with epochs older than ((latest epoch) - numberOfBlocksKeptInMemory) are deleted. + sessionsWithAllConsumers map[uint64]map[string]*ProviderSessionsWithConsumer // first key is epochs, second key is a consumer address + dataReliabilitySessionsWithAllConsumers map[uint64]map[string]*ProviderSessionsWithConsumer // first key is epochs, second key is a consumer address + lock sync.RWMutex + blockedEpochHeight uint64 // requests from this epoch are blocked + rpcProviderEndpoint *RPCProviderEndpoint + blockDistanceForEpochValidity uint64 // sessionsWithAllConsumers with epochs older than ((latest epoch) - numberOfBlocksKeptInMemory) are deleted. } // reads cs.BlockedEpoch atomically @@ -59,6 +60,55 @@ func (psm *ProviderSessionManager) getSingleSessionFromProviderSessionWithConsum return singleProviderSession, err } +func (psm *ProviderSessionManager) getOrCreateDataReliabilitySessionWithConsumer(address string, epoch uint64, sessionId uint64) (providerSessionWithConsumer *ProviderSessionsWithConsumer, err error) { + if mapOfDataReliabilitySessionsWithConsumer, consumerFoundInEpoch := psm.dataReliabilitySessionsWithAllConsumers[epoch]; consumerFoundInEpoch { + if providerSessionWithConsumer, consumerAddressFound := mapOfDataReliabilitySessionsWithConsumer[address]; consumerAddressFound { + if providerSessionWithConsumer.atomicReadConsumerBlocked() == blockListedConsumer { // we atomic read block listed so we dont need to lock the provider. (double lock is always a bad idea.) + // consumer is blocked. 
+ utils.LavaFormatWarning("getActiveConsumer", ConsumerIsBlockListed, &map[string]string{"RequestedEpoch": strconv.FormatUint(epoch, 10), "ConsumerAddress": address}) + return nil, ConsumerIsBlockListed + } + return providerSessionWithConsumer, nil // no error + } + } else { + // If Epoch is missing from map, create a new instance + psm.dataReliabilitySessionsWithAllConsumers[epoch] = make(map[string]*ProviderSessionsWithConsumer) + } + + // If we got here, we need to create a new instance for this consumer address. + providerSessionWithConsumer = &ProviderSessionsWithConsumer{ + consumer: address, + } + psm.dataReliabilitySessionsWithAllConsumers[epoch][address] = providerSessionWithConsumer + return providerSessionWithConsumer, nil +} + +// GetDataReliabilitySession fetches a data reliability session, and assumes the user +func (psm *ProviderSessionManager) GetDataReliabilitySession(address string, epoch uint64, sessionId uint64, relayNumber uint64) (*SingleProviderSession, error) { + // validate Epoch + valid, _ := psm.IsValidEpoch(epoch) + if valid { // fast checking to see if epoch is even relevant + utils.LavaFormatError("GetSession", InvalidEpochError, &map[string]string{"RequestedEpoch": strconv.FormatUint(epoch, 10)}) + return nil, InvalidEpochError + } + + // validate sessionId + if sessionId > DataReliabilitySessionId { + return nil, utils.LavaFormatError("request's sessionId is larger than the data reliability allowed session ID", nil, &map[string]string{"sessionId": strconv.FormatUint(sessionId, 10), "DataReliabilitySessionId": strconv.Itoa(DataReliabilitySessionId)}) + } + + // validate RelayNumber + if relayNumber > DataReliabilityRelayNumber { + return nil, utils.LavaFormatError("request's relayNumber is larger than the DataReliabilityRelayNumber allowed relay number", nil, &map[string]string{"relayNumber": strconv.FormatUint(relayNumber, 10), "DataReliabilityRelayNumber": strconv.Itoa(DataReliabilityRelayNumber)}) + } + + // validate active consumer. + psm.getOrCreateDataReliabilitySessionWithConsumer(address, epoch, sessionId) + + return nil, nil + +} + func (psm *ProviderSessionManager) GetSession(address string, epoch uint64, sessionId uint64, relayNumber uint64) (*SingleProviderSession, error) { valid, _ := psm.IsValidEpoch(epoch) if valid { // fast checking to see if epoch is even relevant @@ -74,7 +124,7 @@ func (psm *ProviderSessionManager) GetSession(address string, epoch uint64, sess return psm.getSingleSessionFromProviderSessionWithConsumer(providerSessionWithConsumer, sessionId, epoch, relayNumber) } -func (psm *ProviderSessionManager) registerNewSession(address string, epoch uint64, sessionId uint64, vrfPk *utils.VrfPubKey, maxCuForConsumer uint64) (*ProviderSessionsWithConsumer, error) { +func (psm *ProviderSessionManager) registerNewSession(address string, epoch uint64, sessionId uint64, maxCuForConsumer uint64) (*ProviderSessionsWithConsumer, error) { psm.lock.Lock() defer psm.lock.Unlock() @@ -95,7 +145,6 @@ func (psm *ProviderSessionManager) registerNewSession(address string, epoch uint providerSessionWithConsumer = &ProviderSessionsWithConsumer{ consumer: address, epochData: &ProviderSessionsEpochData{ - VrfPk: vrfPk, MaxComputeUnits: maxCuForConsumer, }, } @@ -104,12 +153,11 @@ func (psm *ProviderSessionManager) registerNewSession(address string, epoch uint return providerSessionWithConsumer, nil } -// TODO add vrfPk and Max compute units. 
-func (psm *ProviderSessionManager) RegisterProviderSessionWithConsumer(address string, epoch uint64, sessionId uint64, relayNumber uint64, vrfPk *utils.VrfPubKey, maxCuForConsumer uint64) (*SingleProviderSession, error) { +func (psm *ProviderSessionManager) RegisterProviderSessionWithConsumer(address string, epoch uint64, sessionId uint64, relayNumber uint64, maxCuForConsumer uint64) (*SingleProviderSession, error) { providerSessionWithConsumer, err := psm.IsActiveConsumer(epoch, address) if err != nil { if ConsumerNotRegisteredYet.Is(err) { - providerSessionWithConsumer, err = psm.registerNewSession(address, epoch, sessionId, vrfPk, maxCuForConsumer) + providerSessionWithConsumer, err = psm.registerNewSession(address, epoch, sessionId, maxCuForConsumer) if err != nil { return nil, utils.LavaFormatError("RegisterProviderSessionWithConsumer Failed to registerNewSession", err, nil) } @@ -128,8 +176,8 @@ func (psm *ProviderSessionManager) getActiveConsumer(epoch uint64, address strin utils.LavaFormatError("getActiveConsumer", InvalidEpochError, &map[string]string{"RequestedEpoch": strconv.FormatUint(epoch, 10)}) return nil, InvalidEpochError } - if mapOfProviderSessionsWithConsumer, ok := psm.sessionsWithAllConsumers[epoch]; ok { - if providerSessionWithConsumer, ok := mapOfProviderSessionsWithConsumer[address]; ok { + if mapOfProviderSessionsWithConsumer, consumerFoundInEpoch := psm.sessionsWithAllConsumers[epoch]; consumerFoundInEpoch { + if providerSessionWithConsumer, consumerAddressFound := mapOfProviderSessionsWithConsumer[address]; consumerAddressFound { if providerSessionWithConsumer.atomicReadConsumerBlocked() == blockListedConsumer { // we atomic read block listed so we dont need to lock the provider. (double lock is always a bad idea.) // consumer is blocked. 
utils.LavaFormatWarning("getActiveConsumer", ConsumerIsBlockListed, &map[string]string{"RequestedEpoch": strconv.FormatUint(epoch, 10), "ConsumerAddress": address}) @@ -158,10 +206,6 @@ func (psm *ProviderSessionManager) ReportConsumer() (address string, epoch uint6 return "", 0, nil // TBD } -func (psm *ProviderSessionManager) GetDataReliabilitySession(address string, epoch uint64) (*SingleProviderSession, error) { - return nil, fmt.Errorf("not implemented") -} - // OnSessionDone unlocks the session gracefully, this happens when session finished with an error func (psm *ProviderSessionManager) OnSessionFailure(singleProviderSession *SingleProviderSession) (err error) { return singleProviderSession.onSessionFailure() diff --git a/protocol/lavasession/provider_types.go b/protocol/lavasession/provider_types.go index 45e47c6f85..290cc03b4f 100644 --- a/protocol/lavasession/provider_types.go +++ b/protocol/lavasession/provider_types.go @@ -8,7 +8,6 @@ import ( "github.com/lavanet/lava/protocol/chainlib/chainproxy/rpcclient" "github.com/lavanet/lava/utils" - pairingtypes "github.com/lavanet/lava/x/pairing/types" ) type voteData struct { @@ -20,8 +19,6 @@ type voteData struct { type ProviderSessionsEpochData struct { UsedComputeUnits uint64 MaxComputeUnits uint64 - DataReliability *pairingtypes.VRFData - VrfPk *utils.VrfPubKey } type RPCProviderEndpoint struct { diff --git a/protocol/rpcprovider/rpcprovider_server.go b/protocol/rpcprovider/rpcprovider_server.go index 5b71ebad96..def0ad39e6 100644 --- a/protocol/rpcprovider/rpcprovider_server.go +++ b/protocol/rpcprovider/rpcprovider_server.go @@ -313,7 +313,7 @@ func (rpcps *RPCProviderServer) verifyRelaySession(ctx context.Context, request if err != nil { return nil, nil, utils.LavaFormatError("failed data reliability validation", err, nil) } - dataReliabilitySingleProviderSession, err := rpcps.providerSessionManager.GetDataReliabilitySession(extractedConsumerAddress.String(), uint64(request.BlockHeight)) + dataReliabilitySingleProviderSession, err := rpcps.providerSessionManager.GetDataReliabilitySession(extractedConsumerAddress.String(), uint64(request.BlockHeight), request.SessionId, request.RelayNum) if err != nil { return nil, nil, utils.LavaFormatError("failed to get a provider data reliability session", err, &map[string]string{"sessionID": strconv.FormatUint(request.SessionId, 10), "consumer": extractedConsumerAddress.String(), "epoch": strconv.FormatInt(request.BlockHeight, 10)}) } @@ -332,12 +332,12 @@ func (rpcps *RPCProviderServer) getSingleProviderSession(ctx context.Context, re if !valid { return nil, utils.LavaFormatError("VerifyPairing, this consumer address is not valid with this provider", nil, &map[string]string{"epoch": strconv.FormatInt(request.BlockHeight, 10), "sessionID": strconv.FormatUint(request.SessionId, 10), "consumer": consumerAddressString, "provider": rpcps.providerAddress.String(), "relayNum": strconv.FormatUint(request.RelayNum, 10)}) } - vrfPk, maxCuForConsumer, getVrfAndMaxCuError := rpcps.stateTracker.GetVrfPkAndMaxCuForUser(ctx, consumerAddressString, request.ChainID, uint64(request.BlockHeight)) + _, maxCuForConsumer, getVrfAndMaxCuError := rpcps.stateTracker.GetVrfPkAndMaxCuForUser(ctx, consumerAddressString, request.ChainID, uint64(request.BlockHeight)) if getVrfAndMaxCuError != nil { return nil, utils.LavaFormatError("ConsumerNotRegisteredYet: GetVrfPkAndMaxCuForUser failed", getVrfAndMaxCuError, &map[string]string{"epoch": strconv.FormatInt(request.BlockHeight, 10), "sessionID": 
strconv.FormatUint(request.SessionId, 10), "consumer": consumerAddressString, "provider": rpcps.providerAddress.String(), "relayNum": strconv.FormatUint(request.RelayNum, 10)}) } // After validating the consumer we can register it with provider session manager. - singleProviderSession, err = rpcps.providerSessionManager.RegisterProviderSessionWithConsumer(consumerAddressString, uint64(request.BlockHeight), request.SessionId, request.RelayNum, vrfPk, maxCuForConsumer) + singleProviderSession, err = rpcps.providerSessionManager.RegisterProviderSessionWithConsumer(consumerAddressString, uint64(request.BlockHeight), request.SessionId, request.RelayNum, maxCuForConsumer) if err != nil { return nil, utils.LavaFormatError("Failed to RegisterProviderSessionWithConsumer", err, &map[string]string{"sessionID": strconv.FormatUint(request.SessionId, 10), "consumer": consumerAddressString, "relayNum": strconv.FormatUint(request.RelayNum, 10)}) } @@ -360,10 +360,6 @@ func (rpcps *RPCProviderServer) verifyRelayRequestMetaData(request *pairingtypes } func (rpcps *RPCProviderServer) verifyDataReliabilityRelayRequest(ctx context.Context, request *pairingtypes.RelayRequest, consumerAddress sdk.AccAddress) error { - - if request.RelayNum > lavasession.DataReliabilitySessionId { - return utils.LavaFormatError("request's relay num is larger than the data reliability session ID", nil, &map[string]string{"relayNum": strconv.FormatUint(request.RelayNum, 10), "DataReliabilitySessionId": strconv.Itoa(lavasession.DataReliabilitySessionId)}) - } if request.CuSum != lavasession.DataReliabilityCuSum { return utils.LavaFormatError("request's CU sum is not equal to the data reliability CU sum", nil, &map[string]string{"cuSum": strconv.FormatUint(request.CuSum, 10), "DataReliabilityCuSum": strconv.Itoa(lavasession.DataReliabilityCuSum)}) } From a71c0da9ac516353cd50a27d0c8b8a5ef53082a9 Mon Sep 17 00:00:00 2001 From: omer mishael Date: Wed, 22 Feb 2023 13:54:02 +0200 Subject: [PATCH 056/123] added support for all apiInterfaces, using chain fetcher --- protocol/chainlib/chain_fetcher.go | 11 ++++++++--- protocol/chainlib/chainlib.go | 2 +- protocol/chainlib/common.go | 10 ++++++++-- protocol/chainlib/grpc.go | 22 +++++++++++++--------- protocol/chainlib/jsonRPC.go | 8 ++++++-- protocol/chainlib/rest.go | 24 ++++++++++++------------ protocol/chainlib/tendermintRPC.go | 8 ++++++-- 7 files changed, 54 insertions(+), 31 deletions(-) diff --git a/protocol/chainlib/chain_fetcher.go b/protocol/chainlib/chain_fetcher.go index 43bd1185db..8e4ed7ce3a 100644 --- a/protocol/chainlib/chain_fetcher.go +++ b/protocol/chainlib/chain_fetcher.go @@ -33,7 +33,10 @@ func (cf *ChainFetcher) FetchLatestBlockNum(ctx context.Context) (int64, error) if !ok { return spectypes.NOT_APPLICABLE, utils.LavaFormatError(spectypes.GET_BLOCKNUM+" tag function not found", nil, &map[string]string{"chainID": cf.endpoint.ChainID, "APIInterface": cf.endpoint.ApiInterface}) } - chainMessage := CraftChainMessage(serviceApi, cf.chainParser) + chainMessage, err := CraftChainMessage(serviceApi, cf.chainParser, nil) + if err != nil { + return spectypes.NOT_APPLICABLE, utils.LavaFormatError(spectypes.GET_BLOCKNUM+" failed creating chainMessage", err, &map[string]string{"chainID": cf.endpoint.ChainID, "APIInterface": cf.endpoint.ApiInterface}) + } reply, _, _, err := cf.chainProxy.SendNodeMsg(ctx, nil, chainMessage) if err != nil { return spectypes.NOT_APPLICABLE, utils.LavaFormatError(spectypes.GET_BLOCKNUM+" failed sending chainMessage", err, &map[string]string{"chainID": 
cf.endpoint.ChainID, "APIInterface": cf.endpoint.ApiInterface}) @@ -61,10 +64,12 @@ func (cf *ChainFetcher) FetchBlockHashByNum(ctx context.Context, blockNum int64) if serviceApi.GetParsing().FunctionTemplate == "" { return "", utils.LavaFormatError(spectypes.GET_BLOCK_BY_NUM+" missing function template", nil, &map[string]string{"chainID": cf.endpoint.ChainID, "APIInterface": cf.endpoint.ApiInterface}) } + path := serviceApi.Name data := []byte(fmt.Sprintf(serviceApi.GetParsing().FunctionTemplate, blockNum)) - chainMessage, err := cf.chainParser.ParseMsg(string(data), data, serviceApi.ApiInterfaces[0].Type) + + chainMessage, err := CraftChainMessage(serviceApi, cf.chainParser, &CraftData{Path: path, Data: data, ConnectionType: serviceApi.ApiInterfaces[0].Type}) if err != nil { - return "", utils.LavaFormatError(spectypes.GET_BLOCK_BY_NUM+" failed parseMsg on function template", err, &map[string]string{"chainID": cf.endpoint.ChainID, "APIInterface": cf.endpoint.ApiInterface}) + return "", utils.LavaFormatError(spectypes.GET_BLOCK_BY_NUM+" failed CraftChainMessage on function template", err, &map[string]string{"chainID": cf.endpoint.ChainID, "APIInterface": cf.endpoint.ApiInterface}) } reply, _, _, err := cf.chainProxy.SendNodeMsg(ctx, nil, chainMessage) if err != nil { diff --git a/protocol/chainlib/chainlib.go b/protocol/chainlib/chainlib.go index 1519870676..fa26e6fc4e 100644 --- a/protocol/chainlib/chainlib.go +++ b/protocol/chainlib/chainlib.go @@ -55,7 +55,7 @@ type ChainParser interface { DataReliabilityParams() (enabled bool, dataReliabilityThreshold uint32) ChainBlockStats() (allowedBlockLagForQosSync int64, averageBlockTime time.Duration, blockDistanceForFinalizedData uint32, blocksInFinalizationProof uint32) GetSpecApiByTag(tag string) (specApi spectypes.ServiceApi, existed bool) - CraftMessage(serviceApi spectypes.ServiceApi) ChainMessageForSend + CraftMessage(serviceApi spectypes.ServiceApi, craftData *CraftData) (ChainMessageForSend, error) } type ChainMessage interface { diff --git a/protocol/chainlib/common.go b/protocol/chainlib/common.go index 1ede0e16d9..b47f8e60ab 100644 --- a/protocol/chainlib/common.go +++ b/protocol/chainlib/common.go @@ -206,6 +206,12 @@ func GetApiInterfaceFromServiceApi(serviceApi *spectypes.ServiceApi, connectionT return apiInterface } -func CraftChainMessage(serviceApi spectypes.ServiceApi, chainParser ChainParser) ChainMessageForSend { - return chainParser.CraftMessage(serviceApi) +type CraftData struct { + Path string + Data []byte + ConnectionType string +} + +func CraftChainMessage(serviceApi spectypes.ServiceApi, chainParser ChainParser, craftData *CraftData) (ChainMessageForSend, error) { + return chainParser.CraftMessage(serviceApi, craftData) } diff --git a/protocol/chainlib/grpc.go b/protocol/chainlib/grpc.go index 69026c2389..dd99579f04 100644 --- a/protocol/chainlib/grpc.go +++ b/protocol/chainlib/grpc.go @@ -45,12 +45,16 @@ func NewGrpcChainParser() (chainParser *GrpcChainParser, err error) { return &GrpcChainParser{}, nil } -func (apip *GrpcChainParser) CraftMessage(serviceApi spectypes.ServiceApi) ChainMessageForSend { - grpcMessage := rpcInterfaceMessages.GrpcMessage{ +func (apip *GrpcChainParser) CraftMessage(serviceApi spectypes.ServiceApi, craftData *CraftData) (ChainMessageForSend, error) { + if craftData != nil { + return apip.ParseMsg(craftData.Path, craftData.Data, craftData.ConnectionType) + } + + grpcMessage := &rpcInterfaceMessages.GrpcMessage{ Msg: nil, Path: serviceApi.GetName(), } - return 
apip.newChainMessage(&serviceApi, &serviceApi.ApiInterfaces[0], spectypes.NOT_APPLICABLE, grpcMessage) + return apip.newChainMessage(&serviceApi, &serviceApi.ApiInterfaces[0], spectypes.NOT_APPLICABLE, grpcMessage), nil } // ParseMsg parses message data into chain message object @@ -78,15 +82,15 @@ func (apip *GrpcChainParser) ParseMsg(url string, data []byte, connectionType st } // TODO: fix requested block - nodeMsg := apip.newChainMessage(serviceApi, apiInterface, spectypes.NOT_APPLICABLE, grpcMessage) + nodeMsg := apip.newChainMessage(serviceApi, apiInterface, spectypes.NOT_APPLICABLE, &grpcMessage) return nodeMsg, nil } -func (*GrpcChainParser) newChainMessage(serviceApi *spectypes.ServiceApi, apiInterface *spectypes.ApiInterface, requestedBlock int64, grpcMessage rpcInterfaceMessages.GrpcMessage) *parsedMessage { +func (*GrpcChainParser) newChainMessage(serviceApi *spectypes.ServiceApi, apiInterface *spectypes.ApiInterface, requestedBlock int64, grpcMessage *rpcInterfaceMessages.GrpcMessage) *parsedMessage { nodeMsg := &parsedMessage{ serviceApi: serviceApi, apiInterface: apiInterface, - msg: grpcMessage, + msg: grpcMessage, // setting the grpc message as a pointer so we can set descriptors for parsing requestedBlock: requestedBlock, } return nodeMsg @@ -108,12 +112,12 @@ func (apip *GrpcChainParser) getSupportedApi(name string) (*spectypes.ServiceApi // Return an error if spec does not exist if !ok { - return nil, errors.New("GRPC api not supported") + return nil, utils.LavaFormatError("GRPC api not supported", nil, &map[string]string{"name": name}) } // Return an error if api is disabled if !api.Enabled { - return nil, errors.New("api is disabled") + return nil, utils.LavaFormatError("GRPC api is disabled", nil, &map[string]string{"name": name}) } return &api, nil @@ -264,7 +268,7 @@ func (cp *GrpcChainProxy) SendNodeMsg(ctx context.Context, ch chan interface{}, defer cp.conn.ReturnRpc(conn) rpcInputMessage := chainMessage.GetRPCMessage() - nodeMessage, ok := rpcInputMessage.(rpcInterfaceMessages.GrpcMessage) + nodeMessage, ok := rpcInputMessage.(*rpcInterfaceMessages.GrpcMessage) if !ok { return nil, "", nil, utils.LavaFormatError("invalid message type in grpc failed to cast RPCInput from chainMessage", nil, &map[string]string{"rpcMessage": fmt.Sprintf("%+v", rpcInputMessage)}) } diff --git a/protocol/chainlib/jsonRPC.go b/protocol/chainlib/jsonRPC.go index 194fc5d201..77c0a1d9fc 100644 --- a/protocol/chainlib/jsonRPC.go +++ b/protocol/chainlib/jsonRPC.go @@ -38,14 +38,18 @@ func NewJrpcChainParser() (chainParser *JsonRPCChainParser, err error) { return &JsonRPCChainParser{}, nil } -func (apip *JsonRPCChainParser) CraftMessage(serviceApi spectypes.ServiceApi) ChainMessageForSend { +func (apip *JsonRPCChainParser) CraftMessage(serviceApi spectypes.ServiceApi, craftData *CraftData) (ChainMessageForSend, error) { + if craftData != nil { + return apip.ParseMsg("", craftData.Data, craftData.ConnectionType) + } + msg := rpcInterfaceMessages.JsonrpcMessage{ Version: "2.0", ID: []byte("1"), Method: serviceApi.GetName(), Params: nil, } - return apip.newChainMessage(&serviceApi, &serviceApi.ApiInterfaces[0], spectypes.NOT_APPLICABLE, msg) + return apip.newChainMessage(&serviceApi, &serviceApi.ApiInterfaces[0], spectypes.NOT_APPLICABLE, msg), nil } // this func parses message data into chain message object diff --git a/protocol/chainlib/rest.go b/protocol/chainlib/rest.go index e739c4d214..13e5492300 100644 --- a/protocol/chainlib/rest.go +++ b/protocol/chainlib/rest.go @@ -37,12 +37,17 @@ 
func NewRestChainParser() (chainParser *RestChainParser, err error) { return &RestChainParser{}, nil } -func (apip *RestChainParser) CraftMessage(serviceApi spectypes.ServiceApi) ChainMessageForSend { +func (apip *RestChainParser) CraftMessage(serviceApi spectypes.ServiceApi, craftData *CraftData) (ChainMessageForSend, error) { + if craftData != nil { + // chain fetcher sends the replaced request inside data + return apip.ParseMsg(string(craftData.Data), nil, craftData.ConnectionType) + } + restMessage := rpcInterfaceMessages.RestMessage{ Msg: nil, Path: serviceApi.GetName(), } - return apip.newChainMessage(&serviceApi, &serviceApi.ApiInterfaces[0], spectypes.NOT_APPLICABLE, restMessage) + return apip.newChainMessage(&serviceApi, &serviceApi.ApiInterfaces[0], spectypes.NOT_APPLICABLE, restMessage), nil } // ParseMsg parses message data into chain message object @@ -69,17 +74,12 @@ func (apip *RestChainParser) ParseMsg(url string, data []byte, connectionType st Path: url, } if connectionType == http.MethodGet { - if string(data) == url { // happens on chain fetcher where we send the formatted string on both data and msg for cross api interface compatibility - restMessage = rpcInterfaceMessages.RestMessage{ - Msg: nil, - Path: url, - } - } else { // support for optional params, our listener puts them inside Msg data - restMessage = rpcInterfaceMessages.RestMessage{ - Msg: nil, - Path: url + string(data), - } + // support for optional params, our listener puts them inside Msg data + restMessage = rpcInterfaceMessages.RestMessage{ + Msg: nil, + Path: url + string(data), } + } // TODO fix requested block diff --git a/protocol/chainlib/tendermintRPC.go b/protocol/chainlib/tendermintRPC.go index 9f379d3e36..9dfd3d9288 100644 --- a/protocol/chainlib/tendermintRPC.go +++ b/protocol/chainlib/tendermintRPC.go @@ -37,7 +37,11 @@ func NewTendermintRpcChainParser() (chainParser *TendermintChainParser, err erro return &TendermintChainParser{}, nil } -func (apip *TendermintChainParser) CraftMessage(serviceApi spectypes.ServiceApi) ChainMessageForSend { +func (apip *TendermintChainParser) CraftMessage(serviceApi spectypes.ServiceApi, craftData *CraftData) (ChainMessageForSend, error) { + if craftData != nil { + return apip.ParseMsg("", craftData.Data, craftData.ConnectionType) + } + msg := rpcInterfaceMessages.JsonrpcMessage{ Version: "2.0", ID: []byte("1"), @@ -45,7 +49,7 @@ func (apip *TendermintChainParser) CraftMessage(serviceApi spectypes.ServiceApi) Params: nil, } tenderMsg := rpcInterfaceMessages.TendermintrpcMessage{JsonrpcMessage: msg, Path: serviceApi.GetName()} - return apip.newChainMessage(&serviceApi, &serviceApi.ApiInterfaces[0], spectypes.NOT_APPLICABLE, tenderMsg) + return apip.newChainMessage(&serviceApi, &serviceApi.ApiInterfaces[0], spectypes.NOT_APPLICABLE, tenderMsg), nil } // ParseMsg parses message data into chain message object From beca007331540c330c6ecff9090b41f29b1eecd8 Mon Sep 17 00:00:00 2001 From: omer mishael Date: Wed, 22 Feb 2023 14:19:00 +0200 Subject: [PATCH 057/123] example configuration to run rpc provider --- config/rpcprovider.yml | 32 +++++++++++++++++--------------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/config/rpcprovider.yml b/config/rpcprovider.yml index 598c04fed2..27bd4c12b5 100644 --- a/config/rpcprovider.yml +++ b/config/rpcprovider.yml @@ -1,17 +1,19 @@ endpoints: - - chain-id: COS3 - api-interface: tendermintrpc - network-address: 127.0.0.1:2241 - node-url: ["wss://tendermint-websocket.xyz","https://tendermint-https.xyz"] - - 
chain-id: COS3 - api-interface: grpc - network-address: 127.0.0.1:2234 - node-url: ["https://cosmos-grpc.xyz"] - - chain-id: COS3 - api-interface: rest - network-address: 127.0.0.1:2231 - node-url: ["https://tendermint-rest.xyz"] - - chain-id: ETH1 - api-interface: jsonrpc + - api-interface: tendermintrpc + chain-id: LAV1 network-address: 127.0.0.1:2221 - node-url: ["wss://ethereum-websocket.xyz"] \ No newline at end of file + node-url: + - ws://127.0.0.1:26657/websocket + - http://127.0.0.1:26657 + - api-interface: grpc + chain-id: LAV1 + network-address: 127.0.0.1:2222 + node-url: 127.0.0.1:9090 + - api-interface: rest + chain-id: LAV1 + network-address: 127.0.0.1:2221 + node-url: http://127.0.0.1:1317 + # - api-interface: jsonrpc + # chain-id: ETH1 + # network-address: 127.0.0.1:2221 + # node-url: wss://ethereum-rpc.com/ws/ \ No newline at end of file From fd8875610e625f10bb5122018582cf1e97b64e24 Mon Sep 17 00:00:00 2001 From: omer mishael Date: Wed, 22 Feb 2023 14:20:26 +0200 Subject: [PATCH 058/123] added examples for configs --- .../{ => consumer_examples}/ethereum_example.yml | 0 .../{ => consumer_examples}/osmosis_example.yml | 0 config/provider_examples/lava_example.yml | 15 +++++++++++++++ 3 files changed, 15 insertions(+) rename config/{ => consumer_examples}/ethereum_example.yml (100%) rename config/{ => consumer_examples}/osmosis_example.yml (100%) create mode 100644 config/provider_examples/lava_example.yml diff --git a/config/ethereum_example.yml b/config/consumer_examples/ethereum_example.yml similarity index 100% rename from config/ethereum_example.yml rename to config/consumer_examples/ethereum_example.yml diff --git a/config/osmosis_example.yml b/config/consumer_examples/osmosis_example.yml similarity index 100% rename from config/osmosis_example.yml rename to config/consumer_examples/osmosis_example.yml diff --git a/config/provider_examples/lava_example.yml b/config/provider_examples/lava_example.yml new file mode 100644 index 0000000000..9f8a4ff284 --- /dev/null +++ b/config/provider_examples/lava_example.yml @@ -0,0 +1,15 @@ +endpoints: + - api-interface: tendermintrpc + chain-id: LAV1 + network-address: 127.0.0.1:2221 + node-url: + - ws://127.0.0.1:26657/websocket + - http://127.0.0.1:26657 + - api-interface: grpc + chain-id: LAV1 + network-address: 127.0.0.1:2222 + node-url: 127.0.0.1:9090 + - api-interface: rest + chain-id: LAV1 + network-address: 127.0.0.1:2221 + node-url: http://127.0.0.1:1317 \ No newline at end of file From ec8723182d75fc107b51dd39ec9b9391903bf556 Mon Sep 17 00:00:00 2001 From: Ran Mishael Date: Thu, 23 Feb 2023 17:29:19 +0100 Subject: [PATCH 059/123] Finished provider session manager integration and implementation!!! 
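This patch tracks provider-side subscriptions per epoch, per consumer address, and per subscription ID. As a rough illustration of that bookkeeping only (a sketch, not the repository's actual types), the snippet below reduces *RPCSubscription to a stub with an Unsubscribe method and walks the nested map the way the new ProcessUnsubscribe does: a missing epoch or consumer is an error, the unsubscribe_all path closes everything the consumer holds, and otherwise a single ID is closed and removed.

    package main

    import "fmt"

    // subStub stands in for *RPCSubscription; only Unsubscribe matters for this sketch.
    type subStub struct{ id string }

    func (s *subStub) Unsubscribe() { fmt.Println("unsubscribed:", s.id) }

    // epoch -> consumer address -> subscription ID -> subscription
    type subscriptionMap map[uint64]map[string]map[string]*subStub

    // unsubscribe mirrors the lookup order of ProcessUnsubscribe: missing epoch or
    // consumer is an error, "unsubscribe_all" closes every subscription the consumer
    // has, otherwise one subscription is closed and dropped from the map.
    func unsubscribe(m subscriptionMap, apiName string, subID string, consumer string, epoch uint64) error {
        subsByConsumer, ok := m[epoch]
        if !ok {
            return fmt.Errorf("epoch %d not found", epoch)
        }
        subs, ok := subsByConsumer[consumer]
        if !ok {
            return fmt.Errorf("consumer %s not found in epoch %d", consumer, epoch)
        }
        if apiName == "unsubscribe_all" { // TendermintUnsubscribeAll in the patch
            for _, sub := range subs {
                sub.Unsubscribe()
            }
            return nil
        }
        sub, ok := subs[subID]
        if !ok {
            return fmt.Errorf("subscription %s not found", subID)
        }
        sub.Unsubscribe()
        delete(subs, subID) // drop the entry once the node subscription is closed
        return nil
    }

    func main() {
        m := subscriptionMap{20: {"consumerA": {"sub-1": {id: "sub-1"}, "sub-2": {id: "sub-2"}}}}
        fmt.Println(unsubscribe(m, "eth_unsubscribe", "sub-1", "consumerA", 20))
        fmt.Println(unsubscribe(m, "unsubscribe_all", "", "consumerA", 20))
    }

The real manager additionally guards this map with its lock and keeps the reward-server notifications around it; the sketch only shows the map shape and the unsubscribe walk.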
--- protocol/lavasession/common.go | 1 + protocol/lavasession/errors.go | 1 + .../lavasession/provider_session_manager.go | 82 ++++++++++++------- protocol/lavasession/provider_types.go | 6 +- protocol/rpcprovider/rpcprovider_server.go | 29 ++++--- 5 files changed, 74 insertions(+), 45 deletions(-) diff --git a/protocol/lavasession/common.go b/protocol/lavasession/common.go index 4a401c7905..ea6e67b6e6 100644 --- a/protocol/lavasession/common.go +++ b/protocol/lavasession/common.go @@ -17,6 +17,7 @@ const ( DataReliabilityRelayNumber = 1 DataReliabilityCuSum = 0 GeolocationFlag = "geolocation" + TendermintUnsubscribeAll = "unsubscribe_all" ) var AvailabilityPercentage sdk.Dec = sdk.NewDecWithPrec(5, 2) // TODO move to params pairing diff --git a/protocol/lavasession/errors.go b/protocol/lavasession/errors.go index 59b2a30917..817e997cab 100644 --- a/protocol/lavasession/errors.go +++ b/protocol/lavasession/errors.go @@ -39,4 +39,5 @@ var ( // Provider Side Errors SubscriptionInitiationError = sdkerrors.New("SubscriptionInitiationError Error", 889, "Provider failed initiating subscription") EpochIsNotRegisteredError = sdkerrors.New("EpochIsNotRegisteredError Error", 890, "Epoch is not registered in provider session manager") ConsumerIsNotRegisteredError = sdkerrors.New("ConsumerIsNotRegisteredError Error", 891, "Consumer is not registered in provider session manager") + SubscriptionAlreadyExistsError = sdkerrors.New("SubscriptionAlreadyExists Error", 892, "Subscription already exists in single provider session") ) diff --git a/protocol/lavasession/provider_session_manager.go b/protocol/lavasession/provider_session_manager.go index f496632892..ebb725a66b 100644 --- a/protocol/lavasession/provider_session_manager.go +++ b/protocol/lavasession/provider_session_manager.go @@ -1,18 +1,17 @@ package lavasession import ( - "fmt" "strconv" "sync" "sync/atomic" - sdk "github.com/cosmos/cosmos-sdk/types" "github.com/lavanet/lava/utils" ) type ProviderSessionManager struct { sessionsWithAllConsumers map[uint64]map[string]*ProviderSessionsWithConsumer // first key is epochs, second key is a consumer address dataReliabilitySessionsWithAllConsumers map[uint64]map[string]*ProviderSessionsWithConsumer // first key is epochs, second key is a consumer address + subscriptionSessionsWithAllConsumers map[uint64]map[string]map[string]*RPCSubscription // first key is an epoch, second key is a consumer address, third key is subscriptionId lock sync.RWMutex blockedEpochHeight uint64 // requests from this epoch are blocked rpcProviderEndpoint *RPCProviderEndpoint @@ -242,38 +241,63 @@ func (psm *ProviderSessionManager) UpdateEpoch(epoch uint64) { psm.sessionsWithAllConsumers = newMap } -func (psm *ProviderSessionManager) ProcessUnsubscribeEthereum(subscriptionID string, consumerAddress sdk.AccAddress) error { - return fmt.Errorf("not implemented") -} +func (psm *ProviderSessionManager) ProcessUnsubscribe(apiName string, subscriptionID string, consumerAddress string, epoch uint64) error { + psm.lock.Lock() + defer psm.lock.Unlock() + mapOfConsumers, foundMapOfConsumers := psm.subscriptionSessionsWithAllConsumers[epoch] + if !foundMapOfConsumers { + return utils.LavaFormatError("Couldn't find epoch in psm.subscriptionSessionsWithAllConsumers", nil, &map[string]string{"epoch": strconv.FormatUint(epoch, 10), "address": consumerAddress}) + } + mapOfSubscriptionId, foundMapOfSubscriptionId := mapOfConsumers[consumerAddress] + if !foundMapOfSubscriptionId { + return utils.LavaFormatError("Couldn't find consumer address in 
psm.subscriptionSessionsWithAllConsumers", nil, &map[string]string{"epoch": strconv.FormatUint(epoch, 10), "address": consumerAddress}) + } -func (psm *ProviderSessionManager) ProcessUnsubscribeTendermint(apiName string, subscriptionID string, consumerAddress sdk.AccAddress) error { - return fmt.Errorf("not implemented") + if apiName == TendermintUnsubscribeAll { + // unsubscribe all subscriptions + for _, v := range mapOfSubscriptionId { + v.Sub.Unsubscribe() + } + return nil + } + + subscription, foundSubscription := mapOfSubscriptionId[subscriptionID] + if !foundSubscription { + return utils.LavaFormatError("Couldn't find subscription Id in psm.subscriptionSessionsWithAllConsumers", nil, &map[string]string{"epoch": strconv.FormatUint(epoch, 10), "address": consumerAddress, "subscriptionId": subscriptionID}) + } + subscription.Sub.Unsubscribe() + delete(mapOfSubscriptionId, subscriptionID) // delete subscription after finished with it + return nil } -func (psm *ProviderSessionManager) NewSubscription(consumerAddress string, epoch uint64, subscription *RPCSubscription) error { - // return an error if subscriptionID exists - // original code: - // userSessions.Lock.Lock() - // if _, ok := userSessions.Subs[subscriptionID]; ok { - // return utils.LavaFormatError("SubscriptiodID: "+subscriptionID+"exists", nil, nil) - // } - // userSessions.Subs[subscriptionID] = &subscription{ - // id: subscriptionID, - // sub: clientSub, - // subscribeRepliesChan: subscribeRepliesChan, - // } - // userSessions.Lock.Unlock() - return fmt.Errorf("not implemented") +func (psm *ProviderSessionManager) ReleaseSessionAndCreateSubscription(session *SingleProviderSession, subscription *RPCSubscription, consumerAddress string, epoch uint64) error { + err := psm.OnSessionDone(session) + if err != nil { + return utils.LavaFormatError("Failed ReleaseSessionAndCreateSubscription", err, nil) + } + return nil } -func (psm *ProviderSessionManager) SubscriptionFailure(consumerAddress string, epoch uint64, subscriptionID string) { - // original code - // userSessions.Lock.Lock() - // if sub, ok := userSessions.Subs[subscriptionID]; ok { - // sub.disconnect() - // delete(userSessions.Subs, subscriptionID) - // } - // userSessions.Lock.Unlock() +// try to disconnect the subscription incase we got an error. 
+// if fails to find assumes it was unsubscribed normally +func (psm *ProviderSessionManager) SubscriptionEnded(consumerAddress string, epoch uint64, subscriptionID string) { + psm.lock.Lock() + defer psm.lock.Unlock() + mapOfConsumers, foundMapOfConsumers := psm.subscriptionSessionsWithAllConsumers[epoch] + if !foundMapOfConsumers { + return + } + mapOfSubscriptionId, foundMapOfSubscriptionId := mapOfConsumers[consumerAddress] + if !foundMapOfSubscriptionId { + return + } + + subscription, foundSubscription := mapOfSubscriptionId[subscriptionID] + if !foundSubscription { + return + } + subscription.Sub.Unsubscribe() + delete(mapOfSubscriptionId, subscriptionID) // delete subscription after finished with it } // Called when the reward server has information on a higher cu proof and usage and this providerSessionsManager needs to sync up on it diff --git a/protocol/lavasession/provider_types.go b/protocol/lavasession/provider_types.go index 290cc03b4f..d6c270d267 100644 --- a/protocol/lavasession/provider_types.go +++ b/protocol/lavasession/provider_types.go @@ -111,7 +111,11 @@ func (pswc *ProviderSessionsWithConsumer) GetExistingSession(sessionId uint64) ( pswc.Lock.RLock() defer pswc.Lock.RUnlock() if session, ok := pswc.Sessions[sessionId]; ok { - session.lock.Lock() + locked := session.lock.TryLock() + if locked { + defer session.lock.Unlock() + return nil, utils.LavaFormatError("GetExistingSession failed", LockMisUseDetectedError, nil) + } return session, nil } return nil, SessionDoesNotExist diff --git a/protocol/rpcprovider/rpcprovider_server.go b/protocol/rpcprovider/rpcprovider_server.go index def0ad39e6..6073483fe3 100644 --- a/protocol/rpcprovider/rpcprovider_server.go +++ b/protocol/rpcprovider/rpcprovider_server.go @@ -167,7 +167,7 @@ func (rpcps *RPCProviderServer) RelaySubscribe(request *pairingtypes.RelayReques if err != nil { return rpcps.handleRelayErrorStatus(err) } - subscribed, err := rpcps.TryRelaySubscribe(ctx, request, srv, chainMessage, consumerAddress) // this function does not return until subscription ends + subscribed, err := rpcps.TryRelaySubscribe(ctx, uint64(request.BlockHeight), srv, chainMessage, consumerAddress, relaySession) // this function does not return until subscription ends if subscribed { // meaning we created a subscription and used it for at least a message relayError := rpcps.providerSessionManager.OnSessionDone(relaySession) // TODO: when we pay as u go on subscription this will need to change @@ -209,7 +209,7 @@ func (rpcps *RPCProviderServer) SendProof(ctx context.Context, relaySession *lav return nil } -func (rpcps *RPCProviderServer) TryRelaySubscribe(ctx context.Context, request *pairingtypes.RelayRequest, srv pairingtypes.Relayer_RelaySubscribeServer, chainMessage chainlib.ChainMessage, consumerAddress sdk.AccAddress) (subscribed bool, errRet error) { +func (rpcps *RPCProviderServer) TryRelaySubscribe(ctx context.Context, requestBlockHeight uint64, srv pairingtypes.Relayer_RelaySubscribeServer, chainMessage chainlib.ChainMessage, consumerAddress sdk.AccAddress, relaySession *lavasession.SingleProviderSession) (subscribed bool, errRet error) { var reply *pairingtypes.RelayReply var clientSub *rpcclient.ClientSubscription var subscriptionID string @@ -223,11 +223,11 @@ func (rpcps *RPCProviderServer) TryRelaySubscribe(ctx context.Context, request * Sub: clientSub, SubscribeRepliesChan: subscribeRepliesChan, } - err = rpcps.providerSessionManager.NewSubscription(consumerAddress.String(), uint64(request.BlockHeight), subscription) + err = 
rpcps.providerSessionManager.ReleaseSessionAndCreateSubscription(relaySession, subscription, consumerAddress.String(), requestBlockHeight) if err != nil { return false, err } - rpcps.rewardServer.SubscribeStarted(consumerAddress.String(), uint64(request.BlockHeight), subscriptionID) + rpcps.rewardServer.SubscribeStarted(consumerAddress.String(), requestBlockHeight, subscriptionID) processSubscribeMessages := func() (subscribed bool, errRet error) { err = srv.Send(reply) // this reply contains the RPC ID if err != nil { @@ -271,8 +271,8 @@ func (rpcps *RPCProviderServer) TryRelaySubscribe(ctx context.Context, request * } } subscribed, errRet = processSubscribeMessages() - rpcps.providerSessionManager.SubscriptionFailure(consumerAddress.String(), uint64(request.BlockHeight), subscriptionID) - rpcps.rewardServer.SubscribeEnded(consumerAddress.String(), uint64(request.BlockHeight), subscriptionID) + rpcps.providerSessionManager.SubscriptionEnded(consumerAddress.String(), requestBlockHeight, subscriptionID) + rpcps.rewardServer.SubscribeEnded(consumerAddress.String(), requestBlockHeight, subscriptionID) return subscribed, errRet } @@ -539,7 +539,7 @@ func (rpcps *RPCProviderServer) TryRelay(ctx context.Context, request *pairingty apiName := chainMsg.GetServiceApi().Name if reqMsg != nil && strings.Contains(apiName, "unsubscribe") { - err := rpcps.processUnsubscribe(apiName, consumerAddr, reqParams) + err := rpcps.processUnsubscribe(apiName, consumerAddr, reqParams, uint64(request.RequestBlock)) if err != nil { return nil, err } @@ -564,24 +564,23 @@ func (rpcps *RPCProviderServer) TryRelay(ctx context.Context, request *pairingty return reply, nil } -func (rpcps *RPCProviderServer) processUnsubscribe(apiName string, consumerAddr sdk.AccAddress, reqParams interface{}) error { +func (rpcps *RPCProviderServer) processUnsubscribe(apiName string, consumerAddr sdk.AccAddress, reqParams interface{}, epoch uint64) error { + var subscriptionID string switch p := reqParams.(type) { case []interface{}: - subscriptionID, ok := p[0].(string) + var ok bool + subscriptionID, ok = p[0].(string) if !ok { - return fmt.Errorf("processUnsubscribe - p[0].(string) - type assertion failed, type:" + fmt.Sprintf("%s", p[0])) + return utils.LavaFormatError("processUnsubscribe - p[0].(string) - type assertion failed", nil, &map[string]string{"type": fmt.Sprintf("%s", p[0])}) } - return rpcps.providerSessionManager.ProcessUnsubscribeEthereum(subscriptionID, consumerAddr) case map[string]interface{}: - subscriptionID := "" if apiName == "unsubscribe" { var ok bool subscriptionID, ok = p["query"].(string) if !ok { - return fmt.Errorf("processUnsubscribe - p['query'].(string) - type assertion failed, type:" + fmt.Sprintf("%s", p["query"])) + return utils.LavaFormatError("processUnsubscribe - p['query'].(string) - type assertion failed", nil, &map[string]string{"type": fmt.Sprintf("%s", p["query"])}) } } - return rpcps.providerSessionManager.ProcessUnsubscribeTendermint(apiName, subscriptionID, consumerAddr) } - return nil + return rpcps.providerSessionManager.ProcessUnsubscribe(apiName, subscriptionID, consumerAddr.String(), epoch) } From bfe1814d8b861e0516b25f2d112fb05015de2e71 Mon Sep 17 00:00:00 2001 From: Ran Mishael Date: Thu, 23 Feb 2023 18:04:15 +0100 Subject: [PATCH 060/123] fixing is valid epoch mistake. 
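The mistake being fixed is an inverted guard: the previous code returned InvalidEpochError when IsValidEpoch reported true. A minimal sketch of the corrected semantics, using a reduced stand-in for the session manager rather than the real type, is shown below: an epoch is usable only when it is strictly newer than the blocked epoch height, and the caller rejects the request when that check fails.

    package main

    import (
        "errors"
        "fmt"
        "sync/atomic"
    )

    var errInvalidEpoch = errors.New("invalid epoch")

    // manager is a reduced stand-in for ProviderSessionManager, keeping only the
    // blocked-epoch counter that the validity check reads atomically.
    type manager struct{ blockedEpochHeight uint64 }

    // isValidEpoch follows the fixed semantics: an epoch is usable only when it is
    // strictly newer than the blocked epoch height.
    func (m *manager) isValidEpoch(epoch uint64) bool {
        return epoch > atomic.LoadUint64(&m.blockedEpochHeight)
    }

    // getSession shows the corrected guard: reject when the epoch is NOT valid.
    // The pre-fix code returned the error when the epoch WAS valid.
    func (m *manager) getSession(epoch uint64) error {
        if !m.isValidEpoch(epoch) {
            return errInvalidEpoch
        }
        return nil // a real implementation would continue to the session lookup here
    }

    func main() {
        m := &manager{blockedEpochHeight: 100}
        fmt.Println(m.getSession(99))  // invalid epoch
        fmt.Println(m.getSession(101)) // <nil>
    }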
--- .../lavasession/provider_session_manager.go | 27 +++++++++---------- protocol/rpcprovider/rpcprovider_server.go | 4 +-- scripts/init_lava_over_lava.sh | 5 +++- .../init_chain_commands_one_provider.sh | 2 +- scripts/pre_setups/init_gth_only.sh | 2 +- scripts/pre_setups/init_lava_grpc.sh | 26 ++++++++++-------- scripts/pre_setups/init_osmosis_test.sh | 2 +- 7 files changed, 36 insertions(+), 32 deletions(-) diff --git a/protocol/lavasession/provider_session_manager.go b/protocol/lavasession/provider_session_manager.go index ebb725a66b..6cefe736a4 100644 --- a/protocol/lavasession/provider_session_manager.go +++ b/protocol/lavasession/provider_session_manager.go @@ -28,9 +28,12 @@ func (psm *ProviderSessionManager) atomicReadBlockedEpoch() (epoch uint64) { return atomic.LoadUint64(&psm.blockedEpochHeight) } -func (psm *ProviderSessionManager) IsValidEpoch(epoch uint64) (valid bool, blockedEpochHeight uint64) { - blockedEpochHeight = psm.atomicReadBlockedEpoch() - return epoch > blockedEpochHeight, blockedEpochHeight +func (psm *ProviderSessionManager) GetBlockedEpochHeight() uint64 { + return psm.atomicReadBlockedEpoch() +} + +func (psm *ProviderSessionManager) IsValidEpoch(epoch uint64) (valid bool) { + return epoch > psm.atomicReadBlockedEpoch() } // Check if consumer exists and is not blocked, if all is valid return the ProviderSessionsWithConsumer pointer @@ -85,8 +88,7 @@ func (psm *ProviderSessionManager) getOrCreateDataReliabilitySessionWithConsumer // GetDataReliabilitySession fetches a data reliability session, and assumes the user func (psm *ProviderSessionManager) GetDataReliabilitySession(address string, epoch uint64, sessionId uint64, relayNumber uint64) (*SingleProviderSession, error) { // validate Epoch - valid, _ := psm.IsValidEpoch(epoch) - if valid { // fast checking to see if epoch is even relevant + if !psm.IsValidEpoch(epoch) { // fast checking to see if epoch is even relevant utils.LavaFormatError("GetSession", InvalidEpochError, &map[string]string{"RequestedEpoch": strconv.FormatUint(epoch, 10)}) return nil, InvalidEpochError } @@ -109,9 +111,8 @@ func (psm *ProviderSessionManager) GetDataReliabilitySession(address string, epo } func (psm *ProviderSessionManager) GetSession(address string, epoch uint64, sessionId uint64, relayNumber uint64) (*SingleProviderSession, error) { - valid, _ := psm.IsValidEpoch(epoch) - if valid { // fast checking to see if epoch is even relevant - utils.LavaFormatError("GetSession", InvalidEpochError, &map[string]string{"RequestedEpoch": strconv.FormatUint(epoch, 10)}) + if !psm.IsValidEpoch(epoch) { // fast checking to see if epoch is even relevant + utils.LavaFormatError("GetSession", InvalidEpochError, &map[string]string{"RequestedEpoch": strconv.FormatUint(epoch, 10), "blockedEpochHeight": strconv.FormatUint(psm.blockedEpochHeight, 10), "blockDistanceForEpochValidity": strconv.FormatUint(psm.blockDistanceForEpochValidity, 10)}) return nil, InvalidEpochError } @@ -126,9 +127,7 @@ func (psm *ProviderSessionManager) GetSession(address string, epoch uint64, sess func (psm *ProviderSessionManager) registerNewSession(address string, epoch uint64, sessionId uint64, maxCuForConsumer uint64) (*ProviderSessionsWithConsumer, error) { psm.lock.Lock() defer psm.lock.Unlock() - - valid, _ := psm.IsValidEpoch(epoch) - if valid { // checking again because we are now locked and epoch cant change now. + if !psm.IsValidEpoch(epoch) { // checking again because we are now locked and epoch cant change now. 
utils.LavaFormatError("getActiveConsumer", InvalidEpochError, &map[string]string{"RequestedEpoch": strconv.FormatUint(epoch, 10)}) return nil, InvalidEpochError } @@ -170,8 +169,7 @@ func (psm *ProviderSessionManager) RegisterProviderSessionWithConsumer(address s func (psm *ProviderSessionManager) getActiveConsumer(epoch uint64, address string) (providerSessionWithConsumer *ProviderSessionsWithConsumer, err error) { psm.lock.RLock() defer psm.lock.RUnlock() - valid, _ := psm.IsValidEpoch(epoch) - if valid { // checking again because we are now locked and epoch cant change now. + if !psm.IsValidEpoch(epoch) { // checking again because we are now locked and epoch cant change now. utils.LavaFormatError("getActiveConsumer", InvalidEpochError, &map[string]string{"RequestedEpoch": strconv.FormatUint(epoch, 10)}) return nil, InvalidEpochError } @@ -305,8 +303,7 @@ func (psm *ProviderSessionManager) UpdateSessionCU(consumerAddress string, epoch // load the session and update the CU inside psm.lock.Lock() defer psm.lock.Unlock() - valid, _ := psm.IsValidEpoch(epoch) - if valid { // checking again because we are now locked and epoch cant change now. + if !psm.IsValidEpoch(epoch) { // checking again because we are now locked and epoch cant change now. return utils.LavaFormatError("UpdateSessionCU", InvalidEpochError, &map[string]string{"RequestedEpoch": strconv.FormatUint(epoch, 10)}) } diff --git a/protocol/rpcprovider/rpcprovider_server.go b/protocol/rpcprovider/rpcprovider_server.go index 6073483fe3..9b690d9784 100644 --- a/protocol/rpcprovider/rpcprovider_server.go +++ b/protocol/rpcprovider/rpcprovider_server.go @@ -278,12 +278,12 @@ func (rpcps *RPCProviderServer) TryRelaySubscribe(ctx context.Context, requestBl // verifies basic relay fields, and gets a provider session func (rpcps *RPCProviderServer) verifyRelaySession(ctx context.Context, request *pairingtypes.RelayRequest) (singleProviderSession *lavasession.SingleProviderSession, extractedConsumerAddress sdk.AccAddress, err error) { - valid, thresholdEpoch := rpcps.providerSessionManager.IsValidEpoch(uint64(request.BlockHeight)) + valid := rpcps.providerSessionManager.IsValidEpoch(uint64(request.BlockHeight)) if !valid { return nil, nil, utils.LavaFormatError("user reported invalid lava block height", nil, &map[string]string{ "current lava block": strconv.FormatInt(rpcps.stateTracker.LatestBlock(), 10), "requested lava block": strconv.FormatInt(request.BlockHeight, 10), - "threshold": strconv.FormatUint(thresholdEpoch, 10), + "threshold": strconv.FormatUint(rpcps.providerSessionManager.GetBlockedEpochHeight(), 10), }) } diff --git a/scripts/init_lava_over_lava.sh b/scripts/init_lava_over_lava.sh index a9d746e00b..f4df0c5f63 100755 --- a/scripts/init_lava_over_lava.sh +++ b/scripts/init_lava_over_lava.sh @@ -6,8 +6,11 @@ source $__dir/useful_commands.sh killall screen screen -wipe GASPRICE="0.000000001ulava" -lavad tx gov submit-proposal spec-add ./cookbook/spec_add_lava.json,./cookbook/spec_add_ethereum.json,./cookbook/spec_add_osmosis.json,./cookbook/spec_add_fantom.json,./cookbook/spec_add_goerli.json --from alice -y --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx gov submit-proposal spec-add ./cookbook/spec_add_cosmoshub.json,./cookbook/spec_add_ethereum.json --from alice -y --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE lavad tx gov vote 1 yes -y --from alice --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +sleep 3 +lavad tx gov submit-proposal spec-add ./cookbook/spec_add_lava.json --from 
alice -y --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx gov vote 2 yes -y --from alice --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE sleep 4 lavad tx pairing stake-client "LAV1" 200000ulava 1 -y --from user4 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE diff --git a/scripts/pre_setups/init_chain_commands_one_provider.sh b/scripts/pre_setups/init_chain_commands_one_provider.sh index da63a998d8..77d0169c2a 100755 --- a/scripts/pre_setups/init_chain_commands_one_provider.sh +++ b/scripts/pre_setups/init_chain_commands_one_provider.sh @@ -6,7 +6,7 @@ source "$__dir"/../useful_commands.sh killall screen screen -wipe GASPRICE="0.000000001ulava" -lavad tx gov submit-proposal spec-add ./cookbook/spec_add_lava.json,./cookbook/spec_add_ethereum.json,./cookbook/spec_add_osmosis.json,./cookbook/spec_add_fantom.json,./cookbook/spec_add_goerli.json,./cookbook/spec_add_celo.json,./cookbook/spec_add_alfajores.json -y --from alice --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx gov submit-proposal spec-add ./cookbook/spec_add_lava.json,./cookbook/spec_add_ethereum.json,./cookbook/spec_add_osmosis.json,./cookbook/spec_add_fantom.json,./cookbook/spec_add_celo.json,./cookbook/spec_add_alfajores.json -y --from alice --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE lavad tx gov vote 1 yes -y --from alice --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE lavad tx gov submit-proposal spec-add ./cookbook/spec_add_arbitrum.json,./cookbook/spec_add_starknet.json,./cookbook/spec_add_aptos.json,./cookbook/spec_add_juno.json,./cookbook/spec_add_cosmoshub.json,./cookbook/spec_add_polygon.json -y --from alice --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE diff --git a/scripts/pre_setups/init_gth_only.sh b/scripts/pre_setups/init_gth_only.sh index 1795240022..2f5a871918 100755 --- a/scripts/pre_setups/init_gth_only.sh +++ b/scripts/pre_setups/init_gth_only.sh @@ -7,7 +7,7 @@ killall screen screen -wipe LOGS_DIR=${__dir}/../../testutil/debugging/logs GASPRICE="0.000000001ulava" -lavad tx gov submit-proposal spec-add ./cookbook/spec_add_goerli.json -y --from alice --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx gov submit-proposal spec-add ./cookbook/spec_add_ethereum.json -y --from alice --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE lavad tx gov vote 1 yes -y --from alice --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE sleep 4 diff --git a/scripts/pre_setups/init_lava_grpc.sh b/scripts/pre_setups/init_lava_grpc.sh index 1d44f92294..3bc89d8c77 100755 --- a/scripts/pre_setups/init_lava_grpc.sh +++ b/scripts/pre_setups/init_lava_grpc.sh @@ -7,8 +7,11 @@ killall screen screen -wipe LOGS_DIR=${__dir}/../../testutil/debugging/logs GASPRICE="0.000000001ulava" -lavad tx gov submit-proposal spec-add ./cookbook/spec_add_lava.json,./cookbook/spec_add_ethereum.json,./cookbook/spec_add_osmosis.json,./cookbook/spec_add_fantom.json,./cookbook/spec_add_goerli.json -y --from alice --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx gov submit-proposal spec-add ./cookbook/spec_add_cosmoshub.json,./cookbook/spec_add_ethereum.json --from alice -y --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE lavad tx gov vote 1 yes -y --from alice --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +sleep 3 +lavad tx gov submit-proposal spec-add ./cookbook/spec_add_lava.json --from alice -y --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx gov vote 2 yes -y 
--from alice --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE sleep 4 @@ -25,18 +28,19 @@ lavad tx pairing stake-provider "LAV1" $STAKE "127.0.0.1:2263,tendermintrpc,1 12 sleep_until_next_epoch # Lava providers -screen -d -m -S lav1_providers bash -c "source ~/.bashrc; lavad server 127.0.0.1 2271 $LAVA_REST LAV1 rest --from servicer1 $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug 2>&1 | tee $LOGS_DIR/LAV1_2271.log"; sleep 0.3 -screen -S lav1_providers -X screen -t win1 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2272 $LAVA_REST LAV1 rest --from servicer2 $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug 2>&1 | tee $LOGS_DIR/LAV1_2272.log" -screen -S lav1_providers -X screen -t win2 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2273 $LAVA_REST LAV1 rest --from servicer3 $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug 2>&1 | tee $LOGS_DIR/LAV1_2273.log" -screen -S lav1_providers -X screen -t win3 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2261 $LAVA_RPC LAV1 tendermintrpc --from servicer1 $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug --tendermint-http-endpoint $LAVA_RPC_HTTP 2>&1 | tee $LOGS_DIR/LAV1_2261.log" -screen -S lav1_providers -X screen -t win4 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2262 $LAVA_RPC LAV1 tendermintrpc --from servicer2 $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug --tendermint-http-endpoint $LAVA_RPC_HTTP 2>&1 | tee $LOGS_DIR/LAV1_2262.log" -screen -S lav1_providers -X screen -t win5 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2263 $LAVA_RPC LAV1 tendermintrpc --from servicer3 $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug --tendermint-http-endpoint $LAVA_RPC_HTTP 2>&1 | tee $LOGS_DIR/LAV1_2263.log" -screen -S lav1_providers -X screen -t win6 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2281 $LAVA_GRPC LAV1 grpc --from servicer1 $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug 2>&1 | tee $LOGS_DIR/LAV1_2281.log" -screen -S lav1_providers -X screen -t win7 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2282 $LAVA_GRPC LAV1 grpc --from servicer2 $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug 2>&1 | tee $LOGS_DIR/LAV1_2282.log" -screen -S lav1_providers -X screen -t win8 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2283 $LAVA_GRPC LAV1 grpc --from servicer3 $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug 2>&1 | tee $LOGS_DIR/LAV1_2283.log" +# screen -d -m -S lav1_providers bash -c "source ~/.bashrc; lavad server 127.0.0.1 2271 $LAVA_REST LAV1 rest --from servicer1 $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug 2>&1 | tee $LOGS_DIR/LAV1_2271.log"; sleep 0.3 +# screen -S lav1_providers -X screen -t win1 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2272 $LAVA_REST LAV1 rest --from servicer2 $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug 2>&1 | tee $LOGS_DIR/LAV1_2272.log" +# screen -S lav1_providers -X screen -t win2 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2273 $LAVA_REST LAV1 rest --from servicer3 $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug 2>&1 | tee $LOGS_DIR/LAV1_2273.log" +# screen -S lav1_providers -X screen -t win3 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2261 $LAVA_RPC LAV1 tendermintrpc --from servicer1 $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug --tendermint-http-endpoint $LAVA_RPC_HTTP 2>&1 | tee $LOGS_DIR/LAV1_2261.log" +# screen -S lav1_providers -X screen -t win4 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2262 $LAVA_RPC LAV1 tendermintrpc --from 
servicer2 $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug --tendermint-http-endpoint $LAVA_RPC_HTTP 2>&1 | tee $LOGS_DIR/LAV1_2262.log" +# screen -S lav1_providers -X screen -t win5 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2263 $LAVA_RPC LAV1 tendermintrpc --from servicer3 $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug --tendermint-http-endpoint $LAVA_RPC_HTTP 2>&1 | tee $LOGS_DIR/LAV1_2263.log" +# screen -S lav1_providers -X screen -t win6 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2281 $LAVA_GRPC LAV1 grpc --from servicer1 $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug 2>&1 | tee $LOGS_DIR/LAV1_2281.log" +# screen -S lav1_providers -X screen -t win7 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2282 $LAVA_GRPC LAV1 grpc --from servicer2 $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug 2>&1 | tee $LOGS_DIR/LAV1_2282.log" +# screen -S lav1_providers -X screen -t win8 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2283 $LAVA_GRPC LAV1 grpc --from servicer3 $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug 2>&1 | tee $LOGS_DIR/LAV1_2283.log" screen -d -m -S portals bash -c "source ~/.bashrc; lavad rpcconsumer 127.0.0.1:3340 LAV1 rest 127.0.0.1:3341 LAV1 tendermintrpc 127.0.0.1:3342 LAV1 grpc --from user1 $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug | tee $LOGS_DIR/LAV1_tendermint_portal.log"; sleep 0.3 -screen -r portals +lavad rpcprovider 127.0.0.1:2272 LAV1 rest http://0.0.0.0:1317 --from servicer2 --geolocation 1 +# screen -r portals # Lava Over Lava ETH sleep 3 # wait for the portal to start. diff --git a/scripts/pre_setups/init_osmosis_test.sh b/scripts/pre_setups/init_osmosis_test.sh index f3d63dcc75..d0efff5c7b 100755 --- a/scripts/pre_setups/init_osmosis_test.sh +++ b/scripts/pre_setups/init_osmosis_test.sh @@ -7,7 +7,7 @@ killall screen screen -wipe LOGS_DIR=${__dir}/../../testutil/debugging/logs GASPRICE="0.000000001ulava" -lavad tx gov submit-proposal spec-add ./cookbook/spec_add_lava.json,./cookbook/spec_add_ethereum.json,./cookbook/spec_add_osmosis.json,./cookbook/spec_add_fantom.json,./cookbook/spec_add_goerli.json -y --from alice --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx gov submit-proposal spec-add ./cookbook/spec_add_lava.json,./cookbook/spec_add_ethereum.json,./cookbook/spec_add_osmosis.json,./cookbook/spec_add_fantom.json -y --from alice --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE lavad tx gov vote 1 yes -y --from alice --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE sleep 4 From 1afeace9bb5b3e37c0c2618538c6fdf118d1e0af Mon Sep 17 00:00:00 2001 From: omer mishael Date: Wed, 22 Feb 2023 19:28:12 +0200 Subject: [PATCH 061/123] changed chain tracker range to be inclusive and fixed a bug --- protocol/chaintracker/chain_tracker.go | 2 +- protocol/chaintracker/chain_tracker_test.go | 9 ++- protocol/chaintracker/errors.go | 1 + protocol/chaintracker/wanted_block_data.go | 20 ++--- .../chaintracker/wanted_block_data_test.go | 78 ++++++++++--------- protocol/rpcprovider/rpcprovider_server.go | 12 ++- protocol/statetracker/state_query.go | 8 +- 7 files changed, 71 insertions(+), 59 deletions(-) diff --git a/protocol/chaintracker/chain_tracker.go b/protocol/chaintracker/chain_tracker.go index 813c30e5f5..94dd7708da 100644 --- a/protocol/chaintracker/chain_tracker.go +++ b/protocol/chaintracker/chain_tracker.go @@ -40,7 +40,7 @@ type ChainTracker struct { endpoint lavasession.RPCProviderEndpoint } -// this function returns block hashes of the blocks: [from 
block - to block) non inclusive. an additional specific block hash can be provided. order is sorted ascending +// this function returns block hashes of the blocks: [from block - to block] inclusive. an additional specific block hash can be provided. order is sorted ascending // it supports requests for [spectypes.LATEST_BLOCK-distance1, spectypes.LATEST_BLOCK-distance2) // spectypes.NOT_APPLICABLE in fromBlock or toBlock results in only returning specific block. // if specific block is spectypes.NOT_APPLICABLE it is ignored diff --git a/protocol/chaintracker/chain_tracker_test.go b/protocol/chaintracker/chain_tracker_test.go index 5b7c6a69b6..1064605e54 100644 --- a/protocol/chaintracker/chain_tracker_test.go +++ b/protocol/chaintracker/chain_tracker_test.go @@ -115,7 +115,8 @@ func TestChainTracker(t *testing.T) { specificBlock int64 }{ {name: "one block memory + fetch", mockBlocks: 20, requestBlocks: 1, fetcherBlocks: 1, advancements: []int64{0, 1, 0, 0, 1, 1, 1, 0, 2, 0, 5, 1, 10, 1, 1, 1}, requestBlockFrom: spectypes.NOT_APPLICABLE, requestBlockTo: spectypes.NOT_APPLICABLE, specificBlock: spectypes.LATEST_BLOCK}, - {name: "ten block memory 4 block fetch", mockBlocks: 20, requestBlocks: 4, fetcherBlocks: 10, advancements: []int64{0, 1, 0, 0, 1, 1, 1, 0, 2, 0, 5, 1, 10, 1, 1, 1}, requestBlockFrom: spectypes.LATEST_BLOCK - 9, requestBlockTo: spectypes.LATEST_BLOCK - 6, specificBlock: spectypes.LATEST_BLOCK}, + {name: "ten block memory 4 block fetch", mockBlocks: 20, requestBlocks: 4, fetcherBlocks: 10, advancements: []int64{0, 1, 0, 0, 1, 1, 1, 0, 2, 0, 5, 1, 10, 1, 1, 1}, requestBlockFrom: spectypes.LATEST_BLOCK - 9, requestBlockTo: spectypes.LATEST_BLOCK - 6, specificBlock: spectypes.NOT_APPLICABLE}, + {name: "ten block memory one block fetch", mockBlocks: 20, requestBlocks: 1, fetcherBlocks: 10, advancements: []int64{0, 1, 0, 0, 1, 1, 1, 0, 2, 0, 5, 1, 10, 1, 1, 1}, requestBlockFrom: spectypes.LATEST_BLOCK, requestBlockTo: spectypes.LATEST_BLOCK, specificBlock: spectypes.NOT_APPLICABLE}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -173,7 +174,7 @@ func TestChainTrackerRangeOnly(t *testing.T) { requestBlockTo int64 specificBlock int64 }{ - {name: "ten block memory + 3 block fetch", mockBlocks: 100, requestBlocks: 3, fetcherBlocks: 10, advancements: []int64{0, 1, 0, 0, 1, 1, 1, 0, 2, 0, 5, 1, 10, 1, 1, 1}, requestBlockFrom: spectypes.LATEST_BLOCK - 6, requestBlockTo: spectypes.LATEST_BLOCK - 3, specificBlock: spectypes.NOT_APPLICABLE}, + {name: "ten block memory + 3 block fetch", mockBlocks: 100, requestBlocks: 3, fetcherBlocks: 10, advancements: []int64{0, 1, 0, 0, 1, 1, 1, 0, 2, 0, 5, 1, 10, 1, 1, 1}, requestBlockFrom: spectypes.LATEST_BLOCK - 6, requestBlockTo: spectypes.LATEST_BLOCK - 4, specificBlock: spectypes.NOT_APPLICABLE}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -219,7 +220,7 @@ func TestChainTrackerCallbacks(t *testing.T) { requestBlocks := 3 fetcherBlocks := 10 requestBlockFrom := spectypes.LATEST_BLOCK - 6 - requestBlockTo := spectypes.LATEST_BLOCK - 3 + requestBlockTo := spectypes.LATEST_BLOCK - 4 specificBlock := spectypes.NOT_APPLICABLE tests := []struct { name string @@ -313,7 +314,7 @@ func TestChainTrackerMaintainMemory(t *testing.T) { requestBlocks := 4 fetcherBlocks := 50 requestBlockFrom := spectypes.LATEST_BLOCK - 6 - requestBlockTo := spectypes.LATEST_BLOCK - 3 + requestBlockTo := spectypes.LATEST_BLOCK - 4 specificBlock := spectypes.LATEST_BLOCK - 30 //needs to be smaller than requestBlockFrom, can't be 
NOT_APPLICABLE tests := []struct { name string diff --git a/protocol/chaintracker/errors.go b/protocol/chaintracker/errors.go index a33129814c..28b58fb670 100644 --- a/protocol/chaintracker/errors.go +++ b/protocol/chaintracker/errors.go @@ -13,4 +13,5 @@ var ( // Consumer Side Errors InvalidRequestedBlocks = sdkerrors.New("Error InvalidRequestedBlocks", 10706, "provided requested blocks for function do not compse a valid request") RequestedBlocksOutOfRange = sdkerrors.New("RequestedBlocksOutOfRange", 10707, "requested blocks are outside the supported range by the state tracker") ErrorFailedToFetchTooEarlyBlock = sdkerrors.New("Error ErrorFailedToFetchTooEarlyBlock", 10708, "server memory protection triggered, requested block is too early") + InvalidRequestedSpecificBlock = sdkerrors.New("Error InvalidRequestedSpecificBlock", 10709, "provided requested specific blocks for function do not compose a stored entry") ) diff --git a/protocol/chaintracker/wanted_block_data.go b/protocol/chaintracker/wanted_block_data.go index 69b110dbb5..e0e739c0f7 100644 --- a/protocol/chaintracker/wanted_block_data.go +++ b/protocol/chaintracker/wanted_block_data.go @@ -42,13 +42,13 @@ func (wbd *WantedBlocksData) New(fromBlock int64, toBlock int64, specificBlock i if !ignoreSpecific { fromBlockArg := LatestArgToBlockNum(specificBlock, latestBlock) if wbd.rangeBlocks.IsWanted(fromBlockArg) { - // this means the specific block is within the range of [from-to) and there is no reason to create specific block range + // this means the specific block is within the range of [from-to] and there is no reason to create specific block range wbd.specificBlock = nil } else { - toBlockArg := fromBlockArg + 1 + toBlockArg := fromBlockArg // [from,to] with only one block wbd.specificBlock, err = NewBlockRange(fromBlockArg, toBlockArg, earliestBlockSaved, latestBlock) if err != nil { - return err + return InvalidRequestedSpecificBlock.Wrapf("specific " + err.Error()) } } } else { @@ -102,15 +102,15 @@ func (wbd *WantedBlocksData) String() string { func NewBlockRange(fromBlock int64, toBlock int64, earliestBlockSaved int64, latestBlock int64) (br *BlockRange, err error) { if fromBlock < 0 || toBlock < 0 || earliestBlockSaved < 0 { - return nil, RequestedBlocksOutOfRange.Wrapf("invalid input block range: from=%d to=%d earliest=%d", fromBlock, toBlock, earliestBlockSaved) + return nil, RequestedBlocksOutOfRange.Wrapf("invalid input block range: from=%d to=%d earliest=%d latest=%d", fromBlock, toBlock, earliestBlockSaved, latestBlock) } - if toBlock <= fromBlock { // if we don't have a range, it should be set with NOT_APPLICABLE - return nil, InvalidRequestedBlocks.Wrapf("invalid input block range: from=%d to=%d earliest=%d", fromBlock, toBlock, earliestBlockSaved) + if toBlock < fromBlock { // if we don't have a range, it should be set with NOT_APPLICABLE + return nil, InvalidRequestedBlocks.Wrapf("invalid input block range: from=%d to=%d earliest=%d latest=%d", fromBlock, toBlock, earliestBlockSaved, latestBlock) } if fromBlock < earliestBlockSaved { - return nil, RequestedBlocksOutOfRange.Wrapf("invalid input block fromBlock: from=%d to=%d earliest=%d", fromBlock, toBlock, earliestBlockSaved) + return nil, RequestedBlocksOutOfRange.Wrapf("invalid input block fromBlock: from=%d to=%d earliest=%d latest=%d", fromBlock, toBlock, earliestBlockSaved, latestBlock) } - if toBlock > latestBlock+1 { // latest+1 to allow a range to include latest block + if toBlock > latestBlock { return nil, RequestedBlocksOutOfRange.Wrapf("invalid 
input block toBlock: from=%d to=%d latest=%d", fromBlock, toBlock, latestBlock) } blockRange := &BlockRange{} @@ -122,7 +122,7 @@ func NewBlockRange(fromBlock int64, toBlock int64, earliestBlockSaved int64, lat } func (br *BlockRange) IterationIndexes() []int { - indexes := make([]int, br.endIndexFromEarliest-br.startIndexFromEarliest) + indexes := make([]int, br.endIndexFromEarliest-br.startIndexFromEarliest+1) for i := 0; i < len(indexes); i++ { indexes[i] = int(br.startIndexFromEarliest) + i } @@ -136,7 +136,7 @@ func (br *BlockRange) IsWanted(blockNum int64) bool { if br.fromBlock > blockNum { return false } - if br.toBlock <= blockNum { + if br.toBlock < blockNum { return false } return true diff --git a/protocol/chaintracker/wanted_block_data_test.go b/protocol/chaintracker/wanted_block_data_test.go index 3a1c15c6e3..ec4dc72da6 100644 --- a/protocol/chaintracker/wanted_block_data_test.go +++ b/protocol/chaintracker/wanted_block_data_test.go @@ -21,16 +21,16 @@ func TestWantedBlockData(t *testing.T) { valid bool expectedElements int }{ - {name: "only one saved block range 1", earliestBlock: 1000, latestBlock: 1000, fromBlock: 1000, toBlock: 1001, specificBlock: spectypes.NOT_APPLICABLE, valid: true, expectedElements: 1}, - {name: "only one saved block overlap", earliestBlock: 1000, latestBlock: 1000, fromBlock: 1000, toBlock: 1001, specificBlock: 1000, valid: true, expectedElements: 1}, - {name: "only one saved block specific from N/A", earliestBlock: 1000, latestBlock: 1000, fromBlock: spectypes.NOT_APPLICABLE, toBlock: 1001, specificBlock: 1000, valid: true, expectedElements: 1}, + {name: "only one saved block range 1", earliestBlock: 1000, latestBlock: 1000, fromBlock: 1000, toBlock: 1000, specificBlock: spectypes.NOT_APPLICABLE, valid: true, expectedElements: 1}, + {name: "only one saved block overlap", earliestBlock: 1000, latestBlock: 1000, fromBlock: 1000, toBlock: 1000, specificBlock: 1000, valid: true, expectedElements: 1}, + {name: "only one saved block specific from N/A", earliestBlock: 1000, latestBlock: 1000, fromBlock: spectypes.NOT_APPLICABLE, toBlock: 1000, specificBlock: 1000, valid: true, expectedElements: 1}, {name: "only one saved block specific to N/A", earliestBlock: 1000, latestBlock: 1000, fromBlock: 1000, toBlock: spectypes.NOT_APPLICABLE, specificBlock: 1000, valid: true, expectedElements: 1}, {name: "only one saved block specific both N/A", earliestBlock: 1000, latestBlock: 1000, fromBlock: spectypes.NOT_APPLICABLE, toBlock: spectypes.NOT_APPLICABLE, specificBlock: 1000, valid: true, expectedElements: 1}, {name: "only one saved block specific from N/A other is latest", earliestBlock: 1000, latestBlock: 1000, fromBlock: spectypes.NOT_APPLICABLE, toBlock: spectypes.LATEST_BLOCK, specificBlock: 1000, valid: true, expectedElements: 1}, {name: "only one saved block specific from N/A other is latest with distance", earliestBlock: 1000, latestBlock: 1000, fromBlock: spectypes.NOT_APPLICABLE, toBlock: spectypes.LATEST_BLOCK - 5, specificBlock: 1000, valid: true, expectedElements: 1}, {name: "only one saved block specific to N/A other is latest", earliestBlock: 1000, latestBlock: 1000, fromBlock: spectypes.LATEST_BLOCK, toBlock: spectypes.NOT_APPLICABLE, specificBlock: 1000, valid: true, expectedElements: 1}, {name: "only one saved block specific to N/A other is latest with distance", earliestBlock: 1000, latestBlock: 1000, fromBlock: spectypes.LATEST_BLOCK - 5, toBlock: spectypes.NOT_APPLICABLE, specificBlock: 1000, valid: true, expectedElements: 1}, - {name: 
"latest only one saved block specific from N/A", earliestBlock: 1000, latestBlock: 1000, fromBlock: spectypes.NOT_APPLICABLE, toBlock: 1001, specificBlock: spectypes.LATEST_BLOCK, valid: true, expectedElements: 1}, + {name: "latest only one saved block specific from N/A", earliestBlock: 1000, latestBlock: 1000, fromBlock: spectypes.NOT_APPLICABLE, toBlock: 1000, specificBlock: spectypes.LATEST_BLOCK, valid: true, expectedElements: 1}, {name: "latest only one saved block specific to N/A", earliestBlock: 1000, latestBlock: 1000, fromBlock: 1000, toBlock: spectypes.NOT_APPLICABLE, specificBlock: spectypes.LATEST_BLOCK, valid: true, expectedElements: 1}, {name: "latest only one saved block specific both N/A", earliestBlock: 1000, latestBlock: 1000, fromBlock: spectypes.NOT_APPLICABLE, toBlock: spectypes.NOT_APPLICABLE, specificBlock: spectypes.LATEST_BLOCK, valid: true, expectedElements: 1}, {name: "latest only one saved block specific from N/A other is latest", earliestBlock: 1000, latestBlock: 1000, fromBlock: spectypes.NOT_APPLICABLE, toBlock: spectypes.LATEST_BLOCK, specificBlock: spectypes.LATEST_BLOCK, valid: true, expectedElements: 1}, @@ -38,16 +38,16 @@ func TestWantedBlockData(t *testing.T) { {name: "latest only one saved block specific to N/A other is latest", earliestBlock: 1000, latestBlock: 1000, fromBlock: spectypes.LATEST_BLOCK, toBlock: spectypes.NOT_APPLICABLE, specificBlock: spectypes.LATEST_BLOCK, valid: true, expectedElements: 1}, {name: "latest only one saved block specific to N/A other is latest with distance", earliestBlock: 1000, latestBlock: 1000, fromBlock: spectypes.LATEST_BLOCK - 5, toBlock: spectypes.NOT_APPLICABLE, specificBlock: spectypes.LATEST_BLOCK, valid: true, expectedElements: 1}, - {name: "ten saved blocks range 1", earliestBlock: 1000, latestBlock: 1009, fromBlock: 1000, toBlock: 1001, specificBlock: spectypes.NOT_APPLICABLE, valid: true, expectedElements: 1}, - {name: "ten saved blocks overlap", earliestBlock: 1000, latestBlock: 1009, fromBlock: 1000, toBlock: 1001, specificBlock: 1000, valid: true, expectedElements: 1}, - {name: "ten saved blocks specific from N/A", earliestBlock: 1000, latestBlock: 1009, fromBlock: spectypes.NOT_APPLICABLE, toBlock: 1001, specificBlock: 1000, valid: true, expectedElements: 1}, + {name: "ten saved blocks range 1", earliestBlock: 1000, latestBlock: 1009, fromBlock: 1000, toBlock: 1000, specificBlock: spectypes.NOT_APPLICABLE, valid: true, expectedElements: 1}, + {name: "ten saved blocks overlap", earliestBlock: 1000, latestBlock: 1009, fromBlock: 1000, toBlock: 1000, specificBlock: 1000, valid: true, expectedElements: 1}, + {name: "ten saved blocks specific from N/A", earliestBlock: 1000, latestBlock: 1009, fromBlock: spectypes.NOT_APPLICABLE, toBlock: 1000, specificBlock: 1000, valid: true, expectedElements: 1}, {name: "ten saved blocks specific to N/A", earliestBlock: 1000, latestBlock: 1009, fromBlock: 1000, toBlock: spectypes.NOT_APPLICABLE, specificBlock: 1000, valid: true, expectedElements: 1}, {name: "ten saved blocks specific both N/A", earliestBlock: 1000, latestBlock: 1009, fromBlock: spectypes.NOT_APPLICABLE, toBlock: spectypes.NOT_APPLICABLE, specificBlock: 1000, valid: true, expectedElements: 1}, {name: "ten saved blocks specific from N/A other is latest", earliestBlock: 1000, latestBlock: 1009, fromBlock: spectypes.NOT_APPLICABLE, toBlock: spectypes.LATEST_BLOCK, specificBlock: 1000, valid: true, expectedElements: 1}, {name: "ten saved blocks specific from N/A other is latest with distance", earliestBlock: 
1000, latestBlock: 1009, fromBlock: spectypes.NOT_APPLICABLE, toBlock: spectypes.LATEST_BLOCK - 5, specificBlock: 1000, valid: true, expectedElements: 1}, {name: "ten saved blocks specific to N/A other is latest", earliestBlock: 1000, latestBlock: 1009, fromBlock: spectypes.LATEST_BLOCK, toBlock: spectypes.NOT_APPLICABLE, specificBlock: 1000, valid: true, expectedElements: 1}, {name: "ten saved blocks specific to N/A other is latest with distance", earliestBlock: 1000, latestBlock: 1009, fromBlock: spectypes.LATEST_BLOCK - 5, toBlock: spectypes.NOT_APPLICABLE, specificBlock: 1000, valid: true, expectedElements: 1}, - {name: "latest ten saved blocks specific from N/A", earliestBlock: 1000, latestBlock: 1009, fromBlock: spectypes.NOT_APPLICABLE, toBlock: 1001, specificBlock: spectypes.LATEST_BLOCK, valid: true, expectedElements: 1}, + {name: "latest ten saved blocks specific from N/A", earliestBlock: 1000, latestBlock: 1009, fromBlock: spectypes.NOT_APPLICABLE, toBlock: 1000, specificBlock: spectypes.LATEST_BLOCK, valid: true, expectedElements: 1}, {name: "latest ten saved blocks specific to N/A", earliestBlock: 1000, latestBlock: 1009, fromBlock: 1000, toBlock: spectypes.NOT_APPLICABLE, specificBlock: spectypes.LATEST_BLOCK, valid: true, expectedElements: 1}, {name: "latest ten saved blocks specific both N/A", earliestBlock: 1000, latestBlock: 1009, fromBlock: spectypes.NOT_APPLICABLE, toBlock: spectypes.NOT_APPLICABLE, specificBlock: spectypes.LATEST_BLOCK, valid: true, expectedElements: 1}, {name: "latest ten saved blocks specific from N/A other is latest", earliestBlock: 1000, latestBlock: 1009, fromBlock: spectypes.NOT_APPLICABLE, toBlock: spectypes.LATEST_BLOCK, specificBlock: spectypes.LATEST_BLOCK, valid: true, expectedElements: 1}, @@ -55,47 +55,49 @@ func TestWantedBlockData(t *testing.T) { {name: "latest ten saved blocks specific to N/A other is latest", earliestBlock: 1000, latestBlock: 1009, fromBlock: spectypes.LATEST_BLOCK, toBlock: spectypes.NOT_APPLICABLE, specificBlock: spectypes.LATEST_BLOCK, valid: true, expectedElements: 1}, {name: "latest ten saved blocks specific to N/A other is latest with distance", earliestBlock: 1000, latestBlock: 1009, fromBlock: spectypes.LATEST_BLOCK - 5, toBlock: spectypes.NOT_APPLICABLE, specificBlock: spectypes.LATEST_BLOCK, valid: true, expectedElements: 1}, - {name: "ten saved blocks range 5", earliestBlock: 1000, latestBlock: 1009, fromBlock: 1000, toBlock: 1005, specificBlock: spectypes.NOT_APPLICABLE, valid: true, expectedElements: 5}, - {name: "ten saved blocks range 5 the end of the list", earliestBlock: 1000, latestBlock: 1009, fromBlock: 1005, toBlock: 1010, specificBlock: spectypes.NOT_APPLICABLE, valid: true, expectedElements: 5}, - {name: "ten saved blocks range 5 overlap", earliestBlock: 1000, latestBlock: 1009, fromBlock: 1000, toBlock: 1005, specificBlock: 1003, valid: true, expectedElements: 5}, - {name: "ten saved blocks range 5 w specific next", earliestBlock: 1000, latestBlock: 1009, fromBlock: 1000, toBlock: 1005, specificBlock: 1006, valid: true, expectedElements: 6}, - {name: "ten saved blocks range 5 w specific", earliestBlock: 1000, latestBlock: 1009, fromBlock: 1000, toBlock: 1005, specificBlock: 1007, valid: true, expectedElements: 6}, - {name: "ten saved blocks range 5 w specific before", earliestBlock: 1000, latestBlock: 1009, fromBlock: 1002, toBlock: 1007, specificBlock: 1000, valid: true, expectedElements: 6}, - {name: "ten saved blocks range 5 w specific prev", earliestBlock: 1000, latestBlock: 1009, fromBlock: 
1002, toBlock: 1007, specificBlock: 1001, valid: true, expectedElements: 6}, - {name: "ten saved blocks range 10", earliestBlock: 1000, latestBlock: 1009, fromBlock: 1000, toBlock: 1010, specificBlock: spectypes.NOT_APPLICABLE, valid: true, expectedElements: 10}, + {name: "ten saved blocks range 5", earliestBlock: 1000, latestBlock: 1009, fromBlock: 1000, toBlock: 1004, specificBlock: spectypes.NOT_APPLICABLE, valid: true, expectedElements: 5}, + {name: "ten saved blocks range 5 the end of the list", earliestBlock: 1000, latestBlock: 1009, fromBlock: 1005, toBlock: 1009, specificBlock: spectypes.NOT_APPLICABLE, valid: true, expectedElements: 5}, + {name: "ten saved blocks range 5 overlap", earliestBlock: 1000, latestBlock: 1009, fromBlock: 1000, toBlock: 1004, specificBlock: 1003, valid: true, expectedElements: 5}, + {name: "ten saved blocks range 5 w specific next", earliestBlock: 1000, latestBlock: 1009, fromBlock: 1000, toBlock: 1004, specificBlock: 1006, valid: true, expectedElements: 6}, + {name: "ten saved blocks range 5 w specific", earliestBlock: 1000, latestBlock: 1009, fromBlock: 1000, toBlock: 1004, specificBlock: 1007, valid: true, expectedElements: 6}, + {name: "ten saved blocks range 5 w specific before", earliestBlock: 1000, latestBlock: 1009, fromBlock: 1002, toBlock: 1006, specificBlock: 1000, valid: true, expectedElements: 6}, + {name: "ten saved blocks range 5 w specific prev", earliestBlock: 1000, latestBlock: 1009, fromBlock: 1002, toBlock: 1006, specificBlock: 1001, valid: true, expectedElements: 6}, + {name: "ten saved blocks range 10", earliestBlock: 1000, latestBlock: 1009, fromBlock: 1000, toBlock: 1009, specificBlock: spectypes.NOT_APPLICABLE, valid: true, expectedElements: 10}, - {name: "latest ten saved blocks range 5", earliestBlock: 1000, latestBlock: 1009, fromBlock: spectypes.LATEST_BLOCK - 9, toBlock: spectypes.LATEST_BLOCK - 4, specificBlock: spectypes.NOT_APPLICABLE, valid: true, expectedElements: 5}, - {name: "latest ten saved blocks range 5 the end of the list", earliestBlock: 1000, latestBlock: 1009, fromBlock: spectypes.LATEST_BLOCK - 4, toBlock: spectypes.LATEST_BLOCK, specificBlock: spectypes.LATEST_BLOCK, valid: true, expectedElements: 5}, - {name: "latest ten saved blocks range 5 overlap the end of the list", earliestBlock: 1000, latestBlock: 1009, fromBlock: spectypes.LATEST_BLOCK - 4, toBlock: spectypes.LATEST_BLOCK, specificBlock: spectypes.LATEST_BLOCK - 2, valid: true, expectedElements: 4}, - {name: "latest ten saved blocks range 5 overlap", earliestBlock: 1000, latestBlock: 1009, fromBlock: spectypes.LATEST_BLOCK - 5, toBlock: spectypes.LATEST_BLOCK, specificBlock: spectypes.LATEST_BLOCK - 2, valid: true, expectedElements: 5}, - {name: "latest ten saved blocks range 5 w specific next", earliestBlock: 1000, latestBlock: 1009, fromBlock: spectypes.LATEST_BLOCK - 9, toBlock: spectypes.LATEST_BLOCK - 4, specificBlock: spectypes.LATEST_BLOCK - 4, valid: true, expectedElements: 6}, - {name: "latest ten saved blocks range 5 w specific", earliestBlock: 1000, latestBlock: 1009, fromBlock: spectypes.LATEST_BLOCK - 9, toBlock: spectypes.LATEST_BLOCK - 4, specificBlock: spectypes.LATEST_BLOCK - 2, valid: true, expectedElements: 6}, - {name: "latest ten saved blocks range 5 w specific before", earliestBlock: 1000, latestBlock: 1009, fromBlock: spectypes.LATEST_BLOCK - 6, toBlock: spectypes.LATEST_BLOCK - 1, specificBlock: spectypes.LATEST_BLOCK - 8, valid: true, expectedElements: 6}, - {name: "latest ten saved blocks range 5 w specific prev", 
earliestBlock: 1000, latestBlock: 1009, fromBlock: spectypes.LATEST_BLOCK - 6, toBlock: spectypes.LATEST_BLOCK - 1, specificBlock: spectypes.LATEST_BLOCK - 7, valid: true, expectedElements: 6}, - {name: "latest ten saved blocks range 10", earliestBlock: 1000, latestBlock: 1009, fromBlock: spectypes.LATEST_BLOCK - 9, toBlock: spectypes.LATEST_BLOCK, specificBlock: spectypes.LATEST_BLOCK, valid: true, expectedElements: 10}, + {name: "latest ten saved blocks range 5", earliestBlock: 1000, latestBlock: 1009, fromBlock: spectypes.LATEST_BLOCK - 9, toBlock: spectypes.LATEST_BLOCK - 5, specificBlock: spectypes.NOT_APPLICABLE, valid: true, expectedElements: 5}, + {name: "latest ten saved blocks range 5 the end of the list", earliestBlock: 1000, latestBlock: 1009, fromBlock: spectypes.LATEST_BLOCK - 4, toBlock: spectypes.LATEST_BLOCK - 1, specificBlock: spectypes.LATEST_BLOCK, valid: true, expectedElements: 5}, + {name: "latest ten saved blocks range 5 overlap the end of the list", earliestBlock: 1000, latestBlock: 1009, fromBlock: spectypes.LATEST_BLOCK - 4, toBlock: spectypes.LATEST_BLOCK - 1, specificBlock: spectypes.LATEST_BLOCK - 2, valid: true, expectedElements: 4}, + {name: "latest ten saved blocks range 5 overlap", earliestBlock: 1000, latestBlock: 1009, fromBlock: spectypes.LATEST_BLOCK - 5, toBlock: spectypes.LATEST_BLOCK - 1, specificBlock: spectypes.LATEST_BLOCK - 2, valid: true, expectedElements: 5}, + {name: "latest ten saved blocks range 5 w specific next", earliestBlock: 1000, latestBlock: 1009, fromBlock: spectypes.LATEST_BLOCK - 9, toBlock: spectypes.LATEST_BLOCK - 5, specificBlock: spectypes.LATEST_BLOCK - 4, valid: true, expectedElements: 6}, + {name: "latest ten saved blocks range 5 w specific", earliestBlock: 1000, latestBlock: 1009, fromBlock: spectypes.LATEST_BLOCK - 9, toBlock: spectypes.LATEST_BLOCK - 5, specificBlock: spectypes.LATEST_BLOCK - 2, valid: true, expectedElements: 6}, + {name: "latest ten saved blocks range 5 w specific before", earliestBlock: 1000, latestBlock: 1009, fromBlock: spectypes.LATEST_BLOCK - 6, toBlock: spectypes.LATEST_BLOCK - 2, specificBlock: spectypes.LATEST_BLOCK - 8, valid: true, expectedElements: 6}, + {name: "latest ten saved blocks range 5 w specific prev", earliestBlock: 1000, latestBlock: 1009, fromBlock: spectypes.LATEST_BLOCK - 6, toBlock: spectypes.LATEST_BLOCK - 2, specificBlock: spectypes.LATEST_BLOCK - 7, valid: true, expectedElements: 6}, + {name: "latest ten saved blocks range 9 w specific", earliestBlock: 1000, latestBlock: 1009, fromBlock: spectypes.LATEST_BLOCK - 9, toBlock: spectypes.LATEST_BLOCK - 1, specificBlock: spectypes.LATEST_BLOCK, valid: true, expectedElements: 10}, + {name: "latest ten saved blocks range 10 no specific", earliestBlock: 1000, latestBlock: 1009, fromBlock: spectypes.LATEST_BLOCK - 9, toBlock: spectypes.LATEST_BLOCK, specificBlock: spectypes.NOT_APPLICABLE, valid: true, expectedElements: 10}, + {name: "latest ten saved blocks range 10 w specific overlap", earliestBlock: 1000, latestBlock: 1009, fromBlock: spectypes.LATEST_BLOCK - 9, toBlock: spectypes.LATEST_BLOCK, specificBlock: spectypes.LATEST_BLOCK, valid: true, expectedElements: 10}, // test invalid cases {name: "invalid only one saved block all N/A", earliestBlock: 1000, latestBlock: 1000, fromBlock: spectypes.NOT_APPLICABLE, toBlock: spectypes.NOT_APPLICABLE, specificBlock: spectypes.NOT_APPLICABLE, valid: false, expectedElements: 0}, - {name: "invalid only one saved block from/specific N/A", earliestBlock: 1000, latestBlock: 1000, fromBlock: 
spectypes.NOT_APPLICABLE, toBlock: 1001, specificBlock: spectypes.NOT_APPLICABLE, valid: false, expectedElements: 0}, + {name: "invalid only one saved block from/specific N/A", earliestBlock: 1000, latestBlock: 1000, fromBlock: spectypes.NOT_APPLICABLE, toBlock: 1000, specificBlock: spectypes.NOT_APPLICABLE, valid: false, expectedElements: 0}, {name: "invalid only one saved block to/specific", earliestBlock: 1000, latestBlock: 1000, fromBlock: 1000, toBlock: spectypes.NOT_APPLICABLE, specificBlock: spectypes.NOT_APPLICABLE, valid: false, expectedElements: 0}, {name: "invalid only one saved block invalid range bigger", earliestBlock: 1000, latestBlock: 1000, fromBlock: 1001, toBlock: 1002, specificBlock: spectypes.NOT_APPLICABLE, valid: false, expectedElements: 0}, - {name: "invalid only one saved block invalid range size", earliestBlock: 1000, latestBlock: 1000, fromBlock: 1000, toBlock: 1000, specificBlock: spectypes.NOT_APPLICABLE, valid: false, expectedElements: 0}, + {name: "invalid only one saved block invalid range size", earliestBlock: 1000, latestBlock: 1000, fromBlock: 1001, toBlock: 1000, specificBlock: spectypes.NOT_APPLICABLE, valid: false, expectedElements: 0}, {name: "invalid only one saved block invalid range smaller", earliestBlock: 1000, latestBlock: 1000, fromBlock: 999, toBlock: 1000, specificBlock: spectypes.NOT_APPLICABLE, valid: false, expectedElements: 0}, - {name: "invalid only one saved block invalid specific bigger", earliestBlock: 1000, latestBlock: 1000, fromBlock: 1000, toBlock: 1001, specificBlock: 1001, valid: false, expectedElements: 0}, - {name: "invalid only one saved block invalid specific smaller", earliestBlock: 1000, latestBlock: 1000, fromBlock: 1000, toBlock: 1001, specificBlock: 999, valid: false, expectedElements: 0}, - {name: "invalid only one saved block invalid specific bigger range N/A", earliestBlock: 1000, latestBlock: 1000, fromBlock: spectypes.NOT_APPLICABLE, toBlock: 1001, specificBlock: 1001, valid: false, expectedElements: 0}, - {name: "invalid only one saved block invalid specific smaller range N/A", earliestBlock: 1000, latestBlock: 1000, fromBlock: spectypes.NOT_APPLICABLE, toBlock: 1001, specificBlock: 999, valid: false, expectedElements: 0}, + {name: "invalid only one saved block invalid specific bigger", earliestBlock: 1000, latestBlock: 1000, fromBlock: 1000, toBlock: 1000, specificBlock: 1001, valid: false, expectedElements: 0}, + {name: "invalid only one saved block invalid specific smaller", earliestBlock: 1000, latestBlock: 1000, fromBlock: 1000, toBlock: 1000, specificBlock: 999, valid: false, expectedElements: 0}, + {name: "invalid only one saved block invalid specific bigger range N/A", earliestBlock: 1000, latestBlock: 1000, fromBlock: spectypes.NOT_APPLICABLE, toBlock: 1000, specificBlock: 1001, valid: false, expectedElements: 0}, + {name: "invalid only one saved block invalid specific smaller range N/A", earliestBlock: 1000, latestBlock: 1000, fromBlock: spectypes.NOT_APPLICABLE, toBlock: 1000, specificBlock: 999, valid: false, expectedElements: 0}, {name: "invalid latest only one saved block invalid range smaller", earliestBlock: 1000, latestBlock: 1000, fromBlock: spectypes.LATEST_BLOCK - 1, toBlock: spectypes.LATEST_BLOCK, specificBlock: spectypes.NOT_APPLICABLE, valid: false, expectedElements: 0}, {name: "invalid latest only one saved block invalid range size", earliestBlock: 1000, latestBlock: 1000, fromBlock: spectypes.LATEST_BLOCK - 2, toBlock: spectypes.LATEST_BLOCK, specificBlock: spectypes.NOT_APPLICABLE, 
valid: false, expectedElements: 0}, {name: "invalid latest only one saved block invalid specific smaller", earliestBlock: 1000, latestBlock: 1000, fromBlock: 1000, toBlock: 1001, specificBlock: spectypes.LATEST_BLOCK - 1, valid: false, expectedElements: 0}, - {name: "invalid latest only one saved block invalid specific smaller range N/A", earliestBlock: 1000, latestBlock: 1000, fromBlock: spectypes.NOT_APPLICABLE, toBlock: 1001, specificBlock: spectypes.LATEST_BLOCK - 1, valid: false, expectedElements: 0}, + {name: "invalid latest only one saved block invalid specific smaller range N/A", earliestBlock: 1000, latestBlock: 1000, fromBlock: spectypes.NOT_APPLICABLE, toBlock: 1000, specificBlock: spectypes.LATEST_BLOCK - 1, valid: false, expectedElements: 0}, - {name: "invalid specific ten saved blocks range 5", earliestBlock: 1000, latestBlock: 1009, fromBlock: 1000, toBlock: 1005, specificBlock: 999, valid: false, expectedElements: 0}, - {name: "invalid specific ten saved blocks range 5 the end of the list", earliestBlock: 1000, latestBlock: 1009, fromBlock: 1005, toBlock: 1010, specificBlock: 999, valid: false, expectedElements: 0}, - {name: "invalid specific ten saved blocks range 10", earliestBlock: 1000, latestBlock: 1009, fromBlock: 1000, toBlock: 1010, specificBlock: 999, valid: false, expectedElements: 0}, - {name: "invalid specific latest ten saved blocks range 5", earliestBlock: 1000, latestBlock: 1009, fromBlock: 1000, toBlock: 1005, specificBlock: spectypes.LATEST_BLOCK - 10, valid: false, expectedElements: 0}, - {name: "invalid specific latest ten saved blocks range 5 the end of the list", earliestBlock: 1000, latestBlock: 1009, fromBlock: 1005, toBlock: 1010, specificBlock: spectypes.LATEST_BLOCK - 10, valid: false, expectedElements: 0}, - {name: "invalid specific latest ten saved blocks range 10", earliestBlock: 1000, latestBlock: 1009, fromBlock: 1000, toBlock: 1010, specificBlock: spectypes.LATEST_BLOCK - 10, valid: false, expectedElements: 0}, + {name: "invalid specific ten saved blocks range 5", earliestBlock: 1000, latestBlock: 1009, fromBlock: 1000, toBlock: 1004, specificBlock: 999, valid: false, expectedElements: 0}, + {name: "invalid specific ten saved blocks range 5 the end of the list", earliestBlock: 1000, latestBlock: 1009, fromBlock: 1005, toBlock: 1009, specificBlock: 999, valid: false, expectedElements: 0}, + {name: "invalid specific ten saved blocks range 10", earliestBlock: 1000, latestBlock: 1009, fromBlock: 1000, toBlock: 1009, specificBlock: 999, valid: false, expectedElements: 0}, + {name: "invalid specific latest ten saved blocks range 5", earliestBlock: 1000, latestBlock: 1009, fromBlock: 1000, toBlock: 1004, specificBlock: spectypes.LATEST_BLOCK - 10, valid: false, expectedElements: 0}, + {name: "invalid specific latest ten saved blocks range 5 the end of the list", earliestBlock: 1000, latestBlock: 1009, fromBlock: 1005, toBlock: 1009, specificBlock: spectypes.LATEST_BLOCK - 10, valid: false, expectedElements: 0}, + {name: "invalid specific latest ten saved blocks range 10", earliestBlock: 1000, latestBlock: 1009, fromBlock: 1000, toBlock: 1009, specificBlock: spectypes.LATEST_BLOCK - 10, valid: false, expectedElements: 0}, {name: "invalid range ten saved blocks smaller no range overlap", earliestBlock: 1000, latestBlock: 1009, fromBlock: 900, toBlock: 905, specificBlock: spectypes.NOT_APPLICABLE, valid: false, expectedElements: 0}, {name: "invalid range ten saved blocks smaller with range overlap", earliestBlock: 1000, latestBlock: 1009, fromBlock: 
999, toBlock: 1004, specificBlock: spectypes.NOT_APPLICABLE, valid: false, expectedElements: 0}, {name: "invalid range ten saved blocks bigger no range overlap", earliestBlock: 1000, latestBlock: 1009, fromBlock: 1010, toBlock: 1015, specificBlock: spectypes.NOT_APPLICABLE, valid: false, expectedElements: 0}, diff --git a/protocol/rpcprovider/rpcprovider_server.go b/protocol/rpcprovider/rpcprovider_server.go index 9b690d9784..de99cbe8b1 100644 --- a/protocol/rpcprovider/rpcprovider_server.go +++ b/protocol/rpcprovider/rpcprovider_server.go @@ -487,11 +487,19 @@ func (rpcps *RPCProviderServer) TryRelay(ctx context.Context, request *pairingty // Add latest block and finalization data var err error _, _, blockDistanceToFinalization, blocksInFinalizationData := rpcps.chainParser.ChainBlockStats() - fromBlock := spectypes.LATEST_BLOCK - int64(blockDistanceToFinalization) - int64(blocksInFinalizationData) toBlock := spectypes.LATEST_BLOCK - int64(blockDistanceToFinalization) + fromBlock := toBlock - int64(blocksInFinalizationData) + 1 latestBlock, requestedHashes, err := rpcps.reliabilityManager.GetLatestBlockData(fromBlock, toBlock, request.RequestBlock) if err != nil { - return nil, utils.LavaFormatError("Could not guarantee data reliability", err, &map[string]string{"requestedBlock": strconv.FormatInt(request.RequestBlock, 10), "latestBlock": strconv.FormatInt(latestBlock, 10)}) + if chaintracker.InvalidRequestedSpecificBlock.Is(err) { + // specific block is invalid, try again without specific block + latestBlock, requestedHashes, err = rpcps.reliabilityManager.GetLatestBlockData(fromBlock, toBlock, spectypes.NOT_APPLICABLE) + if err != nil { + return nil, utils.LavaFormatError("error getting range even without specific block", err, &map[string]string{"fromBlock": strconv.FormatInt(fromBlock, 10), "latestBlock": strconv.FormatInt(latestBlock, 10), "toBlock": strconv.FormatInt(toBlock, 10)}) + } + } else { + return nil, utils.LavaFormatError("Could not guarantee data reliability", err, &map[string]string{"requestedBlock": strconv.FormatInt(request.RequestBlock, 10), "latestBlock": strconv.FormatInt(latestBlock, 10), "fromBlock": strconv.FormatInt(fromBlock, 10), "toBlock": strconv.FormatInt(toBlock, 10)}) + } } request.RequestBlock = lavaprotocol.ReplaceRequestedBlock(request.RequestBlock, latestBlock) for _, block := range requestedHashes { diff --git a/protocol/statetracker/state_query.go b/protocol/statetracker/state_query.go index 833bb35dc6..cd4eda5886 100644 --- a/protocol/statetracker/state_query.go +++ b/protocol/statetracker/state_query.go @@ -83,9 +83,9 @@ func (csq *ConsumerStateQuery) GetPairing(ctx context.Context, chainID string, l if cachedResp, ok := cachedInterface.(*pairingtypes.QueryGetPairingResponse); ok { if cachedResp.BlockOfNextPairing > uint64(latestBlock) { return cachedResp.Providers, cachedResp.CurrentEpoch, cachedResp.BlockOfNextPairing, nil - } else { - utils.LavaFormatError("invalid cache entry - failed casting response", nil, &map[string]string{"castingType": "*pairingtypes.QueryGetPairingResponse", "type": fmt.Sprintf("%t", cachedInterface)}) } + } else { + utils.LavaFormatError("invalid cache entry - failed casting response", nil, &map[string]string{"castingType": "*pairingtypes.QueryGetPairingResponse", "type": fmt.Sprintf("%T", cachedInterface)}) } } @@ -128,7 +128,7 @@ func (psq *ProviderStateQuery) GetVrfPkAndMaxCuForUser(ctx context.Context, cons if cachedResp, ok := cachedInterface.(*pairingtypes.QueryUserEntryResponse); ok { userEntryRes = cachedResp } 
else { - utils.LavaFormatError("invalid cache entry - failed casting response", nil, &map[string]string{"castingType": "*pairingtypes.QueryUserEntryResponse", "type": fmt.Sprintf("%t", cachedInterface)}) + utils.LavaFormatError("invalid cache entry - failed casting response", nil, &map[string]string{"castingType": "*pairingtypes.QueryUserEntryResponse", "type": fmt.Sprintf("%T", cachedInterface)}) } } if userEntryRes == nil { @@ -237,7 +237,7 @@ func (psq *ProviderStateQuery) VerifyPairing(ctx context.Context, consumerAddres if cachedResp, ok := cachedInterface.(*pairingtypes.QueryVerifyPairingResponse); ok { verifyResponse = cachedResp } else { - utils.LavaFormatError("invalid cache entry - failed casting response", nil, &map[string]string{"castingType": "*pairingtypes.QueryVerifyPairingResponse", "type": fmt.Sprintf("%t", cachedInterface)}) + utils.LavaFormatError("invalid cache entry - failed casting response", nil, &map[string]string{"castingType": "*pairingtypes.QueryVerifyPairingResponse", "type": fmt.Sprintf("%T", cachedInterface)}) } } if verifyResponse == nil { From 509844c432dcd2ed2838221201a65bbaee2838ca Mon Sep 17 00:00:00 2001 From: omer mishael Date: Wed, 22 Feb 2023 19:36:31 +0200 Subject: [PATCH 062/123] new configurations for rpcprovider --- config/provider_examples/lava_example.yml | 2 +- config/rpcconsumer.yml | 9 +++++++ scripts/init_chain_commands.sh | 30 +++++++++++------------ 3 files changed, 25 insertions(+), 16 deletions(-) diff --git a/config/provider_examples/lava_example.yml b/config/provider_examples/lava_example.yml index 9f8a4ff284..2de5ddaaa6 100644 --- a/config/provider_examples/lava_example.yml +++ b/config/provider_examples/lava_example.yml @@ -7,7 +7,7 @@ endpoints: - http://127.0.0.1:26657 - api-interface: grpc chain-id: LAV1 - network-address: 127.0.0.1:2222 + network-address: 127.0.0.1:2221 node-url: 127.0.0.1:9090 - api-interface: rest chain-id: LAV1 diff --git a/config/rpcconsumer.yml b/config/rpcconsumer.yml index 049cb0627c..95086d122a 100644 --- a/config/rpcconsumer.yml +++ b/config/rpcconsumer.yml @@ -1,4 +1,13 @@ endpoints: + - chain-id: LAV1 + api-interface: tendermintrpc + network-address: 127.0.0.1:3341 + - chain-id: LAV1 + api-interface: rest + network-address: 127.0.0.1:3340 + - chain-id: LAV1 + api-interface: grpc + network-address: 127.0.0.1:3352 - chain-id: COS3 api-interface: tendermintrpc network-address: 127.0.0.1:3335 diff --git a/scripts/init_chain_commands.sh b/scripts/init_chain_commands.sh index 277474a94c..262dd3f913 100755 --- a/scripts/init_chain_commands.sh +++ b/scripts/init_chain_commands.sh @@ -43,73 +43,73 @@ lavad tx pairing stake-provider "ETH1" $PROVIDERSTAKE "127.0.0.1:2224,jsonrpc,1" lavad tx pairing stake-provider "ETH1" $PROVIDERSTAKE "127.0.0.1:2225,jsonrpc,1" 1 -y --from servicer5 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE #Goerli providers -lavad tx pairing stake-provider "GTH1" $PROVIDERSTAKE "127.0.0.1:2121,jsonrpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "GTH1" $PROVIDERSTAKE "127.0.0.1:2221,jsonrpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE lavad tx pairing stake-provider "GTH1" $PROVIDERSTAKE "127.0.0.1:2122,jsonrpc,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE lavad tx pairing stake-provider "GTH1" $PROVIDERSTAKE "127.0.0.1:2123,jsonrpc,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE lavad tx pairing 
stake-provider "GTH1" $PROVIDERSTAKE "127.0.0.1:2124,jsonrpc,1" 1 -y --from servicer4 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE lavad tx pairing stake-provider "GTH1" $PROVIDERSTAKE "127.0.0.1:2125,jsonrpc,1" 1 -y --from servicer5 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE # Fantom providers -lavad tx pairing stake-provider "FTM250" $PROVIDERSTAKE "127.0.0.1:2251,jsonrpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "FTM250" $PROVIDERSTAKE "127.0.0.1:2221,jsonrpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE lavad tx pairing stake-provider "FTM250" $PROVIDERSTAKE "127.0.0.1:2252,jsonrpc,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE lavad tx pairing stake-provider "FTM250" $PROVIDERSTAKE "127.0.0.1:2253,jsonrpc,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE lavad tx pairing stake-provider "FTM250" $PROVIDERSTAKE "127.0.0.1:2254,jsonrpc,1" 1 -y --from servicer4 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE lavad tx pairing stake-provider "FTM250" $PROVIDERSTAKE "127.0.0.1:2255,jsonrpc,1" 1 -y --from servicer5 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE # Celo providers -lavad tx pairing stake-provider "CELO" $PROVIDERSTAKE "127.0.0.1:5241,jsonrpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "CELO" $PROVIDERSTAKE "127.0.0.1:2221,jsonrpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE lavad tx pairing stake-provider "CELO" $PROVIDERSTAKE "127.0.0.1:5242,jsonrpc,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE lavad tx pairing stake-provider "CELO" $PROVIDERSTAKE "127.0.0.1:5243,jsonrpc,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE #Celo alfahores testnet providers -lavad tx pairing stake-provider "ALFAJORES" $PROVIDERSTAKE "127.0.0.1:6241,jsonrpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "ALFAJORES" $PROVIDERSTAKE "127.0.0.1:2221,jsonrpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE lavad tx pairing stake-provider "ALFAJORES" $PROVIDERSTAKE "127.0.0.1:6242,jsonrpc,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE lavad tx pairing stake-provider "ALFAJORES" $PROVIDERSTAKE "127.0.0.1:6243,jsonrpc,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE #Arbitrum mainet providers -lavad tx pairing stake-provider "ARB1" $PROVIDERSTAKE "127.0.0.1:7241,jsonrpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "ARB1" $PROVIDERSTAKE "127.0.0.1:2221,jsonrpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE lavad tx pairing stake-provider "ARB1" $PROVIDERSTAKE "127.0.0.1:7242,jsonrpc,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE lavad tx pairing stake-provider "ARB1" $PROVIDERSTAKE "127.0.0.1:7243,jsonrpc,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE #Aptos mainet providers -lavad tx pairing stake-provider "APT1" $PROVIDERSTAKE "127.0.0.1:10031,rest,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx 
pairing stake-provider "APT1" $PROVIDERSTAKE "127.0.0.1:2221,rest,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE lavad tx pairing stake-provider "APT1" $PROVIDERSTAKE "127.0.0.1:10032,rest,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE lavad tx pairing stake-provider "APT1" $PROVIDERSTAKE "127.0.0.1:10033,rest,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE #Starknet mainet providers -lavad tx pairing stake-provider "STRK" $PROVIDERSTAKE "127.0.0.1:8241,jsonrpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "STRK" $PROVIDERSTAKE "127.0.0.1:2221,jsonrpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE lavad tx pairing stake-provider "STRK" $PROVIDERSTAKE "127.0.0.1:8242,jsonrpc,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE lavad tx pairing stake-provider "STRK" $PROVIDERSTAKE "127.0.0.1:8243,jsonrpc,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE # Polygon Providers -lavad tx pairing stake-provider "POLYGON1" $PROVIDERSTAKE "127.0.0.1:4344,jsonrpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "POLYGON1" $PROVIDERSTAKE "127.0.0.1:2221,jsonrpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE lavad tx pairing stake-provider "POLYGON1" $PROVIDERSTAKE "127.0.0.1:4345,jsonrpc,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE lavad tx pairing stake-provider "POLYGON1" $PROVIDERSTAKE "127.0.0.1:4346,jsonrpc,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE # Cosmos Chains: # Osmosis providers -lavad tx pairing stake-provider "COS3" $PROVIDERSTAKE "127.0.0.1:2241,tendermintrpc,1 127.0.0.1:2231,rest,1 127.0.0.1:2234,grpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "COS3" $PROVIDERSTAKE "127.0.0.1:2221,tendermintrpc,1 127.0.0.1:2221,rest,1 127.0.0.1:2221,grpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE lavad tx pairing stake-provider "COS3" $PROVIDERSTAKE "127.0.0.1:2242,tendermintrpc,1 127.0.0.1:2232,rest,1 127.0.0.1:2235,grpc,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE lavad tx pairing stake-provider "COS3" $PROVIDERSTAKE "127.0.0.1:2243,tendermintrpc,1 127.0.0.1:2233,rest,1 127.0.0.1:2236,grpc,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE # Lava Providers -lavad tx pairing stake-provider "LAV1" $PROVIDERSTAKE "127.0.0.1:2261,tendermintrpc,1 127.0.0.1:2271,rest,1 127.0.0.1:2274,grpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "LAV1" $PROVIDERSTAKE "127.0.0.1:2221,tendermintrpc,1 127.0.0.1:2221,rest,1 127.0.0.1:2221,grpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE lavad tx pairing stake-provider "LAV1" $PROVIDERSTAKE "127.0.0.1:2262,tendermintrpc,1 127.0.0.1:2272,rest,1 127.0.0.1:2275,grpc,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-provider "LAV1" $PROVIDERSTAKE "127.0.0.1:2263,tendermintrpc,1 127.0.0.1:2273,rest,1 127.0.0.1:2276,grpc,1" 1 -y --from servicer3 --gas-adjustment "1.5" 
--gas "auto" --gas-prices $GASPRICE +# lavad tx pairing stake-provider "LAV1" $PROVIDERSTAKE "127.0.0.1:2263,tendermintrpc,1 127.0.0.1:2273,rest,1 127.0.0.1:2276,grpc,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE # Juno providers -lavad tx pairing stake-provider "JUN1" $PROVIDERSTAKE "127.0.0.1:2361,tendermintrpc,1 127.0.0.1:2371,rest,1 127.0.0.1:2374,grpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "JUN1" $PROVIDERSTAKE "127.0.0.1:2221,tendermintrpc,1 127.0.0.1:2221,rest,1 127.0.0.1:2221,grpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE lavad tx pairing stake-provider "JUN1" $PROVIDERSTAKE "127.0.0.1:2362,tendermintrpc,1 127.0.0.1:2372,rest,1 127.0.0.1:2375,grpc,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE lavad tx pairing stake-provider "JUN1" $PROVIDERSTAKE "127.0.0.1:2363,tendermintrpc,1 127.0.0.1:2373,rest,1 127.0.0.1:2376,grpc,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE # Osmosis testnet providers -lavad tx pairing stake-provider "COS4" $PROVIDERSTAKE "127.0.0.1:4241,tendermintrpc,1 127.0.0.1:4231,rest,1 127.0.0.1:4234,grpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "COS4" $PROVIDERSTAKE "127.0.0.1:2221,tendermintrpc,1 127.0.0.1:2221,rest,1 127.0.0.1:2221,grpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE lavad tx pairing stake-provider "COS4" $PROVIDERSTAKE "127.0.0.1:4242,tendermintrpc,1 127.0.0.1:4232,rest,1 127.0.0.1:4235,grpc,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE lavad tx pairing stake-provider "COS4" $PROVIDERSTAKE "127.0.0.1:4243,tendermintrpc,1 127.0.0.1:4233,rest,1 127.0.0.1:4236,grpc,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE # Cosmoshub Providers -lavad tx pairing stake-provider "COS5" $PROVIDERSTAKE "127.0.0.1:2344,tendermintrpc,1 127.0.0.1:2331,rest,1 127.0.0.1:2334,grpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "COS5" $PROVIDERSTAKE "127.0.0.1:2221,tendermintrpc,1 127.0.0.1:2221,rest,1 127.0.0.1:2221,grpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE lavad tx pairing stake-provider "COS5" $PROVIDERSTAKE "127.0.0.1:2342,tendermintrpc,1 127.0.0.1:2332,rest,1 127.0.0.1:2335,grpc,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE lavad tx pairing stake-provider "COS5" $PROVIDERSTAKE "127.0.0.1:2343,tendermintrpc,1 127.0.0.1:2333,rest,1 127.0.0.1:2336,grpc,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE @@ -120,4 +120,4 @@ lavad query pairing clients "ETH1" # we need to wait for the next epoch for the stake to take action. sleep_until_next_epoch -. ${__dir}/setup_providers.sh +# . 
${__dir}/setup_providers.sh From 58ffac4344234d7ebea2f93656d6cb1c23a58dc7 Mon Sep 17 00:00:00 2001 From: omer mishael Date: Thu, 23 Feb 2023 01:12:33 +0200 Subject: [PATCH 063/123] change lava conf --- config/rpcprovider.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/rpcprovider.yml b/config/rpcprovider.yml index 27bd4c12b5..c09e250881 100644 --- a/config/rpcprovider.yml +++ b/config/rpcprovider.yml @@ -7,7 +7,7 @@ endpoints: - http://127.0.0.1:26657 - api-interface: grpc chain-id: LAV1 - network-address: 127.0.0.1:2222 + network-address: 127.0.0.1:2221 node-url: 127.0.0.1:9090 - api-interface: rest chain-id: LAV1 From 913d084be649d508a84f51b8c662846be002079e Mon Sep 17 00:00:00 2001 From: omer mishael Date: Sun, 5 Mar 2023 13:26:39 +0200 Subject: [PATCH 064/123] fix reference overwriting --- protocol/rpcprovider/rpcprovider_server.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/protocol/rpcprovider/rpcprovider_server.go b/protocol/rpcprovider/rpcprovider_server.go index de99cbe8b1..2bed4361cd 100644 --- a/protocol/rpcprovider/rpcprovider_server.go +++ b/protocol/rpcprovider/rpcprovider_server.go @@ -489,7 +489,8 @@ func (rpcps *RPCProviderServer) TryRelay(ctx context.Context, request *pairingty _, _, blockDistanceToFinalization, blocksInFinalizationData := rpcps.chainParser.ChainBlockStats() toBlock := spectypes.LATEST_BLOCK - int64(blockDistanceToFinalization) fromBlock := toBlock - int64(blocksInFinalizationData) + 1 - latestBlock, requestedHashes, err := rpcps.reliabilityManager.GetLatestBlockData(fromBlock, toBlock, request.RequestBlock) + var requestedHashes []*chaintracker.BlockStore + latestBlock, requestedHashes, err = rpcps.reliabilityManager.GetLatestBlockData(fromBlock, toBlock, request.RequestBlock) if err != nil { if chaintracker.InvalidRequestedSpecificBlock.Is(err) { // specific block is invalid, try again without specific block From 9b5894cfa85d8a82dcd35f725a58102d5332964b Mon Sep 17 00:00:00 2001 From: omer mishael Date: Sun, 5 Mar 2023 14:15:05 +0200 Subject: [PATCH 065/123] handle initialization on ProviderSessionManager --- .../lavasession/provider_session_manager.go | 29 ++++++++++++++----- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/protocol/lavasession/provider_session_manager.go b/protocol/lavasession/provider_session_manager.go index 6cefe736a4..eaa1361c62 100644 --- a/protocol/lavasession/provider_session_manager.go +++ b/protocol/lavasession/provider_session_manager.go @@ -10,7 +10,7 @@ import ( type ProviderSessionManager struct { sessionsWithAllConsumers map[uint64]map[string]*ProviderSessionsWithConsumer // first key is epochs, second key is a consumer address - dataReliabilitySessionsWithAllConsumers map[uint64]map[string]*ProviderSessionsWithConsumer // first key is epochs, second key is a consumer address + dataReliabilitySessionsWithAllConsumers map[uint64]map[string]*ProviderSessionsWithConsumer // separate handling of data reliability so later on we can use it outside of pairing, first key is epochs, second key is a consumer address subscriptionSessionsWithAllConsumers map[uint64]map[string]map[string]*RPCSubscription // first key is an epoch, second key is a consumer address, third key is subscriptionId lock sync.RWMutex blockedEpochHeight uint64 // requests from this epoch are blocked @@ -217,6 +217,7 @@ func (psm *ProviderSessionManager) RPCProviderEndpoint() *RPCProviderEndpoint { return psm.rpcProviderEndpoint } +// on a new epoch we are cleaning stale provider data, also we are 
making sure consumers who are trying to use past data are not capable to func (psm *ProviderSessionManager) UpdateEpoch(epoch uint64) { psm.lock.Lock() defer psm.lock.Unlock() @@ -225,18 +226,24 @@ func (psm *ProviderSessionManager) UpdateEpoch(epoch uint64) { } else { psm.blockedEpochHeight = 0 } - newMap := make(map[uint64]map[string]*ProviderSessionsWithConsumer) + psm.sessionsWithAllConsumers = filterOldEpochEntries(psm.blockedEpochHeight, psm.sessionsWithAllConsumers) + psm.dataReliabilitySessionsWithAllConsumers = filterOldEpochEntries(psm.blockedEpochHeight, psm.dataReliabilitySessionsWithAllConsumers) + psm.subscriptionSessionsWithAllConsumers = filterOldEpochEntries(psm.blockedEpochHeight, psm.subscriptionSessionsWithAllConsumers) +} + +func filterOldEpochEntries[T any](blockedEpochHeight uint64, allEpochsMap map[uint64]T) (validEpochsMap map[uint64]T) { // In order to avoid running over the map twice, (1. mark 2. delete.) better technique is to copy and filter // which has better O(n) vs O(2n) - for epochStored, value := range psm.sessionsWithAllConsumers { - if !IsEpochValidForUse(epochStored, psm.blockedEpochHeight) { - // epoch is not valid so we dont keep its key in the new map + validEpochsMap = map[uint64]T{} + for epochStored, value := range allEpochsMap { + if !IsEpochValidForUse(epochStored, blockedEpochHeight) { + // epoch is not valid so we don't keep its key in the new map continue } // if epochStored is ok, copy the value into the new map - newMap[epochStored] = value + validEpochsMap[epochStored] = value } - psm.sessionsWithAllConsumers = newMap + return } func (psm *ProviderSessionManager) ProcessUnsubscribe(apiName string, subscriptionID string, consumerAddress string, epoch uint64) error { @@ -326,7 +333,13 @@ func (psm *ProviderSessionManager) UpdateSessionCU(consumerAddress string, epoch // Returning a new provider session manager func NewProviderSessionManager(rpcProviderEndpoint *RPCProviderEndpoint, numberOfBlocksKeptInMemory uint64) *ProviderSessionManager { - return &ProviderSessionManager{rpcProviderEndpoint: rpcProviderEndpoint, blockDistanceForEpochValidity: numberOfBlocksKeptInMemory} + return &ProviderSessionManager{ + rpcProviderEndpoint: rpcProviderEndpoint, + blockDistanceForEpochValidity: numberOfBlocksKeptInMemory, + sessionsWithAllConsumers: map[uint64]map[string]*ProviderSessionsWithConsumer{}, + dataReliabilitySessionsWithAllConsumers: map[uint64]map[string]*ProviderSessionsWithConsumer{}, + subscriptionSessionsWithAllConsumers: map[uint64]map[string]map[string]*RPCSubscription{}, + } } func IsEpochValidForUse(targetEpoch uint64, blockedEpochHeight uint64) bool { From 82ca39feba9504385344e4714e253aa03da05e96 Mon Sep 17 00:00:00 2001 From: omer mishael Date: Sun, 5 Mar 2023 15:36:44 +0200 Subject: [PATCH 066/123] fix providerSessionsManager bugs --- cookbook/spec_add_cosmoshub.json | 4 ++-- .../lavasession/provider_session_manager.go | 14 +++---------- protocol/lavasession/provider_types.go | 21 +++++++++++++++++-- 3 files changed, 24 insertions(+), 15 deletions(-) diff --git a/cookbook/spec_add_cosmoshub.json b/cookbook/spec_add_cosmoshub.json index 680318a5df..7dc6274f82 100644 --- a/cookbook/spec_add_cosmoshub.json +++ b/cookbook/spec_add_cosmoshub.json @@ -556,9 +556,9 @@ "name": "status", "block_parsing": { "parser_arg": [ - "" + "latest" ], - "parser_func": "EMPTY" + "parser_func": "DEFAULT" }, "compute_units": "10", "enabled": true, diff --git a/protocol/lavasession/provider_session_manager.go 
b/protocol/lavasession/provider_session_manager.go index eaa1361c62..24d5a8580c 100644 --- a/protocol/lavasession/provider_session_manager.go +++ b/protocol/lavasession/provider_session_manager.go @@ -47,14 +47,13 @@ func (psm *ProviderSessionManager) IsActiveConsumer(epoch uint64, address string func (psm *ProviderSessionManager) getSingleSessionFromProviderSessionWithConsumer(providerSessionWithConsumer *ProviderSessionsWithConsumer, sessionId uint64, epoch uint64, relayNumber uint64) (*SingleProviderSession, error) { if providerSessionWithConsumer.atomicReadConsumerBlocked() != notBlockListedConsumer { - return nil, utils.LavaFormatError("This consumer address is blocked.", nil, &map[string]string{"RequestedEpoch": strconv.FormatUint(epoch, 10), "consumer": providerSessionWithConsumer.consumer}) + return nil, utils.LavaFormatError("This consumer address is blocked.", nil, &map[string]string{"RequestedEpoch": strconv.FormatUint(epoch, 10), "consumer": providerSessionWithConsumer.consumerAddr}) } // before getting any sessions. singleProviderSession, err := psm.getSessionFromAnActiveConsumer(providerSessionWithConsumer, sessionId, epoch) // after getting session verify relayNum etc.. if err != nil { return nil, utils.LavaFormatError("getSessionFromAnActiveConsumer Failure", err, &map[string]string{"RequestedEpoch": strconv.FormatUint(epoch, 10), "sessionId": strconv.FormatUint(sessionId, 10)}) } - if singleProviderSession.RelayNum+1 < relayNumber { // validate relay number here, but add only in PrepareSessionForUsage return nil, utils.LavaFormatError("singleProviderSession.RelayNum mismatch, session out of sync", SessionOutOfSyncError, &map[string]string{"singleProviderSession.RelayNum": strconv.FormatUint(singleProviderSession.RelayNum+1, 10), "request.relayNumber": strconv.FormatUint(relayNumber, 10)}) } @@ -78,9 +77,7 @@ func (psm *ProviderSessionManager) getOrCreateDataReliabilitySessionWithConsumer } // If we got here, we need to create a new instance for this consumer address. 
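// The replacement just below swaps a bare &ProviderSessionsWithConsumer{...} literal for the
// NewProviderSessionsWithConsumer constructor added in provider_types.go in this same patch.
// Since Sessions is a map field, the constructor guarantees that map is initialized, so a later
// session write cannot panic on a nil map. A minimal sketch of that pitfall and the constructor
// pattern, using hypothetical names rather than the repo's actual types:

package main

import "fmt"

type singleSession struct{ id uint64 }

type sessionsWithConsumer struct {
	sessions map[uint64]*singleSession // stays nil when built as a bare struct literal
}

// newSessionsWithConsumer mirrors the constructor pattern: it always initializes
// the inner map so later writes cannot hit a nil map.
func newSessionsWithConsumer() *sessionsWithConsumer {
	return &sessionsWithConsumer{sessions: map[uint64]*singleSession{}}
}

func main() {
	literal := &sessionsWithConsumer{}
	// literal.sessions[7] = &singleSession{id: 7} // would panic: assignment to entry in nil map
	_ = literal

	constructed := newSessionsWithConsumer()
	constructed.sessions[7] = &singleSession{id: 7} // safe: the map was initialized
	fmt.Println(len(constructed.sessions))
}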
- providerSessionWithConsumer = &ProviderSessionsWithConsumer{ - consumer: address, - } + providerSessionWithConsumer = NewProviderSessionsWithConsumer(address, nil) psm.dataReliabilitySessionsWithAllConsumers[epoch][address] = providerSessionWithConsumer return providerSessionWithConsumer, nil } @@ -140,12 +137,7 @@ func (psm *ProviderSessionManager) registerNewSession(address string, epoch uint providerSessionWithConsumer, foundAddressInMap := mapOfProviderSessionsWithConsumer[address] if !foundAddressInMap { - providerSessionWithConsumer = &ProviderSessionsWithConsumer{ - consumer: address, - epochData: &ProviderSessionsEpochData{ - MaxComputeUnits: maxCuForConsumer, - }, - } + providerSessionWithConsumer = NewProviderSessionsWithConsumer(address, &ProviderSessionsEpochData{MaxComputeUnits: maxCuForConsumer}) mapOfProviderSessionsWithConsumer[address] = providerSessionWithConsumer } return providerSessionWithConsumer, nil diff --git a/protocol/lavasession/provider_types.go b/protocol/lavasession/provider_types.go index d6c270d267..bdf3fb5b1d 100644 --- a/protocol/lavasession/provider_types.go +++ b/protocol/lavasession/provider_types.go @@ -52,7 +52,7 @@ const ( type ProviderSessionsWithConsumer struct { Sessions map[uint64]*SingleProviderSession isBlockListed uint32 - consumer string + consumerAddr string epochData *ProviderSessionsEpochData Lock sync.RWMutex } @@ -67,6 +67,16 @@ type SingleProviderSession struct { PairingEpoch uint64 } +func NewProviderSessionsWithConsumer(consumerAddr string, epochData *ProviderSessionsEpochData) *ProviderSessionsWithConsumer { + pswc := &ProviderSessionsWithConsumer{ + Sessions: map[uint64]*SingleProviderSession{}, + isBlockListed: 0, + consumerAddr: consumerAddr, + epochData: epochData, + } + return pswc +} + // reads cs.BlockedEpoch atomically, notBlockListedConsumer = 0, blockListedConsumer = 1 func (pswc *ProviderSessionsWithConsumer) atomicWriteConsumerBlocked(blockStatus uint32) { // rename to blocked consumer not blocked epoch atomic.StoreUint32(&pswc.isBlockListed, blockStatus) @@ -97,16 +107,24 @@ func (pswc *ProviderSessionsWithConsumer) atomicCompareAndWriteUsedComputeUnits( return atomic.CompareAndSwapUint64(&pswc.epochData.UsedComputeUnits, knownUsed, newUsed) } +// create a new session with a consumer, and store it inside it's providerSessions parent func (pswc *ProviderSessionsWithConsumer) createNewSingleProviderSession(sessionId uint64, epoch uint64) (session *SingleProviderSession, err error) { + utils.LavaFormatDebug("Provider creating new sessionID", &map[string]string{"SessionID": strconv.FormatUint(sessionId, 10), "epoch": strconv.FormatUint(epoch, 10)}) session = &SingleProviderSession{ userSessionsParent: pswc, SessionID: sessionId, PairingEpoch: epoch, } + pswc.Lock.Lock() + defer pswc.Lock.Unlock() + // this is a double lock and risky but we just created session and nobody has reference to it yet session.lock.Lock() + pswc.Sessions[sessionId] = session + // session is still locked when we return it return session, nil } +// this function returns the session locked to be used func (pswc *ProviderSessionsWithConsumer) GetExistingSession(sessionId uint64) (session *SingleProviderSession, err error) { pswc.Lock.RLock() defer pswc.Lock.RUnlock() @@ -165,7 +183,6 @@ func (sps *SingleProviderSession) PrepareSessionForUsage(currentCU uint64, relay sps.LatestRelayCu = currentCU // 1. update latest sps.CuSum = relayRequestTotalCU // 2. update CuSum, if consumer wants to pay more, let it sps.RelayNum = sps.RelayNum + 1 // 3. 
update RelayNum, we already verified relayNum is valid in GetSession. - return nil } From ea56d6ea5f98f42757813e656b744705dfd4931a Mon Sep 17 00:00:00 2001 From: omer mishael Date: Sun, 5 Mar 2023 16:38:50 +0200 Subject: [PATCH 067/123] fixed wrong lock checks --- .../lavasession/provider_session_manager.go | 18 +++++++++--------- protocol/lavasession/provider_types.go | 5 ++--- protocol/rpcprovider/rpcprovider_server.go | 5 +++-- 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/protocol/lavasession/provider_session_manager.go b/protocol/lavasession/provider_session_manager.go index 24d5a8580c..ea6e118106 100644 --- a/protocol/lavasession/provider_session_manager.go +++ b/protocol/lavasession/provider_session_manager.go @@ -121,7 +121,7 @@ func (psm *ProviderSessionManager) GetSession(address string, epoch uint64, sess return psm.getSingleSessionFromProviderSessionWithConsumer(providerSessionWithConsumer, sessionId, epoch, relayNumber) } -func (psm *ProviderSessionManager) registerNewSession(address string, epoch uint64, sessionId uint64, maxCuForConsumer uint64) (*ProviderSessionsWithConsumer, error) { +func (psm *ProviderSessionManager) registerNewConsumer(consumerAddr string, epoch uint64, maxCuForConsumer uint64) (*ProviderSessionsWithConsumer, error) { psm.lock.Lock() defer psm.lock.Unlock() if !psm.IsValidEpoch(epoch) { // checking again because we are now locked and epoch cant change now. @@ -135,25 +135,26 @@ func (psm *ProviderSessionManager) registerNewSession(address string, epoch uint psm.sessionsWithAllConsumers[epoch] = mapOfProviderSessionsWithConsumer } - providerSessionWithConsumer, foundAddressInMap := mapOfProviderSessionsWithConsumer[address] + providerSessionWithConsumer, foundAddressInMap := mapOfProviderSessionsWithConsumer[consumerAddr] if !foundAddressInMap { - providerSessionWithConsumer = NewProviderSessionsWithConsumer(address, &ProviderSessionsEpochData{MaxComputeUnits: maxCuForConsumer}) - mapOfProviderSessionsWithConsumer[address] = providerSessionWithConsumer + providerSessionWithConsumer = NewProviderSessionsWithConsumer(consumerAddr, &ProviderSessionsEpochData{MaxComputeUnits: maxCuForConsumer}) + mapOfProviderSessionsWithConsumer[consumerAddr] = providerSessionWithConsumer } return providerSessionWithConsumer, nil } -func (psm *ProviderSessionManager) RegisterProviderSessionWithConsumer(address string, epoch uint64, sessionId uint64, relayNumber uint64, maxCuForConsumer uint64) (*SingleProviderSession, error) { - providerSessionWithConsumer, err := psm.IsActiveConsumer(epoch, address) +func (psm *ProviderSessionManager) RegisterProviderSessionWithConsumer(consumerAddress string, epoch uint64, sessionId uint64, relayNumber uint64, maxCuForConsumer uint64) (*SingleProviderSession, error) { + providerSessionWithConsumer, err := psm.IsActiveConsumer(epoch, consumerAddress) if err != nil { if ConsumerNotRegisteredYet.Is(err) { - providerSessionWithConsumer, err = psm.registerNewSession(address, epoch, sessionId, maxCuForConsumer) + providerSessionWithConsumer, err = psm.registerNewConsumer(consumerAddress, epoch, maxCuForConsumer) if err != nil { return nil, utils.LavaFormatError("RegisterProviderSessionWithConsumer Failed to registerNewSession", err, nil) } } else { return nil, utils.LavaFormatError("RegisterProviderSessionWithConsumer Failed", err, nil) } + utils.LavaFormatDebug("provider registered consumer", &map[string]string{"consumer": consumerAddress, "epoch": strconv.FormatUint(epoch, 10)}) } return 
psm.getSingleSessionFromProviderSessionWithConsumer(providerSessionWithConsumer, sessionId, epoch, relayNumber) } @@ -186,8 +187,7 @@ func (psm *ProviderSessionManager) getSessionFromAnActiveConsumer(providerSessio // if we don't have a session we need to create a new one. return providerSessionWithConsumer.createNewSingleProviderSession(sessionId, epoch) } else { - utils.LavaFormatFatal("GetExistingSession Unexpected Error", err, nil) - return nil, err + return nil, utils.LavaFormatError("could not get existing session", err, nil) } } diff --git a/protocol/lavasession/provider_types.go b/protocol/lavasession/provider_types.go index bdf3fb5b1d..d44b6b9fcc 100644 --- a/protocol/lavasession/provider_types.go +++ b/protocol/lavasession/provider_types.go @@ -130,9 +130,8 @@ func (pswc *ProviderSessionsWithConsumer) GetExistingSession(sessionId uint64) ( defer pswc.Lock.RUnlock() if session, ok := pswc.Sessions[sessionId]; ok { locked := session.lock.TryLock() - if locked { - defer session.lock.Unlock() - return nil, utils.LavaFormatError("GetExistingSession failed", LockMisUseDetectedError, nil) + if !locked { + return nil, utils.LavaFormatError("GetExistingSession failed to lock when getting session", LockMisUseDetectedError, nil) } return session, nil } diff --git a/protocol/rpcprovider/rpcprovider_server.go b/protocol/rpcprovider/rpcprovider_server.go index 2bed4361cd..b293f7179b 100644 --- a/protocol/rpcprovider/rpcprovider_server.go +++ b/protocol/rpcprovider/rpcprovider_server.go @@ -325,6 +325,7 @@ func (rpcps *RPCProviderServer) getSingleProviderSession(ctx context.Context, re singleProviderSession, err := rpcps.providerSessionManager.GetSession(consumerAddressString, uint64(request.BlockHeight), request.SessionId, request.RelayNum) if err != nil { if lavasession.ConsumerNotRegisteredYet.Is(err) { + valid, _, verifyPairingError := rpcps.stateTracker.VerifyPairing(ctx, consumerAddressString, rpcps.providerAddress.String(), uint64(request.BlockHeight), request.ChainID) if verifyPairingError != nil { return nil, utils.LavaFormatError("Failed to VerifyPairing after ConsumerNotRegisteredYet", verifyPairingError, &map[string]string{"sessionID": strconv.FormatUint(request.SessionId, 10), "consumer": consumerAddressString, "provider": rpcps.providerAddress.String(), "relayNum": strconv.FormatUint(request.RelayNum, 10)}) @@ -510,7 +511,7 @@ func (rpcps *RPCProviderServer) TryRelay(ctx context.Context, request *pairingty finalizedBlockHashes[block.Block] = block.Hash } } - if requestedBlockHash == nil { + if requestedBlockHash == nil && request.RequestBlock != spectypes.NOT_APPLICABLE { // avoid using cache, but can still service utils.LavaFormatWarning("no hash data for requested block", nil, &map[string]string{"requestedBlock": strconv.FormatInt(request.RequestBlock, 10), "latestBlock": strconv.FormatInt(latestBlock, 10)}) } @@ -540,7 +541,7 @@ func (rpcps *RPCProviderServer) TryRelay(ctx context.Context, request *pairingty } if requestedBlockHash != nil || finalized { err := cache.SetEntry(ctx, request, rpcps.rpcProviderEndpoint.ApiInterface, requestedBlockHash, rpcps.rpcProviderEndpoint.ChainID, consumerAddr.String(), reply, finalized) - if err != nil && !performance.NotInitialisedError.Is(err) { + if err != nil && !performance.NotInitialisedError.Is(err) && request.BlockHeight != spectypes.NOT_APPLICABLE { utils.LavaFormatWarning("error updating cache with new entry", err, nil) } } From 98e614fe1be15c5ab32e115b5f1b3a2cd2d616bb Mon Sep 17 00:00:00 2001 From: omer mishael Date: Sun, 5 Mar 
2023 16:58:05 +0200 Subject: [PATCH 068/123] fixed tx sender parsing issue --- protocol/statetracker/tx_sender.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/protocol/statetracker/tx_sender.go b/protocol/statetracker/tx_sender.go index ba73d791fd..142f284206 100644 --- a/protocol/statetracker/tx_sender.go +++ b/protocol/statetracker/tx_sender.go @@ -121,6 +121,7 @@ func (ts *TxSender) SimulateAndBroadCastTxWithRetryOnSeqMismatch(msg sdk.Msg, ch }) } var transactionResult string + clientCtx.Output = &myWriter err = tx.GenerateOrBroadcastTxWithFactory(clientCtx, txfactory, msg) if err != nil { utils.LavaFormatWarning("Sending CheckProfitabilityAndBroadCastTx failed", err, &map[string]string{ @@ -132,7 +133,7 @@ func (ts *TxSender) SimulateAndBroadCastTxWithRetryOnSeqMismatch(msg sdk.Msg, ch } var returnCode int summarizedTransactionResult, returnCode = common.ParseTransactionResult(transactionResult) - + // utils.LavaFormatDebug("parsed transaction code", &map[string]string{"code": strconv.Itoa(returnCode), "transactionResult": transactionResult}) if returnCode == 0 { // if we get some other code which isn't 0 then keep retrying success = true } else if strings.Contains(transactionResult, "account sequence") { From 7bf3098fb3e88ad2b6437b1fd7190308cd750465 Mon Sep 17 00:00:00 2001 From: omer mishael Date: Sun, 5 Mar 2023 17:35:54 +0200 Subject: [PATCH 069/123] changed IP:PORT to HOST:PORT --- cmd/lavad/main.go | 4 ++-- config/provider_examples/eth_example.yml | 5 +++++ protocol/chainlib/jsonRPC.go | 2 +- protocol/chainlib/tendermintRPC.go | 2 +- protocol/lavasession/consumer_types.go | 2 +- protocol/lavasession/provider_types.go | 2 +- relayer/chainproxy/jsonRPC.go | 2 +- relayer/chainproxy/tendermintRPC.go | 2 +- x/pairing/client/cli/tx_stake_provider.go | 2 +- 9 files changed, 14 insertions(+), 9 deletions(-) create mode 100644 config/provider_examples/eth_example.yml diff --git a/cmd/lavad/main.go b/cmd/lavad/main.go index e444a06cf8..dd9535c157 100644 --- a/cmd/lavad/main.go +++ b/cmd/lavad/main.go @@ -210,7 +210,7 @@ rpcconsumer 127.0.0.1:3333 COS3 tendermintrpc 127.0.0.1:3334 COS3 rest `, return nil } if len(args)%len(rpcconsumer.Yaml_config_properties) != 0 { - return fmt.Errorf("invalid number of arguments, either its a single config file or repeated groups of 3 IP:PORT chain-id api-interface") + return fmt.Errorf("invalid number of arguments, either its a single config file or repeated groups of 3 HOST:PORT chain-id api-interface") } return nil }, @@ -333,7 +333,7 @@ rpcprovider 127.0.0.1:3333 COS3 tendermintrpc "wss://www.node-path.com:80,https: return nil } if len(args)%len(rpcprovider.Yaml_config_properties) != 0 { - return fmt.Errorf("invalid number of arguments, either its a single config file or repeated groups of 4 IP:PORT chain-id api-interface [node_url,node_url_2]") + return fmt.Errorf("invalid number of arguments, either its a single config file or repeated groups of 4 HOST:PORT chain-id api-interface [node_url,node_url_2]") } return nil }, diff --git a/config/provider_examples/eth_example.yml b/config/provider_examples/eth_example.yml new file mode 100644 index 0000000000..956f91894b --- /dev/null +++ b/config/provider_examples/eth_example.yml @@ -0,0 +1,5 @@ +endpoints: + - api-interface: jsonrpc + chain-id: ETH1 + network-address: 127.0.0.1:2221 + node-url: wss://eth-rpc/ws \ No newline at end of file diff --git a/protocol/chainlib/jsonRPC.go b/protocol/chainlib/jsonRPC.go index 77c0a1d9fc..4891d0f923 100644 --- a/protocol/chainlib/jsonRPC.go +++ 
b/protocol/chainlib/jsonRPC.go @@ -287,7 +287,7 @@ func (apil *JsonRPCChainListener) Serve(ctx context.Context) { }) websocketCallbackWithDappID := constructFiberCallbackWithHeaderAndParameterExtraction(webSocketCallback, apil.logger.StoreMetricData) app.Get("/ws/:dappId", websocketCallbackWithDappID) - app.Get("/:dappId/websocket", websocketCallbackWithDappID) // catching http://ip:port/1/websocket requests. + app.Get("/:dappId/websocket", websocketCallbackWithDappID) // catching http://HOST:PORT/1/websocket requests. app.Post("/:dappId/*", func(c *fiber.Ctx) error { apil.logger.LogStartTransaction("jsonRpc-http post") diff --git a/protocol/chainlib/tendermintRPC.go b/protocol/chainlib/tendermintRPC.go index 9dfd3d9288..53ffc5e729 100644 --- a/protocol/chainlib/tendermintRPC.go +++ b/protocol/chainlib/tendermintRPC.go @@ -333,7 +333,7 @@ func (apil *TendermintRpcChainListener) Serve(ctx context.Context) { }) websocketCallbackWithDappID := constructFiberCallbackWithHeaderAndParameterExtraction(webSocketCallback, apil.logger.StoreMetricData) app.Get("/ws/:dappId", websocketCallbackWithDappID) - app.Get("/:dappId/websocket", websocketCallbackWithDappID) // catching http://ip:port/1/websocket requests. + app.Get("/:dappId/websocket", websocketCallbackWithDappID) // catching http://HOST:PORT/1/websocket requests. app.Post("/:dappId/*", func(c *fiber.Ctx) error { apil.logger.LogStartTransaction("tendermint-WebSocket") diff --git a/protocol/lavasession/consumer_types.go b/protocol/lavasession/consumer_types.go index 004b6e1f41..fef70c8e04 100644 --- a/protocol/lavasession/consumer_types.go +++ b/protocol/lavasession/consumer_types.go @@ -60,7 +60,7 @@ type Endpoint struct { } type RPCEndpoint struct { - NetworkAddress string `yaml:"network-address,omitempty" json:"network-address,omitempty" mapstructure:"network-address"` // IP:PORT + NetworkAddress string `yaml:"network-address,omitempty" json:"network-address,omitempty" mapstructure:"network-address"` // HOST:PORT ChainID string `yaml:"chain-id,omitempty" json:"chain-id,omitempty" mapstructure:"chain-id"` // spec chain identifier ApiInterface string `yaml:"api-interface,omitempty" json:"api-interface,omitempty" mapstructure:"api-interface"` Geolocation uint64 `yaml:"geolocation,omitempty" json:"geolocation,omitempty" mapstructure:"geolocation"` diff --git a/protocol/lavasession/provider_types.go b/protocol/lavasession/provider_types.go index d44b6b9fcc..bf316768a1 100644 --- a/protocol/lavasession/provider_types.go +++ b/protocol/lavasession/provider_types.go @@ -22,7 +22,7 @@ type ProviderSessionsEpochData struct { } type RPCProviderEndpoint struct { - NetworkAddress string `yaml:"network-address,omitempty" json:"network-address,omitempty" mapstructure:"network-address,omitempty"` // IP:PORT + NetworkAddress string `yaml:"network-address,omitempty" json:"network-address,omitempty" mapstructure:"network-address,omitempty"` // HOST:PORT ChainID string `yaml:"chain-id,omitempty" json:"chain-id,omitempty" mapstructure:"chain-id"` // spec chain identifier ApiInterface string `yaml:"api-interface,omitempty" json:"api-interface,omitempty" mapstructure:"api-interface"` Geolocation uint64 `yaml:"geolocation,omitempty" json:"geolocation,omitempty" mapstructure:"geolocation"` diff --git a/relayer/chainproxy/jsonRPC.go b/relayer/chainproxy/jsonRPC.go index 3b76dfa0c3..0f8fac5025 100644 --- a/relayer/chainproxy/jsonRPC.go +++ b/relayer/chainproxy/jsonRPC.go @@ -375,7 +375,7 @@ func (cp *JrpcChainProxy) PortalStart(ctx context.Context, privKey 
*btcec.Privat }) websocketCallbackWithDappID := constructFiberCallbackWithHeaderAndParameterExtraction(webSocketCallback, cp.portalLogs.StoreMetricData) app.Get("/ws/:dappId", websocketCallbackWithDappID) - app.Get("/:dappId/websocket", websocketCallbackWithDappID) // catching http://ip:port/1/websocket requests. + app.Get("/:dappId/websocket", websocketCallbackWithDappID) // catching http://HOST:PORT/1/websocket requests. app.Post("/:dappId/*", func(c *fiber.Ctx) error { cp.portalLogs.LogStartTransaction("jsonRpc-http post") diff --git a/relayer/chainproxy/tendermintRPC.go b/relayer/chainproxy/tendermintRPC.go index f7c6dd2a5e..5af8c76e9b 100644 --- a/relayer/chainproxy/tendermintRPC.go +++ b/relayer/chainproxy/tendermintRPC.go @@ -372,7 +372,7 @@ func (cp *tendermintRpcChainProxy) PortalStart(ctx context.Context, privKey *btc }) websocketCallbackWithDappID := constructFiberCallbackWithHeaderAndParameterExtraction(webSocketCallback, cp.portalLogs.StoreMetricData) app.Get("/ws/:dappId", websocketCallbackWithDappID) - app.Get("/:dappId/websocket", websocketCallbackWithDappID) // catching http://ip:port/1/websocket requests. + app.Get("/:dappId/websocket", websocketCallbackWithDappID) // catching http://HOST:PORT/1/websocket requests. app.Post("/:dappId/*", func(c *fiber.Ctx) error { cp.portalLogs.LogStartTransaction("tendermint-WebSocket") diff --git a/x/pairing/client/cli/tx_stake_provider.go b/x/pairing/client/cli/tx_stake_provider.go index 2343d9ecc8..e33d2156b9 100644 --- a/x/pairing/client/cli/tx_stake_provider.go +++ b/x/pairing/client/cli/tx_stake_provider.go @@ -35,7 +35,7 @@ func CmdStakeProvider() *cobra.Command { for _, endpointStr := range tmpArg { splitted := strings.Split(endpointStr, ",") if len(splitted) != 3 { - return fmt.Errorf("invalid argument format in endpoints, must be: IP:PORT,useType,geolocation IP:PORT,useType,geolocation") + return fmt.Errorf("invalid argument format in endpoints, must be: HOST:PORT,useType,geolocation HOST:PORT,useType,geolocation") } geoloc, err := strconv.ParseUint(splitted[2], 10, 64) if err != nil { From 037559261fbc1ff93eb8f7c57c6db5912c35ce5f Mon Sep 17 00:00:00 2001 From: omer mishael Date: Sun, 5 Mar 2023 17:37:36 +0200 Subject: [PATCH 070/123] do not crash if cache isn't connected --- relayer/performance/cache.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/relayer/performance/cache.go b/relayer/performance/cache.go index ef712c37e4..47da57fc59 100644 --- a/relayer/performance/cache.go +++ b/relayer/performance/cache.go @@ -53,6 +53,9 @@ func (cache *Cache) SetEntry(ctx context.Context, request *pairingtypes.RelayReq // TODO: try to connect again once in a while return NotInitialisedError } + if cache.client == nil { + return NotConnectedError.Wrapf("No client connected to address: %s", cache.address) + } // TODO: handle disconnections and SetRelay error types here _, err := cache.client.SetRelay(ctx, &pairingtypes.RelayCacheSet{Request: request, ApiInterface: apiInterface, BlockHash: blockHash, ChainID: chainID, Response: reply, Finalized: finalized, BucketID: bucketID}) return err From 07a4b058732025a5b21560cbc0efef45c40279e2 Mon Sep 17 00:00:00 2001 From: omer mishael Date: Sun, 5 Mar 2023 18:41:24 +0200 Subject: [PATCH 071/123] still some bugs to fix --- cmd/lavad/main.go | 2 +- scripts/init_chain_commands.sh | 120 +++++++------- scripts/setup_providers.sh | 293 ++++++++++++++------------------- 3 files changed, 187 insertions(+), 228 deletions(-) diff --git a/cmd/lavad/main.go b/cmd/lavad/main.go index 
dd9535c157..ec17e11b71 100644 --- a/cmd/lavad/main.go +++ b/cmd/lavad/main.go @@ -333,7 +333,7 @@ rpcprovider 127.0.0.1:3333 COS3 tendermintrpc "wss://www.node-path.com:80,https: return nil } if len(args)%len(rpcprovider.Yaml_config_properties) != 0 { - return fmt.Errorf("invalid number of arguments, either its a single config file or repeated groups of 4 HOST:PORT chain-id api-interface [node_url,node_url_2]") + return fmt.Errorf("invalid number of arguments, either its a single config file or repeated groups of 4 HOST:PORT chain-id api-interface [node_url,node_url_2], arg count: %d", len(args)) } return nil }, diff --git a/scripts/init_chain_commands.sh b/scripts/init_chain_commands.sh index a39d7dfbc8..695e6aa8b0 100755 --- a/scripts/init_chain_commands.sh +++ b/scripts/init_chain_commands.sh @@ -6,6 +6,7 @@ source $__dir/useful_commands.sh killall screen screen -wipe GASPRICE="0.000000001ulava" + lavad tx gov submit-proposal spec-add ./cookbook/spec_add_ethereum.json,./cookbook/spec_add_cosmoshub.json,./cookbook/spec_add_lava.json,./cookbook/spec_add_osmosis.json,./cookbook/spec_add_fantom.json,./cookbook/spec_add_celo.json,./cookbook/spec_add_optimism.json -y --from alice --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE lavad tx gov vote 1 yes -y --from alice --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE sleep 4 @@ -24,6 +25,9 @@ lavad tx gov vote 4 yes -y --from alice --gas-adjustment "1.5" --gas "auto" --ga CLIENTSTAKE="500000000000ulava" PROVIDERSTAKE="500000000000ulava" +PROVIDER1_LISTENER="127.0.0.1:2221" +PROVIDER2_LISTENER="127.0.0.1:2222" +PROVIDER3_LISTENER="127.0.0.1:2223" sleep 4 lavad tx pairing stake-client "ETH1" $CLIENTSTAKE 1 -y --from user1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE @@ -48,102 +52,96 @@ lavad tx pairing stake-client "CANTO" $CLIENTSTAKE 1 -y --from user1 --gas-adju # Ethereum providers -lavad tx pairing stake-provider "ETH1" $PROVIDERSTAKE "127.0.0.1:2221,jsonrpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-provider "ETH1" $PROVIDERSTAKE "127.0.0.1:2222,jsonrpc,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-provider "ETH1" $PROVIDERSTAKE "127.0.0.1:2223,jsonrpc,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-provider "ETH1" $PROVIDERSTAKE "127.0.0.1:2224,jsonrpc,1" 1 -y --from servicer4 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-provider "ETH1" $PROVIDERSTAKE "127.0.0.1:2225,jsonrpc,1" 1 -y --from servicer5 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "ETH1" $PROVIDERSTAKE "$PROVIDER1_LISTENER,jsonrpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "ETH1" $PROVIDERSTAKE "$PROVIDER2_LISTENER,jsonrpc,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "ETH1" $PROVIDERSTAKE "$PROVIDER3_LISTENER,jsonrpc,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE #Goerli providers -lavad tx pairing stake-provider "GTH1" $PROVIDERSTAKE "127.0.0.1:2221,jsonrpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-provider "GTH1" $PROVIDERSTAKE "127.0.0.1:2122,jsonrpc,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices 
$GASPRICE -lavad tx pairing stake-provider "GTH1" $PROVIDERSTAKE "127.0.0.1:2123,jsonrpc,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-provider "GTH1" $PROVIDERSTAKE "127.0.0.1:2124,jsonrpc,1" 1 -y --from servicer4 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-provider "GTH1" $PROVIDERSTAKE "127.0.0.1:2125,jsonrpc,1" 1 -y --from servicer5 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "GTH1" $PROVIDERSTAKE "$PROVIDER1_LISTENER,jsonrpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "GTH1" $PROVIDERSTAKE "$PROVIDER2_LISTENER,jsonrpc,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "GTH1" $PROVIDERSTAKE "$PROVIDER3_LISTENER,jsonrpc,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE # Fantom providers -lavad tx pairing stake-provider "FTM250" $PROVIDERSTAKE "127.0.0.1:2221,jsonrpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-provider "FTM250" $PROVIDERSTAKE "127.0.0.1:2252,jsonrpc,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-provider "FTM250" $PROVIDERSTAKE "127.0.0.1:2253,jsonrpc,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-provider "FTM250" $PROVIDERSTAKE "127.0.0.1:2254,jsonrpc,1" 1 -y --from servicer4 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-provider "FTM250" $PROVIDERSTAKE "127.0.0.1:2255,jsonrpc,1" 1 -y --from servicer5 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "FTM250" $PROVIDERSTAKE "$PROVIDER1_LISTENER,jsonrpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "FTM250" $PROVIDERSTAKE "$PROVIDER2_LISTENER,jsonrpc,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "FTM250" $PROVIDERSTAKE "$PROVIDER3_LISTENER,jsonrpc,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE # Celo providers -lavad tx pairing stake-provider "CELO" $PROVIDERSTAKE "127.0.0.1:2221,jsonrpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-provider "CELO" $PROVIDERSTAKE "127.0.0.1:5242,jsonrpc,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-provider "CELO" $PROVIDERSTAKE "127.0.0.1:5243,jsonrpc,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "CELO" $PROVIDERSTAKE "$PROVIDER1_LISTENER,jsonrpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "CELO" $PROVIDERSTAKE "$PROVIDER2_LISTENER,jsonrpc,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "CELO" $PROVIDERSTAKE "$PROVIDER3_LISTENER,jsonrpc,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE #Celo alfahores testnet providers -lavad tx pairing stake-provider "ALFAJORES" $PROVIDERSTAKE "127.0.0.1:2221,jsonrpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" 
--gas-prices $GASPRICE -lavad tx pairing stake-provider "ALFAJORES" $PROVIDERSTAKE "127.0.0.1:6242,jsonrpc,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-provider "ALFAJORES" $PROVIDERSTAKE "127.0.0.1:6243,jsonrpc,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "ALFAJORES" $PROVIDERSTAKE "$PROVIDER1_LISTENER,jsonrpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "ALFAJORES" $PROVIDERSTAKE "$PROVIDER2_LISTENER,jsonrpc,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "ALFAJORES" $PROVIDERSTAKE "$PROVIDER3_LISTENER,jsonrpc,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE #Arbitrum mainet providers -lavad tx pairing stake-provider "ARB1" $PROVIDERSTAKE "127.0.0.1:2221,jsonrpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-provider "ARB1" $PROVIDERSTAKE "127.0.0.1:7242,jsonrpc,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-provider "ARB1" $PROVIDERSTAKE "127.0.0.1:7243,jsonrpc,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "ARB1" $PROVIDERSTAKE "$PROVIDER1_LISTENER,jsonrpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "ARB1" $PROVIDERSTAKE "$PROVIDER2_LISTENER,jsonrpc,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "ARB1" $PROVIDERSTAKE "$PROVIDER3_LISTENER,jsonrpc,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE #Aptos mainet providers -lavad tx pairing stake-provider "APT1" $PROVIDERSTAKE "127.0.0.1:2221,rest,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-provider "APT1" $PROVIDERSTAKE "127.0.0.1:10032,rest,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-provider "APT1" $PROVIDERSTAKE "127.0.0.1:10033,rest,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "APT1" $PROVIDERSTAKE "$PROVIDER1_LISTENER,rest,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "APT1" $PROVIDERSTAKE "$PROVIDER2_LISTENER,rest,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "APT1" $PROVIDERSTAKE "$PROVIDER3_LISTENER,rest,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE #Starknet mainet providers -lavad tx pairing stake-provider "STRK" $PROVIDERSTAKE "127.0.0.1:2221,jsonrpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-provider "STRK" $PROVIDERSTAKE "127.0.0.1:8242,jsonrpc,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-provider "STRK" $PROVIDERSTAKE "127.0.0.1:8243,jsonrpc,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "STRK" $PROVIDERSTAKE "$PROVIDER1_LISTENER,jsonrpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas 
"auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "STRK" $PROVIDERSTAKE "$PROVIDER2_LISTENER,jsonrpc,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "STRK" $PROVIDERSTAKE "$PROVIDER3_LISTENER,jsonrpc,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE # Polygon Providers -lavad tx pairing stake-provider "POLYGON1" $PROVIDERSTAKE "127.0.0.1:2221,jsonrpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-provider "POLYGON1" $PROVIDERSTAKE "127.0.0.1:4345,jsonrpc,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-provider "POLYGON1" $PROVIDERSTAKE "127.0.0.1:4346,jsonrpc,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "POLYGON1" $PROVIDERSTAKE "$PROVIDER1_LISTENER,jsonrpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "POLYGON1" $PROVIDERSTAKE "$PROVIDER2_LISTENER,jsonrpc,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "POLYGON1" $PROVIDERSTAKE "$PROVIDER3_LISTENER,jsonrpc,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE # Optimism Providers -lavad tx pairing stake-provider "OPTM" $PROVIDERSTAKE "127.0.0.1:6003,jsonrpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-provider "OPTM" $PROVIDERSTAKE "127.0.0.1:6004,jsonrpc,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-provider "OPTM" $PROVIDERSTAKE "127.0.0.1:6005,jsonrpc,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "OPTM" $PROVIDERSTAKE "$PROVIDER1_LISTENER,jsonrpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "OPTM" $PROVIDERSTAKE "$PROVIDER2_LISTENER,jsonrpc,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "OPTM" $PROVIDERSTAKE "$PROVIDER3_LISTENER,jsonrpc,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE # Base Providers -lavad tx pairing stake-provider "BASET" $PROVIDERSTAKE "127.0.0.1:6000,jsonrpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-provider "BASET" $PROVIDERSTAKE "127.0.0.1:6001,jsonrpc,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-provider "BASET" $PROVIDERSTAKE "127.0.0.1:6002,jsonrpc,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "BASET" $PROVIDERSTAKE "$PROVIDER1_LISTENER,jsonrpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "BASET" $PROVIDERSTAKE "$PROVIDER2_LISTENER,jsonrpc,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "BASET" $PROVIDERSTAKE "$PROVIDER3_LISTENER,jsonrpc,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE # Cosmos Chains: # Osmosis providers -lavad tx pairing stake-provider "COS3" $PROVIDERSTAKE 
"127.0.0.1:2221,tendermintrpc,1 127.0.0.1:2221,rest,1 127.0.0.1:2221,grpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-provider "COS3" $PROVIDERSTAKE "127.0.0.1:2242,tendermintrpc,1 127.0.0.1:2232,rest,1 127.0.0.1:2235,grpc,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-provider "COS3" $PROVIDERSTAKE "127.0.0.1:2243,tendermintrpc,1 127.0.0.1:2233,rest,1 127.0.0.1:2236,grpc,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "COS3" $PROVIDERSTAKE "$PROVIDER1_LISTENER,tendermintrpc,1 $PROVIDER1_LISTENER,rest,1 $PROVIDER1_LISTENER,grpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "COS3" $PROVIDERSTAKE "$PROVIDER2_LISTENER,tendermintrpc,1 $PROVIDER2_LISTENER,rest,1 $PROVIDER2_LISTENER,grpc,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "COS3" $PROVIDERSTAKE "$PROVIDER3_LISTENER,tendermintrpc,1 $PROVIDER3_LISTENER,rest,1 $PROVIDER3_LISTENER,grpc,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE # Lava Providers -lavad tx pairing stake-provider "LAV1" $PROVIDERSTAKE "127.0.0.1:2221,tendermintrpc,1 127.0.0.1:2221,rest,1 127.0.0.1:2221,grpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-provider "LAV1" $PROVIDERSTAKE "127.0.0.1:2262,tendermintrpc,1 127.0.0.1:2272,rest,1 127.0.0.1:2275,grpc,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -# lavad tx pairing stake-provider "LAV1" $PROVIDERSTAKE "127.0.0.1:2263,tendermintrpc,1 127.0.0.1:2273,rest,1 127.0.0.1:2276,grpc,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "LAV1" $PROVIDERSTAKE "$PROVIDER1_LISTENER,tendermintrpc,1 $PROVIDER1_LISTENER,rest,1 $PROVIDER1_LISTENER,grpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "LAV1" $PROVIDERSTAKE "$PROVIDER2_LISTENER,tendermintrpc,1 $PROVIDER2_LISTENER,rest,1 $PROVIDER2_LISTENER,grpc,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "LAV1" $PROVIDERSTAKE "$PROVIDER3_LISTENER,tendermintrpc,1 $PROVIDER3_LISTENER,rest,1 $PROVIDER3_LISTENER,grpc,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE # Juno providers -lavad tx pairing stake-provider "JUN1" $PROVIDERSTAKE "127.0.0.1:2221,tendermintrpc,1 127.0.0.1:2221,rest,1 127.0.0.1:2221,grpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-provider "JUN1" $PROVIDERSTAKE "127.0.0.1:2362,tendermintrpc,1 127.0.0.1:2372,rest,1 127.0.0.1:2375,grpc,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-provider "JUN1" $PROVIDERSTAKE "127.0.0.1:2363,tendermintrpc,1 127.0.0.1:2373,rest,1 127.0.0.1:2376,grpc,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "JUN1" $PROVIDERSTAKE "$PROVIDER1_LISTENER,tendermintrpc,1 $PROVIDER1_LISTENER,rest,1 $PROVIDER1_LISTENER,grpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "JUN1" $PROVIDERSTAKE 
"$PROVIDER2_LISTENER,tendermintrpc,1 $PROVIDER2_LISTENER,rest,1 $PROVIDER2_LISTENER,grpc,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "JUN1" $PROVIDERSTAKE "$PROVIDER3_LISTENER,tendermintrpc,1 $PROVIDER3_LISTENER,rest,1 $PROVIDER3_LISTENER,grpc,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE # Osmosis testnet providers -lavad tx pairing stake-provider "COS4" $PROVIDERSTAKE "127.0.0.1:2221,tendermintrpc,1 127.0.0.1:2221,rest,1 127.0.0.1:2221,grpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-provider "COS4" $PROVIDERSTAKE "127.0.0.1:4242,tendermintrpc,1 127.0.0.1:4232,rest,1 127.0.0.1:4235,grpc,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-provider "COS4" $PROVIDERSTAKE "127.0.0.1:4243,tendermintrpc,1 127.0.0.1:4233,rest,1 127.0.0.1:4236,grpc,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "COS4" $PROVIDERSTAKE "$PROVIDER1_LISTENER,tendermintrpc,1 $PROVIDER1_LISTENER,rest,1 $PROVIDER1_LISTENER,grpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "COS4" $PROVIDERSTAKE "$PROVIDER2_LISTENER,tendermintrpc,1 $PROVIDER2_LISTENER,rest,1 $PROVIDER2_LISTENER,grpc,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "COS4" $PROVIDERSTAKE "$PROVIDER3_LISTENER,tendermintrpc,1 $PROVIDER3_LISTENER,rest,1 $PROVIDER3_LISTENER,grpc,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE # Cosmoshub Providers -lavad tx pairing stake-provider "COS5" $PROVIDERSTAKE "127.0.0.1:2221,tendermintrpc,1 127.0.0.1:2221,rest,1 127.0.0.1:2221,grpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-provider "COS5" $PROVIDERSTAKE "127.0.0.1:2342,tendermintrpc,1 127.0.0.1:2332,rest,1 127.0.0.1:2335,grpc,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-provider "COS5" $PROVIDERSTAKE "127.0.0.1:2343,tendermintrpc,1 127.0.0.1:2333,rest,1 127.0.0.1:2336,grpc,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "COS5" $PROVIDERSTAKE "$PROVIDER1_LISTENER,tendermintrpc,1 $PROVIDER1_LISTENER,rest,1 $PROVIDER1_LISTENER,grpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "COS5" $PROVIDERSTAKE "$PROVIDER2_LISTENER,tendermintrpc,1 $PROVIDER2_LISTENER,rest,1 $PROVIDER2_LISTENER,grpc,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "COS5" $PROVIDERSTAKE "$PROVIDER3_LISTENER,tendermintrpc,1 $PROVIDER3_LISTENER,rest,1 $PROVIDER3_LISTENER,grpc,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE # Evmos providers -lavad tx pairing stake-provider "EVMOS" $PROVIDERSTAKE "127.0.0.1:4347,jsonrpc,1 127.0.0.1:4348,tendermintrpc,1 127.0.0.1:4349,rest,1 127.0.0.1:4350,grpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-provider "EVMOS" $PROVIDERSTAKE "127.0.0.1:4351,jsonrpc,1 127.0.0.1:4352,tendermintrpc,1 127.0.0.1:4353,rest,1 127.0.0.1:4354,grpc,1" 1 -y --from servicer2 --gas-adjustment 
"1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-provider "EVMOS" $PROVIDERSTAKE "127.0.0.1:4355,jsonrpc,1 127.0.0.1:4356,tendermintrpc,1 127.0.0.1:4357,rest,1 127.0.0.1:4358,grpc,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "EVMOS" $PROVIDERSTAKE "$PROVIDER1_LISTENER,jsonrpc,1 $PROVIDER1_LISTENER,tendermintrpc,1 $PROVIDER1_LISTENER,rest,1 $PROVIDER1_LISTENER,grpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "EVMOS" $PROVIDERSTAKE "$PROVIDER2_LISTENER,jsonrpc,1 $PROVIDER2_LISTENER,tendermintrpc,1 $PROVIDER2_LISTENER,rest,1 $PROVIDER2_LISTENER,grpc,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "EVMOS" $PROVIDERSTAKE "$PROVIDER3_LISTENER,jsonrpc,1 $PROVIDER3_LISTENER,tendermintrpc,1 $PROVIDER3_LISTENER,rest,1 $PROVIDER3_LISTENER,grpc,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE # canto Providers -lavad tx pairing stake-provider "CANTO" $PROVIDERSTAKE "127.0.0.1:6006,jsonrpc,1 127.0.0.1:6009,tendermintrpc,1 127.0.0.1:6012,rest,1 127.0.0.1:6015,grpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-provider "CANTO" $PROVIDERSTAKE "127.0.0.1:6007,jsonrpc,1 127.0.0.1:6010,tendermintrpc,1 127.0.0.1:6013,rest,1 127.0.0.1:6016,grpc,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-provider "CANTO" $PROVIDERSTAKE "127.0.0.1:6008,jsonrpc,1 127.0.0.1:6011,tendermintrpc,1 127.0.0.1:6014,rest,1 127.0.0.1:6017,grpc,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "CANTO" $PROVIDERSTAKE "$PROVIDER1_LISTENER,jsonrpc,1 $PROVIDER1_LISTENER,tendermintrpc,1 $PROVIDER1_LISTENER,rest,1 $PROVIDER1_LISTENER,grpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "CANTO" $PROVIDERSTAKE "$PROVIDER1_LISTENER,jsonrpc,1 $PROVIDER1_LISTENER,tendermintrpc,1 $PROVIDER1_LISTENER,rest,1 $PROVIDER1_LISTENER,grpc,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "CANTO" $PROVIDERSTAKE "$PROVIDER1_LISTENER,jsonrpc,1 $PROVIDER1_LISTENER,tendermintrpc,1 $PROVIDER1_LISTENER,rest,1 $PROVIDER1_LISTENER,grpc,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE echo "---------------Queries------------------" lavad query pairing providers "ETH1" @@ -152,4 +150,4 @@ lavad query pairing clients "ETH1" # we need to wait for the next epoch for the stake to take action. sleep_until_next_epoch -# . ${__dir}/setup_providers.sh +. 
${__dir}/setup_providers.sh diff --git a/scripts/setup_providers.sh b/scripts/setup_providers.sh index ade75d10b6..8d88964c20 100755 --- a/scripts/setup_providers.sh +++ b/scripts/setup_providers.sh @@ -10,173 +10,134 @@ echo "---------------Setup Providers------------------" killall screen screen -wipe -#ETH providers -screen -d -m -S eth1_providers bash -c "source ~/.bashrc; lavad server 127.0.0.1 2221 $ETH_RPC_WS ETH1 jsonrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 2>&1 | tee $LOGS_DIR/ETH1_2221.log" && sleep 0.25 -screen -S eth1_providers -X screen -t win1 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2222 $ETH_RPC_WS ETH1 jsonrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer2 2>&1 | tee $LOGS_DIR/ETH1_2222.log" -screen -S eth1_providers -X screen -t win2 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2223 $ETH_RPC_WS ETH1 jsonrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer3 2>&1 | tee $LOGS_DIR/ETH1_2223.log" -screen -S eth1_providers -X screen -t win3 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2224 $ETH_RPC_WS ETH1 jsonrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer4 2>&1 | tee $LOGS_DIR/ETH1_2224.log" -screen -S eth1_providers -X screen -t win4 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2225 $ETH_RPC_WS ETH1 jsonrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer5 2>&1 | tee $LOGS_DIR/ETH1_2225.log" - -#GTH providers -screen -d -m -S gth_providers bash -c "source ~/.bashrc; lavad server 127.0.0.1 2121 $GTH_RPC_WS GTH1 jsonrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 2>&1 | tee $LOGS_DIR/GTH1_2121.log" && sleep 0.25 -screen -S gth_providers -X screen -t win1 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2122 $GTH_RPC_WS GTH1 jsonrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer2 2>&1 | tee $LOGS_DIR/GTH1_2122.log" -screen -S gth_providers -X screen -t win2 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2123 $GTH_RPC_WS GTH1 jsonrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer3 2>&1 | tee $LOGS_DIR/GTH1_2123.log" -screen -S gth_providers -X screen -t win3 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2124 $GTH_RPC_WS GTH1 jsonrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer4 2>&1 | tee $LOGS_DIR/GTH1_2124.log" -screen -S gth_providers -X screen -t win4 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2125 $GTH_RPC_WS GTH1 jsonrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer5 2>&1 | tee $LOGS_DIR/GTH1_2125.log" - - -#FTM providers -screen -d -m -S ftm250_providers bash -c "source ~/.bashrc; lavad server 127.0.0.1 2251 $FTM_RPC_HTTP FTM250 jsonrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 2>&1 | tee $LOGS_DIR/FTM250_2251.log" && sleep 0.25 -screen -S ftm250_providers -X screen -t win1 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2252 $FTM_RPC_HTTP FTM250 jsonrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer2 2>&1 | tee $LOGS_DIR/FTM250_2252.log" -screen -S ftm250_providers -X screen -t win2 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2253 $FTM_RPC_HTTP FTM250 jsonrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer3 2>&1 | tee $LOGS_DIR/FTM250_2253.log" -screen -S ftm250_providers -X screen -t win3 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2254 
$FTM_RPC_HTTP FTM250 jsonrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer4 2>&1 | tee $LOGS_DIR/FTM250_2254.log" -screen -S ftm250_providers -X screen -t win4 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2255 $FTM_RPC_HTTP FTM250 jsonrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer5 2>&1 | tee $LOGS_DIR/FTM250_2255.log" - -#Celo providers -screen -d -m -S celo_providers bash -c "source ~/.bashrc; lavad server 127.0.0.1 5241 $CELO_HTTP CELO jsonrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 2>&1 | tee $LOGS_DIR/CELO_2221.log" && sleep 0.25 -screen -S celo_providers -X screen -t win1 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 5242 $CELO_HTTP CELO jsonrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer2 2>&1 | tee $LOGS_DIR/CELO_2222.log" -screen -S celo_providers -X screen -t win2 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 5243 $CELO_HTTP CELO jsonrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer3 2>&1 | tee $LOGS_DIR/CELO_2223.log" - -# #Celo alfahores providers -screen -d -m -S alfajores_providers bash -c "source ~/.bashrc; lavad server 127.0.0.1 6241 $CELO_ALFAJORES_HTTP ALFAJORES jsonrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 2>&1 | tee $LOGS_DIR/ALFAJORES_2221.log" && sleep 0.25 -screen -S alfajores_providers -X screen -t win1 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 6242 $CELO_ALFAJORES_HTTP ALFAJORES jsonrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer2 2>&1 | tee $LOGS_DIR/ALFAJORES_2222.log" -screen -S alfajores_providers -X screen -t win2 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 6243 $CELO_ALFAJORES_HTTP ALFAJORES jsonrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer3 2>&1 | tee $LOGS_DIR/ALFAJORES_2223.log" - -#Arbitrum providers -screen -d -m -S arb_providers bash -c "source ~/.bashrc; lavad server 127.0.0.1 7241 $ARB1_HTTP ARB1 jsonrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 2>&1 | tee $LOGS_DIR/ARB1_2221.log" && sleep 0.25 -screen -S arb_providers -X screen -t win1 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 7242 $ARB1_HTTP ARB1 jsonrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer2 2>&1 | tee $LOGS_DIR/ARB1_2222.log" -screen -S arb_providers -X screen -t win2 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 7243 $ARB1_HTTP ARB1 jsonrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer3 2>&1 | tee $LOGS_DIR/ARB1_2223.log" - -#Aptos providers -screen -d -m -S apt1_providers bash -c "source ~/.bashrc; lavad server 127.0.0.1 10031 $APTOS_REST APT1 rest $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 2>&1 | tee $LOGS_DIR/APT1_10031.log" && sleep 0.25 -screen -S apt1_providers -X screen -t win1 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 10032 $APTOS_REST APT1 rest $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer2 2>&1 | tee $LOGS_DIR/APT1_10032.log" -screen -S apt1_providers -X screen -t win2 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 10033 $APTOS_REST APT1 rest $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer3 2>&1 | tee $LOGS_DIR/APT1_10033.log" - -#Starknet providers -screen -d -m -S strk_providers bash -c "source ~/.bashrc; lavad server 127.0.0.1 8241 $STARKNET_RPC STRK jsonrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level 
debug --from servicer1 2>&1 | tee $LOGS_DIR/STRK_2221.log" -screen -S strk_providers -X screen -t win1 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 8242 $STARKNET_RPC STRK jsonrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer2 2>&1 | tee $LOGS_DIR/STRK_2222.log" -screen -S strk_providers -X screen -t win2 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 8243 $STARKNET_RPC STRK jsonrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer3 2>&1 | tee $LOGS_DIR/STRK_2223.log" - -#Polygon providers -screen -d -m -S polygon_providers bash -c "source ~/.bashrc; lavad server 127.0.0.1 4344 $POLYGON_MAINNET_RPC POLYGON1 jsonrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 2>&1 | tee $LOGS_DIR/POLYGON_4344.log" -screen -S polygon_providers -X screen -t win1 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 4345 $POLYGON_MAINNET_RPC POLYGON1 jsonrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer2 2>&1 | tee $LOGS_DIR/POLYGON_4345.log" -screen -S polygon_providers -X screen -t win2 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 4346 $POLYGON_MAINNET_RPC POLYGON1 jsonrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer3 2>&1 | tee $LOGS_DIR/POLYGON_4346.log" - -# Optimism providers -screen -d -m -S optimism_providers bash -c "source ~/.bashrc; lavad server 127.0.0.1 6003 $OPTIMISM_RPC OPTM jsonrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 2>&1 | tee $LOGS_DIR/OPTM_6003.log" -screen -S optimism_providers -X screen -t win1 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 6004 $OPTIMISM_RPC OPTM jsonrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer2 2>&1 | tee $LOGS_DIR/OPTM_6004.log" -screen -S optimism_providers -X screen -t win2 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 6005 $OPTIMISM_RPC OPTM jsonrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer3 2>&1 | tee $LOGS_DIR/OPTM_6005.log" -# Base providers -screen -d -m -S base_providers bash -c "source ~/.bashrc; lavad server 127.0.0.1 6000 $BASE_GOERLI_RPC BASET jsonrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 2>&1 | tee $LOGS_DIR/BASET_6000.log" -screen -S base_providers -X screen -t win1 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 6001 $BASE_GOERLI_RPC BASET jsonrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer2 2>&1 | tee $LOGS_DIR/BASET_6001.log" -screen -S base_providers -X screen -t win2 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 6002 $BASE_GOERLI_RPC BASET jsonrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer3 2>&1 | tee $LOGS_DIR/BASET_6002.log" - -# Cosmos-SDK Chains - -# Osmosis providers -screen -d -m -S cos3_providers bash -c "source ~/.bashrc; lavad server 127.0.0.1 2231 $OSMO_REST COS3 rest $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 2>&1 | tee $LOGS_DIR/COS3_2231.log" && sleep 0.25 -screen -S cos3_providers -X screen -t win1 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2232 $OSMO_REST COS3 rest $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer2 2>&1 | tee $LOGS_DIR/COS3_2232.log" -screen -S cos3_providers -X screen -t win2 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2233 $OSMO_REST COS3 rest $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer3 2>&1 | tee $LOGS_DIR/COS3_2233.log" -screen -S cos3_providers -X screen -t 
win3 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2241 $OSMO_RPC COS3 tendermintrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 --tendermint-http-endpoint $OSMO_RPC_HTTP 2>&1 | tee $LOGS_DIR/COS3_2241.log" -screen -S cos3_providers -X screen -t win4 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2242 $OSMO_RPC COS3 tendermintrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer2 --tendermint-http-endpoint $OSMO_RPC_HTTP 2>&1 | tee $LOGS_DIR/COS3_2242.log" -screen -S cos3_providers -X screen -t win5 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2243 $OSMO_RPC COS3 tendermintrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer3 --tendermint-http-endpoint $OSMO_RPC_HTTP 2>&1 | tee $LOGS_DIR/COS3_2243.log" -screen -S cos3_providers -X screen -t win6 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2234 $OSMO_GRPC COS3 grpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 2>&1 | tee $LOGS_DIR/COS3_2234.log" -screen -S cos3_providers -X screen -t win7 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2235 $OSMO_GRPC COS3 grpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer2 2>&1 | tee $LOGS_DIR/COS3_2235.log" -screen -S cos3_providers -X screen -t win8 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2236 $OSMO_GRPC COS3 grpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer3 2>&1 | tee $LOGS_DIR/COS3_2236.log" - -# Osmosis testnet providers -screen -d -m -S cos4_providers bash -c "source ~/.bashrc; lavad server 127.0.0.1 4231 $OSMO_TEST_REST COS4 rest $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 2>&1 | tee $LOGS_DIR/COS4_4231.log" && sleep 0.25 -screen -S cos4_providers -X screen -t win1 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 4232 $OSMO_TEST_REST COS4 rest $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer2 2>&1 | tee $LOGS_DIR/COS4_4232.log" -screen -S cos4_providers -X screen -t win2 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 4233 $OSMO_TEST_REST COS4 rest $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer3 2>&1 | tee $LOGS_DIR/COS4_4233.log" -screen -S cos4_providers -X screen -t win3 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 4241 $OSMO_TEST_RPC COS4 tendermintrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 --tendermint-http-endpoint $OSMO_TEST_RPC_HTTP 2>&1 | tee $LOGS_DIR/COS4_4241.log" -screen -S cos4_providers -X screen -t win4 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 4242 $OSMO_TEST_RPC COS4 tendermintrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer2 --tendermint-http-endpoint $OSMO_TEST_RPC_HTTP 2>&1 | tee $LOGS_DIR/COS4_4242.log" -screen -S cos4_providers -X screen -t win5 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 4243 $OSMO_TEST_RPC COS4 tendermintrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer3 --tendermint-http-endpoint $OSMO_TEST_RPC_HTTP 2>&1 | tee $LOGS_DIR/COS4_4243.log" -screen -S cos4_providers -X screen -t win6 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 4234 $OSMO_TEST_GRPC COS4 grpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 2>&1 | tee $LOGS_DIR/COS4_4234.log" -screen -S cos4_providers -X screen -t win7 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 4235 $OSMO_TEST_GRPC COS4 grpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from 
servicer2 2>&1 | tee $LOGS_DIR/COS4_4235.log" -screen -S cos4_providers -X screen -t win8 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 4236 $OSMO_TEST_GRPC COS4 grpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer3 2>&1 | tee $LOGS_DIR/COS4_4236.log" - -# Lava providers -screen -d -m -S lav1_providers bash -c "source ~/.bashrc; lavad server 127.0.0.1 2271 $LAVA_REST LAV1 rest $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 2>&1 | tee $LOGS_DIR/LAV1_2271.log" && sleep 0.25 -screen -S lav1_providers -X screen -t win1 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2272 $LAVA_REST LAV1 rest $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer2 2>&1 | tee $LOGS_DIR/LAV1_2272.log" -screen -S lav1_providers -X screen -t win2 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2273 $LAVA_REST LAV1 rest $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer3 2>&1 | tee $LOGS_DIR/LAV1_2273.log" -screen -S lav1_providers -X screen -t win3 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2261 $LAVA_RPC LAV1 tendermintrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 --tendermint-http-endpoint $LAVA_RPC_HTTP 2>&1 | tee $LOGS_DIR/LAV1_2261.log" -screen -S lav1_providers -X screen -t win4 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2262 $LAVA_RPC LAV1 tendermintrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer2 --tendermint-http-endpoint $LAVA_RPC_HTTP 2>&1 | tee $LOGS_DIR/LAV1_2262.log" -screen -S lav1_providers -X screen -t win5 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2263 $LAVA_RPC LAV1 tendermintrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer3 --tendermint-http-endpoint $LAVA_RPC_HTTP 2>&1 | tee $LOGS_DIR/LAV1_2263.log" -screen -S lav1_providers -X screen -t win6 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2274 $LAVA_GRPC LAV1 grpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 2>&1 | tee $LOGS_DIR/LAV1_2274.log" -screen -S lav1_providers -X screen -t win7 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2275 $LAVA_GRPC LAV1 grpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer2 2>&1 | tee $LOGS_DIR/LAV1_2275.log" -screen -S lav1_providers -X screen -t win8 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2276 $LAVA_GRPC LAV1 grpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer3 2>&1 | tee $LOGS_DIR/LAV1_2276.log" - -# Cosmoshub providers -screen -d -m -S cos5_providers bash -c "source ~/.bashrc; lavad server 127.0.0.1 2331 $GAIA_REST COS5 rest $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 2>&1 | tee $LOGS_DIR/COS5_2331.log" -screen -S cos5_providers -X screen -t win1 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2332 $GAIA_REST COS5 rest $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer2 2>&1 | tee $LOGS_DIR/COS5_2332.log" -screen -S cos5_providers -X screen -t win2 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2333 $GAIA_REST COS5 rest $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer3 2>&1 | tee $LOGS_DIR/COS5_2333.log" -screen -S cos5_providers -X screen -t win3 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2344 $GAIA_RPC COS5 tendermintrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 --tendermint-http-endpoint $GAIA_RPC_HTTP 2>&1 | tee $LOGS_DIR/COS5_2344.log" -screen -S cos5_providers 
-X screen -t win4 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2342 $GAIA_RPC COS5 tendermintrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer2 --tendermint-http-endpoint $GAIA_RPC_HTTP 2>&1 | tee $LOGS_DIR/COS5_2342.log" -screen -S cos5_providers -X screen -t win5 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2343 $GAIA_RPC COS5 tendermintrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer3 --tendermint-http-endpoint $GAIA_RPC_HTTP 2>&1 | tee $LOGS_DIR/COS5_2343.log" -screen -S cos5_providers -X screen -t win6 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2334 $GAIA_GRPC COS5 grpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 2>&1 | tee $LOGS_DIR/COS5_2334.log" -screen -S cos5_providers -X screen -t win7 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2335 $GAIA_GRPC COS5 grpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer2 2>&1 | tee $LOGS_DIR/COS5_2335.log" -screen -S cos5_providers -X screen -t win8 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2336 $GAIA_GRPC COS5 grpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer3 2>&1 | tee $LOGS_DIR/COS5_2336.log" - -# Juno providers -screen -d -m -S jun1_providers bash -c "source ~/.bashrc; lavad server 127.0.0.1 2371 $JUNO_REST JUN1 rest $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 2>&1 | tee $LOGS_DIR/JUN1_2371.log" -screen -S jun1_providers -X screen -t win1 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2372 $JUNO_REST JUN1 rest $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer2 2>&1 | tee $LOGS_DIR/JUN1_2372.log" -screen -S jun1_providers -X screen -t win2 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2373 $JUNO_REST JUN1 rest $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer3 2>&1 | tee $LOGS_DIR/JUN1_2373.log" -screen -S jun1_providers -X screen -t win3 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2361 $JUNO_RPC JUN1 tendermintrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 --tendermint-http-endpoint $JUNO_RPC_HTTP 2>&1 | tee $LOGS_DIR/JUN1_2361.log" -screen -S jun1_providers -X screen -t win4 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2362 $JUNO_RPC JUN1 tendermintrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer2 --tendermint-http-endpoint $JUNO_RPC_HTTP 2>&1 | tee $LOGS_DIR/JUN1_2362.log" -screen -S jun1_providers -X screen -t win5 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2363 $JUNO_RPC JUN1 tendermintrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer3 --tendermint-http-endpoint $JUNO_RPC_HTTP 2>&1 | tee $LOGS_DIR/JUN1_2363.log" -screen -S jun1_providers -X screen -t win6 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2374 $JUNO_GRPC JUN1 grpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 2>&1 | tee $LOGS_DIR/JUN1_2374.log" -screen -S jun1_providers -X screen -t win7 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2375 $JUNO_GRPC JUN1 grpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer2 2>&1 | tee $LOGS_DIR/JUN1_2375.log" -screen -S jun1_providers -X screen -t win8 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2376 $JUNO_GRPC JUN1 grpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer3 2>&1 | tee $LOGS_DIR/JUN1_2376.log" - -# Evmos providers -screen -d -m -S evmos_providers bash -c "source 
~/.bashrc; lavad server 127.0.0.1 4347 $EVMOS_RPC EVMOS jsonrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 2>&1 | tee $LOGS_DIR/EVMOS_4347.log" -screen -S evmos_providers -X screen -t win1 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 4351 $EVMOS_RPC EVMOS jsonrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer2 2>&1 | tee $LOGS_DIR/EVMOS_4351.log" -screen -S evmos_providers -X screen -t win2 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 4355 $EVMOS_RPC EVMOS jsonrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer3 2>&1 | tee $LOGS_DIR/EVMOS_4355.log" -screen -S evmos_providers -X screen -t win3 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 4348 $EVMOS_TENDERMINTRPC EVMOS tendermintrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 --tendermint-http-endpoint $EVMOS_TENDERMINTRPC 2>&1 | tee $LOGS_DIR/EVMOS_4348.log" -screen -S evmos_providers -X screen -t win4 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 4352 $EVMOS_TENDERMINTRPC EVMOS tendermintrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer2 --tendermint-http-endpoint $EVMOS_TENDERMINTRPC 2>&1 | tee $LOGS_DIR/EVMOS_4352.log" -screen -S evmos_providers -X screen -t win5 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 4356 $EVMOS_TENDERMINTRPC EVMOS tendermintrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer3 --tendermint-http-endpoint $EVMOS_TENDERMINTRPC 2>&1 | tee $LOGS_DIR/EVMOS_4356.log" -screen -S evmos_providers -X screen -t win6 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 4349 $EVMOS_REST EVMOS rest $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 2>&1 | tee $LOGS_DIR/EVMOS_4349.log" -screen -S evmos_providers -X screen -t win7 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 4353 $EVMOS_REST EVMOS rest $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer2 2>&1 | tee $LOGS_DIR/EVMOS_4353.log" -screen -S evmos_providers -X screen -t win8 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 4357 $EVMOS_REST EVMOS rest $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer3 2>&1 | tee $LOGS_DIR/EVMOS_4357.log" -screen -S evmos_providers -X screen -t win09 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 4350 $EVMOS_GRPC EVMOS grpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 2>&1 | tee $LOGS_DIR/EVMOS_grpc0.log" -screen -S evmos_providers -X screen -t win10 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 4354 $EVMOS_GRPC EVMOS grpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer2 2>&1 | tee $LOGS_DIR/EVMOS_grpc1.log" -screen -S evmos_providers -X screen -t win11 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 4358 $EVMOS_GRPC EVMOS grpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer3 2>&1 | tee $LOGS_DIR/EVMOS_grpc2.log" - -# Canto Providers -screen -d -m -S canto_providers bash -c "source ~/.bashrc; lavad server 127.0.0.1 6006 $CANTO_RPC CANTO jsonrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 2>&1 | tee $LOGS_DIR/CANTO_jsonrpc1.log" -screen -S canto_providers -X screen -t win1 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 6007 $CANTO_RPC CANTO jsonrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer2 2>&1 | tee $LOGS_DIR/CANTO_jsonrpc2.log" -screen -S canto_providers -X screen -t win2 -X bash -c "source ~/.bashrc; lavad 
server 127.0.0.1 6008 $CANTO_RPC CANTO jsonrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer3 2>&1 | tee $LOGS_DIR/CANTO_jsonrpc3.log" -screen -S canto_providers -X screen -t win3 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 6009 $CANTO_TENDERMINT CANTO tendermintrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 --tendermint-http-endpoint $CANTO_TENDERMINT 2>&1 | tee $LOGS_DIR/CANTO_tender1.log" -screen -S canto_providers -X screen -t win4 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 6010 $CANTO_TENDERMINT CANTO tendermintrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer2 --tendermint-http-endpoint $CANTO_TENDERMINT 2>&1 | tee $LOGS_DIR/CANTO_tender2.log" -screen -S canto_providers -X screen -t win5 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 6011 $CANTO_TENDERMINT CANTO tendermintrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer3 --tendermint-http-endpoint $CANTO_TENDERMINT 2>&1 | tee $LOGS_DIR/CANTO_tender3.log" -screen -S canto_providers -X screen -t win6 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 6012 $CANTO_REST CANTO rest $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 2>&1 | tee $LOGS_DIR/CANTO_rest1.log" -screen -S canto_providers -X screen -t win7 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 6013 $CANTO_REST CANTO rest $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer2 2>&1 | tee $LOGS_DIR/CANTO_rest2.log" -screen -S canto_providers -X screen -t win8 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 6014 $CANTO_REST CANTO rest $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer3 2>&1 | tee $LOGS_DIR/CANTO_rest3.log" -screen -S canto_providers -X screen -t win09 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 6015 $CANTO_GRPC CANTO grpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 2>&1 | tee $LOGS_DIR/CANTO_grpc1.log" -screen -S canto_providers -X screen -t win10 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 6016 $CANTO_GRPC CANTO grpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer2 2>&1 | tee $LOGS_DIR/CANTO_grpc2.log" -screen -S canto_providers -X screen -t win11 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 6017 $CANTO_GRPC CANTO grpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer3 2>&1 | tee $LOGS_DIR/CANTO_grpc3.log" - -# Setup Portals -screen -d -m -S portals bash -c "source ~/.bashrc; lavad rpcconsumer 127.0.0.1:3333 ETH1 jsonrpc $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug --from user1 2>&1 | tee $LOGS_DIR/PORTAL_ETH_3333.log" && sleep 0.25 -screen -S portals -X screen -t win3 -X bash -c "source ~/.bashrc; lavad rpcconsumer 127.0.0.1:3336 FTM250 jsonrpc $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug --from user1 2>&1 | tee $LOGS_DIR/PORTAL_FTM250_3336.log" -screen -S portals -X screen -t win6 -X bash -c "source ~/.bashrc; lavad rpcconsumer 127.0.0.1:3339 GTH1 jsonrpc $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug --from user1 2>&1 | tee $LOGS_DIR/PORTAL_3339.log" -screen -S portals -X screen -t win9 -X bash -c "source ~/.bashrc; lavad rpcconsumer 127.0.0.1:3342 CELO jsonrpc $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug --from user1 2>&1 | tee $LOGS_DIR/PORTAL_3342.log" -screen -S portals -X screen -t win12 -X bash -c "source ~/.bashrc; lavad rpcconsumer 127.0.0.1:3345 ALFAJORES jsonrpc $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level 
debug --from user1 2>&1 | tee $LOGS_DIR/PORTAL_3345.log" -screen -S portals -X screen -t win13 -X bash -c "source ~/.bashrc; lavad rpcconsumer 127.0.0.1:3346 ARB1 jsonrpc $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug --from user1 2>&1 | tee $LOGS_DIR/PORTAL_3346.log" -screen -S portals -X screen -t win14 -X bash -c "source ~/.bashrc; lavad rpcconsumer 127.0.0.1:3347 STRK jsonrpc $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug --from user1 2>&1 | tee $LOGS_DIR/PORTAL_3347.log" -screen -S portals -X screen -t win15 -X bash -c "source ~/.bashrc; lavad rpcconsumer 127.0.0.1:3348 APT1 rest $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug --from user1 2>&1 | tee $LOGS_DIR/PORTAL_3348.log" -screen -S portals -X screen -t win18 -X bash -c "source ~/.bashrc; lavad rpcconsumer 127.0.0.1:3351 POLYGON1 jsonrpc $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug --from user1 2>&1 | tee $LOGS_DIR/PORTAL_3351.log" -screen -S portals -X screen -t win21 -X bash -c "source ~/.bashrc; lavad rpcconsumer 127.0.0.1:3362 OPTM jsonrpc $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug --from user1 2>&1 | tee $LOGS_DIR/PORTAL_OPTM.log" -screen -S portals -X screen -t win20 -X bash -c "source ~/.bashrc; lavad rpcconsumer 127.0.0.1:3361 BASET jsonrpc $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug --from user1 2>&1 | tee $LOGS_DIR/PORTAL_BASET.log" -# Cosmos-SDK based chains -screen -S portals -X screen -t win1 -X bash -c "source ~/.bashrc; lavad rpcconsumer 127.0.0.1:3334 COS3 rest 127.0.0.1:3335 COS3 tendermintrpc 127.0.0.1:3353 COS3 grpc $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug --from user1 2>&1 | tee $LOGS_DIR/PORTAL_COS3_3334.log" -screen -S portals -X screen -t win4 -X bash -c "source ~/.bashrc; lavad rpcconsumer 127.0.0.1:3337 COS4 rest 127.0.0.1:3338 COS4 tendermintrpc 127.0.0.1:3354 COS4 grpc $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug --from user1 2>&1 | tee $LOGS_DIR/PORTAL_COS4_3337.log" -screen -S portals -X screen -t win7 -X bash -c "source ~/.bashrc; lavad rpcconsumer 127.0.0.1:3340 LAV1 rest 127.0.0.1:3341 LAV1 tendermintrpc 127.0.0.1:3352 LAV1 grpc $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug --from user1 2>&1 | tee $LOGS_DIR/PORTAL_LAV1_3340.log" -screen -S portals -X screen -t win10 -X bash -c "source ~/.bashrc; lavad rpcconsumer 127.0.0.1:3343 COS5 rest 127.0.0.1:3344 COS5 tendermintrpc 127.0.0.1:3356 COS5 grpc $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug --from user1 2>&1 | tee $LOGS_DIR/PORTAL_3343.log" -screen -S portals -X screen -t win16 -X bash -c "source ~/.bashrc; lavad rpcconsumer 127.0.0.1:3349 JUN1 rest 127.0.0.1:3350 JUN1 tendermintrpc 127.0.0.1:3355 JUN1 grpc $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug --from user1 2>&1 | tee $LOGS_DIR/PORTAL_3349.log" -screen -S portals -X screen -t win19 -X bash -c "source ~/.bashrc; lavad rpcconsumer 127.0.0.1:3360 EVMOS jsonrpc 127.0.0.1:3357 EVMOS rest 127.0.0.1:3358 EVMOS tendermintrpc 127.0.0.1:3359 EVMOS grpc $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug --from user1 2>&1 | tee $LOGS_DIR/PORTAL_EVMOS.log" -screen -S portals -X screen -t win22 -X bash -c "source ~/.bashrc; lavad rpcconsumer 127.0.0.1:3363 CANTO jsonrpc 127.0.0.1:3364 CANTO rest 127.0.0.1:3365 CANTO tendermintrpc 127.0.0.1:3366 CANTO grpc $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug --from user1 2>&1 | tee $LOGS_DIR/PORTAL_CANTO.log" +PROVIDER1_LISTENER="127.0.0.1:2221" +PROVIDER2_LISTENER="127.0.0.1:2222" +PROVIDER3_LISTENER="127.0.0.1:2223" +#ETH providers +screen -d -m -S 
provider1 bash -c "source ~/.bashrc; lavad rpcprovider \ +$PROVIDER1_LISTENER ETH1 jsonrpc '$ETH_RPC_WS' \ +$PROVIDER1_LISTENER GTH1 jsonrpc '$GTH_RPC_WS' \ +$PROVIDER1_LISTENER FTM250 jsonrpc '$FTM_RPC_HTTP' \ +$PROVIDER1_LISTENER CELO jsonrpc '$CELO_HTTP' \ +$PROVIDER1_LISTENER ALFAJORES jsonrpc '$CELO_ALFAJORES_HTTP' \ +$PROVIDER1_LISTENER ARB1 jsonrpc '$ARB1_HTTP' \ +$PROVIDER1_LISTENER APT1 rest '$APTOS_REST' \ +$PROVIDER1_LISTENER STRK jsonrpc '$STARKNET_RPC' \ +$PROVIDER1_LISTENER POLYGON1 jsonrpc '$POLYGON_MAINNET_RPC' \ +$PROVIDER1_LISTENER OPTM jsonrpc '$OPTIMISM_RPC' \ +$PROVIDER1_LISTENER BASET jsonrpc '$BASE_GOERLI_RPC' \ +$PROVIDER1_LISTENER COS3 rest '$OSMO_REST' \ +$PROVIDER1_LISTENER COS3 tendermintrpc '$OSMO_RPC' \ +$PROVIDER1_LISTENER COS3 grpc '$OSMO_GRPC' \ +$PROVIDER1_LISTENER LAV1 rest '$LAVA_REST' \ +$PROVIDER1_LISTENER LAV1 tendermintrpc '$LAVA_RPC' \ +$PROVIDER1_LISTENER LAV1 grpc '$LAVA_GRPC' \ +$PROVIDER1_LISTENER COS5 rest '$GAIA_REST' \ +$PROVIDER1_LISTENER COS5 tendermintrpc '$GAIA_RPC' \ +$PROVIDER1_LISTENER COS5 grpc '$GAIA_GRPC' \ +$PROVIDER1_LISTENER JUN1 rest '$JUNO_REST' \ +$PROVIDER1_LISTENER JUN1 tendermintrpc '$JUNO_RPC' \ +$PROVIDER1_LISTENER JUN1 grpc '$JUNO_GRPC' \ +$PROVIDER1_LISTENER EVMOS jsonrpc '$EVMOS_RPC' \ +$PROVIDER1_LISTENER EVMOS tendermintrpc '$EVMOS_TENDERMINTRPC' \ +$PROVIDER1_LISTENER EVMOS rest '$EVMOS_REST' \ +$PROVIDER1_LISTENER EVMOS grpc '$EVMOS_GRPC' \ +$PROVIDER1_LISTENER CANTO jsonrpc '$CANTO_RPC' \ +$PROVIDER1_LISTENER CANTO tendermintrpc '$CANTO_TENDERMINT' \ +$PROVIDER1_LISTENER CANTO rest '$CANTO_REST' \ +$PROVIDER1_LISTENER CANTO grpc '$CANTO_GRPC' \ +$EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 2>&1 | tee $LOGS_DIR/PROVIDER1.log" && sleep 0.25 + +screen -d -m -S provider2 bash -c "source ~/.bashrc; lavad rpcprovider \ +$PROVIDER2_LISTENER ETH1 jsonrpc '$ETH_RPC_WS' \ +$PROVIDER2_LISTENER GTH1 jsonrpc '$GTH_RPC_WS' \ +$PROVIDER2_LISTENER FTM250 jsonrpc '$FTM_RPC_HTTP' \ +$PROVIDER2_LISTENER CELO jsonrpc '$CELO_HTTP' \ +$PROVIDER2_LISTENER ALFAJORES jsonrpc '$CELO_ALFAJORES_HTTP' \ +$PROVIDER2_LISTENER ARB1 jsonrpc '$ARB1_HTTP' \ +$PROVIDER2_LISTENER APT1 rest '$APTOS_REST' \ +$PROVIDER2_LISTENER STRK jsonrpc '$STARKNET_RPC' \ +$PROVIDER2_LISTENER POLYGON1 jsonrpc '$POLYGON_MAINNET_RPC' \ +$PROVIDER2_LISTENER OPTM jsonrpc '$OPTIMISM_RPC' \ +$PROVIDER2_LISTENER BASET jsonrpc '$BASE_GOERLI_RPC' \ +$PROVIDER2_LISTENER COS3 rest '$OSMO_REST' \ +$PROVIDER2_LISTENER COS3 tendermintrpc '$OSMO_RPC' \ +$PROVIDER2_LISTENER COS3 grpc '$OSMO_GRPC' \ +$PROVIDER2_LISTENER LAV1 rest '$LAVA_REST' \ +$PROVIDER2_LISTENER LAV1 tendermintrpc '$LAVA_RPC' \ +$PROVIDER2_LISTENER LAV1 grpc '$LAVA_GRPC' \ +$PROVIDER2_LISTENER COS5 rest '$GAIA_REST' \ +$PROVIDER2_LISTENER COS5 tendermintrpc '$GAIA_RPC' \ +$PROVIDER2_LISTENER COS5 grpc '$GAIA_GRPC' \ +$PROVIDER2_LISTENER JUN1 rest '$JUNO_REST' \ +$PROVIDER2_LISTENER JUN1 tendermintrpc '$JUNO_RPC' \ +$PROVIDER2_LISTENER JUN1 grpc '$JUNO_GRPC' \ +$PROVIDER2_LISTENER EVMOS jsonrpc '$EVMOS_RPC' \ +$PROVIDER2_LISTENER EVMOS tendermintrpc '$EVMOS_TENDERMINTRPC' \ +$PROVIDER2_LISTENER EVMOS rest '$EVMOS_REST' \ +$PROVIDER2_LISTENER EVMOS grpc '$EVMOS_GRPC' \ +$PROVIDER2_LISTENER CANTO jsonrpc '$CANTO_RPC' \ +$PROVIDER2_LISTENER CANTO tendermintrpc '$CANTO_TENDERMINT' \ +$PROVIDER2_LISTENER CANTO rest '$CANTO_REST' \ +$PROVIDER2_LISTENER CANTO grpc '$CANTO_GRPC' \ +$EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer2 2>&1 | tee $LOGS_DIR/PROVIDER2.log" && 
sleep 0.25 + +screen -d -m -S provider3 bash -c "source ~/.bashrc; lavad rpcprovider \ +$PROVIDER3_LISTENER ETH1 jsonrpc '$ETH_RPC_WS' \ +$PROVIDER3_LISTENER GTH1 jsonrpc '$GTH_RPC_WS' \ +$PROVIDER3_LISTENER FTM250 jsonrpc '$FTM_RPC_HTTP' \ +$PROVIDER3_LISTENER CELO jsonrpc '$CELO_HTTP' \ +$PROVIDER3_LISTENER ALFAJORES jsonrpc '$CELO_ALFAJORES_HTTP' \ +$PROVIDER3_LISTENER ARB1 jsonrpc '$ARB1_HTTP' \ +$PROVIDER3_LISTENER APT1 rest '$APTOS_REST' \ +$PROVIDER3_LISTENER STRK jsonrpc '$STARKNET_RPC' \ +$PROVIDER3_LISTENER POLYGON1 jsonrpc '$POLYGON_MAINNET_RPC' \ +$PROVIDER3_LISTENER OPTM jsonrpc '$OPTIMISM_RPC' \ +$PROVIDER3_LISTENER BASET jsonrpc '$BASE_GOERLI_RPC' \ +$PROVIDER3_LISTENER COS3 rest '$OSMO_REST' \ +$PROVIDER3_LISTENER COS3 tendermintrpc '$OSMO_RPC' \ +$PROVIDER3_LISTENER COS3 grpc '$OSMO_GRPC' \ +$PROVIDER3_LISTENER LAV1 rest '$LAVA_REST' \ +$PROVIDER3_LISTENER LAV1 tendermintrpc '$LAVA_RPC' \ +$PROVIDER3_LISTENER LAV1 grpc '$LAVA_GRPC' \ +$PROVIDER3_LISTENER COS5 rest '$GAIA_REST' \ +$PROVIDER3_LISTENER COS5 tendermintrpc '$GAIA_RPC' \ +$PROVIDER3_LISTENER COS5 grpc '$GAIA_GRPC' \ +$PROVIDER3_LISTENER JUN1 rest '$JUNO_REST' \ +$PROVIDER3_LISTENER JUN1 tendermintrpc '$JUNO_RPC' \ +$PROVIDER3_LISTENER JUN1 grpc '$JUNO_GRPC' \ +$PROVIDER3_LISTENER EVMOS jsonrpc '$EVMOS_RPC' \ +$PROVIDER3_LISTENER EVMOS tendermintrpc '$EVMOS_TENDERMINTRPC' \ +$PROVIDER3_LISTENER EVMOS rest '$EVMOS_REST' \ +$PROVIDER3_LISTENER EVMOS grpc '$EVMOS_GRPC' \ +$PROVIDER3_LISTENER CANTO jsonrpc '$CANTO_RPC' \ +$PROVIDER3_LISTENER CANTO tendermintrpc '$CANTO_TENDERMINT' \ +$PROVIDER3_LISTENER CANTO rest '$CANTO_REST' \ +$PROVIDER3_LISTENER CANTO grpc '$CANTO_GRPC' \ +$EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer3 2>&1 | tee $LOGS_DIR/PROVIDER3.log" && sleep 0.25 + +# Setup Portal +screen -d -m -S portals bash -c "source ~/.bashrc; lavad rpcconsumer \ +127.0.0.1:3333 ETH1 jsonrpc \ +127.0.0.1:3334 GTH1 jsonrpc \ +127.0.0.1:3335 FTM250 jsonrpc \ +127.0.0.1:3346 CELO jsonrpc \ +127.0.0.1:3347 ALFAJORES jsonrpc \ +127.0.0.1:3348 ARB1 jsonrpc \ +127.0.0.1:3349 STRK jsonrpc \ +127.0.0.1:3350 APT1 rest \ +127.0.0.1:3351 POLYGON1 jsonrpc \ +127.0.0.1:3352 OPTM jsonrpc \ +127.0.0.1:3353 BASET jsonrpc \ +127.0.0.1:3354 COS3 rest 127.0.0.1:3335 COS3 tendermintrpc 127.0.0.1:3353 COS3 grpc \ +127.0.0.1:3355 COS4 rest 127.0.0.1:3338 COS4 tendermintrpc 127.0.0.1:3354 COS4 grpc \ +127.0.0.1:3356 LAV1 rest 127.0.0.1:3341 LAV1 tendermintrpc 127.0.0.1:3352 LAV1 grpc \ +127.0.0.1:3357 COS5 rest 127.0.0.1:3344 COS5 tendermintrpc 127.0.0.1:3356 COS5 grpc \ +127.0.0.1:3358 JUN1 rest 127.0.0.1:3350 JUN1 tendermintrpc 127.0.0.1:3355 JUN1 grpc \ +127.0.0.1:3359 EVMOS jsonrpc 127.0.0.1:3357 EVMOS rest 127.0.0.1:3358 EVMOS tendermintrpc 127.0.0.1:3359 EVMOS grpc \ +127.0.0.1:3360 CANTO jsonrpc 127.0.0.1:3364 CANTO rest 127.0.0.1:3365 CANTO tendermintrpc 127.0.0.1:3366 CANTO grpc \ +$EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug --from user1 2>&1 | tee $LOGS_DIR/PORTAL.log" && sleep 0.25 echo "--- setting up screens done ---" screen -ls \ No newline at end of file From 5bc16d4c79f9b43573b0f0db4a11e606fb4b463a Mon Sep 17 00:00:00 2001 From: omer mishael Date: Sun, 5 Mar 2023 18:41:41 +0200 Subject: [PATCH 072/123] git ignore generated conf file --- .gitignore | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index cbbdc45511..240d084d15 100644 --- a/.gitignore +++ b/.gitignore @@ -26,4 +26,5 @@ testutil/debugging/ # Misc scripts/vars/ -rpcprovider.yml \ No 
newline at end of file +rpcprovider.yml +rpcconsumer.yml From dd5775f09a07a0662111c384ff5c5fc4e9346964 Mon Sep 17 00:00:00 2001 From: omer mishael Date: Sun, 5 Mar 2023 19:20:40 +0200 Subject: [PATCH 073/123] changed init chain commands, added some prints, modified fatals into errors on multiple chains so they won't kill the process --- protocol/chainlib/chainproxy/connector.go | 3 +- protocol/chaintracker/chain_tracker.go | 4 +- protocol/chaintracker/chain_tracker_test.go | 8 ++-- protocol/rpcprovider/rpcprovider.go | 8 ++-- protocol/statetracker/state_tracker.go | 2 +- scripts/init_chain_commands.sh | 5 ++- scripts/setup_providers.sh | 50 ++++++++++----------- 7 files changed, 42 insertions(+), 38 deletions(-) diff --git a/protocol/chainlib/chainproxy/connector.go b/protocol/chainlib/chainproxy/connector.go index a900597809..4a1ae5d126 100644 --- a/protocol/chainlib/chainproxy/connector.go +++ b/protocol/chainlib/chainproxy/connector.go @@ -106,6 +106,7 @@ func (connector *Connector) createConnection(ctx context.Context, addr string, c utils.LavaFormatWarning("Could not connect to the node, retrying", err, &map[string]string{ "Current Number Of Connections": strconv.FormatUint(uint64(currentNumberOfConnections), 10), "Number Of Attempts Remaining": strconv.Itoa(numberOfConnectionAttempts), + "Network Address": addr, }) cancel() continue @@ -151,7 +152,7 @@ func (connector *Connector) increaseNumberOfClients(ctx context.Context, numberO rpcClient, err = rpcclient.DialContext(nctx, connector.addr) if err != nil { utils.LavaFormatDebug( - "increaseNumberOfClients, Could not connect to the node, retrying", + "could not increase number of connections to the node jsonrpc connector, retrying", &map[string]string{"err": err.Error(), "Number Of Attempts": strconv.Itoa(connectionAttempt)}) cancel() continue diff --git a/protocol/chaintracker/chain_tracker.go b/protocol/chaintracker/chain_tracker.go index 94dd7708da..0c785b9a78 100644 --- a/protocol/chaintracker/chain_tracker.go +++ b/protocol/chaintracker/chain_tracker.go @@ -261,7 +261,7 @@ func (cs *ChainTracker) start(ctx context.Context, pollingBlockTime time.Duratio newLatestBlock, err := cs.fetchLatestBlockNum(ctx) if err != nil { - utils.LavaFormatFatal("could not fetchLatestBlockNum in ChainTracker", err, nil) + return utils.LavaFormatError("critical -- failed fetching data from the node, chain tracker creation error", err, &map[string]string{"endpoint": cs.endpoint.String()}) } cs.fetchAllPreviousBlocks(ctx, newLatestBlock) // Polls blocks and keeps a queue of them @@ -341,7 +341,7 @@ func (ct *ChainTracker) serve(ctx context.Context, listenAddr string) error { return nil } -func New(ctx context.Context, chainFetcher ChainFetcher, config ChainTrackerConfig) (chainTracker *ChainTracker, err error) { +func NewChainTracker(ctx context.Context, chainFetcher ChainFetcher, config ChainTrackerConfig) (chainTracker *ChainTracker, err error) { err = config.validate() if err != nil { return nil, err diff --git a/protocol/chaintracker/chain_tracker_test.go b/protocol/chaintracker/chain_tracker_test.go index 1064605e54..53ff7214e4 100644 --- a/protocol/chaintracker/chain_tracker_test.go +++ b/protocol/chaintracker/chain_tracker_test.go @@ -125,7 +125,7 @@ func TestChainTracker(t *testing.T) { currentLatestBlockInMock := mockChainFetcher.AdvanceBlock() chainTrackerConfig := chaintracker.ChainTrackerConfig{BlocksToSave: uint64(tt.fetcherBlocks), AverageBlockTime: TimeForPollingMock, ServerBlockMemory: uint64(tt.mockBlocks)} - chainTracker, err := 
chaintracker.New(context.Background(), mockChainFetcher, chainTrackerConfig) + chainTracker, err := chaintracker.NewChainTracker(context.Background(), mockChainFetcher, chainTrackerConfig) require.NoError(t, err) for _, advancement := range tt.advancements { for i := 0; i < int(advancement); i++ { @@ -182,7 +182,7 @@ func TestChainTrackerRangeOnly(t *testing.T) { currentLatestBlockInMock := mockChainFetcher.AdvanceBlock() chainTrackerConfig := chaintracker.ChainTrackerConfig{BlocksToSave: uint64(tt.fetcherBlocks), AverageBlockTime: TimeForPollingMock, ServerBlockMemory: uint64(tt.mockBlocks)} - chainTracker, err := chaintracker.New(context.Background(), mockChainFetcher, chainTrackerConfig) + chainTracker, err := chaintracker.NewChainTracker(context.Background(), mockChainFetcher, chainTrackerConfig) require.NoError(t, err) for _, advancement := range tt.advancements { for i := 0; i < int(advancement); i++ { @@ -262,7 +262,7 @@ func TestChainTrackerCallbacks(t *testing.T) { callbackCalledNewLatest = true } chainTrackerConfig := chaintracker.ChainTrackerConfig{BlocksToSave: uint64(fetcherBlocks), AverageBlockTime: TimeForPollingMock, ServerBlockMemory: uint64(mockBlocks), ForkCallback: forkCallback, NewLatestCallback: newBlockCallback} - chainTracker, err := chaintracker.New(context.Background(), mockChainFetcher, chainTrackerConfig) + chainTracker, err := chaintracker.NewChainTracker(context.Background(), mockChainFetcher, chainTrackerConfig) require.NoError(t, err) t.Run("one long test", func(t *testing.T) { for _, tt := range tests { @@ -344,7 +344,7 @@ func TestChainTrackerMaintainMemory(t *testing.T) { callbackCalledFork = true } chainTrackerConfig := chaintracker.ChainTrackerConfig{BlocksToSave: uint64(fetcherBlocks), AverageBlockTime: TimeForPollingMock, ServerBlockMemory: uint64(mockBlocks), ForkCallback: forkCallback} - chainTracker, err := chaintracker.New(context.Background(), mockChainFetcher, chainTrackerConfig) + chainTracker, err := chaintracker.NewChainTracker(context.Background(), mockChainFetcher, chainTrackerConfig) require.NoError(t, err) t.Run("one long test", func(t *testing.T) { for _, tt := range tests { diff --git a/protocol/rpcprovider/rpcprovider.go b/protocol/rpcprovider/rpcprovider.go index 7d03f73109..5550675e34 100644 --- a/protocol/rpcprovider/rpcprovider.go +++ b/protocol/rpcprovider/rpcprovider.go @@ -112,7 +112,8 @@ func (rpcp *RPCProvider) Start(ctx context.Context, txFactory tx.Factory, client _, averageBlockTime, _, _ := chainParser.ChainBlockStats() chainProxy, err := chainlib.GetChainProxy(ctx, parallelConnections, rpcProviderEndpoint, averageBlockTime) if err != nil { - utils.LavaFormatFatal("failed creating chain proxy", err, &map[string]string{"parallelConnections": strconv.FormatUint(uint64(parallelConnections), 10), "rpcProviderEndpoint": fmt.Sprintf("%+v", rpcProviderEndpoint)}) + utils.LavaFormatError("panic severity critical error, failed creating chain proxy, continuing with others", err, &map[string]string{"parallelConnections": strconv.FormatUint(uint64(parallelConnections), 10), "rpcProviderEndpoint": fmt.Sprintf("%+v", rpcProviderEndpoint)}) + continue } _, averageBlockTime, blocksToFinalization, blocksInFinalizationData := chainParser.ChainBlockStats() @@ -123,9 +124,10 @@ func (rpcp *RPCProvider) Start(ctx context.Context, txFactory tx.Factory, client ServerBlockMemory: ChainTrackerDefaultMemory + blocksToSaveChainTracker, } chainFetcher := chainlib.NewChainFetcher(ctx, chainProxy, chainParser, rpcProviderEndpoint) - chainTracker, err 
:= chaintracker.New(ctx, chainFetcher, chainTrackerConfig) + chainTracker, err := chaintracker.NewChainTracker(ctx, chainFetcher, chainTrackerConfig) if err != nil { - utils.LavaFormatFatal("failed creating chain tracker", err, &map[string]string{"chainTrackerConfig": fmt.Sprintf("%+v", chainTrackerConfig)}) + utils.LavaFormatError("panic severity critical error, aborting support for chain due to node access, continuing with others", err, &map[string]string{"chainTrackerConfig": fmt.Sprintf("%+v", chainTrackerConfig), "endpoint": rpcProviderEndpoint.String()}) + continue } reliabilityManager := reliabilitymanager.NewReliabilityManager(chainTracker, providerStateTracker, addr.String(), chainProxy, chainParser) providerStateTracker.RegisterReliabilityManagerForVoteUpdates(ctx, reliabilityManager, rpcProviderEndpoint) diff --git a/protocol/statetracker/state_tracker.go b/protocol/statetracker/state_tracker.go index 589f9b5493..fc10c7a186 100644 --- a/protocol/statetracker/state_tracker.go +++ b/protocol/statetracker/state_tracker.go @@ -40,7 +40,7 @@ func NewStateTracker(ctx context.Context, txFactory tx.Factory, clientCtx client AverageBlockTime: time.Duration(resultConsensusParams.ConsensusParams.Block.TimeIotaMs) * time.Millisecond, ServerBlockMemory: BlocksToSaveLavaChainTracker, } - cst.chainTracker, err = chaintracker.New(ctx, chainFetcher, chainTrackerConfig) + cst.chainTracker, err = chaintracker.NewChainTracker(ctx, chainFetcher, chainTrackerConfig) return cst, err } diff --git a/scripts/init_chain_commands.sh b/scripts/init_chain_commands.sh index 695e6aa8b0..d6500dc993 100755 --- a/scripts/init_chain_commands.sh +++ b/scripts/init_chain_commands.sh @@ -25,6 +25,7 @@ lavad tx gov vote 4 yes -y --from alice --gas-adjustment "1.5" --gas "auto" --ga CLIENTSTAKE="500000000000ulava" PROVIDERSTAKE="500000000000ulava" + PROVIDER1_LISTENER="127.0.0.1:2221" PROVIDER2_LISTENER="127.0.0.1:2222" PROVIDER3_LISTENER="127.0.0.1:2223" @@ -140,8 +141,8 @@ lavad tx pairing stake-provider "EVMOS" $PROVIDERSTAKE "$PROVIDER3_LISTENER,json # canto Providers lavad tx pairing stake-provider "CANTO" $PROVIDERSTAKE "$PROVIDER1_LISTENER,jsonrpc,1 $PROVIDER1_LISTENER,tendermintrpc,1 $PROVIDER1_LISTENER,rest,1 $PROVIDER1_LISTENER,grpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-provider "CANTO" $PROVIDERSTAKE "$PROVIDER1_LISTENER,jsonrpc,1 $PROVIDER1_LISTENER,tendermintrpc,1 $PROVIDER1_LISTENER,rest,1 $PROVIDER1_LISTENER,grpc,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-provider "CANTO" $PROVIDERSTAKE "$PROVIDER1_LISTENER,jsonrpc,1 $PROVIDER1_LISTENER,tendermintrpc,1 $PROVIDER1_LISTENER,rest,1 $PROVIDER1_LISTENER,grpc,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "CANTO" $PROVIDERSTAKE "$PROVIDER2_LISTENER,jsonrpc,1 $PROVIDER2_LISTENER,tendermintrpc,1 $PROVIDER2_LISTENER,rest,1 $PROVIDER2_LISTENER,grpc,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "CANTO" $PROVIDERSTAKE "$PROVIDER3_LISTENER,jsonrpc,1 $PROVIDER3_LISTENER,tendermintrpc,1 $PROVIDER3_LISTENER,rest,1 $PROVIDER3_LISTENER,grpc,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE echo "---------------Queries------------------" lavad query pairing providers "ETH1" diff --git a/scripts/setup_providers.sh b/scripts/setup_providers.sh index 
8d88964c20..b37adcecf3 100755 --- a/scripts/setup_providers.sh +++ b/scripts/setup_providers.sh @@ -28,23 +28,23 @@ $PROVIDER1_LISTENER POLYGON1 jsonrpc '$POLYGON_MAINNET_RPC' \ $PROVIDER1_LISTENER OPTM jsonrpc '$OPTIMISM_RPC' \ $PROVIDER1_LISTENER BASET jsonrpc '$BASE_GOERLI_RPC' \ $PROVIDER1_LISTENER COS3 rest '$OSMO_REST' \ -$PROVIDER1_LISTENER COS3 tendermintrpc '$OSMO_RPC' \ +$PROVIDER1_LISTENER COS3 tendermintrpc '$OSMO_RPC,$OSMO_RPC' \ $PROVIDER1_LISTENER COS3 grpc '$OSMO_GRPC' \ $PROVIDER1_LISTENER LAV1 rest '$LAVA_REST' \ -$PROVIDER1_LISTENER LAV1 tendermintrpc '$LAVA_RPC' \ +$PROVIDER1_LISTENER LAV1 tendermintrpc '$LAVA_RPC,$LAVA_RPC' \ $PROVIDER1_LISTENER LAV1 grpc '$LAVA_GRPC' \ $PROVIDER1_LISTENER COS5 rest '$GAIA_REST' \ -$PROVIDER1_LISTENER COS5 tendermintrpc '$GAIA_RPC' \ +$PROVIDER1_LISTENER COS5 tendermintrpc '$GAIA_RPC,$GAIA_RPC' \ $PROVIDER1_LISTENER COS5 grpc '$GAIA_GRPC' \ $PROVIDER1_LISTENER JUN1 rest '$JUNO_REST' \ -$PROVIDER1_LISTENER JUN1 tendermintrpc '$JUNO_RPC' \ +$PROVIDER1_LISTENER JUN1 tendermintrpc '$JUNO_RPC,$JUNO_RPC' \ $PROVIDER1_LISTENER JUN1 grpc '$JUNO_GRPC' \ $PROVIDER1_LISTENER EVMOS jsonrpc '$EVMOS_RPC' \ -$PROVIDER1_LISTENER EVMOS tendermintrpc '$EVMOS_TENDERMINTRPC' \ +$PROVIDER1_LISTENER EVMOS tendermintrpc '$EVMOS_TENDERMINTRPC,$EVMOS_TENDERMINTRPC' \ $PROVIDER1_LISTENER EVMOS rest '$EVMOS_REST' \ $PROVIDER1_LISTENER EVMOS grpc '$EVMOS_GRPC' \ $PROVIDER1_LISTENER CANTO jsonrpc '$CANTO_RPC' \ -$PROVIDER1_LISTENER CANTO tendermintrpc '$CANTO_TENDERMINT' \ +$PROVIDER1_LISTENER CANTO tendermintrpc '$CANTO_TENDERMINT,$CANTO_TENDERMINT' \ $PROVIDER1_LISTENER CANTO rest '$CANTO_REST' \ $PROVIDER1_LISTENER CANTO grpc '$CANTO_GRPC' \ $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 2>&1 | tee $LOGS_DIR/PROVIDER1.log" && sleep 0.25 @@ -62,23 +62,23 @@ $PROVIDER2_LISTENER POLYGON1 jsonrpc '$POLYGON_MAINNET_RPC' \ $PROVIDER2_LISTENER OPTM jsonrpc '$OPTIMISM_RPC' \ $PROVIDER2_LISTENER BASET jsonrpc '$BASE_GOERLI_RPC' \ $PROVIDER2_LISTENER COS3 rest '$OSMO_REST' \ -$PROVIDER2_LISTENER COS3 tendermintrpc '$OSMO_RPC' \ +$PROVIDER2_LISTENER COS3 tendermintrpc '$OSMO_RPC,$OSMO_RPC' \ $PROVIDER2_LISTENER COS3 grpc '$OSMO_GRPC' \ $PROVIDER2_LISTENER LAV1 rest '$LAVA_REST' \ -$PROVIDER2_LISTENER LAV1 tendermintrpc '$LAVA_RPC' \ +$PROVIDER2_LISTENER LAV1 tendermintrpc '$LAVA_RPC,$LAVA_RPC' \ $PROVIDER2_LISTENER LAV1 grpc '$LAVA_GRPC' \ $PROVIDER2_LISTENER COS5 rest '$GAIA_REST' \ -$PROVIDER2_LISTENER COS5 tendermintrpc '$GAIA_RPC' \ +$PROVIDER2_LISTENER COS5 tendermintrpc '$GAIA_RPC,$GAIA_RPC' \ $PROVIDER2_LISTENER COS5 grpc '$GAIA_GRPC' \ $PROVIDER2_LISTENER JUN1 rest '$JUNO_REST' \ -$PROVIDER2_LISTENER JUN1 tendermintrpc '$JUNO_RPC' \ +$PROVIDER2_LISTENER JUN1 tendermintrpc '$JUNO_RPC,$JUNO_RPC' \ $PROVIDER2_LISTENER JUN1 grpc '$JUNO_GRPC' \ $PROVIDER2_LISTENER EVMOS jsonrpc '$EVMOS_RPC' \ -$PROVIDER2_LISTENER EVMOS tendermintrpc '$EVMOS_TENDERMINTRPC' \ +$PROVIDER2_LISTENER EVMOS tendermintrpc '$EVMOS_TENDERMINTRPC,$EVMOS_TENDERMINTRPC' \ $PROVIDER2_LISTENER EVMOS rest '$EVMOS_REST' \ $PROVIDER2_LISTENER EVMOS grpc '$EVMOS_GRPC' \ $PROVIDER2_LISTENER CANTO jsonrpc '$CANTO_RPC' \ -$PROVIDER2_LISTENER CANTO tendermintrpc '$CANTO_TENDERMINT' \ +$PROVIDER2_LISTENER CANTO tendermintrpc '$CANTO_TENDERMINT,$CANTO_TENDERMINT' \ $PROVIDER2_LISTENER CANTO rest '$CANTO_REST' \ $PROVIDER2_LISTENER CANTO grpc '$CANTO_GRPC' \ $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer2 2>&1 | tee $LOGS_DIR/PROVIDER2.log" && sleep 0.25 @@ 
-96,23 +96,23 @@ $PROVIDER3_LISTENER POLYGON1 jsonrpc '$POLYGON_MAINNET_RPC' \ $PROVIDER3_LISTENER OPTM jsonrpc '$OPTIMISM_RPC' \ $PROVIDER3_LISTENER BASET jsonrpc '$BASE_GOERLI_RPC' \ $PROVIDER3_LISTENER COS3 rest '$OSMO_REST' \ -$PROVIDER3_LISTENER COS3 tendermintrpc '$OSMO_RPC' \ +$PROVIDER3_LISTENER COS3 tendermintrpc '$OSMO_RPC,$OSMO_RPC' \ $PROVIDER3_LISTENER COS3 grpc '$OSMO_GRPC' \ $PROVIDER3_LISTENER LAV1 rest '$LAVA_REST' \ -$PROVIDER3_LISTENER LAV1 tendermintrpc '$LAVA_RPC' \ +$PROVIDER3_LISTENER LAV1 tendermintrpc '$LAVA_RPC,$LAVA_RPC' \ $PROVIDER3_LISTENER LAV1 grpc '$LAVA_GRPC' \ $PROVIDER3_LISTENER COS5 rest '$GAIA_REST' \ -$PROVIDER3_LISTENER COS5 tendermintrpc '$GAIA_RPC' \ +$PROVIDER3_LISTENER COS5 tendermintrpc '$GAIA_RPC,$GAIA_RPC' \ $PROVIDER3_LISTENER COS5 grpc '$GAIA_GRPC' \ $PROVIDER3_LISTENER JUN1 rest '$JUNO_REST' \ -$PROVIDER3_LISTENER JUN1 tendermintrpc '$JUNO_RPC' \ +$PROVIDER3_LISTENER JUN1 tendermintrpc '$JUNO_RPC,$JUNO_RPC' \ $PROVIDER3_LISTENER JUN1 grpc '$JUNO_GRPC' \ $PROVIDER3_LISTENER EVMOS jsonrpc '$EVMOS_RPC' \ -$PROVIDER3_LISTENER EVMOS tendermintrpc '$EVMOS_TENDERMINTRPC' \ +$PROVIDER3_LISTENER EVMOS tendermintrpc '$EVMOS_TENDERMINTRPC,$EVMOS_TENDERMINTRPC' \ $PROVIDER3_LISTENER EVMOS rest '$EVMOS_REST' \ $PROVIDER3_LISTENER EVMOS grpc '$EVMOS_GRPC' \ $PROVIDER3_LISTENER CANTO jsonrpc '$CANTO_RPC' \ -$PROVIDER3_LISTENER CANTO tendermintrpc '$CANTO_TENDERMINT' \ +$PROVIDER3_LISTENER CANTO tendermintrpc '$CANTO_TENDERMINT,$CANTO_TENDERMINT' \ $PROVIDER3_LISTENER CANTO rest '$CANTO_REST' \ $PROVIDER3_LISTENER CANTO grpc '$CANTO_GRPC' \ $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer3 2>&1 | tee $LOGS_DIR/PROVIDER3.log" && sleep 0.25 @@ -130,13 +130,13 @@ screen -d -m -S portals bash -c "source ~/.bashrc; lavad rpcconsumer \ 127.0.0.1:3351 POLYGON1 jsonrpc \ 127.0.0.1:3352 OPTM jsonrpc \ 127.0.0.1:3353 BASET jsonrpc \ -127.0.0.1:3354 COS3 rest 127.0.0.1:3335 COS3 tendermintrpc 127.0.0.1:3353 COS3 grpc \ -127.0.0.1:3355 COS4 rest 127.0.0.1:3338 COS4 tendermintrpc 127.0.0.1:3354 COS4 grpc \ -127.0.0.1:3356 LAV1 rest 127.0.0.1:3341 LAV1 tendermintrpc 127.0.0.1:3352 LAV1 grpc \ -127.0.0.1:3357 COS5 rest 127.0.0.1:3344 COS5 tendermintrpc 127.0.0.1:3356 COS5 grpc \ -127.0.0.1:3358 JUN1 rest 127.0.0.1:3350 JUN1 tendermintrpc 127.0.0.1:3355 JUN1 grpc \ -127.0.0.1:3359 EVMOS jsonrpc 127.0.0.1:3357 EVMOS rest 127.0.0.1:3358 EVMOS tendermintrpc 127.0.0.1:3359 EVMOS grpc \ -127.0.0.1:3360 CANTO jsonrpc 127.0.0.1:3364 CANTO rest 127.0.0.1:3365 CANTO tendermintrpc 127.0.0.1:3366 CANTO grpc \ +127.0.0.1:3354 COS3 rest 127.0.0.1:3355 COS3 tendermintrpc 127.0.0.1:3356 COS3 grpc \ +127.0.0.1:3357 COS4 rest 127.0.0.1:3358 COS4 tendermintrpc 127.0.0.1:3359 COS4 grpc \ +127.0.0.1:3360 LAV1 rest 127.0.0.1:3361 LAV1 tendermintrpc 127.0.0.1:3362 LAV1 grpc \ +127.0.0.1:3363 COS5 rest 127.0.0.1:3364 COS5 tendermintrpc 127.0.0.1:3365 COS5 grpc \ +127.0.0.1:3366 JUN1 rest 127.0.0.1:3367 JUN1 tendermintrpc 127.0.0.1:3368 JUN1 grpc \ +127.0.0.1:3369 EVMOS jsonrpc 127.0.0.1:3370 EVMOS rest 127.0.0.1:3371 EVMOS tendermintrpc 127.0.0.1:3372 EVMOS grpc \ +127.0.0.1:3373 CANTO jsonrpc 127.0.0.1:3374 CANTO rest 127.0.0.1:3375 CANTO tendermintrpc 127.0.0.1:3376 CANTO grpc \ $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug --from user1 2>&1 | tee $LOGS_DIR/PORTAL.log" && sleep 0.25 echo "--- setting up screens done ---" From 488c5d30cdfda4d65f2c6bd7585a491d078e9481 Mon Sep 17 00:00:00 2001 From: omer mishael Date: Sun, 5 Mar 2023 20:22:04 +0200 Subject: [PATCH 
074/123] removed check for websocket on tendermint until we decide what to do --- protocol/chainlib/common.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/protocol/chainlib/common.go b/protocol/chainlib/common.go index b47f8e60ab..e50c9898f2 100644 --- a/protocol/chainlib/common.go +++ b/protocol/chainlib/common.go @@ -189,8 +189,11 @@ func verifyTendermintEndpoint(endpoints []string) (websocketEndpoint string, htt } if websocketEndpoint == "" || httpEndpoint == "" { - utils.LavaFormatFatal("Tendermint Provider was not provided with both http and websocket urls. please provide both", nil, + utils.LavaFormatError("Tendermint Provider was not provided with both http and websocket urls. please provide both", nil, &map[string]string{"websocket": websocketEndpoint, "http": httpEndpoint}) + if httpEndpoint != "" { + return httpEndpoint, httpEndpoint + } } return websocketEndpoint, httpEndpoint } From 8799612a98ffe30a04e1e764617418b9b7e98771 Mon Sep 17 00:00:00 2001 From: omer mishael Date: Mon, 6 Mar 2023 03:22:37 +0200 Subject: [PATCH 075/123] better handling of missing urls and endpoints definitions --- protocol/chainlib/chain_fetcher.go | 5 ++- protocol/chainlib/chainlib.go | 10 ++--- protocol/chainlib/common.go | 2 + protocol/chainlib/jsonRPC.go | 1 + protocol/chaintracker/chain_tracker.go | 35 ++++++++++++++--- protocol/common/endpoints.go | 44 ++++++++++++++++++++++ protocol/lavaprotocol/request_builder.go | 8 +--- protocol/lavasession/provider_types.go | 16 +++++++- protocol/rpcconsumer/rpcconsumer_server.go | 6 +-- protocol/rpcprovider/rpcprovider.go | 18 +++++++-- utils/lavalog.go | 9 +++++ 11 files changed, 129 insertions(+), 25 deletions(-) create mode 100644 protocol/common/endpoints.go diff --git a/protocol/chainlib/chain_fetcher.go b/protocol/chainlib/chain_fetcher.go index 8e4ed7ce3a..799628e21a 100644 --- a/protocol/chainlib/chain_fetcher.go +++ b/protocol/chainlib/chain_fetcher.go @@ -97,11 +97,14 @@ func (cf *ChainFetcher) FetchBlockHashByNum(ctx context.Context, blockNum int64) func (cf *ChainFetcher) formatResponseForParsing(reply *types.RelayReply, chainMessage ChainMessageForSend) (parsable parser.RPCInput, err error) { var parserInput parser.RPCInput respData := reply.Data + if len(respData) == 0 { + return nil, utils.LavaFormatError("result (reply.Data) is empty, can't be formatted for parsing", err, &map[string]string{"chainID": cf.endpoint.ChainID, "APIInterface": cf.endpoint.ApiInterface}) + } rpcMessage := chainMessage.GetRPCMessage() if customParsingMessage, ok := rpcMessage.(chainproxy.CustomParsingMessage); ok { parserInput, err = customParsingMessage.NewParsableRPCInput(respData) if err != nil { - return nil, utils.LavaFormatError(spectypes.GET_BLOCK_BY_NUM+" failed creating NewParsableRPCInput from CustomParsingMessage", err, &map[string]string{"chainID": cf.endpoint.ChainID, "APIInterface": cf.endpoint.ApiInterface}) + return nil, utils.LavaFormatError("failed creating NewParsableRPCInput from CustomParsingMessage", err, &map[string]string{"chainID": cf.endpoint.ChainID, "APIInterface": cf.endpoint.ApiInterface}) } } else { parserInput = chainproxy.DefaultParsableRPCInput(respData) diff --git a/protocol/chainlib/chainlib.go b/protocol/chainlib/chainlib.go index fa26e6fc4e..7a8ac1e924 100644 --- a/protocol/chainlib/chainlib.go +++ b/protocol/chainlib/chainlib.go @@ -15,10 +15,10 @@ import ( ) const ( - DefaultTimeout = 5 * time.Second - TimePerCU = uint64(100 * time.Millisecond) - MinimumTimePerRelayDelay = time.Second - 
AverageWorldLatency = 200 * time.Millisecond + TimePerCU = uint64(100 * time.Millisecond) + MinimumTimePerRelayDelay = time.Second + AverageWorldLatency = 300 * time.Millisecond + DataReliabilityTimeoutIncrease = 5 * time.Second ) func NewChainParser(apiInterface string) (chainParser ChainParser, err error) { @@ -103,5 +103,5 @@ func GetChainProxy(ctx context.Context, nConns uint, rpcProviderEndpoint *lavase } func LocalNodeTimePerCu(cu uint64) time.Duration { - return time.Duration(cu * TimePerCU) + return time.Duration(cu*TimePerCU) + AverageWorldLatency // TODO: remove average world latency once our providers run locally, or allow a flag that says local to make it tight, tighter timeouts are better } diff --git a/protocol/chainlib/common.go b/protocol/chainlib/common.go index e50c9898f2..cfdc45cc00 100644 --- a/protocol/chainlib/common.go +++ b/protocol/chainlib/common.go @@ -193,6 +193,8 @@ func verifyTendermintEndpoint(endpoints []string) (websocketEndpoint string, htt &map[string]string{"websocket": websocketEndpoint, "http": httpEndpoint}) if httpEndpoint != "" { return httpEndpoint, httpEndpoint + } else { + utils.LavaFormatFatal("Tendermint Provider was not provided with http url. please provide a url that starts with http/https", nil, nil) } } return websocketEndpoint, httpEndpoint diff --git a/protocol/chainlib/jsonRPC.go b/protocol/chainlib/jsonRPC.go index 4891d0f923..7e3359d881 100644 --- a/protocol/chainlib/jsonRPC.go +++ b/protocol/chainlib/jsonRPC.go @@ -394,6 +394,7 @@ func (cp *JrpcChainProxy) SendNodeMsg(ctx context.Context, ch chan interface{}, var replyMsg rpcInterfaceMessages.JsonrpcMessage // the error check here would only wrap errors not from the rpc if err != nil { + utils.LavaFormatDebug("received an error from SendNodeMsg", &map[string]string{"error": err.Error()}) replyMsg = rpcInterfaceMessages.JsonrpcMessage{ Version: nodeMessage.Version, ID: nodeMessage.ID, diff --git a/protocol/chaintracker/chain_tracker.go b/protocol/chaintracker/chain_tracker.go index 0c785b9a78..bd36b1050e 100644 --- a/protocol/chaintracker/chain_tracker.go +++ b/protocol/chaintracker/chain_tracker.go @@ -21,6 +21,10 @@ import ( grpc "google.golang.org/grpc" ) +const ( + initRetriesCount = 3 +) + type ChainFetcher interface { FetchLatestBlockNum(ctx context.Context) (int64, error) FetchBlockHashByNum(ctx context.Context, blockNum int64) (string, error) @@ -224,12 +228,12 @@ func (cs *ChainTracker) gotNewBlock(ctx context.Context, newLatestBlock int64) ( func (cs *ChainTracker) fetchAllPreviousBlocksIfNecessary(ctx context.Context) (err error) { newLatestBlock, err := cs.fetchLatestBlockNum(ctx) if err != nil { - return utils.LavaFormatError("could not fetchLatestBlockNum in ChainTracker", err, nil) + return utils.LavaFormatError("could not fetchLatestBlockNum in ChainTracker", err, &map[string]string{"endpoint": cs.endpoint.String()}) } gotNewBlock := cs.gotNewBlock(ctx, newLatestBlock) forked, err := cs.forkChanged(ctx, newLatestBlock) if err != nil { - return utils.LavaFormatError("could not fetchLatestBlock Hash in ChainTracker", err, &map[string]string{"block": strconv.FormatInt(newLatestBlock, 10)}) + return utils.LavaFormatError("could not fetchLatestBlock Hash in ChainTracker", err, &map[string]string{"block": strconv.FormatInt(newLatestBlock, 10), "endpoint": cs.endpoint.String()}) } if gotNewBlock || forked { // utils.LavaFormatDebug("ChainTracker should update state", &map[string]string{"gotNewBlock": fmt.Sprintf("%t", gotNewBlock), "forked": fmt.Sprintf("%t", forked), 
"newLatestBlock": strconv.FormatInt(newLatestBlock, 10), "currentBlock": strconv.FormatInt(cs.GetLatestBlockNum(), 10)}) @@ -255,15 +259,14 @@ func (cs *ChainTracker) fetchAllPreviousBlocksIfNecessary(ctx context.Context) ( // this function starts the fetching timer periodically checking by polling if updates are necessary func (cs *ChainTracker) start(ctx context.Context, pollingBlockTime time.Duration) error { // how often to query latest block. - // TODO: subscribe instead of repeatedly fetching // TODO: improve the polling time, we don't need to poll the first half of every block change ticker := time.NewTicker(pollingBlockTime / 10) // divide here so we don't miss new blocks by all that much - newLatestBlock, err := cs.fetchLatestBlockNum(ctx) + err := cs.fetchInitDataWithRetry(ctx) if err != nil { - return utils.LavaFormatError("critical -- failed fetching data from the node, chain tracker creation error", err, &map[string]string{"endpoint": cs.endpoint.String()}) + return err } - cs.fetchAllPreviousBlocks(ctx, newLatestBlock) + // Polls blocks and keeps a queue of them go func() { for { @@ -283,6 +286,26 @@ func (cs *ChainTracker) start(ctx context.Context, pollingBlockTime time.Duratio return nil } +func (cs *ChainTracker) fetchInitDataWithRetry(ctx context.Context) (err error) { + newLatestBlock, err := cs.fetchLatestBlockNum(ctx) + for idx := 0; idx < initRetriesCount && err != nil; idx++ { + utils.LavaFormatDebug("failed fetching block num data on chain tracker init, retry", &map[string]string{"retry Num": strconv.Itoa(idx), "endpoint": cs.endpoint.String()}) + newLatestBlock, err = cs.fetchLatestBlockNum(ctx) + } + if err != nil { + return utils.LavaFormatError("critical -- failed fetching data from the node, chain tracker creation error", err, &map[string]string{"endpoint": cs.endpoint.String()}) + } + err = cs.fetchAllPreviousBlocks(ctx, newLatestBlock) + for idx := 0; idx < initRetriesCount && err != nil; idx++ { + utils.LavaFormatDebug("failed fetching data on chain tracker init, retry", &map[string]string{"retry Num": strconv.Itoa(idx), "endpoint": cs.endpoint.String()}) + err = cs.fetchAllPreviousBlocks(ctx, newLatestBlock) + } + if err != nil { + return utils.LavaFormatError("critical -- failed fetching data from the node, chain tracker creation error", err, &map[string]string{"endpoint": cs.endpoint.String()}) + } + return nil +} + // this function serves a grpc server if configuration for it was provided, the goal is to enable stateTracker to serve several processes and minimize node queries func (ct *ChainTracker) serve(ctx context.Context, listenAddr string) error { if listenAddr == "" { diff --git a/protocol/common/endpoints.go b/protocol/common/endpoints.go new file mode 100644 index 0000000000..b308026f36 --- /dev/null +++ b/protocol/common/endpoints.go @@ -0,0 +1,44 @@ +package common + +import ( + "net/url" + + "github.com/lavanet/lava/utils" + spectypes "github.com/lavanet/lava/x/spec/types" +) + +func ValidateEndpoint(endpoint string, apiInterface string) error { + switch apiInterface { + case spectypes.APIInterfaceJsonRPC, spectypes.APIInterfaceTendermintRPC, spectypes.APIInterfaceRest: + parsedUrl, err := url.Parse(endpoint) + if err != nil { + return utils.LavaFormatError("could not parse node url", err, &map[string]string{"url": endpoint, "apiInterface": apiInterface}) + } + switch parsedUrl.Scheme { + case "http", "https": + return nil + case "ws", "wss": + return nil + default: + return utils.LavaFormatError("URL scheme should be websocket (ws/wss) or 
(http/https), got: "+parsedUrl.Scheme, nil, &map[string]string{"apiInterface": apiInterface}) + } + case spectypes.APIInterfaceGrpc: + parsedUrl, err := url.Parse(endpoint) + if err == nil { + // user provided a valid url with a scheme + if parsedUrl.Scheme != "" { + return utils.LavaFormatError("grpc URL scheme should be empty and it is not, usage example: 127.0.0.1:9090 or my-node.com/grpc", nil, &map[string]string{"apiInterface": apiInterface, "scheme": parsedUrl.Scheme}) + } + return nil + } else { + // user provided no scheme, make sure before returning its correct + _, err = url.Parse("//" + endpoint) + if err == nil { + return nil + } + return utils.LavaFormatError("invalid grpc URL, usage example: 127.0.0.1:9090 or my-node.com/grpc", nil, &map[string]string{"apiInterface": apiInterface, "url": endpoint}) + } + default: + return utils.LavaFormatError("unsupported apiInterface", nil, &map[string]string{"apiInterface": apiInterface}) + } +} diff --git a/protocol/lavaprotocol/request_builder.go b/protocol/lavaprotocol/request_builder.go index 87d0205253..276aabdb67 100644 --- a/protocol/lavaprotocol/request_builder.go +++ b/protocol/lavaprotocol/request_builder.go @@ -18,11 +18,7 @@ import ( ) const ( - TimePerCU = uint64(100 * time.Millisecond) - MinimumTimePerRelayDelay = time.Second - AverageWorldLatency = 200 * time.Millisecond - DataReliabilityTimeoutIncrease = 5 * time.Second - SupportedNumberOfVRFs = 2 + SupportedNumberOfVRFs = 2 ) type RelayRequestCommonData struct { @@ -79,7 +75,7 @@ func ConstructRelayRequest(ctx context.Context, privKey *btcec.PrivateKey, chain } func GetTimePerCu(cu uint64) time.Duration { - return chainlib.LocalNodeTimePerCu(cu) + MinimumTimePerRelayDelay + return chainlib.LocalNodeTimePerCu(cu) + chainlib.MinimumTimePerRelayDelay } func UpdateRequestedBlock(request *pairingtypes.RelayRequest, response *pairingtypes.RelayReply) { diff --git a/protocol/lavasession/provider_types.go b/protocol/lavasession/provider_types.go index bf316768a1..218f703f3b 100644 --- a/protocol/lavasession/provider_types.go +++ b/protocol/lavasession/provider_types.go @@ -7,6 +7,7 @@ import ( "sync/atomic" "github.com/lavanet/lava/protocol/chainlib/chainproxy/rpcclient" + "github.com/lavanet/lava/protocol/common" "github.com/lavanet/lava/utils" ) @@ -30,7 +31,20 @@ type RPCProviderEndpoint struct { } func (endpoint *RPCProviderEndpoint) String() (retStr string) { - return endpoint.ChainID + ":" + endpoint.ApiInterface + " Network Address:" + endpoint.NetworkAddress + "Node: " + strings.Join(endpoint.NodeUrl, ", ") + " Geolocation:" + strconv.FormatUint(endpoint.Geolocation, 10) + return endpoint.ChainID + ":" + endpoint.ApiInterface + " Network Address:" + endpoint.NetworkAddress + " Node: " + strings.Join(endpoint.NodeUrl, ", ") + " Geolocation:" + strconv.FormatUint(endpoint.Geolocation, 10) +} + +func (endpoint *RPCProviderEndpoint) Validate() error { + if len(endpoint.NodeUrl) == 0 { + return utils.LavaFormatError("Empty URL list for endpoint", nil, &map[string]string{"endpoint": endpoint.String()}) + } + for _, url := range endpoint.NodeUrl { + err := common.ValidateEndpoint(url, endpoint.ApiInterface) + if err != nil { + return err + } + } + return nil } type RPCSubscription struct { diff --git a/protocol/rpcconsumer/rpcconsumer_server.go b/protocol/rpcconsumer/rpcconsumer_server.go index 5ddd4d6d6e..c5d0006131 100644 --- a/protocol/rpcconsumer/rpcconsumer_server.go +++ b/protocol/rpcconsumer/rpcconsumer_server.go @@ -212,7 +212,7 @@ func (rpccs *RPCConsumerServer) 
sendRelayToProvider( if chainMessage.GetInterface().Category.HangingApi { _, extraRelayTimeout, _, _ = rpccs.chainParser.ChainBlockStats() } - relayTimeout := extraRelayTimeout + lavaprotocol.GetTimePerCu(singleConsumerSession.LatestRelayCu) + lavaprotocol.AverageWorldLatency + relayTimeout := extraRelayTimeout + lavaprotocol.GetTimePerCu(singleConsumerSession.LatestRelayCu) + chainlib.AverageWorldLatency relayResult, relayLatency, err := rpccs.relayInner(ctx, singleConsumerSession, relayResult, relayTimeout) if err != nil { // relay failed need to fail the session advancement @@ -231,7 +231,7 @@ func (rpccs *RPCConsumerServer) sendRelayToProvider( // set cache in a non blocking call go func() { new_ctx := context.Background() - new_ctx, cancel := context.WithTimeout(new_ctx, lavaprotocol.DataReliabilityTimeoutIncrease) + new_ctx, cancel := context.WithTimeout(new_ctx, chainlib.DataReliabilityTimeoutIncrease) defer cancel() err2 := rpccs.cache.SetEntry(new_ctx, relayRequest, chainMessage.GetInterface().Interface, nil, chainID, dappID, relayResult.Reply, relayResult.Finalized) // caching in the portal doesn't care about hashes if err2 != nil && !performance.NotInitialisedError.Is(err2) { @@ -367,7 +367,7 @@ func (rpccs *RPCConsumerServer) sendDataReliabilityRelayIfApplicable(ctx context return nil, utils.LavaFormatError("failed creating data reliability relay", err, &map[string]string{"relayRequestCommonData": fmt.Sprintf("%+v", relayRequestCommonData)}) } relayResult = &lavaprotocol.RelayResult{Request: reliabilityRequest, ProviderAddress: providerAddress, Finalized: false} - relayTimeout := lavaprotocol.GetTimePerCu(singleConsumerSession.LatestRelayCu) + lavaprotocol.AverageWorldLatency + lavaprotocol.DataReliabilityTimeoutIncrease + relayTimeout := lavaprotocol.GetTimePerCu(singleConsumerSession.LatestRelayCu) + chainlib.AverageWorldLatency + chainlib.DataReliabilityTimeoutIncrease relayResult, dataReliabilityLatency, err := rpccs.relayInner(ctx, singleConsumerSession, relayResult, relayTimeout) if err != nil { errRet := rpccs.consumerSessionManager.OnDataReliabilitySessionFailure(singleConsumerSession, err) diff --git a/protocol/rpcprovider/rpcprovider.go b/protocol/rpcprovider/rpcprovider.go index 5550675e34..6628fa1025 100644 --- a/protocol/rpcprovider/rpcprovider.go +++ b/protocol/rpcprovider/rpcprovider.go @@ -55,6 +55,7 @@ type RPCProvider struct { providerStateTracker ProviderStateTrackerInf rpcProviderServers map[string]*RPCProviderServer rpcProviderListeners map[string]*ProviderListener + disabledEndpoints []*lavasession.RPCProviderEndpoint } func (rpcp *RPCProvider) Start(ctx context.Context, txFactory tx.Factory, clientCtx client.Context, rpcProviderEndpoints []*lavasession.RPCProviderEndpoint, cache *performance.Cache, parallelConnections uint) (err error) { @@ -67,6 +68,7 @@ func (rpcp *RPCProvider) Start(ctx context.Context, txFactory tx.Factory, client }() rpcp.rpcProviderServers = make(map[string]*RPCProviderServer) rpcp.rpcProviderListeners = make(map[string]*ProviderListener) + rpcp.disabledEndpoints = []*lavasession.RPCProviderEndpoint{} // single state tracker lavaChainFetcher := chainlib.NewLavaChainFetcher(ctx, clientCtx) providerStateTracker, err := statetracker.NewProviderStateTracker(ctx, txFactory, clientCtx, lavaChainFetcher) @@ -101,6 +103,12 @@ func (rpcp *RPCProvider) Start(ctx context.Context, txFactory tx.Factory, client utils.LavaFormatFatal("Failed fetching GetEpochSizeMultipliedByRecommendedEpochNumToCollectPayment in RPCProvider Start", err, nil) } for 
_, rpcProviderEndpoint := range rpcProviderEndpoints { + err := rpcProviderEndpoint.Validate() + if err != nil { + utils.LavaFormatError("panic severity critical error, aborting support for chain api due to invalid node url definition, continuing with others", err, &map[string]string{"endpoint": rpcProviderEndpoint.String()}) + rpcp.disabledEndpoints = append(rpcp.disabledEndpoints, rpcProviderEndpoint) + continue + } providerSessionManager := lavasession.NewProviderSessionManager(rpcProviderEndpoint, blockMemorySize) key := rpcProviderEndpoint.Key() rpcp.providerStateTracker.RegisterForEpochUpdates(ctx, providerSessionManager) @@ -112,7 +120,8 @@ func (rpcp *RPCProvider) Start(ctx context.Context, txFactory tx.Factory, client _, averageBlockTime, _, _ := chainParser.ChainBlockStats() chainProxy, err := chainlib.GetChainProxy(ctx, parallelConnections, rpcProviderEndpoint, averageBlockTime) if err != nil { - utils.LavaFormatError("panic severity critical error, failed creating chain proxy, continuing with others", err, &map[string]string{"parallelConnections": strconv.FormatUint(uint64(parallelConnections), 10), "rpcProviderEndpoint": fmt.Sprintf("%+v", rpcProviderEndpoint)}) + utils.LavaFormatError("panic severity critical error, failed creating chain proxy, continuing with others endpoints", err, &map[string]string{"parallelConnections": strconv.FormatUint(uint64(parallelConnections), 10), "rpcProviderEndpoint": fmt.Sprintf("%+v", rpcProviderEndpoint)}) + rpcp.disabledEndpoints = append(rpcp.disabledEndpoints, rpcProviderEndpoint) continue } @@ -126,7 +135,8 @@ func (rpcp *RPCProvider) Start(ctx context.Context, txFactory tx.Factory, client chainFetcher := chainlib.NewChainFetcher(ctx, chainProxy, chainParser, rpcProviderEndpoint) chainTracker, err := chaintracker.NewChainTracker(ctx, chainFetcher, chainTrackerConfig) if err != nil { - utils.LavaFormatError("panic severity critical error, aborting support for chain due to node access, continuing with others", err, &map[string]string{"chainTrackerConfig": fmt.Sprintf("%+v", chainTrackerConfig), "endpoint": rpcProviderEndpoint.String()}) + utils.LavaFormatError("panic severity critical error, aborting support for chain api due to node access, continuing with other endpoints", err, &map[string]string{"chainTrackerConfig": fmt.Sprintf("%+v", chainTrackerConfig), "endpoint": rpcProviderEndpoint.String()}) + rpcp.disabledEndpoints = append(rpcp.disabledEndpoints, rpcProviderEndpoint) continue } reliabilityManager := reliabilitymanager.NewReliabilityManager(chainTracker, providerStateTracker, addr.String(), chainProxy, chainParser) @@ -161,7 +171,9 @@ func (rpcp *RPCProvider) Start(ctx context.Context, txFactory tx.Factory, client } listener.RegisterReceiver(rpcProviderServer, rpcProviderEndpoint) } - + if len(rpcp.disabledEndpoints) > 0 { + utils.LavaFormatError(utils.FormatStringerList("RPCProvider Runnig with disabled Endpoints:", rpcp.disabledEndpoints), nil, nil) + } select { case <-ctx.Done(): utils.LavaFormatInfo("Provider Server ctx.Done", nil) diff --git a/utils/lavalog.go b/utils/lavalog.go index 93e0c2baeb..000a0c7dfc 100644 --- a/utils/lavalog.go +++ b/utils/lavalog.go @@ -133,3 +133,12 @@ func LavaFormatInfo(description string, extraAttributes *map[string]string) erro func LavaFormatDebug(description string, extraAttributes *map[string]string) error { return LavaFormatLog(description, nil, extraAttributes, 0) } + +func FormatStringerList[T fmt.Stringer](description string, listToPrint []T) string { + st := "" + for _, printable := 
range listToPrint { + st = st + printable.String() + "\n" + } + st = fmt.Sprintf(description+"\n%s", st) + return st +} From 4a7075cfcd0c27d86c5a1281a8a9a42b4922346e Mon Sep 17 00:00:00 2001 From: omer mishael Date: Mon, 6 Mar 2023 03:56:12 +0200 Subject: [PATCH 076/123] better capture grpc endpoint misconfiguration --- protocol/common/endpoints.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/protocol/common/endpoints.go b/protocol/common/endpoints.go index b308026f36..218aacbef8 100644 --- a/protocol/common/endpoints.go +++ b/protocol/common/endpoints.go @@ -2,6 +2,7 @@ package common import ( "net/url" + "strings" "github.com/lavanet/lava/utils" spectypes "github.com/lavanet/lava/x/spec/types" @@ -26,8 +27,8 @@ func ValidateEndpoint(endpoint string, apiInterface string) error { parsedUrl, err := url.Parse(endpoint) if err == nil { // user provided a valid url with a scheme - if parsedUrl.Scheme != "" { - return utils.LavaFormatError("grpc URL scheme should be empty and it is not, usage example: 127.0.0.1:9090 or my-node.com/grpc", nil, &map[string]string{"apiInterface": apiInterface, "scheme": parsedUrl.Scheme}) + if parsedUrl.Scheme != "" && strings.Contains(endpoint, "/") { + return utils.LavaFormatError("grpc URL scheme should be empty and it is not, endpoint definition example: 127.0.0.1:9090 -or- my-node.com/grpc", nil, &map[string]string{"apiInterface": apiInterface, "scheme": parsedUrl.Scheme}) } return nil } else { From b96f55d46ab18006d068f1b1b8c5e9801f7130b4 Mon Sep 17 00:00:00 2001 From: Ran Mishael Date: Mon, 6 Mar 2023 20:21:20 +0100 Subject: [PATCH 077/123] happy flow for provider session manager --- .../provider_session_manager_test.go | 87 +++++++++++++++++-- protocol/lavasession/provider_types.go | 14 +-- 2 files changed, 86 insertions(+), 15 deletions(-) diff --git a/protocol/lavasession/provider_session_manager_test.go b/protocol/lavasession/provider_session_manager_test.go index 3277aeb7a0..0c08b7e33e 100644 --- a/protocol/lavasession/provider_session_manager_test.go +++ b/protocol/lavasession/provider_session_manager_test.go @@ -1,17 +1,88 @@ package lavasession import ( - "sync/atomic" "testing" "github.com/stretchr/testify/require" ) -// Test the basic functionality of the consumerSessionManager -func TestHappyFlowPSM(t *testing.T) { - var a uint64 = 5 - res_a := atomic.CompareAndSwapUint64(&a, 5, 7) - require.True(t, res_a) - res_b := atomic.CompareAndSwapUint64(&a, 5, 7) - require.False(t, res_b) +const testNumberOfBlocksKeptInMemory = 100 + +func initProviderSessionManager() *ProviderSessionManager { + return NewProviderSessionManager(&RPCProviderEndpoint{ + NetworkAddress: "127.0.0.1:6666", + ChainID: "LAV1", + ApiInterface: "tendermint", + Geolocation: 1, + NodeUrl: []string{"http://localhost:666", "ws://localhost:666/websocket"}, + }, testNumberOfBlocksKeptInMemory) +} + +// Test the basic functionality of the ProviderSessionsManager +func TestHappyFlowPSMWithEpochChange(t *testing.T) { + // parameters for the test + relayCu := uint64(10) + epoch1 := uint64(10) + sessionId := uint64(123) + relayNumber := uint64(1) + maxCu := uint64(150) + epoch2 := testNumberOfBlocksKeptInMemory + epoch1 + + // initialize the struct + psm := initProviderSessionManager() + + // get session for the first time + sps, err := psm.GetSession("consumer1", epoch1, sessionId, relayNumber) + + // validate expected results + require.Empty(t, psm.sessionsWithAllConsumers) + require.Nil(t, sps) + require.Error(t, err) + require.True(t, 
ConsumerNotRegisteredYet.Is(err)) + + // expect session to be missing, so we need to register it for the first time + sps, err = psm.RegisterProviderSessionWithConsumer("consumer1", epoch1, sessionId, relayNumber, maxCu) + + // validate session was added + require.NotEmpty(t, psm.sessionsWithAllConsumers) + require.Nil(t, err) + require.NotNil(t, sps) + + // prepare session for usage + sps.PrepareSessionForUsage(relayCu, relayCu) + + // validate session was prepared successfully + require.Equal(t, relayCu, sps.LatestRelayCu) + require.Equal(t, sps.CuSum, relayCu) + require.Equal(t, sps.SessionID, sessionId) + require.Equal(t, sps.RelayNum, relayNumber) + require.Equal(t, sps.PairingEpoch, epoch1) + + // on session done successfully + err = psm.OnSessionDone(sps) + + // validate session done data + require.Nil(t, err) + require.Equal(t, sps.LatestRelayCu, uint64(0)) + require.Equal(t, sps.CuSum, relayCu) + require.Equal(t, sps.SessionID, sessionId) + require.Equal(t, sps.RelayNum, relayNumber) + require.Equal(t, sps.PairingEpoch, epoch1) + + // update epoch to epoch2 height + psm.UpdateEpoch(epoch2) + + // validate epoch update + require.Equal(t, psm.blockedEpochHeight, epoch1) + require.Empty(t, psm.dataReliabilitySessionsWithAllConsumers) + require.Empty(t, psm.sessionsWithAllConsumers) + require.Empty(t, psm.subscriptionSessionsWithAllConsumers) + + // try to verify we cannot get a session from epoch1 after we blocked it + sps, err = psm.GetSession("consumer1", epoch1, sessionId, relayNumber) + + // expect an error as we tried to get a session from a blocked epoch + require.Error(t, err) + require.True(t, InvalidEpochError.Is(err)) + require.Nil(t, sps) } diff --git a/protocol/lavasession/provider_types.go b/protocol/lavasession/provider_types.go index bf316768a1..3f29366c05 100644 --- a/protocol/lavasession/provider_types.go +++ b/protocol/lavasession/provider_types.go @@ -156,32 +156,32 @@ func (sps *SingleProviderSession) VerifyLock() error { return nil } -func (sps *SingleProviderSession) PrepareSessionForUsage(currentCU uint64, relayRequestTotalCU uint64) error { +func (sps *SingleProviderSession) PrepareSessionForUsage(currentRelayCU uint64, relayRequestTotalCU uint64) error { err := sps.VerifyLock() // sps is locked if err != nil { return utils.LavaFormatError("sps.verifyLock() failed in PrepareSessionForUsage", err, nil) } maxCu := sps.userSessionsParent.atomicReadMaxComputeUnits() - if relayRequestTotalCU < sps.CuSum+currentCU { + if relayRequestTotalCU < sps.CuSum+currentRelayCU { sps.lock.Unlock() // unlock on error return utils.LavaFormatError("CU mismatch PrepareSessionForUsage, Provider and consumer disagree on CuSum", ProviderConsumerCuMisMatch, &map[string]string{ "relayRequestTotalCU": strconv.FormatUint(relayRequestTotalCU, 10), "sps.CuSum": strconv.FormatUint(sps.CuSum, 10), - "currentCU": strconv.FormatUint(currentCU, 10), + "currentCU": strconv.FormatUint(currentRelayCU, 10), }) } // this must happen first, as we also validate and add the used cu to parent here - err = sps.validateAndAddUsedCU(currentCU, maxCu) + err = sps.validateAndAddUsedCU(currentRelayCU, maxCu) if err != nil { sps.lock.Unlock() // unlock on error return err } // finished validating, can add all info. - sps.LatestRelayCu = currentCU // 1. update latest - sps.CuSum = relayRequestTotalCU // 2. update CuSum, if consumer wants to pay more, let it - sps.RelayNum = sps.RelayNum + 1 // 3. update RelayNum, we already verified relayNum is valid in GetSession. + sps.LatestRelayCu = currentRelayCU // 1. 
update latest + sps.CuSum = relayRequestTotalCU // 2. update CuSum, if consumer wants to pay more, let it + sps.RelayNum = sps.RelayNum + 1 // 3. update RelayNum, we already verified relayNum is valid in GetSession. return nil } From e74bb58e7263061b37fc2ebed4b57cee3f11f1b1 Mon Sep 17 00:00:00 2001 From: Ran Mishael Date: Mon, 6 Mar 2023 20:35:17 +0100 Subject: [PATCH 078/123] adding happy flow, session failure, epoch update tests --- .../provider_session_manager_test.go | 62 +++++++++++++++---- 1 file changed, 50 insertions(+), 12 deletions(-) diff --git a/protocol/lavasession/provider_session_manager_test.go b/protocol/lavasession/provider_session_manager_test.go index 0c08b7e33e..f499ecb555 100644 --- a/protocol/lavasession/provider_session_manager_test.go +++ b/protocol/lavasession/provider_session_manager_test.go @@ -6,7 +6,15 @@ import ( "github.com/stretchr/testify/require" ) -const testNumberOfBlocksKeptInMemory = 100 +const ( + testNumberOfBlocksKeptInMemory = 100 + relayCu = uint64(10) + epoch1 = uint64(10) + sessionId = uint64(123) + relayNumber = uint64(1) + maxCu = uint64(150) + epoch2 = testNumberOfBlocksKeptInMemory + epoch1 +) func initProviderSessionManager() *ProviderSessionManager { return NewProviderSessionManager(&RPCProviderEndpoint{ @@ -18,16 +26,7 @@ func initProviderSessionManager() *ProviderSessionManager { }, testNumberOfBlocksKeptInMemory) } -// Test the basic functionality of the ProviderSessionsManager -func TestHappyFlowPSMWithEpochChange(t *testing.T) { - // parameters for the test - relayCu := uint64(10) - epoch1 := uint64(10) - sessionId := uint64(123) - relayNumber := uint64(1) - maxCu := uint64(150) - epoch2 := testNumberOfBlocksKeptInMemory + epoch1 - +func prepareSession(t *testing.T) (*ProviderSessionManager, *SingleProviderSession) { // initialize the struct psm := initProviderSessionManager() @@ -57,9 +56,32 @@ func TestHappyFlowPSMWithEpochChange(t *testing.T) { require.Equal(t, sps.SessionID, sessionId) require.Equal(t, sps.RelayNum, relayNumber) require.Equal(t, sps.PairingEpoch, epoch1) + return psm, sps +} + +func TestHappyFlowPSM(t *testing.T) { + // init test + psm, sps := prepareSession(t) + + // on session done successfully + err := psm.OnSessionDone(sps) + + // validate session done data + require.Nil(t, err) + require.Equal(t, sps.LatestRelayCu, uint64(0)) + require.Equal(t, sps.CuSum, relayCu) + require.Equal(t, sps.SessionID, sessionId) + require.Equal(t, sps.RelayNum, relayNumber) + require.Equal(t, sps.PairingEpoch, epoch1) +} + +// Test the basic functionality of the ProviderSessionsManager +func TestHappyFlowPSMWithEpochChange(t *testing.T) { + // init test + psm, sps := prepareSession(t) // on session done successfully - err = psm.OnSessionDone(sps) + err := psm.OnSessionDone(sps) // validate session done data require.Nil(t, err) @@ -86,3 +108,19 @@ func TestHappyFlowPSMWithEpochChange(t *testing.T) { require.True(t, InvalidEpochError.Is(err)) require.Nil(t, sps) } + +func TestHappyFlowPSMOnSessionFailure(t *testing.T) { + // init test + psm, sps := prepareSession(t) + + // on session done successfully + err := psm.OnSessionFailure(sps) + + // validate session done data + require.Nil(t, err) + require.Equal(t, sps.LatestRelayCu, uint64(0)) + require.Equal(t, sps.CuSum, uint64(0)) + require.Equal(t, sps.SessionID, sessionId) + require.Equal(t, sps.RelayNum, uint64(0)) + require.Equal(t, sps.PairingEpoch, epoch1) +} From 4f99676df2e78621230711b48db6cd7942a0dbff Mon Sep 17 00:00:00 2001 From: Ran Mishael Date: Mon, 6 Mar 2023 20:54:50 
+0100 Subject: [PATCH 079/123] adding more tests to psm --- .../provider_session_manager_test.go | 63 ++++++++++++++++++- protocol/lavasession/provider_types.go | 14 ++--- 2 files changed, 67 insertions(+), 10 deletions(-) diff --git a/protocol/lavasession/provider_session_manager_test.go b/protocol/lavasession/provider_session_manager_test.go index f499ecb555..999295a69d 100644 --- a/protocol/lavasession/provider_session_manager_test.go +++ b/protocol/lavasession/provider_session_manager_test.go @@ -14,6 +14,7 @@ const ( relayNumber = uint64(1) maxCu = uint64(150) epoch2 = testNumberOfBlocksKeptInMemory + epoch1 + consumerOneAddress = "consumer1" ) func initProviderSessionManager() *ProviderSessionManager { @@ -31,7 +32,7 @@ func prepareSession(t *testing.T) (*ProviderSessionManager, *SingleProviderSessi psm := initProviderSessionManager() // get session for the first time - sps, err := psm.GetSession("consumer1", epoch1, sessionId, relayNumber) + sps, err := psm.GetSession(consumerOneAddress, epoch1, sessionId, relayNumber) // validate expected results require.Empty(t, psm.sessionsWithAllConsumers) @@ -40,7 +41,7 @@ func prepareSession(t *testing.T) (*ProviderSessionManager, *SingleProviderSessi require.True(t, ConsumerNotRegisteredYet.Is(err)) // expect session to be missing, so we need to register it for the first time - sps, err = psm.RegisterProviderSessionWithConsumer("consumer1", epoch1, sessionId, relayNumber, maxCu) + sps, err = psm.RegisterProviderSessionWithConsumer(consumerOneAddress, epoch1, sessionId, relayNumber, maxCu) // validate session was added require.NotEmpty(t, psm.sessionsWithAllConsumers) @@ -101,7 +102,7 @@ func TestHappyFlowPSMWithEpochChange(t *testing.T) { require.Empty(t, psm.subscriptionSessionsWithAllConsumers) // try to verify we cannot get a session from epoch1 after we blocked it - sps, err = psm.GetSession("consumer1", epoch1, sessionId, relayNumber) + sps, err = psm.GetSession(consumerOneAddress, epoch1, sessionId, relayNumber) // expect an error as we tried to get a session from a blocked epoch require.Error(t, err) @@ -124,3 +125,59 @@ func TestHappyFlowPSMOnSessionFailure(t *testing.T) { require.Equal(t, sps.RelayNum, uint64(0)) require.Equal(t, sps.PairingEpoch, epoch1) } + +func TestPSMUpdateCu(t *testing.T) { + // init test + psm, sps := prepareSession(t) + + // on session done successfully + err := psm.OnSessionDone(sps) + + // validate session done data + require.Nil(t, err) + + err = psm.UpdateSessionCU(consumerOneAddress, epoch1, sessionId, maxCu) + require.Nil(t, err) + require.Equal(t, sps.userSessionsParent.epochData.UsedComputeUnits, maxCu) +} + +func TestPSMUpdateCuMaxCuReached(t *testing.T) { + // init test + psm, sps := prepareSession(t) + + // on session done successfully + err := psm.OnSessionDone(sps) + + // Update the session CU to reach the limit of the cu allowed + err = psm.UpdateSessionCU(consumerOneAddress, epoch1, sessionId, maxCu) + require.Nil(t, err) + require.Equal(t, sps.userSessionsParent.epochData.UsedComputeUnits, maxCu) + + // get another session, this time sps is not nil as the session ID is already registered + sps, err = psm.GetSession(consumerOneAddress, epoch1, sessionId, relayNumber+1) + require.Nil(t, err) + require.NotNil(t, sps) + + // prepare session with max cu overflow. 
expect an error + err = sps.PrepareSessionForUsage(relayCu, maxCu+relayCu) + require.Error(t, err) + require.True(t, MaximumCULimitReachedByConsumer.Is(err)) +} + +func TestPSMCUMisMatch(t *testing.T) { + // init test + psm, sps := prepareSession(t) + + // on session done successfully + err := psm.OnSessionDone(sps) + + // get another session + sps, err = psm.GetSession(consumerOneAddress, epoch1, sessionId, relayNumber+1) + require.Nil(t, err) + require.NotNil(t, sps) + + // prepare session with wrong cu and expect mismatch, consumer wants to pay less than spec requires + err = sps.PrepareSessionForUsage(relayCu+1, relayCu) + require.Error(t, err) + require.True(t, ProviderConsumerCuMisMatch.Is(err)) +} diff --git a/protocol/lavasession/provider_types.go b/protocol/lavasession/provider_types.go index 4e1a737186..b542b1fa01 100644 --- a/protocol/lavasession/provider_types.go +++ b/protocol/lavasession/provider_types.go @@ -170,32 +170,32 @@ func (sps *SingleProviderSession) VerifyLock() error { return nil } -func (sps *SingleProviderSession) PrepareSessionForUsage(currentRelayCU uint64, relayRequestTotalCU uint64) error { +func (sps *SingleProviderSession) PrepareSessionForUsage(cuFromSpec uint64, relayRequestTotalCU uint64) error { err := sps.VerifyLock() // sps is locked if err != nil { return utils.LavaFormatError("sps.verifyLock() failed in PrepareSessionForUsage", err, nil) } maxCu := sps.userSessionsParent.atomicReadMaxComputeUnits() - if relayRequestTotalCU < sps.CuSum+currentRelayCU { + if relayRequestTotalCU < sps.CuSum+cuFromSpec { sps.lock.Unlock() // unlock on error return utils.LavaFormatError("CU mismatch PrepareSessionForUsage, Provider and consumer disagree on CuSum", ProviderConsumerCuMisMatch, &map[string]string{ "relayRequestTotalCU": strconv.FormatUint(relayRequestTotalCU, 10), "sps.CuSum": strconv.FormatUint(sps.CuSum, 10), - "currentCU": strconv.FormatUint(currentRelayCU, 10), + "currentCU": strconv.FormatUint(cuFromSpec, 10), }) } // this must happen first, as we also validate and add the used cu to parent here - err = sps.validateAndAddUsedCU(currentRelayCU, maxCu) + err = sps.validateAndAddUsedCU(cuFromSpec, maxCu) if err != nil { sps.lock.Unlock() // unlock on error return err } // finished validating, can add all info. - sps.LatestRelayCu = currentRelayCU // 1. update latest - sps.CuSum = relayRequestTotalCU // 2. update CuSum, if consumer wants to pay more, let it - sps.RelayNum = sps.RelayNum + 1 // 3. update RelayNum, we already verified relayNum is valid in GetSession. + sps.LatestRelayCu = cuFromSpec // 1. update latest + sps.CuSum = relayRequestTotalCU // 2. update CuSum, if consumer wants to pay more, let it + sps.RelayNum = sps.RelayNum + 1 // 3. update RelayNum, we already verified relayNum is valid in GetSession. 
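+	// note: the consumer's per-epoch used CU was already increased by validateAndAddUsedCU above, so a relay that fails later must roll it back through onSessionFailure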
return nil } From cb74985674d358c4a511ec868bf840160c55c42c Mon Sep 17 00:00:00 2001 From: Ran Mishael Date: Mon, 6 Mar 2023 20:55:59 +0100 Subject: [PATCH 080/123] changing test names --- protocol/lavasession/provider_session_manager_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/protocol/lavasession/provider_session_manager_test.go b/protocol/lavasession/provider_session_manager_test.go index 999295a69d..3b85ed64ff 100644 --- a/protocol/lavasession/provider_session_manager_test.go +++ b/protocol/lavasession/provider_session_manager_test.go @@ -77,7 +77,7 @@ func TestHappyFlowPSM(t *testing.T) { } // Test the basic functionality of the ProviderSessionsManager -func TestHappyFlowPSMWithEpochChange(t *testing.T) { +func TestPSMEpochChange(t *testing.T) { // init test psm, sps := prepareSession(t) @@ -110,7 +110,7 @@ func TestHappyFlowPSMWithEpochChange(t *testing.T) { require.Nil(t, sps) } -func TestHappyFlowPSMOnSessionFailure(t *testing.T) { +func TestPSMOnSessionFailure(t *testing.T) { // init test psm, sps := prepareSession(t) From 601e73da966b756c48b68d7f95e9ecc21e7a2306 Mon Sep 17 00:00:00 2001 From: Ran Mishael Date: Tue, 7 Mar 2023 13:49:15 +0100 Subject: [PATCH 081/123] Fixing data reliability in PSM --- protocol/lavasession/errors.go | 27 ++++---- .../lavasession/provider_session_manager.go | 25 +++++-- .../provider_session_manager_test.go | 28 ++++++++ protocol/lavasession/provider_types.go | 68 ++++++++++++++++--- protocol/rpcprovider/rpcprovider_server.go | 3 + 5 files changed, 123 insertions(+), 28 deletions(-) diff --git a/protocol/lavasession/errors.go b/protocol/lavasession/errors.go index 817e997cab..7317e8114e 100644 --- a/protocol/lavasession/errors.go +++ b/protocol/lavasession/errors.go @@ -28,16 +28,19 @@ var ( // Consumer Side Errors ) var ( // Provider Side Errors - InvalidEpochError = sdkerrors.New("InvalidEpoch Error", 881, "Requested Epoch Is Too Old") - NewSessionWithRelayNumError = sdkerrors.New("NewSessionWithRelayNum Error", 882, "Requested Session With Relay Number Is Invalid") - ConsumerIsBlockListed = sdkerrors.New("ConsumerIsBlockListed Error", 883, "This Consumer Is Blocked.") - ConsumerNotRegisteredYet = sdkerrors.New("ConsumerNotActive Error", 884, "This Consumer Is Not Currently In The Pool.") - SessionDoesNotExist = sdkerrors.New("SessionDoesNotExist Error", 885, "This Session Id Does Not Exist.") - MaximumCULimitReachedByConsumer = sdkerrors.New("MaximumCULimitReachedByConsumer Error", 886, "Consumer reached maximum cu limit") - ProviderConsumerCuMisMatch = sdkerrors.New("ProviderConsumerCuMisMatch Error", 887, "Provider and Consumer disagree on total cu for session") - RelayNumberMismatch = sdkerrors.New("RelayNumberMismatch Error", 888, "Provider and Consumer disagree on relay number for session") - SubscriptionInitiationError = sdkerrors.New("SubscriptionInitiationError Error", 889, "Provider failed initiating subscription") - EpochIsNotRegisteredError = sdkerrors.New("EpochIsNotRegisteredError Error", 890, "Epoch is not registered in provider session manager") - ConsumerIsNotRegisteredError = sdkerrors.New("ConsumerIsNotRegisteredError Error", 891, "Consumer is not registered in provider session manager") - SubscriptionAlreadyExistsError = sdkerrors.New("SubscriptionAlreadyExists Error", 892, "Subscription already exists in single provider session") + InvalidEpochError = sdkerrors.New("InvalidEpoch Error", 881, "Requested Epoch Is Too Old") + NewSessionWithRelayNumError = sdkerrors.New("NewSessionWithRelayNum 
Error", 882, "Requested Session With Relay Number Is Invalid") + ConsumerIsBlockListed = sdkerrors.New("ConsumerIsBlockListed Error", 883, "This Consumer Is Blocked.") + ConsumerNotRegisteredYet = sdkerrors.New("ConsumerNotActive Error", 884, "This Consumer Is Not Currently In The Pool.") + SessionDoesNotExist = sdkerrors.New("SessionDoesNotExist Error", 885, "This Session Id Does Not Exist.") + MaximumCULimitReachedByConsumer = sdkerrors.New("MaximumCULimitReachedByConsumer Error", 886, "Consumer reached maximum cu limit") + ProviderConsumerCuMisMatch = sdkerrors.New("ProviderConsumerCuMisMatch Error", 887, "Provider and Consumer disagree on total cu for session") + RelayNumberMismatch = sdkerrors.New("RelayNumberMismatch Error", 888, "Provider and Consumer disagree on relay number for session") + SubscriptionInitiationError = sdkerrors.New("SubscriptionInitiationError Error", 889, "Provider failed initiating subscription") + EpochIsNotRegisteredError = sdkerrors.New("EpochIsNotRegisteredError Error", 890, "Epoch is not registered in provider session manager") + ConsumerIsNotRegisteredError = sdkerrors.New("ConsumerIsNotRegisteredError Error", 891, "Consumer is not registered in provider session manager") + SubscriptionAlreadyExistsError = sdkerrors.New("SubscriptionAlreadyExists Error", 892, "Subscription already exists in single provider session") + DataReliabilitySessionAlreadyUsedError = sdkerrors.New("DataReliabilitySessionAlreadyUsed Error", 893, "Data Reliability Session already used by this consumer in this epoch") + DataReliabilityCuSumMisMatchError = sdkerrors.New("DataReliabilityCuSumMisMatch Error", 894, "Data Reliability Cu sum mismatch error") + DataReliabilityRelayNumberMisMatchError = sdkerrors.New("DataReliabilityRelayNumberMisMatch Error", 894, "Data Reliability RelayNumber mismatch error") ) diff --git a/protocol/lavasession/provider_session_manager.go b/protocol/lavasession/provider_session_manager.go index ea6e118106..0d9df2c785 100644 --- a/protocol/lavasession/provider_session_manager.go +++ b/protocol/lavasession/provider_session_manager.go @@ -77,12 +77,12 @@ func (psm *ProviderSessionManager) getOrCreateDataReliabilitySessionWithConsumer } // If we got here, we need to create a new instance for this consumer address. 
- providerSessionWithConsumer = NewProviderSessionsWithConsumer(address, nil) + providerSessionWithConsumer = NewProviderSessionsWithConsumer(address, nil, isDataReliabilityPSWC) psm.dataReliabilitySessionsWithAllConsumers[epoch][address] = providerSessionWithConsumer return providerSessionWithConsumer, nil } -// GetDataReliabilitySession fetches a data reliability session, and assumes the user +// GetDataReliabilitySession fetches a data reliability session func (psm *ProviderSessionManager) GetDataReliabilitySession(address string, epoch uint64, sessionId uint64, relayNumber uint64) (*SingleProviderSession, error) { // validate Epoch if !psm.IsValidEpoch(epoch) { // fast checking to see if epoch is even relevant @@ -97,13 +97,26 @@ func (psm *ProviderSessionManager) GetDataReliabilitySession(address string, epo // validate RelayNumber if relayNumber > DataReliabilityRelayNumber { - return nil, utils.LavaFormatError("request's relayNumber is larger than the DataReliabilityRelayNumber allowed relay number", nil, &map[string]string{"relayNumber": strconv.FormatUint(relayNumber, 10), "DataReliabilityRelayNumber": strconv.Itoa(DataReliabilityRelayNumber)}) + return nil, utils.LavaFormatError("request's relayNumber is larger than the DataReliabilityRelayNumber allowed in Data Reliability", nil, &map[string]string{"relayNumber": strconv.FormatUint(relayNumber, 10), "DataReliabilityRelayNumber": strconv.Itoa(DataReliabilityRelayNumber)}) } // validate active consumer. - psm.getOrCreateDataReliabilitySessionWithConsumer(address, epoch, sessionId) + providerSessionWithConsumer, err := psm.getOrCreateDataReliabilitySessionWithConsumer(address, epoch, sessionId) + if err != nil { + return nil, utils.LavaFormatError("getOrCreateDataReliabilitySessionWithConsumer Failed", err, &map[string]string{"relayNumber": strconv.FormatUint(relayNumber, 10), "DataReliabilityRelayNumber": strconv.Itoa(DataReliabilityRelayNumber)}) + } + + singleProviderSession, err := providerSessionWithConsumer.getDataReliabilitySingleSession(sessionId, epoch) + if err != nil { + return nil, err + } + + // validate relay number in the session stored + if singleProviderSession.RelayNum+1 > DataReliabilityRelayNumber { // validate relay number fits + return nil, utils.LavaFormatError("GetDataReliabilitySession singleProviderSession.RelayNum relayNumber is larger than the DataReliabilityRelayNumber allowed in Data Reliability", DataReliabilityRelayNumberMisMatchError, &map[string]string{"singleProviderSession.RelayNum": strconv.FormatUint(singleProviderSession.RelayNum+1, 10), "request.relayNumber": strconv.FormatUint(relayNumber, 10)}) + } - return nil, nil + return singleProviderSession, nil } @@ -137,7 +150,7 @@ func (psm *ProviderSessionManager) registerNewConsumer(consumerAddr string, epoc providerSessionWithConsumer, foundAddressInMap := mapOfProviderSessionsWithConsumer[consumerAddr] if !foundAddressInMap { - providerSessionWithConsumer = NewProviderSessionsWithConsumer(consumerAddr, &ProviderSessionsEpochData{MaxComputeUnits: maxCuForConsumer}) + providerSessionWithConsumer = NewProviderSessionsWithConsumer(consumerAddr, &ProviderSessionsEpochData{MaxComputeUnits: maxCuForConsumer}, notDataReliabilityPSWC) mapOfProviderSessionsWithConsumer[consumerAddr] = providerSessionWithConsumer } return providerSessionWithConsumer, nil diff --git a/protocol/lavasession/provider_session_manager_test.go b/protocol/lavasession/provider_session_manager_test.go index 3b85ed64ff..cf647f3dfd 100644 --- 
a/protocol/lavasession/provider_session_manager_test.go +++ b/protocol/lavasession/provider_session_manager_test.go @@ -11,6 +11,7 @@ const ( relayCu = uint64(10) epoch1 = uint64(10) sessionId = uint64(123) + dataReliabilitySessionId = uint64(0) relayNumber = uint64(1) maxCu = uint64(150) epoch2 = testNumberOfBlocksKeptInMemory + epoch1 @@ -181,3 +182,30 @@ func TestPSMCUMisMatch(t *testing.T) { require.Error(t, err) require.True(t, ProviderConsumerCuMisMatch.Is(err)) } + +func TestPSMDataReliabilityHappyFlow(t *testing.T) { + // initialize the struct + psm := initProviderSessionManager() + + // get data reliability session + sps, err := psm.GetDataReliabilitySession(consumerOneAddress, epoch1, dataReliabilitySessionId, relayNumber) + + // validate results + require.Nil(t, err) + require.NotNil(t, sps) + + // validate expected results + require.Empty(t, psm.sessionsWithAllConsumers) + require.NotEmpty(t, psm.dataReliabilitySessionsWithAllConsumers) + require.Empty(t, psm.subscriptionSessionsWithAllConsumers) + + // // prepare session for usage + sps.PrepareSessionForUsage(relayCu, relayCu) + + // validate session was prepared successfully + require.Equal(t, relayCu, sps.LatestRelayCu) + require.Equal(t, sps.CuSum, relayCu) + require.Equal(t, sps.SessionID, sessionId) + require.Equal(t, sps.RelayNum, relayNumber) + require.Equal(t, sps.PairingEpoch, epoch1) +} diff --git a/protocol/lavasession/provider_types.go b/protocol/lavasession/provider_types.go index b542b1fa01..b5f9437d27 100644 --- a/protocol/lavasession/provider_types.go +++ b/protocol/lavasession/provider_types.go @@ -60,15 +60,18 @@ func (rpcpe *RPCProviderEndpoint) Key() string { const ( notBlockListedConsumer = 0 blockListedConsumer = 1 + notDataReliabilityPSWC = 0 + isDataReliabilityPSWC = 1 ) // holds all of the data for a consumer for a certain epoch type ProviderSessionsWithConsumer struct { - Sessions map[uint64]*SingleProviderSession - isBlockListed uint32 - consumerAddr string - epochData *ProviderSessionsEpochData - Lock sync.RWMutex + Sessions map[uint64]*SingleProviderSession + isBlockListed uint32 + consumerAddr string + epochData *ProviderSessionsEpochData + Lock sync.RWMutex + isDataReliability uint32 // 0 is false, 1 is true. 
set to uint so we can atomically read } type SingleProviderSession struct { @@ -81,16 +84,22 @@ type SingleProviderSession struct { PairingEpoch uint64 } -func NewProviderSessionsWithConsumer(consumerAddr string, epochData *ProviderSessionsEpochData) *ProviderSessionsWithConsumer { +func NewProviderSessionsWithConsumer(consumerAddr string, epochData *ProviderSessionsEpochData, isDataReliability uint32) *ProviderSessionsWithConsumer { pswc := &ProviderSessionsWithConsumer{ - Sessions: map[uint64]*SingleProviderSession{}, - isBlockListed: 0, - consumerAddr: consumerAddr, - epochData: epochData, + Sessions: map[uint64]*SingleProviderSession{}, + isBlockListed: 0, + consumerAddr: consumerAddr, + epochData: epochData, + isDataReliability: isDataReliability, } return pswc } +// reads the isDataReliability data atomically +func (pswc *ProviderSessionsWithConsumer) atomicReadIsDataReliability() uint32 { // rename to blocked consumer not blocked epoch + return atomic.LoadUint32(&pswc.isDataReliability) +} + // reads cs.BlockedEpoch atomically, notBlockListedConsumer = 0, blockListedConsumer = 1 func (pswc *ProviderSessionsWithConsumer) atomicWriteConsumerBlocked(blockStatus uint32) { // rename to blocked consumer not blocked epoch atomic.StoreUint32(&pswc.isBlockListed, blockStatus) @@ -152,6 +161,29 @@ func (pswc *ProviderSessionsWithConsumer) GetExistingSession(sessionId uint64) ( return nil, SessionDoesNotExist } +// this function verifies the provider can create a data reliability session and returns one if valid +func (pswc *ProviderSessionsWithConsumer) getDataReliabilitySingleSession(sessionId uint64, epoch uint64) (session *SingleProviderSession, err error) { + utils.LavaFormatDebug("Provider creating new DataReliabilitySingleSession", &map[string]string{"SessionID": strconv.FormatUint(sessionId, 10), "epoch": strconv.FormatUint(epoch, 10)}) + _, foundDataReliabilitySession := pswc.Sessions[sessionId] + if foundDataReliabilitySession { + // consumer already used his data reliability session. + return nil, utils.LavaFormatWarning("Data Reliability Session was already used", DataReliabilitySessionAlreadyUsedError, nil) + } + + session = &SingleProviderSession{ + userSessionsParent: pswc, + SessionID: sessionId, + PairingEpoch: epoch, + } + pswc.Lock.Lock() + defer pswc.Lock.Unlock() + // this is a double lock and risky but we just created session and nobody has reference to it yet + session.lock.Lock() + pswc.Sessions[sessionId] = session + // session is still locked when we return it + return session, nil +} + func (sps *SingleProviderSession) GetPairingEpoch() uint64 { return atomic.LoadUint64(&sps.PairingEpoch) } @@ -170,12 +202,28 @@ func (sps *SingleProviderSession) VerifyLock() error { return nil } +// In case the user session is a data reliability we just need to verify that the cusum is the amount agreed between the consumer and the provider +func (sps *SingleProviderSession) PrepareDataReliabilitySessionForUsage(relayRequestTotalCU uint64) error { + if relayRequestTotalCU != DataReliabilityCuSum { + return utils.LavaFormatError("PrepareDataReliabilitySessionForUsage", DataReliabilityCuSumMisMatchError, &map[string]string{"relayRequestTotalCU": strconv.FormatUint(relayRequestTotalCU, 10)}) + } + sps.LatestRelayCu = DataReliabilityCuSum // 1. update latest + sps.CuSum = relayRequestTotalCU // 2. update CuSum, if consumer wants to pay more, let it + sps.RelayNum = sps.RelayNum + 1 // 3. update RelayNum, we already verified relayNum is valid in GetDataReliabilitySession. 
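+	// note: unlike PrepareSessionForUsage below, a data reliability session is not charged against the consumer's MaxComputeUnits (validateAndAddUsedCU is not called here)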
+ return nil +} + func (sps *SingleProviderSession) PrepareSessionForUsage(cuFromSpec uint64, relayRequestTotalCU uint64) error { err := sps.VerifyLock() // sps is locked if err != nil { return utils.LavaFormatError("sps.verifyLock() failed in PrepareSessionForUsage", err, nil) } + // checking if this user session is a data reliability user session. + if sps.userSessionsParent.atomicReadIsDataReliability() == isDataReliabilityPSWC { + return sps.PrepareDataReliabilitySessionForUsage(relayRequestTotalCU) + } + maxCu := sps.userSessionsParent.atomicReadMaxComputeUnits() if relayRequestTotalCU < sps.CuSum+cuFromSpec { sps.lock.Unlock() // unlock on error diff --git a/protocol/rpcprovider/rpcprovider_server.go b/protocol/rpcprovider/rpcprovider_server.go index b293f7179b..5011daed25 100644 --- a/protocol/rpcprovider/rpcprovider_server.go +++ b/protocol/rpcprovider/rpcprovider_server.go @@ -315,6 +315,9 @@ func (rpcps *RPCProviderServer) verifyRelaySession(ctx context.Context, request } dataReliabilitySingleProviderSession, err := rpcps.providerSessionManager.GetDataReliabilitySession(extractedConsumerAddress.String(), uint64(request.BlockHeight), request.SessionId, request.RelayNum) if err != nil { + if lavasession.DataReliabilityAlreadySentThisEpochError.Is(err) { + return nil, nil, err + } return nil, nil, utils.LavaFormatError("failed to get a provider data reliability session", err, &map[string]string{"sessionID": strconv.FormatUint(request.SessionId, 10), "consumer": extractedConsumerAddress.String(), "epoch": strconv.FormatInt(request.BlockHeight, 10)}) } return dataReliabilitySingleProviderSession, extractedConsumerAddress, nil From 2446fb655743986e5ba59cda74229f42b68134ef Mon Sep 17 00:00:00 2001 From: Ran Mishael Date: Tue, 7 Mar 2023 14:01:54 +0100 Subject: [PATCH 082/123] data reliability psm WIP --- .../provider_session_manager_test.go | 60 +++++++++++++++++-- protocol/lavasession/provider_types.go | 15 ++++- 2 files changed, 68 insertions(+), 7 deletions(-) diff --git a/protocol/lavasession/provider_session_manager_test.go b/protocol/lavasession/provider_session_manager_test.go index cf647f3dfd..4e2886341b 100644 --- a/protocol/lavasession/provider_session_manager_test.go +++ b/protocol/lavasession/provider_session_manager_test.go @@ -9,6 +9,7 @@ import ( const ( testNumberOfBlocksKeptInMemory = 100 relayCu = uint64(10) + dataReliabilityRelayCu = uint64(0) epoch1 = uint64(10) sessionId = uint64(123) dataReliabilitySessionId = uint64(0) @@ -200,12 +201,59 @@ func TestPSMDataReliabilityHappyFlow(t *testing.T) { require.Empty(t, psm.subscriptionSessionsWithAllConsumers) // // prepare session for usage - sps.PrepareSessionForUsage(relayCu, relayCu) + sps.PrepareSessionForUsage(relayCu, dataReliabilityRelayCu) // validate session was prepared successfully - require.Equal(t, relayCu, sps.LatestRelayCu) - require.Equal(t, sps.CuSum, relayCu) - require.Equal(t, sps.SessionID, sessionId) - require.Equal(t, sps.RelayNum, relayNumber) - require.Equal(t, sps.PairingEpoch, epoch1) + require.Equal(t, dataReliabilityRelayCu, sps.LatestRelayCu) + require.Equal(t, dataReliabilityRelayCu, sps.CuSum) + require.Equal(t, dataReliabilitySessionId, sps.SessionID) + require.Equal(t, relayNumber, sps.RelayNum) + require.Equal(t, epoch1, sps.PairingEpoch) + + // perform session done + psm.OnSessionDone(sps) + + // validate session done information is valid. 
+ require.Equal(t, dataReliabilityRelayCu, sps.LatestRelayCu) + require.Equal(t, dataReliabilityRelayCu, sps.CuSum) + require.Equal(t, dataReliabilitySessionId, sps.SessionID) + require.Equal(t, relayNumber, sps.RelayNum) + require.Equal(t, epoch1, sps.PairingEpoch) +} + +func TestPSMDataReliabilitySessionFailure(t *testing.T) { + // initialize the struct + psm := initProviderSessionManager() + + // get data reliability session + sps, err := psm.GetDataReliabilitySession(consumerOneAddress, epoch1, dataReliabilitySessionId, relayNumber) + + // validate results + require.Nil(t, err) + require.NotNil(t, sps) + + // validate expected results + require.Empty(t, psm.sessionsWithAllConsumers) + require.NotEmpty(t, psm.dataReliabilitySessionsWithAllConsumers) + require.Empty(t, psm.subscriptionSessionsWithAllConsumers) + + // // prepare session for usage + sps.PrepareSessionForUsage(relayCu, dataReliabilityRelayCu) + + // validate session was prepared successfully + require.Equal(t, dataReliabilityRelayCu, sps.LatestRelayCu) + require.Equal(t, dataReliabilityRelayCu, sps.CuSum) + require.Equal(t, dataReliabilitySessionId, sps.SessionID) + require.Equal(t, relayNumber, sps.RelayNum) + require.Equal(t, epoch1, sps.PairingEpoch) + + // perform session failure. + psm.OnSessionFailure(sps) + + // validate on session failure that the relay number was subtracted + require.Equal(t, dataReliabilityRelayCu, sps.LatestRelayCu) + require.Equal(t, dataReliabilityRelayCu, sps.CuSum) + require.Equal(t, dataReliabilitySessionId, sps.SessionID) + require.Equal(t, relayNumber-1, sps.RelayNum) + require.Equal(t, epoch1, sps.PairingEpoch) } diff --git a/protocol/lavasession/provider_types.go b/protocol/lavasession/provider_types.go index b5f9437d27..654b74b724 100644 --- a/protocol/lavasession/provider_types.go +++ b/protocol/lavasession/provider_types.go @@ -275,16 +275,29 @@ func (sps *SingleProviderSession) validateAndSubUsedCU(currentCU uint64) error { } } +func (sps *SingleProviderSession) onDataReliabilitySessionFailure() error { + sps.CuSum = sps.CuSum - sps.LatestRelayCu + sps.RelayNum = sps.RelayNum - 1 + sps.LatestRelayCu = 0 + return nil +} + func (sps *SingleProviderSession) onSessionFailure() error { err := sps.VerifyLock() // sps is locked if err != nil { return utils.LavaFormatError("sps.verifyLock() failed in onSessionFailure", err, nil) } + defer sps.lock.Unlock() + + // handle data reliability session failure + if sps.userSessionsParent.atomicReadIsDataReliability() == isDataReliabilityPSWC { + return sps.onDataReliabilitySessionFailure() + } + sps.CuSum = sps.CuSum - sps.LatestRelayCu sps.RelayNum = sps.RelayNum - 1 sps.validateAndSubUsedCU(sps.LatestRelayCu) sps.LatestRelayCu = 0 - sps.lock.Unlock() return nil } From c547f8a1fe8757fc1524faa99a63121e6fab3c76 Mon Sep 17 00:00:00 2001 From: Ran Mishael Date: Tue, 7 Mar 2023 17:57:06 +0100 Subject: [PATCH 083/123] fixing subscription in PSM. adding unitests for the entire package. 
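
The subscription bookkeeping added here is a three-level map: epoch -> consumer address -> subscription id -> *RPCSubscription, with both inner maps created lazily and duplicate subscription ids rejected. Below is a minimal, self-contained sketch of that shape only (illustrative, not the package code: RPCSubscription is reduced to its Id, the names subscriptionStorage and addSubscription are made up for the example, and the real addSubscriptionToStorage also holds psm.lock while touching the maps):

package main

import "fmt"

// illustrative stand-in for lavasession.RPCSubscription, reduced to its Id
type RPCSubscription struct {
	Id string
}

// epoch -> consumer address -> subscription id -> subscription
type subscriptionStorage map[uint64]map[string]map[string]*RPCSubscription

// addSubscription mirrors the shape of addSubscriptionToStorage: create the
// per-epoch and per-consumer maps lazily, then refuse to overwrite an existing id.
func (s subscriptionStorage) addSubscription(sub *RPCSubscription, consumerAddress string, epoch uint64) error {
	if _, found := s[epoch]; !found {
		s[epoch] = make(map[string]map[string]*RPCSubscription)
	}
	if _, found := s[epoch][consumerAddress]; !found {
		s[epoch][consumerAddress] = make(map[string]*RPCSubscription)
	}
	if _, found := s[epoch][consumerAddress][sub.Id]; found {
		return fmt.Errorf("subscription %s already exists for %s in epoch %d", sub.Id, consumerAddress, epoch)
	}
	s[epoch][consumerAddress][sub.Id] = sub
	return nil
}

func main() {
	storage := subscriptionStorage{}
	fmt.Println(storage.addSubscription(&RPCSubscription{Id: "124"}, "consumer1", 10)) // <nil>
	fmt.Println(storage.addSubscription(&RPCSubscription{Id: "124"}, "consumer1", 10)) // already-exists error
}

ProcessUnsubscribe, SubscriptionEnded and the epoch cleanup walk the same nested map to find what to unsubscribe and delete.
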
--- protocol/lavasession/errors.go | 3 +- .../lavasession/provider_session_manager.go | 91 ++++- .../provider_session_manager_test.go | 342 ++++++++++++++++-- protocol/lavasession/provider_types.go | 10 +- 4 files changed, 400 insertions(+), 46 deletions(-) diff --git a/protocol/lavasession/errors.go b/protocol/lavasession/errors.go index 7317e8114e..daf5b83d89 100644 --- a/protocol/lavasession/errors.go +++ b/protocol/lavasession/errors.go @@ -42,5 +42,6 @@ var ( // Provider Side Errors SubscriptionAlreadyExistsError = sdkerrors.New("SubscriptionAlreadyExists Error", 892, "Subscription already exists in single provider session") DataReliabilitySessionAlreadyUsedError = sdkerrors.New("DataReliabilitySessionAlreadyUsed Error", 893, "Data Reliability Session already used by this consumer in this epoch") DataReliabilityCuSumMisMatchError = sdkerrors.New("DataReliabilityCuSumMisMatch Error", 894, "Data Reliability Cu sum mismatch error") - DataReliabilityRelayNumberMisMatchError = sdkerrors.New("DataReliabilityRelayNumberMisMatch Error", 894, "Data Reliability RelayNumber mismatch error") + DataReliabilityRelayNumberMisMatchError = sdkerrors.New("DataReliabilityRelayNumberMisMatch Error", 895, "Data Reliability RelayNumber mismatch error") + SubscriptionPointerIsNilError = sdkerrors.New("SubscriptionPointerIsNil Error", 896, "Trying to unsubscribe a nil pointer.") ) diff --git a/protocol/lavasession/provider_session_manager.go b/protocol/lavasession/provider_session_manager.go index 0d9df2c785..076224b9a8 100644 --- a/protocol/lavasession/provider_session_manager.go +++ b/protocol/lavasession/provider_session_manager.go @@ -106,18 +106,19 @@ func (psm *ProviderSessionManager) GetDataReliabilitySession(address string, epo return nil, utils.LavaFormatError("getOrCreateDataReliabilitySessionWithConsumer Failed", err, &map[string]string{"relayNumber": strconv.FormatUint(relayNumber, 10), "DataReliabilityRelayNumber": strconv.Itoa(DataReliabilityRelayNumber)}) } + // singleProviderSession is locked after this method is called unless we got an error singleProviderSession, err := providerSessionWithConsumer.getDataReliabilitySingleSession(sessionId, epoch) if err != nil { return nil, err } // validate relay number in the session stored - if singleProviderSession.RelayNum+1 > DataReliabilityRelayNumber { // validate relay number fits - return nil, utils.LavaFormatError("GetDataReliabilitySession singleProviderSession.RelayNum relayNumber is larger than the DataReliabilityRelayNumber allowed in Data Reliability", DataReliabilityRelayNumberMisMatchError, &map[string]string{"singleProviderSession.RelayNum": strconv.FormatUint(singleProviderSession.RelayNum+1, 10), "request.relayNumber": strconv.FormatUint(relayNumber, 10)}) + if singleProviderSession.RelayNum+1 > DataReliabilityRelayNumber { // validate relay number fits if it has been used already raise a used error + defer singleProviderSession.lock.Unlock() // in case of an error we need to unlock the session as its currently locked. 
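+		// this effectively caps a consumer at DataReliabilityRelayNumber data reliability relays per epoch; a retry only becomes possible after OnSessionFailure rolls RelayNum back (see TestPSMDataReliabilityRetryAfterFailure)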
+ return nil, utils.LavaFormatWarning("Data Reliability Session was already used", DataReliabilitySessionAlreadyUsedError, nil) } return singleProviderSession, nil - } func (psm *ProviderSessionManager) GetSession(address string, epoch uint64, sessionId uint64, relayNumber uint64) (*SingleProviderSession, error) { @@ -233,7 +234,30 @@ func (psm *ProviderSessionManager) UpdateEpoch(epoch uint64) { } psm.sessionsWithAllConsumers = filterOldEpochEntries(psm.blockedEpochHeight, psm.sessionsWithAllConsumers) psm.dataReliabilitySessionsWithAllConsumers = filterOldEpochEntries(psm.blockedEpochHeight, psm.dataReliabilitySessionsWithAllConsumers) - psm.subscriptionSessionsWithAllConsumers = filterOldEpochEntries(psm.blockedEpochHeight, psm.subscriptionSessionsWithAllConsumers) + // in the case of subscribe, we need to unsubscribe before deleting the key from storage. + psm.subscriptionSessionsWithAllConsumers = psm.filterOldEpochEntriesSubscribe(psm.blockedEpochHeight, psm.subscriptionSessionsWithAllConsumers) +} + +func (psm *ProviderSessionManager) filterOldEpochEntriesSubscribe(blockedEpochHeight uint64, allEpochsMap map[uint64]map[string]map[string]*RPCSubscription) map[uint64]map[string]map[string]*RPCSubscription { + validEpochsMap := map[uint64]map[string]map[string]*RPCSubscription{} + for epochStored, value := range allEpochsMap { + if !IsEpochValidForUse(epochStored, blockedEpochHeight) { + // epoch is not valid so we don't keep its key in the new map + for _, consumers := range value { // unsubscribe + for _, subscription := range consumers { + if subscription.Sub == nil { // validate subscription not nil + utils.LavaFormatError("filterOldEpochEntriesSubscribe Error", SubscriptionPointerIsNilError, &map[string]string{"subscripionId": subscription.Id}) + } else { + subscription.Sub.Unsubscribe() + } + } + } + continue + } + // if epochStored is ok, copy the value into the new map + validEpochsMap[epochStored] = value + } + return validEpochsMap } func filterOldEpochEntries[T any](blockedEpochHeight uint64, allEpochsMap map[uint64]T) (validEpochsMap map[uint64]T) { @@ -263,21 +287,59 @@ func (psm *ProviderSessionManager) ProcessUnsubscribe(apiName string, subscripti return utils.LavaFormatError("Couldn't find consumer address in psm.subscriptionSessionsWithAllConsumers", nil, &map[string]string{"epoch": strconv.FormatUint(epoch, 10), "address": consumerAddress}) } + var err error if apiName == TendermintUnsubscribeAll { // unsubscribe all subscriptions for _, v := range mapOfSubscriptionId { - v.Sub.Unsubscribe() + if v.Sub == nil { + err = utils.LavaFormatError("ProcessUnsubscribe TendermintUnsubscribeAll mapOfSubscriptionId Error", SubscriptionPointerIsNilError, &map[string]string{"subscripionId": subscriptionID}) + } else { + v.Sub.Unsubscribe() + } } - return nil + psm.subscriptionSessionsWithAllConsumers[epoch][consumerAddress] = make(map[string]*RPCSubscription) // delete the entire map. 
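+		// the consumer key itself is kept and only its subscriptions map is swapped for an empty one, so later lookups for this consumer do not hit a missing map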
+ return err } subscription, foundSubscription := mapOfSubscriptionId[subscriptionID] if !foundSubscription { return utils.LavaFormatError("Couldn't find subscription Id in psm.subscriptionSessionsWithAllConsumers", nil, &map[string]string{"epoch": strconv.FormatUint(epoch, 10), "address": consumerAddress, "subscriptionId": subscriptionID}) } - subscription.Sub.Unsubscribe() - delete(mapOfSubscriptionId, subscriptionID) // delete subscription after finished with it - return nil + + if subscription.Sub == nil { + err = utils.LavaFormatError("ProcessUnsubscribe Error", SubscriptionPointerIsNilError, &map[string]string{"subscripionId": subscriptionID}) + } else { + subscription.Sub.Unsubscribe() + } + delete(psm.subscriptionSessionsWithAllConsumers[epoch][consumerAddress], subscriptionID) // delete subscription after finished with it + return err +} + +func (psm *ProviderSessionManager) addSubscriptionToStorage(subscription *RPCSubscription, consumerAddress string, epoch uint64) error { + psm.lock.Lock() + defer psm.lock.Unlock() + // we already validated the epoch is valid in the GetSession no need to verify again. + _, foundEpoch := psm.subscriptionSessionsWithAllConsumers[epoch] + if !foundEpoch { + // this is the first time we subscribe in this epoch + psm.subscriptionSessionsWithAllConsumers[epoch] = make(map[string]map[string]*RPCSubscription) + } + + _, foundSubscriptions := psm.subscriptionSessionsWithAllConsumers[epoch][consumerAddress] + if !foundSubscriptions { + // this is the first subscription added in this epoch. we need to create the map + psm.subscriptionSessionsWithAllConsumers[epoch][consumerAddress] = make(map[string]*RPCSubscription) + } + + _, foundSubscription := psm.subscriptionSessionsWithAllConsumers[epoch][consumerAddress][subscription.Id] + if !foundSubscription { + // we shouldnt find a subscription already in the storage. + psm.subscriptionSessionsWithAllConsumers[epoch][consumerAddress][subscription.Id] = subscription + return nil // successfully added subscription to storage + } + + // if we get here we found a subscription already in the storage and we need to return an error as we can't add two subscriptions with the same id + return utils.LavaFormatError("addSubscription", SubscriptionAlreadyExistsError, &map[string]string{"SubscriptionId": subscription.Id, "epoch": strconv.FormatUint(epoch, 10), "address": consumerAddress}) } func (psm *ProviderSessionManager) ReleaseSessionAndCreateSubscription(session *SingleProviderSession, subscription *RPCSubscription, consumerAddress string, epoch uint64) error { @@ -285,7 +347,7 @@ func (psm *ProviderSessionManager) ReleaseSessionAndCreateSubscription(session * if err != nil { return utils.LavaFormatError("Failed ReleaseSessionAndCreateSubscription", err, nil) } - return nil + return psm.addSubscriptionToStorage(subscription, consumerAddress, epoch) } // try to disconnect the subscription incase we got an error. 
@@ -306,8 +368,13 @@ func (psm *ProviderSessionManager) SubscriptionEnded(consumerAddress string, epo if !foundSubscription { return } - subscription.Sub.Unsubscribe() - delete(mapOfSubscriptionId, subscriptionID) // delete subscription after finished with it + + if subscription.Sub == nil { // validate subscription not nil + utils.LavaFormatError("SubscriptionEnded Error", SubscriptionPointerIsNilError, &map[string]string{"subscripionId": subscription.Id}) + } else { + subscription.Sub.Unsubscribe() + } + delete(psm.subscriptionSessionsWithAllConsumers[epoch][consumerAddress], subscriptionID) // delete subscription after finished with it } // Called when the reward server has information on a higher cu proof and usage and this providerSessionsManager needs to sync up on it diff --git a/protocol/lavasession/provider_session_manager_test.go b/protocol/lavasession/provider_session_manager_test.go index 4e2886341b..2bcce440c3 100644 --- a/protocol/lavasession/provider_session_manager_test.go +++ b/protocol/lavasession/provider_session_manager_test.go @@ -12,6 +12,8 @@ const ( dataReliabilityRelayCu = uint64(0) epoch1 = uint64(10) sessionId = uint64(123) + subscriptionID = "124" + subscriptionID2 = "125" dataReliabilitySessionId = uint64(0) relayNumber = uint64(1) maxCu = uint64(150) @@ -62,6 +64,35 @@ func prepareSession(t *testing.T) (*ProviderSessionManager, *SingleProviderSessi return psm, sps } +func prepareDRSession(t *testing.T) (*ProviderSessionManager, *SingleProviderSession) { + // initialize the struct + psm := initProviderSessionManager() + + // get data reliability session + sps, err := psm.GetDataReliabilitySession(consumerOneAddress, epoch1, dataReliabilitySessionId, relayNumber) + + // validate results + require.Nil(t, err) + require.NotNil(t, sps) + + // validate expected results + require.Empty(t, psm.sessionsWithAllConsumers) + require.NotEmpty(t, psm.dataReliabilitySessionsWithAllConsumers) + require.Empty(t, psm.subscriptionSessionsWithAllConsumers) + + // // prepare session for usage + sps.PrepareSessionForUsage(relayCu, dataReliabilityRelayCu) + + // validate session was prepared successfully + require.Equal(t, dataReliabilityRelayCu, sps.LatestRelayCu) + require.Equal(t, dataReliabilityRelayCu, sps.CuSum) + require.Equal(t, dataReliabilitySessionId, sps.SessionID) + require.Equal(t, relayNumber, sps.RelayNum) + require.Equal(t, epoch1, sps.PairingEpoch) + + return psm, sps +} + func TestHappyFlowPSM(t *testing.T) { // init test psm, sps := prepareSession(t) @@ -185,30 +216,23 @@ func TestPSMCUMisMatch(t *testing.T) { } func TestPSMDataReliabilityHappyFlow(t *testing.T) { - // initialize the struct - psm := initProviderSessionManager() - - // get data reliability session - sps, err := psm.GetDataReliabilitySession(consumerOneAddress, epoch1, dataReliabilitySessionId, relayNumber) + // prepare data reliability session + psm, sps := prepareDRSession(t) - // validate results - require.Nil(t, err) - require.NotNil(t, sps) - - // validate expected results - require.Empty(t, psm.sessionsWithAllConsumers) - require.NotEmpty(t, psm.dataReliabilitySessionsWithAllConsumers) - require.Empty(t, psm.subscriptionSessionsWithAllConsumers) - - // // prepare session for usage - sps.PrepareSessionForUsage(relayCu, dataReliabilityRelayCu) + // perform session done + psm.OnSessionDone(sps) - // validate session was prepared successfully + // validate session done information is valid. 
require.Equal(t, dataReliabilityRelayCu, sps.LatestRelayCu) require.Equal(t, dataReliabilityRelayCu, sps.CuSum) require.Equal(t, dataReliabilitySessionId, sps.SessionID) require.Equal(t, relayNumber, sps.RelayNum) require.Equal(t, epoch1, sps.PairingEpoch) +} + +func TestPSMDataReliabilityTwicePerEpoch(t *testing.T) { + // prepare data reliability session + psm, sps := prepareDRSession(t) // perform session done psm.OnSessionDone(sps) @@ -219,24 +243,52 @@ func TestPSMDataReliabilityHappyFlow(t *testing.T) { require.Equal(t, dataReliabilitySessionId, sps.SessionID) require.Equal(t, relayNumber, sps.RelayNum) require.Equal(t, epoch1, sps.PairingEpoch) + + // try to get a data reliability session again. + sps, err := psm.GetDataReliabilitySession(consumerOneAddress, epoch1, dataReliabilitySessionId, relayNumber) + + // validate we cant get more than one data reliability session per epoch (might change in the future) + require.Error(t, err) + require.True(t, DataReliabilitySessionAlreadyUsedError.Is(err)) // validate error is what we expect. + require.Nil(t, sps) } func TestPSMDataReliabilitySessionFailure(t *testing.T) { - // initialize the struct - psm := initProviderSessionManager() + // prepare data reliability session + psm, sps := prepareDRSession(t) - // get data reliability session + // perform session failure. + psm.OnSessionFailure(sps) + + // validate on session failure that the relay number was subtracted + require.Equal(t, dataReliabilityRelayCu, sps.LatestRelayCu) + require.Equal(t, dataReliabilityRelayCu, sps.CuSum) + require.Equal(t, dataReliabilitySessionId, sps.SessionID) + require.Equal(t, relayNumber-1, sps.RelayNum) + require.Equal(t, epoch1, sps.PairingEpoch) +} + +func TestPSMDataReliabilityRetryAfterFailure(t *testing.T) { + // prepare data reliability session + psm, sps := prepareDRSession(t) + + // perform session failure. + psm.OnSessionFailure(sps) + + // validate on session failure that the relay number was subtracted + require.Equal(t, dataReliabilityRelayCu, sps.LatestRelayCu) + require.Equal(t, dataReliabilityRelayCu, sps.CuSum) + require.Equal(t, dataReliabilitySessionId, sps.SessionID) + require.Equal(t, relayNumber-1, sps.RelayNum) + require.Equal(t, epoch1, sps.PairingEpoch) + + // try to get a data reliability session again. sps, err := psm.GetDataReliabilitySession(consumerOneAddress, epoch1, dataReliabilitySessionId, relayNumber) - // validate results + // validate we can get a data reliability session if we failed before require.Nil(t, err) require.NotNil(t, sps) - // validate expected results - require.Empty(t, psm.sessionsWithAllConsumers) - require.NotEmpty(t, psm.dataReliabilitySessionsWithAllConsumers) - require.Empty(t, psm.subscriptionSessionsWithAllConsumers) - // // prepare session for usage sps.PrepareSessionForUsage(relayCu, dataReliabilityRelayCu) @@ -247,13 +299,243 @@ func TestPSMDataReliabilitySessionFailure(t *testing.T) { require.Equal(t, relayNumber, sps.RelayNum) require.Equal(t, epoch1, sps.PairingEpoch) - // perform session failure. - psm.OnSessionFailure(sps) + // perform session done + psm.OnSessionDone(sps) - // validate on session failure that the relay number was subtracted + // validate session done information is valid. 
require.Equal(t, dataReliabilityRelayCu, sps.LatestRelayCu) require.Equal(t, dataReliabilityRelayCu, sps.CuSum) require.Equal(t, dataReliabilitySessionId, sps.SessionID) - require.Equal(t, relayNumber-1, sps.RelayNum) + require.Equal(t, relayNumber, sps.RelayNum) require.Equal(t, epoch1, sps.PairingEpoch) } + +func TestPSMDataReliabilityEpochChange(t *testing.T) { + // prepare data reliability session + psm, sps := prepareDRSession(t) + + // perform session done. + psm.OnSessionDone(sps) + + // update epoch to epoch2 height + psm.UpdateEpoch(epoch2) + + // validate epoch update + require.Equal(t, psm.blockedEpochHeight, epoch1) + require.Empty(t, psm.dataReliabilitySessionsWithAllConsumers) +} + +func TestPSMDataReliabilitySessionFailureEpochChange(t *testing.T) { + // prepare data reliability session + psm, sps := prepareDRSession(t) + + // perform session done. + psm.OnSessionFailure(sps) + + // update epoch to epoch2 height + psm.UpdateEpoch(epoch2) + + // validate epoch update + require.Equal(t, psm.blockedEpochHeight, epoch1) + require.Empty(t, psm.dataReliabilitySessionsWithAllConsumers) +} + +func TestPSMSubscribeHappyFlowProcessUnsubscribe(t *testing.T) { + // init test + psm, sps := prepareSession(t) + + // validate subscription map is empty + require.Empty(t, psm.subscriptionSessionsWithAllConsumers) + + // subscribe + var channel chan interface{} + subscription := &RPCSubscription{ + Id: subscriptionID, + Sub: nil, + SubscribeRepliesChan: channel, + } + psm.ReleaseSessionAndCreateSubscription(sps, subscription, consumerOneAddress, epoch1) + + // verify state after subscription creation + require.True(t, LockMisUseDetectedError.Is(sps.VerifyLock())) // validating session was unlocked. + require.NotEmpty(t, psm.sessionsWithAllConsumers) + require.NotEmpty(t, psm.subscriptionSessionsWithAllConsumers) + _, foundEpoch := psm.subscriptionSessionsWithAllConsumers[epoch1] + require.True(t, foundEpoch) + _, foundConsumer := psm.subscriptionSessionsWithAllConsumers[epoch1][consumerOneAddress] + require.True(t, foundConsumer) + _, foundSubscription := psm.subscriptionSessionsWithAllConsumers[epoch1][consumerOneAddress][subscriptionID] + require.True(t, foundSubscription) + + err := psm.ProcessUnsubscribe("unsubscribe", subscriptionID, consumerOneAddress, epoch1) + require.True(t, SubscriptionPointerIsNilError.Is(err)) + require.Empty(t, psm.subscriptionSessionsWithAllConsumers[epoch1][consumerOneAddress]) +} + +func TestPSMSubscribeHappyFlowProcessUnsubscribeUnsubscribeAll(t *testing.T) { + // init test + psm, sps := prepareSession(t) + + // validate subscription map is empty + require.Empty(t, psm.subscriptionSessionsWithAllConsumers) + + // subscribe + var channel chan interface{} + subscription := &RPCSubscription{ + Id: subscriptionID, + Sub: nil, + SubscribeRepliesChan: channel, + } + subscription2 := &RPCSubscription{ + Id: subscriptionID2, + Sub: nil, + SubscribeRepliesChan: channel, + } + psm.ReleaseSessionAndCreateSubscription(sps, subscription, consumerOneAddress, epoch1) + + sps, err := psm.GetSession(consumerOneAddress, epoch1, sessionId, relayNumber+1) + require.Nil(t, err) + require.NotNil(t, sps) + + // create 2nd subscription + psm.ReleaseSessionAndCreateSubscription(sps, subscription2, consumerOneAddress, epoch1) + + // verify state after subscription creation + require.True(t, LockMisUseDetectedError.Is(sps.VerifyLock())) // validating session was unlocked. 
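// both subscriptions should now be stored under epoch1 for consumerOneAddress before the unsubscribe-all request clears them.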
+ require.NotEmpty(t, psm.sessionsWithAllConsumers) + require.NotEmpty(t, psm.subscriptionSessionsWithAllConsumers) + _, foundEpoch := psm.subscriptionSessionsWithAllConsumers[epoch1] + require.True(t, foundEpoch) + _, foundConsumer := psm.subscriptionSessionsWithAllConsumers[epoch1][consumerOneAddress] + require.True(t, foundConsumer) + _, foundSubscription := psm.subscriptionSessionsWithAllConsumers[epoch1][consumerOneAddress][subscriptionID] + require.True(t, foundSubscription) + _, foundSubscription2 := psm.subscriptionSessionsWithAllConsumers[epoch1][consumerOneAddress][subscriptionID2] + require.True(t, foundSubscription2) + + err = psm.ProcessUnsubscribe(TendermintUnsubscribeAll, subscriptionID, consumerOneAddress, epoch1) + require.True(t, SubscriptionPointerIsNilError.Is(err)) + require.Empty(t, psm.subscriptionSessionsWithAllConsumers[epoch1][consumerOneAddress]) +} +func TestPSMSubscribeHappyFlowProcessUnsubscribeUnsubscribeOneOutOfTwo(t *testing.T) { + // init test + psm, sps := prepareSession(t) + + // validate subscription map is empty + require.Empty(t, psm.subscriptionSessionsWithAllConsumers) + + // subscribe + var channel chan interface{} + subscription := &RPCSubscription{ + Id: subscriptionID, + Sub: nil, + SubscribeRepliesChan: channel, + } + subscription2 := &RPCSubscription{ + Id: subscriptionID2, + Sub: nil, + SubscribeRepliesChan: channel, + } + psm.ReleaseSessionAndCreateSubscription(sps, subscription, consumerOneAddress, epoch1) + // create 2nd subscription as we release the session we can just ask for it again with relayNumber + 1 + sps, err := psm.GetSession(consumerOneAddress, epoch1, sessionId, relayNumber+1) + psm.ReleaseSessionAndCreateSubscription(sps, subscription2, consumerOneAddress, epoch1) + + err = psm.ProcessUnsubscribe("unsubscribeOne", subscriptionID, consumerOneAddress, epoch1) + require.True(t, SubscriptionPointerIsNilError.Is(err)) + require.NotEmpty(t, psm.subscriptionSessionsWithAllConsumers[epoch1][consumerOneAddress]) + _, foundId2 := psm.subscriptionSessionsWithAllConsumers[epoch1][consumerOneAddress][subscriptionID2] + require.True(t, foundId2) +} + +func TestPSMSubscribeHappyFlowSubscriptionEnded(t *testing.T) { + // init test + psm, sps := prepareSession(t) + + // validate subscription map is empty + require.Empty(t, psm.subscriptionSessionsWithAllConsumers) + + // subscribe + var channel chan interface{} + subscription := &RPCSubscription{ + Id: subscriptionID, + Sub: nil, + SubscribeRepliesChan: channel, + } + psm.ReleaseSessionAndCreateSubscription(sps, subscription, consumerOneAddress, epoch1) + + // verify state after subscription creation + require.True(t, LockMisUseDetectedError.Is(sps.VerifyLock())) // validating session was unlocked. 
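// the subscription should now be stored under epoch1 for consumerOneAddress until SubscriptionEnded removes it.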
+ require.NotEmpty(t, psm.sessionsWithAllConsumers) + require.NotEmpty(t, psm.subscriptionSessionsWithAllConsumers) + _, foundEpoch := psm.subscriptionSessionsWithAllConsumers[epoch1] + require.True(t, foundEpoch) + _, foundConsumer := psm.subscriptionSessionsWithAllConsumers[epoch1][consumerOneAddress] + require.True(t, foundConsumer) + _, foundSubscription := psm.subscriptionSessionsWithAllConsumers[epoch1][consumerOneAddress][subscriptionID] + require.True(t, foundSubscription) + + psm.SubscriptionEnded(consumerOneAddress, epoch1, subscriptionID) + require.Empty(t, psm.subscriptionSessionsWithAllConsumers[epoch1][consumerOneAddress]) +} + +func TestPSMSubscribeHappyFlowSubscriptionEndedOneOutOfTwo(t *testing.T) { + // init test + psm, sps := prepareSession(t) + + // validate subscription map is empty + require.Empty(t, psm.subscriptionSessionsWithAllConsumers) + + // subscribe + var channel chan interface{} + subscription := &RPCSubscription{ + Id: subscriptionID, + Sub: nil, + SubscribeRepliesChan: channel, + } + subscription2 := &RPCSubscription{ + Id: subscriptionID2, + Sub: nil, + SubscribeRepliesChan: channel, + } + psm.ReleaseSessionAndCreateSubscription(sps, subscription, consumerOneAddress, epoch1) + // create 2nd subscription as we release the session we can just ask for it again with relayNumber + 1 + sps, err := psm.GetSession(consumerOneAddress, epoch1, sessionId, relayNumber+1) + require.Nil(t, err) + psm.ReleaseSessionAndCreateSubscription(sps, subscription2, consumerOneAddress, epoch1) + + psm.SubscriptionEnded(consumerOneAddress, epoch1, subscriptionID) + require.NotEmpty(t, psm.subscriptionSessionsWithAllConsumers[epoch1][consumerOneAddress]) + _, foundId2 := psm.subscriptionSessionsWithAllConsumers[epoch1][consumerOneAddress][subscriptionID2] + require.True(t, foundId2) +} + +func TestPSMSubscribeEpochChange(t *testing.T) { + // init test + psm, sps := prepareSession(t) + + // validate subscription map is empty + require.Empty(t, psm.subscriptionSessionsWithAllConsumers) + + // subscribe + var channel chan interface{} + subscription := &RPCSubscription{ + Id: subscriptionID, + Sub: nil, + SubscribeRepliesChan: channel, + } + subscription2 := &RPCSubscription{ + Id: subscriptionID2, + Sub: nil, + SubscribeRepliesChan: channel, + } + psm.ReleaseSessionAndCreateSubscription(sps, subscription, consumerOneAddress, epoch1) + // create 2nd subscription as we release the session we can just ask for it again with relayNumber + 1 + sps, err := psm.GetSession(consumerOneAddress, epoch1, sessionId, relayNumber+1) + require.Nil(t, err) + psm.ReleaseSessionAndCreateSubscription(sps, subscription2, consumerOneAddress, epoch1) + + psm.UpdateEpoch(epoch2) + require.Empty(t, psm.subscriptionSessionsWithAllConsumers) + require.Empty(t, psm.sessionsWithAllConsumers) +} diff --git a/protocol/lavasession/provider_types.go b/protocol/lavasession/provider_types.go index 654b74b724..486d3bfc79 100644 --- a/protocol/lavasession/provider_types.go +++ b/protocol/lavasession/provider_types.go @@ -164,12 +164,15 @@ func (pswc *ProviderSessionsWithConsumer) GetExistingSession(sessionId uint64) ( // this function verifies the provider can create a data reliability session and returns one if valid func (pswc *ProviderSessionsWithConsumer) getDataReliabilitySingleSession(sessionId uint64, epoch uint64) (session *SingleProviderSession, err error) { utils.LavaFormatDebug("Provider creating new DataReliabilitySingleSession", &map[string]string{"SessionID": strconv.FormatUint(sessionId, 10), "epoch": 
strconv.FormatUint(epoch, 10)}) - _, foundDataReliabilitySession := pswc.Sessions[sessionId] + session, foundDataReliabilitySession := pswc.Sessions[sessionId] if foundDataReliabilitySession { - // consumer already used his data reliability session. - return nil, utils.LavaFormatWarning("Data Reliability Session was already used", DataReliabilitySessionAlreadyUsedError, nil) + // if session exists, relay number should be 0 as it might had an error + // locking the session and returning for validation + session.lock.Lock() + return session, nil } + // otherwise return a new session and add it to the sessions list session = &SingleProviderSession{ userSessionsParent: pswc, SessionID: sessionId, @@ -180,6 +183,7 @@ func (pswc *ProviderSessionsWithConsumer) getDataReliabilitySingleSession(sessio // this is a double lock and risky but we just created session and nobody has reference to it yet session.lock.Lock() pswc.Sessions[sessionId] = session + // session is still locked when we return it return session, nil } From 72a11698dee292762f58c0114f0179321f53aa79 Mon Sep 17 00:00:00 2001 From: Ran Mishael Date: Tue, 7 Mar 2023 18:48:25 +0100 Subject: [PATCH 084/123] using generics to trigger delete event instead of using the code twice --- .../lavasession/provider_session_manager.go | 79 +++++++------------ .../provider_session_manager_test.go | 40 ++++++---- protocol/lavasession/provider_types.go | 27 +++++++ 3 files changed, 82 insertions(+), 64 deletions(-) diff --git a/protocol/lavasession/provider_session_manager.go b/protocol/lavasession/provider_session_manager.go index 076224b9a8..7785906ee2 100644 --- a/protocol/lavasession/provider_session_manager.go +++ b/protocol/lavasession/provider_session_manager.go @@ -9,9 +9,9 @@ import ( ) type ProviderSessionManager struct { - sessionsWithAllConsumers map[uint64]map[string]*ProviderSessionsWithConsumer // first key is epochs, second key is a consumer address - dataReliabilitySessionsWithAllConsumers map[uint64]map[string]*ProviderSessionsWithConsumer // separate handling of data reliability so later on we can use it outside of pairing, first key is epochs, second key is a consumer address - subscriptionSessionsWithAllConsumers map[uint64]map[string]map[string]*RPCSubscription // first key is an epoch, second key is a consumer address, third key is subscriptionId + sessionsWithAllConsumers map[uint64]sessionData // first key is epochs, second key is a consumer address + dataReliabilitySessionsWithAllConsumers map[uint64]sessionData // separate handling of data reliability so later on we can use it outside of pairing, first key is epochs, second key is a consumer address + subscriptionSessionsWithAllConsumers map[uint64]subscriptionData // first key is an epoch, second key is a consumer address, third key is subscriptionId lock sync.RWMutex blockedEpochHeight uint64 // requests from this epoch are blocked rpcProviderEndpoint *RPCProviderEndpoint @@ -63,7 +63,7 @@ func (psm *ProviderSessionManager) getSingleSessionFromProviderSessionWithConsum func (psm *ProviderSessionManager) getOrCreateDataReliabilitySessionWithConsumer(address string, epoch uint64, sessionId uint64) (providerSessionWithConsumer *ProviderSessionsWithConsumer, err error) { if mapOfDataReliabilitySessionsWithConsumer, consumerFoundInEpoch := psm.dataReliabilitySessionsWithAllConsumers[epoch]; consumerFoundInEpoch { - if providerSessionWithConsumer, consumerAddressFound := mapOfDataReliabilitySessionsWithConsumer[address]; consumerAddressFound { + if providerSessionWithConsumer, 
consumerAddressFound := mapOfDataReliabilitySessionsWithConsumer.sessionMap[address]; consumerAddressFound { if providerSessionWithConsumer.atomicReadConsumerBlocked() == blockListedConsumer { // we atomic read block listed so we dont need to lock the provider. (double lock is always a bad idea.) // consumer is blocked. utils.LavaFormatWarning("getActiveConsumer", ConsumerIsBlockListed, &map[string]string{"RequestedEpoch": strconv.FormatUint(epoch, 10), "ConsumerAddress": address}) @@ -73,12 +73,12 @@ func (psm *ProviderSessionManager) getOrCreateDataReliabilitySessionWithConsumer } } else { // If Epoch is missing from map, create a new instance - psm.dataReliabilitySessionsWithAllConsumers[epoch] = make(map[string]*ProviderSessionsWithConsumer) + psm.dataReliabilitySessionsWithAllConsumers[epoch] = sessionData{sessionMap: make(map[string]*ProviderSessionsWithConsumer)} } // If we got here, we need to create a new instance for this consumer address. providerSessionWithConsumer = NewProviderSessionsWithConsumer(address, nil, isDataReliabilityPSWC) - psm.dataReliabilitySessionsWithAllConsumers[epoch][address] = providerSessionWithConsumer + psm.dataReliabilitySessionsWithAllConsumers[epoch].sessionMap[address] = providerSessionWithConsumer return providerSessionWithConsumer, nil } @@ -145,14 +145,14 @@ func (psm *ProviderSessionManager) registerNewConsumer(consumerAddr string, epoc mapOfProviderSessionsWithConsumer, foundEpochInMap := psm.sessionsWithAllConsumers[epoch] if !foundEpochInMap { - mapOfProviderSessionsWithConsumer = make(map[string]*ProviderSessionsWithConsumer) + mapOfProviderSessionsWithConsumer = sessionData{sessionMap: make(map[string]*ProviderSessionsWithConsumer)} psm.sessionsWithAllConsumers[epoch] = mapOfProviderSessionsWithConsumer } - providerSessionWithConsumer, foundAddressInMap := mapOfProviderSessionsWithConsumer[consumerAddr] + providerSessionWithConsumer, foundAddressInMap := mapOfProviderSessionsWithConsumer.sessionMap[consumerAddr] if !foundAddressInMap { providerSessionWithConsumer = NewProviderSessionsWithConsumer(consumerAddr, &ProviderSessionsEpochData{MaxComputeUnits: maxCuForConsumer}, notDataReliabilityPSWC) - mapOfProviderSessionsWithConsumer[consumerAddr] = providerSessionWithConsumer + mapOfProviderSessionsWithConsumer.sessionMap[consumerAddr] = providerSessionWithConsumer } return providerSessionWithConsumer, nil } @@ -181,7 +181,7 @@ func (psm *ProviderSessionManager) getActiveConsumer(epoch uint64, address strin return nil, InvalidEpochError } if mapOfProviderSessionsWithConsumer, consumerFoundInEpoch := psm.sessionsWithAllConsumers[epoch]; consumerFoundInEpoch { - if providerSessionWithConsumer, consumerAddressFound := mapOfProviderSessionsWithConsumer[address]; consumerAddressFound { + if providerSessionWithConsumer, consumerAddressFound := mapOfProviderSessionsWithConsumer.sessionMap[address]; consumerAddressFound { if providerSessionWithConsumer.atomicReadConsumerBlocked() == blockListedConsumer { // we atomic read block listed so we dont need to lock the provider. (double lock is always a bad idea.) // consumer is blocked. 
utils.LavaFormatWarning("getActiveConsumer", ConsumerIsBlockListed, &map[string]string{"RequestedEpoch": strconv.FormatUint(epoch, 10), "ConsumerAddress": address}) @@ -234,39 +234,20 @@ func (psm *ProviderSessionManager) UpdateEpoch(epoch uint64) { } psm.sessionsWithAllConsumers = filterOldEpochEntries(psm.blockedEpochHeight, psm.sessionsWithAllConsumers) psm.dataReliabilitySessionsWithAllConsumers = filterOldEpochEntries(psm.blockedEpochHeight, psm.dataReliabilitySessionsWithAllConsumers) - // in the case of subscribe, we need to unsubscribe before deleting the key from storage. - psm.subscriptionSessionsWithAllConsumers = psm.filterOldEpochEntriesSubscribe(psm.blockedEpochHeight, psm.subscriptionSessionsWithAllConsumers) + psm.subscriptionSessionsWithAllConsumers = filterOldEpochEntries(psm.blockedEpochHeight, psm.subscriptionSessionsWithAllConsumers) } -func (psm *ProviderSessionManager) filterOldEpochEntriesSubscribe(blockedEpochHeight uint64, allEpochsMap map[uint64]map[string]map[string]*RPCSubscription) map[uint64]map[string]map[string]*RPCSubscription { - validEpochsMap := map[uint64]map[string]map[string]*RPCSubscription{} - for epochStored, value := range allEpochsMap { - if !IsEpochValidForUse(epochStored, blockedEpochHeight) { - // epoch is not valid so we don't keep its key in the new map - for _, consumers := range value { // unsubscribe - for _, subscription := range consumers { - if subscription.Sub == nil { // validate subscription not nil - utils.LavaFormatError("filterOldEpochEntriesSubscribe Error", SubscriptionPointerIsNilError, &map[string]string{"subscripionId": subscription.Id}) - } else { - subscription.Sub.Unsubscribe() - } - } - } - continue - } - // if epochStored is ok, copy the value into the new map - validEpochsMap[epochStored] = value - } - return validEpochsMap -} - -func filterOldEpochEntries[T any](blockedEpochHeight uint64, allEpochsMap map[uint64]T) (validEpochsMap map[uint64]T) { +func filterOldEpochEntries[T dataHandler](blockedEpochHeight uint64, allEpochsMap map[uint64]T) (validEpochsMap map[uint64]T) { // In order to avoid running over the map twice, (1. mark 2. delete.) better technique is to copy and filter // which has better O(n) vs O(2n) validEpochsMap = map[uint64]T{} for epochStored, value := range allEpochsMap { if !IsEpochValidForUse(epochStored, blockedEpochHeight) { // epoch is not valid so we don't keep its key in the new map + + // in the case of subscribe, we need to unsubscribe before deleting the key from storage. 
+ value.onDeleteEvent() + continue } // if epochStored is ok, copy the value into the new map @@ -282,7 +263,7 @@ func (psm *ProviderSessionManager) ProcessUnsubscribe(apiName string, subscripti if !foundMapOfConsumers { return utils.LavaFormatError("Couldn't find epoch in psm.subscriptionSessionsWithAllConsumers", nil, &map[string]string{"epoch": strconv.FormatUint(epoch, 10), "address": consumerAddress}) } - mapOfSubscriptionId, foundMapOfSubscriptionId := mapOfConsumers[consumerAddress] + mapOfSubscriptionId, foundMapOfSubscriptionId := mapOfConsumers.subscriptionMap[consumerAddress] if !foundMapOfSubscriptionId { return utils.LavaFormatError("Couldn't find consumer address in psm.subscriptionSessionsWithAllConsumers", nil, &map[string]string{"epoch": strconv.FormatUint(epoch, 10), "address": consumerAddress}) } @@ -297,7 +278,7 @@ func (psm *ProviderSessionManager) ProcessUnsubscribe(apiName string, subscripti v.Sub.Unsubscribe() } } - psm.subscriptionSessionsWithAllConsumers[epoch][consumerAddress] = make(map[string]*RPCSubscription) // delete the entire map. + psm.subscriptionSessionsWithAllConsumers[epoch].subscriptionMap[consumerAddress] = make(map[string]*RPCSubscription) // delete the entire map. return err } @@ -311,7 +292,7 @@ func (psm *ProviderSessionManager) ProcessUnsubscribe(apiName string, subscripti } else { subscription.Sub.Unsubscribe() } - delete(psm.subscriptionSessionsWithAllConsumers[epoch][consumerAddress], subscriptionID) // delete subscription after finished with it + delete(psm.subscriptionSessionsWithAllConsumers[epoch].subscriptionMap[consumerAddress], subscriptionID) // delete subscription after finished with it return err } @@ -322,19 +303,19 @@ func (psm *ProviderSessionManager) addSubscriptionToStorage(subscription *RPCSub _, foundEpoch := psm.subscriptionSessionsWithAllConsumers[epoch] if !foundEpoch { // this is the first time we subscribe in this epoch - psm.subscriptionSessionsWithAllConsumers[epoch] = make(map[string]map[string]*RPCSubscription) + psm.subscriptionSessionsWithAllConsumers[epoch] = subscriptionData{subscriptionMap: make(map[string]map[string]*RPCSubscription)} } - _, foundSubscriptions := psm.subscriptionSessionsWithAllConsumers[epoch][consumerAddress] + _, foundSubscriptions := psm.subscriptionSessionsWithAllConsumers[epoch].subscriptionMap[consumerAddress] if !foundSubscriptions { // this is the first subscription added in this epoch. we need to create the map - psm.subscriptionSessionsWithAllConsumers[epoch][consumerAddress] = make(map[string]*RPCSubscription) + psm.subscriptionSessionsWithAllConsumers[epoch].subscriptionMap[consumerAddress] = make(map[string]*RPCSubscription) } - _, foundSubscription := psm.subscriptionSessionsWithAllConsumers[epoch][consumerAddress][subscription.Id] + _, foundSubscription := psm.subscriptionSessionsWithAllConsumers[epoch].subscriptionMap[consumerAddress][subscription.Id] if !foundSubscription { // we shouldnt find a subscription already in the storage. 
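// a missing entry is the expected case; the subscription is then stored under the epoch and consumer address keys.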
- psm.subscriptionSessionsWithAllConsumers[epoch][consumerAddress][subscription.Id] = subscription + psm.subscriptionSessionsWithAllConsumers[epoch].subscriptionMap[consumerAddress][subscription.Id] = subscription return nil // successfully added subscription to storage } @@ -359,7 +340,7 @@ func (psm *ProviderSessionManager) SubscriptionEnded(consumerAddress string, epo if !foundMapOfConsumers { return } - mapOfSubscriptionId, foundMapOfSubscriptionId := mapOfConsumers[consumerAddress] + mapOfSubscriptionId, foundMapOfSubscriptionId := mapOfConsumers.subscriptionMap[consumerAddress] if !foundMapOfSubscriptionId { return } @@ -374,7 +355,7 @@ func (psm *ProviderSessionManager) SubscriptionEnded(consumerAddress string, epo } else { subscription.Sub.Unsubscribe() } - delete(psm.subscriptionSessionsWithAllConsumers[epoch][consumerAddress], subscriptionID) // delete subscription after finished with it + delete(psm.subscriptionSessionsWithAllConsumers[epoch].subscriptionMap[consumerAddress], subscriptionID) // delete subscription after finished with it } // Called when the reward server has information on a higher cu proof and usage and this providerSessionsManager needs to sync up on it @@ -390,7 +371,7 @@ func (psm *ProviderSessionManager) UpdateSessionCU(consumerAddress string, epoch if !ok { return utils.LavaFormatError("UpdateSessionCU Failed", EpochIsNotRegisteredError, &map[string]string{"epoch": strconv.FormatUint(epoch, 10)}) } - providerSessionWithConsumer, foundConsumer := providerSessionsWithConsumerMap[consumerAddress] + providerSessionWithConsumer, foundConsumer := providerSessionsWithConsumerMap.sessionMap[consumerAddress] if !foundConsumer { return utils.LavaFormatError("UpdateSessionCU Failed", ConsumerIsNotRegisteredError, &map[string]string{"epoch": strconv.FormatUint(epoch, 10), "consumer": consumerAddress}) } @@ -408,9 +389,9 @@ func NewProviderSessionManager(rpcProviderEndpoint *RPCProviderEndpoint, numberO return &ProviderSessionManager{ rpcProviderEndpoint: rpcProviderEndpoint, blockDistanceForEpochValidity: numberOfBlocksKeptInMemory, - sessionsWithAllConsumers: map[uint64]map[string]*ProviderSessionsWithConsumer{}, - dataReliabilitySessionsWithAllConsumers: map[uint64]map[string]*ProviderSessionsWithConsumer{}, - subscriptionSessionsWithAllConsumers: map[uint64]map[string]map[string]*RPCSubscription{}, + sessionsWithAllConsumers: map[uint64]sessionData{}, + dataReliabilitySessionsWithAllConsumers: map[uint64]sessionData{}, + subscriptionSessionsWithAllConsumers: map[uint64]subscriptionData{}, } } diff --git a/protocol/lavasession/provider_session_manager_test.go b/protocol/lavasession/provider_session_manager_test.go index 2bcce440c3..60baddc16a 100644 --- a/protocol/lavasession/provider_session_manager_test.go +++ b/protocol/lavasession/provider_session_manager_test.go @@ -53,9 +53,10 @@ func prepareSession(t *testing.T) (*ProviderSessionManager, *SingleProviderSessi require.NotNil(t, sps) // prepare session for usage - sps.PrepareSessionForUsage(relayCu, relayCu) + err = sps.PrepareSessionForUsage(relayCu, relayCu) // validate session was prepared successfully + require.Nil(t, err) require.Equal(t, relayCu, sps.LatestRelayCu) require.Equal(t, sps.CuSum, relayCu) require.Equal(t, sps.SessionID, sessionId) @@ -109,6 +110,15 @@ func TestHappyFlowPSM(t *testing.T) { require.Equal(t, sps.PairingEpoch, epoch1) } +func TestPSMPrepareTwice(t *testing.T) { + // init test + _, sps := prepareSession(t) + + // prepare session for usage + err := 
sps.PrepareSessionForUsage(relayCu, relayCu) + require.Error(t, err) +} + // Test the basic functionality of the ProviderSessionsManager func TestPSMEpochChange(t *testing.T) { // init test @@ -362,14 +372,14 @@ func TestPSMSubscribeHappyFlowProcessUnsubscribe(t *testing.T) { require.NotEmpty(t, psm.subscriptionSessionsWithAllConsumers) _, foundEpoch := psm.subscriptionSessionsWithAllConsumers[epoch1] require.True(t, foundEpoch) - _, foundConsumer := psm.subscriptionSessionsWithAllConsumers[epoch1][consumerOneAddress] + _, foundConsumer := psm.subscriptionSessionsWithAllConsumers[epoch1].subscriptionMap[consumerOneAddress] require.True(t, foundConsumer) - _, foundSubscription := psm.subscriptionSessionsWithAllConsumers[epoch1][consumerOneAddress][subscriptionID] + _, foundSubscription := psm.subscriptionSessionsWithAllConsumers[epoch1].subscriptionMap[consumerOneAddress][subscriptionID] require.True(t, foundSubscription) err := psm.ProcessUnsubscribe("unsubscribe", subscriptionID, consumerOneAddress, epoch1) require.True(t, SubscriptionPointerIsNilError.Is(err)) - require.Empty(t, psm.subscriptionSessionsWithAllConsumers[epoch1][consumerOneAddress]) + require.Empty(t, psm.subscriptionSessionsWithAllConsumers[epoch1].subscriptionMap[consumerOneAddress]) } func TestPSMSubscribeHappyFlowProcessUnsubscribeUnsubscribeAll(t *testing.T) { @@ -406,16 +416,16 @@ func TestPSMSubscribeHappyFlowProcessUnsubscribeUnsubscribeAll(t *testing.T) { require.NotEmpty(t, psm.subscriptionSessionsWithAllConsumers) _, foundEpoch := psm.subscriptionSessionsWithAllConsumers[epoch1] require.True(t, foundEpoch) - _, foundConsumer := psm.subscriptionSessionsWithAllConsumers[epoch1][consumerOneAddress] + _, foundConsumer := psm.subscriptionSessionsWithAllConsumers[epoch1].subscriptionMap[consumerOneAddress] require.True(t, foundConsumer) - _, foundSubscription := psm.subscriptionSessionsWithAllConsumers[epoch1][consumerOneAddress][subscriptionID] + _, foundSubscription := psm.subscriptionSessionsWithAllConsumers[epoch1].subscriptionMap[consumerOneAddress][subscriptionID] require.True(t, foundSubscription) - _, foundSubscription2 := psm.subscriptionSessionsWithAllConsumers[epoch1][consumerOneAddress][subscriptionID2] + _, foundSubscription2 := psm.subscriptionSessionsWithAllConsumers[epoch1].subscriptionMap[consumerOneAddress][subscriptionID2] require.True(t, foundSubscription2) err = psm.ProcessUnsubscribe(TendermintUnsubscribeAll, subscriptionID, consumerOneAddress, epoch1) require.True(t, SubscriptionPointerIsNilError.Is(err)) - require.Empty(t, psm.subscriptionSessionsWithAllConsumers[epoch1][consumerOneAddress]) + require.Empty(t, psm.subscriptionSessionsWithAllConsumers[epoch1].subscriptionMap[consumerOneAddress]) } func TestPSMSubscribeHappyFlowProcessUnsubscribeUnsubscribeOneOutOfTwo(t *testing.T) { // init test @@ -443,8 +453,8 @@ func TestPSMSubscribeHappyFlowProcessUnsubscribeUnsubscribeOneOutOfTwo(t *testin err = psm.ProcessUnsubscribe("unsubscribeOne", subscriptionID, consumerOneAddress, epoch1) require.True(t, SubscriptionPointerIsNilError.Is(err)) - require.NotEmpty(t, psm.subscriptionSessionsWithAllConsumers[epoch1][consumerOneAddress]) - _, foundId2 := psm.subscriptionSessionsWithAllConsumers[epoch1][consumerOneAddress][subscriptionID2] + require.NotEmpty(t, psm.subscriptionSessionsWithAllConsumers[epoch1].subscriptionMap[consumerOneAddress]) + _, foundId2 := psm.subscriptionSessionsWithAllConsumers[epoch1].subscriptionMap[consumerOneAddress][subscriptionID2] require.True(t, foundId2) } @@ -470,13 
+480,13 @@ func TestPSMSubscribeHappyFlowSubscriptionEnded(t *testing.T) { require.NotEmpty(t, psm.subscriptionSessionsWithAllConsumers) _, foundEpoch := psm.subscriptionSessionsWithAllConsumers[epoch1] require.True(t, foundEpoch) - _, foundConsumer := psm.subscriptionSessionsWithAllConsumers[epoch1][consumerOneAddress] + _, foundConsumer := psm.subscriptionSessionsWithAllConsumers[epoch1].subscriptionMap[consumerOneAddress] require.True(t, foundConsumer) - _, foundSubscription := psm.subscriptionSessionsWithAllConsumers[epoch1][consumerOneAddress][subscriptionID] + _, foundSubscription := psm.subscriptionSessionsWithAllConsumers[epoch1].subscriptionMap[consumerOneAddress][subscriptionID] require.True(t, foundSubscription) psm.SubscriptionEnded(consumerOneAddress, epoch1, subscriptionID) - require.Empty(t, psm.subscriptionSessionsWithAllConsumers[epoch1][consumerOneAddress]) + require.Empty(t, psm.subscriptionSessionsWithAllConsumers[epoch1].subscriptionMap[consumerOneAddress]) } func TestPSMSubscribeHappyFlowSubscriptionEndedOneOutOfTwo(t *testing.T) { @@ -505,8 +515,8 @@ func TestPSMSubscribeHappyFlowSubscriptionEndedOneOutOfTwo(t *testing.T) { psm.ReleaseSessionAndCreateSubscription(sps, subscription2, consumerOneAddress, epoch1) psm.SubscriptionEnded(consumerOneAddress, epoch1, subscriptionID) - require.NotEmpty(t, psm.subscriptionSessionsWithAllConsumers[epoch1][consumerOneAddress]) - _, foundId2 := psm.subscriptionSessionsWithAllConsumers[epoch1][consumerOneAddress][subscriptionID2] + require.NotEmpty(t, psm.subscriptionSessionsWithAllConsumers[epoch1].subscriptionMap[consumerOneAddress]) + _, foundId2 := psm.subscriptionSessionsWithAllConsumers[epoch1].subscriptionMap[consumerOneAddress][subscriptionID2] require.True(t, foundId2) } diff --git a/protocol/lavasession/provider_types.go b/protocol/lavasession/provider_types.go index 486d3bfc79..0b1c0c4850 100644 --- a/protocol/lavasession/provider_types.go +++ b/protocol/lavasession/provider_types.go @@ -47,6 +47,33 @@ func (endpoint *RPCProviderEndpoint) Validate() error { return nil } +type dataHandler interface { + onDeleteEvent() +} + +type subscriptionData struct { + subscriptionMap map[string]map[string]*RPCSubscription +} + +func (sm subscriptionData) onDeleteEvent() { + for _, consumer := range sm.subscriptionMap { + for _, subscription := range consumer { + if subscription.Sub == nil { // validate subscription not nil + utils.LavaFormatError("filterOldEpochEntriesSubscribe Error", SubscriptionPointerIsNilError, &map[string]string{"subscripionId": subscription.Id}) + } else { + subscription.Sub.Unsubscribe() + } + } + } +} + +type sessionData struct { + sessionMap map[string]*ProviderSessionsWithConsumer +} + +func (sm sessionData) onDeleteEvent() { // do nothing +} + type RPCSubscription struct { Id string Sub *rpcclient.ClientSubscription From 59b3b32f3c0d2360e8e105818a776180b708cd4f Mon Sep 17 00:00:00 2001 From: Ran Mishael Date: Wed, 8 Mar 2023 08:39:36 +0100 Subject: [PATCH 085/123] remove print --- protocol/chainlib/chainproxy/connector.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/protocol/chainlib/chainproxy/connector.go b/protocol/chainlib/chainproxy/connector.go index 4a1ae5d126..909cd87dfc 100644 --- a/protocol/chainlib/chainproxy/connector.go +++ b/protocol/chainlib/chainproxy/connector.go @@ -63,7 +63,7 @@ func addClientsAsynchronously(ctx context.Context, connector *Connector, nConns if (connector.numberOfFreeClients() + connector.numberOfUsedClients()) == 0 { utils.LavaFormatFatal("Could 
not create any connections to the node check address", nil, &map[string]string{"address": addr}) } - utils.LavaFormatInfo("Finished adding Clients Asynchronously"+strconv.Itoa(len(connector.freeClients)), nil) + utils.LavaFormatInfo("Finished adding Clients Asynchronously", nil) utils.LavaFormatInfo("Number of parallel connections created: "+strconv.Itoa(len(connector.freeClients)), nil) go connector.connectorLoop(ctx) } From ff510f79afde314a8e77cb744b0222361fa42e2c Mon Sep 17 00:00:00 2001 From: Ran Mishael Date: Wed, 8 Mar 2023 12:44:21 +0100 Subject: [PATCH 086/123] fixing some issues related to connector crashing --- protocol/chainlib/chainproxy/connector.go | 12 ++++++------ protocol/chainlib/chainproxy/connector_test.go | 6 ++++-- protocol/chainlib/grpc.go | 6 +++++- protocol/chainlib/jsonRPC.go | 7 +++++-- protocol/rpcprovider/rewardserver/reward_server.go | 2 +- protocol/statetracker/payment_updater.go | 14 ++++++++------ 6 files changed, 29 insertions(+), 18 deletions(-) diff --git a/protocol/chainlib/chainproxy/connector.go b/protocol/chainlib/chainproxy/connector.go index 909cd87dfc..7a3a43a7fe 100644 --- a/protocol/chainlib/chainproxy/connector.go +++ b/protocol/chainlib/chainproxy/connector.go @@ -35,7 +35,7 @@ type Connector struct { addr string } -func NewConnector(ctx context.Context, nConns uint, addr string) *Connector { +func NewConnector(ctx context.Context, nConns uint, addr string) (*Connector, error) { NumberOfParallelConnections = nConns // set number of parallel connections requested by user (or default.) connector := &Connector{ freeClients: make([]*rpcclient.Client, 0, nConns), @@ -44,12 +44,12 @@ func NewConnector(ctx context.Context, nConns uint, addr string) *Connector { rpcClient, err := connector.createConnection(ctx, addr, connector.numberOfFreeClients()) if err != nil { - utils.LavaFormatFatal("Failed to create the first connection", err, &map[string]string{"address": addr}) + return nil, utils.LavaFormatError("Failed to create the first connection", err, &map[string]string{"address": addr}) } connector.addClient(rpcClient) go addClientsAsynchronously(ctx, connector, nConns-1, addr) - return connector + return connector, nil } func addClientsAsynchronously(ctx context.Context, connector *Connector, nConns uint, addr string) { @@ -220,7 +220,7 @@ type GRPCConnector struct { addr string } -func NewGRPCConnector(ctx context.Context, nConns uint, addr string) *GRPCConnector { +func NewGRPCConnector(ctx context.Context, nConns uint, addr string) (*GRPCConnector, error) { NumberOfParallelConnections = nConns // set number of parallel connections requested by user (or default.) 
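// the constructor now returns an error instead of calling LavaFormatFatal, so a failed first connection is reported to the caller rather than terminating the provider process.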
connector := &GRPCConnector{ freeClients: make([]*grpc.ClientConn, 0, nConns), @@ -229,11 +229,11 @@ func NewGRPCConnector(ctx context.Context, nConns uint, addr string) *GRPCConnec rpcClient, err := connector.createConnection(ctx, addr, connector.numberOfFreeClients()) if err != nil { - utils.LavaFormatFatal("Failed to create the first connection", err, &map[string]string{"address": addr}) + return nil, utils.LavaFormatError("Failed to create the first connection", err, &map[string]string{"address": addr}) } connector.addClient(rpcClient) go addClientsAsynchronouslyGrpc(ctx, connector, nConns-1, addr) - return connector + return connector, nil } func (connector *GRPCConnector) increaseNumberOfClients(ctx context.Context, numberOfFreeClients int) { diff --git a/protocol/chainlib/chainproxy/connector_test.go b/protocol/chainlib/chainproxy/connector_test.go index 4e51e273a2..b3daeb57db 100644 --- a/protocol/chainlib/chainproxy/connector_test.go +++ b/protocol/chainlib/chainproxy/connector_test.go @@ -62,7 +62,8 @@ func TestConnector(t *testing.T) { listener := createRPCServer(t) // create a grpcServer so we can connect to its endpoint and validate everything works. defer listener.Close() ctx := context.Background() - conn := NewConnector(ctx, numberOfClients, listenerAddressTcp) + conn, err := NewConnector(ctx, numberOfClients, listenerAddressTcp) + require.Nil(t, err) for { // wait for the routine to finish connecting if len(conn.freeClients) == numberOfClients { break @@ -88,7 +89,8 @@ func TestConnectorGrpc(t *testing.T) { server := createGRPCServer(t) // create a grpcServer so we can connect to its endpoint and validate everything works. defer server.Stop() ctx := context.Background() - conn := NewGRPCConnector(ctx, numberOfClients, listenerAddress) + conn, err := NewGRPCConnector(ctx, numberOfClients, listenerAddress) + require.Nil(t, err) for { // wait for the routine to finish connecting if len(conn.freeClients) == numberOfClients { break diff --git a/protocol/chainlib/grpc.go b/protocol/chainlib/grpc.go index dd99579f04..da3eee273b 100644 --- a/protocol/chainlib/grpc.go +++ b/protocol/chainlib/grpc.go @@ -250,7 +250,11 @@ func NewGrpcChainProxy(ctx context.Context, nConns uint, rpcProviderEndpoint *la cp := &GrpcChainProxy{ BaseChainProxy: BaseChainProxy{averageBlockTime: averageBlockTime}, } - cp.conn = chainproxy.NewGRPCConnector(ctx, nConns, strings.TrimSuffix(rpcProviderEndpoint.NodeUrl[0], "/")) + conn, err := chainproxy.NewGRPCConnector(ctx, nConns, strings.TrimSuffix(rpcProviderEndpoint.NodeUrl[0], "/")) + if err != nil { + return nil, err + } + cp.conn = conn if cp.conn == nil { return nil, utils.LavaFormatError("g_conn == nil", nil, nil) } diff --git a/protocol/chainlib/jsonRPC.go b/protocol/chainlib/jsonRPC.go index 7e3359d881..71ea01f0d4 100644 --- a/protocol/chainlib/jsonRPC.go +++ b/protocol/chainlib/jsonRPC.go @@ -354,11 +354,14 @@ func NewJrpcChainProxy(ctx context.Context, nConns uint, rpcProviderEndpoint *la } func (cp *JrpcChainProxy) start(ctx context.Context, nConns uint, nodeUrl string) error { - cp.conn = chainproxy.NewConnector(ctx, nConns, nodeUrl) + conn, err := chainproxy.NewConnector(ctx, nConns, nodeUrl) + if err != nil { + return err + } + cp.conn = conn if cp.conn == nil { return errors.New("g_conn == nil") } - return nil } diff --git a/protocol/rpcprovider/rewardserver/reward_server.go b/protocol/rpcprovider/rewardserver/reward_server.go index f17e9091fe..8066831658 100644 --- a/protocol/rpcprovider/rewardserver/reward_server.go +++ 
b/protocol/rpcprovider/rewardserver/reward_server.go @@ -237,7 +237,7 @@ func (rws *RewardServer) gatherRewardsForClaim(ctx context.Context, currentEpoch } if blockDistanceForEpochValidity > currentEpoch { - return nil, utils.LavaFormatError("current epoch too low", nil, &map[string]string{"current epoch": strconv.FormatUint(currentEpoch, 10)}) + return nil, utils.LavaFormatWarning("gatherRewardsForClaim current epoch is too low to claim rewards", nil, &map[string]string{"current epoch": strconv.FormatUint(currentEpoch, 10)}) } activeEpochThreshold := currentEpoch - blockDistanceForEpochValidity for epoch, epochRewards := range rws.rewards { diff --git a/protocol/statetracker/payment_updater.go b/protocol/statetracker/payment_updater.go index 04d427e7fb..b0887fc598 100644 --- a/protocol/statetracker/payment_updater.go +++ b/protocol/statetracker/payment_updater.go @@ -15,16 +15,16 @@ type PaymentUpdatable interface { } type PaymentUpdater struct { - paymentUpdatables map[string]*PaymentUpdatable - stateQuery *ProviderStateQuery + paymentUpdatable map[string]*PaymentUpdatable + stateQuery *ProviderStateQuery } func NewPaymentUpdater(stateQuery *ProviderStateQuery) *PaymentUpdater { - return &PaymentUpdater{paymentUpdatables: map[string]*PaymentUpdatable{}, stateQuery: stateQuery} + return &PaymentUpdater{paymentUpdatable: map[string]*PaymentUpdatable{}, stateQuery: stateQuery} } func (pu *PaymentUpdater) RegisterPaymentUpdatable(ctx context.Context, paymentUpdatable *PaymentUpdatable) { - pu.paymentUpdatables[(*paymentUpdatable).Description()] = paymentUpdatable + pu.paymentUpdatable[(*paymentUpdatable).Description()] = paymentUpdatable } func (pu *PaymentUpdater) UpdaterKey() string { @@ -38,7 +38,9 @@ func (pu *PaymentUpdater) Update(latestBlock int64) { return } for _, payment := range payments { - updatable := pu.paymentUpdatables[payment.Description] - (*updatable).PaymentHandler(payment) + updatable, foundUpdatable := pu.paymentUpdatable[payment.Description] + if foundUpdatable { + (*updatable).PaymentHandler(payment) + } } } From c6e42b3347ea8ea0604edce7605502f09ed68a3e Mon Sep 17 00:00:00 2001 From: Ran Mishael Date: Wed, 8 Mar 2023 15:14:00 +0100 Subject: [PATCH 087/123] extract cobra commands from main --- cmd/lavad/main.go | 282 +--------------------------- protocol/rpcconsumer/rpcconsumer.go | 146 +++++++++++++- protocol/rpcprovider/rpcprovider.go | 154 ++++++++++++++- 3 files changed, 305 insertions(+), 277 deletions(-) diff --git a/cmd/lavad/main.go b/cmd/lavad/main.go index ec17e11b71..0a3c19e18b 100644 --- a/cmd/lavad/main.go +++ b/cmd/lavad/main.go @@ -3,11 +3,9 @@ package main import ( "context" "fmt" - "math/rand" "os" "strconv" "strings" - "time" _ "net/http/pprof" @@ -15,11 +13,8 @@ import ( "github.com/cosmos/cosmos-sdk/client/flags" "github.com/cosmos/cosmos-sdk/client/tx" svrcmd "github.com/cosmos/cosmos-sdk/server/cmd" - "github.com/cosmos/cosmos-sdk/version" "github.com/ignite-hq/cli/ignite/pkg/cosmoscmd" "github.com/lavanet/lava/app" - "github.com/lavanet/lava/protocol/common" - "github.com/lavanet/lava/protocol/lavasession" "github.com/lavanet/lava/protocol/rpcconsumer" "github.com/lavanet/lava/protocol/rpcprovider" "github.com/lavanet/lava/relayer" @@ -28,7 +23,6 @@ import ( "github.com/lavanet/lava/relayer/sentry" "github.com/lavanet/lava/utils" "github.com/spf13/cobra" - "github.com/spf13/viper" ) const ( @@ -191,254 +185,10 @@ func main() { }, } - cmdRPCConsumer := &cobra.Command{ - Use: "rpcconsumer [config-file] | { {listen-ip:listen-port spec-chain-id 
api-interface} ... }", - Short: `rpcconsumer sets up a server to perform api requests and sends them through the lava protocol to data providers`, - Long: `rpcconsumer sets up a server to perform api requests and sends them through the lava protocol to data providers - all configs should be located in the local running directory /config or ` + app.DefaultNodeHome + ` - if no arguments are passed, assumes default config file: ` + DefaultRPCConsumerFileName + ` - if one argument is passed, its assumed the config file name - `, - Example: `required flags: --geolocation 1 --from alice -rpcconsumer -rpcconsumer rpcconsumer_conf -rpcconsumer 127.0.0.1:3333 COS3 tendermintrpc 127.0.0.1:3334 COS3 rest `, - Args: func(cmd *cobra.Command, args []string) error { - // Optionally run one of the validators provided by cobra - if err := cobra.RangeArgs(0, 1)(cmd, args); err == nil { - // zero or one argument is allowed - return nil - } - if len(args)%len(rpcconsumer.Yaml_config_properties) != 0 { - return fmt.Errorf("invalid number of arguments, either its a single config file or repeated groups of 3 HOST:PORT chain-id api-interface") - } - return nil - }, - RunE: func(cmd *cobra.Command, args []string) error { - utils.LavaFormatInfo("RPCConsumer started", &map[string]string{"args": strings.Join(args, ",")}) - clientCtx, err := client.GetClientTxContext(cmd) - if err != nil { - return err - } - config_name := DefaultRPCConsumerFileName - if len(args) == 1 { - config_name = args[0] // name of config file (without extension) - } - viper.SetConfigName(config_name) - viper.SetConfigType("yml") - viper.AddConfigPath(".") - viper.AddConfigPath("./config") - viper.AddConfigPath(app.DefaultNodeHome) - var rpcEndpoints []*lavasession.RPCEndpoint - var endpoints_strings []string - var viper_endpoints *viper.Viper - if len(args) > 1 { - viper_endpoints, err = common.ParseEndpointArgs(args, rpcconsumer.Yaml_config_properties, common.EndpointsConfigName) - if err != nil { - return utils.LavaFormatError("invalid endpoints arguments", err, &map[string]string{"endpoint_strings": strings.Join(args, "")}) - } - viper.MergeConfigMap(viper_endpoints.AllSettings()) - err := viper.SafeWriteConfigAs(DefaultRPCConsumerFileName) - if err != nil { - utils.LavaFormatInfo("did not create new config file, if it's desired remove the config file", &map[string]string{"file_name": viper.ConfigFileUsed()}) - } else { - utils.LavaFormatInfo("created new config file", &map[string]string{"file_name": DefaultRPCConsumerFileName}) - } - } else { - err = viper.ReadInConfig() - if err != nil { - utils.LavaFormatFatal("could not load config file", err, &map[string]string{"expected_config_name": viper.ConfigFileUsed()}) - } - utils.LavaFormatInfo("read config file successfully", &map[string]string{"expected_config_name": viper.ConfigFileUsed()}) - } - geolocation, err := cmd.Flags().GetUint64(lavasession.GeolocationFlag) - if err != nil { - utils.LavaFormatFatal("failed to read geolocation flag, required flag", err, nil) - } - rpcEndpoints, err = rpcconsumer.ParseEndpoints(viper.GetViper(), geolocation) - if err != nil || len(rpcEndpoints) == 0 { - return utils.LavaFormatError("invalid endpoints definition", err, &map[string]string{"endpoint_strings": strings.Join(endpoints_strings, "")}) - } - // handle flags, pass necessary fields - ctx := context.Background() - networkChainId, err := cmd.Flags().GetString(flags.FlagChainID) - if err != nil { - return err - } - logLevel, err := cmd.Flags().GetString(flags.FlagLogLevel) - if err != nil { - 
utils.LavaFormatFatal("failed to read log level flag", err, nil) - } - utils.LoggingLevel(logLevel) - - // check if the command includes --pprof-address - pprofAddressFlagUsed := cmd.Flags().Lookup("pprof-address").Changed - if pprofAddressFlagUsed { - // get pprof server ip address (default value: "") - pprofServerAddress, err := cmd.Flags().GetString("pprof-address") - if err != nil { - utils.LavaFormatFatal("failed to read pprof address flag", err, nil) - } - - // start pprof HTTP server - err = performance.StartPprofServer(pprofServerAddress) - if err != nil { - return utils.LavaFormatError("failed to start pprof HTTP server", err, nil) - } - } - txFactory := tx.NewFactoryCLI(clientCtx, cmd.Flags()).WithChainID(networkChainId) - rpcConsumer := rpcconsumer.RPCConsumer{} - requiredResponses := 1 // TODO: handle secure flag, for a majority between providers - utils.LavaFormatInfo("lavad Binary Version: "+version.Version, nil) - rand.Seed(time.Now().UnixNano()) - vrf_sk, _, err := utils.GetOrCreateVRFKey(clientCtx) - if err != nil { - utils.LavaFormatFatal("failed getting or creating a VRF key", err, nil) - } - var cache *performance.Cache = nil - cacheAddr, err := cmd.Flags().GetString(performance.CacheFlagName) - if err != nil { - utils.LavaFormatError("Failed To Get Cache Address flag", err, &map[string]string{"flags": fmt.Sprintf("%v", cmd.Flags())}) - } else if cacheAddr != "" { - cache, err = performance.InitCache(ctx, cacheAddr) - if err != nil { - utils.LavaFormatError("Failed To Connect to cache at address", err, &map[string]string{"address": cacheAddr}) - } else { - utils.LavaFormatInfo("cache service connected", &map[string]string{"address": cacheAddr}) - } - } - err = rpcConsumer.Start(ctx, txFactory, clientCtx, rpcEndpoints, requiredResponses, vrf_sk, cache) - return err - }, - } - - cmdRPCProvider := &cobra.Command{ - Use: `rpcprovider [config-file] | { {listen-ip:listen-port spec-chain-id api-interface "comma-separated-node-urls"} ... 
}`, - Short: `rpcprovider sets up a server to listen for rpc-consumers requests from the lava protocol send them to a configured node and respond with the reply`, - Long: `rpcprovider sets up a server to listen for rpc-consumers requests from the lava protocol send them to a configured node and respond with the reply - all configs should be located in` + app.DefaultNodeHome + "/config or the local running directory" + ` - if no arguments are passed, assumes default config file: ` + DefaultRPCProviderFileName + ` - if one argument is passed, its assumed the config file name - `, - Example: `required flags: --geolocation 1 --from alice -optional: --save-conf -rpcprovider -rpcprovider rpcprovider_conf.yml -rpcprovider 127.0.0.1:3333 ETH1 jsonrpc wss://www.eth-node.com:80 -rpcprovider 127.0.0.1:3333 COS3 tendermintrpc "wss://www.node-path.com:80,https://www.node-path.com:80" 127.0.0.1:3333 COS3 rest https://www.node-path.com:1317 `, - Args: func(cmd *cobra.Command, args []string) error { - // Optionally run one of the validators provided by cobra - if err := cobra.RangeArgs(0, 1)(cmd, args); err == nil { - // zero or one argument is allowed - return nil - } - if len(args)%len(rpcprovider.Yaml_config_properties) != 0 { - return fmt.Errorf("invalid number of arguments, either its a single config file or repeated groups of 4 HOST:PORT chain-id api-interface [node_url,node_url_2], arg count: %d", len(args)) - } - return nil - }, - RunE: func(cmd *cobra.Command, args []string) error { - utils.LavaFormatInfo("RPCProvider started", &map[string]string{"args": strings.Join(args, ",")}) - clientCtx, err := client.GetClientTxContext(cmd) - if err != nil { - return err - } - config_name := DefaultRPCProviderFileName - if len(args) == 1 { - config_name = args[0] // name of config file (without extension) - } - viper.SetConfigName(config_name) - viper.SetConfigType("yml") - viper.AddConfigPath(".") - viper.AddConfigPath("./config") - var rpcProviderEndpoints []*lavasession.RPCProviderEndpoint - var endpoints_strings []string - var viper_endpoints *viper.Viper - if len(args) > 1 { - viper_endpoints, err = common.ParseEndpointArgs(args, rpcprovider.Yaml_config_properties, common.EndpointsConfigName) - if err != nil { - return utils.LavaFormatError("invalid endpoints arguments", err, &map[string]string{"endpoint_strings": strings.Join(args, "")}) - } - save_config, err := cmd.Flags().GetBool(common.SaveConfigFlagName) - if err != nil { - return utils.LavaFormatError("failed reading flag", err, &map[string]string{"flag_name": common.SaveConfigFlagName}) - } - viper.MergeConfigMap(viper_endpoints.AllSettings()) - if save_config { - err := viper.SafeWriteConfigAs(DefaultRPCProviderFileName) - if err != nil { - utils.LavaFormatInfo("did not create new config file, if it's desired remove the config file", &map[string]string{"file_name": DefaultRPCProviderFileName, "error": err.Error()}) - } else { - utils.LavaFormatInfo("created new config file", &map[string]string{"file_name": DefaultRPCProviderFileName}) - } - } - } else { - err = viper.ReadInConfig() - if err != nil { - utils.LavaFormatFatal("could not load config file", err, &map[string]string{"expected_config_name": viper.ConfigFileUsed()}) - } - utils.LavaFormatInfo("read config file successfully", &map[string]string{"expected_config_name": viper.ConfigFileUsed()}) - } - geolocation, err := cmd.Flags().GetUint64(lavasession.GeolocationFlag) - if err != nil { - utils.LavaFormatFatal("failed to read geolocation flag, required flag", err, nil) - } - 
rpcProviderEndpoints, err = rpcprovider.ParseEndpoints(viper.GetViper(), geolocation) - if err != nil || len(rpcProviderEndpoints) == 0 { - return utils.LavaFormatError("invalid endpoints definition", err, &map[string]string{"endpoint_strings": strings.Join(endpoints_strings, "")}) - } - // handle flags, pass necessary fields - ctx := context.Background() - networkChainId, err := cmd.Flags().GetString(flags.FlagChainID) - if err != nil { - return err - } - txFactory := tx.NewFactoryCLI(clientCtx, cmd.Flags()).WithChainID(networkChainId) - logLevel, err := cmd.Flags().GetString(flags.FlagLogLevel) - if err != nil { - utils.LavaFormatFatal("failed to read log level flag", err, nil) - } - utils.LoggingLevel(logLevel) - - // check if the command includes --pprof-address - pprofAddressFlagUsed := cmd.Flags().Lookup("pprof-address").Changed - if pprofAddressFlagUsed { - // get pprof server ip address (default value: "") - pprofServerAddress, err := cmd.Flags().GetString("pprof-address") - if err != nil { - utils.LavaFormatFatal("failed to read pprof address flag", err, nil) - } - - // start pprof HTTP server - err = performance.StartPprofServer(pprofServerAddress) - if err != nil { - return utils.LavaFormatError("failed to start pprof HTTP server", err, nil) - } - } - - utils.LavaFormatInfo("lavad Binary Version: "+version.Version, nil) - rand.Seed(time.Now().UnixNano()) - var cache *performance.Cache = nil - cacheAddr, err := cmd.Flags().GetString(performance.CacheFlagName) - if err != nil { - utils.LavaFormatError("Failed To Get Cache Address flag", err, &map[string]string{"flags": fmt.Sprintf("%v", cmd.Flags())}) - } else if cacheAddr != "" { - cache, err = performance.InitCache(ctx, cacheAddr) - if err != nil { - utils.LavaFormatError("Failed To Connect to cache at address", err, &map[string]string{"address": cacheAddr}) - } else { - utils.LavaFormatInfo("cache service connected", &map[string]string{"address": cacheAddr}) - } - } - numberOfNodeParallelConnections, err := cmd.Flags().GetUint(chainproxy.ParallelConnectionsFlag) - if err != nil { - utils.LavaFormatFatal("error fetching chainproxy.ParallelConnectionsFlag", err, nil) - } - rpcProvider := rpcprovider.RPCProvider{} - err = rpcProvider.Start(ctx, txFactory, clientCtx, rpcProviderEndpoints, cache, numberOfNodeParallelConnections) - return err - }, - } + // rpc consumer cobra command + cmdRPCConsumer := rpcconsumer.CreateRPCConsumerCobraCommand() + // rpc provider cobra command + cmdRPCProvider := rpcprovider.CreateRPCProviderCobraCommand() // Server command flags flags.AddTxFlagsToCmd(cmdServer) @@ -471,28 +221,10 @@ rpcprovider 127.0.0.1:3333 COS3 tendermintrpc "wss://www.node-path.com:80,https: cmdTestClient.Flags().Bool("secure", false, "secure sends reliability on every message") rootCmd.AddCommand(cmdTestClient) - // RPCConsumer command flags - flags.AddTxFlagsToCmd(cmdRPCConsumer) - cmdRPCConsumer.MarkFlagRequired(flags.FlagFrom) - cmdRPCConsumer.Flags().String(flags.FlagChainID, app.Name, "network chain id") - cmdRPCConsumer.Flags().Uint64(sentry.GeolocationFlag, 0, "geolocation to run from") - cmdRPCConsumer.MarkFlagRequired(sentry.GeolocationFlag) - cmdRPCConsumer.Flags().Bool("secure", false, "secure sends reliability on every message") - cmdRPCConsumer.Flags().String(performance.PprofAddressFlagName, "", "pprof server address, used for code profiling") - cmdRPCConsumer.Flags().String(performance.CacheFlagName, "", "address for a cache server to improve performance") + // Add RPC Consumer Command 
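// cmdRPCConsumer is now constructed inside the rpcconsumer package via CreateRPCConsumerCobraCommand instead of being defined inline in main.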
rootCmd.AddCommand(cmdRPCConsumer) - - // RPCProvider command flags - flags.AddTxFlagsToCmd(cmdRPCProvider) - cmdRPCProvider.MarkFlagRequired(flags.FlagFrom) - cmdRPCProvider.Flags().Bool(common.SaveConfigFlagName, false, "save cmd args to a config file") - cmdRPCProvider.Flags().String(flags.FlagChainID, app.Name, "network chain id") - cmdRPCProvider.Flags().Uint64(sentry.GeolocationFlag, 0, "geolocation to run from") - cmdRPCProvider.MarkFlagRequired(sentry.GeolocationFlag) - cmdRPCProvider.Flags().String(performance.PprofAddressFlagName, "", "pprof server address, used for code profiling") - cmdRPCProvider.Flags().String(performance.CacheFlagName, "", "address for a cache server to improve performance") - cmdRPCProvider.Flags().Uint(chainproxy.ParallelConnectionsFlag, chainproxy.NumberOfParallelConnections, "parallel connections") - rootCmd.AddCommand(cmdRPCProvider) // TODO: DISABLE COMMAND SO IT'S NOT EXPOSED ON MAIN YET + // Add RPC Provider Command + rootCmd.AddCommand(cmdRPCProvider) if err := svrcmd.Execute(rootCmd, app.DefaultNodeHome); err != nil { os.Exit(1) diff --git a/protocol/rpcconsumer/rpcconsumer.go b/protocol/rpcconsumer/rpcconsumer.go index a5b4851ba0..88198d1578 100644 --- a/protocol/rpcconsumer/rpcconsumer.go +++ b/protocol/rpcconsumer/rpcconsumer.go @@ -3,28 +3,37 @@ package rpcconsumer import ( "context" "fmt" + "math/rand" "os" "os/signal" "strconv" + "strings" + "time" "github.com/coniks-sys/coniks-go/crypto/vrf" "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" "github.com/cosmos/cosmos-sdk/client/tx" sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/version" + "github.com/lavanet/lava/app" "github.com/lavanet/lava/protocol/chainlib" "github.com/lavanet/lava/protocol/common" "github.com/lavanet/lava/protocol/lavaprotocol" "github.com/lavanet/lava/protocol/lavasession" "github.com/lavanet/lava/protocol/statetracker" "github.com/lavanet/lava/relayer/performance" + "github.com/lavanet/lava/relayer/sentry" "github.com/lavanet/lava/relayer/sigs" "github.com/lavanet/lava/utils" conflicttypes "github.com/lavanet/lava/x/conflict/types" + "github.com/spf13/cobra" "github.com/spf13/viper" ) var ( - Yaml_config_properties = []string{"network-address", "chain-id", "api-interface"} + Yaml_config_properties = []string{"network-address", "chain-id", "api-interface"} + DefaultRPCConsumerFileName = "rpcconsumer.yml" ) type ConsumerStateTrackerInf interface { @@ -102,3 +111,138 @@ func ParseEndpoints(viper_endpoints *viper.Viper, geolocation uint64) (endpoints } return } + +func CreateRPCConsumerCobraCommand() *cobra.Command { + cmdRPCConsumer := &cobra.Command{ + Use: "rpcconsumer [config-file] | { {listen-ip:listen-port spec-chain-id api-interface} ... 
}", + Short: `rpcconsumer sets up a server to perform api requests and sends them through the lava protocol to data providers`, + Long: `rpcconsumer sets up a server to perform api requests and sends them through the lava protocol to data providers + all configs should be located in the local running directory /config or ` + app.DefaultNodeHome + ` + if no arguments are passed, assumes default config file: ` + DefaultRPCConsumerFileName + ` + if one argument is passed, its assumed the config file name + `, + Example: `required flags: --geolocation 1 --from alice +rpcconsumer +rpcconsumer rpcconsumer_conf +rpcconsumer 127.0.0.1:3333 COS3 tendermintrpc 127.0.0.1:3334 COS3 rest `, + Args: func(cmd *cobra.Command, args []string) error { + // Optionally run one of the validators provided by cobra + if err := cobra.RangeArgs(0, 1)(cmd, args); err == nil { + // zero or one argument is allowed + return nil + } + if len(args)%len(Yaml_config_properties) != 0 { + return fmt.Errorf("invalid number of arguments, either its a single config file or repeated groups of 3 HOST:PORT chain-id api-interface") + } + return nil + }, + RunE: func(cmd *cobra.Command, args []string) error { + utils.LavaFormatInfo("RPCConsumer started", &map[string]string{"args": strings.Join(args, ",")}) + clientCtx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + config_name := DefaultRPCConsumerFileName + if len(args) == 1 { + config_name = args[0] // name of config file (without extension) + } + viper.SetConfigName(config_name) + viper.SetConfigType("yml") + viper.AddConfigPath(".") + viper.AddConfigPath("./config") + viper.AddConfigPath(app.DefaultNodeHome) + var rpcEndpoints []*lavasession.RPCEndpoint + var endpoints_strings []string + var viper_endpoints *viper.Viper + if len(args) > 1 { + viper_endpoints, err = common.ParseEndpointArgs(args, Yaml_config_properties, common.EndpointsConfigName) + if err != nil { + return utils.LavaFormatError("invalid endpoints arguments", err, &map[string]string{"endpoint_strings": strings.Join(args, "")}) + } + viper.MergeConfigMap(viper_endpoints.AllSettings()) + err := viper.SafeWriteConfigAs(DefaultRPCConsumerFileName) + if err != nil { + utils.LavaFormatInfo("did not create new config file, if it's desired remove the config file", &map[string]string{"file_name": viper.ConfigFileUsed()}) + } else { + utils.LavaFormatInfo("created new config file", &map[string]string{"file_name": DefaultRPCConsumerFileName}) + } + } else { + err = viper.ReadInConfig() + if err != nil { + utils.LavaFormatFatal("could not load config file", err, &map[string]string{"expected_config_name": viper.ConfigFileUsed()}) + } + utils.LavaFormatInfo("read config file successfully", &map[string]string{"expected_config_name": viper.ConfigFileUsed()}) + } + geolocation, err := cmd.Flags().GetUint64(lavasession.GeolocationFlag) + if err != nil { + utils.LavaFormatFatal("failed to read geolocation flag, required flag", err, nil) + } + rpcEndpoints, err = ParseEndpoints(viper.GetViper(), geolocation) + if err != nil || len(rpcEndpoints) == 0 { + return utils.LavaFormatError("invalid endpoints definition", err, &map[string]string{"endpoint_strings": strings.Join(endpoints_strings, "")}) + } + // handle flags, pass necessary fields + ctx := context.Background() + networkChainId, err := cmd.Flags().GetString(flags.FlagChainID) + if err != nil { + return err + } + logLevel, err := cmd.Flags().GetString(flags.FlagLogLevel) + if err != nil { + utils.LavaFormatFatal("failed to read log level flag", err, 
nil) + } + utils.LoggingLevel(logLevel) + + // check if the command includes --pprof-address + pprofAddressFlagUsed := cmd.Flags().Lookup("pprof-address").Changed + if pprofAddressFlagUsed { + // get pprof server ip address (default value: "") + pprofServerAddress, err := cmd.Flags().GetString("pprof-address") + if err != nil { + utils.LavaFormatFatal("failed to read pprof address flag", err, nil) + } + + // start pprof HTTP server + err = performance.StartPprofServer(pprofServerAddress) + if err != nil { + return utils.LavaFormatError("failed to start pprof HTTP server", err, nil) + } + } + txFactory := tx.NewFactoryCLI(clientCtx, cmd.Flags()).WithChainID(networkChainId) + rpcConsumer := RPCConsumer{} + requiredResponses := 1 // TODO: handle secure flag, for a majority between providers + utils.LavaFormatInfo("lavad Binary Version: "+version.Version, nil) + rand.Seed(time.Now().UnixNano()) + vrf_sk, _, err := utils.GetOrCreateVRFKey(clientCtx) + if err != nil { + utils.LavaFormatFatal("failed getting or creating a VRF key", err, nil) + } + var cache *performance.Cache = nil + cacheAddr, err := cmd.Flags().GetString(performance.CacheFlagName) + if err != nil { + utils.LavaFormatError("Failed To Get Cache Address flag", err, &map[string]string{"flags": fmt.Sprintf("%v", cmd.Flags())}) + } else if cacheAddr != "" { + cache, err = performance.InitCache(ctx, cacheAddr) + if err != nil { + utils.LavaFormatError("Failed To Connect to cache at address", err, &map[string]string{"address": cacheAddr}) + } else { + utils.LavaFormatInfo("cache service connected", &map[string]string{"address": cacheAddr}) + } + } + err = rpcConsumer.Start(ctx, txFactory, clientCtx, rpcEndpoints, requiredResponses, vrf_sk, cache) + return err + }, + } + + // RPCConsumer command flags + flags.AddTxFlagsToCmd(cmdRPCConsumer) + cmdRPCConsumer.MarkFlagRequired(flags.FlagFrom) + cmdRPCConsumer.Flags().String(flags.FlagChainID, app.Name, "network chain id") + cmdRPCConsumer.Flags().Uint64(sentry.GeolocationFlag, 0, "geolocation to run from") + cmdRPCConsumer.MarkFlagRequired(sentry.GeolocationFlag) + cmdRPCConsumer.Flags().Bool("secure", false, "secure sends reliability on every message") + cmdRPCConsumer.Flags().String(performance.PprofAddressFlagName, "", "pprof server address, used for code profiling") + cmdRPCConsumer.Flags().String(performance.CacheFlagName, "", "address for a cache server to improve performance") + + return cmdRPCConsumer +} diff --git a/protocol/rpcprovider/rpcprovider.go b/protocol/rpcprovider/rpcprovider.go index 6628fa1025..fcebcf91be 100644 --- a/protocol/rpcprovider/rpcprovider.go +++ b/protocol/rpcprovider/rpcprovider.go @@ -3,15 +3,21 @@ package rpcprovider import ( "context" "fmt" + "math/rand" "os" "os/signal" "strconv" + "strings" "time" "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" "github.com/cosmos/cosmos-sdk/client/tx" sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/version" + "github.com/lavanet/lava/app" "github.com/lavanet/lava/protocol/chainlib" + "github.com/lavanet/lava/protocol/chainlib/chainproxy" "github.com/lavanet/lava/protocol/chaintracker" "github.com/lavanet/lava/protocol/common" "github.com/lavanet/lava/protocol/lavasession" @@ -19,9 +25,11 @@ import ( "github.com/lavanet/lava/protocol/rpcprovider/rewardserver" "github.com/lavanet/lava/protocol/statetracker" "github.com/lavanet/lava/relayer/performance" + "github.com/lavanet/lava/relayer/sentry" "github.com/lavanet/lava/relayer/sigs" 
"github.com/lavanet/lava/utils" pairingtypes "github.com/lavanet/lava/x/pairing/types" + "github.com/spf13/cobra" "github.com/spf13/viper" ) @@ -30,7 +38,8 @@ const ( ) var ( - Yaml_config_properties = []string{"network-address", "chain-id", "api-interface", "node-url"} + Yaml_config_properties = []string{"network-address", "chain-id", "api-interface", "node-url"} + DefaultRPCProviderFileName = "rpcprovider.yml" ) type ProviderStateTrackerInf interface { @@ -200,3 +209,146 @@ func ParseEndpoints(viper_endpoints *viper.Viper, geolocation uint64) (endpoints } return } + +func CreateRPCProviderCobraCommand() *cobra.Command { + cmdRPCProvider := &cobra.Command{ + Use: `rpcprovider [config-file] | { {listen-ip:listen-port spec-chain-id api-interface "comma-separated-node-urls"} ... }`, + Short: `rpcprovider sets up a server to listen for rpc-consumers requests from the lava protocol send them to a configured node and respond with the reply`, + Long: `rpcprovider sets up a server to listen for rpc-consumers requests from the lava protocol send them to a configured node and respond with the reply + all configs should be located in` + app.DefaultNodeHome + "/config or the local running directory" + ` + if no arguments are passed, assumes default config file: ` + DefaultRPCProviderFileName + ` + if one argument is passed, its assumed the config file name + `, + Example: `required flags: --geolocation 1 --from alice +optional: --save-conf +rpcprovider +rpcprovider rpcprovider_conf.yml +rpcprovider 127.0.0.1:3333 ETH1 jsonrpc wss://www.eth-node.com:80 +rpcprovider 127.0.0.1:3333 COS3 tendermintrpc "wss://www.node-path.com:80,https://www.node-path.com:80" 127.0.0.1:3333 COS3 rest https://www.node-path.com:1317 `, + Args: func(cmd *cobra.Command, args []string) error { + // Optionally run one of the validators provided by cobra + if err := cobra.RangeArgs(0, 1)(cmd, args); err == nil { + // zero or one argument is allowed + return nil + } + if len(args)%len(Yaml_config_properties) != 0 { + return fmt.Errorf("invalid number of arguments, either its a single config file or repeated groups of 4 HOST:PORT chain-id api-interface [node_url,node_url_2], arg count: %d", len(args)) + } + return nil + }, + RunE: func(cmd *cobra.Command, args []string) error { + utils.LavaFormatInfo("RPCProvider started", &map[string]string{"args": strings.Join(args, ",")}) + clientCtx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + config_name := DefaultRPCProviderFileName + if len(args) == 1 { + config_name = args[0] // name of config file (without extension) + } + viper.SetConfigName(config_name) + viper.SetConfigType("yml") + viper.AddConfigPath(".") + viper.AddConfigPath("./config") + var rpcProviderEndpoints []*lavasession.RPCProviderEndpoint + var endpoints_strings []string + var viper_endpoints *viper.Viper + if len(args) > 1 { + viper_endpoints, err = common.ParseEndpointArgs(args, Yaml_config_properties, common.EndpointsConfigName) + if err != nil { + return utils.LavaFormatError("invalid endpoints arguments", err, &map[string]string{"endpoint_strings": strings.Join(args, "")}) + } + save_config, err := cmd.Flags().GetBool(common.SaveConfigFlagName) + if err != nil { + return utils.LavaFormatError("failed reading flag", err, &map[string]string{"flag_name": common.SaveConfigFlagName}) + } + viper.MergeConfigMap(viper_endpoints.AllSettings()) + if save_config { + err := viper.SafeWriteConfigAs(DefaultRPCProviderFileName) + if err != nil { + utils.LavaFormatInfo("did not create new config file, 
if it's desired remove the config file", &map[string]string{"file_name": DefaultRPCProviderFileName, "error": err.Error()}) + } else { + utils.LavaFormatInfo("created new config file", &map[string]string{"file_name": DefaultRPCProviderFileName}) + } + } + } else { + err = viper.ReadInConfig() + if err != nil { + utils.LavaFormatFatal("could not load config file", err, &map[string]string{"expected_config_name": viper.ConfigFileUsed()}) + } + utils.LavaFormatInfo("read config file successfully", &map[string]string{"expected_config_name": viper.ConfigFileUsed()}) + } + geolocation, err := cmd.Flags().GetUint64(lavasession.GeolocationFlag) + if err != nil { + utils.LavaFormatFatal("failed to read geolocation flag, required flag", err, nil) + } + rpcProviderEndpoints, err = ParseEndpoints(viper.GetViper(), geolocation) + if err != nil || len(rpcProviderEndpoints) == 0 { + return utils.LavaFormatError("invalid endpoints definition", err, &map[string]string{"endpoint_strings": strings.Join(endpoints_strings, "")}) + } + // handle flags, pass necessary fields + ctx := context.Background() + networkChainId, err := cmd.Flags().GetString(flags.FlagChainID) + if err != nil { + return err + } + txFactory := tx.NewFactoryCLI(clientCtx, cmd.Flags()).WithChainID(networkChainId) + logLevel, err := cmd.Flags().GetString(flags.FlagLogLevel) + if err != nil { + utils.LavaFormatFatal("failed to read log level flag", err, nil) + } + utils.LoggingLevel(logLevel) + + // check if the command includes --pprof-address + pprofAddressFlagUsed := cmd.Flags().Lookup("pprof-address").Changed + if pprofAddressFlagUsed { + // get pprof server ip address (default value: "") + pprofServerAddress, err := cmd.Flags().GetString("pprof-address") + if err != nil { + utils.LavaFormatFatal("failed to read pprof address flag", err, nil) + } + + // start pprof HTTP server + err = performance.StartPprofServer(pprofServerAddress) + if err != nil { + return utils.LavaFormatError("failed to start pprof HTTP server", err, nil) + } + } + + utils.LavaFormatInfo("lavad Binary Version: "+version.Version, nil) + rand.Seed(time.Now().UnixNano()) + var cache *performance.Cache = nil + cacheAddr, err := cmd.Flags().GetString(performance.CacheFlagName) + if err != nil { + utils.LavaFormatError("Failed To Get Cache Address flag", err, &map[string]string{"flags": fmt.Sprintf("%v", cmd.Flags())}) + } else if cacheAddr != "" { + cache, err = performance.InitCache(ctx, cacheAddr) + if err != nil { + utils.LavaFormatError("Failed To Connect to cache at address", err, &map[string]string{"address": cacheAddr}) + } else { + utils.LavaFormatInfo("cache service connected", &map[string]string{"address": cacheAddr}) + } + } + numberOfNodeParallelConnections, err := cmd.Flags().GetUint(chainproxy.ParallelConnectionsFlag) + if err != nil { + utils.LavaFormatFatal("error fetching chainproxy.ParallelConnectionsFlag", err, nil) + } + rpcProvider := RPCProvider{} + err = rpcProvider.Start(ctx, txFactory, clientCtx, rpcProviderEndpoints, cache, numberOfNodeParallelConnections) + return err + }, + } + + // RPCProvider command flags + flags.AddTxFlagsToCmd(cmdRPCProvider) + cmdRPCProvider.MarkFlagRequired(flags.FlagFrom) + cmdRPCProvider.Flags().Bool(common.SaveConfigFlagName, false, "save cmd args to a config file") + cmdRPCProvider.Flags().String(flags.FlagChainID, app.Name, "network chain id") + cmdRPCProvider.Flags().Uint64(sentry.GeolocationFlag, 0, "geolocation to run from") + cmdRPCProvider.MarkFlagRequired(sentry.GeolocationFlag) + 
cmdRPCProvider.Flags().String(performance.PprofAddressFlagName, "", "pprof server address, used for code profiling") + cmdRPCProvider.Flags().String(performance.CacheFlagName, "", "address for a cache server to improve performance") + cmdRPCProvider.Flags().Uint(chainproxy.ParallelConnectionsFlag, chainproxy.NumberOfParallelConnections, "parallel connections") + + return cmdRPCProvider +} From 1369997613121890d9a87e5c2196c1ce2aadf987 Mon Sep 17 00:00:00 2001 From: omer mishael Date: Thu, 9 Mar 2023 02:32:48 +0200 Subject: [PATCH 088/123] refactored proto bufs, now changes to the code --- proto/pairing/relay.proto | 47 +- proto/pairing/tx.proto | 5 +- x/pairing/types/relay.pb.go | 1526 +++++++++++++++++++++++++++-------- x/pairing/types/tx.pb.go | 150 +++- 4 files changed, 1346 insertions(+), 382 deletions(-) diff --git a/proto/pairing/relay.proto b/proto/pairing/relay.proto index 37c20ba8ff..3c00d77f70 100644 --- a/proto/pairing/relay.proto +++ b/proto/pairing/relay.proto @@ -9,24 +9,41 @@ service Relayer { rpc RelaySubscribe (RelayRequest) returns (stream RelayReply) {} } -message RelayRequest { +message RelaySession { string chainID = 1; - string connection_type = 2; - string api_url = 3; // some relays have associated urls that are filled with params ('/block/{height}') - uint64 session_id = 4; - uint64 cu_sum = 5; // total compute unit used including this relay + bytes content_hash = 2; + uint64 session_id = 3; + uint64 cu_sum = 4; // total compute unit used including this relay + string provider = 5; + uint64 relay_num = 6; + QualityOfServiceReport QoSReport = 7; + int64 block_height = 8; + bytes unresponsive_providers = 9; + string lava_chain_id = 10; + bytes sig = 11; + Badge badge = 12; +} + +message RelayPrivateData { + string connection_type = 1; + string api_url = 2; // some relays have associated urls that are filled with params ('/block/{height}') + bytes data = 3; + int64 request_block = 4; + string apiInterface = 5; + bytes salt = 6; +} - bytes data = 6; - bytes sig = 7; - string provider = 8; - int64 block_height = 9; - uint64 relay_num = 10; - int64 request_block = 11; +message RelayRequest { + RelaySession relay_session = 1; + RelayPrivateData relay_data= 2; + VRFData DataReliability = 3; +} - VRFData DataReliability = 12; - QualityOfServiceReport QoSReport = 13; - bytes unresponsive_providers = 14; - string apiInterface = 15; +message Badge { + uint64 cu_allocation =1; + int64 epoch = 2; + bytes badge_pk = 3; + bytes project_sig = 4; } message RelayReply { diff --git a/proto/pairing/tx.proto b/proto/pairing/tx.proto index 62b7888458..5aa69c89c4 100644 --- a/proto/pairing/tx.proto +++ b/proto/pairing/tx.proto @@ -60,8 +60,9 @@ message MsgUnstakeClientResponse { message MsgRelayPayment { string creator = 1; - repeated RelayRequest relays = 2; - string descriptionString = 3; + repeated RelaySession relays = 2; + repeated VRFData VRFs = 3; + string descriptionString = 4; } message MsgRelayPaymentResponse { diff --git a/x/pairing/types/relay.pb.go b/x/pairing/types/relay.pb.go index cdd1fce482..5b2c656f12 100644 --- a/x/pairing/types/relay.pb.go +++ b/x/pairing/types/relay.pb.go @@ -29,36 +29,33 @@ var _ = math.Inf // proto package needs to be updated. 
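The relay.proto refactor above splits the old flat RelayRequest into a RelaySession (the part carrying the signature and accounting fields), a RelayPrivateData (the request payload, which the session references via content_hash), a thin RelayRequest wrapper, and a new Badge message. Below is a minimal sketch of how the regenerated Go types compose; the field names follow the generated structs in this file, while the concrete values, the placeholder provider/chain identifiers, and the sha256 content hash are illustrative assumptions, not taken from the lava signing code.

package main

import (
	"crypto/sha256"
	"fmt"

	pairingtypes "github.com/lavanet/lava/x/pairing/types"
)

func main() {
	// Request-specific payload now lives outside the signed session.
	privateData := &pairingtypes.RelayPrivateData{
		ConnectionType: "GET",
		ApiUrl:         "/block/latest", // placeholder REST path
		Data:           []byte(`{}`),
		RequestBlock:   -1,
		ApiInterface:   "rest",
		Salt:           []byte("example-salt"),
	}

	// Assumption: content_hash commits the session to the private data;
	// sha256 over Data is used here purely for illustration.
	contentHash := sha256.Sum256(privateData.Data)

	session := &pairingtypes.RelaySession{
		ChainID:     "COS3",
		ContentHash: contentHash[:],
		SessionId:   1,
		CuSum:       10,
		Provider:    "lava@exampleprovideraddress", // placeholder
		RelayNum:    1,
		BlockHeight: 100,
		LavaChainId: "lava-local-1", // placeholder
		// Sig and Badge are filled in by the signing flow and omitted here.
	}

	request := &pairingtypes.RelayRequest{
		RelaySession:    session,
		RelayData:       privateData,
		DataReliability: nil, // only set for data-reliability relays
	}
	fmt.Println(request.String())
}

With this split, MsgRelayPayment can carry repeated RelaySession values instead of full requests, which is consistent with the tx.proto change above from repeated RelayRequest to repeated RelaySession (presumably so the request payload itself no longer needs to go on-chain).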
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -type RelayRequest struct { +type RelaySession struct { ChainID string `protobuf:"bytes,1,opt,name=chainID,proto3" json:"chainID,omitempty"` - ConnectionType string `protobuf:"bytes,2,opt,name=connection_type,json=connectionType,proto3" json:"connection_type,omitempty"` - ApiUrl string `protobuf:"bytes,3,opt,name=api_url,json=apiUrl,proto3" json:"api_url,omitempty"` - SessionId uint64 `protobuf:"varint,4,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"` - CuSum uint64 `protobuf:"varint,5,opt,name=cu_sum,json=cuSum,proto3" json:"cu_sum,omitempty"` - Data []byte `protobuf:"bytes,6,opt,name=data,proto3" json:"data,omitempty"` - Sig []byte `protobuf:"bytes,7,opt,name=sig,proto3" json:"sig,omitempty"` - Provider string `protobuf:"bytes,8,opt,name=provider,proto3" json:"provider,omitempty"` - BlockHeight int64 `protobuf:"varint,9,opt,name=block_height,json=blockHeight,proto3" json:"block_height,omitempty"` - RelayNum uint64 `protobuf:"varint,10,opt,name=relay_num,json=relayNum,proto3" json:"relay_num,omitempty"` - RequestBlock int64 `protobuf:"varint,11,opt,name=request_block,json=requestBlock,proto3" json:"request_block,omitempty"` - DataReliability *VRFData `protobuf:"bytes,12,opt,name=DataReliability,proto3" json:"DataReliability,omitempty"` - QoSReport *QualityOfServiceReport `protobuf:"bytes,13,opt,name=QoSReport,proto3" json:"QoSReport,omitempty"` - UnresponsiveProviders []byte `protobuf:"bytes,14,opt,name=unresponsive_providers,json=unresponsiveProviders,proto3" json:"unresponsive_providers,omitempty"` - ApiInterface string `protobuf:"bytes,15,opt,name=apiInterface,proto3" json:"apiInterface,omitempty"` -} - -func (m *RelayRequest) Reset() { *m = RelayRequest{} } -func (m *RelayRequest) String() string { return proto.CompactTextString(m) } -func (*RelayRequest) ProtoMessage() {} -func (*RelayRequest) Descriptor() ([]byte, []int) { + ContentHash []byte `protobuf:"bytes,2,opt,name=content_hash,json=contentHash,proto3" json:"content_hash,omitempty"` + SessionId uint64 `protobuf:"varint,3,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"` + CuSum uint64 `protobuf:"varint,4,opt,name=cu_sum,json=cuSum,proto3" json:"cu_sum,omitempty"` + Provider string `protobuf:"bytes,5,opt,name=provider,proto3" json:"provider,omitempty"` + RelayNum uint64 `protobuf:"varint,6,opt,name=relay_num,json=relayNum,proto3" json:"relay_num,omitempty"` + QoSReport *QualityOfServiceReport `protobuf:"bytes,7,opt,name=QoSReport,proto3" json:"QoSReport,omitempty"` + BlockHeight int64 `protobuf:"varint,8,opt,name=block_height,json=blockHeight,proto3" json:"block_height,omitempty"` + UnresponsiveProviders []byte `protobuf:"bytes,9,opt,name=unresponsive_providers,json=unresponsiveProviders,proto3" json:"unresponsive_providers,omitempty"` + LavaChainId string `protobuf:"bytes,10,opt,name=lava_chain_id,json=lavaChainId,proto3" json:"lava_chain_id,omitempty"` + Sig []byte `protobuf:"bytes,11,opt,name=sig,proto3" json:"sig,omitempty"` + Badge *Badge `protobuf:"bytes,12,opt,name=badge,proto3" json:"badge,omitempty"` +} + +func (m *RelaySession) Reset() { *m = RelaySession{} } +func (m *RelaySession) String() string { return proto.CompactTextString(m) } +func (*RelaySession) ProtoMessage() {} +func (*RelaySession) Descriptor() ([]byte, []int) { return fileDescriptor_10cd1bfeb9978acf, []int{0} } -func (m *RelayRequest) XXX_Unmarshal(b []byte) error { +func (m *RelaySession) XXX_Unmarshal(b []byte) error { return 
m.Unmarshal(b) } -func (m *RelayRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *RelaySession) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_RelayRequest.Marshal(b, m, deterministic) + return xxx_messageInfo_RelaySession.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -68,95 +65,239 @@ func (m *RelayRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return b[:n], nil } } -func (m *RelayRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_RelayRequest.Merge(m, src) +func (m *RelaySession) XXX_Merge(src proto.Message) { + xxx_messageInfo_RelaySession.Merge(m, src) } -func (m *RelayRequest) XXX_Size() int { +func (m *RelaySession) XXX_Size() int { return m.Size() } -func (m *RelayRequest) XXX_DiscardUnknown() { - xxx_messageInfo_RelayRequest.DiscardUnknown(m) +func (m *RelaySession) XXX_DiscardUnknown() { + xxx_messageInfo_RelaySession.DiscardUnknown(m) } -var xxx_messageInfo_RelayRequest proto.InternalMessageInfo +var xxx_messageInfo_RelaySession proto.InternalMessageInfo -func (m *RelayRequest) GetChainID() string { +func (m *RelaySession) GetChainID() string { if m != nil { return m.ChainID } return "" } -func (m *RelayRequest) GetConnectionType() string { +func (m *RelaySession) GetContentHash() []byte { if m != nil { - return m.ConnectionType + return m.ContentHash } - return "" + return nil } -func (m *RelayRequest) GetApiUrl() string { +func (m *RelaySession) GetSessionId() uint64 { if m != nil { - return m.ApiUrl + return m.SessionId + } + return 0 +} + +func (m *RelaySession) GetCuSum() uint64 { + if m != nil { + return m.CuSum + } + return 0 +} + +func (m *RelaySession) GetProvider() string { + if m != nil { + return m.Provider } return "" } -func (m *RelayRequest) GetSessionId() uint64 { +func (m *RelaySession) GetRelayNum() uint64 { if m != nil { - return m.SessionId + return m.RelayNum } return 0 } -func (m *RelayRequest) GetCuSum() uint64 { +func (m *RelaySession) GetQoSReport() *QualityOfServiceReport { if m != nil { - return m.CuSum + return m.QoSReport + } + return nil +} + +func (m *RelaySession) GetBlockHeight() int64 { + if m != nil { + return m.BlockHeight } return 0 } -func (m *RelayRequest) GetData() []byte { +func (m *RelaySession) GetUnresponsiveProviders() []byte { if m != nil { - return m.Data + return m.UnresponsiveProviders } return nil } -func (m *RelayRequest) GetSig() []byte { +func (m *RelaySession) GetLavaChainId() string { + if m != nil { + return m.LavaChainId + } + return "" +} + +func (m *RelaySession) GetSig() []byte { if m != nil { return m.Sig } return nil } -func (m *RelayRequest) GetProvider() string { +func (m *RelaySession) GetBadge() *Badge { if m != nil { - return m.Provider + return m.Badge + } + return nil +} + +type RelayPrivateData struct { + ConnectionType string `protobuf:"bytes,1,opt,name=connection_type,json=connectionType,proto3" json:"connection_type,omitempty"` + ApiUrl string `protobuf:"bytes,2,opt,name=api_url,json=apiUrl,proto3" json:"api_url,omitempty"` + Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` + RequestBlock int64 `protobuf:"varint,4,opt,name=request_block,json=requestBlock,proto3" json:"request_block,omitempty"` + ApiInterface string `protobuf:"bytes,5,opt,name=apiInterface,proto3" json:"apiInterface,omitempty"` + Salt []byte `protobuf:"bytes,6,opt,name=salt,proto3" json:"salt,omitempty"` +} + +func (m *RelayPrivateData) Reset() { *m = 
RelayPrivateData{} } +func (m *RelayPrivateData) String() string { return proto.CompactTextString(m) } +func (*RelayPrivateData) ProtoMessage() {} +func (*RelayPrivateData) Descriptor() ([]byte, []int) { + return fileDescriptor_10cd1bfeb9978acf, []int{1} +} +func (m *RelayPrivateData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RelayPrivateData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RelayPrivateData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RelayPrivateData) XXX_Merge(src proto.Message) { + xxx_messageInfo_RelayPrivateData.Merge(m, src) +} +func (m *RelayPrivateData) XXX_Size() int { + return m.Size() +} +func (m *RelayPrivateData) XXX_DiscardUnknown() { + xxx_messageInfo_RelayPrivateData.DiscardUnknown(m) +} + +var xxx_messageInfo_RelayPrivateData proto.InternalMessageInfo + +func (m *RelayPrivateData) GetConnectionType() string { + if m != nil { + return m.ConnectionType } return "" } -func (m *RelayRequest) GetBlockHeight() int64 { +func (m *RelayPrivateData) GetApiUrl() string { if m != nil { - return m.BlockHeight + return m.ApiUrl } - return 0 + return "" } -func (m *RelayRequest) GetRelayNum() uint64 { +func (m *RelayPrivateData) GetData() []byte { if m != nil { - return m.RelayNum + return m.Data } - return 0 + return nil } -func (m *RelayRequest) GetRequestBlock() int64 { +func (m *RelayPrivateData) GetRequestBlock() int64 { if m != nil { return m.RequestBlock } return 0 } +func (m *RelayPrivateData) GetApiInterface() string { + if m != nil { + return m.ApiInterface + } + return "" +} + +func (m *RelayPrivateData) GetSalt() []byte { + if m != nil { + return m.Salt + } + return nil +} + +type RelayRequest struct { + RelaySession *RelaySession `protobuf:"bytes,1,opt,name=relay_session,json=relaySession,proto3" json:"relay_session,omitempty"` + RelayData *RelayPrivateData `protobuf:"bytes,2,opt,name=relay_data,json=relayData,proto3" json:"relay_data,omitempty"` + DataReliability *VRFData `protobuf:"bytes,3,opt,name=DataReliability,proto3" json:"DataReliability,omitempty"` +} + +func (m *RelayRequest) Reset() { *m = RelayRequest{} } +func (m *RelayRequest) String() string { return proto.CompactTextString(m) } +func (*RelayRequest) ProtoMessage() {} +func (*RelayRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_10cd1bfeb9978acf, []int{2} +} +func (m *RelayRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RelayRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RelayRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RelayRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RelayRequest.Merge(m, src) +} +func (m *RelayRequest) XXX_Size() int { + return m.Size() +} +func (m *RelayRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RelayRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RelayRequest proto.InternalMessageInfo + +func (m *RelayRequest) GetRelaySession() *RelaySession { + if m != nil { + return m.RelaySession + } + return nil +} + +func (m *RelayRequest) GetRelayData() *RelayPrivateData { + if m != nil { + return m.RelayData + } + return nil +} + func (m *RelayRequest) GetDataReliability() *VRFData { if m != nil { return 
m.DataReliability @@ -164,25 +305,72 @@ func (m *RelayRequest) GetDataReliability() *VRFData { return nil } -func (m *RelayRequest) GetQoSReport() *QualityOfServiceReport { +type Badge struct { + CuAllocation uint64 `protobuf:"varint,1,opt,name=cu_allocation,json=cuAllocation,proto3" json:"cu_allocation,omitempty"` + Epoch int64 `protobuf:"varint,2,opt,name=epoch,proto3" json:"epoch,omitempty"` + BadgePk []byte `protobuf:"bytes,3,opt,name=badge_pk,json=badgePk,proto3" json:"badge_pk,omitempty"` + ProjectSig []byte `protobuf:"bytes,4,opt,name=project_sig,json=projectSig,proto3" json:"project_sig,omitempty"` +} + +func (m *Badge) Reset() { *m = Badge{} } +func (m *Badge) String() string { return proto.CompactTextString(m) } +func (*Badge) ProtoMessage() {} +func (*Badge) Descriptor() ([]byte, []int) { + return fileDescriptor_10cd1bfeb9978acf, []int{3} +} +func (m *Badge) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Badge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Badge.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Badge) XXX_Merge(src proto.Message) { + xxx_messageInfo_Badge.Merge(m, src) +} +func (m *Badge) XXX_Size() int { + return m.Size() +} +func (m *Badge) XXX_DiscardUnknown() { + xxx_messageInfo_Badge.DiscardUnknown(m) +} + +var xxx_messageInfo_Badge proto.InternalMessageInfo + +func (m *Badge) GetCuAllocation() uint64 { if m != nil { - return m.QoSReport + return m.CuAllocation } - return nil + return 0 } -func (m *RelayRequest) GetUnresponsiveProviders() []byte { +func (m *Badge) GetEpoch() int64 { if m != nil { - return m.UnresponsiveProviders + return m.Epoch + } + return 0 +} + +func (m *Badge) GetBadgePk() []byte { + if m != nil { + return m.BadgePk } return nil } -func (m *RelayRequest) GetApiInterface() string { +func (m *Badge) GetProjectSig() []byte { if m != nil { - return m.ApiInterface + return m.ProjectSig } - return "" + return nil } type RelayReply struct { @@ -198,7 +386,7 @@ func (m *RelayReply) Reset() { *m = RelayReply{} } func (m *RelayReply) String() string { return proto.CompactTextString(m) } func (*RelayReply) ProtoMessage() {} func (*RelayReply) Descriptor() ([]byte, []int) { - return fileDescriptor_10cd1bfeb9978acf, []int{1} + return fileDescriptor_10cd1bfeb9978acf, []int{4} } func (m *RelayReply) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -283,7 +471,7 @@ func (m *VRFData) Reset() { *m = VRFData{} } func (m *VRFData) String() string { return proto.CompactTextString(m) } func (*VRFData) ProtoMessage() {} func (*VRFData) Descriptor() ([]byte, []int) { - return fileDescriptor_10cd1bfeb9978acf, []int{2} + return fileDescriptor_10cd1bfeb9978acf, []int{5} } func (m *VRFData) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -371,7 +559,7 @@ func (m *QualityOfServiceReport) Reset() { *m = QualityOfServiceReport{} func (m *QualityOfServiceReport) String() string { return proto.CompactTextString(m) } func (*QualityOfServiceReport) ProtoMessage() {} func (*QualityOfServiceReport) Descriptor() ([]byte, []int) { - return fileDescriptor_10cd1bfeb9978acf, []int{3} + return fileDescriptor_10cd1bfeb9978acf, []int{6} } func (m *QualityOfServiceReport) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -401,7 +589,10 @@ func (m *QualityOfServiceReport) XXX_DiscardUnknown() { var xxx_messageInfo_QualityOfServiceReport 
proto.InternalMessageInfo func init() { + proto.RegisterType((*RelaySession)(nil), "lavanet.lava.pairing.RelaySession") + proto.RegisterType((*RelayPrivateData)(nil), "lavanet.lava.pairing.RelayPrivateData") proto.RegisterType((*RelayRequest)(nil), "lavanet.lava.pairing.RelayRequest") + proto.RegisterType((*Badge)(nil), "lavanet.lava.pairing.Badge") proto.RegisterType((*RelayReply)(nil), "lavanet.lava.pairing.RelayReply") proto.RegisterType((*VRFData)(nil), "lavanet.lava.pairing.VRFData") proto.RegisterType((*QualityOfServiceReport)(nil), "lavanet.lava.pairing.QualityOfServiceReport") @@ -410,59 +601,71 @@ func init() { func init() { proto.RegisterFile("pairing/relay.proto", fileDescriptor_10cd1bfeb9978acf) } var fileDescriptor_10cd1bfeb9978acf = []byte{ - // 819 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0x41, 0x6f, 0x1b, 0x45, - 0x14, 0xf6, 0x26, 0x4e, 0x6c, 0x3f, 0x3b, 0x0e, 0x9a, 0x26, 0xed, 0x2a, 0x50, 0xc7, 0x2c, 0x52, - 0xeb, 0x03, 0xd8, 0xa8, 0x08, 0x0e, 0x48, 0x48, 0x60, 0x05, 0x68, 0x10, 0xa2, 0xcd, 0x18, 0x7a, - 0xc8, 0x65, 0x35, 0x5e, 0x8f, 0xd7, 0xa3, 0x8e, 0x67, 0xb6, 0x33, 0xbb, 0x16, 0xcb, 0xaf, 0xe0, - 0xb7, 0x70, 0x80, 0x3b, 0xa7, 0x1e, 0x7b, 0x44, 0x1c, 0xa2, 0x2a, 0xf9, 0x07, 0xfc, 0x02, 0x34, - 0x6f, 0x77, 0x13, 0xb7, 0xb5, 0x90, 0x2a, 0x71, 0xda, 0x79, 0xdf, 0x7b, 0xef, 0x7b, 0x7e, 0xdf, - 0x7b, 0x33, 0x86, 0x5b, 0x09, 0x13, 0x46, 0xa8, 0x78, 0x64, 0xb8, 0x64, 0xf9, 0x30, 0x31, 0x3a, - 0xd5, 0xe4, 0x40, 0xb2, 0x15, 0x53, 0x3c, 0x1d, 0xba, 0xef, 0xb0, 0x8c, 0x38, 0x3a, 0x88, 0x75, - 0xac, 0x31, 0x60, 0xe4, 0x4e, 0x45, 0x6c, 0xf0, 0x47, 0x1d, 0x3a, 0xd4, 0xe5, 0x52, 0xfe, 0x2c, - 0xe3, 0x36, 0x25, 0x3e, 0x34, 0xa2, 0x05, 0x13, 0xea, 0xf4, 0xc4, 0xf7, 0xfa, 0xde, 0xa0, 0x45, - 0x2b, 0x93, 0xdc, 0x87, 0xfd, 0x48, 0x2b, 0xc5, 0xa3, 0x54, 0x68, 0x15, 0xa6, 0x79, 0xc2, 0xfd, - 0x2d, 0x8c, 0xe8, 0xde, 0xc0, 0x3f, 0xe6, 0x09, 0x27, 0x77, 0xa0, 0xc1, 0x12, 0x11, 0x66, 0x46, - 0xfa, 0xdb, 0x18, 0xb0, 0xcb, 0x12, 0xf1, 0x93, 0x91, 0xe4, 0x2e, 0x80, 0xe5, 0xd6, 0xba, 0x74, - 0x31, 0xf3, 0xeb, 0x7d, 0x6f, 0x50, 0xa7, 0xad, 0x12, 0x39, 0x9d, 0x91, 0x43, 0xd8, 0x8d, 0xb2, - 0xd0, 0x66, 0x4b, 0x7f, 0x07, 0x5d, 0x3b, 0x51, 0x36, 0xc9, 0x96, 0x84, 0x40, 0x7d, 0xc6, 0x52, - 0xe6, 0xef, 0xf6, 0xbd, 0x41, 0x87, 0xe2, 0x99, 0xbc, 0x03, 0xdb, 0x56, 0xc4, 0x7e, 0x03, 0x21, - 0x77, 0x24, 0x47, 0xd0, 0x4c, 0x8c, 0x5e, 0x89, 0x19, 0x37, 0x7e, 0x13, 0xab, 0x5e, 0xdb, 0xe4, - 0x7d, 0xe8, 0x4c, 0xa5, 0x8e, 0x9e, 0x86, 0x0b, 0x2e, 0xe2, 0x45, 0xea, 0xb7, 0xfa, 0xde, 0x60, - 0x9b, 0xb6, 0x11, 0x7b, 0x88, 0x10, 0x79, 0x17, 0x5a, 0x28, 0x61, 0xa8, 0xb2, 0xa5, 0x0f, 0x58, - 0xbe, 0x89, 0xc0, 0x0f, 0xd9, 0x92, 0x7c, 0x00, 0x7b, 0xa6, 0x90, 0x27, 0xc4, 0x1c, 0xbf, 0x8d, - 0x04, 0x9d, 0x12, 0x1c, 0x3b, 0x8c, 0x7c, 0x0b, 0xfb, 0x27, 0x2c, 0x65, 0x94, 0x4b, 0xc1, 0xa6, - 0x42, 0x8a, 0x34, 0xf7, 0x3b, 0x7d, 0x6f, 0xd0, 0x7e, 0x70, 0x77, 0xb8, 0x69, 0x1e, 0xc3, 0x27, - 0xf4, 0x1b, 0x8c, 0x7f, 0x3d, 0x8b, 0x7c, 0x07, 0xad, 0x33, 0x3d, 0xa1, 0x3c, 0xd1, 0x26, 0xf5, - 0xf7, 0x90, 0xe2, 0xc3, 0xcd, 0x14, 0x67, 0x19, 0x73, 0x19, 0x8f, 0xe6, 0x13, 0x6e, 0x56, 0x22, - 0xe2, 0x45, 0x0e, 0xbd, 0x49, 0x27, 0x9f, 0xc2, 0xed, 0x4c, 0x19, 0x6e, 0x13, 0xad, 0xac, 0x58, - 0xf1, 0xb0, 0x92, 0xc4, 0xfa, 0x5d, 0x94, 0xee, 0x70, 0xdd, 0xfb, 0xb8, 0x72, 0x92, 0x00, 0x3a, - 0x2c, 0x11, 0xa7, 0x2a, 0xe5, 0x66, 0xce, 0x22, 0xee, 0xef, 0xa3, 0xa0, 0xaf, 0x60, 0xc1, 0x9f, - 0x1e, 0x40, 0xb9, 0x39, 0x89, 0xcc, 0xaf, 0xa7, 0xe4, 0xbd, 0x39, 0xa5, 0xad, 0x9b, 0x29, 
0x1d, - 0xc0, 0x8e, 0xd2, 0x2a, 0xe2, 0xb8, 0x18, 0x7b, 0xb4, 0x30, 0xdc, 0x7c, 0x24, 0x4b, 0x6f, 0xe4, - 0xad, 0x17, 0xf3, 0x29, 0xb0, 0x42, 0xdd, 0xcf, 0xe0, 0xce, 0x5c, 0x28, 0x26, 0xc5, 0x2f, 0x7c, - 0x56, 0x44, 0xd9, 0x70, 0xc1, 0xec, 0x82, 0x5b, 0x5c, 0x96, 0x0e, 0x3d, 0xbc, 0x76, 0x63, 0x82, - 0x7d, 0x88, 0x4e, 0x5c, 0x39, 0x11, 0x97, 0x19, 0xe5, 0x0a, 0xb5, 0xac, 0x88, 0x8b, 0xa0, 0xe0, - 0xa5, 0x07, 0x8d, 0x72, 0x10, 0xe4, 0x1e, 0x74, 0x67, 0x62, 0x3e, 0xe7, 0x86, 0xab, 0x54, 0xb0, - 0x54, 0x1b, 0xec, 0xa5, 0x49, 0x5f, 0x43, 0xdd, 0xaa, 0xac, 0xcc, 0x3c, 0x5c, 0x31, 0x99, 0xf1, - 0xb2, 0xb7, 0xe6, 0xca, 0xcc, 0x9f, 0x38, 0xbb, 0x72, 0x26, 0x46, 0xeb, 0x39, 0x36, 0x59, 0x38, - 0x1f, 0x3b, 0xdb, 0xf5, 0x59, 0x0d, 0x20, 0x74, 0xc2, 0xd4, 0xd1, 0xdf, 0xae, 0xb0, 0x89, 0x88, - 0x49, 0x1f, 0xda, 0x4c, 0x4a, 0xf7, 0x7b, 0x5c, 0x03, 0x65, 0x6f, 0xeb, 0x10, 0x79, 0x0f, 0x5a, - 0xcf, 0x32, 0x6e, 0x72, 0xf4, 0x97, 0x0d, 0x5d, 0x03, 0x6f, 0x5e, 0x8c, 0xe0, 0xb7, 0x2d, 0xb8, - 0xbd, 0x79, 0x51, 0xc8, 0x39, 0x34, 0x9c, 0xc6, 0x2a, 0xca, 0x8b, 0xbb, 0x3e, 0xfe, 0xf2, 0xf9, - 0xc5, 0x71, 0xed, 0xef, 0x8b, 0xe3, 0x7b, 0xb1, 0x48, 0x17, 0xd9, 0x74, 0x18, 0xe9, 0xe5, 0x28, - 0xd2, 0x76, 0xa9, 0x6d, 0xf9, 0xf9, 0xc8, 0xce, 0x9e, 0x8e, 0xdc, 0xd5, 0xb7, 0xc3, 0x13, 0x1e, - 0xfd, 0x73, 0x71, 0xdc, 0xcd, 0xd9, 0x52, 0x7e, 0x1e, 0x7c, 0x5f, 0xd0, 0x04, 0xb4, 0x22, 0x24, - 0x02, 0x3a, 0x6c, 0xc5, 0x84, 0xac, 0xee, 0x02, 0x3e, 0x15, 0xe3, 0xaf, 0xdf, 0xba, 0xc0, 0xad, - 0xa2, 0xc0, 0x3a, 0x57, 0x40, 0x5f, 0xa1, 0x26, 0x67, 0x50, 0xb7, 0xb9, 0x8a, 0x8a, 0xc7, 0x66, - 0xfc, 0xc5, 0x5b, 0x97, 0x68, 0x17, 0x25, 0x1c, 0x47, 0x40, 0x91, 0xea, 0xc1, 0xef, 0x1e, 0x34, - 0x70, 0xb9, 0xb9, 0x21, 0x8f, 0x60, 0x07, 0x8f, 0x24, 0xd8, 0x7c, 0x0b, 0xd7, 0x9f, 0xcf, 0xa3, - 0xfe, 0x7f, 0xc6, 0x24, 0x32, 0x0f, 0x6a, 0xe4, 0x1c, 0xba, 0x68, 0x4f, 0xb2, 0xa9, 0x8d, 0x8c, - 0x98, 0xf2, 0xff, 0x8b, 0xf9, 0x63, 0x6f, 0xfc, 0xd5, 0xf3, 0xcb, 0x9e, 0xf7, 0xe2, 0xb2, 0xe7, - 0xbd, 0xbc, 0xec, 0x79, 0xbf, 0x5e, 0xf5, 0x6a, 0x2f, 0xae, 0x7a, 0xb5, 0xbf, 0xae, 0x7a, 0xb5, - 0xf3, 0xfb, 0x6b, 0x7a, 0x94, 0x4c, 0xf8, 0x1d, 0xfd, 0x3c, 0xaa, 0xfe, 0x44, 0x50, 0x94, 0xe9, - 0x2e, 0xfe, 0x33, 0x7c, 0xf2, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x28, 0x1d, 0x4a, 0xdd, 0x5c, - 0x06, 0x00, 0x00, + // 1013 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0x41, 0x6f, 0x1b, 0xc5, + 0x17, 0xcf, 0xc6, 0x76, 0x6c, 0xbf, 0x75, 0xd3, 0x6a, 0x9a, 0xb4, 0xfb, 0x4f, 0xff, 0x75, 0xcc, + 0x22, 0xa5, 0x39, 0x80, 0x0d, 0x41, 0x70, 0x40, 0x42, 0xa2, 0x26, 0xa5, 0x09, 0x42, 0x34, 0x19, + 0x43, 0x0f, 0xb9, 0xac, 0xc6, 0xeb, 0xf1, 0x7a, 0xc8, 0x7a, 0x67, 0x3b, 0xb3, 0x6b, 0x61, 0x0e, + 0x7c, 0x06, 0x3e, 0x0b, 0x07, 0x3e, 0x00, 0x12, 0x52, 0xb9, 0xf5, 0x88, 0x38, 0x44, 0x55, 0x72, + 0xe0, 0xce, 0x27, 0x40, 0xf3, 0x76, 0xd6, 0x71, 0xa3, 0x28, 0x52, 0x25, 0x4e, 0x3b, 0xf3, 0x7b, + 0x6f, 0x7e, 0xb3, 0xef, 0xf7, 0x7e, 0xfb, 0x6c, 0xb8, 0x9b, 0x32, 0xa1, 0x44, 0x12, 0xf5, 0x14, + 0x8f, 0xd9, 0xbc, 0x9b, 0x2a, 0x99, 0x49, 0xb2, 0x11, 0xb3, 0x19, 0x4b, 0x78, 0xd6, 0x35, 0xcf, + 0xae, 0xcd, 0xd8, 0xda, 0x88, 0x64, 0x24, 0x31, 0xa1, 0x67, 0x56, 0x45, 0xae, 0xff, 0x47, 0x05, + 0x5a, 0xd4, 0x9c, 0x1d, 0x70, 0xad, 0x85, 0x4c, 0x88, 0x07, 0xf5, 0x70, 0xc2, 0x44, 0x72, 0xb8, + 0xef, 0x39, 0x1d, 0x67, 0xb7, 0x49, 0xcb, 0x2d, 0x79, 0x07, 0x5a, 0xa1, 0x4c, 0x32, 0x9e, 0x64, + 0xc1, 0x84, 0xe9, 0x89, 0xb7, 0xda, 0x71, 0x76, 0x5b, 0xd4, 0xb5, 0xd8, 0x01, 0xd3, 0x13, 0xf2, + 0x10, 0x40, 0x17, 0x3c, 0x81, 0x18, 0x79, 0x95, 
0x8e, 0xb3, 0x5b, 0xa5, 0x4d, 0x8b, 0x1c, 0x8e, + 0xc8, 0x26, 0xac, 0x85, 0x79, 0xa0, 0xf3, 0xa9, 0x57, 0xc5, 0x50, 0x2d, 0xcc, 0x07, 0xf9, 0x94, + 0x6c, 0x41, 0x23, 0x55, 0x72, 0x26, 0x46, 0x5c, 0x79, 0x35, 0xbc, 0x73, 0xb1, 0x27, 0x0f, 0xa0, + 0x89, 0xa5, 0x05, 0x49, 0x3e, 0xf5, 0xd6, 0xf0, 0x54, 0x03, 0x81, 0x6f, 0xf2, 0x29, 0xf9, 0x0a, + 0x9a, 0xc7, 0x72, 0x40, 0x79, 0x2a, 0x55, 0xe6, 0xd5, 0x3b, 0xce, 0xae, 0xbb, 0xf7, 0x5e, 0xf7, + 0xba, 0xe2, 0xbb, 0xc7, 0x39, 0x8b, 0x45, 0x36, 0x7f, 0x36, 0x1e, 0x70, 0x35, 0x13, 0x21, 0x2f, + 0xce, 0xd0, 0xcb, 0xe3, 0xa6, 0xba, 0x61, 0x2c, 0xc3, 0xd3, 0x60, 0xc2, 0x45, 0x34, 0xc9, 0xbc, + 0x46, 0xc7, 0xd9, 0xad, 0x50, 0x17, 0xb1, 0x03, 0x84, 0xc8, 0xc7, 0x70, 0x2f, 0x4f, 0x14, 0xd7, + 0xa9, 0x4c, 0xb4, 0x98, 0xf1, 0xa0, 0x7c, 0x49, 0xed, 0x35, 0x51, 0x8a, 0xcd, 0xe5, 0xe8, 0x51, + 0x19, 0x24, 0x3e, 0xdc, 0x32, 0xef, 0x12, 0xa0, 0x8e, 0x46, 0x17, 0xc0, 0x1a, 0x5d, 0x03, 0x7e, + 0x81, 0xda, 0x8e, 0xc8, 0x1d, 0xa8, 0x68, 0x11, 0x79, 0x2e, 0xf2, 0x98, 0x25, 0xf9, 0x10, 0x6a, + 0x43, 0x36, 0x8a, 0xb8, 0xd7, 0xc2, 0xba, 0x1e, 0x5c, 0x5f, 0x57, 0xdf, 0xa4, 0xd0, 0x22, 0xd3, + 0xff, 0xdd, 0x81, 0x3b, 0xd8, 0xcb, 0x23, 0x25, 0x66, 0x2c, 0xe3, 0xfb, 0x2c, 0x63, 0xe4, 0x11, + 0xdc, 0x0e, 0x65, 0x92, 0xf0, 0x30, 0x33, 0x5d, 0xc9, 0xe6, 0x29, 0xb7, 0x7d, 0x5d, 0xbf, 0x84, + 0xbf, 0x9d, 0xa7, 0x9c, 0xdc, 0x87, 0x3a, 0x4b, 0x45, 0x90, 0xab, 0x18, 0x3b, 0xdb, 0xa4, 0x6b, + 0x2c, 0x15, 0xdf, 0xa9, 0x98, 0x10, 0xa8, 0x8e, 0x58, 0xc6, 0xb0, 0x9d, 0x2d, 0x8a, 0x6b, 0xf2, + 0x2e, 0xdc, 0x52, 0xfc, 0x45, 0xce, 0x75, 0x16, 0xa0, 0x42, 0xd8, 0xd0, 0x0a, 0x6d, 0x59, 0xb0, + 0x6f, 0x30, 0xe2, 0x43, 0x8b, 0xa5, 0xe2, 0x30, 0xc9, 0xb8, 0x1a, 0xb3, 0x90, 0xdb, 0xde, 0xbe, + 0x81, 0x19, 0x72, 0xcd, 0xe2, 0x0c, 0x5b, 0xdb, 0xa2, 0xb8, 0xf6, 0xff, 0x76, 0xac, 0x27, 0x69, + 0xc1, 0x46, 0x9e, 0x9a, 0xdb, 0x8c, 0x09, 0xac, 0x95, 0xb0, 0x02, 0x77, 0xcf, 0xbf, 0x5e, 0x93, + 0x65, 0x3b, 0x9b, 0x37, 0x5a, 0x32, 0xf7, 0x13, 0x80, 0x82, 0x08, 0x0b, 0x5a, 0x45, 0x96, 0x9d, + 0x1b, 0x58, 0x96, 0x84, 0xa4, 0x85, 0x0f, 0x51, 0xd3, 0xa7, 0x70, 0x1b, 0x21, 0x1e, 0x0b, 0x36, + 0x14, 0xc6, 0x58, 0x28, 0x8e, 0xbb, 0xf7, 0xf0, 0x7a, 0xae, 0xe7, 0xf4, 0x4b, 0xcc, 0xbf, 0x7a, + 0xca, 0xff, 0x09, 0x6a, 0xd8, 0x41, 0xa3, 0x67, 0x98, 0x07, 0x2c, 0x8e, 0x65, 0xc8, 0xb2, 0xb2, + 0xc2, 0x2a, 0x6d, 0x85, 0xf9, 0xe3, 0x05, 0x46, 0x36, 0xa0, 0xc6, 0x53, 0x19, 0x16, 0x5f, 0x5e, + 0x85, 0x16, 0x1b, 0xf2, 0x3f, 0x68, 0x60, 0xfb, 0x83, 0xf4, 0xd4, 0xb6, 0xa8, 0x8e, 0xfb, 0xa3, + 0x53, 0xb2, 0x0d, 0x6e, 0xaa, 0xe4, 0xf7, 0x3c, 0xcc, 0x02, 0xe3, 0xae, 0x2a, 0x46, 0xc1, 0x42, + 0x03, 0x11, 0xf9, 0xbf, 0x39, 0x00, 0x56, 0xe9, 0x34, 0x9e, 0x2f, 0x3a, 0xed, 0x2c, 0x75, 0xda, + 0x3a, 0x73, 0xf5, 0xd2, 0x99, 0x1b, 0x50, 0x4b, 0x64, 0x12, 0x72, 0xbc, 0xed, 0x16, 0x2d, 0x36, + 0xe6, 0xfb, 0x89, 0x59, 0x76, 0xd5, 0x10, 0x6e, 0x81, 0x15, 0x7e, 0xf8, 0x04, 0xee, 0x8f, 0x45, + 0xc2, 0x62, 0xf1, 0x23, 0x1f, 0x15, 0x59, 0x1a, 0x27, 0x09, 0xd7, 0x68, 0x8d, 0x16, 0xdd, 0x5c, + 0x84, 0xf1, 0x80, 0x3e, 0xc0, 0x20, 0x4e, 0x15, 0x11, 0xd9, 0x13, 0xd6, 0x29, 0x4d, 0x2d, 0xa2, + 0x22, 0xc9, 0x7f, 0xed, 0x40, 0xdd, 0x2a, 0x4c, 0x76, 0x60, 0x7d, 0x24, 0xc6, 0x63, 0xae, 0x78, + 0x92, 0x09, 0x96, 0x49, 0x85, 0xb5, 0x34, 0xe8, 0x15, 0xd4, 0x8c, 0x95, 0x99, 0x1a, 0x07, 0x33, + 0x16, 0xe7, 0xdc, 0xd6, 0xd6, 0x98, 0xa9, 0xf1, 0x73, 0xb3, 0x2f, 0x83, 0xa9, 0x92, 0x72, 0x6c, + 0x25, 0x35, 0xc1, 0x23, 0xb3, 0x37, 0x75, 0x96, 0xdf, 0xfd, 0x92, 0xa8, 0x6e, 0x89, 0x0d, 0x44, + 0x44, 0x3a, 0xe0, 0xb2, 0x38, 0x36, 0xef, 0x63, 0x0a, 0xb0, 0xb5, 0x2d, 
0x43, 0xe4, 0xff, 0xd0, + 0x7c, 0x91, 0x73, 0x35, 0xc7, 0xb8, 0x2d, 0x68, 0x01, 0x94, 0x92, 0xd7, 0x17, 0x92, 0xfb, 0xbf, + 0xac, 0xc2, 0xbd, 0xeb, 0x47, 0x18, 0x39, 0x81, 0xba, 0xd1, 0x38, 0x09, 0xe7, 0xc5, 0x77, 0xdd, + 0xff, 0xfc, 0xe5, 0xd9, 0xf6, 0xca, 0x5f, 0x67, 0xdb, 0x3b, 0x91, 0xc8, 0x26, 0xf9, 0xb0, 0x1b, + 0xca, 0x69, 0x2f, 0x94, 0x7a, 0x2a, 0xb5, 0x7d, 0xbc, 0xaf, 0x47, 0xa7, 0x3d, 0x33, 0x08, 0x74, + 0x77, 0x9f, 0x87, 0xff, 0x9c, 0x6d, 0xaf, 0xcf, 0xd9, 0x34, 0xfe, 0xd4, 0xff, 0xba, 0xa0, 0xf1, + 0x69, 0x49, 0x48, 0x04, 0xb4, 0xd8, 0x8c, 0x89, 0xb8, 0x34, 0x39, 0xce, 0x85, 0xfe, 0x93, 0xb7, + 0xbe, 0xe0, 0x6e, 0x71, 0xc1, 0x32, 0x97, 0x4f, 0xdf, 0xa0, 0x26, 0xc7, 0x50, 0xd5, 0xf3, 0x24, + 0x44, 0xb9, 0x9b, 0xfd, 0xcf, 0xde, 0xfa, 0x0a, 0xb7, 0xb8, 0xc2, 0x70, 0xf8, 0x14, 0xa9, 0xf6, + 0x7e, 0x75, 0xa0, 0x8e, 0xe6, 0xe6, 0x8a, 0x3c, 0x83, 0x1a, 0x2e, 0xc9, 0x4d, 0x33, 0xc3, 0x8e, + 0x9b, 0xad, 0xce, 0x8d, 0x39, 0x69, 0x3c, 0xf7, 0x57, 0xc8, 0x09, 0xac, 0x17, 0x73, 0x26, 0x1f, + 0xea, 0x50, 0x89, 0x21, 0xff, 0xaf, 0x98, 0x3f, 0x70, 0xfa, 0x8f, 0x5f, 0x9e, 0xb7, 0x9d, 0x57, + 0xe7, 0x6d, 0xe7, 0xf5, 0x79, 0xdb, 0xf9, 0xf9, 0xa2, 0xbd, 0xf2, 0xea, 0xa2, 0xbd, 0xf2, 0xe7, + 0x45, 0x7b, 0xe5, 0xe4, 0xd1, 0x92, 0x1e, 0x96, 0x09, 0x9f, 0xbd, 0x1f, 0x7a, 0xe5, 0x1f, 0x01, + 0x14, 0x65, 0xb8, 0x86, 0xbf, 0xee, 0x1f, 0xfd, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x25, 0x7e, 0xca, + 0xaa, 0x20, 0x08, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -609,7 +812,7 @@ var _Relayer_serviceDesc = grpc.ServiceDesc{ Metadata: "pairing/relay.proto", } -func (m *RelayRequest) Marshal() (dAtA []byte, err error) { +func (m *RelaySession) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -619,45 +822,57 @@ func (m *RelayRequest) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RelayRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *RelaySession) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RelayRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *RelaySession) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.ApiInterface) > 0 { - i -= len(m.ApiInterface) - copy(dAtA[i:], m.ApiInterface) - i = encodeVarintRelay(dAtA, i, uint64(len(m.ApiInterface))) + if m.Badge != nil { + { + size, err := m.Badge.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRelay(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + if len(m.Sig) > 0 { + i -= len(m.Sig) + copy(dAtA[i:], m.Sig) + i = encodeVarintRelay(dAtA, i, uint64(len(m.Sig))) + i-- + dAtA[i] = 0x5a + } + if len(m.LavaChainId) > 0 { + i -= len(m.LavaChainId) + copy(dAtA[i:], m.LavaChainId) + i = encodeVarintRelay(dAtA, i, uint64(len(m.LavaChainId))) i-- - dAtA[i] = 0x7a + dAtA[i] = 0x52 } if len(m.UnresponsiveProviders) > 0 { i -= len(m.UnresponsiveProviders) copy(dAtA[i:], m.UnresponsiveProviders) i = encodeVarintRelay(dAtA, i, uint64(len(m.UnresponsiveProviders))) i-- - dAtA[i] = 0x72 + dAtA[i] = 0x4a } - if m.QoSReport != nil { - { - size, err := m.QoSReport.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRelay(dAtA, i, uint64(size)) - } + if m.BlockHeight != 0 { + i = encodeVarintRelay(dAtA, i, uint64(m.BlockHeight)) i-- - dAtA[i] = 0x6a + dAtA[i] = 0x40 } - if m.DataReliability != nil { + if 
m.QoSReport != nil { { - size, err := m.DataReliability.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.QoSReport.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -665,67 +880,36 @@ func (m *RelayRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintRelay(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x62 - } - if m.RequestBlock != 0 { - i = encodeVarintRelay(dAtA, i, uint64(m.RequestBlock)) - i-- - dAtA[i] = 0x58 + dAtA[i] = 0x3a } if m.RelayNum != 0 { i = encodeVarintRelay(dAtA, i, uint64(m.RelayNum)) i-- - dAtA[i] = 0x50 - } - if m.BlockHeight != 0 { - i = encodeVarintRelay(dAtA, i, uint64(m.BlockHeight)) - i-- - dAtA[i] = 0x48 + dAtA[i] = 0x30 } if len(m.Provider) > 0 { i -= len(m.Provider) copy(dAtA[i:], m.Provider) i = encodeVarintRelay(dAtA, i, uint64(len(m.Provider))) i-- - dAtA[i] = 0x42 - } - if len(m.Sig) > 0 { - i -= len(m.Sig) - copy(dAtA[i:], m.Sig) - i = encodeVarintRelay(dAtA, i, uint64(len(m.Sig))) - i-- - dAtA[i] = 0x3a - } - if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintRelay(dAtA, i, uint64(len(m.Data))) - i-- - dAtA[i] = 0x32 + dAtA[i] = 0x2a } if m.CuSum != 0 { i = encodeVarintRelay(dAtA, i, uint64(m.CuSum)) i-- - dAtA[i] = 0x28 + dAtA[i] = 0x20 } if m.SessionId != 0 { i = encodeVarintRelay(dAtA, i, uint64(m.SessionId)) i-- - dAtA[i] = 0x20 + dAtA[i] = 0x18 } - if len(m.ApiUrl) > 0 { - i -= len(m.ApiUrl) - copy(dAtA[i:], m.ApiUrl) - i = encodeVarintRelay(dAtA, i, uint64(len(m.ApiUrl))) + if len(m.ContentHash) > 0 { + i -= len(m.ContentHash) + copy(dAtA[i:], m.ContentHash) + i = encodeVarintRelay(dAtA, i, uint64(len(m.ContentHash))) i-- - dAtA[i] = 0x1a - } - if len(m.ConnectionType) > 0 { - i -= len(m.ConnectionType) - copy(dAtA[i:], m.ConnectionType) - i = encodeVarintRelay(dAtA, i, uint64(len(m.ConnectionType))) - i-- - dAtA[i] = 0x12 + dAtA[i] = 0x12 } if len(m.ChainID) > 0 { i -= len(m.ChainID) @@ -737,6 +921,175 @@ func (m *RelayRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *RelayPrivateData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RelayPrivateData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RelayPrivateData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Salt) > 0 { + i -= len(m.Salt) + copy(dAtA[i:], m.Salt) + i = encodeVarintRelay(dAtA, i, uint64(len(m.Salt))) + i-- + dAtA[i] = 0x32 + } + if len(m.ApiInterface) > 0 { + i -= len(m.ApiInterface) + copy(dAtA[i:], m.ApiInterface) + i = encodeVarintRelay(dAtA, i, uint64(len(m.ApiInterface))) + i-- + dAtA[i] = 0x2a + } + if m.RequestBlock != 0 { + i = encodeVarintRelay(dAtA, i, uint64(m.RequestBlock)) + i-- + dAtA[i] = 0x20 + } + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintRelay(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x1a + } + if len(m.ApiUrl) > 0 { + i -= len(m.ApiUrl) + copy(dAtA[i:], m.ApiUrl) + i = encodeVarintRelay(dAtA, i, uint64(len(m.ApiUrl))) + i-- + dAtA[i] = 0x12 + } + if len(m.ConnectionType) > 0 { + i -= len(m.ConnectionType) + copy(dAtA[i:], m.ConnectionType) + i = encodeVarintRelay(dAtA, i, uint64(len(m.ConnectionType))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RelayRequest) Marshal() (dAtA []byte, err error) { + 
size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RelayRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RelayRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DataReliability != nil { + { + size, err := m.DataReliability.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRelay(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.RelayData != nil { + { + size, err := m.RelayData.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRelay(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.RelaySession != nil { + { + size, err := m.RelaySession.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRelay(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Badge) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Badge) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Badge) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ProjectSig) > 0 { + i -= len(m.ProjectSig) + copy(dAtA[i:], m.ProjectSig) + i = encodeVarintRelay(dAtA, i, uint64(len(m.ProjectSig))) + i-- + dAtA[i] = 0x22 + } + if len(m.BadgePk) > 0 { + i -= len(m.BadgePk) + copy(dAtA[i:], m.BadgePk) + i = encodeVarintRelay(dAtA, i, uint64(len(m.BadgePk))) + i-- + dAtA[i] = 0x1a + } + if m.Epoch != 0 { + i = encodeVarintRelay(dAtA, i, uint64(m.Epoch)) + i-- + dAtA[i] = 0x10 + } + if m.CuAllocation != 0 { + i = encodeVarintRelay(dAtA, i, uint64(m.CuAllocation)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + func (m *RelayReply) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -937,7 +1290,7 @@ func encodeVarintRelay(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } -func (m *RelayRequest) Size() (n int) { +func (m *RelaySession) Size() (n int) { if m == nil { return 0 } @@ -947,11 +1300,7 @@ func (m *RelayRequest) Size() (n int) { if l > 0 { n += 1 + l + sovRelay(uint64(l)) } - l = len(m.ConnectionType) - if l > 0 { - n += 1 + l + sovRelay(uint64(l)) - } - l = len(m.ApiUrl) + l = len(m.ContentHash) if l > 0 { n += 1 + l + sovRelay(uint64(l)) } @@ -961,7 +1310,25 @@ func (m *RelayRequest) Size() (n int) { if m.CuSum != 0 { n += 1 + sovRelay(uint64(m.CuSum)) } - l = len(m.Data) + l = len(m.Provider) + if l > 0 { + n += 1 + l + sovRelay(uint64(l)) + } + if m.RelayNum != 0 { + n += 1 + sovRelay(uint64(m.RelayNum)) + } + if m.QoSReport != nil { + l = m.QoSReport.Size() + n += 1 + l + sovRelay(uint64(l)) + } + if m.BlockHeight != 0 { + n += 1 + sovRelay(uint64(m.BlockHeight)) + } + l = len(m.UnresponsiveProviders) + if l > 0 { + n += 1 + l + sovRelay(uint64(l)) + } + l = len(m.LavaChainId) if l > 0 { n += 1 + l + sovRelay(uint64(l)) } @@ -969,32 +1336,83 @@ func (m *RelayRequest) Size() (n int) { if l > 0 { n += 1 + l + sovRelay(uint64(l)) } - l = len(m.Provider) + if m.Badge != nil { + l = m.Badge.Size() + n += 1 + l + sovRelay(uint64(l)) + } + 
return n +} + +func (m *RelayPrivateData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ConnectionType) if l > 0 { n += 1 + l + sovRelay(uint64(l)) } - if m.BlockHeight != 0 { - n += 1 + sovRelay(uint64(m.BlockHeight)) + l = len(m.ApiUrl) + if l > 0 { + n += 1 + l + sovRelay(uint64(l)) } - if m.RelayNum != 0 { - n += 1 + sovRelay(uint64(m.RelayNum)) + l = len(m.Data) + if l > 0 { + n += 1 + l + sovRelay(uint64(l)) } if m.RequestBlock != 0 { n += 1 + sovRelay(uint64(m.RequestBlock)) } + l = len(m.ApiInterface) + if l > 0 { + n += 1 + l + sovRelay(uint64(l)) + } + l = len(m.Salt) + if l > 0 { + n += 1 + l + sovRelay(uint64(l)) + } + return n +} + +func (m *RelayRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RelaySession != nil { + l = m.RelaySession.Size() + n += 1 + l + sovRelay(uint64(l)) + } + if m.RelayData != nil { + l = m.RelayData.Size() + n += 1 + l + sovRelay(uint64(l)) + } if m.DataReliability != nil { l = m.DataReliability.Size() n += 1 + l + sovRelay(uint64(l)) } - if m.QoSReport != nil { - l = m.QoSReport.Size() - n += 1 + l + sovRelay(uint64(l)) + return n +} + +func (m *Badge) Size() (n int) { + if m == nil { + return 0 } - l = len(m.UnresponsiveProviders) + var l int + _ = l + if m.CuAllocation != 0 { + n += 1 + sovRelay(uint64(m.CuAllocation)) + } + if m.Epoch != 0 { + n += 1 + sovRelay(uint64(m.Epoch)) + } + l = len(m.BadgePk) if l > 0 { n += 1 + l + sovRelay(uint64(l)) } - l = len(m.ApiInterface) + l = len(m.ProjectSig) if l > 0 { n += 1 + l + sovRelay(uint64(l)) } @@ -1089,7 +1507,7 @@ func sovRelay(x uint64) (n int) { func sozRelay(x uint64) (n int) { return sovRelay(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (m *RelayRequest) Unmarshal(dAtA []byte) error { +func (m *RelaySession) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1112,10 +1530,10 @@ func (m *RelayRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RelayRequest: wiretype end group for non-group") + return fmt.Errorf("proto: RelaySession: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RelayRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RelaySession: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -1152,9 +1570,9 @@ func (m *RelayRequest) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConnectionType", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ContentHash", wireType) } - var stringLen uint64 + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRelay @@ -1164,29 +1582,31 @@ func (m *RelayRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if byteLen < 0 { return ErrInvalidLengthRelay } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthRelay } if postIndex > l { return io.ErrUnexpectedEOF } - m.ConnectionType = string(dAtA[iNdEx:postIndex]) + m.ContentHash = append(m.ContentHash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.ContentHash == nil { + m.ContentHash = []byte{} + } iNdEx = postIndex case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ApiUrl", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionId", wireType) } - var stringLen uint64 + m.SessionId = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRelay @@ -1196,46 +1616,14 @@ func (m *RelayRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.SessionId |= uint64(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRelay - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRelay - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ApiUrl = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SessionId", wireType) - } - m.SessionId = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRelay - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.SessionId |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CuSum", wireType) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CuSum", wireType) } m.CuSum = 0 for shift := uint(0); ; shift += 7 { @@ -1252,11 +1640,11 @@ func (m *RelayRequest) Unmarshal(dAtA []byte) error { break } } - case 6: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType) } - var byteLen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRelay @@ -1266,31 +1654,29 @@ func (m *RelayRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthRelay } - postIndex := iNdEx + byteLen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthRelay } if postIndex > l { return io.ErrUnexpectedEOF } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) - if m.Data == nil { - m.Data = []byte{} - } + m.Provider = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Sig", wireType) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RelayNum", wireType) } - var byteLen int + m.RelayNum = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRelay @@ -1300,31 +1686,16 @@ func (m *RelayRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + m.RelayNum |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { - return ErrInvalidLengthRelay - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRelay - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Sig = append(m.Sig[:0], dAtA[iNdEx:postIndex]...) 
- if m.Sig == nil { - m.Sig = []byte{} - } - iNdEx = postIndex - case 8: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field QoSReport", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRelay @@ -1334,25 +1705,29 @@ func (m *RelayRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthRelay } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthRelay } if postIndex > l { return io.ErrUnexpectedEOF } - m.Provider = string(dAtA[iNdEx:postIndex]) + if m.QoSReport == nil { + m.QoSReport = &QualityOfServiceReport{} + } + if err := m.QoSReport.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 9: + case 8: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field BlockHeight", wireType) } @@ -1371,11 +1746,11 @@ func (m *RelayRequest) Unmarshal(dAtA []byte) error { break } } - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RelayNum", wireType) + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UnresponsiveProviders", wireType) } - m.RelayNum = 0 + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRelay @@ -1385,16 +1760,31 @@ func (m *RelayRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.RelayNum |= uint64(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RequestBlock", wireType) + if byteLen < 0 { + return ErrInvalidLengthRelay } - m.RequestBlock = 0 + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthRelay + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UnresponsiveProviders = append(m.UnresponsiveProviders[:0], dAtA[iNdEx:postIndex]...) 
+ if m.UnresponsiveProviders == nil { + m.UnresponsiveProviders = []byte{} + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LavaChainId", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRelay @@ -1404,16 +1794,29 @@ func (m *RelayRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.RequestBlock |= int64(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - case 12: + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRelay + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRelay + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LavaChainId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 11: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DataReliability", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Sig", wireType) } - var msglen int + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRelay @@ -1423,31 +1826,29 @@ func (m *RelayRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + if byteLen < 0 { return ErrInvalidLengthRelay } - postIndex := iNdEx + msglen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthRelay } if postIndex > l { return io.ErrUnexpectedEOF } - if m.DataReliability == nil { - m.DataReliability = &VRFData{} - } - if err := m.DataReliability.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.Sig = append(m.Sig[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Sig == nil { + m.Sig = []byte{} } iNdEx = postIndex - case 13: + case 12: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field QoSReport", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Badge", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1474,18 +1875,68 @@ func (m *RelayRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.QoSReport == nil { - m.QoSReport = &QualityOfServiceReport{} + if m.Badge == nil { + m.Badge = &Badge{} } - if err := m.QoSReport.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Badge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 14: + default: + iNdEx = preIndex + skippy, err := skipRelay(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRelay + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RelayPrivateData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRelay + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RelayPrivateData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RelayPrivateData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UnresponsiveProviders", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ConnectionType", wireType) } - var byteLen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRelay @@ -1495,29 +1946,27 @@ func (m *RelayRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthRelay } - postIndex := iNdEx + byteLen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthRelay } if postIndex > l { return io.ErrUnexpectedEOF } - m.UnresponsiveProviders = append(m.UnresponsiveProviders[:0], dAtA[iNdEx:postIndex]...) 
- if m.UnresponsiveProviders == nil { - m.UnresponsiveProviders = []byte{} - } + m.ConnectionType = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 15: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ApiInterface", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ApiUrl", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1545,7 +1994,440 @@ func (m *RelayRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ApiInterface = string(dAtA[iNdEx:postIndex]) + m.ApiUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRelay + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRelay + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthRelay + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestBlock", wireType) + } + m.RequestBlock = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRelay + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RequestBlock |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ApiInterface", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRelay + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRelay + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRelay + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ApiInterface = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Salt", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRelay + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRelay + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthRelay + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Salt = append(m.Salt[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Salt == nil { + m.Salt = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRelay(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRelay + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RelayRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRelay + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RelayRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RelayRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RelaySession", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRelay + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRelay + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRelay + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RelaySession == nil { + m.RelaySession = &RelaySession{} + } + if err := m.RelaySession.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RelayData", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRelay + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRelay + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRelay + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RelayData == nil { + m.RelayData = &RelayPrivateData{} + } + if err := m.RelayData.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataReliability", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRelay + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRelay + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRelay + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DataReliability == nil { + m.DataReliability = &VRFData{} + } + if err := m.DataReliability.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRelay(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRelay + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + 
iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Badge) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRelay + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Badge: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Badge: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CuAllocation", wireType) + } + m.CuAllocation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRelay + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CuAllocation |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Epoch", wireType) + } + m.Epoch = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRelay + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Epoch |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BadgePk", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRelay + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRelay + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthRelay + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BadgePk = append(m.BadgePk[:0], dAtA[iNdEx:postIndex]...) + if m.BadgePk == nil { + m.BadgePk = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProjectSig", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRelay + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRelay + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthRelay + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProjectSig = append(m.ProjectSig[:0], dAtA[iNdEx:postIndex]...) 
+ if m.ProjectSig == nil { + m.ProjectSig = []byte{} + } iNdEx = postIndex default: iNdEx = preIndex diff --git a/x/pairing/types/tx.pb.go b/x/pairing/types/tx.pb.go index b1310c8b8b..24a681ecf1 100644 --- a/x/pairing/types/tx.pb.go +++ b/x/pairing/types/tx.pb.go @@ -440,8 +440,9 @@ var xxx_messageInfo_MsgUnstakeClientResponse proto.InternalMessageInfo type MsgRelayPayment struct { Creator string `protobuf:"bytes,1,opt,name=creator,proto3" json:"creator,omitempty"` - Relays []*RelayRequest `protobuf:"bytes,2,rep,name=relays,proto3" json:"relays,omitempty"` - DescriptionString string `protobuf:"bytes,3,opt,name=descriptionString,proto3" json:"descriptionString,omitempty"` + Relays []*RelaySession `protobuf:"bytes,2,rep,name=relays,proto3" json:"relays,omitempty"` + VRFs []*VRFData `protobuf:"bytes,3,rep,name=VRFs,proto3" json:"VRFs,omitempty"` + DescriptionString string `protobuf:"bytes,4,opt,name=descriptionString,proto3" json:"descriptionString,omitempty"` } func (m *MsgRelayPayment) Reset() { *m = MsgRelayPayment{} } @@ -484,13 +485,20 @@ func (m *MsgRelayPayment) GetCreator() string { return "" } -func (m *MsgRelayPayment) GetRelays() []*RelayRequest { +func (m *MsgRelayPayment) GetRelays() []*RelaySession { if m != nil { return m.Relays } return nil } +func (m *MsgRelayPayment) GetVRFs() []*VRFData { + if m != nil { + return m.VRFs + } + return nil +} + func (m *MsgRelayPayment) GetDescriptionString() string { if m != nil { return m.DescriptionString @@ -550,44 +558,46 @@ func init() { func init() { proto.RegisterFile("pairing/tx.proto", fileDescriptor_b2db224a5e52fa36) } var fileDescriptor_b2db224a5e52fa36 = []byte{ - // 591 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcf, 0x6f, 0xd3, 0x30, - 0x14, 0x6e, 0xb6, 0xae, 0xa8, 0x2e, 0x63, 0x23, 0x54, 0x90, 0x05, 0x14, 0xaa, 0x00, 0xa3, 0x87, - 0xe1, 0xb0, 0x72, 0x40, 0xe2, 0xc6, 0xc6, 0xcf, 0x43, 0xa5, 0x29, 0x13, 0x17, 0x6e, 0x6e, 0x6a, - 0x5c, 0xab, 0x8d, 0x1d, 0x6c, 0xb7, 0x5a, 0xef, 0xdc, 0xe1, 0xc2, 0x9f, 0xc2, 0xff, 0xb0, 0xe3, - 0x8e, 0x9c, 0x10, 0x6a, 0xff, 0x11, 0x94, 0xc4, 0x09, 0x4d, 0xba, 0x8d, 0x0a, 0x24, 0x4e, 0xc9, - 0xf3, 0xfb, 0xde, 0xfb, 0xde, 0xf7, 0xfc, 0xa5, 0x05, 0xdb, 0x11, 0xa2, 0x82, 0x32, 0xe2, 0xa9, - 0x13, 0x18, 0x09, 0xae, 0xb8, 0xd9, 0x1c, 0xa1, 0x09, 0x62, 0x58, 0xc1, 0xf8, 0x09, 0x75, 0xda, - 0x76, 0x02, 0x2e, 0x43, 0x2e, 0xbd, 0x1e, 0x92, 0xd8, 0x9b, 0xec, 0xf7, 0xb0, 0x42, 0xfb, 0x5e, - 0xc0, 0x29, 0x4b, 0xab, 0xec, 0x26, 0xe1, 0x84, 0x27, 0xaf, 0x5e, 0xfc, 0xa6, 0x4f, 0x6f, 0xe3, - 0x88, 0x07, 0x03, 0xa9, 0xb8, 0x40, 0x04, 0x7b, 0x98, 0xf5, 0x23, 0x4e, 0x99, 0xd2, 0xc9, 0x1b, - 0x19, 0xb5, 0xc0, 0x23, 0x34, 0x4d, 0x0f, 0xdd, 0x4f, 0x6b, 0x60, 0xbb, 0x2b, 0xc9, 0xb1, 0x42, - 0x43, 0x7c, 0x24, 0xf8, 0x84, 0xf6, 0xb1, 0x30, 0x2d, 0x70, 0x25, 0x10, 0x18, 0x29, 0x2e, 0x2c, - 0xa3, 0x65, 0xb4, 0xeb, 0x7e, 0x16, 0x26, 0x99, 0x01, 0xa2, 0xec, 0xed, 0x0b, 0x6b, 0x4d, 0x67, - 0xd2, 0xd0, 0x7c, 0x0a, 0x6a, 0x28, 0xe4, 0x63, 0xa6, 0xac, 0xf5, 0x96, 0xd1, 0x6e, 0x74, 0x76, - 0x60, 0xaa, 0x00, 0xc6, 0x0a, 0xa0, 0x56, 0x00, 0x0f, 0x39, 0x65, 0x07, 0xd5, 0xd3, 0x1f, 0x77, - 0x2b, 0xbe, 0x86, 0x9b, 0xaf, 0x41, 0x3d, 0x1b, 0x54, 0x5a, 0xd5, 0xd6, 0x7a, 0xbb, 0xd1, 0xb9, - 0x07, 0x0b, 0x3b, 0x59, 0x14, 0x05, 0x5f, 0x6a, 0xac, 0xee, 0xf2, 0xbb, 0xd6, 0x6c, 0x81, 0x06, - 0xc1, 0x7c, 0xc4, 0x03, 0xa4, 0x28, 0x67, 0xd6, 0x46, 0xcb, 0x68, 0x57, 0xfd, 0xc5, 0xa3, 0x78, - 0xfa, 0x90, 0x33, 0x3a, 0xc4, 0xc2, 0xaa, 0xa5, 0xd3, 0xeb, 0xd0, 0xb5, 0x81, 0x55, 0xde, 
0x82, - 0x8f, 0x65, 0xc4, 0x99, 0xc4, 0xee, 0x37, 0x03, 0x5c, 0xcb, 0x92, 0x87, 0x23, 0x8a, 0x99, 0xfa, - 0xbf, 0x0b, 0x2a, 0xe9, 0xaa, 0x2e, 0xeb, 0x6a, 0x82, 0x8d, 0x89, 0xf8, 0x10, 0x0d, 0x13, 0xcd, - 0x75, 0x3f, 0x0d, 0x5c, 0x0b, 0xdc, 0x2c, 0x8e, 0x9d, 0x2b, 0x7a, 0x03, 0xcc, 0xae, 0x24, 0xef, - 0x98, 0xfc, 0xd7, 0x5b, 0x77, 0xef, 0x00, 0x7b, 0xb9, 0x53, 0xce, 0xf3, 0x2a, 0xf1, 0x96, 0xce, - 0xfe, 0xfd, 0xea, 0xf4, 0xed, 0x14, 0xfa, 0xe4, 0x1c, 0x5f, 0x0d, 0xb0, 0xd5, 0x95, 0xc4, 0x8f, - 0x3d, 0x7d, 0x84, 0xa6, 0xe1, 0xe5, 0x1c, 0xcf, 0x40, 0x2d, 0x71, 0xbf, 0xb4, 0xd6, 0x12, 0xa7, - 0xb9, 0xf0, 0xbc, 0xaf, 0x0f, 0x26, 0xdd, 0x7c, 0xfc, 0x71, 0x8c, 0xa5, 0xf2, 0x75, 0x85, 0xb9, - 0x07, 0xae, 0xf7, 0xb1, 0x0c, 0x04, 0x8d, 0xe2, 0xa5, 0x1f, 0xab, 0x18, 0x99, 0xdc, 0x65, 0xdd, - 0x5f, 0x4e, 0xb8, 0x3b, 0xe0, 0x56, 0x69, 0xac, 0x6c, 0xe4, 0xce, 0xe7, 0x2a, 0x58, 0xef, 0x4a, - 0x62, 0x12, 0xb0, 0x59, 0xfc, 0xee, 0x76, 0xcf, 0x9f, 0xa6, 0xec, 0x4c, 0x1b, 0xae, 0x86, 0xcb, - 0x08, 0x4d, 0x04, 0x1a, 0x8b, 0xee, 0xbd, 0x7f, 0x79, 0x79, 0x8a, 0xb2, 0xf7, 0x56, 0x41, 0xe5, - 0x14, 0x21, 0xd8, 0x2a, 0xfb, 0xa9, 0x7d, 0x61, 0x83, 0x12, 0xd2, 0x7e, 0xbc, 0x2a, 0x32, 0xa7, - 0x23, 0x60, 0xb3, 0x68, 0xab, 0xdd, 0x3f, 0xb5, 0xd0, 0xaa, 0xe0, 0x6a, 0xb8, 0x9c, 0xa8, 0x0f, - 0xae, 0x16, 0xac, 0xf5, 0xe0, 0xc2, 0xfa, 0x45, 0x98, 0xfd, 0x68, 0x25, 0x58, 0xc6, 0x72, 0xf0, - 0xfc, 0x74, 0xe6, 0x18, 0x67, 0x33, 0xc7, 0xf8, 0x39, 0x73, 0x8c, 0x2f, 0x73, 0xa7, 0x72, 0x36, - 0x77, 0x2a, 0xdf, 0xe7, 0x4e, 0xe5, 0xfd, 0x43, 0x42, 0xd5, 0x60, 0xdc, 0x83, 0x01, 0x0f, 0x3d, - 0xdd, 0x32, 0x79, 0x7a, 0x27, 0x5e, 0xfe, 0x4f, 0x32, 0x8d, 0xb0, 0xec, 0xd5, 0x92, 0xdf, 0xf3, - 0x27, 0xbf, 0x02, 0x00, 0x00, 0xff, 0xff, 0x0d, 0x92, 0x41, 0xe8, 0x61, 0x06, 0x00, 0x00, + // 612 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0x41, 0x6f, 0xd3, 0x4c, + 0x10, 0x8d, 0x1b, 0x37, 0x9f, 0xb2, 0xf9, 0x4a, 0x8b, 0xa9, 0xc0, 0x35, 0x60, 0x22, 0x03, 0x25, + 0x87, 0xb2, 0xa6, 0xe5, 0x80, 0xc4, 0x8d, 0xb6, 0x14, 0x38, 0x44, 0xaa, 0x5c, 0xd1, 0x03, 0xb7, + 0x8d, 0xb3, 0x6c, 0x57, 0x8d, 0x77, 0x2d, 0xef, 0x36, 0x6a, 0xef, 0xdc, 0xe1, 0xcf, 0xf0, 0x1f, + 0x72, 0xec, 0x91, 0x13, 0x42, 0xc9, 0x1f, 0x41, 0x5e, 0xaf, 0x4d, 0xec, 0xa4, 0x25, 0x02, 0x89, + 0x93, 0xbd, 0x3b, 0x6f, 0xe6, 0xcd, 0x9b, 0x79, 0x96, 0xc1, 0x5a, 0x8c, 0x68, 0x42, 0x19, 0xf1, + 0xe5, 0x39, 0x8c, 0x13, 0x2e, 0xb9, 0xb5, 0x3e, 0x40, 0x43, 0xc4, 0xb0, 0x84, 0xe9, 0x13, 0xea, + 0xb0, 0xe3, 0x86, 0x5c, 0x44, 0x5c, 0xf8, 0x3d, 0x24, 0xb0, 0x3f, 0xdc, 0xee, 0x61, 0x89, 0xb6, + 0xfd, 0x90, 0x53, 0x96, 0x65, 0x39, 0xeb, 0x84, 0x13, 0xae, 0x5e, 0xfd, 0xf4, 0x4d, 0xdf, 0xde, + 0xc5, 0x31, 0x0f, 0x4f, 0x84, 0xe4, 0x09, 0x22, 0xd8, 0xc7, 0xac, 0x1f, 0x73, 0xca, 0xa4, 0x0e, + 0xde, 0xca, 0xa9, 0x13, 0x3c, 0x40, 0x17, 0xd9, 0xa5, 0xf7, 0x69, 0x09, 0xac, 0x75, 0x05, 0x39, + 0x92, 0xe8, 0x14, 0x1f, 0x26, 0x7c, 0x48, 0xfb, 0x38, 0xb1, 0x6c, 0xf0, 0x5f, 0x98, 0x60, 0x24, + 0x79, 0x62, 0x1b, 0x6d, 0xa3, 0xd3, 0x0c, 0xf2, 0xa3, 0x8a, 0x9c, 0x20, 0xca, 0xde, 0xed, 0xdb, + 0x4b, 0x3a, 0x92, 0x1d, 0xad, 0x17, 0xa0, 0x81, 0x22, 0x7e, 0xc6, 0xa4, 0x5d, 0x6f, 0x1b, 0x9d, + 0xd6, 0xce, 0x06, 0xcc, 0x14, 0xc0, 0x54, 0x01, 0xd4, 0x0a, 0xe0, 0x1e, 0xa7, 0x6c, 0xd7, 0x1c, + 0x7d, 0x7f, 0x50, 0x0b, 0x34, 0xdc, 0x7a, 0x03, 0x9a, 0x79, 0xa3, 0xc2, 0x36, 0xdb, 0xf5, 0x4e, + 0x6b, 0xe7, 0x21, 0x2c, 0xcd, 0x64, 0x5a, 0x14, 0x7c, 0xad, 0xb1, 0xba, 0xca, 0xaf, 0x5c, 0xab, + 0x0d, 0x5a, 0x04, 0xf3, 0x01, 0x0f, 0x91, 0xa4, 0x9c, 0xd9, 0xcb, 0x6d, 
0xa3, 0x63, 0x06, 0xd3, + 0x57, 0x69, 0xf7, 0x11, 0x67, 0xf4, 0x14, 0x27, 0x76, 0x23, 0xeb, 0x5e, 0x1f, 0x3d, 0x07, 0xd8, + 0xd5, 0x29, 0x04, 0x58, 0xc4, 0x9c, 0x09, 0xec, 0x7d, 0x35, 0xc0, 0x8d, 0x3c, 0xb8, 0x37, 0xa0, + 0x98, 0xc9, 0x7f, 0x3b, 0xa0, 0x8a, 0x2e, 0x73, 0x56, 0xd7, 0x3a, 0x58, 0x1e, 0x26, 0x1f, 0xe3, + 0x53, 0xa5, 0xb9, 0x19, 0x64, 0x07, 0xcf, 0x06, 0xb7, 0xcb, 0x6d, 0x17, 0x8a, 0xde, 0x02, 0xab, + 0x2b, 0xc8, 0x7b, 0x26, 0xfe, 0x76, 0xeb, 0xde, 0x3d, 0xe0, 0xcc, 0x56, 0x2a, 0x78, 0x0e, 0x94, + 0xb7, 0x74, 0xf4, 0xcf, 0x47, 0xa7, 0xb7, 0x53, 0xaa, 0x53, 0x70, 0x8c, 0x0c, 0xb0, 0xda, 0x15, + 0x24, 0x48, 0x3d, 0x7d, 0x88, 0x2e, 0xa2, 0xeb, 0x39, 0x5e, 0x82, 0x86, 0x72, 0xbf, 0xb0, 0x97, + 0x94, 0xd3, 0x3c, 0x38, 0xef, 0xeb, 0x83, 0xaa, 0xda, 0x11, 0x16, 0x82, 0x72, 0x16, 0xe8, 0x0c, + 0x6b, 0x1b, 0x98, 0xc7, 0xc1, 0x81, 0xb0, 0xeb, 0x2a, 0xf3, 0xfe, 0xfc, 0xcc, 0xe3, 0xe0, 0x60, + 0x1f, 0x49, 0x14, 0x28, 0xa8, 0xb5, 0x05, 0x6e, 0xf6, 0xb1, 0x08, 0x13, 0x1a, 0xa7, 0x7b, 0x3a, + 0x92, 0x29, 0x44, 0x2d, 0xb0, 0x19, 0xcc, 0x06, 0xbc, 0x0d, 0x70, 0xa7, 0xa2, 0x24, 0x57, 0xb9, + 0xf3, 0xd9, 0x04, 0xf5, 0xae, 0x20, 0x16, 0x01, 0x2b, 0xe5, 0x4f, 0x75, 0x73, 0x7e, 0x1b, 0x55, + 0x33, 0x3b, 0x70, 0x31, 0x5c, 0x4e, 0x68, 0x21, 0xd0, 0x9a, 0x36, 0xfc, 0xa3, 0xeb, 0xd3, 0x33, + 0x94, 0xb3, 0xb5, 0x08, 0xaa, 0xa0, 0x88, 0xc0, 0x6a, 0xd5, 0x82, 0x9d, 0x2b, 0x0b, 0x54, 0x90, + 0xce, 0xb3, 0x45, 0x91, 0x05, 0x1d, 0x01, 0x2b, 0x65, 0x27, 0x6e, 0xfe, 0xae, 0x84, 0x56, 0x05, + 0x17, 0xc3, 0x15, 0x44, 0x7d, 0xf0, 0x7f, 0xc9, 0x8d, 0x8f, 0xaf, 0xcc, 0x9f, 0x86, 0x39, 0x4f, + 0x17, 0x82, 0xe5, 0x2c, 0xbb, 0xaf, 0x46, 0x63, 0xd7, 0xb8, 0x1c, 0xbb, 0xc6, 0x8f, 0xb1, 0x6b, + 0x7c, 0x99, 0xb8, 0xb5, 0xcb, 0x89, 0x5b, 0xfb, 0x36, 0x71, 0x6b, 0x1f, 0x9e, 0x10, 0x2a, 0x4f, + 0xce, 0x7a, 0x30, 0xe4, 0x91, 0xaf, 0x4b, 0xaa, 0xa7, 0x7f, 0xee, 0x17, 0x3f, 0x9f, 0x8b, 0x18, + 0x8b, 0x5e, 0x43, 0xfd, 0x02, 0x9e, 0xff, 0x0c, 0x00, 0x00, 0xff, 0xff, 0x06, 0xc3, 0xe8, 0x19, + 0x94, 0x06, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
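The hunks above and below change MsgRelayPayment so that each relay is a RelaySession and any data-reliability proof travels in the parallel VRFs slice, index-aligned with Relays (the keeper's msg.VRFs[relayIdx] check relies on that alignment). A minimal sketch of assembling such a message under those assumptions follows; the helper name, field values, and the btcec import path are illustrative and not taken from this patch:

package example

import (
	btcSecp256k1 "github.com/btcsuite/btcd/btcec" // assumed import path for the key type accepted by sigs.SignRelay
	"github.com/lavanet/lava/relayer/sigs"
	pairingtypes "github.com/lavanet/lava/x/pairing/types"
)

// buildRelayPayment is an illustrative helper: it signs a single RelaySession
// and wraps it in a MsgRelayPayment with an index-aligned VRFs slice.
func buildRelayPayment(consumerKey *btcSecp256k1.PrivateKey, provider, chainID string, contentHash []byte, blockHeight int64) (*pairingtypes.MsgRelayPayment, error) {
	relay := &pairingtypes.RelaySession{
		Provider:    provider,
		ContentHash: contentHash,
		SessionId:   1,
		ChainID:     chainID,
		CuSum:       10,
		BlockHeight: blockHeight,
		RelayNum:    0,
	}
	// SignRelay clears Badge and Sig on its copy before hashing, so the
	// signature covers only the canonical RelaySession fields.
	sig, err := sigs.SignRelay(consumerKey, *relay)
	if err != nil {
		return nil, err
	}
	relay.Sig = sig
	return &pairingtypes.MsgRelayPayment{
		Creator: provider,
		Relays:  []*pairingtypes.RelaySession{relay},
		// One slot per relay; a nil entry means that relay carries no
		// data-reliability proof.
		VRFs: []*pairingtypes.VRFData{nil},
	}, nil
}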
@@ -1137,7 +1147,21 @@ func (m *MsgRelayPayment) MarshalToSizedBuffer(dAtA []byte) (int, error) { copy(dAtA[i:], m.DescriptionString) i = encodeVarintTx(dAtA, i, uint64(len(m.DescriptionString))) i-- - dAtA[i] = 0x1a + dAtA[i] = 0x22 + } + if len(m.VRFs) > 0 { + for iNdEx := len(m.VRFs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VRFs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } } if len(m.Relays) > 0 { for iNdEx := len(m.Relays) - 1; iNdEx >= 0; iNdEx-- { @@ -1341,6 +1365,12 @@ func (m *MsgRelayPayment) Size() (n int) { n += 1 + l + sovTx(uint64(l)) } } + if len(m.VRFs) > 0 { + for _, e := range m.VRFs { + l = e.Size() + n += 1 + l + sovTx(uint64(l)) + } + } l = len(m.DescriptionString) if l > 0 { n += 1 + l + sovTx(uint64(l)) @@ -2311,12 +2341,46 @@ func (m *MsgRelayPayment) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Relays = append(m.Relays, &RelayRequest{}) + m.Relays = append(m.Relays, &RelaySession{}) if err := m.Relays[len(m.Relays)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VRFs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VRFs = append(m.VRFs, &VRFData{}) + if err := m.VRFs[len(m.VRFs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field DescriptionString", wireType) } From 106004948d5a74a45a5b972e6ce26594ee929a2f Mon Sep 17 00:00:00 2001 From: omer mishael Date: Thu, 9 Mar 2023 04:50:05 +0200 Subject: [PATCH 089/123] WIP changing fields in relay --- go.mod | 3 +- go.sum | 4 + relayer/sigs/sigs.go | 24 +- utils/vrf.go | 18 +- x/pairing/keeper/msg_server_relay_payment.go | 19 +- .../msg_server_relay_payment_gov_test.go | 212 ++++++++---------- 6 files changed, 122 insertions(+), 158 deletions(-) diff --git a/go.mod b/go.mod index 87fa520dda..1cbdf2ddf5 100644 --- a/go.mod +++ b/go.mod @@ -34,7 +34,7 @@ require ( github.com/docker/distribution v2.8.1+incompatible github.com/fullstorydev/grpcurl v1.8.5 github.com/gogo/status v1.1.0 - github.com/golang/protobuf v1.5.2 + github.com/golang/protobuf v1.5.3 github.com/ignite-hq/cli v0.22.1-0.20220610070456-1b33c09fceb7 github.com/jhump/protoreflect v1.14.0 github.com/joho/godotenv v1.3.0 @@ -49,6 +49,7 @@ require ( github.com/ghodss/yaml v1.0.0 // indirect github.com/gogo/googleapis v1.4.0 // indirect github.com/golang/glog v1.0.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2 // indirect github.com/pelletier/go-toml/v2 v2.0.5 // indirect golang.org/x/mod v0.7.0 // indirect golang.org/x/tools v0.2.0 // indirect diff --git a/go.sum b/go.sum index 736691d9d4..8331621464 100644 --- a/go.sum +++ b/go.sum @@ -694,6 +694,8 @@ github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2 
h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3-0.20201103224600-674baa8c7fc3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -784,6 +786,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2 h1:gDLXvp5S9izjldquuoAhDzccbskOL6tDC5jMSyx3zxE= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2/go.mod h1:7pdNwVWBBHGiCxa9lAszqCJMbfTISJ7oMftp8+UGV08= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= github.com/gtank/merlin v0.1.1-0.20191105220539-8318aed1a79f/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= diff --git a/relayer/sigs/sigs.go b/relayer/sigs/sigs.go index 51a54297f2..bb1a08cb6c 100644 --- a/relayer/sigs/sigs.go +++ b/relayer/sigs/sigs.go @@ -62,10 +62,13 @@ func SignVRFData(pkey *btcSecp256k1.PrivateKey, vrfData *pairingtypes.VRFData) ( return sig, nil } -func SignRelay(pkey *btcSecp256k1.PrivateKey, request pairingtypes.RelayRequest) ([]byte, error) { - // - request.DataReliability = nil // its not a part of the signature, its a separate part +func prepareRelaySessionForSignature(request *pairingtypes.RelaySession) { + request.Badge = nil // its not a part of the signature, its a separate part request.Sig = []byte{} +} + +func SignRelay(pkey *btcSecp256k1.PrivateKey, request pairingtypes.RelaySession) ([]byte, error) { + prepareRelaySessionForSignature(&request) msgData := []byte(request.String()) // Sign sig, err := btcSecp256k1.SignCompact(btcSecp256k1.S256(), pkey, HashMsg(msgData), false) @@ -85,7 +88,7 @@ func AllDataHash(relayResponse *pairingtypes.RelayReply, relayReq *pairingtypes. 
func DataToSignRelayResponse(relayResponse *pairingtypes.RelayReply, relayReq *pairingtypes.RelayRequest) (dataToSign []byte) { // sign the data hash+query hash+nonce - queryHash := utils.CalculateQueryHash(*relayReq) + queryHash := utils.CalculateQueryHash(*relayReq.RelayData) data_hash := AllDataHash(relayResponse, relayReq) dataToSign = bytes.Join([][]byte{data_hash, queryHash}, nil) dataToSign = HashMsg(dataToSign) @@ -93,7 +96,7 @@ func DataToSignRelayResponse(relayResponse *pairingtypes.RelayReply, relayReq *p } func DataToVerifyProviderSig(request *pairingtypes.RelayRequest, data_hash []byte) (dataToSign []byte) { - queryHash := utils.CalculateQueryHash(*request) + queryHash := utils.CalculateQueryHash(*request.RelayData) dataToSign = bytes.Join([][]byte{data_hash, queryHash}, nil) dataToSign = HashMsg(dataToSign) return @@ -101,7 +104,7 @@ func DataToVerifyProviderSig(request *pairingtypes.RelayRequest, data_hash []byt func DataToSignResponseFinalizationData(relayResponse *pairingtypes.RelayReply, relayReq *pairingtypes.RelayRequest, clientAddress sdk.AccAddress) (dataToSign []byte) { // sign latest_block+finalized_blocks_hashes+session_id+block_height+relay_num - return DataToSignResponseFinalizationDataInner(relayResponse.LatestBlock, relayReq.SessionId, relayReq.BlockHeight, relayReq.RelayNum, relayResponse.FinalizedBlocksHashes, clientAddress) + return DataToSignResponseFinalizationDataInner(relayResponse.LatestBlock, relayReq.RelaySession.SessionId, relayReq.RelaySession.BlockHeight, relayReq.RelaySession.RelayNum, relayResponse.FinalizedBlocksHashes, clientAddress) } func DataToSignResponseFinalizationDataInner(latestBlock int64, sessionID uint64, blockHeight int64, relayNum uint64, finalizedBlockHashes []byte, clientAddress sdk.AccAddress) (dataToSign []byte) { @@ -206,11 +209,10 @@ func RecoverProviderPubKeyFromVrfDataAndQuery(request *pairingtypes.RelayRequest return RecoverProviderPubKeyFromQueryAndAllDataHash(request, request.DataReliability.AllDataHash, request.DataReliability.ProviderSig) } -func RecoverPubKeyFromRelay(in pairingtypes.RelayRequest) (secp256k1.PubKey, error) { - signature := in.Sig - in.Sig = []byte{} - in.DataReliability = nil - hash := HashMsg([]byte(in.String())) +func RecoverPubKeyFromRelay(relay pairingtypes.RelaySession) (secp256k1.PubKey, error) { + signature := relay.Sig // save sig + prepareRelaySessionForSignature(&relay) + hash := HashMsg([]byte(relay.String())) pubKey, err := RecoverPubKey(signature, hash) if err != nil { return nil, err diff --git a/utils/vrf.go b/utils/vrf.go index 684648a6bd..b0b9e7c969 100644 --- a/utils/vrf.go +++ b/utils/vrf.go @@ -53,35 +53,27 @@ func VerifyVrfProofFromVRFData(reliabilityData *pairingtypes.VRFData, vrf_pk Vrf } func VerifyVrfProof(request *pairingtypes.RelayRequest, vrf_pk VrfPubKey, relayEpochStart uint64) (valid bool) { - queryHash := CalculateQueryHash(*request) + queryHash := CalculateQueryHash(*request.RelayData) return verifyVRF(queryHash, request.DataReliability, vrf_pk, relayEpochStart) } -func CalculateVrfOnRelay(request *pairingtypes.RelayRequest, response *pairingtypes.RelayReply, vrf_sk vrf.PrivateKey, currentEpoch uint64) ([]byte, []byte) { +func CalculateVrfOnRelay(request *pairingtypes.RelayPrivateData, response *pairingtypes.RelayReply, vrf_sk vrf.PrivateKey, currentEpoch uint64) ([]byte, []byte) { vrfData0 := FormatDataForVrf(request, response, false, currentEpoch) vrfData1 := FormatDataForVrf(request, response, true, currentEpoch) return vrf_sk.Compute(vrfData0), 
vrf_sk.Compute(vrfData1) } -func ProveVrfOnRelay(request *pairingtypes.RelayRequest, response *pairingtypes.RelayReply, vrf_sk vrf.PrivateKey, differentiator bool, currentEpoch uint64) (vrf_res []byte, proof []byte) { +func ProveVrfOnRelay(request *pairingtypes.RelayPrivateData, response *pairingtypes.RelayReply, vrf_sk vrf.PrivateKey, differentiator bool, currentEpoch uint64) (vrf_res []byte, proof []byte) { vrfData := FormatDataForVrf(request, response, differentiator, currentEpoch) return vrf_sk.Prove(vrfData) } -func CalculateQueryHash(relayReq pairingtypes.RelayRequest) (queryHash []byte) { - relayReq.CuSum = 0 - relayReq.Provider = "" - relayReq.RelayNum = 0 - relayReq.SessionId = 0 - relayReq.Sig = nil - relayReq.QoSReport = nil - relayReq.DataReliability = nil - relayReq.UnresponsiveProviders = nil +func CalculateQueryHash(relayReq pairingtypes.RelayPrivateData) (queryHash []byte) { queryHash = tendermintcrypto.Sha256([]byte(relayReq.String())) return } -func FormatDataForVrf(request *pairingtypes.RelayRequest, response *pairingtypes.RelayReply, differentiator bool, currentEpoch uint64) (data []byte) { +func FormatDataForVrf(request *pairingtypes.RelayPrivateData, response *pairingtypes.RelayReply, differentiator bool, currentEpoch uint64) (data []byte) { // vrf is calculated on: query hash, relayer signature and 0/1 byte queryHash := CalculateQueryHash(*request) currentEpochBytes := make([]byte, 8) diff --git a/x/pairing/keeper/msg_server_relay_payment.go b/x/pairing/keeper/msg_server_relay_payment.go index d697ac676d..3960aab7b6 100644 --- a/x/pairing/keeper/msg_server_relay_payment.go +++ b/x/pairing/keeper/msg_server_relay_payment.go @@ -31,7 +31,7 @@ func (k msgServer) RelayPayment(goCtx context.Context, msg *types.MsgRelayPaymen errorLogAndFormat := func(name string, attrs map[string]string, details string) (*types.MsgRelayPaymentResponse, error) { return nil, utils.LavaError(ctx, logger, name, attrs, details) } - for _, relay := range msg.Relays { + for relayIdx, relay := range msg.Relays { if relay.BlockHeight > ctx.BlockHeight() { return errorLogAndFormat("relay_future_block", map[string]string{"blockheight": string(relay.Sig)}, "relay request for a block in the future") } @@ -80,15 +80,10 @@ func (k msgServer) RelayPayment(goCtx context.Context, msg *types.MsgRelayPaymen return errorLogAndFormat("relay_payment_epoch_start", details, "problem getting epoch start") } - expectedInterfaces := k.specKeeper.GetExpectedInterfacesForSpec(ctx, relay.ChainID) - if !expectedInterfaces[relay.ApiInterface] { - details := map[string]string{"expectedInterfaces": fmt.Sprintf("%+v", expectedInterfaces), "apiInterface": relay.ApiInterface} - return errorLogAndFormat("relay_payment_apiInterface", details, "unexpected api interface") - } - payReliability := false // validate data reliability - if relay.DataReliability != nil { + if msg.VRFs != nil && msg.VRFs[relayIdx] != nil { + vrfData := msg.VRFs[relayIdx] details := map[string]string{"client": clientAddr.String(), "provider": providerAddr.String()} if !spec.DataReliabilityEnabled { details["chainID"] = relay.ChainID @@ -96,12 +91,12 @@ func (k msgServer) RelayPayment(goCtx context.Context, msg *types.MsgRelayPaymen } // verify user signed this data reliability - valid, err := sigs.ValidateSignerOnVRFData(clientAddr, *relay.DataReliability) + valid, err := sigs.ValidateSignerOnVRFData(clientAddr, *vrfData) if err != nil || !valid { details["error"] = err.Error() return errorLogAndFormat("relay_data_reliability_signer", details, "invalid 
signature by consumer on data reliability message") } - otherProviderAddress, err := sigs.RecoverProviderPubKeyFromVrfDataOnly(relay.DataReliability) + otherProviderAddress, err := sigs.RecoverProviderPubKeyFromVrfDataOnly(vrfData) if err != nil { return errorLogAndFormat("relay_data_reliability_other_provider", details, "invalid signature by other provider on data reliability message") } @@ -134,7 +129,7 @@ func (k msgServer) RelayPayment(goCtx context.Context, msg *types.MsgRelayPaymen return errorLogAndFormat("relay_data_reliability_client_vrf_pk", details, "invalid parsing of vrf pk form bech32") } // signatures valid, validate VRF signing - valid = utils.VerifyVrfProofFromVRFData(relay.DataReliability, *vrfPk, epochStart) + valid = utils.VerifyVrfProofFromVRFData(vrfData, *vrfPk, epochStart) if !valid { details["error"] = "vrf signing is invalid, proof result mismatch" return errorLogAndFormat("relay_data_reliability_vrf_proof", details, "invalid vrf proof by consumer, result doesn't correspond to proof") @@ -146,7 +141,7 @@ func (k msgServer) RelayPayment(goCtx context.Context, msg *types.MsgRelayPaymen return errorLogAndFormat("relay_payment_reliability_servicerstopaircount", details, err.Error()) } - index, vrfErr := utils.GetIndexForVrf(relay.DataReliability.VrfValue, uint32(providersCount), spec.ReliabilityThreshold) + index, vrfErr := utils.GetIndexForVrf(vrfData.VrfValue, uint32(providersCount), spec.ReliabilityThreshold) if vrfErr != nil { details["error"] = vrfErr.Error() details["VRF_index"] = strconv.FormatInt(index, 10) diff --git a/x/pairing/keeper/msg_server_relay_payment_gov_test.go b/x/pairing/keeper/msg_server_relay_payment_gov_test.go index 16be3cecb0..b1279b54f8 100644 --- a/x/pairing/keeper/msg_server_relay_payment_gov_test.go +++ b/x/pairing/keeper/msg_server_relay_payment_gov_test.go @@ -64,18 +64,15 @@ func TestRelayPaymentGovQosWeightChange(t *testing.T) { for ti, tt := range tests { t.Run(tt.name, func(t *testing.T) { // Create relay request that was done in the test's epoch. Change session ID each iteration to avoid double spending error (provider asks reward for the same transaction twice) - relayRequest := &pairingtypes.RelayRequest{ - Provider: ts.providers[0].address.String(), - ApiUrl: "", - Data: []byte(ts.spec.Apis[0].Name), - SessionId: uint64(ti), - ChainID: ts.spec.Name, - CuSum: ts.spec.Apis[0].ComputeUnits * 10, - BlockHeight: int64(tt.epoch), - RelayNum: 0, - RequestBlock: -1, - QoSReport: badQoS, - DataReliability: nil, + relayRequest := &pairingtypes.RelaySession{ + Provider: ts.providers[0].address.String(), + ContentHash: []byte(ts.spec.Apis[0].Name), + SessionId: uint64(ti), + ChainID: ts.spec.Name, + CuSum: ts.spec.Apis[0].ComputeUnits * 10, + BlockHeight: int64(tt.epoch), + RelayNum: 0, + QoSReport: badQoS, } // Sign and send the payment requests for block 0 tx @@ -84,7 +81,7 @@ func TestRelayPaymentGovQosWeightChange(t *testing.T) { require.Nil(t, err) // Add the relay request to the Relays array (for relayPaymentMessage()) - var Relays []*pairingtypes.RelayRequest + var Relays []*pairingtypes.RelaySession Relays = append(Relays, relayRequest) // Get provider's and consumer's balance before payment @@ -169,17 +166,14 @@ func TestRelayPaymentGovEpochBlocksDecrease(t *testing.T) { for ti, tt := range tests { t.Run(tt.name, func(t *testing.T) { // Create relay request that was done in the test's epoch. 
Change session ID each iteration to avoid double spending error (provider asks reward for the same transaction twice) - relayRequest := &pairingtypes.RelayRequest{ - Provider: ts.providers[0].address.String(), - ApiUrl: "", - Data: []byte(ts.spec.Apis[0].Name), - SessionId: uint64(ti), - ChainID: ts.spec.Name, - CuSum: ts.spec.Apis[0].ComputeUnits * 10, - BlockHeight: int64(tt.epoch), - RelayNum: 0, - RequestBlock: -1, - DataReliability: nil, + relayRequest := &pairingtypes.RelaySession{ + Provider: ts.providers[0].address.String(), + ContentHash: []byte(ts.spec.Apis[0].Name), + SessionId: uint64(ti), + ChainID: ts.spec.Name, + CuSum: ts.spec.Apis[0].ComputeUnits * 10, + BlockHeight: int64(tt.epoch), + RelayNum: 0, } // Sign and send the payment requests @@ -188,7 +182,7 @@ func TestRelayPaymentGovEpochBlocksDecrease(t *testing.T) { require.Nil(t, err) // Request payment (helper function validates the balances and verifies if we should get an error through valid) - var Relays []*pairingtypes.RelayRequest + var Relays []*pairingtypes.RelaySession Relays = append(Relays, relayRequest) relayPaymentMessage := pairingtypes.MsgRelayPayment{Creator: ts.providers[0].address.String(), Relays: Relays} payAndVerifyBalance(t, ts, relayPaymentMessage, tt.valid, ts.clients[0].address, ts.providers[0].address) @@ -251,17 +245,14 @@ func TestRelayPaymentGovEpochBlocksIncrease(t *testing.T) { for ti, tt := range tests { t.Run(tt.name, func(t *testing.T) { // Create relay request that was done in the test's epoch+block. Change session ID each iteration to avoid double spending error (provider asks reward for the same transaction twice) - relayRequest := &pairingtypes.RelayRequest{ - Provider: ts.providers[0].address.String(), - ApiUrl: "", - Data: []byte(ts.spec.Apis[0].Name), - SessionId: uint64(ti), - ChainID: ts.spec.Name, - CuSum: ts.spec.Apis[0].ComputeUnits * 10, - BlockHeight: int64(tt.epoch), - RelayNum: 0, - RequestBlock: -1, - DataReliability: nil, + relayRequest := &pairingtypes.RelaySession{ + Provider: ts.providers[0].address.String(), + ContentHash: []byte(ts.spec.Apis[0].Name), + SessionId: uint64(ti), + ChainID: ts.spec.Name, + CuSum: ts.spec.Apis[0].ComputeUnits * 10, + BlockHeight: int64(tt.epoch), + RelayNum: 0, } // Sign and send the payment requests @@ -270,7 +261,7 @@ func TestRelayPaymentGovEpochBlocksIncrease(t *testing.T) { require.Nil(t, err) // Request payment (helper function validates the balances and verifies if we should get an error through valid) - var Relays []*pairingtypes.RelayRequest + var Relays []*pairingtypes.RelaySession Relays = append(Relays, relayRequest) relayPaymentMessage := pairingtypes.MsgRelayPayment{Creator: ts.providers[0].address.String(), Relays: Relays} payAndVerifyBalance(t, ts, relayPaymentMessage, tt.valid, ts.clients[0].address, ts.providers[0].address) @@ -338,17 +329,14 @@ func TestRelayPaymentGovEpochToSaveDecrease(t *testing.T) { } // Create relay request that was done in the test's epoch+block. 
Change session ID each iteration to avoid double spending error (provider asks reward for the same transaction twice) - relayRequest := &pairingtypes.RelayRequest{ - Provider: ts.providers[0].address.String(), - ApiUrl: "", - Data: []byte(ts.spec.Apis[0].Name), - SessionId: uint64(ti), - ChainID: ts.spec.Name, - CuSum: ts.spec.Apis[0].ComputeUnits * 10, - BlockHeight: int64(tt.epoch), - RelayNum: 0, - RequestBlock: -1, - DataReliability: nil, + relayRequest := &pairingtypes.RelaySession{ + Provider: ts.providers[0].address.String(), + ContentHash: []byte(ts.spec.Apis[0].Name), + SessionId: uint64(ti), + ChainID: ts.spec.Name, + CuSum: ts.spec.Apis[0].ComputeUnits * 10, + BlockHeight: int64(tt.epoch), + RelayNum: 0, } // Sign and send the payment requests @@ -357,7 +345,7 @@ func TestRelayPaymentGovEpochToSaveDecrease(t *testing.T) { require.Nil(t, err) // Request payment (helper function validates the balances and verifies if we should get an error through valid) - var Relays []*pairingtypes.RelayRequest + var Relays []*pairingtypes.RelaySession Relays = append(Relays, relayRequest) relayPaymentMessage := pairingtypes.MsgRelayPayment{Creator: ts.providers[0].address.String(), Relays: Relays} payAndVerifyBalance(t, ts, relayPaymentMessage, tt.valid, ts.clients[0].address, ts.providers[0].address) @@ -414,17 +402,14 @@ func TestRelayPaymentGovEpochToSaveIncrease(t *testing.T) { for ti, tt := range tests { t.Run(tt.name, func(t *testing.T) { // Create relay request that was done in the test's epoch+block. Change session ID each iteration to avoid double spending error (provider asks reward for the same transaction twice) - relayRequest := &pairingtypes.RelayRequest{ - Provider: ts.providers[0].address.String(), - ApiUrl: "", - Data: []byte(ts.spec.Apis[0].Name), - SessionId: uint64(ti), - ChainID: ts.spec.Name, - CuSum: ts.spec.Apis[0].ComputeUnits * 10, - BlockHeight: int64(tt.epoch), - RelayNum: 0, - RequestBlock: -1, - DataReliability: nil, + relayRequest := &pairingtypes.RelaySession{ + Provider: ts.providers[0].address.String(), + ContentHash: []byte(ts.spec.Apis[0].Name), + SessionId: uint64(ti), + ChainID: ts.spec.Name, + CuSum: ts.spec.Apis[0].ComputeUnits * 10, + BlockHeight: int64(tt.epoch), + RelayNum: 0, } // Sign and send the payment requests @@ -433,7 +418,7 @@ func TestRelayPaymentGovEpochToSaveIncrease(t *testing.T) { require.Nil(t, err) // Request payment (helper function validates the balances and verifies if we should get an error through valid) - var Relays []*pairingtypes.RelayRequest + var Relays []*pairingtypes.RelaySession Relays = append(Relays, relayRequest) relayPaymentMessage := pairingtypes.MsgRelayPayment{Creator: ts.providers[0].address.String(), Relays: Relays} payAndVerifyBalance(t, ts, relayPaymentMessage, tt.valid, ts.clients[0].address, ts.providers[0].address) @@ -507,17 +492,14 @@ func TestRelayPaymentGovStakeToMaxCUListMaxCUDecrease(t *testing.T) { for ti, tt := range tests { t.Run(tt.name, func(t *testing.T) { - relayRequest := &pairingtypes.RelayRequest{ - Provider: ts.providers[0].address.String(), - ApiUrl: "", - Data: []byte(ts.spec.Apis[0].Name), - SessionId: uint64(ti), - ChainID: ts.spec.Name, - CuSum: uint64(250001), // the relayRequest costs 250001 (more than the previous limit, and less than in the new limit). 
This should influence the validity of the request - BlockHeight: int64(tt.epoch), - RelayNum: 0, - RequestBlock: -1, - DataReliability: nil, + relayRequest := &pairingtypes.RelaySession{ + Provider: ts.providers[0].address.String(), + ContentHash: []byte(ts.spec.Apis[0].Name), + SessionId: uint64(ti), + ChainID: ts.spec.Name, + CuSum: uint64(250001), // the relayRequest costs 250001 (more than the previous limit, and less than in the new limit). This should influence the validity of the request + BlockHeight: int64(tt.epoch), + RelayNum: 0, } // Sign and send the payment requests for block 20 (=epochBeforeChange) @@ -526,7 +508,7 @@ func TestRelayPaymentGovStakeToMaxCUListMaxCUDecrease(t *testing.T) { require.Nil(t, err) // Add the relay request to the Relays array (for relayPaymentMessage()) - var Relays []*pairingtypes.RelayRequest + var Relays []*pairingtypes.RelaySession Relays = append(Relays, relayRequest) relayPaymentMessage := pairingtypes.MsgRelayPayment{Creator: ts.providers[0].address.String(), Relays: Relays} @@ -601,17 +583,14 @@ func TestRelayPaymentGovStakeToMaxCUListStakeThresholdIncrease(t *testing.T) { for ti, tt := range tests { t.Run(tt.name, func(t *testing.T) { - relayRequest := &pairingtypes.RelayRequest{ - Provider: ts.providers[0].address.String(), - ApiUrl: "", - Data: []byte(ts.spec.Apis[0].Name), - SessionId: uint64(ti), - ChainID: ts.spec.Name, - CuSum: uint64(200000), // the relayRequest costs 200000 (less than the previous limit, and more than in the new limit). This should influence the validity of the request - BlockHeight: int64(tt.epoch), - RelayNum: 0, - RequestBlock: -1, - DataReliability: nil, + relayRequest := &pairingtypes.RelaySession{ + Provider: ts.providers[0].address.String(), + ContentHash: []byte(ts.spec.Apis[0].Name), + SessionId: uint64(ti), + ChainID: ts.spec.Name, + CuSum: uint64(200000), // the relayRequest costs 200000 (less than the previous limit, and more than in the new limit). This should influence the validity of the request + BlockHeight: int64(tt.epoch), + RelayNum: 0, } // Sign and send the payment requests for block 20 (=epochBeforeChange) @@ -620,7 +599,7 @@ func TestRelayPaymentGovStakeToMaxCUListStakeThresholdIncrease(t *testing.T) { require.Nil(t, err) // Add the relay request to the Relays array (for relayPaymentMessage()) - var Relays []*pairingtypes.RelayRequest + var Relays []*pairingtypes.RelaySession Relays = append(Relays, relayRequest) relayPaymentMessage := pairingtypes.MsgRelayPayment{Creator: ts.providers[0].address.String(), Relays: Relays} @@ -697,17 +676,14 @@ func TestRelayPaymentGovEpochBlocksMultipleChanges(t *testing.T) { } // Create relay request that was done in the test's epoch+block. 
Change session ID each iteration to avoid double spending error (provider asks reward for the same transaction twice) - relayRequest := &pairingtypes.RelayRequest{ - Provider: ts.providers[0].address.String(), - ApiUrl: "", - Data: []byte(ts.spec.Apis[0].Name), - SessionId: uint64(ti), - ChainID: ts.spec.Name, - CuSum: ts.spec.Apis[0].ComputeUnits * 10, - BlockHeight: int64(tt.paymentEpoch), - RelayNum: 0, - RequestBlock: -1, - DataReliability: nil, + relayRequest := &pairingtypes.RelaySession{ + Provider: ts.providers[0].address.String(), + ContentHash: []byte(ts.spec.Apis[0].Name), + SessionId: uint64(ti), + ChainID: ts.spec.Name, + CuSum: ts.spec.Apis[0].ComputeUnits * 10, + BlockHeight: int64(tt.paymentEpoch), + RelayNum: 0, } // Sign and send the payment requests @@ -716,7 +692,7 @@ func TestRelayPaymentGovEpochBlocksMultipleChanges(t *testing.T) { require.Nil(t, err) // Request payment (helper function validates the balances and verifies if we should get an error through valid) - var Relays []*pairingtypes.RelayRequest + var Relays []*pairingtypes.RelaySession Relays = append(Relays, relayRequest) relayPaymentMessage := pairingtypes.MsgRelayPayment{Creator: ts.providers[0].address.String(), Relays: Relays} payAndVerifyBalance(t, ts, relayPaymentMessage, tt.valid, ts.clients[0].address, ts.providers[0].address) @@ -826,17 +802,14 @@ func TestStakePaymentUnstake(t *testing.T) { // Advance an epoch to apply EpochBlocks change. From here, the documented blockHeight is with offset of initEpochBlocks ts.ctx = testkeeper.AdvanceEpoch(ts.ctx, ts.keepers) // blockHeight = 20 - relayRequest := &pairingtypes.RelayRequest{ - Provider: ts.providers[0].address.String(), - ApiUrl: "", - Data: []byte(ts.spec.Apis[0].Name), - SessionId: uint64(1), - ChainID: ts.spec.Name, - CuSum: uint64(10000), - BlockHeight: int64(sdk.UnwrapSDKContext(ts.ctx).BlockHeight()), - RelayNum: 0, - RequestBlock: -1, - DataReliability: nil, + relayRequest := &pairingtypes.RelaySession{ + Provider: ts.providers[0].address.String(), + ContentHash: []byte(ts.spec.Apis[0].Name), + SessionId: uint64(1), + ChainID: ts.spec.Name, + CuSum: uint64(10000), + BlockHeight: int64(sdk.UnwrapSDKContext(ts.ctx).BlockHeight()), + RelayNum: 0, } // Sign and send the payment requests for block 20 (=epochBeforeChange) @@ -845,7 +818,7 @@ func TestStakePaymentUnstake(t *testing.T) { require.Nil(t, err) // Add the relay request to the Relays array (for relayPaymentMessage()) - var Relays []*pairingtypes.RelayRequest + var Relays []*pairingtypes.RelaySession Relays = append(Relays, relayRequest) // get payment @@ -903,17 +876,14 @@ func TestRelayPaymentMemoryTransferAfterEpochChangeWithGovParamChange(t *testing ts.ctx = testkeeper.AdvanceEpoch(ts.ctx, ts.keepers) epochAfterEpochBlocksChanged := ts.keepers.Epochstorage.GetEpochStart(sdk.UnwrapSDKContext(ts.ctx)) - relayRequest := &pairingtypes.RelayRequest{ - Provider: ts.providers[0].address.String(), - ApiUrl: "", - Data: []byte(ts.spec.Apis[0].Name), - SessionId: uint64(1), - ChainID: ts.spec.Name, - CuSum: uint64(10000), - BlockHeight: int64(epochAfterEpochBlocksChanged), - RelayNum: 0, - RequestBlock: -1, - DataReliability: nil, + relayRequest := &pairingtypes.RelaySession{ + Provider: ts.providers[0].address.String(), + ContentHash: []byte(ts.spec.Apis[0].Name), + SessionId: uint64(1), + ChainID: ts.spec.Name, + CuSum: uint64(10000), + BlockHeight: int64(epochAfterEpochBlocksChanged), + RelayNum: 0, } // Sign the payment request @@ -951,7 +921,7 @@ func 
TestRelayPaymentMemoryTransferAfterEpochChangeWithGovParamChange(t *testing } // Helper function to verify the relay payment objects that are saved on-chain after getting payment from a relay request -func verifyRelayPaymentObjects(t *testing.T, ts *testStruct, relayRequest *pairingtypes.RelayRequest, objectExists bool) { +func verifyRelayPaymentObjects(t *testing.T, ts *testStruct, relayRequest *pairingtypes.RelaySession, objectExists bool) { // Get EpochPayment struct from current epoch and perform basic verifications epochPayments, found, epochPaymentKey := ts.keepers.Pairing.GetEpochPaymentsFromBlock(sdk.UnwrapSDKContext(ts.ctx), uint64(relayRequest.GetBlockHeight())) if objectExists { From 52f515cc456e6015ea27e5273d4383124fd0c16b Mon Sep 17 00:00:00 2001 From: omer mishael Date: Thu, 9 Mar 2023 05:51:12 +0200 Subject: [PATCH 090/123] wip --- .../keeper/msg_server_relay_payment_test.go | 129 +++++++++--------- 1 file changed, 61 insertions(+), 68 deletions(-) diff --git a/x/pairing/keeper/msg_server_relay_payment_test.go b/x/pairing/keeper/msg_server_relay_payment_test.go index e18a85c192..f036b4c021 100644 --- a/x/pairing/keeper/msg_server_relay_payment_test.go +++ b/x/pairing/keeper/msg_server_relay_payment_test.go @@ -136,17 +136,14 @@ func TestRelayPaymentMemoryTransferAfterEpochChange(t *testing.T) { } // Create relay request that was done in the first epoch. Change session ID each iteration to avoid double spending error (provider asks reward for the same transaction twice) - relayRequest := &types.RelayRequest{ - Provider: ts.providers[0].address.String(), - ApiUrl: "", - Data: []byte(ts.spec.Apis[0].Name), - SessionId: sessionCounter, - ChainID: ts.spec.Name, - CuSum: ts.spec.Apis[0].ComputeUnits * 10, - BlockHeight: int64(firstEpoch), - RelayNum: 0, - RequestBlock: -1, - DataReliability: nil, + relayRequest := &types.RelaySession{ + Provider: ts.providers[0].address.String(), + ContentHash: []byte(ts.spec.Apis[0].Name), + SessionId: sessionCounter, + ChainID: ts.spec.Name, + CuSum: ts.spec.Apis[0].ComputeUnits * 10, + BlockHeight: int64(firstEpoch), + RelayNum: 0, } // Sign and send the payment requests @@ -155,7 +152,7 @@ func TestRelayPaymentMemoryTransferAfterEpochChange(t *testing.T) { require.Nil(t, err) // Request payment (helper function validates the balances and verifies if we should get an error through valid) - var Relays []*types.RelayRequest + var Relays []*types.RelaySession Relays = append(Relays, relayRequest) relayPaymentMessage := types.MsgRelayPayment{Creator: ts.providers[0].address.String(), Relays: Relays} payAndVerifyBalance(t, ts, relayPaymentMessage, tt.valid, ts.clients[0].address, ts.providers[0].address) @@ -206,17 +203,15 @@ func TestRelayPaymentBlockHeight(t *testing.T) { require.Nil(t, err) ts.ctx = testkeeper.AdvanceEpoch(ts.ctx, ts.keepers) - relayRequest := &types.RelayRequest{ - Provider: ts.providers[0].address.String(), - ApiUrl: "", - Data: []byte(ts.spec.Apis[0].Name), - SessionId: uint64(1), - ChainID: ts.spec.Name, - CuSum: ts.spec.Apis[0].ComputeUnits * 10, - BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight() + tt.blockTime, - RelayNum: 0, - RequestBlock: -1, - DataReliability: nil, + relayRequest := &types.RelaySession{ + Provider: ts.providers[0].address.String(), + + Data: []byte(ts.spec.Apis[0].Name), + SessionId: uint64(1), + ChainID: ts.spec.Name, + CuSum: ts.spec.Apis[0].ComputeUnits * 10, + BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight() + tt.blockTime, + RelayNum: 0, } sig, err := 
sigs.SignRelay(ts.clients[0].secretKey, *relayRequest) @@ -271,24 +266,22 @@ func TestRelayPaymentOverUse(t *testing.T) { maxcu, err := ts.keepers.Pairing.GetAllowedCUForBlock(sdk.UnwrapSDKContext(ts.ctx), uint64(sdk.UnwrapSDKContext(ts.ctx).BlockHeight()), entry) require.Nil(t, err) - relayRequest := &types.RelayRequest{ - Provider: ts.providers[0].address.String(), - ApiUrl: "", - Data: []byte(ts.spec.Apis[0].Name), - SessionId: uint64(1), - ChainID: ts.spec.Name, - CuSum: maxcu * 2, - BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), - RelayNum: 0, - RequestBlock: -1, - DataReliability: nil, + relayRequest := &types.RelaySession{ + Provider: ts.providers[0].address.String(), + ApiUrl: "", + Data: []byte(ts.spec.Apis[0].Name), + SessionId: uint64(1), + ChainID: ts.spec.Name, + CuSum: maxcu * 2, + BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), + RelayNum: 0, } sig, err := sigs.SignRelay(ts.clients[0].secretKey, *relayRequest) relayRequest.Sig = sig require.Nil(t, err) - var Relays []*types.RelayRequest + var Relays []*types.RelaySession Relays = append(Relays, relayRequest) // TODO: currently over use is returning an error and doesnt get to balance zero. we will fix it in the future so this can be uncommented. // balance := ts.keepers.BankKeeper.GetBalance(sdk.UnwrapSDKContext(ts.ctx), ts.providers[0].address, epochstoragetypes.TokenDenom).Amount.Int64() @@ -327,10 +320,10 @@ func TestRelayPaymentNotUnstakingProviderForUnresponsivenessIfNoEpochInformation unresponsiveProvidersData, err := json.Marshal([]string{ts.providers[1].address.String()}) require.Nil(t, err) - var Relays []*types.RelayRequest + var Relays []*types.RelaySession for clientIndex := 0; clientIndex < testClientAmount; clientIndex++ { // testing testClientAmount of complaints - relayRequest := &types.RelayRequest{ + relayRequest := &types.RelaySession{ Provider: ts.providers[0].address.String(), ApiUrl: "", Data: []byte(ts.spec.Apis[0].Name), @@ -382,10 +375,10 @@ func TestRelayPaymentUnstakingProviderForUnresponsivenessWithBadDataInput(t *tes unresponsiveProvidersData[i] = badData } require.Nil(t, err) - var Relays []*types.RelayRequest + var Relays []*types.RelaySession var totalCu uint64 for clientIndex := 0; clientIndex < testClientAmount; clientIndex++ { // testing testClientAmount of complaints - relayRequest := &types.RelayRequest{ + relayRequest := &types.RelaySession{ Provider: ts.providers[0].address.String(), ApiUrl: "", Data: []byte(ts.spec.Apis[0].Name), @@ -424,7 +417,7 @@ func TestRelayPaymentNotUnstakingProviderForUnresponsivenessBecauseOfServices(t var RelaysForUnresponsiveProviderInFirstTwoEpochs []*types.RelayRequest for i := 0; i < 2; i++ { // move to epoch 3 so we can check enough epochs in the past - relayRequest := &types.RelayRequest{ + relayRequest := &types.RelaySession{ Provider: ts.providers[1].address.String(), ApiUrl: "", Data: []byte(ts.spec.Apis[0].Name), @@ -440,7 +433,7 @@ func TestRelayPaymentNotUnstakingProviderForUnresponsivenessBecauseOfServices(t sig, err := sigs.SignRelay(ts.clients[i].secretKey, *relayRequest) relayRequest.Sig = sig require.Nil(t, err) - RelaysForUnresponsiveProviderInFirstTwoEpochs = []*types.RelayRequest{relayRequest} // each epoch get one service + RelaysForUnresponsiveProviderInFirstTwoEpochs = []*types.RelaySession{relayRequest} // each epoch get one service _, err = ts.servers.PairingServer.RelayPayment(ts.ctx, &types.MsgRelayPayment{Creator: ts.providers[1].address.String(), Relays: RelaysForUnresponsiveProviderInFirstTwoEpochs}) 
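The unresponsiveness tests in this patch attach the complaint as raw bytes: the client JSON-encodes the list of provider addresses it considers unresponsive and places the result in the relay's UnresponsiveProviders field before signing. A minimal standalone sketch of just that encoding step (buildComplaint and the sample address are invented for illustration and are not part of the test suite):

package main

import (
	"encoding/json"
	"fmt"
)

// buildComplaint JSON-encodes the addresses of providers the client wants to
// report as unresponsive, matching the shape the tests marshal into the
// UnresponsiveProviders field of a relay.
func buildComplaint(addrs []string) ([]byte, error) {
	return json.Marshal(addrs)
}

func main() {
	data, err := buildComplaint([]string{"lava@exampleprovideraddress"})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // ["lava@exampleprovideraddress"]
}
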
require.Nil(t, err) ts.ctx = testkeeper.AdvanceEpoch(ts.ctx, ts.keepers) // after payment move one epoch @@ -448,10 +441,10 @@ func TestRelayPaymentNotUnstakingProviderForUnresponsivenessBecauseOfServices(t unresponsiveProvidersData, err := json.Marshal([]string{ts.providers[1].address.String()}) require.Nil(t, err) - var Relays []*types.RelayRequest + var Relays []*types.RelaySession for clientIndex := 0; clientIndex < testClientAmount; clientIndex++ { // testing testClientAmount of complaints - relayRequest := &types.RelayRequest{ + relayRequest := &types.RelaySession{ Provider: ts.providers[0].address.String(), ApiUrl: "", Data: []byte(ts.spec.Apis[0].Name), @@ -492,7 +485,7 @@ func TestRelayPaymentDoubleSpending(t *testing.T) { ts.ctx = testkeeper.AdvanceEpoch(ts.ctx, ts.keepers) cuSum := ts.spec.GetApis()[0].ComputeUnits * 10 - relayRequest := &types.RelayRequest{ + relayRequest := &types.RelaySession{ Provider: ts.providers[0].address.String(), ApiUrl: "", Data: []byte(ts.spec.Apis[0].Name), @@ -509,7 +502,7 @@ func TestRelayPaymentDoubleSpending(t *testing.T) { relayRequest.Sig = sig require.Nil(t, err) - var Relays []*types.RelayRequest + var Relays []*types.RelaySession Relays = append(Relays, relayRequest) relayRequest2 := *relayRequest Relays = append(Relays, &relayRequest2) @@ -540,7 +533,7 @@ func TestRelayPaymentDataModification(t *testing.T) { require.Nil(t, err) ts.ctx = testkeeper.AdvanceEpoch(ts.ctx, ts.keepers) - relayRequest := &types.RelayRequest{ + relayRequest := &types.RelaySession{ Provider: ts.providers[0].address.String(), ApiUrl: "", Data: []byte(ts.spec.Apis[0].Name), @@ -574,7 +567,7 @@ func TestRelayPaymentDataModification(t *testing.T) { relayRequest.CuSum = tt.cu relayRequest.SessionId = uint64(tt.id) - var Relays []*types.RelayRequest + var Relays []*types.RelaySession Relays = append(Relays, relayRequest) _, err = ts.servers.PairingServer.RelayPayment(ts.ctx, &types.MsgRelayPayment{Creator: ts.providers[0].address.String(), Relays: Relays}) @@ -595,7 +588,7 @@ func TestRelayPaymentDelayedDoubleSpending(t *testing.T) { require.Nil(t, err) ts.ctx = testkeeper.AdvanceEpoch(ts.ctx, ts.keepers) - relayRequest := &types.RelayRequest{ + relayRequest := &types.RelaySession{ Provider: ts.providers[0].address.String(), ApiUrl: "", Data: []byte(ts.spec.Apis[0].Name), @@ -612,7 +605,7 @@ func TestRelayPaymentDelayedDoubleSpending(t *testing.T) { relayRequest.Sig = sig require.Nil(t, err) - var Relays []*types.RelayRequest + var Relays []*types.RelaySession relay := *relayRequest Relays = append(Relays, &relay) @@ -683,7 +676,7 @@ func TestRelayPaymentOldEpochs(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { cuSum := ts.spec.Apis[0].ComputeUnits * 10 - relayRequest := &types.RelayRequest{ + relayRequest := &types.RelaySession{ Provider: ts.providers[0].address.String(), ApiUrl: "", Data: []byte(ts.spec.Apis[0].Name), @@ -700,7 +693,7 @@ func TestRelayPaymentOldEpochs(t *testing.T) { relayRequest.Sig = sig require.Nil(t, err) - var Relays []*types.RelayRequest + var Relays []*types.RelaySession Relays = append(Relays, relayRequest) balance := ts.keepers.BankKeeper.GetBalance(sdk.UnwrapSDKContext(ts.ctx), ts.providers[0].address, epochstoragetypes.TokenDenom).Amount.Int64() @@ -755,7 +748,7 @@ func TestRelayPaymentQoS(t *testing.T) { cuSum := ts.spec.Apis[0].ComputeUnits * 10 QoS := &types.QualityOfServiceReport{Latency: tt.latency, Availability: tt.availability, Sync: tt.sync} - relayRequest := &types.RelayRequest{ + relayRequest := 
&types.RelaySession{ Provider: ts.providers[0].address.String(), ApiUrl: "", Data: []byte(ts.spec.Apis[0].Name), @@ -773,7 +766,7 @@ func TestRelayPaymentQoS(t *testing.T) { relayRequest.Sig = sig require.Nil(t, err) - var Relays []*types.RelayRequest + var Relays []*types.RelaySession relay := *relayRequest Relays = append(Relays, &relay) @@ -843,7 +836,7 @@ func TestRelayPaymentDataReliability(t *testing.T) { cuSum := ts.spec.Apis[0].ComputeUnits * 10 QoS := &types.QualityOfServiceReport{Latency: sdk.NewDecWithPrec(1, 0), Availability: sdk.NewDecWithPrec(1, 0), Sync: sdk.NewDecWithPrec(1, 0)} - relayRequest := &types.RelayRequest{ + relayRequest := &types.RelaySession{ Provider: ts.providers[0].address.String(), ApiUrl: "", Data: []byte(ts.spec.Apis[0].Name), @@ -987,7 +980,7 @@ func TestRelayPaymentDataReliabilityWrongProvider(t *testing.T) { cuSum := ts.spec.Apis[0].ComputeUnits * 10 QoS := &types.QualityOfServiceReport{Latency: sdk.NewDecWithPrec(1, 0), Availability: sdk.NewDecWithPrec(1, 0), Sync: sdk.NewDecWithPrec(1, 0)} - relayRequest := &types.RelayRequest{ + relayRequest := &types.RelaySession{ Provider: ts.providers[0].address.String(), ApiUrl: "", Data: []byte(ts.spec.Apis[0].Name), @@ -1061,7 +1054,7 @@ GetWrongProvider: require.Nil(t, err) QoSDR := &types.QualityOfServiceReport{Latency: sdk.NewDecWithPrec(1, 0), Availability: sdk.NewDecWithPrec(1, 0), Sync: sdk.NewDecWithPrec(1, 0)} - relayRequestWithDataReliability0 := &types.RelayRequest{ + relayRequestWithDataReliability0 := &types.RelaySession{ Provider: providers[wrongProviderIndex].Address, ApiUrl: "", Data: []byte(ts.spec.Apis[0].Name), @@ -1079,7 +1072,7 @@ GetWrongProvider: require.Nil(t, err) provider := ts.getProvider(providers[wrongProviderIndex].Address) - relaysRequests := []*types.RelayRequest{relayRequestWithDataReliability0} + relaysRequests := []*types.RelaySession{relayRequestWithDataReliability0} _, err = ts.servers.PairingServer.RelayPayment(ts.ctx, &types.MsgRelayPayment{Creator: provider.address.String(), Relays: relaysRequests}) require.NotNil(t, err) @@ -1104,7 +1097,7 @@ func TestRelayPaymentDataReliabilityBelowReliabilityThreshold(t *testing.T) { cuSum := ts.spec.Apis[0].ComputeUnits * 10 QoS := &types.QualityOfServiceReport{Latency: sdk.NewDecWithPrec(1, 0), Availability: sdk.NewDecWithPrec(1, 0), Sync: sdk.NewDecWithPrec(1, 0)} - relayRequest := &types.RelayRequest{ + relayRequest := &types.RelaySession{ Provider: ts.providers[0].address.String(), ApiUrl: "", Data: []byte(ts.spec.Apis[0].Name), @@ -1153,7 +1146,7 @@ func TestRelayPaymentDataReliabilityBelowReliabilityThreshold(t *testing.T) { // make all providers send a datareliability payment request. 
Everyone should fail for _, provider := range ts.providers { QoSDR := &types.QualityOfServiceReport{Latency: sdk.NewDecWithPrec(1, 0), Availability: sdk.NewDecWithPrec(1, 0), Sync: sdk.NewDecWithPrec(1, 0)} - relayRequestWithDataReliability0 := &types.RelayRequest{ + relayRequestWithDataReliability0 := &types.RelaySession{ Provider: provider.address.String(), ApiUrl: "", Data: []byte(ts.spec.Apis[0].Name), @@ -1170,7 +1163,7 @@ func TestRelayPaymentDataReliabilityBelowReliabilityThreshold(t *testing.T) { relayRequestWithDataReliability0.Sig, err = sigs.SignRelay(ts.clients[0].secretKey, *relayRequestWithDataReliability0) require.Nil(t, err) - relaysRequests := []*types.RelayRequest{relayRequestWithDataReliability0} + relaysRequests := []*types.RelaySession{relayRequestWithDataReliability0} _, err = ts.servers.PairingServer.RelayPayment(ts.ctx, &types.MsgRelayPayment{Creator: provider.address.String(), Relays: relaysRequests}) require.NotNil(t, err) @@ -1195,7 +1188,7 @@ func TestRelayPaymentDataReliabilityDifferentClientSign(t *testing.T) { cuSum := ts.spec.Apis[0].ComputeUnits * 10 QoS := &types.QualityOfServiceReport{Latency: sdk.NewDecWithPrec(1, 0), Availability: sdk.NewDecWithPrec(1, 0), Sync: sdk.NewDecWithPrec(1, 0)} - relayRequest := &types.RelayRequest{ + relayRequest := &types.RelaySession{ Provider: ts.providers[0].address.String(), ApiUrl: "", Data: []byte(ts.spec.Apis[0].Name), @@ -1251,7 +1244,7 @@ func TestRelayPaymentDataReliabilityDifferentClientSign(t *testing.T) { require.Nil(t, err) QoSDR := &types.QualityOfServiceReport{Latency: sdk.NewDecWithPrec(1, 0), Availability: sdk.NewDecWithPrec(1, 0), Sync: sdk.NewDecWithPrec(1, 0)} - relayRequestWithDataReliability0 := &types.RelayRequest{ + relayRequestWithDataReliability0 := &types.RelaySession{ Provider: providers[index0].Address, ApiUrl: "", Data: []byte(ts.spec.Apis[0].Name), @@ -1269,7 +1262,7 @@ func TestRelayPaymentDataReliabilityDifferentClientSign(t *testing.T) { require.Nil(t, err) provider := ts.getProvider(providers[index0].Address) - relaysRequests := []*types.RelayRequest{relayRequestWithDataReliability0} + relaysRequests := []*types.RelaySession{relayRequestWithDataReliability0} _, err = ts.servers.PairingServer.RelayPayment(ts.ctx, &types.MsgRelayPayment{Creator: provider.address.String(), Relays: relaysRequests}) require.NotNil(t, err) @@ -1293,7 +1286,7 @@ func TestRelayPaymentDataReliabilityDoubleSpendDifferentEpoch(t *testing.T) { cuSum := ts.spec.Apis[0].ComputeUnits * 10 QoS := &types.QualityOfServiceReport{Latency: sdk.NewDecWithPrec(1, 0), Availability: sdk.NewDecWithPrec(1, 0), Sync: sdk.NewDecWithPrec(1, 0)} - relayRequest := &types.RelayRequest{ + relayRequest := &types.RelaySession{ Provider: ts.providers[0].address.String(), ApiUrl: "", Data: []byte(ts.spec.Apis[0].Name), @@ -1351,7 +1344,7 @@ func TestRelayPaymentDataReliabilityDoubleSpendDifferentEpoch(t *testing.T) { require.Nil(t, err) QoSDR := &types.QualityOfServiceReport{Latency: sdk.NewDecWithPrec(1, 0), Availability: sdk.NewDecWithPrec(1, 0), Sync: sdk.NewDecWithPrec(1, 0)} - relayRequestWithDataReliability0 := &types.RelayRequest{ + relayRequestWithDataReliability0 := &types.RelaySession{ Provider: providers[index0].Address, ApiUrl: "", Data: []byte(ts.spec.Apis[0].Name), @@ -1369,7 +1362,7 @@ func TestRelayPaymentDataReliabilityDoubleSpendDifferentEpoch(t *testing.T) { require.Nil(t, err) provider := ts.getProvider(providers[index0].Address) - relaysRequests := []*types.RelayRequest{relayRequestWithDataReliability0} + relaysRequests := 
[]*types.RelaySession{relayRequestWithDataReliability0} _, err = ts.servers.PairingServer.RelayPayment(ts.ctx, &types.MsgRelayPayment{Creator: provider.address.String(), Relays: relaysRequests}) require.Nil(t, err) @@ -1424,7 +1417,7 @@ func TestEpochPaymentDeletion(t *testing.T) { ts.ctx = testkeeper.AdvanceEpoch(ts.ctx, ts.keepers) - relayRequest := &types.RelayRequest{ + relayRequest := &types.RelaySession{ Provider: ts.providers[0].address.String(), ApiUrl: "", Data: []byte(ts.spec.Apis[0].Name), @@ -1441,7 +1434,7 @@ func TestEpochPaymentDeletion(t *testing.T) { relayRequest.Sig = sig require.Nil(t, err) - var Relays []*types.RelayRequest + var Relays []*types.RelaySession Relays = append(Relays, relayRequest) balanceProvider := ts.keepers.BankKeeper.GetBalance(sdk.UnwrapSDKContext(ts.ctx), ts.providers[0].address, epochstoragetypes.TokenDenom).Amount.Int64() From 3a7cdec4639faddd89b13f27b6486f7328c93186 Mon Sep 17 00:00:00 2001 From: Ran Mishael Date: Thu, 9 Mar 2023 18:24:00 +0100 Subject: [PATCH 091/123] adding config files for e2e --- go.mod | 2 +- go.sum | 3 +- protocol/lavasession/provider_types.go | 12 +- protocol/rpcprovider/rpcprovider.go | 1 + testutil/e2e/e2e.go | 142 ++++++++---------- .../e2e/e2eProviderConfigs/ethConsumer.yml | 4 + .../e2eProviderConfigs/jsonrpcProvider1.yml | 7 + .../e2eProviderConfigs/jsonrpcProvider2.yml | 7 + .../e2eProviderConfigs/jsonrpcProvider3.yml | 7 + .../e2eProviderConfigs/jsonrpcProvider4.yml | 7 + .../e2eProviderConfigs/jsonrpcProvider5.yml | 7 + .../e2e/e2eProviderConfigs/lavaConsumer.yml | 11 ++ .../e2e/e2eProviderConfigs/lavaProvider10.yml | 15 ++ .../e2e/e2eProviderConfigs/lavaProvider6.yml | 15 ++ .../e2e/e2eProviderConfigs/lavaProvider7.yml | 15 ++ .../e2e/e2eProviderConfigs/lavaProvider8.yml | 15 ++ .../e2e/e2eProviderConfigs/lavaProvider9.yml | 15 ++ 17 files changed, 205 insertions(+), 80 deletions(-) create mode 100644 testutil/e2e/e2eProviderConfigs/ethConsumer.yml create mode 100644 testutil/e2e/e2eProviderConfigs/jsonrpcProvider1.yml create mode 100644 testutil/e2e/e2eProviderConfigs/jsonrpcProvider2.yml create mode 100644 testutil/e2e/e2eProviderConfigs/jsonrpcProvider3.yml create mode 100644 testutil/e2e/e2eProviderConfigs/jsonrpcProvider4.yml create mode 100644 testutil/e2e/e2eProviderConfigs/jsonrpcProvider5.yml create mode 100644 testutil/e2e/e2eProviderConfigs/lavaConsumer.yml create mode 100644 testutil/e2e/e2eProviderConfigs/lavaProvider10.yml create mode 100644 testutil/e2e/e2eProviderConfigs/lavaProvider6.yml create mode 100644 testutil/e2e/e2eProviderConfigs/lavaProvider7.yml create mode 100644 testutil/e2e/e2eProviderConfigs/lavaProvider8.yml create mode 100644 testutil/e2e/e2eProviderConfigs/lavaProvider9.yml diff --git a/go.mod b/go.mod index 87fa520dda..cfccfab970 100644 --- a/go.mod +++ b/go.mod @@ -34,7 +34,7 @@ require ( github.com/docker/distribution v2.8.1+incompatible github.com/fullstorydev/grpcurl v1.8.5 github.com/gogo/status v1.1.0 - github.com/golang/protobuf v1.5.2 + github.com/golang/protobuf v1.5.3 github.com/ignite-hq/cli v0.22.1-0.20220610070456-1b33c09fceb7 github.com/jhump/protoreflect v1.14.0 github.com/joho/godotenv v1.3.0 diff --git a/go.sum b/go.sum index 736691d9d4..b4521d6dbd 100644 --- a/go.sum +++ b/go.sum @@ -692,8 +692,9 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= 
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3-0.20201103224600-674baa8c7fc3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= diff --git a/protocol/lavasession/provider_types.go b/protocol/lavasession/provider_types.go index 0b1c0c4850..7a147674e1 100644 --- a/protocol/lavasession/provider_types.go +++ b/protocol/lavasession/provider_types.go @@ -241,6 +241,10 @@ func (sps *SingleProviderSession) PrepareDataReliabilitySessionForUsage(relayReq sps.LatestRelayCu = DataReliabilityCuSum // 1. update latest sps.CuSum = relayRequestTotalCU // 2. update CuSum, if consumer wants to pay more, let it sps.RelayNum = sps.RelayNum + 1 // 3. update RelayNum, we already verified relayNum is valid in GetDataReliabilitySession. + utils.LavaFormatDebug("PrepareDataReliabilitySessionForUsage", &map[string]string{"relayRequestTotalCU": strconv.FormatUint(relayRequestTotalCU, 10), + "sps.LatestRelayCu": strconv.FormatUint(sps.LatestRelayCu, 10), + "sps.RelayNum": strconv.FormatUint(sps.RelayNum, 10), + }) return nil } @@ -255,6 +259,12 @@ func (sps *SingleProviderSession) PrepareSessionForUsage(cuFromSpec uint64, rela return sps.PrepareDataReliabilitySessionForUsage(relayRequestTotalCU) } + utils.LavaFormatDebug("Before Update Normal PrepareSessionForUsage", &map[string]string{"relayRequestTotalCU": strconv.FormatUint(relayRequestTotalCU, 10), + "sps.LatestRelayCu": strconv.FormatUint(sps.LatestRelayCu, 10), + "sps.RelayNum": strconv.FormatUint(sps.RelayNum, 10), + "sps.CuSum": strconv.FormatUint(sps.CuSum, 10), + }) + maxCu := sps.userSessionsParent.atomicReadMaxComputeUnits() if relayRequestTotalCU < sps.CuSum+cuFromSpec { sps.lock.Unlock() // unlock on error @@ -273,7 +283,7 @@ func (sps *SingleProviderSession) PrepareSessionForUsage(cuFromSpec uint64, rela } // finished validating, can add all info. sps.LatestRelayCu = cuFromSpec // 1. update latest - sps.CuSum = relayRequestTotalCU // 2. update CuSum, if consumer wants to pay more, let it + sps.CuSum = sps.CuSum + sps.LatestRelayCu // 2. update CuSum, if consumer wants to pay more, let it sps.RelayNum = sps.RelayNum + 1 // 3. update RelayNum, we already verified relayNum is valid in GetSession. 
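The debug lines added above follow a simple convention: every numeric session field is converted to a string with strconv.FormatUint and collected into a map[string]string before being handed to the logger. A compact sketch of that pattern in isolation (sessionAttributes and the field names are illustrative, not the lavasession types themselves):

package main

import (
	"fmt"
	"strconv"
)

// sessionAttributes stringifies a few uint64 counters the way the new debug
// logs do, so they can be attached to a structured log entry.
func sessionAttributes(latestRelayCu, relayNum, cuSum uint64) map[string]string {
	return map[string]string{
		"latestRelayCu": strconv.FormatUint(latestRelayCu, 10),
		"relayNum":      strconv.FormatUint(relayNum, 10),
		"cuSum":         strconv.FormatUint(cuSum, 10),
	}
}

func main() {
	fmt.Println(sessionAttributes(10, 3, 120)) // map[cuSum:120 latestRelayCu:10 relayNum:3]
}
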
return nil } diff --git a/protocol/rpcprovider/rpcprovider.go b/protocol/rpcprovider/rpcprovider.go index fcebcf91be..a3b5c751db 100644 --- a/protocol/rpcprovider/rpcprovider.go +++ b/protocol/rpcprovider/rpcprovider.go @@ -349,6 +349,7 @@ rpcprovider 127.0.0.1:3333 COS3 tendermintrpc "wss://www.node-path.com:80,https: cmdRPCProvider.Flags().String(performance.PprofAddressFlagName, "", "pprof server address, used for code profiling") cmdRPCProvider.Flags().String(performance.CacheFlagName, "", "address for a cache server to improve performance") cmdRPCProvider.Flags().Uint(chainproxy.ParallelConnectionsFlag, chainproxy.NumberOfParallelConnections, "parallel connections") + cmdRPCProvider.Flags().String(flags.FlagLogLevel, "debug", "log level") return cmdRPCProvider } diff --git a/testutil/e2e/e2e.go b/testutil/e2e/e2e.go index d42d07a9c7..fc1ed8dcb1 100644 --- a/testutil/e2e/e2e.go +++ b/testutil/e2e/e2e.go @@ -36,7 +36,10 @@ import ( "google.golang.org/grpc/credentials/insecure" ) -const logsFolder = "./testutil/e2e/logs/" +const ( + logsFolder = "./testutil/e2e/logs/" + configFolder = "./testutil/e2e/e2eProviderConfigs/" +) var ( checkedSpecsE2E = []string{"LAV1", "ETH1"} @@ -204,17 +207,17 @@ func (lt *lavaTest) startJSONRPCProxy(ctx context.Context) { utils.LavaFormatInfo("startJSONRPCProxy OK", nil) } -func (lt *lavaTest) startJSONRPCProvider(rpcURL string, ctx context.Context) { +func (lt *lavaTest) startJSONRPCProvider(ctx context.Context) { providerCommands := []string{ - lt.lavadPath + " server 127.0.0.1 2221 " + rpcURL + " ETH1 jsonrpc --from servicer1 --geolocation 1 --log_level debug", - lt.lavadPath + " server 127.0.0.1 2222 " + rpcURL + " ETH1 jsonrpc --from servicer2 --geolocation 1 --log_level debug", - lt.lavadPath + " server 127.0.0.1 2223 " + rpcURL + " ETH1 jsonrpc --from servicer3 --geolocation 1 --log_level debug", - lt.lavadPath + " server 127.0.0.1 2224 " + rpcURL + " ETH1 jsonrpc --from servicer4 --geolocation 1 --log_level debug", - lt.lavadPath + " server 127.0.0.1 2225 " + rpcURL + " ETH1 jsonrpc --from servicer5 --geolocation 1 --log_level debug", + lt.lavadPath + " rpcprovider " + configFolder + "jsonrpcProvider1.yml --from servicer1 --geolocation 1 --log_level debug", + lt.lavadPath + " rpcprovider " + configFolder + "jsonrpcProvider2.yml --from servicer2 --geolocation 1 --log_level debug", + lt.lavadPath + " rpcprovider " + configFolder + "jsonrpcProvider3.yml --from servicer3 --geolocation 1 --log_level debug", + lt.lavadPath + " rpcprovider " + configFolder + "jsonrpcProvider4.yml --from servicer4 --geolocation 1 --log_level debug", + lt.lavadPath + " rpcprovider " + configFolder + "jsonrpcProvider5.yml --from servicer5 --geolocation 1 --log_level debug", } for idx, providerCommand := range providerCommands { - logName := "03_jsonProvider_" + fmt.Sprintf("%02d ", idx) + logName := "03_EthProvider_" + fmt.Sprintf("%02d ", idx) lt.logs[logName] = new(bytes.Buffer) cmd := exec.CommandContext(ctx, "", "") cmd.Path = lt.lavadPath @@ -230,12 +233,18 @@ func (lt *lavaTest) startJSONRPCProvider(rpcURL string, ctx context.Context) { go func(idx int) { lt.listenCmdCommand(cmd, "startJSONRPCProvider process returned unexpectedly, provider idx:"+strconv.Itoa(idx), "startJSONRPCProvider") }(idx) + } + // validate all providers are up + for idx := 0; idx < len(providerCommands); idx++ { + lt.checkProviderResponsive(ctx, "127.0.0.1:222"+fmt.Sprintf("%d", idx+1), time.Minute) + } + utils.LavaFormatInfo("startJSONRPCProvider OK", nil) } func (lt *lavaTest) 
startJSONRPCConsumer(ctx context.Context) { - providerCommand := lt.lavadPath + " rpcconsumer 127.0.0.1:3333 ETH1 jsonrpc --from user1 --geolocation 1 --log_level debug" + providerCommand := lt.lavadPath + " rpcconsumer " + configFolder + "ethConsumer.yml --from user1 --geolocation 1 --log_level debug" logName := "04_jsonConsumer" lt.logs[logName] = new(bytes.Buffer) @@ -275,6 +284,24 @@ func (lt *lavaTest) checkJSONRPCConsumer(rpcURL string, timeout time.Duration, m panic("checkJSONRPCConsumer: JSONRPC Check Failed Consumer didn't respond") } +func (lt *lavaTest) checkProviderResponsive(ctx context.Context, rpcURL string, timeout time.Duration) { + for start := time.Now(); time.Since(start) < timeout; { + utils.LavaFormatInfo("Waiting Provider "+rpcURL, nil) + nctx, cancel := context.WithTimeout(ctx, time.Second) + grpcClient, err := grpc.DialContext(nctx, rpcURL, grpc.WithBlock(), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + // utils.LavaFormatInfo(fmt.Sprintf("Provider is still intializing %s", err), nil) + cancel() + time.Sleep(time.Second) + continue + } + cancel() + grpcClient.Close() + return + } + panic("checkProviderResponsive: Check Failed Provider didn't respond" + rpcURL) +} + func jsonrpcTests(rpcURL string, testDuration time.Duration) error { ctx := context.Background() utils.LavaFormatInfo("Starting JSONRPC Tests", nil) @@ -379,17 +406,17 @@ func jsonrpcTests(rpcURL string, testDuration time.Duration) error { return nil } -func (lt *lavaTest) startTendermintProvider(rpcURL string, httpUrl string, ctx context.Context) { +func (lt *lavaTest) startLavaProviders(ctx context.Context) { providerCommands := []string{ - lt.lavadPath + " server 127.0.0.1 2261 " + rpcURL + " LAV1 tendermintrpc --from servicer6 --geolocation 1 --log_level debug --tendermint-http-endpoint " + httpUrl, - lt.lavadPath + " server 127.0.0.1 2262 " + rpcURL + " LAV1 tendermintrpc --from servicer7 --geolocation 1 --log_level debug --tendermint-http-endpoint " + httpUrl, - lt.lavadPath + " server 127.0.0.1 2263 " + rpcURL + " LAV1 tendermintrpc --from servicer8 --geolocation 1 --log_level debug --tendermint-http-endpoint " + httpUrl, - lt.lavadPath + " server 127.0.0.1 2264 " + rpcURL + " LAV1 tendermintrpc --from servicer9 --geolocation 1 --log_level debug --tendermint-http-endpoint " + httpUrl, - lt.lavadPath + " server 127.0.0.1 2265 " + rpcURL + " LAV1 tendermintrpc --from servicer10 --geolocation 1 --log_level debug --tendermint-http-endpoint " + httpUrl, + lt.lavadPath + " rpcprovider " + configFolder + "lavaProvider6.yml --from servicer6 --geolocation 1 --log_level debug", + lt.lavadPath + " rpcprovider " + configFolder + "lavaProvider7.yml --from servicer7 --geolocation 1 --log_level debug", + lt.lavadPath + " rpcprovider " + configFolder + "lavaProvider8.yml --from servicer8 --geolocation 1 --log_level debug", + lt.lavadPath + " rpcprovider " + configFolder + "lavaProvider9.yml --from servicer9 --geolocation 1 --log_level debug", + lt.lavadPath + " rpcprovider " + configFolder + "lavaProvider10.yml --from servicer10 --geolocation 1 --log_level debug", } for idx, providerCommand := range providerCommands { - logName := "05_tendermintProvider_" + fmt.Sprintf("%02d ", idx) + logName := "05_LavaProvider_" + fmt.Sprintf("%02d ", idx) lt.logs[logName] = new(bytes.Buffer) cmd := exec.CommandContext(ctx, "", "") cmd.Path = lt.lavadPath @@ -407,11 +434,19 @@ func (lt *lavaTest) startTendermintProvider(rpcURL string, httpUrl string, ctx c lt.listenCmdCommand(cmd, 
"startTendermintProvider process returned unexpectedly, provider idx:"+strconv.Itoa(idx), "startTendermintProvider") }(idx) } + + // validate all providers are up + for idx := 0; idx < len(providerCommands); idx++ { + lt.checkProviderResponsive(ctx, "127.0.0.1:226"+fmt.Sprintf("%d", idx+1), time.Minute) + lt.checkProviderResponsive(ctx, "127.0.0.1:227"+fmt.Sprintf("%d", idx+1), time.Minute) + lt.checkProviderResponsive(ctx, "127.0.0.1:228"+fmt.Sprintf("%d", idx+1), time.Minute) + } + utils.LavaFormatInfo("startTendermintProvider OK", nil) } -func (lt *lavaTest) startRPCConsumer(ctx context.Context) { - providerCommand := lt.lavadPath + " rpcconsumer 127.0.0.1:3340 LAV1 tendermintrpc 127.0.0.1:3341 LAV1 rest 127.0.0.1:3342 LAV1 grpc --from user2 --geolocation 1 --log_level debug" +func (lt *lavaTest) startLavaConsumer(ctx context.Context) { + providerCommand := lt.lavadPath + " rpcconsumer " + configFolder + "lavaConsumer.yml --from user2 --geolocation 1 --log_level debug" logName := "06_RPCConsumer" lt.logs[logName] = new(bytes.Buffer) @@ -556,28 +591,6 @@ func (lt *lavaTest) startRESTProvider(rpcURL string, ctx context.Context) { utils.LavaFormatInfo("startRESTProvider OK", nil) } -func (lt *lavaTest) startRESTConsumer(ctx context.Context) { - providerCommand := lt.lavadPath + " rpcconsumer 127.0.0.1:3341 LAV1 rest --from user2 --geolocation 1 --log_level debug" - logName := "09_restConsumer" - lt.logs[logName] = new(bytes.Buffer) - - cmd := exec.CommandContext(ctx, "", "") - cmd.Path = lt.lavadPath - cmd.Args = strings.Split(providerCommand, " ") - cmd.Stdout = lt.logs[logName] - cmd.Stderr = lt.logs[logName] - - err := cmd.Start() - if err != nil { - panic(err) - } - lt.commands[logName] = cmd - go func() { - lt.listenCmdCommand(cmd, "startRESTConsumer process returned unexpectedly", "startRESTConsumer") - }() - utils.LavaFormatInfo("startRESTConsumer OK", nil) -} - func (lt *lavaTest) checkRESTConsumer(rpcURL string, timeout time.Duration) { for start := time.Now(); time.Since(start) < timeout; { utils.LavaFormatInfo("Waiting REST Consumer", nil) @@ -669,28 +682,6 @@ func (lt *lavaTest) startGRPCProvider(rpcURL string, ctx context.Context) { utils.LavaFormatInfo("startGRPCProvider OK", nil) } -func (lt *lavaTest) startGRPCConsumer(ctx context.Context) { - providerCommand := lt.lavadPath + " portal_server 127.0.0.1 3342 LAV1 grpc --from user2 --geolocation 1 --log_level debug" - logName := "11_grpcConsumer" - lt.logs[logName] = new(bytes.Buffer) - - cmd := exec.CommandContext(ctx, "", "") - cmd.Path = lt.lavadPath - cmd.Args = strings.Split(providerCommand, " ") - cmd.Stdout = lt.logs[logName] - cmd.Stderr = lt.logs[logName] - - err := cmd.Start() - if err != nil { - panic(err) - } - lt.commands[logName] = cmd - go func() { - lt.listenCmdCommand(cmd, "startGRPCConsumer process returned unexpectedly", "startGRPCConsumer") - }() - utils.LavaFormatInfo("startGRPCConsumer OK", nil) -} - func (lt *lavaTest) checkGRPCConsumer(rpcURL string, timeout time.Duration) { for start := time.Now(); time.Since(start) < timeout; { utils.LavaFormatInfo("Waiting GRPC Consumer", nil) @@ -908,23 +899,22 @@ func runE2E() { utils.LavaFormatInfo("RUNNING TESTS", nil) - jsonCTX := context.Background() + // ETH1 flow + jsonCTX, cancel := context.WithCancel(context.Background()) + defer cancel() + lt.startJSONRPCProxy(jsonCTX) lt.checkJSONRPCConsumer("http://127.0.0.1:1111", time.Minute*2, "JSONRPCProxy OK") // checks proxy. 
- lt.startJSONRPCProvider("http://127.0.0.1:1111", jsonCTX) + lt.startJSONRPCProvider(jsonCTX) lt.startJSONRPCConsumer(jsonCTX) lt.checkJSONRPCConsumer("http://127.0.0.1:3333/1", time.Minute*2, "JSONRPCConsumer OK") - tendermintCTX := context.Background() - lt.startTendermintProvider("ws://0.0.0.0:26657/websocket", "http://0.0.0.0:26657", tendermintCTX) - - restCTX := context.Background() - lt.startRESTProvider("http://127.0.0.1:1317", restCTX) - - grpcCTX := context.Background() - lt.startGRPCProvider("127.0.0.1:9090", grpcCTX) + // Lava Flow + rpcCtx, cancel := context.WithCancel(context.Background()) + defer cancel() - lt.startRPCConsumer(tendermintCTX) + lt.startLavaProviders(rpcCtx) + lt.startLavaConsumer(rpcCtx) lt.checkTendermintConsumer("http://127.0.0.1:3340/1", time.Second*30) lt.checkRESTConsumer("http://127.0.0.1:3341/1", time.Second*30) lt.checkGRPCConsumer("127.0.0.1:3342", time.Second*30) @@ -950,7 +940,7 @@ func runE2E() { utils.LavaFormatInfo("TENDERMINTRPC URI TEST OK", nil) } - lt.lavaOverLava(tendermintCTX) + lt.lavaOverLava(rpcCtx) restErr := restTests("http://127.0.0.1:3341/1", time.Second*30) if restErr != nil { @@ -969,9 +959,7 @@ func runE2E() { } jsonCTX.Done() - tendermintCTX.Done() - restCTX.Done() - grpcCTX.Done() + rpcCtx.Done() lt.finishTestSuccessfully() } diff --git a/testutil/e2e/e2eProviderConfigs/ethConsumer.yml b/testutil/e2e/e2eProviderConfigs/ethConsumer.yml new file mode 100644 index 0000000000..dc017a8cd0 --- /dev/null +++ b/testutil/e2e/e2eProviderConfigs/ethConsumer.yml @@ -0,0 +1,4 @@ +endpoints: + - chain-id: ETH1 + api-interface: jsonrpc + network-address: 127.0.0.1:3333 \ No newline at end of file diff --git a/testutil/e2e/e2eProviderConfigs/jsonrpcProvider1.yml b/testutil/e2e/e2eProviderConfigs/jsonrpcProvider1.yml new file mode 100644 index 0000000000..1fe7e91185 --- /dev/null +++ b/testutil/e2e/e2eProviderConfigs/jsonrpcProvider1.yml @@ -0,0 +1,7 @@ +endpoints: + - api-interface: jsonrpc + chain-id: ETH1 + network-address: 127.0.0.1:2221 + node-url: + - http://127.0.0.1:1111 + \ No newline at end of file diff --git a/testutil/e2e/e2eProviderConfigs/jsonrpcProvider2.yml b/testutil/e2e/e2eProviderConfigs/jsonrpcProvider2.yml new file mode 100644 index 0000000000..ed96ccdb3e --- /dev/null +++ b/testutil/e2e/e2eProviderConfigs/jsonrpcProvider2.yml @@ -0,0 +1,7 @@ +endpoints: + - api-interface: jsonrpc + chain-id: ETH1 + network-address: 127.0.0.1:2222 + node-url: + - http://127.0.0.1:1111 + \ No newline at end of file diff --git a/testutil/e2e/e2eProviderConfigs/jsonrpcProvider3.yml b/testutil/e2e/e2eProviderConfigs/jsonrpcProvider3.yml new file mode 100644 index 0000000000..830e436c02 --- /dev/null +++ b/testutil/e2e/e2eProviderConfigs/jsonrpcProvider3.yml @@ -0,0 +1,7 @@ +endpoints: + - api-interface: jsonrpc + chain-id: ETH1 + network-address: 127.0.0.1:2223 + node-url: + - http://127.0.0.1:1111 + \ No newline at end of file diff --git a/testutil/e2e/e2eProviderConfigs/jsonrpcProvider4.yml b/testutil/e2e/e2eProviderConfigs/jsonrpcProvider4.yml new file mode 100644 index 0000000000..f567837111 --- /dev/null +++ b/testutil/e2e/e2eProviderConfigs/jsonrpcProvider4.yml @@ -0,0 +1,7 @@ +endpoints: + - api-interface: jsonrpc + chain-id: ETH1 + network-address: 127.0.0.1:2224 + node-url: + - http://127.0.0.1:1111 + \ No newline at end of file diff --git a/testutil/e2e/e2eProviderConfigs/jsonrpcProvider5.yml b/testutil/e2e/e2eProviderConfigs/jsonrpcProvider5.yml new file mode 100644 index 0000000000..30c5175ec0 --- /dev/null +++ 
b/testutil/e2e/e2eProviderConfigs/jsonrpcProvider5.yml @@ -0,0 +1,7 @@ +endpoints: + - api-interface: jsonrpc + chain-id: ETH1 + network-address: 127.0.0.1:2225 + node-url: + - http://127.0.0.1:1111 + \ No newline at end of file diff --git a/testutil/e2e/e2eProviderConfigs/lavaConsumer.yml b/testutil/e2e/e2eProviderConfigs/lavaConsumer.yml new file mode 100644 index 0000000000..d44c13e8a9 --- /dev/null +++ b/testutil/e2e/e2eProviderConfigs/lavaConsumer.yml @@ -0,0 +1,11 @@ +endpoints: + - chain-id: LAV1 + api-interface: tendermintrpc + network-address: 127.0.0.1:3340 + - chain-id: LAV1 + api-interface: rest + network-address: 127.0.0.1:3341 + - chain-id: LAV1 + api-interface: grpc + network-address: 127.0.0.1:3342 + \ No newline at end of file diff --git a/testutil/e2e/e2eProviderConfigs/lavaProvider10.yml b/testutil/e2e/e2eProviderConfigs/lavaProvider10.yml new file mode 100644 index 0000000000..0762a17890 --- /dev/null +++ b/testutil/e2e/e2eProviderConfigs/lavaProvider10.yml @@ -0,0 +1,15 @@ +endpoints: + - api-interface: tendermintrpc + chain-id: LAV1 + network-address: 127.0.0.1:2265 + node-url: + - ws://0.0.0.0:26657/websocket + - http://0.0.0.0:26657 + - api-interface: rest + chain-id: LAV1 + network-address: 127.0.0.1:2275 + node-url: http://127.0.0.1:1317 + - api-interface: grpc + chain-id: LAV1 + network-address: 127.0.0.1:2285 + node-url: 127.0.0.1:9090 \ No newline at end of file diff --git a/testutil/e2e/e2eProviderConfigs/lavaProvider6.yml b/testutil/e2e/e2eProviderConfigs/lavaProvider6.yml new file mode 100644 index 0000000000..8ef948bdb4 --- /dev/null +++ b/testutil/e2e/e2eProviderConfigs/lavaProvider6.yml @@ -0,0 +1,15 @@ +endpoints: + - api-interface: tendermintrpc + chain-id: LAV1 + network-address: 127.0.0.1:2261 + node-url: + - ws://0.0.0.0:26657/websocket + - http://0.0.0.0:26657 + - api-interface: rest + chain-id: LAV1 + network-address: 127.0.0.1:2271 + node-url: http://127.0.0.1:1317 + - api-interface: grpc + chain-id: LAV1 + network-address: 127.0.0.1:2281 + node-url: 127.0.0.1:9090 \ No newline at end of file diff --git a/testutil/e2e/e2eProviderConfigs/lavaProvider7.yml b/testutil/e2e/e2eProviderConfigs/lavaProvider7.yml new file mode 100644 index 0000000000..1e7cf0d8cd --- /dev/null +++ b/testutil/e2e/e2eProviderConfigs/lavaProvider7.yml @@ -0,0 +1,15 @@ +endpoints: + - api-interface: tendermintrpc + chain-id: LAV1 + network-address: 127.0.0.1:2262 + node-url: + - ws://0.0.0.0:26657/websocket + - http://0.0.0.0:26657 + - api-interface: rest + chain-id: LAV1 + network-address: 127.0.0.1:2272 + node-url: http://127.0.0.1:1317 + - api-interface: grpc + chain-id: LAV1 + network-address: 127.0.0.1:2282 + node-url: 127.0.0.1:9090 \ No newline at end of file diff --git a/testutil/e2e/e2eProviderConfigs/lavaProvider8.yml b/testutil/e2e/e2eProviderConfigs/lavaProvider8.yml new file mode 100644 index 0000000000..46f6ac8bab --- /dev/null +++ b/testutil/e2e/e2eProviderConfigs/lavaProvider8.yml @@ -0,0 +1,15 @@ +endpoints: + - api-interface: tendermintrpc + chain-id: LAV1 + network-address: 127.0.0.1:2263 + node-url: + - ws://0.0.0.0:26657/websocket + - http://0.0.0.0:26657 + - api-interface: rest + chain-id: LAV1 + network-address: 127.0.0.1:2273 + node-url: http://127.0.0.1:1317 + - api-interface: grpc + chain-id: LAV1 + network-address: 127.0.0.1:2283 + node-url: 127.0.0.1:9090 \ No newline at end of file diff --git a/testutil/e2e/e2eProviderConfigs/lavaProvider9.yml b/testutil/e2e/e2eProviderConfigs/lavaProvider9.yml new file mode 100644 index 0000000000..a0c0f70c90 --- 
/dev/null +++ b/testutil/e2e/e2eProviderConfigs/lavaProvider9.yml @@ -0,0 +1,15 @@ +endpoints: + - api-interface: tendermintrpc + chain-id: LAV1 + network-address: 127.0.0.1:2264 + node-url: + - ws://0.0.0.0:26657/websocket + - http://0.0.0.0:26657 + - api-interface: rest + chain-id: LAV1 + network-address: 127.0.0.1:2274 + node-url: http://127.0.0.1:1317 + - api-interface: grpc + chain-id: LAV1 + network-address: 127.0.0.1:2284 + node-url: 127.0.0.1:9090 \ No newline at end of file From 1b9a4a4a5a3f7cde6c5a20f6168cfc7f2a38c535 Mon Sep 17 00:00:00 2001 From: Ran Mishael Date: Thu, 9 Mar 2023 18:32:38 +0100 Subject: [PATCH 092/123] fixing provider cu charge in case consumer pays more --- protocol/lavasession/provider_types.go | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/protocol/lavasession/provider_types.go b/protocol/lavasession/provider_types.go index 7a147674e1..943f963d1c 100644 --- a/protocol/lavasession/provider_types.go +++ b/protocol/lavasession/provider_types.go @@ -275,15 +275,18 @@ func (sps *SingleProviderSession) PrepareSessionForUsage(cuFromSpec uint64, rela }) } + // if consumer wants to pay more, we need to adjust the payment. so next relay will be in sync + cuToAdd := relayRequestTotalCU - sps.CuSum // how much consumer thinks he needs to pay - our current state + // this must happen first, as we also validate and add the used cu to parent here - err = sps.validateAndAddUsedCU(cuFromSpec, maxCu) + err = sps.validateAndAddUsedCU(cuToAdd, maxCu) if err != nil { sps.lock.Unlock() // unlock on error return err } // finished validating, can add all info. - sps.LatestRelayCu = cuFromSpec // 1. update latest - sps.CuSum = sps.CuSum + sps.LatestRelayCu // 2. update CuSum, if consumer wants to pay more, let it + sps.LatestRelayCu = cuToAdd // 1. update latest + sps.CuSum += cuToAdd // 2. update CuSum, if consumer wants to pay more, let it sps.RelayNum = sps.RelayNum + 1 // 3. update RelayNum, we already verified relayNum is valid in GetSession. 
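The accounting change above makes the provider tolerant of a consumer that reports a higher running total than the provider has recorded: instead of charging cuFromSpec directly, it charges the difference between the consumer's reported total and its own CuSum, so both sides finish the relay in sync. A small self-contained sketch of that arithmetic (providerCuSum and consumerReportedTotal are invented names standing in for sps.CuSum and relayRequestTotalCU):

package main

import "fmt"

// chargeForRelay returns how many compute units to charge for this relay and
// the provider's updated running total, mirroring the cuToAdd logic in the
// patch above.
func chargeForRelay(providerCuSum, consumerReportedTotal uint64) (cuToAdd, newCuSum uint64) {
	cuToAdd = consumerReportedTotal - providerCuSum // what the consumer thinks it owes minus what we already recorded
	newCuSum = providerCuSum + cuToAdd              // provider is back in sync with the consumer's total
	return cuToAdd, newCuSum
}

func main() {
	// provider recorded 100 CU so far, consumer reports a running total of 130:
	cuToAdd, newCuSum := chargeForRelay(100, 130)
	fmt.Println(cuToAdd, newCuSum) // 30 130
}
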
return nil } From 28fb62956ba7165d176866e46550097670f11408 Mon Sep 17 00:00:00 2001 From: omer mishael Date: Sat, 11 Mar 2023 11:11:39 +0200 Subject: [PATCH 093/123] changed some tests to use new format --- x/pairing/keeper/msg_server_relay_payment.go | 2 +- .../keeper/msg_server_relay_payment_test.go | 559 ++++++++---------- 2 files changed, 258 insertions(+), 303 deletions(-) diff --git a/x/pairing/keeper/msg_server_relay_payment.go b/x/pairing/keeper/msg_server_relay_payment.go index 3960aab7b6..4a8cb8f0ed 100644 --- a/x/pairing/keeper/msg_server_relay_payment.go +++ b/x/pairing/keeper/msg_server_relay_payment.go @@ -82,7 +82,7 @@ func (k msgServer) RelayPayment(goCtx context.Context, msg *types.MsgRelayPaymen payReliability := false // validate data reliability - if msg.VRFs != nil && msg.VRFs[relayIdx] != nil { + if len(msg.VRFs) > relayIdx && msg.VRFs[relayIdx] != nil { vrfData := msg.VRFs[relayIdx] details := map[string]string{"client": clientAddr.String(), "provider": providerAddr.String()} if !spec.DataReliabilityEnabled { diff --git a/x/pairing/keeper/msg_server_relay_payment_test.go b/x/pairing/keeper/msg_server_relay_payment_test.go index f036b4c021..987a766ef6 100644 --- a/x/pairing/keeper/msg_server_relay_payment_test.go +++ b/x/pairing/keeper/msg_server_relay_payment_test.go @@ -39,6 +39,15 @@ type testStruct struct { spec spectypes.Spec } +func createStubRequest(relaySession *types.RelaySession, dataReliability *types.VRFData) *types.RelayRequest { + req := &types.RelayRequest{ + RelaySession: relaySession, + RelayData: &types.RelayPrivateData{Data: []byte("stub-data")}, + DataReliability: dataReliability, + } + return req +} + func (ts *testStruct) addClient(amount int) error { for i := 0; i < amount; i++ { sk, address := sigs.GenerateFloatingKey() @@ -136,7 +145,7 @@ func TestRelayPaymentMemoryTransferAfterEpochChange(t *testing.T) { } // Create relay request that was done in the first epoch. 
Change session ID each iteration to avoid double spending error (provider asks reward for the same transaction twice) - relayRequest := &types.RelaySession{ + relaySession := &types.RelaySession{ Provider: ts.providers[0].address.String(), ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: sessionCounter, @@ -147,18 +156,18 @@ func TestRelayPaymentMemoryTransferAfterEpochChange(t *testing.T) { } // Sign and send the payment requests - sig, err := sigs.SignRelay(ts.clients[0].secretKey, *relayRequest) - relayRequest.Sig = sig + sig, err := sigs.SignRelay(ts.clients[0].secretKey, *relaySession) + relaySession.Sig = sig require.Nil(t, err) // Request payment (helper function validates the balances and verifies if we should get an error through valid) var Relays []*types.RelaySession - Relays = append(Relays, relayRequest) + Relays = append(Relays, relaySession) relayPaymentMessage := types.MsgRelayPayment{Creator: ts.providers[0].address.String(), Relays: Relays} payAndVerifyBalance(t, ts, relayPaymentMessage, tt.valid, ts.clients[0].address, ts.providers[0].address) // Check the RPO exists (shouldn't exist after epochsToSave+1 passes) - verifyRelayPaymentObjects(t, ts, relayRequest, tt.valid) + verifyRelayPaymentObjects(t, ts, relaySession, tt.valid) }) } } @@ -203,10 +212,9 @@ func TestRelayPaymentBlockHeight(t *testing.T) { require.Nil(t, err) ts.ctx = testkeeper.AdvanceEpoch(ts.ctx, ts.keepers) - relayRequest := &types.RelaySession{ - Provider: ts.providers[0].address.String(), - - Data: []byte(ts.spec.Apis[0].Name), + relaySession := &types.RelaySession{ + Provider: ts.providers[0].address.String(), + ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(1), ChainID: ts.spec.Name, CuSum: ts.spec.Apis[0].ComputeUnits * 10, @@ -214,12 +222,12 @@ func TestRelayPaymentBlockHeight(t *testing.T) { RelayNum: 0, } - sig, err := sigs.SignRelay(ts.clients[0].secretKey, *relayRequest) - relayRequest.Sig = sig + sig, err := sigs.SignRelay(ts.clients[0].secretKey, *relaySession) + relaySession.Sig = sig require.Nil(t, err) - var Relays []*types.RelayRequest - Relays = append(Relays, relayRequest) + var Relays []*types.RelaySession + Relays = append(Relays, relaySession) balanceProvider := ts.keepers.BankKeeper.GetBalance(sdk.UnwrapSDKContext(ts.ctx), ts.providers[0].address, epochstoragetypes.TokenDenom).Amount.Int64() stakeClient, found, _ := ts.keepers.Epochstorage.GetStakeEntryByAddressCurrent(sdk.UnwrapSDKContext(ts.ctx), epochstoragetypes.ClientKey, ts.spec.Index, ts.clients[0].address) @@ -266,10 +274,9 @@ func TestRelayPaymentOverUse(t *testing.T) { maxcu, err := ts.keepers.Pairing.GetAllowedCUForBlock(sdk.UnwrapSDKContext(ts.ctx), uint64(sdk.UnwrapSDKContext(ts.ctx).BlockHeight()), entry) require.Nil(t, err) - relayRequest := &types.RelaySession{ + relaySession := &types.RelaySession{ Provider: ts.providers[0].address.String(), - ApiUrl: "", - Data: []byte(ts.spec.Apis[0].Name), + ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(1), ChainID: ts.spec.Name, CuSum: maxcu * 2, @@ -277,12 +284,12 @@ func TestRelayPaymentOverUse(t *testing.T) { RelayNum: 0, } - sig, err := sigs.SignRelay(ts.clients[0].secretKey, *relayRequest) - relayRequest.Sig = sig + sig, err := sigs.SignRelay(ts.clients[0].secretKey, *relaySession) + relaySession.Sig = sig require.Nil(t, err) var Relays []*types.RelaySession - Relays = append(Relays, relayRequest) + Relays = append(Relays, relaySession) // TODO: currently over use is returning an error and doesnt get to balance zero. 
we will fix it in the future so this can be uncommented. // balance := ts.keepers.BankKeeper.GetBalance(sdk.UnwrapSDKContext(ts.ctx), ts.providers[0].address, epochstoragetypes.TokenDenom).Amount.Int64() @@ -323,24 +330,21 @@ func TestRelayPaymentNotUnstakingProviderForUnresponsivenessIfNoEpochInformation var Relays []*types.RelaySession for clientIndex := 0; clientIndex < testClientAmount; clientIndex++ { // testing testClientAmount of complaints - relayRequest := &types.RelaySession{ + relaySession := &types.RelaySession{ Provider: ts.providers[0].address.String(), - ApiUrl: "", - Data: []byte(ts.spec.Apis[0].Name), + ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(1), ChainID: ts.spec.Name, CuSum: ts.spec.Apis[0].ComputeUnits * 10, BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), RelayNum: 0, - RequestBlock: -1, - DataReliability: nil, UnresponsiveProviders: unresponsiveProvidersData, // create the complaint } - sig, err := sigs.SignRelay(ts.clients[clientIndex].secretKey, *relayRequest) - relayRequest.Sig = sig + sig, err := sigs.SignRelay(ts.clients[clientIndex].secretKey, *relaySession) + relaySession.Sig = sig require.Nil(t, err) - Relays = append(Relays, relayRequest) + Relays = append(Relays, relaySession) } _, err = ts.servers.PairingServer.RelayPayment(ts.ctx, &types.MsgRelayPayment{Creator: ts.providers[0].address.String(), Relays: Relays}) require.Nil(t, err) @@ -378,25 +382,22 @@ func TestRelayPaymentUnstakingProviderForUnresponsivenessWithBadDataInput(t *tes var Relays []*types.RelaySession var totalCu uint64 for clientIndex := 0; clientIndex < testClientAmount; clientIndex++ { // testing testClientAmount of complaints - relayRequest := &types.RelaySession{ + relaySession := &types.RelaySession{ Provider: ts.providers[0].address.String(), - ApiUrl: "", - Data: []byte(ts.spec.Apis[0].Name), + ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(1), ChainID: ts.spec.Name, CuSum: ts.spec.Apis[0].ComputeUnits * 10, BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), RelayNum: 0, - RequestBlock: -1, - DataReliability: nil, UnresponsiveProviders: unresponsiveProvidersData[clientIndex], // create the complaint } - totalCu += relayRequest.CuSum + totalCu += relaySession.CuSum - sig, err := sigs.SignRelay(ts.clients[clientIndex].secretKey, *relayRequest) - relayRequest.Sig = sig + sig, err := sigs.SignRelay(ts.clients[clientIndex].secretKey, *relaySession) + relaySession.Sig = sig require.Nil(t, err) - Relays = append(Relays, relayRequest) + Relays = append(Relays, relaySession) } balanceProviderBeforePayment := ts.keepers.BankKeeper.GetBalance(sdk.UnwrapSDKContext(ts.ctx), ts.providers[0].address, epochstoragetypes.TokenDenom).Amount.Int64() @@ -415,25 +416,22 @@ func TestRelayPaymentNotUnstakingProviderForUnresponsivenessBecauseOfServices(t ts := setupClientsAndProvidersForUnresponsiveness(t, testClientAmount, testProviderAmount) ts.ctx = testkeeper.AdvanceEpoch(ts.ctx, ts.keepers) // after payment move one epoch to stake - var RelaysForUnresponsiveProviderInFirstTwoEpochs []*types.RelayRequest + var RelaysForUnresponsiveProviderInFirstTwoEpochs []*types.RelaySession for i := 0; i < 2; i++ { // move to epoch 3 so we can check enough epochs in the past - relayRequest := &types.RelaySession{ - Provider: ts.providers[1].address.String(), - ApiUrl: "", - Data: []byte(ts.spec.Apis[0].Name), - SessionId: uint64(1), - ChainID: ts.spec.Name, - CuSum: ts.spec.Apis[0].ComputeUnits * 10, - BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), - RelayNum: 
0, - RequestBlock: -1, - DataReliability: nil, + relaySession := &types.RelaySession{ + Provider: ts.providers[1].address.String(), + ContentHash: []byte(ts.spec.Apis[0].Name), + SessionId: uint64(1), + ChainID: ts.spec.Name, + CuSum: ts.spec.Apis[0].ComputeUnits * 10, + BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), + RelayNum: 0, } - sig, err := sigs.SignRelay(ts.clients[i].secretKey, *relayRequest) - relayRequest.Sig = sig + sig, err := sigs.SignRelay(ts.clients[i].secretKey, *relaySession) + relaySession.Sig = sig require.Nil(t, err) - RelaysForUnresponsiveProviderInFirstTwoEpochs = []*types.RelaySession{relayRequest} // each epoch get one service + RelaysForUnresponsiveProviderInFirstTwoEpochs = []*types.RelaySession{relaySession} // each epoch get one service _, err = ts.servers.PairingServer.RelayPayment(ts.ctx, &types.MsgRelayPayment{Creator: ts.providers[1].address.String(), Relays: RelaysForUnresponsiveProviderInFirstTwoEpochs}) require.Nil(t, err) ts.ctx = testkeeper.AdvanceEpoch(ts.ctx, ts.keepers) // after payment move one epoch @@ -444,24 +442,21 @@ func TestRelayPaymentNotUnstakingProviderForUnresponsivenessBecauseOfServices(t var Relays []*types.RelaySession for clientIndex := 0; clientIndex < testClientAmount; clientIndex++ { // testing testClientAmount of complaints - relayRequest := &types.RelaySession{ + relaySession := &types.RelaySession{ Provider: ts.providers[0].address.String(), - ApiUrl: "", - Data: []byte(ts.spec.Apis[0].Name), + ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(1), ChainID: ts.spec.Name, CuSum: ts.spec.Apis[0].ComputeUnits * 10, BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), RelayNum: 0, - RequestBlock: -1, - DataReliability: nil, UnresponsiveProviders: unresponsiveProvidersData, // create the complaint } - sig, err := sigs.SignRelay(ts.clients[clientIndex].secretKey, *relayRequest) - relayRequest.Sig = sig + sig, err := sigs.SignRelay(ts.clients[clientIndex].secretKey, *relaySession) + relaySession.Sig = sig require.Nil(t, err) - Relays = append(Relays, relayRequest) + Relays = append(Relays, relaySession) } _, err = ts.servers.PairingServer.RelayPayment(ts.ctx, &types.MsgRelayPayment{Creator: ts.providers[0].address.String(), Relays: Relays}) require.Nil(t, err) @@ -485,26 +480,23 @@ func TestRelayPaymentDoubleSpending(t *testing.T) { ts.ctx = testkeeper.AdvanceEpoch(ts.ctx, ts.keepers) cuSum := ts.spec.GetApis()[0].ComputeUnits * 10 - relayRequest := &types.RelaySession{ - Provider: ts.providers[0].address.String(), - ApiUrl: "", - Data: []byte(ts.spec.Apis[0].Name), - SessionId: uint64(1), - ChainID: ts.spec.Name, - CuSum: cuSum, - BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), - RelayNum: 0, - RequestBlock: -1, - DataReliability: nil, + relaySession := &types.RelaySession{ + Provider: ts.providers[0].address.String(), + ContentHash: []byte(ts.spec.Apis[0].Name), + SessionId: uint64(1), + ChainID: ts.spec.Name, + CuSum: cuSum, + BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), + RelayNum: 0, } - sig, err := sigs.SignRelay(ts.clients[0].secretKey, *relayRequest) - relayRequest.Sig = sig + sig, err := sigs.SignRelay(ts.clients[0].secretKey, *relaySession) + relaySession.Sig = sig require.Nil(t, err) var Relays []*types.RelaySession - Relays = append(Relays, relayRequest) - relayRequest2 := *relayRequest + Relays = append(Relays, relaySession) + relayRequest2 := *relaySession Relays = append(Relays, &relayRequest2) balance := ts.keepers.BankKeeper.GetBalance(sdk.UnwrapSDKContext(ts.ctx), 
ts.providers[0].address, epochstoragetypes.TokenDenom).Amount.Int64() @@ -533,21 +525,18 @@ func TestRelayPaymentDataModification(t *testing.T) { require.Nil(t, err) ts.ctx = testkeeper.AdvanceEpoch(ts.ctx, ts.keepers) - relayRequest := &types.RelaySession{ - Provider: ts.providers[0].address.String(), - ApiUrl: "", - Data: []byte(ts.spec.Apis[0].Name), - SessionId: uint64(1), - ChainID: ts.spec.Name, - CuSum: ts.spec.Apis[0].ComputeUnits * 10, - BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), - RelayNum: 0, - RequestBlock: -1, - DataReliability: nil, + relaySession := &types.RelaySession{ + Provider: ts.providers[0].address.String(), + ContentHash: []byte(ts.spec.Apis[0].Name), + SessionId: uint64(1), + ChainID: ts.spec.Name, + CuSum: ts.spec.Apis[0].ComputeUnits * 10, + BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), + RelayNum: 0, } - sig, err := sigs.SignRelay(ts.clients[0].secretKey, *relayRequest) - relayRequest.Sig = sig + sig, err := sigs.SignRelay(ts.clients[0].secretKey, *relaySession) + relaySession.Sig = sig require.Nil(t, err) tests := []struct { @@ -563,12 +552,12 @@ func TestRelayPaymentDataModification(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - relayRequest.Provider = tt.provider - relayRequest.CuSum = tt.cu - relayRequest.SessionId = uint64(tt.id) + relaySession.Provider = tt.provider + relaySession.CuSum = tt.cu + relaySession.SessionId = uint64(tt.id) var Relays []*types.RelaySession - Relays = append(Relays, relayRequest) + Relays = append(Relays, relaySession) _, err = ts.servers.PairingServer.RelayPayment(ts.ctx, &types.MsgRelayPayment{Creator: ts.providers[0].address.String(), Relays: Relays}) @@ -588,25 +577,22 @@ func TestRelayPaymentDelayedDoubleSpending(t *testing.T) { require.Nil(t, err) ts.ctx = testkeeper.AdvanceEpoch(ts.ctx, ts.keepers) - relayRequest := &types.RelaySession{ - Provider: ts.providers[0].address.String(), - ApiUrl: "", - Data: []byte(ts.spec.Apis[0].Name), - SessionId: uint64(1), - ChainID: ts.spec.Name, - CuSum: ts.spec.Apis[0].ComputeUnits * 10, - BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), - RelayNum: 0, - RequestBlock: -1, - DataReliability: nil, + relaySession := &types.RelaySession{ + Provider: ts.providers[0].address.String(), + ContentHash: []byte(ts.spec.Apis[0].Name), + SessionId: uint64(1), + ChainID: ts.spec.Name, + CuSum: ts.spec.Apis[0].ComputeUnits * 10, + BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), + RelayNum: 0, } - sig, err := sigs.SignRelay(ts.clients[0].secretKey, *relayRequest) - relayRequest.Sig = sig + sig, err := sigs.SignRelay(ts.clients[0].secretKey, *relaySession) + relaySession.Sig = sig require.Nil(t, err) var Relays []*types.RelaySession - relay := *relayRequest + relay := *relaySession Relays = append(Relays, &relay) _, err = ts.servers.PairingServer.RelayPayment(ts.ctx, &types.MsgRelayPayment{Creator: ts.providers[0].address.String(), Relays: Relays}) @@ -631,8 +617,8 @@ func TestRelayPaymentDelayedDoubleSpending(t *testing.T) { ts.ctx = testkeeper.AdvanceEpoch(ts.ctx, ts.keepers) } - var Relays []*types.RelayRequest - relay := *relayRequest + var Relays []*types.RelaySession + relay := *relaySession Relays = append(Relays, &relay) _, err = ts.servers.PairingServer.RelayPayment(ts.ctx, &types.MsgRelayPayment{Creator: ts.providers[0].address.String(), Relays: Relays}) @@ -676,25 +662,22 @@ func TestRelayPaymentOldEpochs(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { cuSum := 
ts.spec.Apis[0].ComputeUnits * 10 - relayRequest := &types.RelaySession{ - Provider: ts.providers[0].address.String(), - ApiUrl: "", - Data: []byte(ts.spec.Apis[0].Name), - SessionId: tt.sid, - ChainID: ts.spec.Name, - CuSum: cuSum, - BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight() - int64(blocksInEpoch)*tt.epoch, - RelayNum: 0, - RequestBlock: -1, - DataReliability: nil, + relaySession := &types.RelaySession{ + Provider: ts.providers[0].address.String(), + ContentHash: []byte(ts.spec.Apis[0].Name), + SessionId: tt.sid, + ChainID: ts.spec.Name, + CuSum: cuSum, + BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight() - int64(blocksInEpoch)*tt.epoch, + RelayNum: 0, } - sig, err := sigs.SignRelay(ts.clients[0].secretKey, *relayRequest) - relayRequest.Sig = sig + sig, err := sigs.SignRelay(ts.clients[0].secretKey, *relaySession) + relaySession.Sig = sig require.Nil(t, err) var Relays []*types.RelaySession - Relays = append(Relays, relayRequest) + Relays = append(Relays, relaySession) balance := ts.keepers.BankKeeper.GetBalance(sdk.UnwrapSDKContext(ts.ctx), ts.providers[0].address, epochstoragetypes.TokenDenom).Amount.Int64() stakeClient, _, _ := ts.keepers.Epochstorage.GetStakeEntryByAddressCurrent(sdk.UnwrapSDKContext(ts.ctx), epochstoragetypes.ClientKey, ts.spec.Index, ts.clients[0].address) @@ -748,26 +731,23 @@ func TestRelayPaymentQoS(t *testing.T) { cuSum := ts.spec.Apis[0].ComputeUnits * 10 QoS := &types.QualityOfServiceReport{Latency: tt.latency, Availability: tt.availability, Sync: tt.sync} - relayRequest := &types.RelaySession{ - Provider: ts.providers[0].address.String(), - ApiUrl: "", - Data: []byte(ts.spec.Apis[0].Name), - SessionId: uint64(1), - ChainID: ts.spec.Name, - CuSum: cuSum, - BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), - RelayNum: 0, - RequestBlock: -1, - QoSReport: QoS, - DataReliability: nil, + relaySession := &types.RelaySession{ + Provider: ts.providers[0].address.String(), + ContentHash: []byte(ts.spec.Apis[0].Name), + SessionId: uint64(1), + ChainID: ts.spec.Name, + CuSum: cuSum, + BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), + RelayNum: 0, + QoSReport: QoS, } QoS.ComputeQoS() - sig, err := sigs.SignRelay(ts.clients[0].secretKey, *relayRequest) - relayRequest.Sig = sig + sig, err := sigs.SignRelay(ts.clients[0].secretKey, *relaySession) + relaySession.Sig = sig require.Nil(t, err) var Relays []*types.RelaySession - relay := *relayRequest + relay := *relaySession Relays = append(Relays, &relay) balance := ts.keepers.BankKeeper.GetBalance(sdk.UnwrapSDKContext(ts.ctx), ts.providers[0].address, epochstoragetypes.TokenDenom).Amount.Int64() @@ -836,22 +816,19 @@ func TestRelayPaymentDataReliability(t *testing.T) { cuSum := ts.spec.Apis[0].ComputeUnits * 10 QoS := &types.QualityOfServiceReport{Latency: sdk.NewDecWithPrec(1, 0), Availability: sdk.NewDecWithPrec(1, 0), Sync: sdk.NewDecWithPrec(1, 0)} - relayRequest := &types.RelaySession{ - Provider: ts.providers[0].address.String(), - ApiUrl: "", - Data: []byte(ts.spec.Apis[0].Name), - SessionId: uint64(1), - ChainID: ts.spec.Name, - CuSum: cuSum, - BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), - RelayNum: 0, - RequestBlock: -1, - QoSReport: QoS, - DataReliability: nil, + relaySession := &types.RelaySession{ + Provider: ts.providers[0].address.String(), + ContentHash: []byte(ts.spec.Apis[0].Name), + SessionId: uint64(1), + ChainID: ts.spec.Name, + CuSum: cuSum, + BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), + RelayNum: 0, + QoSReport: QoS, } QoS.ComputeQoS() - 
relayRequest.Sig, err = sigs.SignRelay(ts.clients[0].secretKey, *relayRequest) + relaySession.Sig, err = sigs.SignRelay(ts.clients[0].secretKey, *relaySession) require.Nil(t, err) currentEpoch := ts.keepers.Epochstorage.GetEpochStart(sdk.UnwrapSDKContext(ts.ctx)) @@ -860,20 +837,22 @@ func TestRelayPaymentDataReliability(t *testing.T) { var relayReply *types.RelayReply var nonce uint32 // increasing the nonce changes the hash of the reply which in turn produces a different vrfRes resulting to a different index + relayRequest := createStubRequest(relaySession, nil) for { relayReply = &types.RelayReply{ Nonce: nonce, } + relayReply.Sig, err = sigs.SignRelayResponse(ts.providers[0].secretKey, relayReply, relayRequest) require.Nil(t, err) - vrfRes0, _ := utils.CalculateVrfOnRelay(relayRequest, relayReply, ts.clients[0].vrfSk, currentEpoch) + vrfRes0, _ := utils.CalculateVrfOnRelay(relayRequest.RelayData, relayReply, ts.clients[0].vrfSk, currentEpoch) require.Nil(t, err) index0, err = utils.GetIndexForVrf(vrfRes0, uint32(ts.keepers.Pairing.ServicersToPairCountRaw(sdk.UnwrapSDKContext(ts.ctx))), ts.spec.ReliabilityThreshold) require.Nil(t, err) - providers, err = ts.keepers.Pairing.GetPairingForClient(sdk.UnwrapSDKContext(ts.ctx), relayRequest.ChainID, ts.clients[0].address) + providers, err = ts.keepers.Pairing.GetPairingForClient(sdk.UnwrapSDKContext(ts.ctx), relaySession.ChainID, ts.clients[0].address) require.Nil(t, err) if providers[index0].Address != ts.providers[0].address.String() { @@ -882,14 +861,14 @@ func TestRelayPaymentDataReliability(t *testing.T) { nonce += 1 } } - vrf_res0, vrf_proof0 := utils.ProveVrfOnRelay(relayRequest, relayReply, ts.clients[0].vrfSk, false, currentEpoch) + vrf_res0, vrf_proof0 := utils.ProveVrfOnRelay(relayRequest.RelayData, relayReply, ts.clients[0].vrfSk, false, currentEpoch) dataReliability0 := &types.VRFData{ Differentiator: false, VrfValue: vrf_res0, VrfProof: vrf_proof0, ProviderSig: relayReply.Sig, AllDataHash: sigs.AllDataHash(relayReply, relayRequest), - QueryHash: utils.CalculateQueryHash(*relayRequest), + QueryHash: utils.CalculateQueryHash(*relayRequest.RelayData), Sig: nil, } dataReliability0.Sig, err = sigs.SignVRFData(ts.clients[0].secretKey, dataReliability0) @@ -923,28 +902,25 @@ func TestRelayPaymentDataReliability(t *testing.T) { } QoSDR := &types.QualityOfServiceReport{Latency: sdk.NewDecWithPrec(1, 0), Availability: sdk.NewDecWithPrec(1, 0), Sync: sdk.NewDecWithPrec(1, 0)} - relayRequestWithDataReliability0 := &types.RelayRequest{ - Provider: providers[index0].Address, - ApiUrl: "", - Data: []byte(ts.spec.Apis[0].Name), - SessionId: uint64(1), - ChainID: ts.spec.Name, - CuSum: cuSum, - BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), - RelayNum: 0, - RequestBlock: -1, - DataReliability: dataReliability0, - QoSReport: QoSDR, + relayRequestWithDataReliability0 := &types.RelaySession{ + Provider: providers[index0].Address, + ContentHash: []byte(ts.spec.Apis[0].Name), + SessionId: uint64(1), + ChainID: ts.spec.Name, + CuSum: cuSum, + BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), + RelayNum: 0, + QoSReport: QoSDR, } QoSDR.ComputeQoS() relayRequestWithDataReliability0.Sig, err = sigs.SignRelay(ts.clients[0].secretKey, *relayRequestWithDataReliability0) require.Nil(t, err) provider := ts.getProvider(providers[index0].Address) - relaysRequests := []*types.RelayRequest{relayRequestWithDataReliability0} - + relaysRequests := []*types.RelaySession{relayRequestWithDataReliability0} + dataReliabilities := 
[]*types.VRFData{dataReliability0} balanceBefore := ts.keepers.BankKeeper.GetBalance(sdk.UnwrapSDKContext(ts.ctx), provider.address, epochstoragetypes.TokenDenom).Amount.Int64() - _, err = ts.servers.PairingServer.RelayPayment(ts.ctx, &types.MsgRelayPayment{Creator: provider.address.String(), Relays: relaysRequests}) + _, err = ts.servers.PairingServer.RelayPayment(ts.ctx, &types.MsgRelayPayment{Creator: provider.address.String(), Relays: relaysRequests, VRFs: dataReliabilities}) if tt.valid { require.Nil(t, err) @@ -980,22 +956,19 @@ func TestRelayPaymentDataReliabilityWrongProvider(t *testing.T) { cuSum := ts.spec.Apis[0].ComputeUnits * 10 QoS := &types.QualityOfServiceReport{Latency: sdk.NewDecWithPrec(1, 0), Availability: sdk.NewDecWithPrec(1, 0), Sync: sdk.NewDecWithPrec(1, 0)} - relayRequest := &types.RelaySession{ - Provider: ts.providers[0].address.String(), - ApiUrl: "", - Data: []byte(ts.spec.Apis[0].Name), - SessionId: uint64(1), - ChainID: ts.spec.Name, - CuSum: cuSum, - BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), - RelayNum: 0, - RequestBlock: -1, - DataReliability: nil, - QoSReport: QoS, + relaySession := &types.RelaySession{ + Provider: ts.providers[0].address.String(), + ContentHash: []byte(ts.spec.Apis[0].Name), + SessionId: uint64(1), + ChainID: ts.spec.Name, + CuSum: cuSum, + BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), + RelayNum: 0, + QoSReport: QoS, } QoS.ComputeQoS() - relayRequest.Sig, err = sigs.SignRelay(ts.clients[0].secretKey, *relayRequest) + relaySession.Sig, err = sigs.SignRelay(ts.clients[0].secretKey, *relaySession) require.Nil(t, err) currentEpoch := ts.keepers.Epochstorage.GetEpochStart(sdk.UnwrapSDKContext(ts.ctx)) @@ -1005,6 +978,7 @@ func TestRelayPaymentDataReliabilityWrongProvider(t *testing.T) { var nonce uint32 wrongProviderIndex := 1 + relayRequest := createStubRequest(relaySession, nil) GetWrongProvider: for { relayReply = &types.RelayReply{ @@ -1013,12 +987,12 @@ GetWrongProvider: relayReply.Sig, err = sigs.SignRelayResponse(ts.providers[0].secretKey, relayReply, relayRequest) require.Nil(t, err) - vrfRes0, vrfRes1 := utils.CalculateVrfOnRelay(relayRequest, relayReply, ts.clients[0].vrfSk, currentEpoch) + vrfRes0, vrfRes1 := utils.CalculateVrfOnRelay(relayRequest.RelayData, relayReply, ts.clients[0].vrfSk, currentEpoch) index0, _ = utils.GetIndexForVrf(vrfRes0, uint32(ts.keepers.Pairing.ServicersToPairCountRaw(sdk.UnwrapSDKContext(ts.ctx))), ts.spec.ReliabilityThreshold) index1, _ := utils.GetIndexForVrf(vrfRes1, uint32(ts.keepers.Pairing.ServicersToPairCountRaw(sdk.UnwrapSDKContext(ts.ctx))), ts.spec.ReliabilityThreshold) - providers, err = ts.keepers.Pairing.GetPairingForClient(sdk.UnwrapSDKContext(ts.ctx), relayRequest.ChainID, ts.clients[0].address) + providers, err = ts.keepers.Pairing.GetPairingForClient(sdk.UnwrapSDKContext(ts.ctx), relaySession.ChainID, ts.clients[0].address) require.Nil(t, err) // two providers returned by GetIndexForVrf and the provider getting tested need 1 more to perform this test properly require.Greater(t, len(providers), 3) @@ -1040,14 +1014,14 @@ GetWrongProvider: } } - vrf_res0, vrf_proof0 := utils.ProveVrfOnRelay(relayRequest, relayReply, ts.clients[0].vrfSk, false, currentEpoch) + vrf_res0, vrf_proof0 := utils.ProveVrfOnRelay(relayRequest.RelayData, relayReply, ts.clients[0].vrfSk, false, currentEpoch) dataReliability0 := &types.VRFData{ Differentiator: false, VrfValue: vrf_res0, VrfProof: vrf_proof0, ProviderSig: relayReply.Sig, AllDataHash: sigs.AllDataHash(relayReply, 
relayRequest), - QueryHash: utils.CalculateQueryHash(*relayRequest), + QueryHash: utils.CalculateQueryHash(*relayRequest.RelayData), Sig: nil, } dataReliability0.Sig, err = sigs.SignVRFData(ts.clients[0].secretKey, dataReliability0) @@ -1055,17 +1029,14 @@ GetWrongProvider: QoSDR := &types.QualityOfServiceReport{Latency: sdk.NewDecWithPrec(1, 0), Availability: sdk.NewDecWithPrec(1, 0), Sync: sdk.NewDecWithPrec(1, 0)} relayRequestWithDataReliability0 := &types.RelaySession{ - Provider: providers[wrongProviderIndex].Address, - ApiUrl: "", - Data: []byte(ts.spec.Apis[0].Name), - SessionId: uint64(1), - ChainID: ts.spec.Name, - CuSum: cuSum, - BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), - RelayNum: 0, - RequestBlock: -1, - DataReliability: dataReliability0, - QoSReport: QoSDR, + Provider: providers[wrongProviderIndex].Address, + ContentHash: []byte(ts.spec.Apis[0].Name), + SessionId: uint64(1), + ChainID: ts.spec.Name, + CuSum: cuSum, + BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), + RelayNum: 0, + QoSReport: QoSDR, } QoSDR.ComputeQoS() relayRequestWithDataReliability0.Sig, err = sigs.SignRelay(ts.clients[0].secretKey, *relayRequestWithDataReliability0) @@ -1073,8 +1044,8 @@ GetWrongProvider: provider := ts.getProvider(providers[wrongProviderIndex].Address) relaysRequests := []*types.RelaySession{relayRequestWithDataReliability0} - - _, err = ts.servers.PairingServer.RelayPayment(ts.ctx, &types.MsgRelayPayment{Creator: provider.address.String(), Relays: relaysRequests}) + dataReliabilities := []*types.VRFData{dataReliability0} + _, err = ts.servers.PairingServer.RelayPayment(ts.ctx, &types.MsgRelayPayment{Creator: provider.address.String(), Relays: relaysRequests, VRFs: dataReliabilities}) require.NotNil(t, err) } @@ -1097,23 +1068,22 @@ func TestRelayPaymentDataReliabilityBelowReliabilityThreshold(t *testing.T) { cuSum := ts.spec.Apis[0].ComputeUnits * 10 QoS := &types.QualityOfServiceReport{Latency: sdk.NewDecWithPrec(1, 0), Availability: sdk.NewDecWithPrec(1, 0), Sync: sdk.NewDecWithPrec(1, 0)} - relayRequest := &types.RelaySession{ - Provider: ts.providers[0].address.String(), - ApiUrl: "", - Data: []byte(ts.spec.Apis[0].Name), - SessionId: uint64(1), - ChainID: ts.spec.Name, - CuSum: cuSum, - BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), - RelayNum: 0, - RequestBlock: -1, - DataReliability: nil, - QoSReport: QoS, + relaySession := &types.RelaySession{ + Provider: ts.providers[0].address.String(), + ContentHash: []byte(ts.spec.Apis[0].Name), + SessionId: uint64(1), + ChainID: ts.spec.Name, + CuSum: cuSum, + BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), + RelayNum: 0, + QoSReport: QoS, } QoS.ComputeQoS() - relayRequest.Sig, err = sigs.SignRelay(ts.clients[0].secretKey, *relayRequest) + relaySession.Sig, err = sigs.SignRelay(ts.clients[0].secretKey, *relaySession) require.Nil(t, err) + relayRequest := createStubRequest(relaySession, nil) + var relayReply *types.RelayReply var nonce uint32 relayReply = &types.RelayReply{ @@ -1123,21 +1093,21 @@ func TestRelayPaymentDataReliabilityBelowReliabilityThreshold(t *testing.T) { require.Nil(t, err) currentEpoch := ts.keepers.Epochstorage.GetEpochStart(sdk.UnwrapSDKContext(ts.ctx)) - vrfRes0, vrfRes1 := utils.CalculateVrfOnRelay(relayRequest, relayReply, ts.clients[0].vrfSk, currentEpoch) + vrfRes0, vrfRes1 := utils.CalculateVrfOnRelay(relayRequest.RelayData, relayReply, ts.clients[0].vrfSk, currentEpoch) index0, _ := utils.GetIndexForVrf(vrfRes0, 
uint32(ts.keepers.Pairing.ServicersToPairCountRaw(sdk.UnwrapSDKContext(ts.ctx))), ts.spec.ReliabilityThreshold) index1, _ := utils.GetIndexForVrf(vrfRes1, uint32(ts.keepers.Pairing.ServicersToPairCountRaw(sdk.UnwrapSDKContext(ts.ctx))), ts.spec.ReliabilityThreshold) require.Equal(t, index0, int64(-1)) require.Equal(t, index1, int64(-1)) - vrf_res0, vrf_proof0 := utils.ProveVrfOnRelay(relayRequest, relayReply, ts.clients[0].vrfSk, false, currentEpoch) + vrf_res0, vrf_proof0 := utils.ProveVrfOnRelay(relayRequest.RelayData, relayReply, ts.clients[0].vrfSk, false, currentEpoch) dataReliability0 := &types.VRFData{ Differentiator: false, VrfValue: vrf_res0, VrfProof: vrf_proof0, ProviderSig: relayReply.Sig, AllDataHash: sigs.AllDataHash(relayReply, relayRequest), - QueryHash: utils.CalculateQueryHash(*relayRequest), + QueryHash: utils.CalculateQueryHash(*relayRequest.RelayData), Sig: nil, } dataReliability0.Sig, err = sigs.SignVRFData(ts.clients[0].secretKey, dataReliability0) @@ -1147,25 +1117,22 @@ func TestRelayPaymentDataReliabilityBelowReliabilityThreshold(t *testing.T) { for _, provider := range ts.providers { QoSDR := &types.QualityOfServiceReport{Latency: sdk.NewDecWithPrec(1, 0), Availability: sdk.NewDecWithPrec(1, 0), Sync: sdk.NewDecWithPrec(1, 0)} relayRequestWithDataReliability0 := &types.RelaySession{ - Provider: provider.address.String(), - ApiUrl: "", - Data: []byte(ts.spec.Apis[0].Name), - SessionId: uint64(1), - ChainID: ts.spec.Name, - CuSum: cuSum, - BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), - RelayNum: 0, - RequestBlock: -1, - DataReliability: dataReliability0, - QoSReport: QoSDR, + Provider: provider.address.String(), + ContentHash: []byte(ts.spec.Apis[0].Name), + SessionId: uint64(1), + ChainID: ts.spec.Name, + CuSum: cuSum, + BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), + RelayNum: 0, + QoSReport: QoSDR, } QoSDR.ComputeQoS() relayRequestWithDataReliability0.Sig, err = sigs.SignRelay(ts.clients[0].secretKey, *relayRequestWithDataReliability0) require.Nil(t, err) relaysRequests := []*types.RelaySession{relayRequestWithDataReliability0} - - _, err = ts.servers.PairingServer.RelayPayment(ts.ctx, &types.MsgRelayPayment{Creator: provider.address.String(), Relays: relaysRequests}) + dataReliabilities := []*types.VRFData{dataReliability0} + _, err = ts.servers.PairingServer.RelayPayment(ts.ctx, &types.MsgRelayPayment{Creator: provider.address.String(), Relays: relaysRequests, VRFs: dataReliabilities}) require.NotNil(t, err) } } @@ -1188,21 +1155,18 @@ func TestRelayPaymentDataReliabilityDifferentClientSign(t *testing.T) { cuSum := ts.spec.Apis[0].ComputeUnits * 10 QoS := &types.QualityOfServiceReport{Latency: sdk.NewDecWithPrec(1, 0), Availability: sdk.NewDecWithPrec(1, 0), Sync: sdk.NewDecWithPrec(1, 0)} - relayRequest := &types.RelaySession{ - Provider: ts.providers[0].address.String(), - ApiUrl: "", - Data: []byte(ts.spec.Apis[0].Name), - SessionId: uint64(1), - ChainID: ts.spec.Name, - CuSum: cuSum, - BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), - RelayNum: 0, - RequestBlock: -1, - DataReliability: nil, - QoSReport: QoS, + relaySession := &types.RelaySession{ + Provider: ts.providers[0].address.String(), + ContentHash: []byte(ts.spec.Apis[0].Name), + SessionId: uint64(1), + ChainID: ts.spec.Name, + CuSum: cuSum, + BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), + RelayNum: 0, + QoSReport: QoS, } QoS.ComputeQoS() - relayRequest.Sig, err = sigs.SignRelay(ts.clients[0].secretKey, *relayRequest) + relaySession.Sig, err = 
sigs.SignRelay(ts.clients[0].secretKey, *relaySession) require.Nil(t, err) currentEpoch := ts.keepers.Epochstorage.GetEpochStart(sdk.UnwrapSDKContext(ts.ctx)) @@ -1210,6 +1174,8 @@ func TestRelayPaymentDataReliabilityDifferentClientSign(t *testing.T) { var providers []epochstoragetypes.StakeEntry var relayReply *types.RelayReply var nonce uint32 + relayRequest := createStubRequest(relaySession, nil) + for { relayReply = &types.RelayReply{ Nonce: nonce, @@ -1217,11 +1183,11 @@ func TestRelayPaymentDataReliabilityDifferentClientSign(t *testing.T) { relayReply.Sig, err = sigs.SignRelayResponse(ts.providers[0].secretKey, relayReply, relayRequest) require.Nil(t, err) - vrfRes0, _ := utils.CalculateVrfOnRelay(relayRequest, relayReply, ts.clients[0].vrfSk, currentEpoch) + vrfRes0, _ := utils.CalculateVrfOnRelay(relayRequest.RelayData, relayReply, ts.clients[0].vrfSk, currentEpoch) index0, _ = utils.GetIndexForVrf(vrfRes0, uint32(ts.keepers.Pairing.ServicersToPairCountRaw(sdk.UnwrapSDKContext(ts.ctx))), ts.spec.ReliabilityThreshold) - providers, err = ts.keepers.Pairing.GetPairingForClient(sdk.UnwrapSDKContext(ts.ctx), relayRequest.ChainID, ts.clients[0].address) + providers, err = ts.keepers.Pairing.GetPairingForClient(sdk.UnwrapSDKContext(ts.ctx), relaySession.ChainID, ts.clients[0].address) require.Nil(t, err) if providers[index0].Address != ts.providers[0].address.String() { @@ -1230,14 +1196,14 @@ func TestRelayPaymentDataReliabilityDifferentClientSign(t *testing.T) { nonce += 1 } - vrf_res0, vrf_proof0 := utils.ProveVrfOnRelay(relayRequest, relayReply, ts.clients[0].vrfSk, false, currentEpoch) + vrf_res0, vrf_proof0 := utils.ProveVrfOnRelay(relayRequest.RelayData, relayReply, ts.clients[0].vrfSk, false, currentEpoch) dataReliability0 := &types.VRFData{ Differentiator: false, VrfValue: vrf_res0, VrfProof: vrf_proof0, ProviderSig: relayReply.Sig, AllDataHash: sigs.AllDataHash(relayReply, relayRequest), - QueryHash: utils.CalculateQueryHash(*relayRequest), + QueryHash: utils.CalculateQueryHash(*relayRequest.RelayData), Sig: nil, } dataReliability0.Sig, err = sigs.SignVRFData(ts.clients[1].secretKey, dataReliability0) @@ -1245,17 +1211,14 @@ func TestRelayPaymentDataReliabilityDifferentClientSign(t *testing.T) { QoSDR := &types.QualityOfServiceReport{Latency: sdk.NewDecWithPrec(1, 0), Availability: sdk.NewDecWithPrec(1, 0), Sync: sdk.NewDecWithPrec(1, 0)} relayRequestWithDataReliability0 := &types.RelaySession{ - Provider: providers[index0].Address, - ApiUrl: "", - Data: []byte(ts.spec.Apis[0].Name), - SessionId: uint64(1), - ChainID: ts.spec.Name, - CuSum: cuSum, - BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), - RelayNum: 0, - RequestBlock: -1, - DataReliability: dataReliability0, - QoSReport: QoSDR, + Provider: providers[index0].Address, + ContentHash: []byte(ts.spec.Apis[0].Name), + SessionId: uint64(1), + ChainID: ts.spec.Name, + CuSum: cuSum, + BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), + RelayNum: 0, + QoSReport: QoSDR, } QoSDR.ComputeQoS() relayRequestWithDataReliability0.Sig, err = sigs.SignRelay(ts.clients[1].secretKey, *relayRequestWithDataReliability0) @@ -1263,8 +1226,8 @@ func TestRelayPaymentDataReliabilityDifferentClientSign(t *testing.T) { provider := ts.getProvider(providers[index0].Address) relaysRequests := []*types.RelaySession{relayRequestWithDataReliability0} - - _, err = ts.servers.PairingServer.RelayPayment(ts.ctx, &types.MsgRelayPayment{Creator: provider.address.String(), Relays: relaysRequests}) + dataReliabilities := 
[]*types.VRFData{dataReliability0} + _, err = ts.servers.PairingServer.RelayPayment(ts.ctx, &types.MsgRelayPayment{Creator: provider.address.String(), Relays: relaysRequests, VRFs: dataReliabilities}) require.NotNil(t, err) } @@ -1286,22 +1249,19 @@ func TestRelayPaymentDataReliabilityDoubleSpendDifferentEpoch(t *testing.T) { cuSum := ts.spec.Apis[0].ComputeUnits * 10 QoS := &types.QualityOfServiceReport{Latency: sdk.NewDecWithPrec(1, 0), Availability: sdk.NewDecWithPrec(1, 0), Sync: sdk.NewDecWithPrec(1, 0)} - relayRequest := &types.RelaySession{ - Provider: ts.providers[0].address.String(), - ApiUrl: "", - Data: []byte(ts.spec.Apis[0].Name), - SessionId: uint64(1), - ChainID: ts.spec.Name, - CuSum: cuSum, - BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), - RelayNum: 0, - RequestBlock: -1, - DataReliability: nil, - QoSReport: QoS, + relaySession := &types.RelaySession{ + Provider: ts.providers[0].address.String(), + ContentHash: []byte(ts.spec.Apis[0].Name), + SessionId: uint64(1), + ChainID: ts.spec.Name, + CuSum: cuSum, + BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), + RelayNum: 0, + QoSReport: QoS, } QoS.ComputeQoS() - relayRequest.Sig, err = sigs.SignRelay(ts.clients[0].secretKey, *relayRequest) + relaySession.Sig, err = sigs.SignRelay(ts.clients[0].secretKey, *relaySession) require.Nil(t, err) currentEpoch := ts.keepers.Epochstorage.GetEpochStart(sdk.UnwrapSDKContext(ts.ctx)) @@ -1309,6 +1269,7 @@ func TestRelayPaymentDataReliabilityDoubleSpendDifferentEpoch(t *testing.T) { var providers []epochstoragetypes.StakeEntry var relayReply *types.RelayReply var nonce uint32 + relayRequest := createStubRequest(relaySession, nil) for { relayReply = &types.RelayReply{ Nonce: nonce, @@ -1316,11 +1277,11 @@ func TestRelayPaymentDataReliabilityDoubleSpendDifferentEpoch(t *testing.T) { relayReply.Sig, err = sigs.SignRelayResponse(ts.providers[0].secretKey, relayReply, relayRequest) require.Nil(t, err) - vrfRes0, _ := utils.CalculateVrfOnRelay(relayRequest, relayReply, ts.clients[0].vrfSk, currentEpoch) + vrfRes0, _ := utils.CalculateVrfOnRelay(relayRequest.RelayData, relayReply, ts.clients[0].vrfSk, currentEpoch) index0, _ = utils.GetIndexForVrf(vrfRes0, uint32(ts.keepers.Pairing.ServicersToPairCountRaw(sdk.UnwrapSDKContext(ts.ctx))), ts.spec.ReliabilityThreshold) - providers, err = ts.keepers.Pairing.GetPairingForClient(sdk.UnwrapSDKContext(ts.ctx), relayRequest.ChainID, ts.clients[0].address) + providers, err = ts.keepers.Pairing.GetPairingForClient(sdk.UnwrapSDKContext(ts.ctx), relaySession.ChainID, ts.clients[0].address) require.Nil(t, err) if providers[index0].Address != ts.providers[0].address.String() { @@ -1330,14 +1291,14 @@ func TestRelayPaymentDataReliabilityDoubleSpendDifferentEpoch(t *testing.T) { } } - vrf_res0, vrf_proof0 := utils.ProveVrfOnRelay(relayRequest, relayReply, ts.clients[0].vrfSk, false, currentEpoch) + vrf_res0, vrf_proof0 := utils.ProveVrfOnRelay(relayRequest.RelayData, relayReply, ts.clients[0].vrfSk, false, currentEpoch) dataReliability0 := &types.VRFData{ Differentiator: false, VrfValue: vrf_res0, VrfProof: vrf_proof0, ProviderSig: relayReply.Sig, AllDataHash: sigs.AllDataHash(relayReply, relayRequest), - QueryHash: utils.CalculateQueryHash(*relayRequest), + QueryHash: utils.CalculateQueryHash(*relayRequest.RelayData), Sig: nil, } dataReliability0.Sig, err = sigs.SignVRFData(ts.clients[0].secretKey, dataReliability0) @@ -1345,17 +1306,14 @@ func TestRelayPaymentDataReliabilityDoubleSpendDifferentEpoch(t *testing.T) { QoSDR := 
&types.QualityOfServiceReport{Latency: sdk.NewDecWithPrec(1, 0), Availability: sdk.NewDecWithPrec(1, 0), Sync: sdk.NewDecWithPrec(1, 0)} relayRequestWithDataReliability0 := &types.RelaySession{ - Provider: providers[index0].Address, - ApiUrl: "", - Data: []byte(ts.spec.Apis[0].Name), - SessionId: uint64(1), - ChainID: ts.spec.Name, - CuSum: cuSum, - BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), - RelayNum: 0, - RequestBlock: -1, - DataReliability: dataReliability0, - QoSReport: QoSDR, + Provider: providers[index0].Address, + ContentHash: []byte(ts.spec.Apis[0].Name), + SessionId: uint64(1), + ChainID: ts.spec.Name, + CuSum: cuSum, + BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), + RelayNum: 0, + QoSReport: QoSDR, } QoSDR.ComputeQoS() relayRequestWithDataReliability0.Sig, err = sigs.SignRelay(ts.clients[0].secretKey, *relayRequestWithDataReliability0) @@ -1363,8 +1321,8 @@ func TestRelayPaymentDataReliabilityDoubleSpendDifferentEpoch(t *testing.T) { provider := ts.getProvider(providers[index0].Address) relaysRequests := []*types.RelaySession{relayRequestWithDataReliability0} - - _, err = ts.servers.PairingServer.RelayPayment(ts.ctx, &types.MsgRelayPayment{Creator: provider.address.String(), Relays: relaysRequests}) + dataReliabilities := []*types.VRFData{dataReliability0} + _, err = ts.servers.PairingServer.RelayPayment(ts.ctx, &types.MsgRelayPayment{Creator: provider.address.String(), Relays: relaysRequests, VRFs: dataReliabilities}) require.Nil(t, err) // Advance Epoch and set block height and resign the tx @@ -1375,7 +1333,7 @@ func TestRelayPaymentDataReliabilityDoubleSpendDifferentEpoch(t *testing.T) { relayRequestWithDataReliability0.Sig, err = sigs.SignRelay(ts.clients[0].secretKey, *relayRequestWithDataReliability0) require.Nil(t, err) - _, err = ts.servers.PairingServer.RelayPayment(ts.ctx, &types.MsgRelayPayment{Creator: provider.address.String(), Relays: relaysRequests}) + _, err = ts.servers.PairingServer.RelayPayment(ts.ctx, &types.MsgRelayPayment{Creator: provider.address.String(), Relays: relaysRequests, VRFs: dataReliabilities}) require.NotNil(t, err) } @@ -1417,25 +1375,22 @@ func TestEpochPaymentDeletion(t *testing.T) { ts.ctx = testkeeper.AdvanceEpoch(ts.ctx, ts.keepers) - relayRequest := &types.RelaySession{ - Provider: ts.providers[0].address.String(), - ApiUrl: "", - Data: []byte(ts.spec.Apis[0].Name), - SessionId: uint64(1), - ChainID: ts.spec.Name, - CuSum: ts.spec.Apis[0].ComputeUnits * 10, - BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), - RelayNum: 0, - RequestBlock: -1, - DataReliability: nil, + relaySession := &types.RelaySession{ + Provider: ts.providers[0].address.String(), + ContentHash: []byte(ts.spec.Apis[0].Name), + SessionId: uint64(1), + ChainID: ts.spec.Name, + CuSum: ts.spec.Apis[0].ComputeUnits * 10, + BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), + RelayNum: 0, } - sig, err := sigs.SignRelay(ts.clients[0].secretKey, *relayRequest) - relayRequest.Sig = sig + sig, err := sigs.SignRelay(ts.clients[0].secretKey, *relaySession) + relaySession.Sig = sig require.Nil(t, err) var Relays []*types.RelaySession - Relays = append(Relays, relayRequest) + Relays = append(Relays, relaySession) balanceProvider := ts.keepers.BankKeeper.GetBalance(sdk.UnwrapSDKContext(ts.ctx), ts.providers[0].address, epochstoragetypes.TokenDenom).Amount.Int64() stakeClient, found, _ := ts.keepers.Epochstorage.GetStakeEntryByAddressCurrent(sdk.UnwrapSDKContext(ts.ctx), epochstoragetypes.ClientKey, ts.spec.Index, ts.clients[0].address) From 
32565d58735bdc9068ec2e8f561ab178cda57a07 Mon Sep 17 00:00:00 2001 From: omer mishael Date: Sat, 11 Mar 2023 11:12:46 +0200 Subject: [PATCH 094/123] fixed gov test --- x/pairing/keeper/msg_server_relay_payment_gov_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x/pairing/keeper/msg_server_relay_payment_gov_test.go b/x/pairing/keeper/msg_server_relay_payment_gov_test.go index b1279b54f8..f2fcb40d23 100644 --- a/x/pairing/keeper/msg_server_relay_payment_gov_test.go +++ b/x/pairing/keeper/msg_server_relay_payment_gov_test.go @@ -892,7 +892,7 @@ func TestRelayPaymentMemoryTransferAfterEpochChangeWithGovParamChange(t *testing require.Nil(t, err) // Add the relay request to the Relays array (for relayPaymentMessage()) - var Relays []*pairingtypes.RelayRequest + var Relays []*pairingtypes.RelaySession Relays = append(Relays, relayRequest) // get payment From 7bf6997a19e9dc4d42c51e2c69e969e9ff55f0ad Mon Sep 17 00:00:00 2001 From: omer mishael Date: Sat, 11 Mar 2023 15:24:13 +0200 Subject: [PATCH 095/123] continue refactor --- proto/pairing/relay.proto | 16 ++- protocol/lavaprotocol/response_builder.go | 2 +- .../rpcprovider/rewardserver/reward_server.go | 31 ++--- protocol/rpcprovider/rpcprovider.go | 2 +- protocol/rpcprovider/rpcprovider_server.go | 125 +++++++++--------- .../statetracker/provider_state_tracker.go | 4 +- protocol/statetracker/tx_sender.go | 4 +- relayer/sigs/sigs.go | 29 +++- x/pairing/keeper/fixation_test.go | 23 ++-- x/pairing/keeper/msg_server_relay_payment.go | 12 +- .../keeper/unresponsive_provider_test.go | 36 ++--- x/pairing/types/message_relay_payment.go | 3 +- 12 files changed, 154 insertions(+), 133 deletions(-) diff --git a/proto/pairing/relay.proto b/proto/pairing/relay.proto index 3c00d77f70..c18a6fa0e2 100644 --- a/proto/pairing/relay.proto +++ b/proto/pairing/relay.proto @@ -56,13 +56,15 @@ message RelayReply { } message VRFData { - bool differentiator = 1; - bytes vrf_value =2; - bytes vrf_proof =3; - bytes provider_sig = 4; - bytes allDataHash = 5; - bytes queryHash = 6; //we only need it for payment later - bytes sig =7; + string chainID = 1; + int64 epoch = 2; + bool differentiator = 3; + bytes vrf_value =4; + bytes vrf_proof =5; + bytes provider_sig = 6; + bytes allDataHash = 7; + bytes queryHash = 8; //we only need it for payment later + bytes sig =9; } message QualityOfServiceReport{ diff --git a/protocol/lavaprotocol/response_builder.go b/protocol/lavaprotocol/response_builder.go index 2c85be1af8..65947584bc 100644 --- a/protocol/lavaprotocol/response_builder.go +++ b/protocol/lavaprotocol/response_builder.go @@ -40,7 +40,7 @@ func SignRelayResponse(consumerAddress sdk.AccAddress, request pairingtypes.Rela return reply, nil } -func ExtractSignerAddress(in *pairingtypes.RelayRequest) (tenderbytes.HexBytes, error) { +func ExtractSignerAddress(in *pairingtypes.RelaySession) (tenderbytes.HexBytes, error) { pubKey, err := sigs.RecoverPubKeyFromRelay(*in) if err != nil { return nil, err diff --git a/protocol/rpcprovider/rewardserver/reward_server.go b/protocol/rpcprovider/rewardserver/reward_server.go index 8066831658..4304027c77 100644 --- a/protocol/rpcprovider/rewardserver/reward_server.go +++ b/protocol/rpcprovider/rewardserver/reward_server.go @@ -29,11 +29,11 @@ type PaymentRequest struct { type ConsumerRewards struct { epoch uint64 consumer string - proofs map[uint64]*pairingtypes.RelayRequest // key is sessionID + proofs map[uint64]*pairingtypes.RelaySession // key is sessionID dataReliabilityProofs []*pairingtypes.VRFData } -func 
(csrw *ConsumerRewards) PrepareRewardsForClaim() (retProofs []*pairingtypes.RelayRequest, errRet error) { +func (csrw *ConsumerRewards) PrepareRewardsForClaim() (retProofs []*pairingtypes.RelaySession, retVRFs []*pairingtypes.VRFData, errRet error) { for _, proof := range csrw.proofs { retProofs = append(retProofs, proof) } @@ -44,7 +44,7 @@ func (csrw *ConsumerRewards) PrepareRewardsForClaim() (retProofs []*pairingtypes if idx > dataReliabilityProofs-1 { break } - retProofs[idx].DataReliability = csrw.dataReliabilityProofs[idx] + retVRFs = append(retVRFs, csrw.dataReliabilityProofs[idx]) } } return @@ -66,24 +66,24 @@ type RewardServer struct { } type RewardsTxSender interface { - TxRelayPayment(ctx context.Context, relayRequests []*pairingtypes.RelayRequest, description string) error + TxRelayPayment(ctx context.Context, relayRequests []*pairingtypes.RelaySession, dataReliabilityProofs []*pairingtypes.VRFData, description string) error GetEpochSizeMultipliedByRecommendedEpochNumToCollectPayment(ctx context.Context) (uint64, error) EarliestBlockInMemory(ctx context.Context) (uint64, error) } -func (rws *RewardServer) SendNewProof(ctx context.Context, proof *pairingtypes.RelayRequest, epoch uint64, consumerAddr string) (existingCU uint64, updatedWithProof bool) { +func (rws *RewardServer) SendNewProof(ctx context.Context, proof *pairingtypes.RelaySession, epoch uint64, consumerAddr string) (existingCU uint64, updatedWithProof bool) { rws.lock.Lock() // assuming 99% of the time we will need to write the new entry so there's no use in doing the read lock first to check stuff defer rws.lock.Unlock() epochRewards, ok := rws.rewards[epoch] if !ok { - proofs := map[uint64]*pairingtypes.RelayRequest{proof.SessionId: proof} + proofs := map[uint64]*pairingtypes.RelaySession{proof.SessionId: proof} consumerRewardsMap := map[string]*ConsumerRewards{consumerAddr: {epoch: epoch, consumer: consumerAddr, proofs: proofs, dataReliabilityProofs: []*pairingtypes.VRFData{}}} rws.rewards[epoch] = &EpochRewards{epoch: epoch, consumerRewards: consumerRewardsMap} return 0, true } consumerRewards, ok := epochRewards.consumerRewards[consumerAddr] if !ok { - proofs := map[uint64]*pairingtypes.RelayRequest{proof.SessionId: proof} + proofs := map[uint64]*pairingtypes.RelaySession{proof.SessionId: proof} consumerRewards := &ConsumerRewards{epoch: epoch, consumer: consumerAddr, proofs: proofs, dataReliabilityProofs: []*pairingtypes.VRFData{}} epochRewards.consumerRewards[consumerAddr] = consumerRewards return 0, true @@ -106,13 +106,13 @@ func (rws *RewardServer) SendNewDataReliabilityProof(ctx context.Context, dataRe defer rws.lock.Unlock() epochRewards, ok := rws.rewards[epoch] if !ok { - consumerRewardsMap := map[string]*ConsumerRewards{consumerAddr: {epoch: epoch, consumer: consumerAddr, proofs: map[uint64]*pairingtypes.RelayRequest{}, dataReliabilityProofs: []*pairingtypes.VRFData{dataReliability}}} + consumerRewardsMap := map[string]*ConsumerRewards{consumerAddr: {epoch: epoch, consumer: consumerAddr, proofs: map[uint64]*pairingtypes.RelaySession{}, dataReliabilityProofs: []*pairingtypes.VRFData{dataReliability}}} rws.rewards[epoch] = &EpochRewards{epoch: epoch, consumerRewards: consumerRewardsMap} return true } consumerRewards, ok := epochRewards.consumerRewards[consumerAddr] if !ok { - consumerRewards := &ConsumerRewards{epoch: epoch, consumer: consumerAddr, proofs: map[uint64]*pairingtypes.RelayRequest{}, dataReliabilityProofs: []*pairingtypes.VRFData{dataReliability}} + consumerRewards := 
&ConsumerRewards{epoch: epoch, consumer: consumerAddr, proofs: map[uint64]*pairingtypes.RelaySession{}, dataReliabilityProofs: []*pairingtypes.VRFData{dataReliability}} epochRewards.consumerRewards[consumerAddr] = consumerRewards return true } @@ -130,7 +130,7 @@ func (rws *RewardServer) UpdateEpoch(epoch uint64) { } func (rws *RewardServer) sendRewardsClaim(ctx context.Context, epoch uint64) error { - rewardsToClaim, err := rws.gatherRewardsForClaim(ctx, epoch) + rewardsToClaim, dataReliabilityProofs, err := rws.gatherRewardsForClaim(ctx, epoch) if err != nil { return err } @@ -150,7 +150,7 @@ func (rws *RewardServer) sendRewardsClaim(ctx context.Context, epoch uint64) err rws.updateCUServiced(relay.CuSum) } if len(rewardsToClaim) > 0 { - err = rws.rewardsTxSender.TxRelayPayment(ctx, rewardsToClaim, strconv.FormatUint(rws.serverID, 10)) + err = rws.rewardsTxSender.TxRelayPayment(ctx, rewardsToClaim, dataReliabilityProofs, strconv.FormatUint(rws.serverID, 10)) if err != nil { return utils.LavaFormatError("failed sending rewards claim", err, nil) } @@ -228,16 +228,16 @@ func (rws *RewardServer) RemoveExpectedPayment(paidCUToFInd uint64, expectedClie return false } -func (rws *RewardServer) gatherRewardsForClaim(ctx context.Context, currentEpoch uint64) (rewardsForClaim []*pairingtypes.RelayRequest, errRet error) { +func (rws *RewardServer) gatherRewardsForClaim(ctx context.Context, currentEpoch uint64) (rewardsForClaim []*pairingtypes.RelaySession, dataReliabilityProofs []*pairingtypes.VRFData, errRet error) { rws.lock.Lock() defer rws.lock.Unlock() blockDistanceForEpochValidity, err := rws.rewardsTxSender.GetEpochSizeMultipliedByRecommendedEpochNumToCollectPayment(ctx) if err != nil { - return nil, utils.LavaFormatError("gatherRewardsForClaim failed to GetEpochSizeMultipliedByRecommendedEpochNumToCollectPayment", err, nil) + return nil, nil, utils.LavaFormatError("gatherRewardsForClaim failed to GetEpochSizeMultipliedByRecommendedEpochNumToCollectPayment", err, nil) } if blockDistanceForEpochValidity > currentEpoch { - return nil, utils.LavaFormatWarning("gatherRewardsForClaim current epoch is too low to claim rewards", nil, &map[string]string{"current epoch": strconv.FormatUint(currentEpoch, 10)}) + return nil, nil, utils.LavaFormatWarning("gatherRewardsForClaim current epoch is too low to claim rewards", nil, &map[string]string{"current epoch": strconv.FormatUint(currentEpoch, 10)}) } activeEpochThreshold := currentEpoch - blockDistanceForEpochValidity for epoch, epochRewards := range rws.rewards { @@ -247,12 +247,13 @@ func (rws *RewardServer) gatherRewardsForClaim(ctx context.Context, currentEpoch } for consumerAddr, rewards := range epochRewards.consumerRewards { - claimables, err := rewards.PrepareRewardsForClaim() + claimables, dataReliabilities, err := rewards.PrepareRewardsForClaim() if err != nil { // can't claim this now continue } rewardsForClaim = append(rewardsForClaim, claimables...) + dataReliabilityProofs = append(dataReliabilityProofs, dataReliabilities...) 
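To make the new return shape concrete, here is a minimal, self-contained Go sketch of the per-consumer bookkeeping this hunk introduces; the types are trimmed stand-ins for pairingtypes.RelaySession and pairingtypes.VRFData rather than the generated structs, and the helper names are illustrative only:

package main

import "fmt"

// trimmed stand-ins for the generated pairing types
type relaySession struct{ SessionId, CuSum uint64 }

type vrfData struct{ Differentiator bool }

// per-consumer accumulation: session proofs keyed by session ID, plus any
// data-reliability (VRF) proofs gathered for the same consumer
type consumerRewards struct {
	proofs                map[uint64]*relaySession
	dataReliabilityProofs []*vrfData
}

// prepareRewardsForClaim returns the session proofs and the VRF proofs as two
// parallel slices, attaching at most one VRF proof per session proof, instead
// of embedding the VRF data inside each session as before.
func (cr *consumerRewards) prepareRewardsForClaim() (sessions []*relaySession, vrfs []*vrfData) {
	for _, proof := range cr.proofs {
		sessions = append(sessions, proof)
	}
	n := len(cr.dataReliabilityProofs)
	if n > len(sessions) {
		n = len(sessions)
	}
	vrfs = append(vrfs, cr.dataReliabilityProofs[:n]...)
	return sessions, vrfs
}

func main() {
	cr := &consumerRewards{
		proofs:                map[uint64]*relaySession{1: {SessionId: 1, CuSum: 10}},
		dataReliabilityProofs: []*vrfData{{Differentiator: false}},
	}
	sessions, vrfs := cr.prepareRewardsForClaim()
	fmt.Println(len(sessions), len(vrfs)) // both slices go into one payment claim
}

The design point is that the signed RelaySession no longer carries a DataReliability field: the VRF proofs travel alongside the sessions, as in the TxRelayPayment(ctx, rewardsToClaim, dataReliabilityProofs, ...) call above and the MsgRelayPayment{Relays: ..., VRFs: ...} usage in the tests earlier in this series, rather than inside them.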
delete(epochRewards.consumerRewards, consumerAddr) } if len(epochRewards.consumerRewards) == 0 { diff --git a/protocol/rpcprovider/rpcprovider.go b/protocol/rpcprovider/rpcprovider.go index fcebcf91be..c122f9881d 100644 --- a/protocol/rpcprovider/rpcprovider.go +++ b/protocol/rpcprovider/rpcprovider.go @@ -46,7 +46,7 @@ type ProviderStateTrackerInf interface { RegisterChainParserForSpecUpdates(ctx context.Context, chainParser chainlib.ChainParser, chainID string) error RegisterReliabilityManagerForVoteUpdates(ctx context.Context, voteUpdatable statetracker.VoteUpdatable, endpointP *lavasession.RPCProviderEndpoint) RegisterForEpochUpdates(ctx context.Context, epochUpdatable statetracker.EpochUpdatable) - TxRelayPayment(ctx context.Context, relayRequests []*pairingtypes.RelayRequest, description string) error + TxRelayPayment(ctx context.Context, relayRequests []*pairingtypes.RelaySession, dataReliabilityProofs []*pairingtypes.VRFData, description string) error SendVoteReveal(voteID string, vote *reliabilitymanager.VoteData) error SendVoteCommitment(voteID string, vote *reliabilitymanager.VoteData) error LatestBlock() int64 diff --git a/protocol/rpcprovider/rpcprovider_server.go b/protocol/rpcprovider/rpcprovider_server.go index 5011daed25..02e274df59 100644 --- a/protocol/rpcprovider/rpcprovider_server.go +++ b/protocol/rpcprovider/rpcprovider_server.go @@ -45,7 +45,7 @@ type ReliabilityManagerInf interface { } type RewardServerInf interface { - SendNewProof(ctx context.Context, proof *pairingtypes.RelayRequest, epoch uint64, consumerAddr string) (existingCU uint64, updatedWithProof bool) + SendNewProof(ctx context.Context, proof *pairingtypes.RelaySession, epoch uint64, consumerAddr string) (existingCU uint64, updatedWithProof bool) SendNewDataReliabilityProof(ctx context.Context, dataReliability *pairingtypes.VRFData, epoch uint64, consumerAddr string) (updatedWithProof bool) SubscribeStarted(consumer string, epoch uint64, subscribeID string) SubscribeEnded(consumer string, epoch uint64, subscribeID string) @@ -83,10 +83,13 @@ func (rpcps *RPCProviderServer) ServeRPCRequests( // function used to handle relay requests from a consumer, it is called by a provider_listener by calling RegisterReceiver func (rpcps *RPCProviderServer) Relay(ctx context.Context, request *pairingtypes.RelayRequest) (*pairingtypes.RelayReply, error) { + if request.RelayData == nil || request.RelaySession == nil { + return nil, utils.LavaFormatError("invalid relay request, internal fields are nil", nil, nil) + } utils.LavaFormatDebug("Provider got relay request", &map[string]string{ - "request.SessionId": strconv.FormatUint(request.SessionId, 10), - "request.relayNumber": strconv.FormatUint(request.RelayNum, 10), - "request.cu": strconv.FormatUint(request.CuSum, 10), + "request.SessionId": strconv.FormatUint(request.RelaySession.SessionId, 10), + "request.relayNumber": strconv.FormatUint(request.RelaySession.RelayNum, 10), + "request.cu": strconv.FormatUint(request.RelaySession.CuSum, 10), }) relaySession, consumerAddress, chainMessage, err := rpcps.initRelay(ctx, request) if err != nil { @@ -100,7 +103,7 @@ func (rpcps *RPCProviderServer) Relay(ctx context.Context, request *pairingtypes err = sdkerrors.Wrapf(relayFailureError, "On relay failure: "+err.Error()) } err = utils.LavaFormatError("TryRelay Failed", err, &map[string]string{ - "request.SessionId": strconv.FormatUint(request.SessionId, 10), + "request.SessionId": strconv.FormatUint(request.RelaySession.SessionId, 10), "request.userAddr": 
consumerAddress.String(), }) } else { @@ -114,8 +117,8 @@ func (rpcps *RPCProviderServer) Relay(ctx context.Context, request *pairingtypes return nil, err } utils.LavaFormatDebug("Provider Finished Relay Successfully", &map[string]string{ - "request.SessionId": strconv.FormatUint(request.SessionId, 10), - "request.relayNumber": strconv.FormatUint(request.RelayNum, 10), + "request.SessionId": strconv.FormatUint(request.RelaySession.SessionId, 10), + "request.relayNumber": strconv.FormatUint(request.RelaySession.RelayNum, 10), }) } else { updated := rpcps.rewardServer.SendNewDataReliabilityProof(ctx, request.DataReliability, relaySession.PairingEpoch, consumerAddress.String()) @@ -123,8 +126,8 @@ func (rpcps *RPCProviderServer) Relay(ctx context.Context, request *pairingtypes return nil, utils.LavaFormatError("existing data reliability proof", lavasession.DataReliabilityAlreadySentThisEpochError, nil) } utils.LavaFormatDebug("Provider Finished DataReliability Relay Successfully", &map[string]string{ - "request.SessionId": strconv.FormatUint(request.SessionId, 10), - "request.relayNumber": strconv.FormatUint(request.RelayNum, 10), + "request.SessionId": strconv.FormatUint(request.RelaySession.SessionId, 10), + "request.relayNumber": strconv.FormatUint(request.RelaySession.RelayNum, 10), }) } } @@ -138,12 +141,12 @@ func (rpcps *RPCProviderServer) initRelay(ctx context.Context, request *pairingt return nil, nil, nil, err } // parse the message to extract the cu and chainMessage for sending it - chainMessage, err = rpcps.chainParser.ParseMsg(request.ApiUrl, request.Data, request.ConnectionType) + chainMessage, err = rpcps.chainParser.ParseMsg(request.RelayData.ApiUrl, request.RelayData.Data, request.RelayData.ConnectionType) if err != nil { return nil, nil, nil, err } relayCU := chainMessage.GetServiceApi().ComputeUnits - err = relaySession.PrepareSessionForUsage(relayCU, request.CuSum) + err = relaySession.PrepareSessionForUsage(relayCU, request.RelaySession.CuSum) if err != nil { // If PrepareSessionForUsage, session lose sync. // We then wrap the error with the SessionOutOfSyncError that has a unique error code. 
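The hunks in this file keep moving the per-session accounting (provider, session id, CU sum, block height, relay number) into request.RelaySession and the query payload (api url, data, connection type, requested block) into request.RelayData. Below is a minimal, self-contained sketch of that split and of the nil-check added at the top of Relay(); the structs are simplified stand-ins with only a few of the real fields, not the generated pairingtypes definitions:

package main

import (
	"errors"
	"fmt"
)

// stand-in for the signed session accounting half of the request
type relaySession struct {
	Provider    string
	SessionId   uint64
	CuSum       uint64
	BlockHeight int64
}

// stand-in for the query payload half of the request
type relayData struct {
	ApiUrl         string
	Data           []byte
	ConnectionType string
	RequestBlock   int64
}

// the wire message now wraps both halves
type relayRequest struct {
	RelaySession *relaySession
	RelayData    *relayData
}

// validateRelayRequest mirrors the guard the provider performs before
// touching either nested struct.
func validateRelayRequest(req *relayRequest) error {
	if req.RelaySession == nil || req.RelayData == nil {
		return errors.New("invalid relay request, internal fields are nil")
	}
	return nil
}

func main() {
	req := &relayRequest{
		RelaySession: &relaySession{Provider: "lava@provider", SessionId: 1, CuSum: 10, BlockHeight: 100},
		RelayData:    &relayData{ApiUrl: "", Data: []byte("getBlock"), ConnectionType: "POST", RequestBlock: -1},
	}
	if err := validateRelayRequest(req); err != nil {
		fmt.Println("reject:", err)
		return
	}
	fmt.Println("accept session", req.RelaySession.SessionId, "requesting block", req.RelayData.RequestBlock)
}

Splitting the message this way lets the session half be signed and stored for payment on its own (SignRelay now takes a RelaySession), while the provider reads routing and parsing inputs only from RelayData, as the verifyRelaySession and TryRelay changes below show.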
@@ -158,16 +161,16 @@ func (rpcps *RPCProviderServer) RelaySubscribe(request *pairingtypes.RelayReques return utils.LavaFormatError("subscribe data reliability not supported", nil, nil) } utils.LavaFormatDebug("Provider got relay subscribe request", &map[string]string{ - "request.SessionId": strconv.FormatUint(request.SessionId, 10), - "request.relayNumber": strconv.FormatUint(request.RelayNum, 10), - "request.cu": strconv.FormatUint(request.CuSum, 10), + "request.SessionId": strconv.FormatUint(request.RelaySession.SessionId, 10), + "request.relayNumber": strconv.FormatUint(request.RelaySession.RelayNum, 10), + "request.cu": strconv.FormatUint(request.RelaySession.CuSum, 10), }) ctx := context.Background() relaySession, consumerAddress, chainMessage, err := rpcps.initRelay(ctx, request) if err != nil { return rpcps.handleRelayErrorStatus(err) } - subscribed, err := rpcps.TryRelaySubscribe(ctx, uint64(request.BlockHeight), srv, chainMessage, consumerAddress, relaySession) // this function does not return until subscription ends + subscribed, err := rpcps.TryRelaySubscribe(ctx, uint64(request.RelaySession.BlockHeight), srv, chainMessage, consumerAddress, relaySession) // this function does not return until subscription ends if subscribed { // meaning we created a subscription and used it for at least a message relayError := rpcps.providerSessionManager.OnSessionDone(relaySession) // TODO: when we pay as u go on subscription this will need to change @@ -179,9 +182,9 @@ func (rpcps *RPCProviderServer) RelaySubscribe(request *pairingtypes.RelayReques return err } utils.LavaFormatDebug("Provider finished subscribing", &map[string]string{ - "request.SessionId": strconv.FormatUint(request.SessionId, 10), - "request.relayNumber": strconv.FormatUint(request.RelayNum, 10), - "request.cu": strconv.FormatUint(request.CuSum, 10), + "request.SessionId": strconv.FormatUint(request.RelaySession.SessionId, 10), + "request.relayNumber": strconv.FormatUint(request.RelaySession.RelayNum, 10), + "request.cu": strconv.FormatUint(request.RelaySession.CuSum, 10), "termination": err.Error(), }) err = nil // we don't want to return an error here @@ -198,11 +201,11 @@ func (rpcps *RPCProviderServer) RelaySubscribe(request *pairingtypes.RelayReques return rpcps.handleRelayErrorStatus(err) } -func (rpcps *RPCProviderServer) SendProof(ctx context.Context, relaySession *lavasession.SingleProviderSession, request *pairingtypes.RelayRequest, consumerAddress sdk.AccAddress) error { - epoch := relaySession.PairingEpoch - storedCU, updatedWithProof := rpcps.rewardServer.SendNewProof(ctx, request.ShallowCopy(), epoch, consumerAddress.String()) - if !updatedWithProof && storedCU > request.CuSum { - rpcps.providerSessionManager.UpdateSessionCU(consumerAddress.String(), epoch, request.SessionId, storedCU) +func (rpcps *RPCProviderServer) SendProof(ctx context.Context, providerSession *lavasession.SingleProviderSession, request *pairingtypes.RelayRequest, consumerAddress sdk.AccAddress) error { + epoch := providerSession.PairingEpoch + storedCU, updatedWithProof := rpcps.rewardServer.SendNewProof(ctx, request.RelaySession, epoch, consumerAddress.String()) + if !updatedWithProof && storedCU > request.RelaySession.CuSum { + rpcps.providerSessionManager.UpdateSessionCU(consumerAddress.String(), epoch, request.RelaySession.SessionId, storedCU) err := utils.LavaFormatError("Cu in relay smaller than existing proof", lavasession.ProviderConsumerCuMisMatch, &map[string]string{"existing_proof_cu": strconv.FormatUint(storedCU, 10)}) return 
rpcps.handleRelayErrorStatus(err) } @@ -278,22 +281,22 @@ func (rpcps *RPCProviderServer) TryRelaySubscribe(ctx context.Context, requestBl // verifies basic relay fields, and gets a provider session func (rpcps *RPCProviderServer) verifyRelaySession(ctx context.Context, request *pairingtypes.RelayRequest) (singleProviderSession *lavasession.SingleProviderSession, extractedConsumerAddress sdk.AccAddress, err error) { - valid := rpcps.providerSessionManager.IsValidEpoch(uint64(request.BlockHeight)) + valid := rpcps.providerSessionManager.IsValidEpoch(uint64(request.RelaySession.BlockHeight)) if !valid { return nil, nil, utils.LavaFormatError("user reported invalid lava block height", nil, &map[string]string{ "current lava block": strconv.FormatInt(rpcps.stateTracker.LatestBlock(), 10), - "requested lava block": strconv.FormatInt(request.BlockHeight, 10), + "requested lava block": strconv.FormatInt(request.RelaySession.BlockHeight, 10), "threshold": strconv.FormatUint(rpcps.providerSessionManager.GetBlockedEpochHeight(), 10), }) } // Check data - err = rpcps.verifyRelayRequestMetaData(request) + err = rpcps.verifyRelayRequestMetaData(request.RelaySession) if err != nil { return nil, nil, utils.LavaFormatError("did not pass relay validation", err, nil) } // check signature - consumerBytes, err := lavaprotocol.ExtractSignerAddress(request) + consumerBytes, err := lavaprotocol.ExtractSignerAddress(request.RelaySession) if err != nil { return nil, nil, utils.LavaFormatError("extract signer address from relay", err, nil) } @@ -304,7 +307,7 @@ func (rpcps *RPCProviderServer) verifyRelaySession(ctx context.Context, request // handle non data reliability relays if request.DataReliability == nil { - singleProviderSession, err = rpcps.getSingleProviderSession(ctx, request, extractedConsumerAddress.String()) + singleProviderSession, err = rpcps.getSingleProviderSession(ctx, request.RelaySession, extractedConsumerAddress.String()) return singleProviderSession, extractedConsumerAddress, err } @@ -313,17 +316,17 @@ func (rpcps *RPCProviderServer) verifyRelaySession(ctx context.Context, request if err != nil { return nil, nil, utils.LavaFormatError("failed data reliability validation", err, nil) } - dataReliabilitySingleProviderSession, err := rpcps.providerSessionManager.GetDataReliabilitySession(extractedConsumerAddress.String(), uint64(request.BlockHeight), request.SessionId, request.RelayNum) + dataReliabilitySingleProviderSession, err := rpcps.providerSessionManager.GetDataReliabilitySession(extractedConsumerAddress.String(), uint64(request.RelaySession.BlockHeight), request.RelaySession.SessionId, request.RelaySession.RelayNum) if err != nil { if lavasession.DataReliabilityAlreadySentThisEpochError.Is(err) { return nil, nil, err } - return nil, nil, utils.LavaFormatError("failed to get a provider data reliability session", err, &map[string]string{"sessionID": strconv.FormatUint(request.SessionId, 10), "consumer": extractedConsumerAddress.String(), "epoch": strconv.FormatInt(request.BlockHeight, 10)}) + return nil, nil, utils.LavaFormatError("failed to get a provider data reliability session", err, &map[string]string{"sessionID": strconv.FormatUint(request.RelaySession.SessionId, 10), "consumer": extractedConsumerAddress.String(), "epoch": strconv.FormatInt(request.RelaySession.BlockHeight, 10)}) } return dataReliabilitySingleProviderSession, extractedConsumerAddress, nil } -func (rpcps *RPCProviderServer) getSingleProviderSession(ctx context.Context, request *pairingtypes.RelayRequest, 
consumerAddressString string) (*lavasession.SingleProviderSession, error) { +func (rpcps *RPCProviderServer) getSingleProviderSession(ctx context.Context, request *pairingtypes.RelaySession, consumerAddressString string) (*lavasession.SingleProviderSession, error) { // regular session, verifies pairing epoch and relay number singleProviderSession, err := rpcps.providerSessionManager.GetSession(consumerAddressString, uint64(request.BlockHeight), request.SessionId, request.RelayNum) if err != nil { @@ -352,22 +355,22 @@ func (rpcps *RPCProviderServer) getSingleProviderSession(ctx context.Context, re return singleProviderSession, nil } -func (rpcps *RPCProviderServer) verifyRelayRequestMetaData(request *pairingtypes.RelayRequest) error { +func (rpcps *RPCProviderServer) verifyRelayRequestMetaData(requestSession *pairingtypes.RelaySession) error { providerAddress := rpcps.providerAddress.String() - if request.Provider != providerAddress { - return utils.LavaFormatError("request had the wrong provider", nil, &map[string]string{"providerAddress": providerAddress, "request_provider": request.Provider}) + if requestSession.Provider != providerAddress { + return utils.LavaFormatError("request had the wrong provider", nil, &map[string]string{"providerAddress": providerAddress, "request_provider": requestSession.Provider}) } - if request.ChainID != rpcps.rpcProviderEndpoint.ChainID { - return utils.LavaFormatError("request had the wrong chainID", nil, &map[string]string{"request_chainID": request.ChainID, "chainID": rpcps.rpcProviderEndpoint.ChainID}) + if requestSession.ChainID != rpcps.rpcProviderEndpoint.ChainID { + return utils.LavaFormatError("request had the wrong chainID", nil, &map[string]string{"request_chainID": requestSession.ChainID, "chainID": rpcps.rpcProviderEndpoint.ChainID}) } return nil } func (rpcps *RPCProviderServer) verifyDataReliabilityRelayRequest(ctx context.Context, request *pairingtypes.RelayRequest, consumerAddress sdk.AccAddress) error { - if request.CuSum != lavasession.DataReliabilityCuSum { - return utils.LavaFormatError("request's CU sum is not equal to the data reliability CU sum", nil, &map[string]string{"cuSum": strconv.FormatUint(request.CuSum, 10), "DataReliabilityCuSum": strconv.Itoa(lavasession.DataReliabilityCuSum)}) + if request.RelaySession.CuSum != lavasession.DataReliabilityCuSum { + return utils.LavaFormatError("request's CU sum is not equal to the data reliability CU sum", nil, &map[string]string{"cuSum": strconv.FormatUint(request.RelaySession.CuSum, 10), "DataReliabilityCuSum": strconv.Itoa(lavasession.DataReliabilityCuSum)}) } - vrf_pk, _, err := rpcps.stateTracker.GetVrfPkAndMaxCuForUser(ctx, consumerAddress.String(), request.ChainID, uint64(request.BlockHeight)) + vrf_pk, _, err := rpcps.stateTracker.GetVrfPkAndMaxCuForUser(ctx, consumerAddress.String(), request.RelaySession.ChainID, uint64(request.RelaySession.BlockHeight)) if err != nil { return utils.LavaFormatError("failed to get vrfpk and maxCURes for data reliability!", err, &map[string]string{ "userAddr": consumerAddress.String(), @@ -377,28 +380,28 @@ func (rpcps *RPCProviderServer) verifyDataReliabilityRelayRequest(ctx context.Co // data reliability is not session dependant, its always sent with sessionID 0 and if not we don't care if vrf_pk == nil { return utils.LavaFormatError("dataReliability Triggered with vrf_pk == nil", nil, - &map[string]string{"requested epoch": strconv.FormatInt(request.BlockHeight, 10), "userAddr": consumerAddress.String()}) + &map[string]string{"requested epoch": 
strconv.FormatInt(request.RelaySession.BlockHeight, 10), "userAddr": consumerAddress.String()}) } // verify the providerSig is indeed a signature by a valid provider on this query valid, index, err := rpcps.VerifyReliabilityAddressSigning(ctx, consumerAddress, request) if err != nil { return utils.LavaFormatError("VerifyReliabilityAddressSigning invalid", err, - &map[string]string{"requested epoch": strconv.FormatInt(request.BlockHeight, 10), "userAddr": consumerAddress.String(), "dataReliability": fmt.Sprintf("%v", request.DataReliability)}) + &map[string]string{"requested epoch": strconv.FormatInt(request.RelaySession.BlockHeight, 10), "userAddr": consumerAddress.String(), "dataReliability": fmt.Sprintf("%v", request.DataReliability)}) } if !valid { return utils.LavaFormatError("invalid DataReliability Provider signing", nil, - &map[string]string{"requested epoch": strconv.FormatInt(request.BlockHeight, 10), "userAddr": consumerAddress.String(), "dataReliability": fmt.Sprintf("%v", request.DataReliability)}) + &map[string]string{"requested epoch": strconv.FormatInt(request.RelaySession.BlockHeight, 10), "userAddr": consumerAddress.String(), "dataReliability": fmt.Sprintf("%v", request.DataReliability)}) } // verify data reliability fields correspond to the right vrf - valid = utils.VerifyVrfProof(request, *vrf_pk, uint64(request.BlockHeight)) + valid = utils.VerifyVrfProof(request, *vrf_pk, uint64(request.RelaySession.BlockHeight)) if !valid { return utils.LavaFormatError("invalid DataReliability fields, VRF wasn't verified with provided proof", nil, - &map[string]string{"requested epoch": strconv.FormatInt(request.BlockHeight, 10), "userAddr": consumerAddress.String(), "dataReliability": fmt.Sprintf("%v", request.DataReliability)}) + &map[string]string{"requested epoch": strconv.FormatInt(request.RelaySession.BlockHeight, 10), "userAddr": consumerAddress.String(), "dataReliability": fmt.Sprintf("%v", request.DataReliability)}) } _, dataReliabilityThreshold := rpcps.chainParser.DataReliabilityParams() - providersCount, err := rpcps.stateTracker.GetProvidersCountForConsumer(ctx, consumerAddress.String(), uint64(request.BlockHeight), request.ChainID) + providersCount, err := rpcps.stateTracker.GetProvidersCountForConsumer(ctx, consumerAddress.String(), uint64(request.RelaySession.BlockHeight), request.RelaySession.ChainID) if err != nil { - return utils.LavaFormatError("VerifyReliabilityAddressSigning failed fetching providers count for consumer", err, &map[string]string{"chainID": request.ChainID, "consumer": consumerAddress.String(), "epoch": strconv.FormatInt(request.BlockHeight, 10)}) + return utils.LavaFormatError("VerifyReliabilityAddressSigning failed fetching providers count for consumer", err, &map[string]string{"chainID": request.RelaySession.ChainID, "consumer": consumerAddress.String(), "epoch": strconv.FormatInt(request.RelaySession.BlockHeight, 10)}) } vrfIndex, vrfErr := utils.GetIndexForVrf(request.DataReliability.VrfValue, providersCount, dataReliabilityThreshold) if vrfErr != nil { @@ -408,8 +411,8 @@ func (rpcps *RPCProviderServer) verifyDataReliabilityRelayRequest(ctx context.Co } return utils.LavaFormatError("Provider identified vrf value in data reliability request does not meet threshold", vrfErr, &map[string]string{ - "requested epoch": strconv.FormatInt(request.BlockHeight, 10), "userAddr": consumerAddress.String(), - "dataReliability": string(dataReliabilityMarshalled), "relayEpochStart": strconv.FormatInt(request.BlockHeight, 10), + "requested epoch": 
strconv.FormatInt(request.RelaySession.BlockHeight, 10), "userAddr": consumerAddress.String(), + "dataReliability": string(dataReliabilityMarshalled), "relayEpochStart": strconv.FormatInt(request.RelaySession.BlockHeight, 10), "vrfIndex": strconv.FormatInt(vrfIndex, 10), "self Index": strconv.FormatInt(index, 10), }) @@ -421,8 +424,8 @@ func (rpcps *RPCProviderServer) verifyDataReliabilityRelayRequest(ctx context.Co } return utils.LavaFormatError("Provider identified invalid vrfIndex in data reliability request, the given index and self index are different", nil, &map[string]string{ - "requested epoch": strconv.FormatInt(request.BlockHeight, 10), "userAddr": consumerAddress.String(), - "dataReliability": string(dataReliabilityMarshalled), "relayEpochStart": strconv.FormatInt(request.BlockHeight, 10), + "requested epoch": strconv.FormatInt(request.RelaySession.BlockHeight, 10), "userAddr": consumerAddress.String(), + "dataReliability": string(dataReliabilityMarshalled), "relayEpochStart": strconv.FormatInt(request.RelaySession.BlockHeight, 10), "vrfIndex": strconv.FormatInt(vrfIndex, 10), "self Index": strconv.FormatInt(index, 10), }) @@ -432,7 +435,7 @@ func (rpcps *RPCProviderServer) verifyDataReliabilityRelayRequest(ctx context.Co } func (rpcps *RPCProviderServer) VerifyReliabilityAddressSigning(ctx context.Context, consumer sdk.AccAddress, request *pairingtypes.RelayRequest) (valid bool, index int64, err error) { - queryHash := utils.CalculateQueryHash(*request) + queryHash := utils.CalculateQueryHash(*request.RelayData) if !bytes.Equal(queryHash, request.DataReliability.QueryHash) { return false, 0, utils.LavaFormatError("query hash mismatch on data reliability message", nil, &map[string]string{"queryHash": string(queryHash), "request QueryHash": string(request.DataReliability.QueryHash)}) @@ -458,7 +461,7 @@ func (rpcps *RPCProviderServer) VerifyReliabilityAddressSigning(ctx context.Cont return false, 0, utils.LavaFormatError("failed converting signer to address", err, &map[string]string{"consumer": consumer.String(), "PubKey": pubKey.Address().String()}) } - return rpcps.stateTracker.VerifyPairing(ctx, consumer.String(), providerAccAddress.String(), uint64(request.BlockHeight), request.ChainID) // return if this pairing is authorised + return rpcps.stateTracker.VerifyPairing(ctx, consumer.String(), providerAccAddress.String(), uint64(request.RelaySession.BlockHeight), request.RelaySession.ChainID) // return if this pairing is authorised } func (rpcps *RPCProviderServer) handleRelayErrorStatus(err error) error { @@ -494,7 +497,7 @@ func (rpcps *RPCProviderServer) TryRelay(ctx context.Context, request *pairingty toBlock := spectypes.LATEST_BLOCK - int64(blockDistanceToFinalization) fromBlock := toBlock - int64(blocksInFinalizationData) + 1 var requestedHashes []*chaintracker.BlockStore - latestBlock, requestedHashes, err = rpcps.reliabilityManager.GetLatestBlockData(fromBlock, toBlock, request.RequestBlock) + latestBlock, requestedHashes, err = rpcps.reliabilityManager.GetLatestBlockData(fromBlock, toBlock, request.RelayData.RequestBlock) if err != nil { if chaintracker.InvalidRequestedSpecificBlock.Is(err) { // specific block is invalid, try again without specific block @@ -503,28 +506,28 @@ func (rpcps *RPCProviderServer) TryRelay(ctx context.Context, request *pairingty return nil, utils.LavaFormatError("error getting range even without specific block", err, &map[string]string{"fromBlock": strconv.FormatInt(fromBlock, 10), "latestBlock": strconv.FormatInt(latestBlock, 10), "toBlock": 
strconv.FormatInt(toBlock, 10)}) } } else { - return nil, utils.LavaFormatError("Could not guarantee data reliability", err, &map[string]string{"requestedBlock": strconv.FormatInt(request.RequestBlock, 10), "latestBlock": strconv.FormatInt(latestBlock, 10), "fromBlock": strconv.FormatInt(fromBlock, 10), "toBlock": strconv.FormatInt(toBlock, 10)}) + return nil, utils.LavaFormatError("Could not guarantee data reliability", err, &map[string]string{"requestedBlock": strconv.FormatInt(request.RelayData.RequestBlock, 10), "latestBlock": strconv.FormatInt(latestBlock, 10), "fromBlock": strconv.FormatInt(fromBlock, 10), "toBlock": strconv.FormatInt(toBlock, 10)}) } } - request.RequestBlock = lavaprotocol.ReplaceRequestedBlock(request.RequestBlock, latestBlock) + request.RelayData.RequestBlock = lavaprotocol.ReplaceRequestedBlock(request.RelayData.RequestBlock, latestBlock) for _, block := range requestedHashes { - if block.Block == request.RequestBlock { + if block.Block == request.RelayData.RequestBlock { requestedBlockHash = []byte(block.Hash) } else { finalizedBlockHashes[block.Block] = block.Hash } } - if requestedBlockHash == nil && request.RequestBlock != spectypes.NOT_APPLICABLE { + if requestedBlockHash == nil && request.RelayData.RequestBlock != spectypes.NOT_APPLICABLE { // avoid using cache, but can still service - utils.LavaFormatWarning("no hash data for requested block", nil, &map[string]string{"requestedBlock": strconv.FormatInt(request.RequestBlock, 10), "latestBlock": strconv.FormatInt(latestBlock, 10)}) + utils.LavaFormatWarning("no hash data for requested block", nil, &map[string]string{"requestedBlock": strconv.FormatInt(request.RelayData.RequestBlock, 10), "latestBlock": strconv.FormatInt(latestBlock, 10)}) } - if request.RequestBlock > latestBlock { + if request.RelayData.RequestBlock > latestBlock { // consumer asked for a block that is newer than our state tracker, we cant sign this for DR - return nil, utils.LavaFormatError("Requested a block that is too new", err, &map[string]string{"requestedBlock": strconv.FormatInt(request.RequestBlock, 10), "latestBlock": strconv.FormatInt(latestBlock, 10)}) + return nil, utils.LavaFormatError("Requested a block that is too new", err, &map[string]string{"requestedBlock": strconv.FormatInt(request.RelayData.RequestBlock, 10), "latestBlock": strconv.FormatInt(latestBlock, 10)}) } - finalized = spectypes.IsFinalizedBlock(request.RequestBlock, latestBlock, blockDistanceToFinalization) + finalized = spectypes.IsFinalizedBlock(request.RelayData.RequestBlock, latestBlock, blockDistanceToFinalization) } cache := rpcps.cache // TODO: handle cache on fork for dataReliability = false diff --git a/protocol/statetracker/provider_state_tracker.go b/protocol/statetracker/provider_state_tracker.go index 845740272b..22268e7725 100644 --- a/protocol/statetracker/provider_state_tracker.go +++ b/protocol/statetracker/provider_state_tracker.go @@ -76,8 +76,8 @@ func (pst *ProviderStateTracker) RegisterPaymentUpdatableForPayments(ctx context payemntUpdater.RegisterPaymentUpdatable(ctx, &paymentUpdatable) } -func (pst *ProviderStateTracker) TxRelayPayment(ctx context.Context, relayRequests []*pairingtypes.RelayRequest, description string) error { - return pst.txSender.TxRelayPayment(ctx, relayRequests, description) +func (pst *ProviderStateTracker) TxRelayPayment(ctx context.Context, relayRequests []*pairingtypes.RelaySession, dataReliabilityProofs []*pairingtypes.VRFData, description string) error { + return pst.txSender.TxRelayPayment(ctx, relayRequests, 
dataReliabilityProofs, description) } func (pst *ProviderStateTracker) SendVoteReveal(voteID string, vote *reliabilitymanager.VoteData) error { diff --git a/protocol/statetracker/tx_sender.go b/protocol/statetracker/tx_sender.go index 142f284206..7dc1f4d563 100644 --- a/protocol/statetracker/tx_sender.go +++ b/protocol/statetracker/tx_sender.go @@ -217,8 +217,8 @@ func NewProviderTxSender(ctx context.Context, clientCtx client.Context, txFactor return ts, nil } -func (pts *ProviderTxSender) TxRelayPayment(ctx context.Context, relayRequests []*pairingtypes.RelayRequest, description string) error { - msg := pairingtypes.NewMsgRelayPayment(pts.clientCtx.FromAddress.String(), relayRequests, description) +func (pts *ProviderTxSender) TxRelayPayment(ctx context.Context, relayRequests []*pairingtypes.RelaySession, dataReliabilityProofs []*pairingtypes.VRFData, description string) error { + msg := pairingtypes.NewMsgRelayPayment(pts.clientCtx.FromAddress.String(), relayRequests, dataReliabilityProofs, description) err := pts.SimulateAndBroadCastTxWithRetryOnSeqMismatch(msg, true) if err != nil { return utils.LavaFormatError("relay_payment - sending Tx Failed", err, nil) diff --git a/relayer/sigs/sigs.go b/relayer/sigs/sigs.go index bb1a08cb6c..f14016cde0 100644 --- a/relayer/sigs/sigs.go +++ b/relayer/sigs/sigs.go @@ -167,14 +167,37 @@ func RecoverPubKeyFromVRFData(vrfData pairingtypes.VRFData) (secp256k1.PubKey, e return pubKey, nil } -func ValidateSignerOnVRFData(signer sdk.AccAddress, dataReliability pairingtypes.VRFData) (valid bool, err error) { +func DataReliabilityByConsumer(vrfs []*pairingtypes.VRFData) (dataReliabilityByConsumer map[string]*pairingtypes.VRFData, err error) { + dataReliabilityByConsumer = map[string]*pairingtypes.VRFData{} + if len(vrfs) == 0 { + return + } + for _, vrf := range vrfs { + signer, err := GetSignerForVRF(*vrf) + if err != nil { + return nil, err + } + dataReliabilityByConsumer[signer.String()] = vrf + } + return dataReliabilityByConsumer, nil +} + +func GetSignerForVRF(dataReliability pairingtypes.VRFData) (signer sdk.AccAddress, err error) { pubKey, err := RecoverPubKeyFromVRFData(dataReliability) if err != nil { - return false, fmt.Errorf("RecoverPubKeyFromVRFData: %w", err) + return nil, fmt.Errorf("RecoverPubKeyFromVRFData: %w", err) } signerAccAddress, err := sdk.AccAddressFromHex(pubKey.Address().String()) // signer if err != nil { - return false, fmt.Errorf("AccAddressFromHex : %w", err) + return nil, fmt.Errorf("AccAddressFromHex : %w", err) + } + return signerAccAddress, nil +} + +func ValidateSignerOnVRFData(signer sdk.AccAddress, dataReliability pairingtypes.VRFData) (valid bool, err error) { + signerAccAddress, err := GetSignerForVRF(dataReliability) + if err != nil { + return false, err } if !signerAccAddress.Equals(signer) { return false, fmt.Errorf("signer on VRFData is not the same as on the original relay request %s, %s", signerAccAddress.String(), signer.String()) diff --git a/x/pairing/keeper/fixation_test.go b/x/pairing/keeper/fixation_test.go index 9ce379773f..2a5e9e6f5a 100644 --- a/x/pairing/keeper/fixation_test.go +++ b/x/pairing/keeper/fixation_test.go @@ -101,17 +101,14 @@ func TestEpochPaymentDeletionWithMemoryShortening(t *testing.T) { epochsToSave, err := ts.keepers.Epochstorage.EpochsToSave(sdk.UnwrapSDKContext(ts.ctx), uint64(sdk.UnwrapSDKContext(ts.ctx).BlockHeight())) require.Nil(t, err) - relayRequest := &pairingtypes.RelayRequest{ - Provider: ts.providers[0].address.String(), - ApiUrl: "", - Data: 
[]byte(ts.spec.Apis[0].Name), - SessionId: uint64(1), - ChainID: ts.spec.Name, - CuSum: ts.spec.Apis[0].ComputeUnits * 10, - BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), - RelayNum: 0, - RequestBlock: -1, - DataReliability: nil, + relayRequest := &pairingtypes.RelaySession{ + Provider: ts.providers[0].address.String(), + ContentHash: []byte(ts.spec.Apis[0].Name), + SessionId: uint64(1), + ChainID: ts.spec.Name, + CuSum: ts.spec.Apis[0].ComputeUnits * 10, + BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), + RelayNum: 0, } sig, err := sigs.SignRelay(ts.clients[0].secretKey, *relayRequest) @@ -119,7 +116,7 @@ func TestEpochPaymentDeletionWithMemoryShortening(t *testing.T) { require.Nil(t, err) // make payment request - _, err = ts.servers.PairingServer.RelayPayment(ts.ctx, &pairingtypes.MsgRelayPayment{Creator: ts.providers[0].address.String(), Relays: []*pairingtypes.RelayRequest{relayRequest}}) + _, err = ts.servers.PairingServer.RelayPayment(ts.ctx, &pairingtypes.MsgRelayPayment{Creator: ts.providers[0].address.String(), Relays: []*pairingtypes.RelaySession{relayRequest}}) require.Nil(t, err) // shorten memory @@ -136,7 +133,7 @@ func TestEpochPaymentDeletionWithMemoryShortening(t *testing.T) { relayRequest.Sig = sig require.Nil(t, err) - _, err = ts.servers.PairingServer.RelayPayment(ts.ctx, &pairingtypes.MsgRelayPayment{Creator: ts.providers[0].address.String(), Relays: []*pairingtypes.RelayRequest{relayRequest}}) + _, err = ts.servers.PairingServer.RelayPayment(ts.ctx, &pairingtypes.MsgRelayPayment{Creator: ts.providers[0].address.String(), Relays: []*pairingtypes.RelaySession{relayRequest}}) require.Nil(t, err) // check that both payments were deleted diff --git a/x/pairing/keeper/msg_server_relay_payment.go b/x/pairing/keeper/msg_server_relay_payment.go index 4a8cb8f0ed..39a862f108 100644 --- a/x/pairing/keeper/msg_server_relay_payment.go +++ b/x/pairing/keeper/msg_server_relay_payment.go @@ -31,7 +31,13 @@ func (k msgServer) RelayPayment(goCtx context.Context, msg *types.MsgRelayPaymen errorLogAndFormat := func(name string, attrs map[string]string, details string) (*types.MsgRelayPaymentResponse, error) { return nil, utils.LavaError(ctx, logger, name, attrs, details) } - for relayIdx, relay := range msg.Relays { + + dataReliabilityByConsumer, err := sigs.DataReliabilityByConsumer(msg.VRFs) + if err != nil { + return errorLogAndFormat("data_reliability_claim", map[string]string{"error": err.Error()}, "error creating dataReliabilityByConsumer") + } + + for _, relay := range msg.Relays { if relay.BlockHeight > ctx.BlockHeight() { return errorLogAndFormat("relay_future_block", map[string]string{"blockheight": string(relay.Sig)}, "relay request for a block in the future") } @@ -82,8 +88,8 @@ func (k msgServer) RelayPayment(goCtx context.Context, msg *types.MsgRelayPaymen payReliability := false // validate data reliability - if len(msg.VRFs) > relayIdx && msg.VRFs[relayIdx] != nil { - vrfData := msg.VRFs[relayIdx] + if vrfData, ok := dataReliabilityByConsumer[clientAddr.String()]; ok { + delete(dataReliabilityByConsumer, clientAddr.String()) details := map[string]string{"client": clientAddr.String(), "provider": providerAddr.String()} if !spec.DataReliabilityEnabled { details["chainID"] = relay.ChainID diff --git a/x/pairing/keeper/unresponsive_provider_test.go b/x/pairing/keeper/unresponsive_provider_test.go index 7db4dbeaac..df02355ba2 100644 --- a/x/pairing/keeper/unresponsive_provider_test.go +++ b/x/pairing/keeper/unresponsive_provider_test.go @@ -46,7 +46,7 @@ 
func TestUnresponsivenessStressTest(t *testing.T) { // create relay requests for that contain complaints about providers with indices 0-100 relayEpoch := sdk.UnwrapSDKContext(ts.ctx).BlockHeight() for clientIndex := 0; clientIndex < testClientAmount; clientIndex++ { // testing testClientAmount of complaints - var Relays []*types.RelayRequest + var Relays []*types.RelaySession // Get pairing for the client to pick a valid provider providersStakeEntries, err := ts.keepers.Pairing.GetPairingForClient(sdk.UnwrapSDKContext(ts.ctx), ts.spec.Name, ts.clients[clientIndex].address) @@ -56,17 +56,14 @@ func TestUnresponsivenessStressTest(t *testing.T) { require.Nil(t, err) // create relay request - relayRequest := &types.RelayRequest{ + relayRequest := &types.RelaySession{ Provider: providerAddress, - ApiUrl: "", - Data: []byte(ts.spec.Apis[0].Name), + ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(0), ChainID: ts.spec.Name, CuSum: ts.spec.Apis[0].ComputeUnits*10 + uint64(clientIndex), BlockHeight: relayEpoch, RelayNum: 0, - RequestBlock: -1, - DataReliability: nil, UnresponsiveProviders: unresponsiveDataList[clientIndex%unresponsiveProviderAmount], // create the complaint } @@ -144,18 +141,15 @@ func TestUnstakingProviderForUnresponsiveness(t *testing.T) { // create relay requests for provider0 that contain complaints about provider1 relayEpoch := sdk.UnwrapSDKContext(ts.ctx).BlockHeight() for clientIndex := 0; clientIndex < testClientAmount; clientIndex++ { // testing testClientAmount of complaints - var Relays []*types.RelayRequest - relayRequest := &types.RelayRequest{ + var Relays []*types.RelaySession + relayRequest := &types.RelaySession{ Provider: ts.providers[0].address.String(), - ApiUrl: "", - Data: []byte(ts.spec.Apis[0].Name), + ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(0), ChainID: ts.spec.Name, CuSum: ts.spec.Apis[0].ComputeUnits*10 + uint64(clientIndex), BlockHeight: relayEpoch, RelayNum: 0, - RequestBlock: -1, - DataReliability: nil, UnresponsiveProviders: unresponsiveProvidersData, // create the complaint } @@ -246,18 +240,15 @@ func TestUnstakingProviderForUnresponsivenessContinueComplainingAfterUnstake(t * // create relay requests for provider0 that contain complaints about provider1 relayEpoch := sdk.UnwrapSDKContext(ts.ctx).BlockHeight() for clientIndex := 0; clientIndex < testClientAmount; clientIndex++ { // testing testClientAmount of complaints - var Relays []*types.RelayRequest - relayRequest := &types.RelayRequest{ + var Relays []*types.RelaySession + relayRequest := &types.RelaySession{ Provider: ts.providers[0].address.String(), - ApiUrl: "", - Data: []byte(ts.spec.Apis[0].Name), + ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(0), ChainID: ts.spec.Name, CuSum: ts.spec.Apis[0].ComputeUnits * 10, BlockHeight: relayEpoch, RelayNum: 0, - RequestBlock: -1, - DataReliability: nil, UnresponsiveProviders: unresponsiveProvidersData, // create the complaint } @@ -297,18 +288,15 @@ func TestUnstakingProviderForUnresponsivenessContinueComplainingAfterUnstake(t * // create more relay requests for provider0 that contain complaints about provider1 (note, sessionID changed) for clientIndex := 0; clientIndex < testClientAmount; clientIndex++ { // testing testClientAmount of complaints - var RelaysAfter []*types.RelayRequest - relayRequest := &types.RelayRequest{ + var RelaysAfter []*types.RelaySession + relayRequest := &types.RelaySession{ Provider: ts.providers[0].address.String(), - ApiUrl: "", - Data: []byte(ts.spec.Apis[0].Name), + 
ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(2), ChainID: ts.spec.Name, CuSum: ts.spec.Apis[0].ComputeUnits * 10, BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), RelayNum: 0, - RequestBlock: -1, - DataReliability: nil, UnresponsiveProviders: unresponsiveProvidersData, // create the complaint } sig, err := sigs.SignRelay(ts.clients[clientIndex].secretKey, *relayRequest) diff --git a/x/pairing/types/message_relay_payment.go b/x/pairing/types/message_relay_payment.go index 3032d71271..fdef6f354b 100644 --- a/x/pairing/types/message_relay_payment.go +++ b/x/pairing/types/message_relay_payment.go @@ -9,10 +9,11 @@ const TypeMsgRelayPayment = "relay_payment" var _ sdk.Msg = &MsgRelayPayment{} -func NewMsgRelayPayment(creator string, relays []*RelayRequest, description string) *MsgRelayPayment { +func NewMsgRelayPayment(creator string, relays []*RelaySession, dataReliabilityProofs []*VRFData, description string) *MsgRelayPayment { return &MsgRelayPayment{ Creator: creator, Relays: relays, + VRFs: dataReliabilityProofs, DescriptionString: description, } } From da185ec60f3e2c2791f278dda350e3c44c3220ec Mon Sep 17 00:00:00 2001 From: Ran Mishael Date: Sun, 12 Mar 2023 10:58:31 +0100 Subject: [PATCH 096/123] provider error update --- protocol/rpcprovider/rpcprovider_server.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/protocol/rpcprovider/rpcprovider_server.go b/protocol/rpcprovider/rpcprovider_server.go index 5011daed25..08737bc104 100644 --- a/protocol/rpcprovider/rpcprovider_server.go +++ b/protocol/rpcprovider/rpcprovider_server.go @@ -280,8 +280,13 @@ func (rpcps *RPCProviderServer) TryRelaySubscribe(ctx context.Context, requestBl func (rpcps *RPCProviderServer) verifyRelaySession(ctx context.Context, request *pairingtypes.RelayRequest) (singleProviderSession *lavasession.SingleProviderSession, extractedConsumerAddress sdk.AccAddress, err error) { valid := rpcps.providerSessionManager.IsValidEpoch(uint64(request.BlockHeight)) if !valid { - return nil, nil, utils.LavaFormatError("user reported invalid lava block height", nil, &map[string]string{ - "current lava block": strconv.FormatInt(rpcps.stateTracker.LatestBlock(), 10), + latestBlock := rpcps.stateTracker.LatestBlock() + errorMessage := "user reported invalid lava block height" + if request.BlockHeight > latestBlock { + errorMessage = "provider is behind user's block height" + } + return nil, nil, utils.LavaFormatError(errorMessage, nil, &map[string]string{ + "current lava block": strconv.FormatInt(latestBlock, 10), "requested lava block": strconv.FormatInt(request.BlockHeight, 10), "threshold": strconv.FormatUint(rpcps.providerSessionManager.GetBlockedEpochHeight(), 10), }) From 2989c23ee5392dc36d13f13d9b087ecffbf98855 Mon Sep 17 00:00:00 2001 From: Ran Mishael Date: Sun, 12 Mar 2023 18:17:46 +0100 Subject: [PATCH 097/123] Bug fixing session --- protocol/chaintracker/chain_tracker.go | 5 ++-- protocol/lavaprotocol/request_builder.go | 1 + .../provider_session_manager_test.go | 12 ++++---- protocol/lavasession/provider_types.go | 30 ++++++++++--------- protocol/rpcprovider/rpcprovider_server.go | 6 ++-- testutil/e2e/e2e.go | 9 +++--- 6 files changed, 35 insertions(+), 28 deletions(-) diff --git a/protocol/chaintracker/chain_tracker.go b/protocol/chaintracker/chain_tracker.go index bd36b1050e..c6656f918e 100644 --- a/protocol/chaintracker/chain_tracker.go +++ b/protocol/chaintracker/chain_tracker.go @@ -13,6 +13,7 @@ import ( "sync/atomic" "time" + sdkerrors 
"github.com/cosmos/cosmos-sdk/types/errors" "github.com/improbable-eng/grpc-web/go/grpcweb" "github.com/lavanet/lava/protocol/lavasession" "github.com/lavanet/lava/utils" @@ -60,10 +61,10 @@ func (cs *ChainTracker) GetLatestBlockData(fromBlock int64, toBlock int64, speci wantedBlocksData := WantedBlocksData{} err = wantedBlocksData.New(fromBlock, toBlock, specificBlock, latestBlock, earliestBlockSaved) if err != nil { - return latestBlock, nil, utils.LavaFormatError("invalid input for GetLatestBlockData", err, &map[string]string{ + return latestBlock, nil, sdkerrors.Wrap(err, fmt.Sprintf("invalid input for GetLatestBlockData %v", &map[string]string{ "fromBlock": strconv.FormatInt(fromBlock, 10), "toBlock": strconv.FormatInt(toBlock, 10), "specificBlock": strconv.FormatInt(specificBlock, 10), "latestBlock": strconv.FormatInt(latestBlock, 10), "earliestBlockSaved": strconv.FormatInt(earliestBlockSaved, 10), - }) + })) } for _, blocksQueueIdx := range wantedBlocksData.IterationIndexes() { diff --git a/protocol/lavaprotocol/request_builder.go b/protocol/lavaprotocol/request_builder.go index 276aabdb67..bf2202187e 100644 --- a/protocol/lavaprotocol/request_builder.go +++ b/protocol/lavaprotocol/request_builder.go @@ -145,6 +145,7 @@ func ConstructDataReliabilityRelayRequest(ctx context.Context, vrfData *pairingt QoSReport: nil, DataReliability: vrfData, UnresponsiveProviders: reportedProviders, + ApiInterface: relayRequestCommonData.ApiInterface, } sig, err := sigs.SignRelay(privKey, *relayRequest) if err != nil { diff --git a/protocol/lavasession/provider_session_manager_test.go b/protocol/lavasession/provider_session_manager_test.go index 60baddc16a..3de5b6a4db 100644 --- a/protocol/lavasession/provider_session_manager_test.go +++ b/protocol/lavasession/provider_session_manager_test.go @@ -53,7 +53,7 @@ func prepareSession(t *testing.T) (*ProviderSessionManager, *SingleProviderSessi require.NotNil(t, sps) // prepare session for usage - err = sps.PrepareSessionForUsage(relayCu, relayCu) + err = sps.PrepareSessionForUsage(relayCu, relayCu, relayNumber) // validate session was prepared successfully require.Nil(t, err) @@ -82,7 +82,7 @@ func prepareDRSession(t *testing.T) (*ProviderSessionManager, *SingleProviderSes require.Empty(t, psm.subscriptionSessionsWithAllConsumers) // // prepare session for usage - sps.PrepareSessionForUsage(relayCu, dataReliabilityRelayCu) + sps.PrepareSessionForUsage(relayCu, dataReliabilityRelayCu, relayNumber) // validate session was prepared successfully require.Equal(t, dataReliabilityRelayCu, sps.LatestRelayCu) @@ -115,7 +115,7 @@ func TestPSMPrepareTwice(t *testing.T) { _, sps := prepareSession(t) // prepare session for usage - err := sps.PrepareSessionForUsage(relayCu, relayCu) + err := sps.PrepareSessionForUsage(relayCu, relayCu, relayNumber) require.Error(t, err) } @@ -202,7 +202,7 @@ func TestPSMUpdateCuMaxCuReached(t *testing.T) { require.NotNil(t, sps) // prepare session with max cu overflow. 
expect an error - err = sps.PrepareSessionForUsage(relayCu, maxCu+relayCu) + err = sps.PrepareSessionForUsage(relayCu, maxCu+relayCu, relayNumber) require.Error(t, err) require.True(t, MaximumCULimitReachedByConsumer.Is(err)) } @@ -220,7 +220,7 @@ func TestPSMCUMisMatch(t *testing.T) { require.NotNil(t, sps) // prepare session with wrong cu and expect mismatch, consumer wants to pay less than spec requires - err = sps.PrepareSessionForUsage(relayCu+1, relayCu) + err = sps.PrepareSessionForUsage(relayCu+1, relayCu, relayNumber) require.Error(t, err) require.True(t, ProviderConsumerCuMisMatch.Is(err)) } @@ -300,7 +300,7 @@ func TestPSMDataReliabilityRetryAfterFailure(t *testing.T) { require.NotNil(t, sps) // // prepare session for usage - sps.PrepareSessionForUsage(relayCu, dataReliabilityRelayCu) + sps.PrepareSessionForUsage(relayCu, dataReliabilityRelayCu, relayNumber) // validate session was prepared successfully require.Equal(t, dataReliabilityRelayCu, sps.LatestRelayCu) diff --git a/protocol/lavasession/provider_types.go b/protocol/lavasession/provider_types.go index 943f963d1c..435e15788f 100644 --- a/protocol/lavasession/provider_types.go +++ b/protocol/lavasession/provider_types.go @@ -240,7 +240,7 @@ func (sps *SingleProviderSession) PrepareDataReliabilitySessionForUsage(relayReq } sps.LatestRelayCu = DataReliabilityCuSum // 1. update latest sps.CuSum = relayRequestTotalCU // 2. update CuSum, if consumer wants to pay more, let it - sps.RelayNum = sps.RelayNum + 1 // 3. update RelayNum, we already verified relayNum is valid in GetDataReliabilitySession. + sps.RelayNum += 1 utils.LavaFormatDebug("PrepareDataReliabilitySessionForUsage", &map[string]string{"relayRequestTotalCU": strconv.FormatUint(relayRequestTotalCU, 10), "sps.LatestRelayCu": strconv.FormatUint(sps.LatestRelayCu, 10), "sps.RelayNum": strconv.FormatUint(sps.RelayNum, 10), @@ -248,7 +248,7 @@ func (sps *SingleProviderSession) PrepareDataReliabilitySessionForUsage(relayReq return nil } -func (sps *SingleProviderSession) PrepareSessionForUsage(cuFromSpec uint64, relayRequestTotalCU uint64) error { +func (sps *SingleProviderSession) PrepareSessionForUsage(cuFromSpec uint64, relayRequestTotalCU uint64, relayNumber uint64) error { err := sps.VerifyLock() // sps is locked if err != nil { return utils.LavaFormatError("sps.verifyLock() failed in PrepareSessionForUsage", err, nil) @@ -259,19 +259,15 @@ func (sps *SingleProviderSession) PrepareSessionForUsage(cuFromSpec uint64, rela return sps.PrepareDataReliabilitySessionForUsage(relayRequestTotalCU) } - utils.LavaFormatDebug("Before Update Normal PrepareSessionForUsage", &map[string]string{"relayRequestTotalCU": strconv.FormatUint(relayRequestTotalCU, 10), - "sps.LatestRelayCu": strconv.FormatUint(sps.LatestRelayCu, 10), - "sps.RelayNum": strconv.FormatUint(sps.RelayNum, 10), - "sps.CuSum": strconv.FormatUint(sps.CuSum, 10), - }) - maxCu := sps.userSessionsParent.atomicReadMaxComputeUnits() if relayRequestTotalCU < sps.CuSum+cuFromSpec { sps.lock.Unlock() // unlock on error return utils.LavaFormatError("CU mismatch PrepareSessionForUsage, Provider and consumer disagree on CuSum", ProviderConsumerCuMisMatch, &map[string]string{ - "relayRequestTotalCU": strconv.FormatUint(relayRequestTotalCU, 10), - "sps.CuSum": strconv.FormatUint(sps.CuSum, 10), - "currentCU": strconv.FormatUint(cuFromSpec, 10), + "request.CuSum": strconv.FormatUint(relayRequestTotalCU, 10), + "provider.CuSum": strconv.FormatUint(sps.CuSum, 10), + "specCU": strconv.FormatUint(cuFromSpec, 10), + "expected": 
strconv.FormatUint(sps.CuSum+cuFromSpec, 10), + "relayNumber": strconv.FormatUint(relayNumber, 10), }) } @@ -285,9 +281,15 @@ func (sps *SingleProviderSession) PrepareSessionForUsage(cuFromSpec uint64, rela return err } // finished validating, can add all info. - sps.LatestRelayCu = cuToAdd // 1. update latest - sps.CuSum += cuToAdd // 2. update CuSum, if consumer wants to pay more, let it - sps.RelayNum = sps.RelayNum + 1 // 3. update RelayNum, we already verified relayNum is valid in GetSession. + sps.LatestRelayCu = cuToAdd // 1. update latest + sps.CuSum += cuToAdd // 2. update CuSum, if consumer wants to pay more, let it + sps.RelayNum = relayNumber // 3. update RelayNum, we already verified relayNum is valid in GetSession. + utils.LavaFormatDebug("Before Update Normal PrepareSessionForUsage", &map[string]string{"relayRequestTotalCU": strconv.FormatUint(relayRequestTotalCU, 10), + "sps.LatestRelayCu": strconv.FormatUint(sps.LatestRelayCu, 10), + "sps.RelayNum": strconv.FormatUint(sps.RelayNum, 10), + "sps.CuSum": strconv.FormatUint(sps.CuSum, 10), + "sps.sessionId": strconv.FormatUint(sps.SessionID, 10), + }) return nil } diff --git a/protocol/rpcprovider/rpcprovider_server.go b/protocol/rpcprovider/rpcprovider_server.go index 08737bc104..ac503ea756 100644 --- a/protocol/rpcprovider/rpcprovider_server.go +++ b/protocol/rpcprovider/rpcprovider_server.go @@ -143,7 +143,7 @@ func (rpcps *RPCProviderServer) initRelay(ctx context.Context, request *pairingt return nil, nil, nil, err } relayCU := chainMessage.GetServiceApi().ComputeUnits - err = relaySession.PrepareSessionForUsage(relayCU, request.CuSum) + err = relaySession.PrepareSessionForUsage(relayCU, request.CuSum, request.RelayNum) if err != nil { // If PrepareSessionForUsage, session lose sync. // We then wrap the error with the SessionOutOfSyncError that has a unique error code. 
@@ -515,6 +515,9 @@ func (rpcps *RPCProviderServer) TryRelay(ctx context.Context, request *pairingty for _, block := range requestedHashes { if block.Block == request.RequestBlock { requestedBlockHash = []byte(block.Hash) + if int64(len(requestedHashes)) == (toBlock - fromBlock + 1) { + finalizedBlockHashes[block.Block] = block.Hash + } } else { finalizedBlockHashes[block.Block] = block.Hash } @@ -569,7 +572,6 @@ func (rpcps *RPCProviderServer) TryRelay(ctx context.Context, request *pairingty return nil, utils.LavaFormatError("failed unmarshaling finalizedBlockHashes", err, &map[string]string{"finalizedBlockHashes": fmt.Sprintf("%v", finalizedBlockHashes)}) } - reply.FinalizedBlocksHashes = jsonStr reply.LatestBlock = latestBlock diff --git a/testutil/e2e/e2e.go b/testutil/e2e/e2e.go index fc1ed8dcb1..de0aabd95a 100644 --- a/testutil/e2e/e2e.go +++ b/testutil/e2e/e2e.go @@ -217,7 +217,7 @@ func (lt *lavaTest) startJSONRPCProvider(ctx context.Context) { } for idx, providerCommand := range providerCommands { - logName := "03_EthProvider_" + fmt.Sprintf("%02d ", idx) + logName := "03_EthProvider_" + fmt.Sprintf("%02d", idx) lt.logs[logName] = new(bytes.Buffer) cmd := exec.CommandContext(ctx, "", "") cmd.Path = lt.lavadPath @@ -416,7 +416,7 @@ func (lt *lavaTest) startLavaProviders(ctx context.Context) { } for idx, providerCommand := range providerCommands { - logName := "05_LavaProvider_" + fmt.Sprintf("%02d ", idx) + logName := "05_LavaProvider_" + fmt.Sprintf("%02d", idx) lt.logs[logName] = new(bytes.Buffer) cmd := exec.CommandContext(ctx, "", "") cmd.Path = lt.lavadPath @@ -570,7 +570,7 @@ func (lt *lavaTest) startRESTProvider(rpcURL string, ctx context.Context) { } for idx, providerCommand := range providerCommands { - logName := "08_restProvider_" + fmt.Sprintf("%02d ", idx) + logName := "08_restProvider_" + fmt.Sprintf("%02d", idx) lt.logs[logName] = new(bytes.Buffer) cmd := exec.CommandContext(ctx, "", "") cmd.Path = lt.lavadPath @@ -661,7 +661,7 @@ func (lt *lavaTest) startGRPCProvider(rpcURL string, ctx context.Context) { } for idx, providerCommand := range providerCommands { - logName := "10_grpcProvider_" + fmt.Sprintf("%02d ", idx) + logName := "10_grpcProvider_" + fmt.Sprintf("%02d", idx) lt.logs[logName] = new(bytes.Buffer) cmd := exec.CommandContext(ctx, "", "") cmd.Path = lt.lavadPath @@ -772,6 +772,7 @@ func (lt *lavaTest) saveLogs() { writer := bufio.NewWriter(file) writer.Write(logBuffer.Bytes()) writer.Flush() + utils.LavaFormatDebug("writing file", &map[string]string{"fileName": fileName, "lines": strconv.Itoa(len(logBuffer.Bytes()))}) file.Close() lines := strings.Split(logBuffer.String(), "\n") From d2a3421751713d3b389b2184d905cdfafe67fd0c Mon Sep 17 00:00:00 2001 From: omer mishael Date: Mon, 13 Mar 2023 18:08:48 +0200 Subject: [PATCH 098/123] successfully build relay after changes --- cmd/lavad/main.go | 127 ---------------------------------------------- 1 file changed, 127 deletions(-) diff --git a/cmd/lavad/main.go b/cmd/lavad/main.go index 0a3c19e18b..dfa07f4216 100644 --- a/cmd/lavad/main.go +++ b/cmd/lavad/main.go @@ -2,7 +2,6 @@ package main import ( "context" - "fmt" "os" "strconv" "strings" @@ -18,8 +17,6 @@ import ( "github.com/lavanet/lava/protocol/rpcconsumer" "github.com/lavanet/lava/protocol/rpcprovider" "github.com/lavanet/lava/relayer" - "github.com/lavanet/lava/relayer/chainproxy" - "github.com/lavanet/lava/relayer/performance" "github.com/lavanet/lava/relayer/sentry" "github.com/lavanet/lava/utils" "github.com/spf13/cobra" @@ -40,108 +37,6 @@ func 
main() { app.New, // this line is used by starport scaffolding # root/arguments ) - - cmdServer := &cobra.Command{ - Use: "server [listen-ip] [listen-port] [node-url] [node-chain-id] [api-interface]", - Short: "server", - Long: `server`, - Args: cobra.ExactArgs(5), - RunE: func(cmd *cobra.Command, args []string) error { - utils.LavaFormatInfo("Provider process started", &map[string]string{"args": strings.Join(args, ",")}) - clientCtx, err := client.GetClientTxContext(cmd) - if err != nil { - return err - } - - // - // TODO: there has to be a better way to send txs - // (cosmosclient was a fail) - - clientCtx.SkipConfirm = true - networkChainId, err := cmd.Flags().GetString(flags.FlagChainID) - if err != nil { - return err - } - txFactory := tx.NewFactoryCLI(clientCtx, cmd.Flags()).WithChainID(networkChainId) - - port, err := strconv.Atoi(args[1]) - if err != nil { - return err - } - - chainID := args[3] - - apiInterface := args[4] - - listenAddr := fmt.Sprintf("%s:%d", args[0], port) - ctx := context.Background() - logLevel, err := cmd.Flags().GetString(flags.FlagLogLevel) - if err != nil { - utils.LavaFormatFatal("failed to read log level flag", err, nil) - } - utils.LoggingLevel(logLevel) - relayer.Server(ctx, clientCtx, txFactory, listenAddr, args[2], chainID, apiInterface, cmd.Flags()) - - return nil - }, - } - - cmdPortalServer := &cobra.Command{ - Use: "portal_server [listen-ip] [listen-port] [relayer-chain-id] [api-interface]", - Short: "portal server", - Long: `portal server`, - Args: cobra.ExactArgs(4), - RunE: func(cmd *cobra.Command, args []string) error { - utils.LavaFormatInfo("Gateway Proxy process started", &map[string]string{"args": strings.Join(args, ",")}) - clientCtx, err := client.GetClientTxContext(cmd) - if err != nil { - return err - } - - port, err := strconv.Atoi(args[1]) - if err != nil { - return err - } - - chainID := args[2] - apiInterface := args[3] - - listenAddr := fmt.Sprintf("%s:%d", args[0], port) - ctx := context.Background() - logLevel, err := cmd.Flags().GetString(flags.FlagLogLevel) - if err != nil { - utils.LavaFormatFatal("failed to read log level flag", err, nil) - } - utils.LoggingLevel(logLevel) - - // check if the command includes --pprof-address - pprofAddressFlagUsed := cmd.Flags().Lookup("pprof-address").Changed - if pprofAddressFlagUsed { - // get pprof server ip address (default value: "") - pprofServerAddress, err := cmd.Flags().GetString("pprof-address") - if err != nil { - utils.LavaFormatFatal("failed to read pprof address flag", err, nil) - } - - // start pprof HTTP server - err = performance.StartPprofServer(pprofServerAddress) - if err != nil { - return utils.LavaFormatError("failed to start pprof HTTP server", err, nil) - } - } - - networkChainId, err := cmd.Flags().GetString(flags.FlagChainID) - if err != nil { - return err - } - txFactory := tx.NewFactoryCLI(clientCtx, cmd.Flags()).WithChainID(networkChainId) - - relayer.PortalServer(ctx, clientCtx, txFactory, listenAddr, chainID, apiInterface, cmd.Flags()) - - return nil - }, - } - cmdTestClient := &cobra.Command{ Use: "test_client [chain-id] [api-interface] [duration-seconds]", Short: "test client", @@ -190,28 +85,6 @@ func main() { // rpc provider cobra command cmdRPCProvider := rpcprovider.CreateRPCProviderCobraCommand() - // Server command flags - flags.AddTxFlagsToCmd(cmdServer) - cmdServer.MarkFlagRequired(flags.FlagFrom) - cmdServer.Flags().String(flags.FlagChainID, app.Name, "network chain id") - cmdServer.Flags().Uint64(sentry.GeolocationFlag, 0, "geolocation to run 
from") - cmdServer.MarkFlagRequired(sentry.GeolocationFlag) - cmdServer.Flags().String(performance.CacheFlagName, "", "address for a cache server to improve performance") - cmdServer.Flags().Uint(chainproxy.ParallelConnectionsFlag, chainproxy.NumberOfParallelConnections, "parallel connections") - cmdServer.Flags().String(chainproxy.TendermintProviderHttpEndpoint, "", "The http endpoint when starting a Tendermint Provider process, otherwise leave empty") - rootCmd.AddCommand(cmdServer) - - // Portal Server command flags - flags.AddTxFlagsToCmd(cmdPortalServer) - cmdPortalServer.MarkFlagRequired(flags.FlagFrom) - cmdPortalServer.Flags().String(flags.FlagChainID, app.Name, "network chain id") - cmdPortalServer.Flags().Uint64(sentry.GeolocationFlag, 0, "geolocation to run from") - cmdPortalServer.MarkFlagRequired(sentry.GeolocationFlag) - cmdPortalServer.Flags().Bool("secure", false, "secure sends reliability on every message") - cmdPortalServer.Flags().String(performance.PprofAddressFlagName, "", "pprof server address, used for code profiling") - cmdPortalServer.Flags().String(performance.CacheFlagName, "", "address for a cache server to improve performance") - rootCmd.AddCommand(cmdPortalServer) - // Test Client command flags flags.AddTxFlagsToCmd(cmdTestClient) cmdTestClient.Flags().String(flags.FlagChainID, app.Name, "network chain id") From 6a37f3f7924ed8a8dfc1cf203ed740635dec9e96 Mon Sep 17 00:00:00 2001 From: omer mishael Date: Mon, 13 Mar 2023 18:10:27 +0200 Subject: [PATCH 099/123] successfully compiled after refactoring relay.pb now to unitesting --- docs/static/openapi.yml | 179 ++- go.mod | 1 - go.sum | 3 - proto/pairing/relay.proto | 6 +- .../lavaprotocol/finalization_consensus.go | 6 +- protocol/lavaprotocol/request_builder.go | 91 +- protocol/lavaprotocol/response_builder.go | 2 +- protocol/rpcconsumer/rpcconsumer_server.go | 31 +- protocol/rpcprovider/provider_listener.go | 4 +- protocol/rpcprovider/rpcprovider_server.go | 4 +- relayer/chainproxy/chainproxy.go | 74 +- relayer/portal_server.go | 101 -- relayer/sentry/sentry.go | 26 +- relayer/server.go | 1348 ----------------- relayer/sigs/sigs.go | 22 +- testutil/common/common.go | 45 +- x/conflict/keeper/conflict.go | 46 +- x/conflict/keeper/msg_server_detection.go | 30 +- .../keeper/msg_server_detection_test.go | 28 +- x/pairing/client/cli/tx_relay_payment.go | 3 +- x/pairing/keeper/msg_server_relay_payment.go | 32 +- .../keeper/msg_server_relay_payment_test.go | 10 + x/pairing/types/relay.pb.go | 256 ++-- x/pairing/types/relay_extensions.go | 13 - x/pairing/types/relay_test.go | 62 - 25 files changed, 609 insertions(+), 1814 deletions(-) delete mode 100644 relayer/portal_server.go delete mode 100644 relayer/server.go delete mode 100644 x/pairing/types/relay_extensions.go delete mode 100644 x/pairing/types/relay_test.go diff --git a/docs/static/openapi.yml b/docs/static/openapi.yml index 8aeeef1c12..5307fa5890 100644 --- a/docs/static/openapi.yml +++ b/docs/static/openapi.yml @@ -54236,6 +54236,11 @@ definitions: sig: type: string format: byte + chainID: + type: string + epoch: + type: string + format: int64 QoSReport: type: object properties: @@ -54248,8 +54253,82 @@ definitions: unresponsive_providers: type: string format: byte - apiInterface: - type: string + relay_session: + type: object + properties: + chainID: + type: string + content_hash: + type: string + format: byte + session_id: + type: string + format: uint64 + cu_sum: + type: string + format: uint64 + title: total compute unit used including this relay + 
provider: + type: string + relay_num: + type: string + format: uint64 + QoSReport: + type: object + properties: + latency: + type: string + availability: + type: string + sync: + type: string + block_height: + type: string + format: int64 + unresponsive_providers: + type: string + format: byte + lava_chain_id: + type: string + sig: + type: string + format: byte + badge: + type: object + properties: + cu_allocation: + type: string + format: uint64 + epoch: + type: string + format: int64 + badge_pk: + type: string + format: byte + project_sig: + type: string + format: byte + relay_data: + type: object + properties: + connection_type: + type: string + api_url: + type: string + title: >- + some relays have associated urls that are filled with params + ('/block/{height}') + data: + type: string + format: byte + request_block: + type: string + format: int64 + apiInterface: + type: string + salt: + type: string + format: byte lavanet.lava.pairing.VRFData: type: object properties: @@ -54274,6 +54353,11 @@ definitions: sig: type: string format: byte + chainID: + type: string + epoch: + type: string + format: int64 lavanet.lava.epochstorage.Endpoint: type: object properties: @@ -54659,6 +54743,21 @@ definitions: epochBlockHash: type: string format: byte + lavanet.lava.pairing.Badge: + type: object + properties: + cu_allocation: + type: string + format: uint64 + epoch: + type: string + format: int64 + badge_pk: + type: string + format: byte + project_sig: + type: string + format: byte lavanet.lava.pairing.CacheUsage: type: object properties: @@ -55220,6 +55319,82 @@ definitions: index: type: string format: int64 + lavanet.lava.pairing.RelayPrivateData: + type: object + properties: + connection_type: + type: string + api_url: + type: string + title: >- + some relays have associated urls that are filled with params + ('/block/{height}') + data: + type: string + format: byte + request_block: + type: string + format: int64 + apiInterface: + type: string + salt: + type: string + format: byte + lavanet.lava.pairing.RelaySession: + type: object + properties: + chainID: + type: string + content_hash: + type: string + format: byte + session_id: + type: string + format: uint64 + cu_sum: + type: string + format: uint64 + title: total compute unit used including this relay + provider: + type: string + relay_num: + type: string + format: uint64 + QoSReport: + type: object + properties: + latency: + type: string + availability: + type: string + sync: + type: string + block_height: + type: string + format: int64 + unresponsive_providers: + type: string + format: byte + lava_chain_id: + type: string + sig: + type: string + format: byte + badge: + type: object + properties: + cu_allocation: + type: string + format: uint64 + epoch: + type: string + format: int64 + badge_pk: + type: string + format: byte + project_sig: + type: string + format: byte lavanet.lava.pairing.UniquePaymentStorageClientProvider: type: object properties: diff --git a/go.mod b/go.mod index 1cbdf2ddf5..cfccfab970 100644 --- a/go.mod +++ b/go.mod @@ -49,7 +49,6 @@ require ( github.com/ghodss/yaml v1.0.0 // indirect github.com/gogo/googleapis v1.4.0 // indirect github.com/golang/glog v1.0.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2 // indirect github.com/pelletier/go-toml/v2 v2.0.5 // indirect golang.org/x/mod v0.7.0 // indirect golang.org/x/tools v0.2.0 // indirect diff --git a/go.sum b/go.sum index 8331621464..b4521d6dbd 100644 --- a/go.sum +++ b/go.sum @@ -692,7 +692,6 @@ github.com/golang/protobuf v1.4.1/go.mod 
h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= @@ -786,8 +785,6 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2 h1:gDLXvp5S9izjldquuoAhDzccbskOL6tDC5jMSyx3zxE= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2/go.mod h1:7pdNwVWBBHGiCxa9lAszqCJMbfTISJ7oMftp8+UGV08= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= github.com/gtank/merlin v0.1.1-0.20191105220539-8318aed1a79f/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= diff --git a/proto/pairing/relay.proto b/proto/pairing/relay.proto index c18a6fa0e2..2577e833d7 100644 --- a/proto/pairing/relay.proto +++ b/proto/pairing/relay.proto @@ -59,12 +59,12 @@ message VRFData { string chainID = 1; int64 epoch = 2; bool differentiator = 3; - bytes vrf_value =4; - bytes vrf_proof =5; + bytes vrf_value = 4; + bytes vrf_proof = 5; bytes provider_sig = 6; bytes allDataHash = 7; bytes queryHash = 8; //we only need it for payment later - bytes sig =9; + bytes sig = 9; } message QualityOfServiceReport{ diff --git a/protocol/lavaprotocol/finalization_consensus.go b/protocol/lavaprotocol/finalization_consensus.go index 6e7bd50928..90e6cff266 100644 --- a/protocol/lavaprotocol/finalization_consensus.go +++ b/protocol/lavaprotocol/finalization_consensus.go @@ -42,7 +42,7 @@ func GetLatestFinalizedBlock(latestBlock int64, blockDistanceForFinalizedData in return latestBlock - finalization_criteria } -func (fc *FinalizationConsensus) newProviderHashesConsensus(blockDistanceForFinalizedData int64, providerAcc string, latestBlock int64, finalizedBlocks map[int64]string, reply *pairingtypes.RelayReply, req *pairingtypes.RelayRequest) ProviderHashesConsensus { +func (fc *FinalizationConsensus) newProviderHashesConsensus(blockDistanceForFinalizedData int64, providerAcc string, latestBlock int64, finalizedBlocks map[int64]string, reply *pairingtypes.RelayReply, req *pairingtypes.RelaySession) ProviderHashesConsensus { newProviderDataContainer := providerDataContainer{ LatestFinalizedBlock: GetLatestFinalizedBlock(latestBlock, blockDistanceForFinalizedData), LatestBlockTime: time.Now(), @@ -61,7 +61,7 @@ func (fc *FinalizationConsensus) newProviderHashesConsensus(blockDistanceForFina } } -func (fc *FinalizationConsensus) insertProviderToConsensus(blockDistanceForFinalizedData int64, consensus *ProviderHashesConsensus, finalizedBlocks map[int64]string, latestBlock int64, reply *pairingtypes.RelayReply, req 
*pairingtypes.RelayRequest, providerAcc string) { +func (fc *FinalizationConsensus) insertProviderToConsensus(blockDistanceForFinalizedData int64, consensus *ProviderHashesConsensus, finalizedBlocks map[int64]string, latestBlock int64, reply *pairingtypes.RelayReply, req *pairingtypes.RelaySession, providerAcc string) { newProviderDataContainer := providerDataContainer{ LatestFinalizedBlock: GetLatestFinalizedBlock(latestBlock, blockDistanceForFinalizedData), LatestBlockTime: time.Now(), @@ -85,7 +85,7 @@ func (fc *FinalizationConsensus) insertProviderToConsensus(blockDistanceForFinal // create new consensus group if no consensus matched // check for discrepancy with old epoch // checks if there is a consensus mismatch between hashes provided by different providers -func (fc *FinalizationConsensus) UpdateFinalizedHashes(blockDistanceForFinalizedData int64, providerAddress string, latestBlock int64, finalizedBlocks map[int64]string, req *pairingtypes.RelayRequest, reply *pairingtypes.RelayReply) (finalizationConflict *conflicttypes.FinalizationConflict, err error) { +func (fc *FinalizationConsensus) UpdateFinalizedHashes(blockDistanceForFinalizedData int64, providerAddress string, latestBlock int64, finalizedBlocks map[int64]string, req *pairingtypes.RelaySession, reply *pairingtypes.RelayReply) (finalizationConflict *conflicttypes.FinalizationConflict, err error) { fc.providerDataContainersMu.Lock() defer fc.providerDataContainersMu.Unlock() diff --git a/protocol/lavaprotocol/request_builder.go b/protocol/lavaprotocol/request_builder.go index 276aabdb67..b9628f2ba9 100644 --- a/protocol/lavaprotocol/request_builder.go +++ b/protocol/lavaprotocol/request_builder.go @@ -3,7 +3,9 @@ package lavaprotocol import ( "bytes" "context" + "encoding/binary" "fmt" + "math/rand" "strconv" "time" @@ -38,39 +40,62 @@ type RelayResult struct { Finalized bool } -func NewRelayRequestCommonData(chainID string, connectionType string, apiUrl string, data []byte, requestBlock int64, apiInterface string) RelayRequestCommonData { - return RelayRequestCommonData{ - ChainID: chainID, +func NewRelayData(connectionType string, apiUrl string, data []byte, requestBlock int64, apiInterface string) *pairingtypes.RelayPrivateData { + nonceBytes := make([]byte, 4) + binary.LittleEndian.PutUint32(nonceBytes, rand.Uint32()) + return &pairingtypes.RelayPrivateData{ ConnectionType: connectionType, ApiUrl: apiUrl, Data: data, RequestBlock: requestBlock, ApiInterface: apiInterface, + Salt: nonceBytes, } } -func ConstructRelayRequest(ctx context.Context, privKey *btcec.PrivateKey, chainID string, relayRequestCommonData RelayRequestCommonData, providerPublicAddress string, consumerSession *lavasession.SingleConsumerSession, epoch int64, reportedProviders []byte) (*pairingtypes.RelayRequest, error) { - relayRequest := &pairingtypes.RelayRequest{ - Provider: providerPublicAddress, - ConnectionType: relayRequestCommonData.ConnectionType, - ApiUrl: relayRequestCommonData.ApiUrl, - Data: relayRequestCommonData.Data, - SessionId: uint64(consumerSession.SessionId), +func ConstructRelaySession(relayRequestData *pairingtypes.RelayPrivateData, chainID string, providerPublicAddress string, consumerSession *lavasession.SingleConsumerSession, epoch int64, reportedProviders []byte) *pairingtypes.RelaySession { + return &pairingtypes.RelaySession{ ChainID: chainID, - CuSum: consumerSession.CuSum + consumerSession.LatestRelayCu, // add the latestRelayCu which will be applied when session is returned properly - BlockHeight: epoch, + ContentHash: 
sigs.CalculateContentHashForRelayData(relayRequestData), + SessionId: uint64(consumerSession.SessionId), + CuSum: consumerSession.CuSum + consumerSession.LatestRelayCu, // add the latestRelayCu which will be applied when session is returned properly, + Provider: providerPublicAddress, RelayNum: consumerSession.RelayNum + lavasession.RelayNumberIncrement, // increment the relay number. which will be applied when session is returned properly - RequestBlock: relayRequestCommonData.RequestBlock, QoSReport: consumerSession.QoSInfo.LastQoSReport, - DataReliability: nil, + BlockHeight: epoch, UnresponsiveProviders: reportedProviders, - ApiInterface: relayRequestCommonData.ApiInterface, + LavaChainId: "FIXMEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE IF IM HERE ITS A BUG", + Sig: nil, } - sig, err := sigs.SignRelay(privKey, *relayRequest) +} + +func dataReliabilityRelaySession(relayRequestData *pairingtypes.RelayPrivateData, chainID string, providerPublicAddress string, epoch int64) *pairingtypes.RelaySession { + return &pairingtypes.RelaySession{ + ChainID: chainID, + ContentHash: sigs.CalculateContentHashForRelayData(relayRequestData), + SessionId: lavasession.DataReliabilitySessionId, // sessionID for reliability is 0 + CuSum: lavasession.DataReliabilityCuSum, // consumerSession.CuSum == 0 + Provider: providerPublicAddress, + RelayNum: 0, + QoSReport: nil, + BlockHeight: epoch, + UnresponsiveProviders: nil, + LavaChainId: "FIXMEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE IF IM HERE ITS A BUG", + Sig: nil, + } +} + +func ConstructRelayRequest(ctx context.Context, privKey *btcec.PrivateKey, chainID string, relayRequestData *pairingtypes.RelayPrivateData, providerPublicAddress string, consumerSession *lavasession.SingleConsumerSession, epoch int64, reportedProviders []byte) (*pairingtypes.RelayRequest, error) { + relayRequest := &pairingtypes.RelayRequest{ + RelayData: relayRequestData, + RelaySession: ConstructRelaySession(relayRequestData, chainID, providerPublicAddress, consumerSession, epoch, reportedProviders), + DataReliability: nil, + } + sig, err := sigs.SignRelay(privKey, *relayRequest.RelaySession) if err != nil { return nil, err } - relayRequest.Sig = sig + relayRequest.RelaySession.Sig = sig return relayRequest, nil } @@ -78,7 +103,7 @@ func GetTimePerCu(cu uint64) time.Duration { return chainlib.LocalNodeTimePerCu(cu) + chainlib.MinimumTimePerRelayDelay } -func UpdateRequestedBlock(request *pairingtypes.RelayRequest, response *pairingtypes.RelayReply) { +func UpdateRequestedBlock(request *pairingtypes.RelayPrivateData, response *pairingtypes.RelayReply) { // since sometimes the user is sending requested block that is a magic like latest, or earliest we need to specify to the reliability what it is request.RequestBlock = ReplaceRequestedBlock(request.RequestBlock, response.LatestBlock) } @@ -115,42 +140,34 @@ func DataReliabilityThresholdToSession(vrfs [][]byte, uniqueIdentifiers []bool, func NewVRFData(differentiator bool, vrf_res []byte, vrf_proof []byte, request *pairingtypes.RelayRequest, reply *pairingtypes.RelayReply) *pairingtypes.VRFData { dataReliability := &pairingtypes.VRFData{ + ChainID: request.RelaySession.ChainID, + Epoch: request.RelaySession.BlockHeight, Differentiator: differentiator, VrfValue: vrf_res, VrfProof: vrf_proof, ProviderSig: reply.Sig, AllDataHash: sigs.AllDataHash(reply, request), - QueryHash: utils.CalculateQueryHash(*request), + QueryHash: utils.CalculateQueryHash(*request.RelayData), Sig: nil, } return dataReliability } -func 
ConstructDataReliabilityRelayRequest(ctx context.Context, vrfData *pairingtypes.VRFData, privKey *btcec.PrivateKey, chainID string, relayRequestCommonData *RelayRequestCommonData, providerPublicAddress string, epoch int64, reportedProviders []byte) (*pairingtypes.RelayRequest, error) { - if relayRequestCommonData.RequestBlock < 0 { +func ConstructDataReliabilityRelayRequest(ctx context.Context, vrfData *pairingtypes.VRFData, privKey *btcec.PrivateKey, chainID string, relayRequestData *pairingtypes.RelayPrivateData, providerPublicAddress string, epoch int64, reportedProviders []byte) (*pairingtypes.RelayRequest, error) { + if relayRequestData.RequestBlock < 0 { return nil, utils.LavaFormatError("tried to construct data reliability relay with invalid request block, need to specify exactly what block is required", nil, - &map[string]string{"requested_common_data": fmt.Sprintf("%+v", relayRequestCommonData), "epoch": strconv.FormatInt(epoch, 10), "chainID": chainID}) + &map[string]string{"requested_common_data": fmt.Sprintf("%+v", relayRequestData), "epoch": strconv.FormatInt(epoch, 10), "chainID": chainID}) } relayRequest := &pairingtypes.RelayRequest{ - Provider: providerPublicAddress, - ConnectionType: relayRequestCommonData.ConnectionType, - ApiUrl: relayRequestCommonData.ApiUrl, - Data: relayRequestCommonData.Data, - SessionId: lavasession.DataReliabilitySessionId, // sessionID for reliability is 0 - ChainID: chainID, - CuSum: lavasession.DataReliabilityCuSum, // consumerSession.CuSum == 0 - BlockHeight: epoch, - RelayNum: 0, // consumerSession.RelayNum == 0 - RequestBlock: relayRequestCommonData.RequestBlock, - QoSReport: nil, - DataReliability: vrfData, - UnresponsiveProviders: reportedProviders, + RelayData: relayRequestData, + RelaySession: dataReliabilityRelaySession(relayRequestData, chainID, providerPublicAddress, epoch), + DataReliability: vrfData, } - sig, err := sigs.SignRelay(privKey, *relayRequest) + sig, err := sigs.SignRelay(privKey, *relayRequest.RelaySession) if err != nil { return nil, err } - relayRequest.Sig = sig + relayRequest.RelaySession.Sig = sig sig, err = sigs.SignVRFData(privKey, relayRequest.DataReliability) if err != nil { diff --git a/protocol/lavaprotocol/response_builder.go b/protocol/lavaprotocol/response_builder.go index 65947584bc..194505e9bd 100644 --- a/protocol/lavaprotocol/response_builder.go +++ b/protocol/lavaprotocol/response_builder.go @@ -19,7 +19,7 @@ import ( func SignRelayResponse(consumerAddress sdk.AccAddress, request pairingtypes.RelayRequest, pkey *btcSecp256k1.PrivateKey, reply *pairingtypes.RelayReply, signDataReliability bool) (*pairingtypes.RelayReply, error) { // request is a copy of the original request, but won't modify it // update relay request requestedBlock to the provided one in case it was arbitrary - UpdateRequestedBlock(&request, reply) + UpdateRequestedBlock(request.RelayData, reply) // Update signature, sig, err := sigs.SignRelayResponse(pkey, reply, &request) if err != nil { diff --git a/protocol/rpcconsumer/rpcconsumer_server.go b/protocol/rpcconsumer/rpcconsumer_server.go index c5d0006131..f90bfde3d9 100644 --- a/protocol/rpcconsumer/rpcconsumer_server.go +++ b/protocol/rpcconsumer/rpcconsumer_server.go @@ -99,13 +99,12 @@ func (rpccs *RPCConsumerServer) SendRelay( unwantedProviders := map[string]struct{}{} // do this in a loop with retry attempts, configurable via a flag, limited by the number of providers in CSM - relayRequestCommonData := lavaprotocol.NewRelayRequestCommonData(rpccs.listenEndpoint.ChainID, 
connectionType, url, []byte(req), chainMessage.RequestedBlock(), rpccs.listenEndpoint.ApiInterface) - + relayRequestData := lavaprotocol.NewRelayData(connectionType, url, []byte(req), chainMessage.RequestedBlock(), rpccs.listenEndpoint.ApiInterface) relayResults := []*lavaprotocol.RelayResult{} relayErrors := []error{} for retries := 0; retries < MaxRelayRetries; retries++ { // TODO: make this async between different providers - relayResult, err := rpccs.sendRelayToProvider(ctx, chainMessage, relayRequestCommonData, dappID, &unwantedProviders) + relayResult, err := rpccs.sendRelayToProvider(ctx, chainMessage, relayRequestData, dappID, &unwantedProviders) if relayResult.ProviderAddress != "" { unwantedProviders[relayResult.ProviderAddress] = struct{}{} } @@ -124,7 +123,7 @@ func (rpccs *RPCConsumerServer) SendRelay( break } // future requests need to ask for the same block height to get consensus on the reply - relayRequestCommonData.RequestBlock = relayResult.Request.RequestBlock + relayRequestData.RequestBlock = relayResult.Request.RelayData.RequestBlock } enabled, dataReliabilityThreshold := rpccs.chainParser.DataReliabilityParams() @@ -133,7 +132,7 @@ func (rpccs *RPCConsumerServer) SendRelay( // new context is needed for data reliability as some clients cancel the context they provide when the relay returns // as data reliability happens in a go routine it will continue while the response returns. dataReliabilityContext := context.Background() - go rpccs.sendDataReliabilityRelayIfApplicable(dataReliabilityContext, relayResult, chainMessage, dataReliabilityThreshold, &relayRequestCommonData) // runs asynchronously + go rpccs.sendDataReliabilityRelayIfApplicable(dataReliabilityContext, relayResult, chainMessage, dataReliabilityThreshold) // runs asynchronously } } @@ -154,7 +153,7 @@ func (rpccs *RPCConsumerServer) SendRelay( func (rpccs *RPCConsumerServer) sendRelayToProvider( ctx context.Context, chainMessage chainlib.ChainMessage, - relayRequestCommonData lavaprotocol.RelayRequestCommonData, + relayRequestData *pairingtypes.RelayPrivateData, dappID string, unwantedProviders *map[string]struct{}, ) (relayResult *lavaprotocol.RelayResult, errRet error) { @@ -180,7 +179,7 @@ func (rpccs *RPCConsumerServer) sendRelayToProvider( } privKey := rpccs.privKey chainID := rpccs.listenEndpoint.ChainID - relayRequest, err := lavaprotocol.ConstructRelayRequest(ctx, privKey, chainID, relayRequestCommonData, providerPublicAddress, singleConsumerSession, int64(epoch), reportedProviders) + relayRequest, err := lavaprotocol.ConstructRelayRequest(ctx, privKey, chainID, relayRequestData, providerPublicAddress, singleConsumerSession, int64(epoch), reportedProviders) if err != nil { return relayResult, err } @@ -255,9 +254,9 @@ func (rpccs *RPCConsumerServer) relayInner(ctx context.Context, singleConsumerSe return relayResult, 0, err } relayResult.Reply = reply - lavaprotocol.UpdateRequestedBlock(relayRequest, reply) // update relay request requestedBlock to the provided one in case it was arbitrary + lavaprotocol.UpdateRequestedBlock(relayRequest.RelayData, reply) // update relay request requestedBlock to the provided one in case it was arbitrary _, _, blockDistanceForFinalizedData, _ := rpccs.chainParser.ChainBlockStats() - finalized := spectypes.IsFinalizedBlock(relayRequest.RequestBlock, reply.LatestBlock, blockDistanceForFinalizedData) + finalized := spectypes.IsFinalizedBlock(relayRequest.RelayData.RequestBlock, reply.LatestBlock, blockDistanceForFinalizedData) err = 
lavaprotocol.VerifyRelayReply(reply, relayRequest, providerPublicAddress) if err != nil { return relayResult, 0, err @@ -275,7 +274,7 @@ func (rpccs *RPCConsumerServer) relayInner(ctx context.Context, singleConsumerSe return relayResult, 0, err } - finalizationConflict, err = rpccs.finalizationConsensus.UpdateFinalizedHashes(int64(blockDistanceForFinalizedData), providerPublicAddress, reply.LatestBlock, finalizedBlocks, relayRequest, reply) + finalizationConflict, err = rpccs.finalizationConsensus.UpdateFinalizedHashes(int64(blockDistanceForFinalizedData), providerPublicAddress, reply.LatestBlock, finalizedBlocks, relayRequest.RelaySession, reply) if err != nil { go rpccs.consumerTxSender.TxConflictDetection(ctx, finalizationConflict, nil, nil) return relayResult, 0, err @@ -304,7 +303,7 @@ func (rpccs *RPCConsumerServer) relaySubscriptionInner(ctx context.Context, endp return relayResult, err } -func (rpccs *RPCConsumerServer) sendDataReliabilityRelayIfApplicable(ctx context.Context, relayResult *lavaprotocol.RelayResult, chainMessage chainlib.ChainMessage, dataReliabilityThreshold uint32, relayRequestCommonData *lavaprotocol.RelayRequestCommonData) error { +func (rpccs *RPCConsumerServer) sendDataReliabilityRelayIfApplicable(ctx context.Context, relayResult *lavaprotocol.RelayResult, chainMessage chainlib.ChainMessage, dataReliabilityThreshold uint32) error { // Data reliability: // handle data reliability VRF random value check with the lavaprotocol package // asynchronous: if applicable, get a data reliability session from ConsumerSessionManager @@ -318,10 +317,10 @@ func (rpccs *RPCConsumerServer) sendDataReliabilityRelayIfApplicable(ctx context return nil // disabled for this spec and requested block so no data reliability messages } var dataReliabilitySessions []*lavasession.DataReliabilitySession - sessionEpoch := uint64(relayResult.Request.BlockHeight) + sessionEpoch := uint64(relayResult.Request.RelaySession.BlockHeight) providerPubAddress := relayResult.ProviderAddress // handle data reliability - vrfRes0, vrfRes1 := utils.CalculateVrfOnRelay(relayResult.Request, relayResult.Reply, rpccs.VrfSk, sessionEpoch) + vrfRes0, vrfRes1 := utils.CalculateVrfOnRelay(relayResult.Request.RelayData, relayResult.Reply, rpccs.VrfSk, sessionEpoch) // get two indexesMap for data reliability. 
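The two VRF results above only become useful once they are reduced to provider indices, which is what DataReliabilityThresholdToSession (and utils.GetIndexForVrf on the legacy path later in this patch) does. Neither helper's body is shown here, so the sketch below is only a plausible reading of that step, not the actual implementation: sample a fixed-size prefix of the VRF output, trigger data reliability only when it falls under the threshold, and reduce it modulo the pairing size to pick which provider to re-query.

package main

// Illustrative only: the real mapping lives in DataReliabilityThresholdToSession /
// utils.GetIndexForVrf, which this patch calls but does not show. Which bytes of
// the VRF output are sampled, their endianness, and the modulo reduction are all
// assumptions made for illustration.

import (
	"encoding/binary"
	"fmt"
)

func indexForVrf(vrfValue []byte, providersCount uint32, threshold uint32) int64 {
	if len(vrfValue) < 4 || providersCount == 0 {
		return -1
	}
	sample := binary.LittleEndian.Uint32(vrfValue[:4])
	if sample > threshold {
		return -1 // threshold not met: skip data reliability for this relay
	}
	return int64(sample % providersCount) // which provider to re-ask
}

func main() {
	vrfRes0 := []byte{0x05, 0x00, 0x00, 0x00, 0xaa} // pretend VRF output
	fmt.Println(indexForVrf(vrfRes0, 7, 1<<20))     // 5 % 7 = 5
}

Two VRF results are thresholded (with differentiator false/true) so that up to two independent providers can be asked the same query, which is what the dataReliabilitySessions list built below collects.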
providersCount := uint32(rpccs.consumerSessionManager.GetAtomicPairingAddressesLength()) indexesMap := lavaprotocol.DataReliabilityThresholdToSession([][]byte{vrfRes0, vrfRes1}, []bool{false, true}, dataReliabilityThreshold, providersCount) @@ -353,7 +352,7 @@ func (rpccs *RPCConsumerServer) sendDataReliabilityRelayIfApplicable(ctx context } sendReliabilityRelay := func(singleConsumerSession *lavasession.SingleConsumerSession, providerAddress string, differentiator bool, epoch int64) (reliabilityResult *lavaprotocol.RelayResult, err error) { - vrf_res, vrf_proof := utils.ProveVrfOnRelay(relayResult.Request, relayResult.Reply, rpccs.VrfSk, differentiator, sessionEpoch) + vrf_res, vrf_proof := utils.ProveVrfOnRelay(relayResult.Request.RelayData, relayResult.Reply, rpccs.VrfSk, differentiator, sessionEpoch) // calculated from query body anyway, but we will use this on payment // calculated in cb_send_reliability vrfData := lavaprotocol.NewVRFData(differentiator, vrf_res, vrf_proof, relayResult.Request, relayResult.Reply) @@ -362,9 +361,9 @@ func (rpccs *RPCConsumerServer) sendDataReliabilityRelayIfApplicable(ctx context reportedProviders = nil utils.LavaFormatError("failed reading reported providers for epoch", err, &map[string]string{"epoch": strconv.FormatInt(epoch, 10)}) } - reliabilityRequest, err := lavaprotocol.ConstructDataReliabilityRelayRequest(ctx, vrfData, rpccs.privKey, rpccs.listenEndpoint.ChainID, relayRequestCommonData, providerAddress, epoch, reportedProviders) + reliabilityRequest, err := lavaprotocol.ConstructDataReliabilityRelayRequest(ctx, vrfData, rpccs.privKey, rpccs.listenEndpoint.ChainID, relayResult.Request.RelayData, providerAddress, epoch, reportedProviders) if err != nil { - return nil, utils.LavaFormatError("failed creating data reliability relay", err, &map[string]string{"relayRequestCommonData": fmt.Sprintf("%+v", relayRequestCommonData)}) + return nil, utils.LavaFormatError("failed creating data reliability relay", err, &map[string]string{"relayRequestData": fmt.Sprintf("%+v", relayResult.Request.RelayData)}) } relayResult = &lavaprotocol.RelayResult{Request: reliabilityRequest, ProviderAddress: providerAddress, Finalized: false} relayTimeout := lavaprotocol.GetTimePerCu(singleConsumerSession.LatestRelayCu) + chainlib.AverageWorldLatency + chainlib.DataReliabilityTimeoutIncrease diff --git a/protocol/rpcprovider/provider_listener.go b/protocol/rpcprovider/provider_listener.go index 5059661e7f..b611d22eeb 100644 --- a/protocol/rpcprovider/provider_listener.go +++ b/protocol/rpcprovider/provider_listener.go @@ -115,8 +115,8 @@ func (rs *relayServer) RelaySubscribe(request *pairingtypes.RelayRequest, srv pa } func (rs *relayServer) findReceiver(request *pairingtypes.RelayRequest) (RelayReceiver, error) { - apiInterface := request.ApiInterface - chainID := request.ChainID + apiInterface := request.RelayData.ApiInterface + chainID := request.RelaySession.ChainID endpoint := lavasession.RPCEndpoint{ChainID: chainID, ApiInterface: apiInterface} rs.lock.RLock() defer rs.lock.RUnlock() diff --git a/protocol/rpcprovider/rpcprovider_server.go b/protocol/rpcprovider/rpcprovider_server.go index 02e274df59..f2fe294800 100644 --- a/protocol/rpcprovider/rpcprovider_server.go +++ b/protocol/rpcprovider/rpcprovider_server.go @@ -547,7 +547,7 @@ func (rpcps *RPCProviderServer) TryRelay(ctx context.Context, request *pairingty } if requestedBlockHash != nil || finalized { err := cache.SetEntry(ctx, request, rpcps.rpcProviderEndpoint.ApiInterface, requestedBlockHash, 
rpcps.rpcProviderEndpoint.ChainID, consumerAddr.String(), reply, finalized) - if err != nil && !performance.NotInitialisedError.Is(err) && request.BlockHeight != spectypes.NOT_APPLICABLE { + if err != nil && !performance.NotInitialisedError.Is(err) && request.RelaySession.BlockHeight != spectypes.NOT_APPLICABLE { utils.LavaFormatWarning("error updating cache with new entry", err, nil) } } @@ -555,7 +555,7 @@ func (rpcps *RPCProviderServer) TryRelay(ctx context.Context, request *pairingty apiName := chainMsg.GetServiceApi().Name if reqMsg != nil && strings.Contains(apiName, "unsubscribe") { - err := rpcps.processUnsubscribe(apiName, consumerAddr, reqParams, uint64(request.RequestBlock)) + err := rpcps.processUnsubscribe(apiName, consumerAddr, reqParams, uint64(request.RelayData.RequestBlock)) if err != nil { return nil, err } diff --git a/relayer/chainproxy/chainproxy.go b/relayer/chainproxy/chainproxy.go index 08c80c02fd..bea51e07f2 100644 --- a/relayer/chainproxy/chainproxy.go +++ b/relayer/chainproxy/chainproxy.go @@ -139,26 +139,30 @@ func SendRelay( // we need to apply CuSum and relay number that we plan to add in the relay request. even if we didn't yet apply them to the consumerSession. relayRequest := &pairingtypes.RelayRequest{ - Provider: providerPublicAddress, - ConnectionType: connectionType, - ApiUrl: url, - Data: []byte(req), - SessionId: uint64(consumerSession.SessionId), - ChainID: cp.GetSentry().ChainID, - CuSum: consumerSession.CuSum + consumerSession.LatestRelayCu, // add the latestRelayCu which will be applied when session is returned properly - BlockHeight: blockHeight, - RelayNum: consumerSession.RelayNum + lavasession.RelayNumberIncrement, // increment the relay number. which will be applied when session is returned properly - RequestBlock: nodeMsg.RequestedBlock(), - QoSReport: consumerSession.QoSInfo.LastQoSReport, - DataReliability: nil, - UnresponsiveProviders: reportedProviders, + RelaySession: &pairingtypes.RelaySession{ + SessionId: uint64(consumerSession.SessionId), + Provider: providerPublicAddress, + ChainID: cp.GetSentry().ChainID, + BlockHeight: blockHeight, + RelayNum: consumerSession.RelayNum + lavasession.RelayNumberIncrement, // increment the relay number. 
which will be applied when session is returned properly + QoSReport: consumerSession.QoSInfo.LastQoSReport, + UnresponsiveProviders: reportedProviders, + CuSum: consumerSession.CuSum + consumerSession.LatestRelayCu, // add the latestRelayCu which will be applied when session is returned properly + }, + RelayData: &pairingtypes.RelayPrivateData{ + ConnectionType: connectionType, + Data: []byte(req), + RequestBlock: nodeMsg.RequestedBlock(), + ApiUrl: url, + }, + DataReliability: nil, } - sig, err := sigs.SignRelay(privKey, *relayRequest) + sig, err := sigs.SignRelay(privKey, *relayRequest.RelaySession) if err != nil { return nil, nil, nil, 0, false, err } - relayRequest.Sig = sig + relayRequest.RelaySession.Sig = sig c := *consumerSession.Endpoint.Client connectCtx, cancel := context.WithTimeout(ctx, relayTimeout) @@ -188,7 +192,7 @@ func SendRelay( if analytics != nil { analytics.Latency = currentLatency.Milliseconds() - analytics.ComputeUnits = relayRequest.CuSum + analytics.ComputeUnits = relayRequest.RelaySession.CuSum } if err != nil { @@ -198,12 +202,12 @@ func SendRelay( if !isSubscription { // update relay request requestedBlock to the provided one in case it was arbitrary sentry.UpdateRequestedBlock(relayRequest, reply) - finalized := cp.GetSentry().IsFinalizedBlock(relayRequest.RequestBlock, reply.LatestBlock) + finalized := cp.GetSentry().IsFinalizedBlock(relayRequest.RelayData.RequestBlock, reply.LatestBlock) err = VerifyRelayReply(reply, relayRequest, providerPublicAddress, cp.GetSentry().GetSpecDataReliabilityEnabled()) if err != nil { return nil, nil, nil, 0, false, err } - requestedBlock = relayRequest.RequestBlock + requestedBlock = relayRequest.RelayData.RequestBlock cache := cp.GetCache() // TODO: response sanity, check its under an expected format add that format to spec err := cache.SetEntry(ctx, relayRequest, cp.GetSentry().ApiInterface, nil, cp.GetSentry().ChainID, dappID, reply, finalized) // caching in the portal doesn't care about hashes @@ -224,26 +228,30 @@ func SendRelay( } relayRequest := &pairingtypes.RelayRequest{ - Provider: providerAddress, - ApiUrl: url, - Data: []byte(req), - SessionId: lavasession.DataReliabilitySessionId, // sessionID for reliability is 0 - ChainID: sentry.ChainID, - CuSum: lavasession.DataReliabilityCuSum, // consumerSession.CuSum == 0 - BlockHeight: blockHeight, - RelayNum: 0, // consumerSession.RelayNum == 0 - RequestBlock: requestedBlock, - QoSReport: nil, - DataReliability: dataReliability, - ConnectionType: connectionType, - UnresponsiveProviders: reportedProviders, + RelaySession: &pairingtypes.RelaySession{ + SessionId: lavasession.DataReliabilitySessionId, // sessionID for reliability is 0 + Provider: providerAddress, + ChainID: sentry.ChainID, + BlockHeight: blockHeight, + RelayNum: 0, // consumerSession.RelayNum == 0 + QoSReport: nil, + UnresponsiveProviders: reportedProviders, + CuSum: lavasession.DataReliabilityCuSum, // consumerSession.CuSum == 0 + }, + RelayData: &pairingtypes.RelayPrivateData{ + ConnectionType: connectionType, + Data: []byte(req), + RequestBlock: requestedBlock, + ApiUrl: url, + }, + DataReliability: dataReliability, } - sig, err := sigs.SignRelay(privKey, *relayRequest) + sig, err := sigs.SignRelay(privKey, *relayRequest.RelaySession) if err != nil { return nil, nil, 0, 0, err } - relayRequest.Sig = sig + relayRequest.RelaySession.Sig = sig sig, err = sigs.SignVRFData(privKey, relayRequest.DataReliability) if err != nil { diff --git a/relayer/portal_server.go b/relayer/portal_server.go deleted file mode 
100644 index e007996324..0000000000 --- a/relayer/portal_server.go +++ /dev/null @@ -1,101 +0,0 @@ -package relayer - -import ( - context "context" - "fmt" - "log" - "math/rand" - "time" - - "github.com/cosmos/cosmos-sdk/client" - "github.com/cosmos/cosmos-sdk/client/tx" - "github.com/cosmos/cosmos-sdk/version" - "github.com/lavanet/lava/relayer/chainproxy" - "github.com/lavanet/lava/relayer/performance" - "github.com/lavanet/lava/relayer/sentry" - "github.com/lavanet/lava/relayer/sigs" - "github.com/lavanet/lava/utils" - "github.com/spf13/pflag" -) - -func PortalServer( - ctx context.Context, - clientCtx client.Context, - txFactory tx.Factory, - listenAddr string, - chainID string, - apiInterface string, - flagSet *pflag.FlagSet, -) { - fmt.Printf("\n\n") - utils.LavaFormatError("++++++++++++++++++++++++++++++++++++++++++++++++++++++++", nil, nil) - utils.LavaFormatError("+ + Important Notice + +", nil, nil) - utils.LavaFormatError("+ lavad portal_server is deprecated. +", nil, nil) - utils.LavaFormatError("+ And will cease to be available +", nil, nil) - utils.LavaFormatError("+ in version v0.7.0 +", nil, nil) - utils.LavaFormatError("+ Please use rpcconsumer instead. +", nil, nil) - utils.LavaFormatError("++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n\n", nil, nil) - // - utils.LavaFormatInfo("lavad Binary Version: "+version.Version, nil) - rand.Seed(time.Now().UnixNano()) - sk, _, err := utils.GetOrCreateVRFKey(clientCtx) - if err != nil { - log.Fatalln("error: GetOrCreateVRFKey", err) - } - // Start sentry - sentry := sentry.NewSentry(clientCtx, txFactory, chainID, true, nil, nil, apiInterface, sk, flagSet, 0) - err = sentry.Init(ctx) - if err != nil { - log.Fatalln("error sentry.Init", err) - } - go sentry.Start(ctx) - for sentry.GetBlockHeight() == 0 { - time.Sleep(1 * time.Second) - } - g_sentry = sentry - g_serverChainID = chainID - - // Node - pLogs, err := chainproxy.NewPortalLogs() - if err != nil { - log.Fatalln("error: NewPortalLogs", err) - } - chainProxy, err := chainproxy.GetChainProxy("", 1, sentry, pLogs, nil) - if err != nil { - log.Fatalln("error: GetChainProxy", err) - } - // Setting up the sentry callback - err = sentry.SetupConsumerSessionManager(ctx, chainProxy.GetConsumerSessionManager()) - if err != nil { - log.Fatalln("error: SetupConsumerSessionManager", err) - } - // - // Set up a connection to the server. 
- utils.LavaFormatInfo("PortalServer"+apiInterface, nil) - keyName, err := sigs.GetKeyName(clientCtx) - if err != nil { - log.Fatalln("error: getKeyName", err) - } - privKey, err := sigs.GetPrivKey(clientCtx, keyName) - if err != nil { - log.Fatalln("error: getPrivKey", err) - } - clientKey, _ := clientCtx.Keyring.Key(keyName) - - utils.LavaFormatInfo("Client pubkey: "+fmt.Sprintf("%s", clientKey.GetPubKey().Address()), nil) - - cacheAddr, err := flagSet.GetString(performance.CacheFlagName) - if err != nil { - utils.LavaFormatError("Failed To Get Cache Address flag", err, &map[string]string{"flags": fmt.Sprintf("%v", flagSet)}) - } else if cacheAddr != "" { - cache, err := performance.InitCache(ctx, cacheAddr) - if err != nil { - utils.LavaFormatError("Failed To Connect to cache at address", err, &map[string]string{"address": cacheAddr}) - } else { - utils.LavaFormatInfo("cache service connected", &map[string]string{"address": cacheAddr}) - chainProxy.SetCache(cache) - } - } - - chainProxy.PortalStart(ctx, privKey, listenAddr) -} diff --git a/relayer/sentry/sentry.go b/relayer/sentry/sentry.go index 6f875f1e8c..ff5c64392b 100755 --- a/relayer/sentry/sentry.go +++ b/relayer/sentry/sentry.go @@ -951,9 +951,9 @@ func (s *Sentry) initProviderHashesConsensus(providerAcc string, latestBlock int LatestBlockTime: time.Now(), FinalizedBlocksHashes: finalizedBlocks, SigBlocks: reply.SigBlocks, - SessionId: req.SessionId, - RelayNum: req.RelayNum, - BlockHeight: req.BlockHeight, + SessionId: req.RelaySession.SessionId, + RelayNum: req.RelaySession.RelayNum, + BlockHeight: req.RelaySession.BlockHeight, LatestBlock: latestBlock, } providerDataContainers := map[string]providerDataContainer{} @@ -970,9 +970,9 @@ func (s *Sentry) insertProviderToConsensus(consensus *ProviderHashesConsensus, f LatestBlockTime: time.Now(), FinalizedBlocksHashes: finalizedBlocks, SigBlocks: reply.SigBlocks, - SessionId: req.SessionId, - RelayNum: req.RelayNum, - BlockHeight: req.BlockHeight, + SessionId: req.RelaySession.SessionId, + RelayNum: req.RelaySession.RelayNum, + BlockHeight: req.RelaySession.BlockHeight, LatestBlock: latestBlock, } consensus.agreeingProviders[providerAcc] = newProviderDataContainer @@ -1038,12 +1038,12 @@ func (s *Sentry) SendRelay( return nil, nil, latency, fromCache, utils.LavaFormatError("failed to check finalized hashes", lavasession.SendRelayError, &map[string]string{"ErrMsg": err.Error()}) } - if specCategory.Deterministic && s.IsFinalizedBlock(request.RequestBlock, reply.LatestBlock) { + if specCategory.Deterministic && s.IsFinalizedBlock(request.RelayData.RequestBlock, reply.LatestBlock) { var dataReliabilitySessions []*DataReliabilitySession // handle data reliability s.VrfSkMu.Lock() - vrfRes0, vrfRes1 := utils.CalculateVrfOnRelay(request, reply, s.VrfSk, sessionEpoch) + vrfRes0, vrfRes1 := utils.CalculateVrfOnRelay(request.RelayData, reply, s.VrfSk, sessionEpoch) s.VrfSkMu.Unlock() // get two indexesMap for data reliability. 
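The legacy relayer/sentry path above now reads its session accounting (SessionId, RelayNum, BlockHeight) from req.RelaySession and its query details from req.RelayData, the same split the protocol/ packages moved to earlier in this patch. For reference, here is a minimal sketch of that request shape, using only field names visible in these hunks; buildSignedRelay is a hypothetical helper, and the proto definitions in x/pairing/types may carry additional fields.

package main

// Hypothetical helper, not part of this patch: it only restates the request shape
// the hunks above converge on, a signed session-accounting half (RelaySession) and
// an unsigned query payload (RelayPrivateData).

import (
	btcSecp256k1 "github.com/btcsuite/btcd/btcec"

	"github.com/lavanet/lava/relayer/sigs"
	pairingtypes "github.com/lavanet/lava/x/pairing/types"
)

func buildSignedRelay(privKey *btcSecp256k1.PrivateKey, provider, chainID, connectionType, url string,
	data []byte, requestBlock, epoch int64, sessionID, cuSum, relayNum uint64,
) (*pairingtypes.RelayRequest, error) {
	req := &pairingtypes.RelayRequest{
		RelaySession: &pairingtypes.RelaySession{
			Provider:    provider,
			ChainID:     chainID,
			SessionId:   sessionID,
			CuSum:       cuSum,
			BlockHeight: epoch, // the lava epoch the pairing belongs to
			RelayNum:    relayNum,
		},
		RelayData: &pairingtypes.RelayPrivateData{
			ConnectionType: connectionType,
			ApiUrl:         url,
			Data:           data,
			RequestBlock:   requestBlock,
		},
		DataReliability: nil, // set only on data reliability relays
	}
	// only the session half is signed; the private payload travels alongside it
	sig, err := sigs.SignRelay(privKey, *req.RelaySession)
	if err != nil {
		return nil, err
	}
	req.RelaySession.Sig = sig
	return req, nil
}

func main() {} // placeholder so the sketch builds standalone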
indexesMap := s.DataReliabilityThresholdToSession([][]byte{vrfRes0, vrfRes1}, []bool{false, true}) @@ -1078,16 +1078,18 @@ func (s *Sentry) SendRelay( var dataReliabilityLatency time.Duration var dataReliabilityTimeout time.Duration s.VrfSkMu.Lock() - vrf_res, vrf_proof := utils.ProveVrfOnRelay(request, reply, s.VrfSk, differentiator, sessionEpoch) + vrf_res, vrf_proof := utils.ProveVrfOnRelay(request.RelayData, reply, s.VrfSk, differentiator, sessionEpoch) s.VrfSkMu.Unlock() dataReliability := &pairingtypes.VRFData{ + ChainID: request.RelaySession.ChainID, + Epoch: request.RelaySession.BlockHeight, Differentiator: differentiator, VrfValue: vrf_res, VrfProof: vrf_proof, ProviderSig: reply.Sig, AllDataHash: sigs.AllDataHash(reply, request), - QueryHash: utils.CalculateQueryHash(*request), // calculated from query body anyway, but we will use this on payment - Sig: nil, // calculated in cb_send_reliability + QueryHash: utils.CalculateQueryHash(*request.RelayData), // calculated from query body anyway, but we will use this on payment + Sig: nil, // calculated in cb_send_reliability } relay_rep, relay_req, dataReliabilityLatency, dataReliabilityTimeout, err = cb_send_reliability(singleConsumerSession, dataReliability, providerAddress) if err != nil { @@ -1537,7 +1539,7 @@ func NewSentry( func UpdateRequestedBlock(request *pairingtypes.RelayRequest, response *pairingtypes.RelayReply) { // since sometimes the user is sending requested block that is a magic like latest, or earliest we need to specify to the reliability what it is - request.RequestBlock = ReplaceRequestedBlock(request.RequestBlock, response.LatestBlock) + request.RelayData.RequestBlock = ReplaceRequestedBlock(request.RelayData.RequestBlock, response.LatestBlock) } func ReplaceRequestedBlock(requestedBlock int64, latestBlock int64) int64 { diff --git a/relayer/server.go b/relayer/server.go deleted file mode 100644 index 1a14017c01..0000000000 --- a/relayer/server.go +++ /dev/null @@ -1,1348 +0,0 @@ -package relayer - -import ( - "bytes" - context "context" - "encoding/json" - "errors" - "fmt" - "math/rand" - "net" - "net/http" - "os" - "os/signal" - "regexp" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/gogo/status" - "github.com/improbable-eng/grpc-web/go/grpcweb" - "golang.org/x/net/http2" - "golang.org/x/net/http2/h2c" - - "golang.org/x/exp/slices" - - btcSecp256k1 "github.com/btcsuite/btcd/btcec" - "github.com/cosmos/cosmos-sdk/client" - "github.com/cosmos/cosmos-sdk/client/tx" - sdk "github.com/cosmos/cosmos-sdk/types" - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" - "github.com/cosmos/cosmos-sdk/version" - "github.com/lavanet/lava/protocol/lavasession" - "github.com/lavanet/lava/relayer/chainproxy" - "github.com/lavanet/lava/relayer/chainproxy/rpcclient" - "github.com/lavanet/lava/relayer/chainsentry" - "github.com/lavanet/lava/relayer/performance" - "github.com/lavanet/lava/relayer/sentry" - "github.com/lavanet/lava/relayer/sigs" - "github.com/lavanet/lava/utils" - conflicttypes "github.com/lavanet/lava/x/conflict/types" - pairingtypes "github.com/lavanet/lava/x/pairing/types" - spectypes "github.com/lavanet/lava/x/spec/types" - "github.com/spf13/pflag" - tenderbytes "github.com/tendermint/tendermint/libs/bytes" - grpc "google.golang.org/grpc" - "google.golang.org/grpc/codes" -) - -const ( - RETRY_INCORRECT_SEQUENCE = 5 - TimeWaitInitializeChainSentry = 10 - RetryInitAttempts = 10 -) - -var ( - g_privKey *btcSecp256k1.PrivateKey - g_sessions map[string]*UserSessions - g_sessions_mutex 
utils.LavaMutex - g_votes map[string]*voteData - g_votes_mutex utils.LavaMutex - g_sentry *sentry.Sentry - g_serverChainID string - g_txFactory tx.Factory - g_chainProxy chainproxy.ChainProxy - g_chainSentry *chainsentry.ChainSentry - g_rewardsSessions map[uint64][]*RelaySession // map[epochHeight][]*rewardableSessions - g_rewardsSessions_mutex utils.LavaMutex - g_serverID uint64 - g_askForRewards_mutex sync.Mutex -) - -type UserSessionsEpochData struct { - UsedComputeUnits uint64 - MaxComputeUnits uint64 - DataReliability *pairingtypes.VRFData - VrfPk utils.VrfPubKey -} - -type UserSessions struct { - Sessions map[uint64]*RelaySession - Subs map[string]*subscription // key: subscriptionID - IsBlockListed bool - user string - dataByEpoch map[uint64]*UserSessionsEpochData - Lock utils.LavaMutex -} - -type RelaySession struct { - userSessionsParent *UserSessions - CuSum uint64 - UniqueIdentifier uint64 - Lock utils.LavaMutex - Proof *pairingtypes.RelayRequest // saves last relay request of a session as proof - RelayNum uint64 - PairingEpoch uint64 -} - -func (rs *RelaySession) atomicReadRelayNum() uint64 { - return atomic.LoadUint64(&rs.RelayNum) -} - -type subscription struct { - id string - sub *rpcclient.ClientSubscription - subscribeRepliesChan chan interface{} -} - -// TODO Perform payment stuff here -func (s *subscription) disconnect() { - s.sub.Unsubscribe() -} - -func (r *RelaySession) GetPairingEpoch() uint64 { - return atomic.LoadUint64(&r.PairingEpoch) -} - -func (r *RelaySession) SetPairingEpoch(epoch uint64) { - atomic.StoreUint64(&r.PairingEpoch, epoch) -} - -type voteData struct { - RelayDataHash []byte - Nonce int64 - CommitHash []byte -} -type relayServer struct { - pairingtypes.UnimplementedRelayerServer -} - -func askForRewards(staleEpochHeight int64) { - g_askForRewards_mutex.Lock() - defer g_askForRewards_mutex.Unlock() - staleEpochs := []uint64{uint64(staleEpochHeight)} - g_rewardsSessions_mutex.Lock() - if len(g_rewardsSessions) > sentry.StaleEpochDistance+1 { - utils.LavaFormatWarning("Some epochs were not rewarded, catching up and asking for rewards...", nil, &map[string]string{ - "requested epoch": strconv.FormatInt(staleEpochHeight, 10), - "provider block": strconv.FormatInt(g_sentry.GetBlockHeight(), 10), - "rewards to claim len": strconv.FormatInt(int64(len(g_rewardsSessions)), 10), - }) - - // go over all epochs and look for stale unhandled epochs - for epoch := range g_rewardsSessions { - if epoch < uint64(staleEpochHeight) { - staleEpochs = append(staleEpochs, epoch) - } - } - } - g_rewardsSessions_mutex.Unlock() - - relays := []*pairingtypes.RelayRequest{} - reliability := false - sessionsToDelete := make([]*RelaySession, 0) - - for _, staleEpoch := range staleEpochs { - g_rewardsSessions_mutex.Lock() - staleEpochSessions, ok := g_rewardsSessions[staleEpoch] - g_rewardsSessions_mutex.Unlock() - if !ok { - continue - } - - for _, session := range staleEpochSessions { - session.Lock.Lock() // TODO:: is it ok to lock session without g_sessions_mutex? 
- if session.Proof == nil { - // this can happen if the data reliability created a session, we dont save a proof on data reliability message - - if session.UniqueIdentifier != 0 { - utils.LavaFormatError("Missing proof, cannot get rewards for this session, deleting it", nil, &map[string]string{ - "UniqueIdentifier": strconv.FormatUint(session.UniqueIdentifier, 10), - }) - } - session.Lock.Unlock() - continue - } - - relay := session.Proof - relays = append(relays, relay) - sessionsToDelete = append(sessionsToDelete, session) - - userSessions := session.userSessionsParent - session.Lock.Unlock() - userSessions.Lock.Lock() - userAccAddr, err := sdk.AccAddressFromBech32(userSessions.user) - if err != nil { - utils.LavaFormatError("get rewards invalid user address", err, &map[string]string{ - "address": userSessions.user, - }) - } - - userSessionsEpochData, ok := userSessions.dataByEpoch[staleEpoch] - if !ok { - utils.LavaFormatError("get rewards Missing epoch data for this user", err, &map[string]string{ - "address": userSessions.user, - "requested epoch": strconv.FormatUint(staleEpoch, 10), - }) - userSessions.Lock.Unlock() - continue - } - - if relay.BlockHeight != int64(staleEpoch) { - utils.LavaFormatError("relay proof is under incorrect epoch in relay rewards", err, &map[string]string{ - "relay epoch": strconv.FormatInt(relay.BlockHeight, 10), - "requested epoch": strconv.FormatUint(staleEpoch, 10), - }) - } - - if userSessionsEpochData.DataReliability != nil { - relay.DataReliability = userSessionsEpochData.DataReliability - userSessionsEpochData.DataReliability = nil - reliability = true - } - userSessions.Lock.Unlock() - - g_sentry.AddExpectedPayment(sentry.PaymentRequest{CU: relay.CuSum, BlockHeightDeadline: relay.BlockHeight, Amount: sdk.Coin{}, Client: userAccAddr, UniqueIdentifier: relay.SessionId}) - g_sentry.UpdateCUServiced(relay.CuSum) - } - - g_rewardsSessions_mutex.Lock() - delete(g_rewardsSessions, staleEpoch) // All rewards handles for that epoch - g_rewardsSessions_mutex.Unlock() - } - - userSessionObjsToDelete := make([]string, 0) - for _, session := range sessionsToDelete { - session.Lock.Lock() - userSessions := session.userSessionsParent - sessionID := session.UniqueIdentifier - session.Lock.Unlock() - userSessions.Lock.Lock() - delete(userSessions.Sessions, sessionID) - if len(userSessions.Sessions) == 0 { - userSessionObjsToDelete = append(userSessionObjsToDelete, userSessions.user) - } - userSessions.Lock.Unlock() - } - - g_sessions_mutex.Lock() - for _, user := range userSessionObjsToDelete { - delete(g_sessions, user) - } - g_sessions_mutex.Unlock() - if len(relays) == 0 { - // no rewards to ask for - return - } - - utils.LavaFormatInfo("asking for rewards", &map[string]string{ - "account": g_sentry.Acc, - "reliability": fmt.Sprintf("%t", reliability), - }) - - myWriter := bytes.Buffer{} - hasSequenceError := false - success := false - idx := -1 - sequenceNumberParsed := 0 - summarizedTransactionResult := "" - for ; idx < RETRY_INCORRECT_SEQUENCE && !success; idx++ { - msg := pairingtypes.NewMsgRelayPayment(g_sentry.Acc, relays, strconv.FormatUint(g_serverID, 10)) - g_sentry.ClientCtx.Output = &myWriter - if hasSequenceError { // a retry - // if sequence number error happened it means that we already sent a tx this block. - // we need to wait a block for the tx to be approved, - // only then we can ask for a new sequence number continue and try again. 
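The removed block below implements this recovery inline; condensed into a standalone helper (hypothetical, but built only from the cosmos-sdk calls already imported in this file), the "refresh the sequence and retry" step amounts to:

package main

import (
	"github.com/cosmos/cosmos-sdk/client"
	"github.com/cosmos/cosmos-sdk/client/tx"
)

// refreshSequence is a hypothetical condensation of the retry step below: after an
// "account sequence" mismatch, reuse the sequence parsed out of the error if there
// is one, otherwise query the node for the current one, and pin the tx factory to
// it before the next broadcast attempt.
func refreshSequence(clientCtx client.Context, txFactory tx.Factory, parsedSeq uint64) (tx.Factory, error) {
	seq := parsedSeq
	if seq == 0 {
		var err error
		_, seq, err = clientCtx.AccountRetriever.GetAccountNumberSequence(clientCtx, clientCtx.GetFromAddress())
		if err != nil {
			return txFactory, err // give up; the caller breaks out of the retry loop
		}
	}
	return txFactory.WithSequence(seq), nil
}

func main() {} // placeholder so the sketch builds standalone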
- var seq uint64 - if sequenceNumberParsed != 0 { - utils.LavaFormatInfo("Sequence Number extracted from transaction error, retrying", &map[string]string{"sequence": strconv.Itoa(sequenceNumberParsed)}) - seq = uint64(sequenceNumberParsed) - } else { - var err error - _, seq, err = g_sentry.ClientCtx.AccountRetriever.GetAccountNumberSequence(g_sentry.ClientCtx, g_sentry.ClientCtx.GetFromAddress()) - if err != nil { - utils.LavaFormatError("failed to get correct sequence number for account, give up", err, nil) - break // give up - } - } - g_txFactory = g_txFactory.WithSequence(seq) - myWriter.Reset() - utils.LavaFormatInfo("Retrying with sequence number:", &map[string]string{ - "SeqNum": strconv.FormatUint(seq, 10), - }) - } - var transactionResult string - err := sentry.CheckProfitabilityAndBroadCastTx(g_sentry.ClientCtx, g_txFactory, msg) - if err != nil { - utils.LavaFormatWarning("Sending CheckProfitabilityAndBroadCastTx failed", err, &map[string]string{ - "msg": fmt.Sprintf("%+v", msg), - }) - transactionResult = err.Error() // incase we got an error the tx result is basically the error - } else { - transactionResult = myWriter.String() - } - - var returnCode int - summarizedTransactionResult, returnCode = parseTransactionResult(transactionResult) - - if returnCode == 0 { // if we get some other code which isn't 0 then keep retrying - success = true - } else if strings.Contains(transactionResult, "account sequence") { - hasSequenceError = true - sequenceNumberParsed, err = findSequenceNumber(transactionResult) - if err != nil { - utils.LavaFormatWarning("Failed findSequenceNumber", err, &map[string]string{"sequence": transactionResult}) - } - summarizedTransactionResult = transactionResult - } - } - - if !success { - utils.LavaFormatError(fmt.Sprintf("askForRewards ERROR, transaction results: \n%s\n", summarizedTransactionResult), nil, nil) - } else { - utils.LavaFormatInfo(fmt.Sprintf("askForRewards SUCCESS!, transaction results: %s\n", summarizedTransactionResult), nil) - } -} - -// extract requested sequence number from tx error. -func findSequenceNumber(sequence string) (int, error) { - re := regexp.MustCompile(`expected (\d+), got (\d+)`) - match := re.FindStringSubmatch(sequence) - if match == nil || len(match) < 2 { - return 0, utils.LavaFormatWarning("Failed to parse sequence number from error", nil, &map[string]string{"sequence": sequence}) - } - return strconv.Atoi(match[1]) // atoi return 0 upon error, so it will be ok when sequenceNumberParsed uses it -} - -func parseTransactionResult(transactionResult string) (string, int) { - transactionResult = strings.ReplaceAll(transactionResult, ": ", ":") - transactionResults := strings.Split(transactionResult, "\n") - summarizedResult := "" - for _, str := range transactionResults { - if strings.Contains(str, "raw_log:") || strings.Contains(str, "txhash:") || strings.Contains(str, "code:") { - summarizedResult = summarizedResult + str + ", " - } - } - - re := regexp.MustCompile(`code:(\d+)`) // extracting code from transaction result (in format code:%d) - match := re.FindStringSubmatch(transactionResult) - if match == nil || len(match) < 2 { - return summarizedResult, 1 // not zero - } - retCode, err := strconv.Atoi(match[1]) // extract return code. 
- if err != nil { - return summarizedResult, 1 // not zero - } - return summarizedResult, retCode -} - -func getRelayUser(in *pairingtypes.RelayRequest) (tenderbytes.HexBytes, error) { - pubKey, err := sigs.RecoverPubKeyFromRelay(*in) - if err != nil { - return nil, err - } - - return pubKey.Address(), nil -} - -func isSupportedSpec(in *pairingtypes.RelayRequest) bool { - return in.ChainID == g_serverChainID -} - -func validateRequestedBlockHeight(blockHeight uint64) bool { - return (blockHeight == g_sentry.GetCurrentEpochHeight() || blockHeight == g_sentry.GetPrevEpochHeight()) -} - -func getOrCreateSession(ctx context.Context, userAddr string, req *pairingtypes.RelayRequest) (*RelaySession, error) { - userSessions := getOrCreateUserSessions(userAddr) - - userSessions.Lock.Lock() - if userSessions.IsBlockListed { - userSessions.Lock.Unlock() - return nil, utils.LavaFormatError("User blocklisted!", nil, &map[string]string{ - "userAddr": userAddr, - }) - } - - var sessionEpoch uint64 - session, ok := userSessions.Sessions[req.SessionId] - userSessions.Lock.Unlock() - - if !ok { - vrf_pk, maxcuRes, err := g_sentry.GetVrfPkAndMaxCuForUser(ctx, userAddr, req.ChainID, req.BlockHeight) - if err != nil { - return nil, utils.LavaFormatError("failed to get the Max allowed compute units for the user!", err, &map[string]string{ - "userAddr": userAddr, - }) - } - - isValidBlockHeight := validateRequestedBlockHeight(uint64(req.BlockHeight)) - if !isValidBlockHeight { - return nil, utils.LavaFormatError("User requested with invalid block height", err, &map[string]string{ - "req.BlockHeight": strconv.FormatInt(req.BlockHeight, 10), "expected": strconv.FormatUint(g_sentry.GetCurrentEpochHeight(), 10), - }) - } - - sessionEpoch = uint64(req.BlockHeight) - - userSessions.Lock.Lock() - session = &RelaySession{userSessionsParent: userSessions, RelayNum: 0, UniqueIdentifier: req.SessionId, PairingEpoch: sessionEpoch} - utils.LavaFormatInfo("new session for user", &map[string]string{ - "userAddr": userAddr, - "created for epoch": strconv.FormatUint(sessionEpoch, 10), - "request blockheight": strconv.FormatInt(req.BlockHeight, 10), - "req.SessionId": strconv.FormatUint(req.SessionId, 10), - }) - userSessions.Sessions[req.SessionId] = session - getOrCreateDataByEpoch(userSessions, sessionEpoch, maxcuRes, vrf_pk, userAddr) - userSessions.Lock.Unlock() - - g_rewardsSessions_mutex.Lock() - if _, ok := g_rewardsSessions[sessionEpoch]; !ok { - g_rewardsSessions[sessionEpoch] = make([]*RelaySession, 0) - } - g_rewardsSessions[sessionEpoch] = append(g_rewardsSessions[sessionEpoch], session) - g_rewardsSessions_mutex.Unlock() - } - - return session, nil -} - -// Must lock UserSessions before using this func -func getOrCreateDataByEpoch(userSessions *UserSessions, sessionEpoch uint64, maxcuRes uint64, vrf_pk *utils.VrfPubKey, userAddr string) *UserSessionsEpochData { - if _, ok := userSessions.dataByEpoch[sessionEpoch]; !ok { - userSessions.dataByEpoch[sessionEpoch] = &UserSessionsEpochData{UsedComputeUnits: 0, MaxComputeUnits: maxcuRes, VrfPk: *vrf_pk} - utils.LavaFormatInfo("new user sessions in epoch", &map[string]string{ - "userAddr": userAddr, - "maxcuRes": strconv.FormatUint(maxcuRes, 10), - "saved under epoch": strconv.FormatUint(sessionEpoch, 10), - "sentry epoch": strconv.FormatUint(g_sentry.GetCurrentEpochHeight(), 10), - }) - } - return userSessions.dataByEpoch[sessionEpoch] -} - -func getOrCreateUserSessions(userAddr string) *UserSessions { - g_sessions_mutex.Lock() - userSessions, ok := g_sessions[userAddr] - 
if !ok { - userSessions = &UserSessions{dataByEpoch: map[uint64]*UserSessionsEpochData{}, Sessions: map[uint64]*RelaySession{}, user: userAddr, Subs: make(map[string]*subscription)} - g_sessions[userAddr] = userSessions - } - g_sessions_mutex.Unlock() - return userSessions -} - -func updateSessionCu(sess *RelaySession, userSessions *UserSessions, serviceApi *spectypes.ServiceApi, request *pairingtypes.RelayRequest, pairingEpoch uint64) error { - sess.Lock.Lock() - relayNum := sess.RelayNum - cuSum := sess.CuSum - sess.Lock.Unlock() - - if relayNum+1 != request.RelayNum { - utils.LavaFormatError("consumer requested incorrect relaynum, expected it to increment by 1", nil, &map[string]string{ - "request.SessionId": strconv.FormatUint(request.SessionId, 10), - "expected": strconv.FormatUint(relayNum+1, 10), - "received": strconv.FormatUint(request.RelayNum, 10), - }) - } - - // Check that relaynum gets incremented by user - if relayNum+1 > request.RelayNum { - return utils.LavaFormatError("consumer requested a smaller relay num than expected, trying to overwrite past usage", lavasession.SessionOutOfSyncError, &map[string]string{ - "request.SessionId": strconv.FormatUint(request.SessionId, 10), - "expected": strconv.FormatUint(relayNum+1, 10), - "received": strconv.FormatUint(request.RelayNum, 10), - }) - } - - sess.Lock.Lock() - sess.RelayNum++ - sess.Lock.Unlock() - - // utils.LavaFormatDebug("updateSessionCu", &map[string]string{ - // "serviceApi.Name": serviceApi.Name, - // "request.SessionId": strconv.FormatUint(request.SessionId, 10), - // }) - // - // TODO: do we worry about overflow here? - if cuSum >= request.CuSum { - return utils.LavaFormatError("bad CU sum", lavasession.SessionOutOfSyncError, &map[string]string{ - "request.SessionId": strconv.FormatUint(request.SessionId, 10), - "cuSum": strconv.FormatUint(cuSum, 10), - "request.CuSum": strconv.FormatUint(request.CuSum, 10), - }) - } - if cuSum+serviceApi.ComputeUnits != request.CuSum { - return utils.LavaFormatError("bad CU sum", lavasession.SessionOutOfSyncError, &map[string]string{ - "request.SessionId": strconv.FormatUint(request.SessionId, 10), - "cuSum": strconv.FormatUint(cuSum, 10), - "request.CuSum": strconv.FormatUint(request.CuSum, 10), - "serviceApi.ComputeUnits": strconv.FormatUint(serviceApi.ComputeUnits, 10), - }) - } - - userSessions.Lock.Lock() - epochData := userSessions.dataByEpoch[pairingEpoch] - - if epochData.UsedComputeUnits+serviceApi.ComputeUnits > epochData.MaxComputeUnits { - userSessions.Lock.Unlock() - return utils.LavaFormatError("client cu overflow", nil, &map[string]string{ - "request.SessionId": strconv.FormatUint(request.SessionId, 10), - "epochData.MaxComputeUnits": strconv.FormatUint(epochData.MaxComputeUnits, 10), - "epochData.UsedComputeUnits": strconv.FormatUint(epochData.UsedComputeUnits, 10), - "serviceApi.ComputeUnits": strconv.FormatUint(request.CuSum, 10), - }) - } - - epochData.UsedComputeUnits += serviceApi.ComputeUnits - userSessions.Lock.Unlock() - - sess.Lock.Lock() - sess.CuSum = request.CuSum - sess.Lock.Unlock() - - return nil -} - -func processUnsubscribeEthereum(subscriptionID string, userSessions *UserSessions) { - if sub, ok := userSessions.Subs[subscriptionID]; ok { - sub.disconnect() - delete(userSessions.Subs, subscriptionID) - } -} - -func processUnsubscribeTendermint(apiName string, subscriptionID string, userSessions *UserSessions) { - if apiName == "unsubscribe" { - if sub, ok := userSessions.Subs[subscriptionID]; ok { - sub.disconnect() - delete(userSessions.Subs, 
subscriptionID) - } - } else { - for subscriptionID, sub := range userSessions.Subs { - sub.disconnect() - delete(userSessions.Subs, subscriptionID) - } - } -} - -func processUnsubscribe(apiName string, userAddr sdk.AccAddress, reqParams interface{}) error { - userSessions := getOrCreateUserSessions(userAddr.String()) - userSessions.Lock.Lock() - defer userSessions.Lock.Unlock() - switch p := reqParams.(type) { - case []interface{}: - subscriptionID, ok := p[0].(string) - if !ok { - return fmt.Errorf("processUnsubscribe - p[0].(string) - type assertion failed, type:" + fmt.Sprintf("%s", p[0])) - } - processUnsubscribeEthereum(subscriptionID, userSessions) - case map[string]interface{}: - subscriptionID := "" - if apiName == "unsubscribe" { - var ok bool - subscriptionID, ok = p["query"].(string) - if !ok { - return fmt.Errorf("processUnsubscribe - p['query'].(string) - type assertion failed, type:" + fmt.Sprintf("%s", p["query"])) - } - } - processUnsubscribeTendermint(apiName, subscriptionID, userSessions) - } - return nil -} - -func (s *relayServer) initRelay(ctx context.Context, request *pairingtypes.RelayRequest) (sdk.AccAddress, chainproxy.NodeMessage, *UserSessions, *RelaySession, error) { - // client blockheight can only be at at prev epoch but not earlier - if request.BlockHeight < int64(g_sentry.GetPrevEpochHeight()) { - return nil, nil, nil, nil, utils.LavaFormatError("user reported very old lava block height", nil, &map[string]string{ - "current epoch block": strconv.FormatUint(g_sentry.GetCurrentEpochHeight(), 10), - "current lava block": strconv.FormatInt(g_sentry.GetBlockHeight(), 10), - "requested lava block": strconv.FormatInt(request.BlockHeight, 10), - }) - } - - // Checks - if g_sentry.Acc != request.Provider { - return nil, nil, nil, nil, utils.LavaFormatError("User is trying to communicate with the wrong provider address.", nil, &map[string]string{ - "ProviderWhoGotTheRequest": g_sentry.Acc, - "ProviderInTheRequest": request.Provider, - }) - } - - user, err := getRelayUser(request) - if err != nil { - return nil, nil, nil, nil, utils.LavaFormatError("get relay user", err, &map[string]string{}) - } - userAddr, err := sdk.AccAddressFromHex(user.String()) - if err != nil { - return nil, nil, nil, nil, utils.LavaFormatError("get relay acc address", err, &map[string]string{}) - } - - if !isSupportedSpec(request) { - return nil, nil, nil, nil, utils.LavaFormatError("spec not supported by server", err, &map[string]string{"request.chainID": request.ChainID, "chainID": g_serverChainID}) - } - - var nodeMsg chainproxy.NodeMessage - authorizeAndParseMessage := func(ctx context.Context, userAddr sdk.AccAddress, request *pairingtypes.RelayRequest, blockHeightToAuthorize uint64) (*pairingtypes.QueryVerifyPairingResponse, chainproxy.NodeMessage, error) { - // TODO: cache this client, no need to run the query every time - authorisedUserResponse, err := g_sentry.IsAuthorizedConsumer(ctx, userAddr.String(), blockHeightToAuthorize) - if err != nil { - return nil, nil, utils.LavaFormatError("user not authorized or error occurred", err, &map[string]string{"userAddr": userAddr.String(), "block": strconv.FormatUint(blockHeightToAuthorize, 10), "userRequest": fmt.Sprintf("%+v", request)}) - } - // Parse message, check valid api, etc - nodeMsg, err := g_chainProxy.ParseMsg(request.ApiUrl, request.Data, request.ConnectionType) - if err != nil { - return nil, nil, utils.LavaFormatError("failed parsing request message", err, &map[string]string{"apiInterface": g_sentry.ApiInterface, "request URL": 
request.ApiUrl, "request data": string(request.Data), "userAddr": userAddr.String()}) - } - return authorisedUserResponse, nodeMsg, nil - } - var authorisedUserResponse *pairingtypes.QueryVerifyPairingResponse - authorisedUserResponse, nodeMsg, err = authorizeAndParseMessage(ctx, userAddr, request, uint64(request.BlockHeight)) - if err != nil { - return nil, nil, nil, nil, utils.LavaFormatError("failed authorizing user request", err, nil) - } - var relaySession *RelaySession - var userSessions *UserSessions - if request.DataReliability != nil { - if request.RelayNum > lavasession.DataReliabilitySessionId { - return nil, nil, nil, nil, utils.LavaFormatError("request's relay num is larger than the data reliability session ID", nil, &map[string]string{"relayNum": strconv.FormatUint(request.RelayNum, 10), "DataReliabilitySessionId": strconv.Itoa(lavasession.DataReliabilitySessionId)}) - } - if request.CuSum != lavasession.DataReliabilityCuSum { - return nil, nil, nil, nil, utils.LavaFormatError("request's CU sum is not equal to the data reliability CU sum", nil, &map[string]string{"cuSum": strconv.FormatUint(request.CuSum, 10), "DataReliabilityCuSum": strconv.Itoa(lavasession.DataReliabilityCuSum)}) - } - userSessions = getOrCreateUserSessions(userAddr.String()) - vrf_pk, maxcuRes, err := g_sentry.GetVrfPkAndMaxCuForUser(ctx, userAddr.String(), request.ChainID, request.BlockHeight) - if err != nil { - return nil, nil, nil, nil, utils.LavaFormatError("failed to get vrfpk and maxCURes for data reliability!", err, &map[string]string{ - "userAddr": userAddr.String(), - }) - } - - userSessions.Lock.Lock() - if epochData, ok := userSessions.dataByEpoch[uint64(request.BlockHeight)]; ok { - // data reliability message - if epochData.DataReliability != nil { - userSessions.Lock.Unlock() - return nil, nil, nil, nil, utils.LavaFormatError("Simulation: dataReliability can only be used once per client per epoch", nil, - &map[string]string{"requested epoch": strconv.FormatInt(request.BlockHeight, 10), "userAddr": userAddr.String(), "dataReliability": fmt.Sprintf("%v", epochData.DataReliability)}) - } - } - userSessions.Lock.Unlock() - // data reliability is not session dependant, its always sent with sessionID 0 and if not we don't care - if vrf_pk == nil { - return nil, nil, nil, nil, utils.LavaFormatError("dataReliability Triggered with vrf_pk == nil", nil, - &map[string]string{"requested epoch": strconv.FormatInt(request.BlockHeight, 10), "userAddr": userAddr.String()}) - } - // verify the providerSig is ineed a signature by a valid provider on this query - valid, err := s.VerifyReliabilityAddressSigning(ctx, userAddr, request) - if err != nil { - return nil, nil, nil, nil, utils.LavaFormatError("VerifyReliabilityAddressSigning invalid", err, - &map[string]string{"requested epoch": strconv.FormatInt(request.BlockHeight, 10), "userAddr": userAddr.String(), "dataReliability": fmt.Sprintf("%v", request.DataReliability)}) - } - if !valid { - return nil, nil, nil, nil, utils.LavaFormatError("invalid DataReliability Provider signing", nil, - &map[string]string{"requested epoch": strconv.FormatInt(request.BlockHeight, 10), "userAddr": userAddr.String(), "dataReliability": fmt.Sprintf("%v", request.DataReliability)}) - } - // verify data reliability fields correspond to the right vrf - valid = utils.VerifyVrfProof(request, *vrf_pk, uint64(request.BlockHeight)) - if !valid { - return nil, nil, nil, nil, utils.LavaFormatError("invalid DataReliability fields, VRF wasn't verified with provided proof", nil, - 
&map[string]string{"requested epoch": strconv.FormatInt(request.BlockHeight, 10), "userAddr": userAddr.String(), "dataReliability": fmt.Sprintf("%v", request.DataReliability)}) - } - - vrfIndex, vrfErr := utils.GetIndexForVrf(request.DataReliability.VrfValue, uint32(g_sentry.GetProvidersCount()), g_sentry.GetReliabilityThreshold()) - if vrfErr != nil { - dataReliabilityMarshalled, err := json.Marshal(request.DataReliability) - if err != nil { - dataReliabilityMarshalled = []byte{} - } - return nil, nil, nil, nil, utils.LavaFormatError("Provider identified vrf value in data reliability request does not meet threshold", vrfErr, - &map[string]string{ - "requested epoch": strconv.FormatInt(request.BlockHeight, 10), "userAddr": userAddr.String(), - "dataReliability": string(dataReliabilityMarshalled), "relayEpochStart": strconv.FormatInt(request.BlockHeight, 10), - "vrfIndex": strconv.FormatInt(vrfIndex, 10), - "self Index": strconv.FormatInt(authorisedUserResponse.Index, 10), - }) - } - if authorisedUserResponse.Index != vrfIndex { - dataReliabilityMarshalled, err := json.Marshal(request.DataReliability) - if err != nil { - dataReliabilityMarshalled = []byte{} - } - return nil, nil, nil, nil, utils.LavaFormatError("Provider identified invalid vrfIndex in data reliability request, the given index and self index are different", nil, - &map[string]string{ - "requested epoch": strconv.FormatInt(request.BlockHeight, 10), "userAddr": userAddr.String(), - "dataReliability": string(dataReliabilityMarshalled), "relayEpochStart": strconv.FormatInt(request.BlockHeight, 10), - "vrfIndex": strconv.FormatInt(vrfIndex, 10), - "self Index": strconv.FormatInt(authorisedUserResponse.Index, 10), - }) - } - utils.LavaFormatInfo("Simulation: server got valid DataReliability request", nil) - - userSessions.Lock.Lock() - getOrCreateDataByEpoch(userSessions, uint64(request.BlockHeight), maxcuRes, vrf_pk, userAddr.String()) - userSessions.dataByEpoch[uint64(request.BlockHeight)].DataReliability = request.DataReliability - userSessions.Lock.Unlock() - } else { - relaySession, err = getOrCreateSession(ctx, userAddr.String(), request) - if err != nil { - return nil, nil, nil, nil, err - } - if relaySession == nil { - return nil, nil, nil, nil, utils.LavaFormatError("getOrCreateSession has a RelaySession nil without an error", nil, nil) - } - relaySession.Lock.Lock() - pairingEpoch := relaySession.GetPairingEpoch() - - if request.BlockHeight != int64(pairingEpoch) { - relaySession.Lock.Unlock() - return nil, nil, nil, nil, utils.LavaFormatError("request blockheight mismatch to session epoch", nil, - &map[string]string{ - "pairingEpoch": strconv.FormatUint(pairingEpoch, 10), "userAddr": userAddr.String(), - "relay blockheight": strconv.FormatInt(request.BlockHeight, 10), - }) - } - - userSessions = relaySession.userSessionsParent - relaySession.Lock.Unlock() - - // Validate - if request.SessionId == 0 { - return nil, nil, nil, nil, utils.LavaFormatError("SessionID cannot be 0 for non-data reliability requests", nil, - &map[string]string{ - "pairingEpoch": strconv.FormatUint(pairingEpoch, 10), "userAddr": userAddr.String(), - "relay request": fmt.Sprintf("%v", request), - }) - } - // Update session - err = updateSessionCu(relaySession, userSessions, nodeMsg.GetServiceApi(), request, pairingEpoch) - if err != nil { - return nil, nil, nil, nil, err - } - - relaySession.Lock.Lock() - - // Make a shallow copy of relay request and save it as session proof - relaySession.Proof = request.ShallowCopy() - - 
relaySession.Lock.Unlock() - } - if userSessions == nil { - return nil, nil, nil, nil, utils.LavaFormatError("relay Init has a nil UserSession", nil, &map[string]string{"userSessions": fmt.Sprintf("%+v", userSessions)}) - } - return userAddr, nodeMsg, userSessions, relaySession, nil -} - -func (s *relayServer) onRelayFailure(userSessions *UserSessions, relaySession *RelaySession, nodeMsg chainproxy.NodeMessage) error { - if userSessions == nil || relaySession == nil { // verify sessions are not nil - return utils.LavaFormatError("relayFailure had a UserSession Or RelaySession nil", nil, &map[string]string{"userSessions": fmt.Sprintf("%+v", userSessions), "relaySession": fmt.Sprintf("%+v", relaySession)}) - } - // deal with relaySession - computeUnits := nodeMsg.GetServiceApi().ComputeUnits - relaySession.Lock.Lock() - pairingEpoch := relaySession.PairingEpoch - relaySession.RelayNum -= 1 - relaySession.CuSum -= computeUnits - var retError error - if int64(relaySession.RelayNum) < 0 || int64(relaySession.CuSum) < 0 { // relayNumber must be greater than zero. - utils.LavaFormatError("consumer RelayNumber or CuSum are negative values", nil, &map[string]string{ - "RelayNum": strconv.FormatUint(relaySession.RelayNum, 10), - "CuSum": strconv.FormatUint(relaySession.CuSum, 10), - }) - relaySession.RelayNum = 0 - relaySession.CuSum = 0 - retError = lavasession.SessionOutOfSyncError - } - relaySession.Lock.Unlock() - // deal with userSessions - userSessions.Lock.Lock() - userSessions.dataByEpoch[pairingEpoch].UsedComputeUnits -= computeUnits - if int64(userSessions.dataByEpoch[pairingEpoch].UsedComputeUnits) < 0 { - // if the provider lost sync with the consumer itself, and not just a session. we blockList the consumer. - userSessions.dataByEpoch[pairingEpoch].UsedComputeUnits = 0 - userSessions.IsBlockListed = true - retError = utils.LavaFormatError("userSessions Out of sync, Blocking consumer", - fmt.Errorf("userSessions.dataByEpoch[pairingEpoch].UsedComputeUnits reached negative value"), - &map[string]string{ - "consumer_address": userSessions.user, - "userSessions.dataByEpoch[pairingEpoch].UsedComputeUnits": strconv.FormatUint(userSessions.dataByEpoch[pairingEpoch].UsedComputeUnits, 10), - }) - } - userSessions.Lock.Unlock() - return retError -} - -func (s *relayServer) handleRelayErrorStatus(err error) error { - if err == nil { - return nil - } - if lavasession.SessionOutOfSyncError.Is(err) { - err = status.Error(codes.Code(lavasession.SessionOutOfSyncError.ABCICode()), err.Error()) - } - return err -} - -func (s *relayServer) Relay(ctx context.Context, request *pairingtypes.RelayRequest) (*pairingtypes.RelayReply, error) { - utils.LavaFormatDebug("Provider got relay request", &map[string]string{ - "request.SessionId": strconv.FormatUint(request.SessionId, 10), - "request.relayNumber": strconv.FormatUint(request.RelayNum, 10), - "request.cu": strconv.FormatUint(request.CuSum, 10), - }) - userAddr, nodeMsg, userSessions, relaySession, err := s.initRelay(ctx, request) - if err != nil { - return nil, s.handleRelayErrorStatus(err) - } - - reply, err := s.TryRelay(ctx, request, userAddr, nodeMsg) - if err != nil && request.DataReliability == nil { // we ignore data reliability because its not checking/adding cu/relaynum. - // failed to send relay. we need to adjust session state. cuSum and relayNumber. 
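Concretely, the accounting that updateSessionCu enforces on the way in and onRelayFailure unwinds on error looks like this (numbers are made up for illustration):

package main

import "fmt"

// Made-up numbers illustrating the session bookkeeping above: a session currently
// at RelayNum=3, CuSum=30, serving an API that costs 10 compute units.
func main() {
	prevRelayNum, prevCuSum, apiCu := uint64(3), uint64(30), uint64(10)

	// updateSessionCu accepts the next request only if it advances by exactly
	// one relay and exactly the API's compute units.
	reqRelayNum, reqCuSum := uint64(4), uint64(40)
	accepted := reqRelayNum == prevRelayNum+1 && reqCuSum == prevCuSum+apiCu
	fmt.Println("accepted:", accepted) // true; anything else is SessionOutOfSyncError

	// if the node call then fails, onRelayFailure rolls the session back so the
	// consumer and provider stay in sync for the next relay.
	rolledBackRelayNum, rolledBackCuSum := reqRelayNum-1, reqCuSum-apiCu
	fmt.Println(rolledBackRelayNum, rolledBackCuSum) // 3 30
}

The per-epoch UsedComputeUnits/MaxComputeUnits cap shown earlier is checked on top of this, and a negative balance after rollback is what triggers blocklisting the consumer.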
- relayFailureError := s.onRelayFailure(userSessions, relaySession, nodeMsg) - if relayFailureError != nil { - err = sdkerrors.Wrapf(relayFailureError, "On relay failure: "+err.Error()) - } - utils.LavaFormatError("TryRelay Failed", err, &map[string]string{ - "request.SessionId": strconv.FormatUint(request.SessionId, 10), - "request.userAddr": userAddr.String(), - }) - } else { - utils.LavaFormatDebug("Provider Finished Relay Successfully", &map[string]string{ - "request.SessionId": strconv.FormatUint(request.SessionId, 10), - "request.relayNumber": strconv.FormatUint(request.RelayNum, 10), - }) - } - return reply, s.handleRelayErrorStatus(err) -} - -func (s *relayServer) TryRelay(ctx context.Context, request *pairingtypes.RelayRequest, userAddr sdk.AccAddress, nodeMsg chainproxy.NodeMessage) (*pairingtypes.RelayReply, error) { - // Send - var reqMsg *chainproxy.JsonrpcMessage - var reqParams interface{} - switch msg := nodeMsg.GetMsg().(type) { - case *chainproxy.JsonrpcMessage: - reqMsg = msg - reqParams = reqMsg.Params - default: - reqMsg = nil - } - latestBlock := int64(0) - finalizedBlockHashes := map[int64]interface{}{} - var requestedBlockHash []byte = nil - finalized := false - if g_sentry.GetSpecDataReliabilityEnabled() { - // Add latest block and finalized data - var requestedBlockHashStr string - var err error - latestBlock, finalizedBlockHashes, requestedBlockHashStr, err = g_chainSentry.GetLatestBlockData(request.RequestBlock) - if err != nil { - return nil, utils.LavaFormatError("Could not guarantee data reliability", err, &map[string]string{"requestedBlock": strconv.FormatInt(request.RequestBlock, 10), "latestBlock": strconv.FormatInt(latestBlock, 10)}) - } - if requestedBlockHashStr == "" { - // avoid using cache, but can still service - utils.LavaFormatWarning("no hash data for requested block", nil, &map[string]string{"requestedBlock": strconv.FormatInt(request.RequestBlock, 10), "latestBlock": strconv.FormatInt(latestBlock, 10)}) - } else { - requestedBlockHash = []byte(requestedBlockHashStr) - } - request.RequestBlock = sentry.ReplaceRequestedBlock(request.RequestBlock, latestBlock) - - // TODO: uncomment when we add chain tracker - // if request.RequestBlock > latestBlock { - // // consumer asked for a block that is newer than our state tracker, we cant sign this for DR - // return nil, utils.LavaFormatError("Requested a block that is too new", err, &map[string]string{"requestedBlock": strconv.FormatInt(request.RequestBlock, 10), "latestBlock": strconv.FormatInt(latestBlock, 10)}) - // } - - finalized = g_sentry.IsFinalizedBlock(request.RequestBlock, latestBlock) - } - cache := g_chainProxy.GetCache() - // TODO: handle cache on fork for dataReliability = false - var reply *pairingtypes.RelayReply = nil - var err error = nil - if requestedBlockHash != nil || finalized { - reply, err = cache.GetEntry(ctx, request, g_sentry.ApiInterface, requestedBlockHash, g_sentry.ChainID, finalized) - } - if err != nil || reply == nil { - if err != nil && performance.NotConnectedError.Is(err) { - utils.LavaFormatWarning("cache not connected", err, nil) - } - // cache miss or invalid - reply, _, _, err = nodeMsg.Send(ctx, nil) - if err != nil { - return nil, utils.LavaFormatError("Sending nodeMsg failed", err, nil) - } - if requestedBlockHash != nil || finalized { - err := cache.SetEntry(ctx, request, g_sentry.ApiInterface, requestedBlockHash, g_sentry.ChainID, userAddr.String(), reply, finalized) - if err != nil && !performance.NotInitialisedError.Is(err) { - 
utils.LavaFormatWarning("error updating cache with new entry", err, nil) - } - } - } - - apiName := nodeMsg.GetServiceApi().Name - if reqMsg != nil && strings.Contains(apiName, "unsubscribe") { - err := processUnsubscribe(apiName, userAddr, reqParams) - if err != nil { - return nil, err - } - } - // TODO: verify that the consumer still listens, if it took to much time to get the response we cant update the CU. - - jsonStr, err := json.Marshal(finalizedBlockHashes) - if err != nil { - return nil, utils.LavaFormatError("failed unmarshaling finalizedBlockHashes", err, - &map[string]string{"finalizedBlockHashes": fmt.Sprintf("%v", finalizedBlockHashes)}) - } - - reply.FinalizedBlocksHashes = jsonStr - reply.LatestBlock = latestBlock - - getSignaturesFromRequest := func(request pairingtypes.RelayRequest) error { - // request is a copy of the original request, but won't modify it - // update relay request requestedBlock to the provided one in case it was arbitrary - sentry.UpdateRequestedBlock(&request, reply) - // Update signature, - sig, err := sigs.SignRelayResponse(g_privKey, reply, &request) - if err != nil { - return utils.LavaFormatError("failed signing relay response", err, - &map[string]string{"request": fmt.Sprintf("%v", request), "reply": fmt.Sprintf("%v", reply)}) - } - reply.Sig = sig - - if g_sentry.GetSpecDataReliabilityEnabled() { - // update sig blocks signature - sigBlocks, err := sigs.SignResponseFinalizationData(g_privKey, reply, &request, userAddr) - if err != nil { - return utils.LavaFormatError("failed signing finalization data", err, - &map[string]string{"request": fmt.Sprintf("%v", request), "reply": fmt.Sprintf("%v", reply), "userAddr": userAddr.String()}) - } - reply.SigBlocks = sigBlocks - } - return nil - } - err = getSignaturesFromRequest(*request) - if err != nil { - return nil, err - } - - // return reply to user - return reply, nil -} - -func (s *relayServer) RelaySubscribe(request *pairingtypes.RelayRequest, srv pairingtypes.Relayer_RelaySubscribeServer) error { - utils.LavaFormatInfo("Provider got relay request subscribe", &map[string]string{ - "request.SessionId": strconv.FormatUint(request.SessionId, 10), - }) - _, nodeMsg, userSessions, relaySession, err := s.initRelay(context.Background(), request) - if err != nil { - return err - } - - err = s.TryRelaySubscribe(request, srv, nodeMsg, userSessions) - if err != nil && request.DataReliability == nil { // we ignore data reliability because its not checking/adding cu/relaynum. - // failed to send relay. we need to adjust session state. cuSum and relayNumber. 
- relayFailureError := s.onRelayFailure(userSessions, relaySession, nodeMsg) - if relayFailureError != nil { - err = sdkerrors.Wrapf(relayFailureError, "Relay Error: "+err.Error()) - } - } - return err -} - -func (s *relayServer) TryRelaySubscribe(request *pairingtypes.RelayRequest, srv pairingtypes.Relayer_RelaySubscribeServer, nodeMsg chainproxy.NodeMessage, userSessions *UserSessions) error { - var reply *pairingtypes.RelayReply - var clientSub *rpcclient.ClientSubscription - var subscriptionID string - subscribeRepliesChan := make(chan interface{}) - reply, subscriptionID, clientSub, err := nodeMsg.Send(context.Background(), subscribeRepliesChan) - if err != nil { - return utils.LavaFormatError("Subscription failed", err, nil) - } - - userSessions.Lock.Lock() - if _, ok := userSessions.Subs[subscriptionID]; ok { - return utils.LavaFormatError("SubscriptiodID: "+subscriptionID+"exists", nil, nil) - } - userSessions.Subs[subscriptionID] = &subscription{ - id: subscriptionID, - sub: clientSub, - subscribeRepliesChan: subscribeRepliesChan, - } - userSessions.Lock.Unlock() - - err = srv.Send(reply) // this reply contains the RPC ID - if err != nil { - utils.LavaFormatError("Error getting RPC ID", err, nil) - } - - for { - select { - case <-clientSub.Err(): - utils.LavaFormatError("client sub", err, nil) - // delete this connection from the subs map - userSessions.Lock.Lock() - if sub, ok := userSessions.Subs[subscriptionID]; ok { - sub.disconnect() - delete(userSessions.Subs, subscriptionID) - } - userSessions.Lock.Unlock() - return err - case subscribeReply := <-subscribeRepliesChan: - data, err := json.Marshal(subscribeReply) - if err != nil { - utils.LavaFormatError("client sub unmarshal", err, nil) - userSessions.Lock.Lock() - if sub, ok := userSessions.Subs[subscriptionID]; ok { - sub.disconnect() - delete(userSessions.Subs, subscriptionID) - } - userSessions.Lock.Unlock() - return err - } - - err = srv.Send( - &pairingtypes.RelayReply{ - Data: data, - }, - ) - if err != nil { - // usually triggered when client closes connection - if strings.Contains(err.Error(), "Canceled desc = context canceled") { - utils.LavaFormatWarning("Client closed connection", err, nil) - } else { - utils.LavaFormatError("srv.Send", err, nil) - } - userSessions.Lock.Lock() - if sub, ok := userSessions.Subs[subscriptionID]; ok { - sub.disconnect() - delete(userSessions.Subs, subscriptionID) - } - userSessions.Lock.Unlock() - return err - } - - utils.LavaFormatInfo("Sending data", &map[string]string{"data": string(data)}) - } - } -} - -func (relayServ *relayServer) VerifyReliabilityAddressSigning(ctx context.Context, consumer sdk.AccAddress, request *pairingtypes.RelayRequest) (valid bool, err error) { - queryHash := utils.CalculateQueryHash(*request) - if !bytes.Equal(queryHash, request.DataReliability.QueryHash) { - return false, utils.LavaFormatError("query hash mismatch on data reliability message", nil, - &map[string]string{"queryHash": string(queryHash), "request QueryHash": string(request.DataReliability.QueryHash)}) - } - - // validate consumer signing on VRF data - valid, err = sigs.ValidateSignerOnVRFData(consumer, *request.DataReliability) - if err != nil { - return false, utils.LavaFormatError("failed to Validate Signer On VRF Data", err, - &map[string]string{"consumer": consumer.String(), "request.DataReliability": fmt.Sprintf("%v", request.DataReliability)}) - } - if !valid { - return false, nil - } - // validate provider signing on query data - pubKey, err := 
sigs.RecoverProviderPubKeyFromVrfDataAndQuery(request) - if err != nil { - return false, utils.LavaFormatError("failed to Recover Provider PubKey From Vrf Data And Query", err, - &map[string]string{"consumer": consumer.String(), "request": fmt.Sprintf("%v", request)}) - } - providerAccAddress, err := sdk.AccAddressFromHex(pubKey.Address().String()) // consumer signer - if err != nil { - return false, utils.LavaFormatError("failed converting signer to address", err, - &map[string]string{"consumer": consumer.String(), "PubKey": pubKey.Address().String()}) - } - return g_sentry.IsAuthorizedPairing(ctx, consumer.String(), providerAccAddress.String(), uint64(request.BlockHeight)) // return if this pairing is authorised -} - -func SendVoteCommitment(voteID string, vote *voteData) { - msg := conflicttypes.NewMsgConflictVoteCommit(g_sentry.Acc, voteID, vote.CommitHash) - myWriter := bytes.Buffer{} - g_sentry.ClientCtx.Output = &myWriter - err := tx.GenerateOrBroadcastTxWithFactory(g_sentry.ClientCtx, g_txFactory, msg) - if err != nil { - utils.LavaFormatError("failed to send vote commitment", err, nil) - } -} - -func SendVoteReveal(voteID string, vote *voteData) { - msg := conflicttypes.NewMsgConflictVoteReveal(g_sentry.Acc, voteID, vote.Nonce, vote.RelayDataHash) - myWriter := bytes.Buffer{} - g_sentry.ClientCtx.Output = &myWriter - err := tx.GenerateOrBroadcastTxWithFactory(g_sentry.ClientCtx, g_txFactory, msg) - if err != nil { - utils.LavaFormatError("failed to send vote Reveal", err, nil) - } -} - -func voteEventHandler(ctx context.Context, voteID string, voteDeadline uint64, voteParams *sentry.VoteParams) { - // got a vote event, handle the cases here - - if !voteParams.GetCloseVote() { - // meaning we dont close a vote, so we should check stuff - if voteParams != nil { - // chainID is sent only on new votes - chainID := voteParams.ChainID - if chainID != g_serverChainID { - // not our chain ID - return - } - } - nodeHeight := uint64(g_sentry.GetBlockHeight()) - if voteDeadline < nodeHeight { - // its too late to vote - utils.LavaFormatError("Vote Event received but it's too late to vote", nil, - &map[string]string{"deadline": strconv.FormatUint(voteDeadline, 10), "nodeHeight": strconv.FormatUint(nodeHeight, 10)}) - return - } - } - g_votes_mutex.Lock() - defer g_votes_mutex.Unlock() - vote, ok := g_votes[voteID] - if ok { - // we have an existing vote with this ID - if voteParams != nil { - if voteParams.GetCloseVote() { - // we are closing the vote, so its okay we have this voteID - utils.LavaFormatInfo("Received Vote termination event for vote, cleared entry", - &map[string]string{"voteID": voteID}) - delete(g_votes, voteID) - return - } - // expected to start a new vote but found an existing one - utils.LavaFormatError("new vote Request for vote had existing entry", nil, - &map[string]string{"voteParams": fmt.Sprintf("%+v", voteParams), "voteID": voteID, "voteData": fmt.Sprintf("%+v", vote)}) - return - } - utils.LavaFormatInfo(" Received Vote Reveal for vote, sending Reveal for result", - &map[string]string{"voteID": voteID, "voteData": fmt.Sprintf("%+v", vote)}) - SendVoteReveal(voteID, vote) - return - } else { - // new vote - if voteParams == nil { - utils.LavaFormatError("vote reveal Request didn't have a vote entry", nil, - &map[string]string{"voteID": voteID}) - return - } - if voteParams.GetCloseVote() { - utils.LavaFormatError("vote closing received but didn't have a vote entry", nil, - &map[string]string{"voteID": voteID}) - return - } - // try to find this provider in the jury 
- found := slices.Contains(voteParams.Voters, g_sentry.Acc) - if !found { - utils.LavaFormatInfo("new vote initiated but not for this provider to vote", nil) - // this is a new vote but not for us - return - } - // we need to send a commit, first we need to use the chainProxy and get the response - // TODO: implement code that verified the requested block is finalized and if its not waits and tries again - nodeMsg, err := g_chainProxy.ParseMsg(voteParams.ApiURL, voteParams.RequestData, voteParams.ConnectionType) - if err != nil { - utils.LavaFormatError("vote Request did not pass the api check on chain proxy", err, - &map[string]string{"voteID": voteID, "chainID": voteParams.ChainID}) - return - } - reply, _, _, err := nodeMsg.Send(ctx, nil) - if err != nil { - utils.LavaFormatError("vote relay send has failed", err, - &map[string]string{"ApiURL": voteParams.ApiURL, "RequestData": string(voteParams.RequestData)}) - return - } - nonce := rand.Int63() - replyDataHash := sigs.HashMsg(reply.Data) - commitHash := conflicttypes.CommitVoteData(nonce, replyDataHash) - - vote = &voteData{RelayDataHash: replyDataHash, Nonce: nonce, CommitHash: commitHash} - g_votes[voteID] = vote - utils.LavaFormatInfo("Received Vote start, sending commitment for result", &map[string]string{"voteID": voteID, "voteData": fmt.Sprintf("%+v", vote)}) - SendVoteCommitment(voteID, vote) - return - } -} - -func Server( - ctx context.Context, - clientCtx client.Context, - txFactory tx.Factory, - listenAddr string, - nodeUrl string, - chainID string, - apiInterface string, - flagSet *pflag.FlagSet, -) { - utils.LavaFormatInfo("lavad Binary Version: "+version.Version, nil) - // - // ctrl+c - ctx, cancel := context.WithCancel(ctx) - signalChan := make(chan os.Signal, 1) - signal.Notify(signalChan, os.Interrupt) - defer func() { - signal.Stop(signalChan) - cancel() - }() - - // Init random seed - rand.Seed(time.Now().UnixNano()) - g_serverID = uint64(rand.Int63()) - - // - - // Start newSentry - newSentry := sentry.NewSentry(clientCtx, txFactory, chainID, false, voteEventHandler, askForRewards, apiInterface, nil, flagSet, g_serverID) - err := newSentry.Init(ctx) - if err != nil { - utils.LavaFormatError("sentry init failure to initialize", err, &map[string]string{"apiInterface": apiInterface, "ChainID": chainID}) - return - } - go newSentry.Start(ctx) - for newSentry.GetSpecHash() == nil { - time.Sleep(1 * time.Second) - } - g_sentry = newSentry - g_sessions = map[string]*UserSessions{} - g_votes = map[string]*voteData{} - g_rewardsSessions = map[uint64][]*RelaySession{} - g_serverChainID = chainID - // allow more gas - g_txFactory = txFactory.WithGas(1000000) - - // - // Info - utils.LavaFormatInfo("Server starting", &map[string]string{"listenAddr": listenAddr, "ChainID": newSentry.GetChainID(), "node": nodeUrl, "spec": newSentry.GetSpecName(), "api Interface": apiInterface}) - - // - // Keys - keyName, err := sigs.GetKeyName(clientCtx) - if err != nil { - utils.LavaFormatFatal("provider failure to getKeyName", err, &map[string]string{"apiInterface": apiInterface, "ChainID": chainID}) - } - - privKey, err := sigs.GetPrivKey(clientCtx, keyName) - if err != nil { - utils.LavaFormatFatal("provider failure to getPrivKey", err, &map[string]string{"apiInterface": apiInterface, "ChainID": chainID}) - } - g_privKey = privKey - serverKey, _ := clientCtx.Keyring.Key(keyName) - utils.LavaFormatInfo("Server loaded keys", &map[string]string{"PublicKey": serverKey.GetPubKey().Address().String()}) - // - // Node - // get portal logs - pLogs, 
err := chainproxy.NewPortalLogs() - if err != nil { - utils.LavaFormatFatal("provider failure to NewPortalLogs", err, &map[string]string{"apiInterface": apiInterface, "ChainID": chainID}) - } - numberOfNodeParallelConnections, err := flagSet.GetUint(chainproxy.ParallelConnectionsFlag) - if err != nil { - utils.LavaFormatFatal("error fetching chainproxy.ParallelConnectionsFlag", err, nil) - } - - chainProxy, err := chainproxy.GetChainProxy(nodeUrl, numberOfNodeParallelConnections, newSentry, pLogs, flagSet) - if err != nil { - utils.LavaFormatFatal("provider failure to GetChainProxy", err, &map[string]string{"apiInterface": apiInterface, "ChainID": chainID}) - } - chainProxy.Start(ctx) - g_chainProxy = chainProxy - - if g_sentry.GetSpecDataReliabilityEnabled() { - // Start chain sentry - chainSentry := chainsentry.NewChainSentry(clientCtx, chainProxy, chainID) - var chainSentryInitError error - errMapInfo := &map[string]string{"apiInterface": apiInterface, "ChainID": chainID, "nodeUrl": nodeUrl} - for attempt := 0; attempt < RetryInitAttempts; attempt++ { - chainSentryInitError = chainSentry.Init(ctx) - if chainSentryInitError != nil { - if chainsentry.ErrorFailedToFetchLatestBlock.Is(chainSentryInitError) { // we allow ErrorFailedToFetchLatestBlock. to retry - utils.LavaFormatWarning(fmt.Sprintf("chainSentry Init failed. Attempt Number: %d/%d, Retrying in %d seconds", - attempt+1, RetryInitAttempts, TimeWaitInitializeChainSentry), nil, nil) - time.Sleep(TimeWaitInitializeChainSentry * time.Second) - continue - } else { // other errors are currently fatal. - utils.LavaFormatFatal("Provider Init failure", chainSentryInitError, errMapInfo) - } - } - // break when chainSentry was initialized successfully - break - } - if chainSentryInitError != nil { - utils.LavaFormatFatal("provider failure initializing chainSentry - nodeUrl might be unreachable or offline", chainSentryInitError, errMapInfo) - } - - chainSentry.Start(ctx) - g_chainSentry = chainSentry - } - - // - // GRPC - lis, err := net.Listen("tcp", listenAddr) - if err != nil { - utils.LavaFormatFatal("provider failure setting up listener", err, &map[string]string{"listenAddr": listenAddr, "ChainID": chainID}) - } - s := grpc.NewServer() - - wrappedServer := grpcweb.WrapServer(s) - handler := func(resp http.ResponseWriter, req *http.Request) { - // Set CORS headers - resp.Header().Set("Access-Control-Allow-Origin", "*") - resp.Header().Set("Access-Control-Allow-Headers", "Content-Type,x-grpc-web") - - wrappedServer.ServeHTTP(resp, req) - } - - httpServer := http.Server{ - Handler: h2c.NewHandler(http.HandlerFunc(handler), &http2.Server{}), - } - - go func() { - select { - case <-ctx.Done(): - utils.LavaFormatInfo("Provider Server ctx.Done", nil) - case <-signalChan: - utils.LavaFormatInfo("Provider Server signalChan", nil) - } - - shutdownCtx, shutdownRelease := context.WithTimeout(context.Background(), 10*time.Second) - defer shutdownRelease() - - if err := httpServer.Shutdown(shutdownCtx); err != nil { - utils.LavaFormatFatal("Provider failed to shutdown", err, &map[string]string{}) - } - }() - - Server := &relayServer{} - - pairingtypes.RegisterRelayerServer(s, Server) - - cacheAddr, err := flagSet.GetString(performance.CacheFlagName) - if err != nil { - utils.LavaFormatError("Failed To Get Cache Address flag", err, &map[string]string{"flags": fmt.Sprintf("%v", flagSet)}) - } else if cacheAddr != "" { - cache, err := performance.InitCache(ctx, cacheAddr) - if err != nil { - utils.LavaFormatError("Failed To Connect to cache at 
address", err, &map[string]string{"address": cacheAddr}) - } else { - utils.LavaFormatInfo("cache service connected", &map[string]string{"address": cacheAddr}) - chainProxy.SetCache(cache) - } - } - - utils.LavaFormatInfo("Server listening", &map[string]string{"Address": lis.Addr().String()}) - // serve is blocking, until terminated - if err := httpServer.Serve(lis); !errors.Is(err, http.ErrServerClosed) { - utils.LavaFormatFatal("provider failed to serve", err, &map[string]string{"Address": lis.Addr().String(), "ChainID": chainID}) - } - // in case we stop serving, claim rewards - askForRewards(int64(g_sentry.GetCurrentEpochHeight())) -} diff --git a/relayer/sigs/sigs.go b/relayer/sigs/sigs.go index f14016cde0..f7148f17ce 100644 --- a/relayer/sigs/sigs.go +++ b/relayer/sigs/sigs.go @@ -167,21 +167,6 @@ func RecoverPubKeyFromVRFData(vrfData pairingtypes.VRFData) (secp256k1.PubKey, e return pubKey, nil } -func DataReliabilityByConsumer(vrfs []*pairingtypes.VRFData) (dataReliabilityByConsumer map[string]*pairingtypes.VRFData, err error) { - dataReliabilityByConsumer = map[string]*pairingtypes.VRFData{} - if len(vrfs) == 0 { - return - } - for _, vrf := range vrfs { - signer, err := GetSignerForVRF(*vrf) - if err != nil { - return nil, err - } - dataReliabilityByConsumer[signer.String()] = vrf - } - return dataReliabilityByConsumer, nil -} - func GetSignerForVRF(dataReliability pairingtypes.VRFData) (signer sdk.AccAddress, err error) { pubKey, err := RecoverPubKeyFromVRFData(dataReliability) if err != nil { @@ -267,3 +252,10 @@ func GenerateFloatingKey() (secretKey *btcSecp256k1.PrivateKey, addr sdk.AccAddr addr, _ = sdk.AccAddressFromHex(publicBytes.Address().String()) return } + +func CalculateContentHashForRelayData(relayRequestData *pairingtypes.RelayPrivateData) []byte { + requestBlockBytes := make([]byte, 8) + binary.LittleEndian.PutUint64(requestBlockBytes, uint64(relayRequestData.RequestBlock)) + msgData := bytes.Join([][]byte{[]byte(relayRequestData.ApiInterface), []byte(relayRequestData.ConnectionType), []byte(relayRequestData.ApiUrl), relayRequestData.Data, requestBlockBytes, relayRequestData.Salt}, nil) + return HashMsg(msgData) +} diff --git a/testutil/common/common.go b/testutil/common/common.go index 40c7601cff..87146414b2 100644 --- a/testutil/common/common.go +++ b/testutil/common/common.go @@ -85,42 +85,49 @@ func CreateMsgDetection(ctx context.Context, consumer Account, provider0 Account msg.Creator = consumer.Addr.String() // request 0 msg.ResponseConflict = &conflicttypes.ResponseConflict{ConflictRelayData0: &conflicttypes.ConflictRelayData{Request: &types.RelayRequest{}, Reply: &types.RelayReply{}}, ConflictRelayData1: &conflicttypes.ConflictRelayData{Request: &types.RelayRequest{}, Reply: &types.RelayReply{}}} - msg.ResponseConflict.ConflictRelayData0.Request.ConnectionType = "" - msg.ResponseConflict.ConflictRelayData0.Request.ApiUrl = "" - msg.ResponseConflict.ConflictRelayData0.Request.BlockHeight = sdk.UnwrapSDKContext(ctx).BlockHeight() - msg.ResponseConflict.ConflictRelayData0.Request.ChainID = spec.Index - msg.ResponseConflict.ConflictRelayData0.Request.CuSum = 0 - msg.ResponseConflict.ConflictRelayData0.Request.Data = []byte("DUMMYREQUEST") - msg.ResponseConflict.ConflictRelayData0.Request.Provider = provider0.Addr.String() - msg.ResponseConflict.ConflictRelayData0.Request.QoSReport = &types.QualityOfServiceReport{Latency: sdk.OneDec(), Availability: sdk.OneDec(), Sync: sdk.OneDec()} - msg.ResponseConflict.ConflictRelayData0.Request.RelayNum = 1 - 
msg.ResponseConflict.ConflictRelayData0.Request.SessionId = 1 - msg.ResponseConflict.ConflictRelayData0.Request.RequestBlock = 100 + msg.ResponseConflict.ConflictRelayData0.Request.RelayData = &types.RelayPrivateData{ + ConnectionType: "", + ApiUrl: "", + Data: []byte("DUMMYREQUEST"), + RequestBlock: 100, + ApiInterface: "", + Salt: []byte{1}, + } + msg.ResponseConflict.ConflictRelayData0.Request.RelaySession = &types.RelaySession{ + Provider: provider0.Addr.String(), + ContentHash: sigs.CalculateContentHashForRelayData(msg.ResponseConflict.ConflictRelayData0.Request.RelayData), + SessionId: uint64(1), + ChainID: spec.Index, + CuSum: 0, + BlockHeight: sdk.UnwrapSDKContext(ctx).BlockHeight(), + RelayNum: 0, + QoSReport: &types.QualityOfServiceReport{Latency: sdk.OneDec(), Availability: sdk.OneDec(), Sync: sdk.OneDec()}, + } + msg.ResponseConflict.ConflictRelayData0.Request.DataReliability = nil - msg.ResponseConflict.ConflictRelayData0.Request.Sig = []byte{} - sig, err := sigs.SignRelay(consumer.SK, *msg.ResponseConflict.ConflictRelayData0.Request) + sig, err := sigs.SignRelay(consumer.SK, *msg.ResponseConflict.ConflictRelayData0.Request.RelaySession) if err != nil { return msg, err } - msg.ResponseConflict.ConflictRelayData0.Request.Sig = sig + msg.ResponseConflict.ConflictRelayData0.Request.RelaySession.Sig = sig // request 1 temp, _ := msg.ResponseConflict.ConflictRelayData0.Request.Marshal() msg.ResponseConflict.ConflictRelayData1.Request.Unmarshal(temp) - msg.ResponseConflict.ConflictRelayData1.Request.Provider = provider1.Addr.String() - msg.ResponseConflict.ConflictRelayData1.Request.Sig = []byte{} - sig, err = sigs.SignRelay(consumer.SK, *msg.ResponseConflict.ConflictRelayData1.Request) + msg.ResponseConflict.ConflictRelayData1.Request.RelaySession.Provider = provider1.Addr.String() + msg.ResponseConflict.ConflictRelayData1.Request.RelaySession.Sig = []byte{} + sig, err = sigs.SignRelay(consumer.SK, *msg.ResponseConflict.ConflictRelayData1.Request.RelaySession) if err != nil { return msg, err } - msg.ResponseConflict.ConflictRelayData1.Request.Sig = sig + msg.ResponseConflict.ConflictRelayData1.Request.RelaySession.Sig = sig // reply 0 msg.ResponseConflict.ConflictRelayData0.Reply.Nonce = 10 msg.ResponseConflict.ConflictRelayData0.Reply.FinalizedBlocksHashes = []byte{} - msg.ResponseConflict.ConflictRelayData0.Reply.LatestBlock = msg.ResponseConflict.ConflictRelayData0.Request.RequestBlock + int64(spec.BlockDistanceForFinalizedData) + msg.ResponseConflict.ConflictRelayData0.Reply.LatestBlock = msg.ResponseConflict.ConflictRelayData0.Request.RelayData.RequestBlock + int64(spec.BlockDistanceForFinalizedData) msg.ResponseConflict.ConflictRelayData0.Reply.Data = []byte("DUMMYREPLY") sig, err = sigs.SignRelayResponse(provider0.SK, msg.ResponseConflict.ConflictRelayData0.Reply, msg.ResponseConflict.ConflictRelayData0.Request) if err != nil { diff --git a/x/conflict/keeper/conflict.go b/x/conflict/keeper/conflict.go index 21a589c59e..a2545c985c 100644 --- a/x/conflict/keeper/conflict.go +++ b/x/conflict/keeper/conflict.go @@ -16,39 +16,39 @@ func (k Keeper) ValidateFinalizationConflict(ctx sdk.Context, conflictData *type func (k Keeper) ValidateResponseConflict(ctx sdk.Context, conflictData *types.ResponseConflict, clientAddr sdk.AccAddress) error { // 1. 
validate mismatching data - chainID := conflictData.ConflictRelayData0.Request.ChainID - if chainID != conflictData.ConflictRelayData1.Request.ChainID { - return fmt.Errorf("mismatching request parameters between providers %s, %s", chainID, conflictData.ConflictRelayData1.Request.ChainID) + chainID := conflictData.ConflictRelayData0.Request.RelaySession.ChainID + if chainID != conflictData.ConflictRelayData1.Request.RelaySession.ChainID { + return fmt.Errorf("mismatching request parameters between providers %s, %s", chainID, conflictData.ConflictRelayData1.Request.RelaySession.ChainID) } - block := conflictData.ConflictRelayData0.Request.BlockHeight - if block != conflictData.ConflictRelayData1.Request.BlockHeight { - return fmt.Errorf("mismatching request parameters between providers %d, %d", block, conflictData.ConflictRelayData1.Request.BlockHeight) + block := conflictData.ConflictRelayData0.Request.RelaySession.BlockHeight + if block != conflictData.ConflictRelayData1.Request.RelaySession.BlockHeight { + return fmt.Errorf("mismatching request parameters between providers %d, %d", block, conflictData.ConflictRelayData1.Request.RelaySession.BlockHeight) } - if conflictData.ConflictRelayData0.Request.ConnectionType != conflictData.ConflictRelayData1.Request.ConnectionType { - return fmt.Errorf("mismatching request parameters between providers %s, %s", conflictData.ConflictRelayData0.Request.ConnectionType, conflictData.ConflictRelayData1.Request.ConnectionType) + if conflictData.ConflictRelayData0.Request.RelayData.ConnectionType != conflictData.ConflictRelayData1.Request.RelayData.ConnectionType { + return fmt.Errorf("mismatching request parameters between providers %s, %s", conflictData.ConflictRelayData0.Request.RelayData.ConnectionType, conflictData.ConflictRelayData1.Request.RelayData.ConnectionType) } - if conflictData.ConflictRelayData0.Request.ApiUrl != conflictData.ConflictRelayData1.Request.ApiUrl { - return fmt.Errorf("mismatching request parameters between providers %s, %s", conflictData.ConflictRelayData0.Request.ApiUrl, conflictData.ConflictRelayData1.Request.ApiUrl) + if conflictData.ConflictRelayData0.Request.RelayData.ApiUrl != conflictData.ConflictRelayData1.Request.RelayData.ApiUrl { + return fmt.Errorf("mismatching request parameters between providers %s, %s", conflictData.ConflictRelayData0.Request.RelayData.ApiUrl, conflictData.ConflictRelayData1.Request.RelayData.ApiUrl) } - if !bytes.Equal(conflictData.ConflictRelayData0.Request.Data, conflictData.ConflictRelayData1.Request.Data) { - return fmt.Errorf("mismatching request parameters between providers %s, %s", conflictData.ConflictRelayData0.Request.Data, conflictData.ConflictRelayData1.Request.Data) + if !bytes.Equal(conflictData.ConflictRelayData0.Request.RelayData.Data, conflictData.ConflictRelayData1.Request.RelayData.Data) { + return fmt.Errorf("mismatching request parameters between providers %s, %s", conflictData.ConflictRelayData0.Request.RelayData.Data, conflictData.ConflictRelayData1.Request.RelayData.Data) } - if conflictData.ConflictRelayData0.Request.ApiUrl != conflictData.ConflictRelayData1.Request.ApiUrl { - return fmt.Errorf("mismatching request parameters between providers %s, %s", conflictData.ConflictRelayData0.Request.ApiUrl, conflictData.ConflictRelayData1.Request.ApiUrl) + if conflictData.ConflictRelayData0.Request.RelayData.ApiUrl != conflictData.ConflictRelayData1.Request.RelayData.ApiUrl { + return fmt.Errorf("mismatching request parameters between providers %s, %s", 
conflictData.ConflictRelayData0.Request.RelayData.ApiUrl, conflictData.ConflictRelayData1.Request.RelayData.ApiUrl) } - if conflictData.ConflictRelayData0.Request.RequestBlock != conflictData.ConflictRelayData1.Request.RequestBlock { - return fmt.Errorf("mismatching request parameters between providers %d, %d", conflictData.ConflictRelayData0.Request.RequestBlock, conflictData.ConflictRelayData1.Request.RequestBlock) + if conflictData.ConflictRelayData0.Request.RelayData.RequestBlock != conflictData.ConflictRelayData1.Request.RelayData.RequestBlock { + return fmt.Errorf("mismatching request parameters between providers %d, %d", conflictData.ConflictRelayData0.Request.RelayData.RequestBlock, conflictData.ConflictRelayData1.Request.RelayData.RequestBlock) } - if conflictData.ConflictRelayData0.Request.ApiInterface != conflictData.ConflictRelayData1.Request.ApiInterface { - return fmt.Errorf("mismatching request parameters between providers %s, %s", conflictData.ConflictRelayData0.Request.ApiInterface, conflictData.ConflictRelayData1.Request.ApiInterface) + if conflictData.ConflictRelayData0.Request.RelayData.ApiInterface != conflictData.ConflictRelayData1.Request.RelayData.ApiInterface { + return fmt.Errorf("mismatching request parameters between providers %s, %s", conflictData.ConflictRelayData0.Request.RelayData.ApiInterface, conflictData.ConflictRelayData1.Request.RelayData.ApiInterface) } // 1.5 validate params epochStart, _, err := k.epochstorageKeeper.GetEpochStartForBlock(ctx, uint64(block)) if err != nil { return fmt.Errorf("could not find epoch for block %d", block) } - if conflictData.ConflictRelayData0.Request.RequestBlock < 0 { - return fmt.Errorf("invalid request block height %d", conflictData.ConflictRelayData0.Request.RequestBlock) + if conflictData.ConflictRelayData0.Request.RelayData.RequestBlock < 0 { + return fmt.Errorf("invalid request block height %d", conflictData.ConflictRelayData0.Request.RelayData.RequestBlock) } epochBlocks, err := k.epochstorageKeeper.EpochBlocks(ctx, uint64(block)) @@ -67,7 +67,7 @@ func (k Keeper) ValidateResponseConflict(ctx sdk.Context, conflictData *types.Re return fmt.Errorf("did not find a stake entry for consumer %s on epoch %d, chainID %s error: %s", clientAddr, epochStart, chainID, err.Error()) } verifyClientAddrFromSignatureOnRequest := func(conflictRelayData types.ConflictRelayData) error { - pubKey, err := sigs.RecoverPubKeyFromRelay(*conflictRelayData.Request) + pubKey, err := sigs.RecoverPubKeyFromRelay(*conflictRelayData.Request.RelaySession) if err != nil { return fmt.Errorf("invalid consumer signature in relay request %+v , error: %s", conflictRelayData.Request, err.Error()) } @@ -135,8 +135,8 @@ func (k Keeper) ValidateResponseConflict(ctx sdk.Context, conflictData *types.Re return fmt.Errorf("mismatching %s provider address signature and responseFinazalizationData %s , %s", print_st, derived_providerAccAddress, expectedAddress) } // validate the responses are finalized - if !k.specKeeper.IsFinalizedBlock(ctx, chainID, request.RequestBlock, response.LatestBlock) { - return fmt.Errorf("block isn't finalized on %s provider! %d,%d ", print_st, request.RequestBlock, response.LatestBlock) + if !k.specKeeper.IsFinalizedBlock(ctx, chainID, request.RelayData.RequestBlock, response.LatestBlock) { + return fmt.Errorf("block isn't finalized on %s provider! 
%d,%d ", print_st, request.RelayData.RequestBlock, response.LatestBlock) } return nil } diff --git a/x/conflict/keeper/msg_server_detection.go b/x/conflict/keeper/msg_server_detection.go index d83d9868c6..5dd455eeb2 100644 --- a/x/conflict/keeper/msg_server_detection.go +++ b/x/conflict/keeper/msg_server_detection.go @@ -13,7 +13,7 @@ import ( ) func DetectionIndex(msg *types.MsgDetection, epochStart uint64) string { - return msg.Creator + msg.ResponseConflict.ConflictRelayData0.Request.Provider + msg.ResponseConflict.ConflictRelayData1.Request.Provider + strconv.FormatUint(epochStart, 10) + return msg.Creator + msg.ResponseConflict.ConflictRelayData0.Request.RelaySession.Provider + msg.ResponseConflict.ConflictRelayData1.Request.RelaySession.Provider + strconv.FormatUint(epochStart, 10) } func (k msgServer) Detection(goCtx context.Context, msg *types.MsgDetection) (*types.MsgDetectionResponse, error) { @@ -45,38 +45,38 @@ func (k msgServer) Detection(goCtx context.Context, msg *types.MsgDetection) (*t // 3. accept incoming commit transactions for this vote, // 4. after vote ends, accept reveal transactions, strike down every provider that voted (only valid if there was a commit) // 5. majority wins, minority gets penalised - epochStart, _, err := k.epochstorageKeeper.GetEpochStartForBlock(ctx, uint64(msg.ResponseConflict.ConflictRelayData0.Request.BlockHeight)) + epochStart, _, err := k.epochstorageKeeper.GetEpochStartForBlock(ctx, uint64(msg.ResponseConflict.ConflictRelayData0.Request.RelaySession.BlockHeight)) if err != nil { - return nil, utils.LavaError(ctx, logger, "response_conflict_detection", map[string]string{"client": msg.Creator, "provider0": msg.ResponseConflict.ConflictRelayData0.Request.Provider, "provider1": msg.ResponseConflict.ConflictRelayData1.Request.Provider}, "Simulation: could not get EpochStart for specific block") + return nil, utils.LavaError(ctx, logger, "response_conflict_detection", map[string]string{"client": msg.Creator, "provider0": msg.ResponseConflict.ConflictRelayData0.Request.RelaySession.Provider, "provider1": msg.ResponseConflict.ConflictRelayData1.Request.RelaySession.Provider}, "Simulation: could not get EpochStart for specific block") } index := DetectionIndex(msg, epochStart) found := k.Keeper.AllocateNewConflictVote(ctx, index) if found { - return nil, utils.LavaError(ctx, logger, "response_conflict_detection", map[string]string{"client": msg.Creator, "provider0": msg.ResponseConflict.ConflictRelayData0.Request.Provider, "provider1": msg.ResponseConflict.ConflictRelayData1.Request.Provider}, "Simulation: conflict with is already open for this client and providers in this epoch") + return nil, utils.LavaError(ctx, logger, "response_conflict_detection", map[string]string{"client": msg.Creator, "provider0": msg.ResponseConflict.ConflictRelayData0.Request.RelaySession.Provider, "provider1": msg.ResponseConflict.ConflictRelayData1.Request.RelaySession.Provider}, "Simulation: conflict with is already open for this client and providers in this epoch") } conflictVote := types.ConflictVote{} conflictVote.Index = index conflictVote.VoteState = types.StateCommit - conflictVote.VoteStartBlock = uint64(msg.ResponseConflict.ConflictRelayData0.Request.BlockHeight) + conflictVote.VoteStartBlock = uint64(msg.ResponseConflict.ConflictRelayData0.Request.RelaySession.BlockHeight) epochBlocks, err := k.epochstorageKeeper.EpochBlocks(ctx, uint64(ctx.BlockHeight())) if err != nil { - return nil, utils.LavaError(ctx, logger, "response_conflict_detection", 
map[string]string{"client": msg.Creator, "provider0": msg.ResponseConflict.ConflictRelayData0.Request.Provider, "provider1": msg.ResponseConflict.ConflictRelayData1.Request.Provider}, "Simulation: could not get epochblocks") + return nil, utils.LavaError(ctx, logger, "response_conflict_detection", map[string]string{"client": msg.Creator, "provider0": msg.ResponseConflict.ConflictRelayData0.Request.RelaySession.Provider, "provider1": msg.ResponseConflict.ConflictRelayData1.Request.RelaySession.Provider}, "Simulation: could not get epochblocks") } voteDeadline, err := k.Keeper.epochstorageKeeper.GetNextEpoch(ctx, uint64(ctx.BlockHeight())+k.VotePeriod(ctx)*epochBlocks) if err != nil { - return nil, utils.LavaError(ctx, logger, "response_conflict_detection", map[string]string{"client": msg.Creator, "provider0": msg.ResponseConflict.ConflictRelayData0.Request.Provider, "provider1": msg.ResponseConflict.ConflictRelayData1.Request.Provider}, "Simulation: could not get NextEpoch") + return nil, utils.LavaError(ctx, logger, "response_conflict_detection", map[string]string{"client": msg.Creator, "provider0": msg.ResponseConflict.ConflictRelayData0.Request.RelaySession.Provider, "provider1": msg.ResponseConflict.ConflictRelayData1.Request.RelaySession.Provider}, "Simulation: could not get NextEpoch") } conflictVote.VoteDeadline = voteDeadline - conflictVote.ApiUrl = msg.ResponseConflict.ConflictRelayData0.Request.ApiUrl + conflictVote.ApiUrl = msg.ResponseConflict.ConflictRelayData0.Request.RelayData.ApiUrl conflictVote.ClientAddress = msg.Creator - conflictVote.ChainID = msg.ResponseConflict.ConflictRelayData0.Request.ChainID - conflictVote.RequestBlock = uint64(msg.ResponseConflict.ConflictRelayData0.Request.RequestBlock) - conflictVote.RequestData = msg.ResponseConflict.ConflictRelayData0.Request.Data + conflictVote.ChainID = msg.ResponseConflict.ConflictRelayData0.Request.RelaySession.ChainID + conflictVote.RequestBlock = uint64(msg.ResponseConflict.ConflictRelayData0.Request.RelayData.RequestBlock) + conflictVote.RequestData = msg.ResponseConflict.ConflictRelayData0.Request.RelayData.Data - conflictVote.FirstProvider.Account = msg.ResponseConflict.ConflictRelayData0.Request.Provider + conflictVote.FirstProvider.Account = msg.ResponseConflict.ConflictRelayData0.Request.RelaySession.Provider conflictVote.FirstProvider.Response = tendermintcrypto.Sha256(msg.ResponseConflict.ConflictRelayData0.Reply.Data) - conflictVote.SecondProvider.Account = msg.ResponseConflict.ConflictRelayData1.Request.Provider + conflictVote.SecondProvider.Account = msg.ResponseConflict.ConflictRelayData1.Request.RelaySession.Provider conflictVote.SecondProvider.Response = tendermintcrypto.Sha256(msg.ResponseConflict.ConflictRelayData1.Reply.Data) conflictVote.Votes = []types.Vote{} voters := k.Keeper.LotteryVoters(goCtx, epochStart, conflictVote.ChainID, []string{conflictVote.FirstProvider.Account, conflictVote.SecondProvider.Account}) @@ -89,13 +89,13 @@ func (k msgServer) Detection(goCtx context.Context, msg *types.MsgDetection) (*t eventData := map[string]string{"client": msg.Creator} eventData["voteID"] = conflictVote.Index eventData["chainID"] = conflictVote.ChainID - eventData["connectionType"] = msg.ResponseConflict.ConflictRelayData0.Request.ConnectionType + eventData["connectionType"] = msg.ResponseConflict.ConflictRelayData0.Request.RelayData.ConnectionType eventData["apiURL"] = conflictVote.ApiUrl eventData["requestData"] = string(conflictVote.RequestData) eventData["requestBlock"] = 
strconv.FormatUint(conflictVote.RequestBlock, 10) eventData["voteDeadline"] = strconv.FormatUint(conflictVote.VoteDeadline, 10) eventData["voters"] = strings.Join(voters, ",") - eventData["apiInterface"] = msg.ResponseConflict.ConflictRelayData0.Request.ApiInterface + eventData["apiInterface"] = msg.ResponseConflict.ConflictRelayData0.Request.RelayData.ApiInterface utils.LogLavaEvent(ctx, logger, types.ConflictVoteDetectionEventName, eventData, "Simulation: Got a new valid conflict detection from consumer, starting new vote") return &types.MsgDetectionResponse{}, nil diff --git a/x/conflict/keeper/msg_server_detection_test.go b/x/conflict/keeper/msg_server_detection_test.go index b428dd425d..fa26f315ae 100644 --- a/x/conflict/keeper/msg_server_detection_test.go +++ b/x/conflict/keeper/msg_server_detection_test.go @@ -98,21 +98,21 @@ func TestDetection(t *testing.T) { msg.Creator = tt.Creator.Addr.String() //changes to request1 according to test - msg.ResponseConflict.ConflictRelayData1.Request.ConnectionType += tt.ConnectionType - msg.ResponseConflict.ConflictRelayData1.Request.ApiUrl += tt.ApiUrl - msg.ResponseConflict.ConflictRelayData1.Request.BlockHeight += tt.BlockHeight - msg.ResponseConflict.ConflictRelayData1.Request.ChainID += tt.ChainID - msg.ResponseConflict.ConflictRelayData1.Request.Data = append(msg.ResponseConflict.ConflictRelayData1.Request.Data, tt.Data...) - msg.ResponseConflict.ConflictRelayData1.Request.RequestBlock += tt.RequestBlock - msg.ResponseConflict.ConflictRelayData1.Request.CuSum += tt.Cusum - msg.ResponseConflict.ConflictRelayData1.Request.QoSReport = tt.QoSReport - msg.ResponseConflict.ConflictRelayData1.Request.RelayNum += tt.RelayNum - msg.ResponseConflict.ConflictRelayData1.Request.SessionId += tt.SeassionID - msg.ResponseConflict.ConflictRelayData1.Request.Provider = tt.Provider1.Addr.String() - msg.ResponseConflict.ConflictRelayData1.Request.Sig = []byte{} - sig, err := sigs.SignRelay(ts.consumer.SK, *msg.ResponseConflict.ConflictRelayData1.Request) + msg.ResponseConflict.ConflictRelayData1.Request.RelayData.ConnectionType += tt.ConnectionType + msg.ResponseConflict.ConflictRelayData1.Request.RelayData.ApiUrl += tt.ApiUrl + msg.ResponseConflict.ConflictRelayData1.Request.RelaySession.BlockHeight += tt.BlockHeight + msg.ResponseConflict.ConflictRelayData1.Request.RelaySession.ChainID += tt.ChainID + msg.ResponseConflict.ConflictRelayData1.Request.RelayData.Data = append(msg.ResponseConflict.ConflictRelayData1.Request.RelayData.Data, tt.Data...) 
+ msg.ResponseConflict.ConflictRelayData1.Request.RelayData.RequestBlock += tt.RequestBlock + msg.ResponseConflict.ConflictRelayData1.Request.RelaySession.CuSum += tt.Cusum + msg.ResponseConflict.ConflictRelayData1.Request.RelaySession.QoSReport = tt.QoSReport + msg.ResponseConflict.ConflictRelayData1.Request.RelaySession.RelayNum += tt.RelayNum + msg.ResponseConflict.ConflictRelayData1.Request.RelaySession.SessionId += tt.SeassionID + msg.ResponseConflict.ConflictRelayData1.Request.RelaySession.Provider = tt.Provider1.Addr.String() + msg.ResponseConflict.ConflictRelayData1.Request.RelaySession.Sig = []byte{} + sig, err := sigs.SignRelay(ts.consumer.SK, *msg.ResponseConflict.ConflictRelayData1.Request.RelaySession) require.Nil(t, err) - msg.ResponseConflict.ConflictRelayData1.Request.Sig = sig + msg.ResponseConflict.ConflictRelayData1.Request.RelaySession.Sig = sig //changes to reply1 according to test msg.ResponseConflict.ConflictRelayData1.Reply.Data = append(msg.ResponseConflict.ConflictRelayData1.Reply.Data, tt.ReplyData...) diff --git a/x/pairing/client/cli/tx_relay_payment.go b/x/pairing/client/cli/tx_relay_payment.go index 9c3491c357..22b41a8a42 100644 --- a/x/pairing/client/cli/tx_relay_payment.go +++ b/x/pairing/client/cli/tx_relay_payment.go @@ -27,7 +27,8 @@ func CmdRelayPayment() *cobra.Command { msg := types.NewMsgRelayPayment( clientCtx.GetFromAddress().String(), - []*types.RelayRequest{}, + []*types.RelaySession{}, + []*types.VRFData{}, "", ) if err := msg.ValidateBasic(); err != nil { diff --git a/x/pairing/keeper/msg_server_relay_payment.go b/x/pairing/keeper/msg_server_relay_payment.go index 39a862f108..ab4da46ef0 100644 --- a/x/pairing/keeper/msg_server_relay_payment.go +++ b/x/pairing/keeper/msg_server_relay_payment.go @@ -32,7 +32,7 @@ func (k msgServer) RelayPayment(goCtx context.Context, msg *types.MsgRelayPaymen return nil, utils.LavaError(ctx, logger, name, attrs, details) } - dataReliabilityByConsumer, err := sigs.DataReliabilityByConsumer(msg.VRFs) + dataReliabilityStore, err := dataReliabilityByConsumer(msg.VRFs) if err != nil { return errorLogAndFormat("data_reliability_claim", map[string]string{"error": err.Error()}, "error creating dataReliabilityByConsumer") } @@ -88,8 +88,9 @@ func (k msgServer) RelayPayment(goCtx context.Context, msg *types.MsgRelayPaymen payReliability := false // validate data reliability - if vrfData, ok := dataReliabilityByConsumer[clientAddr.String()]; ok { - delete(dataReliabilityByConsumer, clientAddr.String()) + vrfStoreKey := VRFKey{ChainID: relay.ChainID, Epoch: epochStart, Consumer: clientAddr.String()} + if vrfData, ok := dataReliabilityStore[vrfStoreKey]; ok { + delete(dataReliabilityStore, vrfStoreKey) details := map[string]string{"client": clientAddr.String(), "provider": providerAddr.String()} if !spec.DataReliabilityEnabled { details["chainID"] = relay.ChainID @@ -358,3 +359,28 @@ func (k msgServer) updateProviderPaymentStorageWithComplainerCU(ctx sdk.Context, return nil } + +type VRFKey struct { + Consumer string + Epoch uint64 + ChainID string +} + +func dataReliabilityByConsumer(vrfs []*types.VRFData) (dataReliabilityByConsumer map[VRFKey]*types.VRFData, err error) { + dataReliabilityByConsumer = map[VRFKey]*types.VRFData{} + if len(vrfs) == 0 { + return + } + for _, vrf := range vrfs { + signer, err := sigs.GetSignerForVRF(*vrf) + if err != nil { + return nil, err + } + dataReliabilityByConsumer[VRFKey{ + Consumer: signer.String(), + Epoch: uint64(vrf.Epoch), + ChainID: vrf.ChainID, + }] = vrf + } + return 
dataReliabilityByConsumer, nil +} diff --git a/x/pairing/keeper/msg_server_relay_payment_test.go b/x/pairing/keeper/msg_server_relay_payment_test.go index 987a766ef6..628427da43 100644 --- a/x/pairing/keeper/msg_server_relay_payment_test.go +++ b/x/pairing/keeper/msg_server_relay_payment_test.go @@ -863,6 +863,8 @@ func TestRelayPaymentDataReliability(t *testing.T) { } vrf_res0, vrf_proof0 := utils.ProveVrfOnRelay(relayRequest.RelayData, relayReply, ts.clients[0].vrfSk, false, currentEpoch) dataReliability0 := &types.VRFData{ + ChainID: relayRequest.RelaySession.ChainID, + Epoch: relayRequest.RelaySession.BlockHeight, Differentiator: false, VrfValue: vrf_res0, VrfProof: vrf_proof0, @@ -1016,6 +1018,8 @@ GetWrongProvider: } vrf_res0, vrf_proof0 := utils.ProveVrfOnRelay(relayRequest.RelayData, relayReply, ts.clients[0].vrfSk, false, currentEpoch) dataReliability0 := &types.VRFData{ + ChainID: relayRequest.RelaySession.ChainID, + Epoch: relayRequest.RelaySession.BlockHeight, Differentiator: false, VrfValue: vrf_res0, VrfProof: vrf_proof0, @@ -1102,6 +1106,8 @@ func TestRelayPaymentDataReliabilityBelowReliabilityThreshold(t *testing.T) { require.Equal(t, index1, int64(-1)) vrf_res0, vrf_proof0 := utils.ProveVrfOnRelay(relayRequest.RelayData, relayReply, ts.clients[0].vrfSk, false, currentEpoch) dataReliability0 := &types.VRFData{ + ChainID: relayRequest.RelaySession.ChainID, + Epoch: relayRequest.RelaySession.BlockHeight, Differentiator: false, VrfValue: vrf_res0, VrfProof: vrf_proof0, @@ -1198,6 +1204,8 @@ func TestRelayPaymentDataReliabilityDifferentClientSign(t *testing.T) { vrf_res0, vrf_proof0 := utils.ProveVrfOnRelay(relayRequest.RelayData, relayReply, ts.clients[0].vrfSk, false, currentEpoch) dataReliability0 := &types.VRFData{ + ChainID: relayRequest.RelaySession.ChainID, + Epoch: relayRequest.RelaySession.BlockHeight, Differentiator: false, VrfValue: vrf_res0, VrfProof: vrf_proof0, @@ -1293,6 +1301,8 @@ func TestRelayPaymentDataReliabilityDoubleSpendDifferentEpoch(t *testing.T) { vrf_res0, vrf_proof0 := utils.ProveVrfOnRelay(relayRequest.RelayData, relayReply, ts.clients[0].vrfSk, false, currentEpoch) dataReliability0 := &types.VRFData{ + ChainID: relayRequest.RelaySession.ChainID, + Epoch: relayRequest.RelaySession.BlockHeight, Differentiator: false, VrfValue: vrf_res0, VrfProof: vrf_proof0, diff --git a/x/pairing/types/relay.pb.go b/x/pairing/types/relay.pb.go index 5b2c656f12..a3dbfb2d04 100644 --- a/x/pairing/types/relay.pb.go +++ b/x/pairing/types/relay.pb.go @@ -458,13 +458,15 @@ func (m *RelayReply) GetSigBlocks() []byte { } type VRFData struct { - Differentiator bool `protobuf:"varint,1,opt,name=differentiator,proto3" json:"differentiator,omitempty"` - VrfValue []byte `protobuf:"bytes,2,opt,name=vrf_value,json=vrfValue,proto3" json:"vrf_value,omitempty"` - VrfProof []byte `protobuf:"bytes,3,opt,name=vrf_proof,json=vrfProof,proto3" json:"vrf_proof,omitempty"` - ProviderSig []byte `protobuf:"bytes,4,opt,name=provider_sig,json=providerSig,proto3" json:"provider_sig,omitempty"` - AllDataHash []byte `protobuf:"bytes,5,opt,name=allDataHash,proto3" json:"allDataHash,omitempty"` - QueryHash []byte `protobuf:"bytes,6,opt,name=queryHash,proto3" json:"queryHash,omitempty"` - Sig []byte `protobuf:"bytes,7,opt,name=sig,proto3" json:"sig,omitempty"` + ChainID string `protobuf:"bytes,1,opt,name=chainID,proto3" json:"chainID,omitempty"` + Epoch int64 `protobuf:"varint,2,opt,name=epoch,proto3" json:"epoch,omitempty"` + Differentiator bool `protobuf:"varint,3,opt,name=differentiator,proto3" 
json:"differentiator,omitempty"` + VrfValue []byte `protobuf:"bytes,4,opt,name=vrf_value,json=vrfValue,proto3" json:"vrf_value,omitempty"` + VrfProof []byte `protobuf:"bytes,5,opt,name=vrf_proof,json=vrfProof,proto3" json:"vrf_proof,omitempty"` + ProviderSig []byte `protobuf:"bytes,6,opt,name=provider_sig,json=providerSig,proto3" json:"provider_sig,omitempty"` + AllDataHash []byte `protobuf:"bytes,7,opt,name=allDataHash,proto3" json:"allDataHash,omitempty"` + QueryHash []byte `protobuf:"bytes,8,opt,name=queryHash,proto3" json:"queryHash,omitempty"` + Sig []byte `protobuf:"bytes,9,opt,name=sig,proto3" json:"sig,omitempty"` } func (m *VRFData) Reset() { *m = VRFData{} } @@ -500,6 +502,20 @@ func (m *VRFData) XXX_DiscardUnknown() { var xxx_messageInfo_VRFData proto.InternalMessageInfo +func (m *VRFData) GetChainID() string { + if m != nil { + return m.ChainID + } + return "" +} + +func (m *VRFData) GetEpoch() int64 { + if m != nil { + return m.Epoch + } + return 0 +} + func (m *VRFData) GetDifferentiator() bool { if m != nil { return m.Differentiator @@ -601,71 +617,71 @@ func init() { func init() { proto.RegisterFile("pairing/relay.proto", fileDescriptor_10cd1bfeb9978acf) } var fileDescriptor_10cd1bfeb9978acf = []byte{ - // 1013 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0x41, 0x6f, 0x1b, 0xc5, - 0x17, 0xcf, 0xc6, 0x76, 0x6c, 0xbf, 0x75, 0xd3, 0x6a, 0x9a, 0xb4, 0xfb, 0x4f, 0xff, 0x75, 0xcc, - 0x22, 0xa5, 0x39, 0x80, 0x0d, 0x41, 0x70, 0x40, 0x42, 0xa2, 0x26, 0xa5, 0x09, 0x42, 0x34, 0x19, - 0x43, 0x0f, 0xb9, 0xac, 0xc6, 0xeb, 0xf1, 0x7a, 0xc8, 0x7a, 0x67, 0x3b, 0xb3, 0x6b, 0x61, 0x0e, - 0x7c, 0x06, 0x3e, 0x0b, 0x07, 0x3e, 0x00, 0x12, 0x52, 0xb9, 0xf5, 0x88, 0x38, 0x44, 0x55, 0x72, - 0xe0, 0xce, 0x27, 0x40, 0xf3, 0x76, 0xd6, 0x71, 0xa3, 0x28, 0x52, 0x25, 0x4e, 0x3b, 0xf3, 0x7b, - 0x6f, 0x7e, 0xb3, 0xef, 0xf7, 0x7e, 0xfb, 0x6c, 0xb8, 0x9b, 0x32, 0xa1, 0x44, 0x12, 0xf5, 0x14, - 0x8f, 0xd9, 0xbc, 0x9b, 0x2a, 0x99, 0x49, 0xb2, 0x11, 0xb3, 0x19, 0x4b, 0x78, 0xd6, 0x35, 0xcf, - 0xae, 0xcd, 0xd8, 0xda, 0x88, 0x64, 0x24, 0x31, 0xa1, 0x67, 0x56, 0x45, 0xae, 0xff, 0x47, 0x05, - 0x5a, 0xd4, 0x9c, 0x1d, 0x70, 0xad, 0x85, 0x4c, 0x88, 0x07, 0xf5, 0x70, 0xc2, 0x44, 0x72, 0xb8, - 0xef, 0x39, 0x1d, 0x67, 0xb7, 0x49, 0xcb, 0x2d, 0x79, 0x07, 0x5a, 0xa1, 0x4c, 0x32, 0x9e, 0x64, - 0xc1, 0x84, 0xe9, 0x89, 0xb7, 0xda, 0x71, 0x76, 0x5b, 0xd4, 0xb5, 0xd8, 0x01, 0xd3, 0x13, 0xf2, - 0x10, 0x40, 0x17, 0x3c, 0x81, 0x18, 0x79, 0x95, 0x8e, 0xb3, 0x5b, 0xa5, 0x4d, 0x8b, 0x1c, 0x8e, - 0xc8, 0x26, 0xac, 0x85, 0x79, 0xa0, 0xf3, 0xa9, 0x57, 0xc5, 0x50, 0x2d, 0xcc, 0x07, 0xf9, 0x94, - 0x6c, 0x41, 0x23, 0x55, 0x72, 0x26, 0x46, 0x5c, 0x79, 0x35, 0xbc, 0x73, 0xb1, 0x27, 0x0f, 0xa0, - 0x89, 0xa5, 0x05, 0x49, 0x3e, 0xf5, 0xd6, 0xf0, 0x54, 0x03, 0x81, 0x6f, 0xf2, 0x29, 0xf9, 0x0a, - 0x9a, 0xc7, 0x72, 0x40, 0x79, 0x2a, 0x55, 0xe6, 0xd5, 0x3b, 0xce, 0xae, 0xbb, 0xf7, 0x5e, 0xf7, - 0xba, 0xe2, 0xbb, 0xc7, 0x39, 0x8b, 0x45, 0x36, 0x7f, 0x36, 0x1e, 0x70, 0x35, 0x13, 0x21, 0x2f, - 0xce, 0xd0, 0xcb, 0xe3, 0xa6, 0xba, 0x61, 0x2c, 0xc3, 0xd3, 0x60, 0xc2, 0x45, 0x34, 0xc9, 0xbc, - 0x46, 0xc7, 0xd9, 0xad, 0x50, 0x17, 0xb1, 0x03, 0x84, 0xc8, 0xc7, 0x70, 0x2f, 0x4f, 0x14, 0xd7, - 0xa9, 0x4c, 0xb4, 0x98, 0xf1, 0xa0, 0x7c, 0x49, 0xed, 0x35, 0x51, 0x8a, 0xcd, 0xe5, 0xe8, 0x51, - 0x19, 0x24, 0x3e, 0xdc, 0x32, 0xef, 0x12, 0xa0, 0x8e, 0x46, 0x17, 0xc0, 0x1a, 0x5d, 0x03, 0x7e, - 0x81, 0xda, 0x8e, 0xc8, 0x1d, 0xa8, 0x68, 0x11, 0x79, 0x2e, 0xf2, 0x98, 0x25, 0xf9, 0x10, 0x6a, - 0x43, 0x36, 
0x8a, 0xb8, 0xd7, 0xc2, 0xba, 0x1e, 0x5c, 0x5f, 0x57, 0xdf, 0xa4, 0xd0, 0x22, 0xd3, - 0xff, 0xdd, 0x81, 0x3b, 0xd8, 0xcb, 0x23, 0x25, 0x66, 0x2c, 0xe3, 0xfb, 0x2c, 0x63, 0xe4, 0x11, - 0xdc, 0x0e, 0x65, 0x92, 0xf0, 0x30, 0x33, 0x5d, 0xc9, 0xe6, 0x29, 0xb7, 0x7d, 0x5d, 0xbf, 0x84, - 0xbf, 0x9d, 0xa7, 0x9c, 0xdc, 0x87, 0x3a, 0x4b, 0x45, 0x90, 0xab, 0x18, 0x3b, 0xdb, 0xa4, 0x6b, - 0x2c, 0x15, 0xdf, 0xa9, 0x98, 0x10, 0xa8, 0x8e, 0x58, 0xc6, 0xb0, 0x9d, 0x2d, 0x8a, 0x6b, 0xf2, - 0x2e, 0xdc, 0x52, 0xfc, 0x45, 0xce, 0x75, 0x16, 0xa0, 0x42, 0xd8, 0xd0, 0x0a, 0x6d, 0x59, 0xb0, - 0x6f, 0x30, 0xe2, 0x43, 0x8b, 0xa5, 0xe2, 0x30, 0xc9, 0xb8, 0x1a, 0xb3, 0x90, 0xdb, 0xde, 0xbe, - 0x81, 0x19, 0x72, 0xcd, 0xe2, 0x0c, 0x5b, 0xdb, 0xa2, 0xb8, 0xf6, 0xff, 0x76, 0xac, 0x27, 0x69, - 0xc1, 0x46, 0x9e, 0x9a, 0xdb, 0x8c, 0x09, 0xac, 0x95, 0xb0, 0x02, 0x77, 0xcf, 0xbf, 0x5e, 0x93, - 0x65, 0x3b, 0x9b, 0x37, 0x5a, 0x32, 0xf7, 0x13, 0x80, 0x82, 0x08, 0x0b, 0x5a, 0x45, 0x96, 0x9d, - 0x1b, 0x58, 0x96, 0x84, 0xa4, 0x85, 0x0f, 0x51, 0xd3, 0xa7, 0x70, 0x1b, 0x21, 0x1e, 0x0b, 0x36, - 0x14, 0xc6, 0x58, 0x28, 0x8e, 0xbb, 0xf7, 0xf0, 0x7a, 0xae, 0xe7, 0xf4, 0x4b, 0xcc, 0xbf, 0x7a, - 0xca, 0xff, 0x09, 0x6a, 0xd8, 0x41, 0xa3, 0x67, 0x98, 0x07, 0x2c, 0x8e, 0x65, 0xc8, 0xb2, 0xb2, - 0xc2, 0x2a, 0x6d, 0x85, 0xf9, 0xe3, 0x05, 0x46, 0x36, 0xa0, 0xc6, 0x53, 0x19, 0x16, 0x5f, 0x5e, - 0x85, 0x16, 0x1b, 0xf2, 0x3f, 0x68, 0x60, 0xfb, 0x83, 0xf4, 0xd4, 0xb6, 0xa8, 0x8e, 0xfb, 0xa3, - 0x53, 0xb2, 0x0d, 0x6e, 0xaa, 0xe4, 0xf7, 0x3c, 0xcc, 0x02, 0xe3, 0xae, 0x2a, 0x46, 0xc1, 0x42, - 0x03, 0x11, 0xf9, 0xbf, 0x39, 0x00, 0x56, 0xe9, 0x34, 0x9e, 0x2f, 0x3a, 0xed, 0x2c, 0x75, 0xda, - 0x3a, 0x73, 0xf5, 0xd2, 0x99, 0x1b, 0x50, 0x4b, 0x64, 0x12, 0x72, 0xbc, 0xed, 0x16, 0x2d, 0x36, - 0xe6, 0xfb, 0x89, 0x59, 0x76, 0xd5, 0x10, 0x6e, 0x81, 0x15, 0x7e, 0xf8, 0x04, 0xee, 0x8f, 0x45, - 0xc2, 0x62, 0xf1, 0x23, 0x1f, 0x15, 0x59, 0x1a, 0x27, 0x09, 0xd7, 0x68, 0x8d, 0x16, 0xdd, 0x5c, - 0x84, 0xf1, 0x80, 0x3e, 0xc0, 0x20, 0x4e, 0x15, 0x11, 0xd9, 0x13, 0xd6, 0x29, 0x4d, 0x2d, 0xa2, - 0x22, 0xc9, 0x7f, 0xed, 0x40, 0xdd, 0x2a, 0x4c, 0x76, 0x60, 0x7d, 0x24, 0xc6, 0x63, 0xae, 0x78, - 0x92, 0x09, 0x96, 0x49, 0x85, 0xb5, 0x34, 0xe8, 0x15, 0xd4, 0x8c, 0x95, 0x99, 0x1a, 0x07, 0x33, - 0x16, 0xe7, 0xdc, 0xd6, 0xd6, 0x98, 0xa9, 0xf1, 0x73, 0xb3, 0x2f, 0x83, 0xa9, 0x92, 0x72, 0x6c, - 0x25, 0x35, 0xc1, 0x23, 0xb3, 0x37, 0x75, 0x96, 0xdf, 0xfd, 0x92, 0xa8, 0x6e, 0x89, 0x0d, 0x44, - 0x44, 0x3a, 0xe0, 0xb2, 0x38, 0x36, 0xef, 0x63, 0x0a, 0xb0, 0xb5, 0x2d, 0x43, 0xe4, 0xff, 0xd0, - 0x7c, 0x91, 0x73, 0x35, 0xc7, 0xb8, 0x2d, 0x68, 0x01, 0x94, 0x92, 0xd7, 0x17, 0x92, 0xfb, 0xbf, - 0xac, 0xc2, 0xbd, 0xeb, 0x47, 0x18, 0x39, 0x81, 0xba, 0xd1, 0x38, 0x09, 0xe7, 0xc5, 0x77, 0xdd, - 0xff, 0xfc, 0xe5, 0xd9, 0xf6, 0xca, 0x5f, 0x67, 0xdb, 0x3b, 0x91, 0xc8, 0x26, 0xf9, 0xb0, 0x1b, - 0xca, 0x69, 0x2f, 0x94, 0x7a, 0x2a, 0xb5, 0x7d, 0xbc, 0xaf, 0x47, 0xa7, 0x3d, 0x33, 0x08, 0x74, - 0x77, 0x9f, 0x87, 0xff, 0x9c, 0x6d, 0xaf, 0xcf, 0xd9, 0x34, 0xfe, 0xd4, 0xff, 0xba, 0xa0, 0xf1, - 0x69, 0x49, 0x48, 0x04, 0xb4, 0xd8, 0x8c, 0x89, 0xb8, 0x34, 0x39, 0xce, 0x85, 0xfe, 0x93, 0xb7, - 0xbe, 0xe0, 0x6e, 0x71, 0xc1, 0x32, 0x97, 0x4f, 0xdf, 0xa0, 0x26, 0xc7, 0x50, 0xd5, 0xf3, 0x24, - 0x44, 0xb9, 0x9b, 0xfd, 0xcf, 0xde, 0xfa, 0x0a, 0xb7, 0xb8, 0xc2, 0x70, 0xf8, 0x14, 0xa9, 0xf6, - 0x7e, 0x75, 0xa0, 0x8e, 0xe6, 0xe6, 0x8a, 0x3c, 0x83, 0x1a, 0x2e, 0xc9, 0x4d, 0x33, 0xc3, 0x8e, - 0x9b, 0xad, 0xce, 0x8d, 0x39, 0x69, 0x3c, 0xf7, 0x57, 0xc8, 0x09, 0xac, 0x17, 0x73, 0x26, 0x1f, - 0xea, 0x50, 0x89, 0x21, 0xff, 0xaf, 
0x98, 0x3f, 0x70, 0xfa, 0x8f, 0x5f, 0x9e, 0xb7, 0x9d, 0x57, - 0xe7, 0x6d, 0xe7, 0xf5, 0x79, 0xdb, 0xf9, 0xf9, 0xa2, 0xbd, 0xf2, 0xea, 0xa2, 0xbd, 0xf2, 0xe7, - 0x45, 0x7b, 0xe5, 0xe4, 0xd1, 0x92, 0x1e, 0x96, 0x09, 0x9f, 0xbd, 0x1f, 0x7a, 0xe5, 0x1f, 0x01, - 0x14, 0x65, 0xb8, 0x86, 0xbf, 0xee, 0x1f, 0xfd, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x25, 0x7e, 0xca, - 0xaa, 0x20, 0x08, 0x00, 0x00, + // 1021 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0x41, 0x6f, 0x1b, 0x45, + 0x14, 0xce, 0xc6, 0x76, 0x6c, 0xbf, 0xdd, 0xa4, 0xd5, 0x34, 0x69, 0x97, 0x94, 0x3a, 0x66, 0x91, + 0xd2, 0x1c, 0xc0, 0x86, 0x20, 0x38, 0x20, 0x21, 0x51, 0x93, 0xd2, 0x04, 0x21, 0x9a, 0x8c, 0xa1, + 0x87, 0x5c, 0x56, 0xe3, 0xf5, 0x78, 0x3d, 0x64, 0xbd, 0xb3, 0x9d, 0xd9, 0xb5, 0x30, 0x07, 0x7e, + 0x43, 0x7f, 0x0b, 0x07, 0x7e, 0x00, 0x12, 0x52, 0xb9, 0xf5, 0x88, 0x38, 0x44, 0x28, 0x39, 0x70, + 0xe7, 0x17, 0xa0, 0x79, 0xbb, 0xeb, 0xb8, 0x91, 0x15, 0xa9, 0x52, 0x4f, 0x9e, 0xf9, 0xde, 0x9b, + 0x6f, 0xe6, 0x7d, 0xdf, 0xdb, 0x27, 0xc3, 0x9d, 0x84, 0x09, 0x25, 0xe2, 0xb0, 0xab, 0x78, 0xc4, + 0x66, 0x9d, 0x44, 0xc9, 0x54, 0x92, 0xcd, 0x88, 0x4d, 0x59, 0xcc, 0xd3, 0x8e, 0xf9, 0xed, 0x14, + 0x19, 0xdb, 0x9b, 0xa1, 0x0c, 0x25, 0x26, 0x74, 0xcd, 0x2a, 0xcf, 0xf5, 0xfe, 0xac, 0x80, 0x43, + 0xcd, 0xd9, 0x3e, 0xd7, 0x5a, 0xc8, 0x98, 0xb8, 0x50, 0x0f, 0xc6, 0x4c, 0xc4, 0x47, 0x07, 0xae, + 0xd5, 0xb6, 0xf6, 0x9a, 0xb4, 0xdc, 0x92, 0xf7, 0xc0, 0x09, 0x64, 0x9c, 0xf2, 0x38, 0xf5, 0xc7, + 0x4c, 0x8f, 0xdd, 0xd5, 0xb6, 0xb5, 0xe7, 0x50, 0xbb, 0xc0, 0x0e, 0x99, 0x1e, 0x93, 0x07, 0x00, + 0x3a, 0xe7, 0xf1, 0xc5, 0xd0, 0xad, 0xb4, 0xad, 0xbd, 0x2a, 0x6d, 0x16, 0xc8, 0xd1, 0x90, 0x6c, + 0xc1, 0x5a, 0x90, 0xf9, 0x3a, 0x9b, 0xb8, 0x55, 0x0c, 0xd5, 0x82, 0xac, 0x9f, 0x4d, 0xc8, 0x36, + 0x34, 0x12, 0x25, 0xa7, 0x62, 0xc8, 0x95, 0x5b, 0xc3, 0x3b, 0xe7, 0x7b, 0x72, 0x1f, 0x9a, 0x58, + 0x9a, 0x1f, 0x67, 0x13, 0x77, 0x0d, 0x4f, 0x35, 0x10, 0xf8, 0x2e, 0x9b, 0x90, 0x6f, 0xa0, 0x79, + 0x22, 0xfb, 0x94, 0x27, 0x52, 0xa5, 0x6e, 0xbd, 0x6d, 0xed, 0xd9, 0xfb, 0x1f, 0x74, 0x96, 0x15, + 0xdf, 0x39, 0xc9, 0x58, 0x24, 0xd2, 0xd9, 0xd3, 0x51, 0x9f, 0xab, 0xa9, 0x08, 0x78, 0x7e, 0x86, + 0x5e, 0x1d, 0x37, 0xd5, 0x0d, 0x22, 0x19, 0x9c, 0xf9, 0x63, 0x2e, 0xc2, 0x71, 0xea, 0x36, 0xda, + 0xd6, 0x5e, 0x85, 0xda, 0x88, 0x1d, 0x22, 0x44, 0x3e, 0x85, 0xbb, 0x59, 0xac, 0xb8, 0x4e, 0x64, + 0xac, 0xc5, 0x94, 0xfb, 0xe5, 0x23, 0xb5, 0xdb, 0x44, 0x29, 0xb6, 0x16, 0xa3, 0xc7, 0x65, 0x90, + 0x78, 0xb0, 0x6e, 0xde, 0xe2, 0xa3, 0x8e, 0x46, 0x17, 0xc0, 0x1a, 0x6d, 0x03, 0x7e, 0x85, 0xda, + 0x0e, 0xc9, 0x6d, 0xa8, 0x68, 0x11, 0xba, 0x36, 0xf2, 0x98, 0x25, 0xf9, 0x18, 0x6a, 0x03, 0x36, + 0x0c, 0xb9, 0xeb, 0x60, 0x5d, 0xf7, 0x97, 0xd7, 0xd5, 0x33, 0x29, 0x34, 0xcf, 0xf4, 0xfe, 0xb0, + 0xe0, 0x36, 0x7a, 0x79, 0xac, 0xc4, 0x94, 0xa5, 0xfc, 0x80, 0xa5, 0x8c, 0x3c, 0x84, 0x5b, 0x81, + 0x8c, 0x63, 0x1e, 0xa4, 0xc6, 0x95, 0x74, 0x96, 0xf0, 0xc2, 0xd7, 0x8d, 0x2b, 0xf8, 0xfb, 0x59, + 0xc2, 0xc9, 0x3d, 0xa8, 0xb3, 0x44, 0xf8, 0x99, 0x8a, 0xd0, 0xd9, 0x26, 0x5d, 0x63, 0x89, 0xf8, + 0x41, 0x45, 0x84, 0x40, 0x75, 0xc8, 0x52, 0x86, 0x76, 0x3a, 0x14, 0xd7, 0xe4, 0x7d, 0x58, 0x57, + 0xfc, 0x79, 0xc6, 0x75, 0xea, 0xa3, 0x42, 0x68, 0x68, 0x85, 0x3a, 0x05, 0xd8, 0x33, 0x18, 0xf1, + 0xc0, 0x61, 0x89, 0x38, 0x8a, 0x53, 0xae, 0x46, 0x2c, 0xe0, 0x85, 0xb7, 0xaf, 0x61, 0x86, 0x5c, + 0xb3, 0x28, 0x45, 0x6b, 0x1d, 0x8a, 0x6b, 0xef, 0x5f, 0xab, 0xe8, 0x49, 0x9a, 0xb3, 0x91, 0x27, + 0xe6, 0x36, 0xd3, 0x04, 0x45, 0x2b, 0x61, 0x05, 0xf6, 0xbe, 0xb7, 0x5c, 0x93, 
0xc5, 0x76, 0x36, + 0x2f, 0x5a, 0x68, 0xee, 0xc7, 0x00, 0x39, 0x11, 0x16, 0xb4, 0x8a, 0x2c, 0xbb, 0x37, 0xb0, 0x2c, + 0x08, 0x49, 0xf3, 0x3e, 0x44, 0x4d, 0x9f, 0xc0, 0x2d, 0x84, 0x78, 0x24, 0xd8, 0x40, 0x98, 0xc6, + 0x42, 0x71, 0xec, 0xfd, 0x07, 0xcb, 0xb9, 0x9e, 0xd1, 0xaf, 0x31, 0xff, 0xfa, 0x29, 0xef, 0x17, + 0xa8, 0xa1, 0x83, 0x46, 0xcf, 0x20, 0xf3, 0x59, 0x14, 0xc9, 0x80, 0xa5, 0x65, 0x85, 0x55, 0xea, + 0x04, 0xd9, 0xa3, 0x39, 0x46, 0x36, 0xa1, 0xc6, 0x13, 0x19, 0xe4, 0x5f, 0x5e, 0x85, 0xe6, 0x1b, + 0xf2, 0x0e, 0x34, 0xd0, 0x7e, 0x3f, 0x39, 0x2b, 0x2c, 0xaa, 0xe3, 0xfe, 0xf8, 0x8c, 0xec, 0x80, + 0x9d, 0x28, 0xf9, 0x23, 0x0f, 0x52, 0xdf, 0x74, 0x57, 0x15, 0xa3, 0x50, 0x40, 0x7d, 0x11, 0x7a, + 0xbf, 0x5b, 0x00, 0x85, 0xd2, 0x49, 0x34, 0x9b, 0x3b, 0x6d, 0x2d, 0x38, 0x5d, 0x74, 0xe6, 0xea, + 0x55, 0x67, 0x6e, 0x42, 0x2d, 0x96, 0x71, 0xc0, 0xf1, 0xb6, 0x75, 0x9a, 0x6f, 0xcc, 0xf7, 0x13, + 0xb1, 0xf4, 0x7a, 0x43, 0xd8, 0x39, 0x96, 0xf7, 0xc3, 0x67, 0x70, 0x6f, 0x24, 0x62, 0x16, 0x89, + 0x9f, 0xf9, 0x30, 0xcf, 0xd2, 0x38, 0x49, 0xb8, 0xc6, 0xd6, 0x70, 0xe8, 0xd6, 0x3c, 0x8c, 0x07, + 0xf4, 0x21, 0x06, 0x71, 0xaa, 0x88, 0xb0, 0x38, 0x51, 0x74, 0x4a, 0x53, 0x8b, 0x30, 0x4f, 0xf2, + 0x5e, 0xac, 0x42, 0xbd, 0x50, 0xf8, 0x86, 0xe9, 0xb5, 0x5c, 0xbc, 0x5d, 0xd8, 0x18, 0x8a, 0xd1, + 0x88, 0x2b, 0x1e, 0xa7, 0x82, 0xa5, 0x52, 0x61, 0x51, 0x0d, 0x7a, 0x0d, 0x35, 0x63, 0x68, 0xaa, + 0x46, 0xfe, 0x94, 0x45, 0x19, 0x2f, 0x74, 0x6c, 0x4c, 0xd5, 0xe8, 0x99, 0xd9, 0x97, 0xc1, 0x44, + 0x49, 0x39, 0x2a, 0x2a, 0x31, 0xc1, 0x63, 0xb3, 0x37, 0xba, 0x94, 0x73, 0x02, 0x4d, 0xc8, 0x9f, + 0x6f, 0x97, 0x58, 0x5f, 0x84, 0xa4, 0x0d, 0x36, 0x8b, 0x22, 0xf3, 0x7e, 0x53, 0x30, 0x0e, 0x32, + 0x87, 0x2e, 0x42, 0xe4, 0x5d, 0x68, 0x3e, 0xcf, 0xb8, 0x9a, 0x61, 0xbc, 0x91, 0x0b, 0x30, 0x07, + 0x4a, 0x8b, 0x9a, 0x73, 0x8b, 0xbc, 0x5f, 0x57, 0xe1, 0xee, 0xf2, 0x91, 0x47, 0x4e, 0xa1, 0x6e, + 0x3c, 0x89, 0x83, 0x59, 0xae, 0x50, 0xef, 0xcb, 0x97, 0xe7, 0x3b, 0x2b, 0x7f, 0x9f, 0xef, 0xec, + 0x86, 0x22, 0x1d, 0x67, 0x83, 0x4e, 0x20, 0x27, 0xdd, 0x40, 0xea, 0x89, 0xd4, 0xc5, 0xcf, 0x87, + 0x7a, 0x78, 0xd6, 0x35, 0x83, 0x43, 0x77, 0x0e, 0x78, 0xf0, 0xdf, 0xf9, 0xce, 0xc6, 0x8c, 0x4d, + 0xa2, 0xcf, 0xbd, 0x6f, 0x73, 0x1a, 0x8f, 0x96, 0x84, 0x44, 0x80, 0xc3, 0xa6, 0x4c, 0x44, 0xe5, + 0x47, 0x81, 0x73, 0xa4, 0xf7, 0xf8, 0x8d, 0x2f, 0xb8, 0x93, 0x5f, 0xb0, 0xc8, 0xe5, 0xd1, 0xd7, + 0xa8, 0xc9, 0x09, 0x54, 0xf5, 0x2c, 0x0e, 0xd0, 0xae, 0x66, 0xef, 0x8b, 0x37, 0xbe, 0xc2, 0xce, + 0xaf, 0x30, 0x1c, 0x1e, 0x45, 0xaa, 0xfd, 0xdf, 0x2c, 0xa8, 0xe3, 0xc7, 0xc0, 0x15, 0x79, 0x0a, + 0x35, 0x5c, 0x92, 0x9b, 0x66, 0x4c, 0x31, 0x9e, 0xb6, 0xdb, 0x37, 0xe6, 0x24, 0xd1, 0xcc, 0x5b, + 0x21, 0xa7, 0xb0, 0x91, 0xcf, 0xa5, 0x6c, 0xa0, 0x03, 0x25, 0x06, 0xfc, 0x6d, 0x31, 0x7f, 0x64, + 0xf5, 0x1e, 0xbd, 0xbc, 0x68, 0x59, 0xaf, 0x2e, 0x5a, 0xd6, 0x3f, 0x17, 0x2d, 0xeb, 0xc5, 0x65, + 0x6b, 0xe5, 0xd5, 0x65, 0x6b, 0xe5, 0xaf, 0xcb, 0xd6, 0xca, 0xe9, 0xc3, 0x05, 0x3d, 0x0a, 0x26, + 0xfc, 0xed, 0xfe, 0xd4, 0x2d, 0xff, 0x38, 0xa0, 0x28, 0x83, 0x35, 0xfc, 0x37, 0xf0, 0xc9, 0xff, + 0x01, 0x00, 0x00, 0xff, 0xff, 0xc5, 0x73, 0x75, 0xdd, 0x50, 0x08, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
@@ -1176,42 +1192,42 @@ func (m *VRFData) MarshalToSizedBuffer(dAtA []byte) (int, error) { copy(dAtA[i:], m.Sig) i = encodeVarintRelay(dAtA, i, uint64(len(m.Sig))) i-- - dAtA[i] = 0x3a + dAtA[i] = 0x4a } if len(m.QueryHash) > 0 { i -= len(m.QueryHash) copy(dAtA[i:], m.QueryHash) i = encodeVarintRelay(dAtA, i, uint64(len(m.QueryHash))) i-- - dAtA[i] = 0x32 + dAtA[i] = 0x42 } if len(m.AllDataHash) > 0 { i -= len(m.AllDataHash) copy(dAtA[i:], m.AllDataHash) i = encodeVarintRelay(dAtA, i, uint64(len(m.AllDataHash))) i-- - dAtA[i] = 0x2a + dAtA[i] = 0x3a } if len(m.ProviderSig) > 0 { i -= len(m.ProviderSig) copy(dAtA[i:], m.ProviderSig) i = encodeVarintRelay(dAtA, i, uint64(len(m.ProviderSig))) i-- - dAtA[i] = 0x22 + dAtA[i] = 0x32 } if len(m.VrfProof) > 0 { i -= len(m.VrfProof) copy(dAtA[i:], m.VrfProof) i = encodeVarintRelay(dAtA, i, uint64(len(m.VrfProof))) i-- - dAtA[i] = 0x1a + dAtA[i] = 0x2a } if len(m.VrfValue) > 0 { i -= len(m.VrfValue) copy(dAtA[i:], m.VrfValue) i = encodeVarintRelay(dAtA, i, uint64(len(m.VrfValue))) i-- - dAtA[i] = 0x12 + dAtA[i] = 0x22 } if m.Differentiator { i-- @@ -1221,7 +1237,19 @@ func (m *VRFData) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0 } i-- - dAtA[i] = 0x8 + dAtA[i] = 0x18 + } + if m.Epoch != 0 { + i = encodeVarintRelay(dAtA, i, uint64(m.Epoch)) + i-- + dAtA[i] = 0x10 + } + if len(m.ChainID) > 0 { + i -= len(m.ChainID) + copy(dAtA[i:], m.ChainID) + i = encodeVarintRelay(dAtA, i, uint64(len(m.ChainID))) + i-- + dAtA[i] = 0xa } return len(dAtA) - i, nil } @@ -1456,6 +1484,13 @@ func (m *VRFData) Size() (n int) { } var l int _ = l + l = len(m.ChainID) + if l > 0 { + n += 1 + l + sovRelay(uint64(l)) + } + if m.Epoch != 0 { + n += 1 + sovRelay(uint64(m.Epoch)) + } if m.Differentiator { n += 2 } @@ -2704,6 +2739,57 @@ func (m *VRFData) Unmarshal(dAtA []byte) error { } switch fieldNum { case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRelay + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRelay + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRelay + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChainID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Epoch", wireType) + } + m.Epoch = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRelay + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Epoch |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Differentiator", wireType) } @@ -2723,7 +2809,7 @@ func (m *VRFData) Unmarshal(dAtA []byte) error { } } m.Differentiator = bool(v != 0) - case 2: + case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field VrfValue", wireType) } @@ -2757,7 +2843,7 @@ func (m *VRFData) Unmarshal(dAtA []byte) error { m.VrfValue = []byte{} } iNdEx = postIndex - case 3: + case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field VrfProof", wireType) } @@ -2791,7 +2877,7 @@ func (m *VRFData) Unmarshal(dAtA []byte) error { 
m.VrfProof = []byte{} } iNdEx = postIndex - case 4: + case 6: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ProviderSig", wireType) } @@ -2825,7 +2911,7 @@ func (m *VRFData) Unmarshal(dAtA []byte) error { m.ProviderSig = []byte{} } iNdEx = postIndex - case 5: + case 7: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field AllDataHash", wireType) } @@ -2859,7 +2945,7 @@ func (m *VRFData) Unmarshal(dAtA []byte) error { m.AllDataHash = []byte{} } iNdEx = postIndex - case 6: + case 8: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field QueryHash", wireType) } @@ -2893,7 +2979,7 @@ func (m *VRFData) Unmarshal(dAtA []byte) error { m.QueryHash = []byte{} } iNdEx = postIndex - case 7: + case 9: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Sig", wireType) } diff --git a/x/pairing/types/relay_extensions.go b/x/pairing/types/relay_extensions.go deleted file mode 100644 index 4c0e695bea..0000000000 --- a/x/pairing/types/relay_extensions.go +++ /dev/null @@ -1,13 +0,0 @@ -package types - -// ShallowCopy makes a shallow copy of the relay request, and returns it -// A shallow copy includes the values of all fields in the original struct, -// but any nested values (such as slices, maps, and pointers) are shared between the original and the copy. -func (m *RelayRequest) ShallowCopy() *RelayRequest { - if m == nil { - return nil - } - - requestCopy := *m - return &requestCopy -} diff --git a/x/pairing/types/relay_test.go b/x/pairing/types/relay_test.go deleted file mode 100644 index 5edc50873f..0000000000 --- a/x/pairing/types/relay_test.go +++ /dev/null @@ -1,62 +0,0 @@ -package types - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -// getDummyRequest creates dummy request used in tests -func createDummyRequest(requestedBlock int64, dataReliability *VRFData) *RelayRequest { - return &RelayRequest{ - ChainID: "testID", - Data: []byte("Dummy data"), - RequestBlock: requestedBlock, - DataReliability: dataReliability, - } -} - -// TestRelayShallowCopy tests shallow copy method of relay request -func TestRelayShallowCopy(t *testing.T) { - t.Parallel() - t.Run( - "Check if copy object has same data as original", - func(t *testing.T) { - t.Parallel() - - dataReliability := &VRFData{ - Differentiator: true, - } - - request := createDummyRequest(-2, dataReliability) - copy := request.ShallowCopy() - - assert.Equal(t, request, copy) - }) - t.Run( - "Only nested values should be shared", - func(t *testing.T) { - t.Parallel() - - dataReliability := &VRFData{ - Differentiator: true, - } - - requestedBlock := int64(-2) - - request := createDummyRequest(requestedBlock, dataReliability) - copy := request.ShallowCopy() - - // Change RequestBlock - copy.RequestBlock = 1000 - - // Check that Requested block has not changed in the original - assert.Equal(t, request.RequestBlock, requestedBlock) - - // Change shared dataReliability - dataReliability.Differentiator = false - - // DataReliability should be changed on both objects - assert.Equal(t, request.DataReliability, copy.DataReliability) - }) -} From 61f0f5c9cd4ccf29c88ac2afa217239e2a4c384e Mon Sep 17 00:00:00 2001 From: omer mishael Date: Mon, 13 Mar 2023 18:15:51 +0200 Subject: [PATCH 100/123] rename relay fields to better represent what they are --- go.mod | 1 + go.sum | 2 + proto/pairing/relay.proto | 4 +- .../lavaprotocol/finalization_consensus.go | 4 +- protocol/lavaprotocol/request_builder.go | 12 +- protocol/rpcconsumer/rpcconsumer_server.go 
| 2 +- protocol/rpcprovider/provider_listener.go | 2 +- .../rpcprovider/rewardserver/reward_server.go | 2 +- protocol/rpcprovider/rpcprovider_server.go | 54 +++--- relayer/chainproxy/chainproxy.go | 8 +- relayer/sentry/sentry.go | 8 +- relayer/sigs/sigs.go | 2 +- testutil/common/common.go | 4 +- x/conflict/keeper/conflict.go | 12 +- x/conflict/keeper/msg_server_detection.go | 6 +- .../keeper/msg_server_detection_test.go | 4 +- x/pairing/keeper/fixation_test.go | 4 +- x/pairing/keeper/msg_server_relay_payment.go | 32 ++-- .../msg_server_relay_payment_gov_test.go | 56 +++--- .../keeper/msg_server_relay_payment_test.go | 122 ++++++------- .../keeper/unresponsive_provider_test.go | 16 +- x/pairing/types/relay.pb.go | 168 +++++++++--------- 22 files changed, 264 insertions(+), 261 deletions(-) diff --git a/go.mod b/go.mod index cfccfab970..1cbdf2ddf5 100644 --- a/go.mod +++ b/go.mod @@ -49,6 +49,7 @@ require ( github.com/ghodss/yaml v1.0.0 // indirect github.com/gogo/googleapis v1.4.0 // indirect github.com/golang/glog v1.0.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2 // indirect github.com/pelletier/go-toml/v2 v2.0.5 // indirect golang.org/x/mod v0.7.0 // indirect golang.org/x/tools v0.2.0 // indirect diff --git a/go.sum b/go.sum index b4521d6dbd..c9d4ca44f4 100644 --- a/go.sum +++ b/go.sum @@ -785,6 +785,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2 h1:gDLXvp5S9izjldquuoAhDzccbskOL6tDC5jMSyx3zxE= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2/go.mod h1:7pdNwVWBBHGiCxa9lAszqCJMbfTISJ7oMftp8+UGV08= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= github.com/gtank/merlin v0.1.1-0.20191105220539-8318aed1a79f/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= diff --git a/proto/pairing/relay.proto b/proto/pairing/relay.proto index 2577e833d7..db2d54bfe5 100644 --- a/proto/pairing/relay.proto +++ b/proto/pairing/relay.proto @@ -10,14 +10,14 @@ service Relayer { } message RelaySession { - string chainID = 1; + string specID = 1; bytes content_hash = 2; uint64 session_id = 3; uint64 cu_sum = 4; // total compute unit used including this relay string provider = 5; uint64 relay_num = 6; QualityOfServiceReport QoSReport = 7; - int64 block_height = 8; + int64 epoch = 8; bytes unresponsive_providers = 9; string lava_chain_id = 10; bytes sig = 11; diff --git a/protocol/lavaprotocol/finalization_consensus.go b/protocol/lavaprotocol/finalization_consensus.go index 90e6cff266..e45dc49fe7 100644 --- a/protocol/lavaprotocol/finalization_consensus.go +++ b/protocol/lavaprotocol/finalization_consensus.go @@ -50,7 +50,7 @@ func (fc *FinalizationConsensus) newProviderHashesConsensus(blockDistanceForFina SigBlocks: reply.SigBlocks, SessionId: req.SessionId, RelayNum: req.RelayNum, - BlockHeight: req.BlockHeight, + BlockHeight: req.Epoch, LatestBlock: latestBlock, } providerDataContainers := map[string]providerDataContainer{} @@ -69,7 +69,7 @@ func (fc *FinalizationConsensus) 
insertProviderToConsensus(blockDistanceForFinal SigBlocks: reply.SigBlocks, SessionId: req.SessionId, RelayNum: req.RelayNum, - BlockHeight: req.BlockHeight, + BlockHeight: req.Epoch, LatestBlock: latestBlock, } consensus.agreeingProviders[providerAcc] = newProviderDataContainer diff --git a/protocol/lavaprotocol/request_builder.go b/protocol/lavaprotocol/request_builder.go index b9628f2ba9..35c4df8cf4 100644 --- a/protocol/lavaprotocol/request_builder.go +++ b/protocol/lavaprotocol/request_builder.go @@ -55,14 +55,14 @@ func NewRelayData(connectionType string, apiUrl string, data []byte, requestBloc func ConstructRelaySession(relayRequestData *pairingtypes.RelayPrivateData, chainID string, providerPublicAddress string, consumerSession *lavasession.SingleConsumerSession, epoch int64, reportedProviders []byte) *pairingtypes.RelaySession { return &pairingtypes.RelaySession{ - ChainID: chainID, + SpecID: chainID, ContentHash: sigs.CalculateContentHashForRelayData(relayRequestData), SessionId: uint64(consumerSession.SessionId), CuSum: consumerSession.CuSum + consumerSession.LatestRelayCu, // add the latestRelayCu which will be applied when session is returned properly, Provider: providerPublicAddress, RelayNum: consumerSession.RelayNum + lavasession.RelayNumberIncrement, // increment the relay number. which will be applied when session is returned properly QoSReport: consumerSession.QoSInfo.LastQoSReport, - BlockHeight: epoch, + Epoch: epoch, UnresponsiveProviders: reportedProviders, LavaChainId: "FIXMEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE IF IM HERE ITS A BUG", Sig: nil, @@ -71,14 +71,14 @@ func ConstructRelaySession(relayRequestData *pairingtypes.RelayPrivateData, chai func dataReliabilityRelaySession(relayRequestData *pairingtypes.RelayPrivateData, chainID string, providerPublicAddress string, epoch int64) *pairingtypes.RelaySession { return &pairingtypes.RelaySession{ - ChainID: chainID, + SpecID: chainID, ContentHash: sigs.CalculateContentHashForRelayData(relayRequestData), SessionId: lavasession.DataReliabilitySessionId, // sessionID for reliability is 0 CuSum: lavasession.DataReliabilityCuSum, // consumerSession.CuSum == 0 Provider: providerPublicAddress, RelayNum: 0, QoSReport: nil, - BlockHeight: epoch, + Epoch: epoch, UnresponsiveProviders: nil, LavaChainId: "FIXMEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE IF IM HERE ITS A BUG", Sig: nil, @@ -140,8 +140,8 @@ func DataReliabilityThresholdToSession(vrfs [][]byte, uniqueIdentifiers []bool, func NewVRFData(differentiator bool, vrf_res []byte, vrf_proof []byte, request *pairingtypes.RelayRequest, reply *pairingtypes.RelayReply) *pairingtypes.VRFData { dataReliability := &pairingtypes.VRFData{ - ChainID: request.RelaySession.ChainID, - Epoch: request.RelaySession.BlockHeight, + ChainID: request.RelaySession.SpecID, + Epoch: request.RelaySession.Epoch, Differentiator: differentiator, VrfValue: vrf_res, VrfProof: vrf_proof, diff --git a/protocol/rpcconsumer/rpcconsumer_server.go b/protocol/rpcconsumer/rpcconsumer_server.go index f90bfde3d9..e38261bdc8 100644 --- a/protocol/rpcconsumer/rpcconsumer_server.go +++ b/protocol/rpcconsumer/rpcconsumer_server.go @@ -317,7 +317,7 @@ func (rpccs *RPCConsumerServer) sendDataReliabilityRelayIfApplicable(ctx context return nil // disabled for this spec and requested block so no data reliability messages } var dataReliabilitySessions []*lavasession.DataReliabilitySession - sessionEpoch := uint64(relayResult.Request.RelaySession.BlockHeight) + sessionEpoch := uint64(relayResult.Request.RelaySession.Epoch) 
providerPubAddress := relayResult.ProviderAddress // handle data reliability vrfRes0, vrfRes1 := utils.CalculateVrfOnRelay(relayResult.Request.RelayData, relayResult.Reply, rpccs.VrfSk, sessionEpoch) diff --git a/protocol/rpcprovider/provider_listener.go b/protocol/rpcprovider/provider_listener.go index b611d22eeb..bd8920e1fe 100644 --- a/protocol/rpcprovider/provider_listener.go +++ b/protocol/rpcprovider/provider_listener.go @@ -116,7 +116,7 @@ func (rs *relayServer) RelaySubscribe(request *pairingtypes.RelayRequest, srv pa func (rs *relayServer) findReceiver(request *pairingtypes.RelayRequest) (RelayReceiver, error) { apiInterface := request.RelayData.ApiInterface - chainID := request.RelaySession.ChainID + chainID := request.RelaySession.SpecID endpoint := lavasession.RPCEndpoint{ChainID: chainID, ApiInterface: apiInterface} rs.lock.RLock() defer rs.lock.RUnlock() diff --git a/protocol/rpcprovider/rewardserver/reward_server.go b/protocol/rpcprovider/rewardserver/reward_server.go index 4304027c77..4b87a23628 100644 --- a/protocol/rpcprovider/rewardserver/reward_server.go +++ b/protocol/rpcprovider/rewardserver/reward_server.go @@ -145,7 +145,7 @@ func (rws *RewardServer) sendRewardsClaim(ctx context.Context, epoch uint64) err utils.LavaFormatError("invalid consumer address extraction from relay", err, &map[string]string{"relay": fmt.Sprintf("%+v", relay), "consumerBytes": consumerBytes.String()}) continue } - expectedPay := PaymentRequest{ChainID: relay.ChainID, CU: relay.CuSum, BlockHeightDeadline: relay.BlockHeight, Amount: sdk.Coin{}, Client: consumerAddr, UniqueIdentifier: relay.SessionId, Description: strconv.FormatUint(rws.serverID, 10)} + expectedPay := PaymentRequest{ChainID: relay.SpecID, CU: relay.CuSum, BlockHeightDeadline: relay.Epoch, Amount: sdk.Coin{}, Client: consumerAddr, UniqueIdentifier: relay.SessionId, Description: strconv.FormatUint(rws.serverID, 10)} rws.addExpectedPayment(expectedPay) rws.updateCUServiced(relay.CuSum) } diff --git a/protocol/rpcprovider/rpcprovider_server.go b/protocol/rpcprovider/rpcprovider_server.go index f2fe294800..f3128cfdb6 100644 --- a/protocol/rpcprovider/rpcprovider_server.go +++ b/protocol/rpcprovider/rpcprovider_server.go @@ -170,7 +170,7 @@ func (rpcps *RPCProviderServer) RelaySubscribe(request *pairingtypes.RelayReques if err != nil { return rpcps.handleRelayErrorStatus(err) } - subscribed, err := rpcps.TryRelaySubscribe(ctx, uint64(request.RelaySession.BlockHeight), srv, chainMessage, consumerAddress, relaySession) // this function does not return until subscription ends + subscribed, err := rpcps.TryRelaySubscribe(ctx, uint64(request.RelaySession.Epoch), srv, chainMessage, consumerAddress, relaySession) // this function does not return until subscription ends if subscribed { // meaning we created a subscription and used it for at least a message relayError := rpcps.providerSessionManager.OnSessionDone(relaySession) // TODO: when we pay as u go on subscription this will need to change @@ -281,11 +281,11 @@ func (rpcps *RPCProviderServer) TryRelaySubscribe(ctx context.Context, requestBl // verifies basic relay fields, and gets a provider session func (rpcps *RPCProviderServer) verifyRelaySession(ctx context.Context, request *pairingtypes.RelayRequest) (singleProviderSession *lavasession.SingleProviderSession, extractedConsumerAddress sdk.AccAddress, err error) { - valid := rpcps.providerSessionManager.IsValidEpoch(uint64(request.RelaySession.BlockHeight)) + valid := 
rpcps.providerSessionManager.IsValidEpoch(uint64(request.RelaySession.Epoch)) if !valid { return nil, nil, utils.LavaFormatError("user reported invalid lava block height", nil, &map[string]string{ "current lava block": strconv.FormatInt(rpcps.stateTracker.LatestBlock(), 10), - "requested lava block": strconv.FormatInt(request.RelaySession.BlockHeight, 10), + "requested lava block": strconv.FormatInt(request.RelaySession.Epoch, 10), "threshold": strconv.FormatUint(rpcps.providerSessionManager.GetBlockedEpochHeight(), 10), }) } @@ -316,35 +316,35 @@ func (rpcps *RPCProviderServer) verifyRelaySession(ctx context.Context, request if err != nil { return nil, nil, utils.LavaFormatError("failed data reliability validation", err, nil) } - dataReliabilitySingleProviderSession, err := rpcps.providerSessionManager.GetDataReliabilitySession(extractedConsumerAddress.String(), uint64(request.RelaySession.BlockHeight), request.RelaySession.SessionId, request.RelaySession.RelayNum) + dataReliabilitySingleProviderSession, err := rpcps.providerSessionManager.GetDataReliabilitySession(extractedConsumerAddress.String(), uint64(request.RelaySession.Epoch), request.RelaySession.SessionId, request.RelaySession.RelayNum) if err != nil { if lavasession.DataReliabilityAlreadySentThisEpochError.Is(err) { return nil, nil, err } - return nil, nil, utils.LavaFormatError("failed to get a provider data reliability session", err, &map[string]string{"sessionID": strconv.FormatUint(request.RelaySession.SessionId, 10), "consumer": extractedConsumerAddress.String(), "epoch": strconv.FormatInt(request.RelaySession.BlockHeight, 10)}) + return nil, nil, utils.LavaFormatError("failed to get a provider data reliability session", err, &map[string]string{"sessionID": strconv.FormatUint(request.RelaySession.SessionId, 10), "consumer": extractedConsumerAddress.String(), "epoch": strconv.FormatInt(request.RelaySession.Epoch, 10)}) } return dataReliabilitySingleProviderSession, extractedConsumerAddress, nil } func (rpcps *RPCProviderServer) getSingleProviderSession(ctx context.Context, request *pairingtypes.RelaySession, consumerAddressString string) (*lavasession.SingleProviderSession, error) { // regular session, verifies pairing epoch and relay number - singleProviderSession, err := rpcps.providerSessionManager.GetSession(consumerAddressString, uint64(request.BlockHeight), request.SessionId, request.RelayNum) + singleProviderSession, err := rpcps.providerSessionManager.GetSession(consumerAddressString, uint64(request.Epoch), request.SessionId, request.RelayNum) if err != nil { if lavasession.ConsumerNotRegisteredYet.Is(err) { - valid, _, verifyPairingError := rpcps.stateTracker.VerifyPairing(ctx, consumerAddressString, rpcps.providerAddress.String(), uint64(request.BlockHeight), request.ChainID) + valid, _, verifyPairingError := rpcps.stateTracker.VerifyPairing(ctx, consumerAddressString, rpcps.providerAddress.String(), uint64(request.Epoch), request.SpecID) if verifyPairingError != nil { return nil, utils.LavaFormatError("Failed to VerifyPairing after ConsumerNotRegisteredYet", verifyPairingError, &map[string]string{"sessionID": strconv.FormatUint(request.SessionId, 10), "consumer": consumerAddressString, "provider": rpcps.providerAddress.String(), "relayNum": strconv.FormatUint(request.RelayNum, 10)}) } if !valid { - return nil, utils.LavaFormatError("VerifyPairing, this consumer address is not valid with this provider", nil, &map[string]string{"epoch": strconv.FormatInt(request.BlockHeight, 10), "sessionID": 
strconv.FormatUint(request.SessionId, 10), "consumer": consumerAddressString, "provider": rpcps.providerAddress.String(), "relayNum": strconv.FormatUint(request.RelayNum, 10)}) + return nil, utils.LavaFormatError("VerifyPairing, this consumer address is not valid with this provider", nil, &map[string]string{"epoch": strconv.FormatInt(request.Epoch, 10), "sessionID": strconv.FormatUint(request.SessionId, 10), "consumer": consumerAddressString, "provider": rpcps.providerAddress.String(), "relayNum": strconv.FormatUint(request.RelayNum, 10)}) } - _, maxCuForConsumer, getVrfAndMaxCuError := rpcps.stateTracker.GetVrfPkAndMaxCuForUser(ctx, consumerAddressString, request.ChainID, uint64(request.BlockHeight)) + _, maxCuForConsumer, getVrfAndMaxCuError := rpcps.stateTracker.GetVrfPkAndMaxCuForUser(ctx, consumerAddressString, request.SpecID, uint64(request.Epoch)) if getVrfAndMaxCuError != nil { - return nil, utils.LavaFormatError("ConsumerNotRegisteredYet: GetVrfPkAndMaxCuForUser failed", getVrfAndMaxCuError, &map[string]string{"epoch": strconv.FormatInt(request.BlockHeight, 10), "sessionID": strconv.FormatUint(request.SessionId, 10), "consumer": consumerAddressString, "provider": rpcps.providerAddress.String(), "relayNum": strconv.FormatUint(request.RelayNum, 10)}) + return nil, utils.LavaFormatError("ConsumerNotRegisteredYet: GetVrfPkAndMaxCuForUser failed", getVrfAndMaxCuError, &map[string]string{"epoch": strconv.FormatInt(request.Epoch, 10), "sessionID": strconv.FormatUint(request.SessionId, 10), "consumer": consumerAddressString, "provider": rpcps.providerAddress.String(), "relayNum": strconv.FormatUint(request.RelayNum, 10)}) } // After validating the consumer we can register it with provider session manager. - singleProviderSession, err = rpcps.providerSessionManager.RegisterProviderSessionWithConsumer(consumerAddressString, uint64(request.BlockHeight), request.SessionId, request.RelayNum, maxCuForConsumer) + singleProviderSession, err = rpcps.providerSessionManager.RegisterProviderSessionWithConsumer(consumerAddressString, uint64(request.Epoch), request.SessionId, request.RelayNum, maxCuForConsumer) if err != nil { return nil, utils.LavaFormatError("Failed to RegisterProviderSessionWithConsumer", err, &map[string]string{"sessionID": strconv.FormatUint(request.SessionId, 10), "consumer": consumerAddressString, "relayNum": strconv.FormatUint(request.RelayNum, 10)}) } @@ -360,8 +360,8 @@ func (rpcps *RPCProviderServer) verifyRelayRequestMetaData(requestSession *pairi if requestSession.Provider != providerAddress { return utils.LavaFormatError("request had the wrong provider", nil, &map[string]string{"providerAddress": providerAddress, "request_provider": requestSession.Provider}) } - if requestSession.ChainID != rpcps.rpcProviderEndpoint.ChainID { - return utils.LavaFormatError("request had the wrong chainID", nil, &map[string]string{"request_chainID": requestSession.ChainID, "chainID": rpcps.rpcProviderEndpoint.ChainID}) + if requestSession.SpecID != rpcps.rpcProviderEndpoint.ChainID { + return utils.LavaFormatError("request had the wrong chainID", nil, &map[string]string{"request_chainID": requestSession.SpecID, "chainID": rpcps.rpcProviderEndpoint.ChainID}) } return nil } @@ -370,7 +370,7 @@ func (rpcps *RPCProviderServer) verifyDataReliabilityRelayRequest(ctx context.Co if request.RelaySession.CuSum != lavasession.DataReliabilityCuSum { return utils.LavaFormatError("request's CU sum is not equal to the data reliability CU sum", nil, &map[string]string{"cuSum": 
strconv.FormatUint(request.RelaySession.CuSum, 10), "DataReliabilityCuSum": strconv.Itoa(lavasession.DataReliabilityCuSum)}) } - vrf_pk, _, err := rpcps.stateTracker.GetVrfPkAndMaxCuForUser(ctx, consumerAddress.String(), request.RelaySession.ChainID, uint64(request.RelaySession.BlockHeight)) + vrf_pk, _, err := rpcps.stateTracker.GetVrfPkAndMaxCuForUser(ctx, consumerAddress.String(), request.RelaySession.SpecID, uint64(request.RelaySession.Epoch)) if err != nil { return utils.LavaFormatError("failed to get vrfpk and maxCURes for data reliability!", err, &map[string]string{ "userAddr": consumerAddress.String(), @@ -380,28 +380,28 @@ func (rpcps *RPCProviderServer) verifyDataReliabilityRelayRequest(ctx context.Co // data reliability is not session dependant, its always sent with sessionID 0 and if not we don't care if vrf_pk == nil { return utils.LavaFormatError("dataReliability Triggered with vrf_pk == nil", nil, - &map[string]string{"requested epoch": strconv.FormatInt(request.RelaySession.BlockHeight, 10), "userAddr": consumerAddress.String()}) + &map[string]string{"requested epoch": strconv.FormatInt(request.RelaySession.Epoch, 10), "userAddr": consumerAddress.String()}) } // verify the providerSig is indeed a signature by a valid provider on this query valid, index, err := rpcps.VerifyReliabilityAddressSigning(ctx, consumerAddress, request) if err != nil { return utils.LavaFormatError("VerifyReliabilityAddressSigning invalid", err, - &map[string]string{"requested epoch": strconv.FormatInt(request.RelaySession.BlockHeight, 10), "userAddr": consumerAddress.String(), "dataReliability": fmt.Sprintf("%v", request.DataReliability)}) + &map[string]string{"requested epoch": strconv.FormatInt(request.RelaySession.Epoch, 10), "userAddr": consumerAddress.String(), "dataReliability": fmt.Sprintf("%v", request.DataReliability)}) } if !valid { return utils.LavaFormatError("invalid DataReliability Provider signing", nil, - &map[string]string{"requested epoch": strconv.FormatInt(request.RelaySession.BlockHeight, 10), "userAddr": consumerAddress.String(), "dataReliability": fmt.Sprintf("%v", request.DataReliability)}) + &map[string]string{"requested epoch": strconv.FormatInt(request.RelaySession.Epoch, 10), "userAddr": consumerAddress.String(), "dataReliability": fmt.Sprintf("%v", request.DataReliability)}) } // verify data reliability fields correspond to the right vrf - valid = utils.VerifyVrfProof(request, *vrf_pk, uint64(request.RelaySession.BlockHeight)) + valid = utils.VerifyVrfProof(request, *vrf_pk, uint64(request.RelaySession.Epoch)) if !valid { return utils.LavaFormatError("invalid DataReliability fields, VRF wasn't verified with provided proof", nil, - &map[string]string{"requested epoch": strconv.FormatInt(request.RelaySession.BlockHeight, 10), "userAddr": consumerAddress.String(), "dataReliability": fmt.Sprintf("%v", request.DataReliability)}) + &map[string]string{"requested epoch": strconv.FormatInt(request.RelaySession.Epoch, 10), "userAddr": consumerAddress.String(), "dataReliability": fmt.Sprintf("%v", request.DataReliability)}) } _, dataReliabilityThreshold := rpcps.chainParser.DataReliabilityParams() - providersCount, err := rpcps.stateTracker.GetProvidersCountForConsumer(ctx, consumerAddress.String(), uint64(request.RelaySession.BlockHeight), request.RelaySession.ChainID) + providersCount, err := rpcps.stateTracker.GetProvidersCountForConsumer(ctx, consumerAddress.String(), uint64(request.RelaySession.Epoch), request.RelaySession.SpecID) if err != nil { - return 
utils.LavaFormatError("VerifyReliabilityAddressSigning failed fetching providers count for consumer", err, &map[string]string{"chainID": request.RelaySession.ChainID, "consumer": consumerAddress.String(), "epoch": strconv.FormatInt(request.RelaySession.BlockHeight, 10)}) + return utils.LavaFormatError("VerifyReliabilityAddressSigning failed fetching providers count for consumer", err, &map[string]string{"chainID": request.RelaySession.SpecID, "consumer": consumerAddress.String(), "epoch": strconv.FormatInt(request.RelaySession.Epoch, 10)}) } vrfIndex, vrfErr := utils.GetIndexForVrf(request.DataReliability.VrfValue, providersCount, dataReliabilityThreshold) if vrfErr != nil { @@ -411,8 +411,8 @@ func (rpcps *RPCProviderServer) verifyDataReliabilityRelayRequest(ctx context.Co } return utils.LavaFormatError("Provider identified vrf value in data reliability request does not meet threshold", vrfErr, &map[string]string{ - "requested epoch": strconv.FormatInt(request.RelaySession.BlockHeight, 10), "userAddr": consumerAddress.String(), - "dataReliability": string(dataReliabilityMarshalled), "relayEpochStart": strconv.FormatInt(request.RelaySession.BlockHeight, 10), + "requested epoch": strconv.FormatInt(request.RelaySession.Epoch, 10), "userAddr": consumerAddress.String(), + "dataReliability": string(dataReliabilityMarshalled), "relayEpochStart": strconv.FormatInt(request.RelaySession.Epoch, 10), "vrfIndex": strconv.FormatInt(vrfIndex, 10), "self Index": strconv.FormatInt(index, 10), }) @@ -424,8 +424,8 @@ func (rpcps *RPCProviderServer) verifyDataReliabilityRelayRequest(ctx context.Co } return utils.LavaFormatError("Provider identified invalid vrfIndex in data reliability request, the given index and self index are different", nil, &map[string]string{ - "requested epoch": strconv.FormatInt(request.RelaySession.BlockHeight, 10), "userAddr": consumerAddress.String(), - "dataReliability": string(dataReliabilityMarshalled), "relayEpochStart": strconv.FormatInt(request.RelaySession.BlockHeight, 10), + "requested epoch": strconv.FormatInt(request.RelaySession.Epoch, 10), "userAddr": consumerAddress.String(), + "dataReliability": string(dataReliabilityMarshalled), "relayEpochStart": strconv.FormatInt(request.RelaySession.Epoch, 10), "vrfIndex": strconv.FormatInt(vrfIndex, 10), "self Index": strconv.FormatInt(index, 10), }) @@ -461,7 +461,7 @@ func (rpcps *RPCProviderServer) VerifyReliabilityAddressSigning(ctx context.Cont return false, 0, utils.LavaFormatError("failed converting signer to address", err, &map[string]string{"consumer": consumer.String(), "PubKey": pubKey.Address().String()}) } - return rpcps.stateTracker.VerifyPairing(ctx, consumer.String(), providerAccAddress.String(), uint64(request.RelaySession.BlockHeight), request.RelaySession.ChainID) // return if this pairing is authorised + return rpcps.stateTracker.VerifyPairing(ctx, consumer.String(), providerAccAddress.String(), uint64(request.RelaySession.Epoch), request.RelaySession.SpecID) // return if this pairing is authorised } func (rpcps *RPCProviderServer) handleRelayErrorStatus(err error) error { @@ -547,7 +547,7 @@ func (rpcps *RPCProviderServer) TryRelay(ctx context.Context, request *pairingty } if requestedBlockHash != nil || finalized { err := cache.SetEntry(ctx, request, rpcps.rpcProviderEndpoint.ApiInterface, requestedBlockHash, rpcps.rpcProviderEndpoint.ChainID, consumerAddr.String(), reply, finalized) - if err != nil && !performance.NotInitialisedError.Is(err) && request.RelaySession.BlockHeight != spectypes.NOT_APPLICABLE { 
+ if err != nil && !performance.NotInitialisedError.Is(err) && request.RelaySession.Epoch != spectypes.NOT_APPLICABLE { utils.LavaFormatWarning("error updating cache with new entry", err, nil) } } diff --git a/relayer/chainproxy/chainproxy.go b/relayer/chainproxy/chainproxy.go index bea51e07f2..89b0ed536b 100644 --- a/relayer/chainproxy/chainproxy.go +++ b/relayer/chainproxy/chainproxy.go @@ -142,8 +142,8 @@ func SendRelay( RelaySession: &pairingtypes.RelaySession{ SessionId: uint64(consumerSession.SessionId), Provider: providerPublicAddress, - ChainID: cp.GetSentry().ChainID, - BlockHeight: blockHeight, + SpecID: cp.GetSentry().ChainID, + Epoch: blockHeight, RelayNum: consumerSession.RelayNum + lavasession.RelayNumberIncrement, // increment the relay number. which will be applied when session is returned properly QoSReport: consumerSession.QoSInfo.LastQoSReport, UnresponsiveProviders: reportedProviders, @@ -231,8 +231,8 @@ func SendRelay( RelaySession: &pairingtypes.RelaySession{ SessionId: lavasession.DataReliabilitySessionId, // sessionID for reliability is 0 Provider: providerAddress, - ChainID: sentry.ChainID, - BlockHeight: blockHeight, + SpecID: sentry.ChainID, + Epoch: blockHeight, RelayNum: 0, // consumerSession.RelayNum == 0 QoSReport: nil, UnresponsiveProviders: reportedProviders, diff --git a/relayer/sentry/sentry.go b/relayer/sentry/sentry.go index ff5c64392b..9d64a340d6 100755 --- a/relayer/sentry/sentry.go +++ b/relayer/sentry/sentry.go @@ -953,7 +953,7 @@ func (s *Sentry) initProviderHashesConsensus(providerAcc string, latestBlock int SigBlocks: reply.SigBlocks, SessionId: req.RelaySession.SessionId, RelayNum: req.RelaySession.RelayNum, - BlockHeight: req.RelaySession.BlockHeight, + BlockHeight: req.RelaySession.Epoch, LatestBlock: latestBlock, } providerDataContainers := map[string]providerDataContainer{} @@ -972,7 +972,7 @@ func (s *Sentry) insertProviderToConsensus(consensus *ProviderHashesConsensus, f SigBlocks: reply.SigBlocks, SessionId: req.RelaySession.SessionId, RelayNum: req.RelaySession.RelayNum, - BlockHeight: req.RelaySession.BlockHeight, + BlockHeight: req.RelaySession.Epoch, LatestBlock: latestBlock, } consensus.agreeingProviders[providerAcc] = newProviderDataContainer @@ -1081,8 +1081,8 @@ func (s *Sentry) SendRelay( vrf_res, vrf_proof := utils.ProveVrfOnRelay(request.RelayData, reply, s.VrfSk, differentiator, sessionEpoch) s.VrfSkMu.Unlock() dataReliability := &pairingtypes.VRFData{ - ChainID: request.RelaySession.ChainID, - Epoch: request.RelaySession.BlockHeight, + ChainID: request.RelaySession.SpecID, + Epoch: request.RelaySession.Epoch, Differentiator: differentiator, VrfValue: vrf_res, VrfProof: vrf_proof, diff --git a/relayer/sigs/sigs.go b/relayer/sigs/sigs.go index f7148f17ce..f105f4162a 100644 --- a/relayer/sigs/sigs.go +++ b/relayer/sigs/sigs.go @@ -104,7 +104,7 @@ func DataToVerifyProviderSig(request *pairingtypes.RelayRequest, data_hash []byt func DataToSignResponseFinalizationData(relayResponse *pairingtypes.RelayReply, relayReq *pairingtypes.RelayRequest, clientAddress sdk.AccAddress) (dataToSign []byte) { // sign latest_block+finalized_blocks_hashes+session_id+block_height+relay_num - return DataToSignResponseFinalizationDataInner(relayResponse.LatestBlock, relayReq.RelaySession.SessionId, relayReq.RelaySession.BlockHeight, relayReq.RelaySession.RelayNum, relayResponse.FinalizedBlocksHashes, clientAddress) + return DataToSignResponseFinalizationDataInner(relayResponse.LatestBlock, relayReq.RelaySession.SessionId, relayReq.RelaySession.Epoch, 
relayReq.RelaySession.RelayNum, relayResponse.FinalizedBlocksHashes, clientAddress) } func DataToSignResponseFinalizationDataInner(latestBlock int64, sessionID uint64, blockHeight int64, relayNum uint64, finalizedBlockHashes []byte, clientAddress sdk.AccAddress) (dataToSign []byte) { diff --git a/testutil/common/common.go b/testutil/common/common.go index 87146414b2..d1c46e7412 100644 --- a/testutil/common/common.go +++ b/testutil/common/common.go @@ -97,9 +97,9 @@ func CreateMsgDetection(ctx context.Context, consumer Account, provider0 Account Provider: provider0.Addr.String(), ContentHash: sigs.CalculateContentHashForRelayData(msg.ResponseConflict.ConflictRelayData0.Request.RelayData), SessionId: uint64(1), - ChainID: spec.Index, + SpecID: spec.Index, CuSum: 0, - BlockHeight: sdk.UnwrapSDKContext(ctx).BlockHeight(), + Epoch: sdk.UnwrapSDKContext(ctx).BlockHeight(), RelayNum: 0, QoSReport: &types.QualityOfServiceReport{Latency: sdk.OneDec(), Availability: sdk.OneDec(), Sync: sdk.OneDec()}, } diff --git a/x/conflict/keeper/conflict.go b/x/conflict/keeper/conflict.go index a2545c985c..e50cad1f9b 100644 --- a/x/conflict/keeper/conflict.go +++ b/x/conflict/keeper/conflict.go @@ -16,13 +16,13 @@ func (k Keeper) ValidateFinalizationConflict(ctx sdk.Context, conflictData *type func (k Keeper) ValidateResponseConflict(ctx sdk.Context, conflictData *types.ResponseConflict, clientAddr sdk.AccAddress) error { // 1. validate mismatching data - chainID := conflictData.ConflictRelayData0.Request.RelaySession.ChainID - if chainID != conflictData.ConflictRelayData1.Request.RelaySession.ChainID { - return fmt.Errorf("mismatching request parameters between providers %s, %s", chainID, conflictData.ConflictRelayData1.Request.RelaySession.ChainID) + chainID := conflictData.ConflictRelayData0.Request.RelaySession.SpecID + if chainID != conflictData.ConflictRelayData1.Request.RelaySession.SpecID { + return fmt.Errorf("mismatching request parameters between providers %s, %s", chainID, conflictData.ConflictRelayData1.Request.RelaySession.SpecID) } - block := conflictData.ConflictRelayData0.Request.RelaySession.BlockHeight - if block != conflictData.ConflictRelayData1.Request.RelaySession.BlockHeight { - return fmt.Errorf("mismatching request parameters between providers %d, %d", block, conflictData.ConflictRelayData1.Request.RelaySession.BlockHeight) + block := conflictData.ConflictRelayData0.Request.RelaySession.Epoch + if block != conflictData.ConflictRelayData1.Request.RelaySession.Epoch { + return fmt.Errorf("mismatching request parameters between providers %d, %d", block, conflictData.ConflictRelayData1.Request.RelaySession.Epoch) } if conflictData.ConflictRelayData0.Request.RelayData.ConnectionType != conflictData.ConflictRelayData1.Request.RelayData.ConnectionType { return fmt.Errorf("mismatching request parameters between providers %s, %s", conflictData.ConflictRelayData0.Request.RelayData.ConnectionType, conflictData.ConflictRelayData1.Request.RelayData.ConnectionType) diff --git a/x/conflict/keeper/msg_server_detection.go b/x/conflict/keeper/msg_server_detection.go index 5dd455eeb2..3cbe592751 100644 --- a/x/conflict/keeper/msg_server_detection.go +++ b/x/conflict/keeper/msg_server_detection.go @@ -45,7 +45,7 @@ func (k msgServer) Detection(goCtx context.Context, msg *types.MsgDetection) (*t // 3. accept incoming commit transactions for this vote, // 4. after vote ends, accept reveal transactions, strike down every provider that voted (only valid if there was a commit) // 5. 
majority wins, minority gets penalised - epochStart, _, err := k.epochstorageKeeper.GetEpochStartForBlock(ctx, uint64(msg.ResponseConflict.ConflictRelayData0.Request.RelaySession.BlockHeight)) + epochStart, _, err := k.epochstorageKeeper.GetEpochStartForBlock(ctx, uint64(msg.ResponseConflict.ConflictRelayData0.Request.RelaySession.Epoch)) if err != nil { return nil, utils.LavaError(ctx, logger, "response_conflict_detection", map[string]string{"client": msg.Creator, "provider0": msg.ResponseConflict.ConflictRelayData0.Request.RelaySession.Provider, "provider1": msg.ResponseConflict.ConflictRelayData1.Request.RelaySession.Provider}, "Simulation: could not get EpochStart for specific block") } @@ -57,7 +57,7 @@ func (k msgServer) Detection(goCtx context.Context, msg *types.MsgDetection) (*t conflictVote := types.ConflictVote{} conflictVote.Index = index conflictVote.VoteState = types.StateCommit - conflictVote.VoteStartBlock = uint64(msg.ResponseConflict.ConflictRelayData0.Request.RelaySession.BlockHeight) + conflictVote.VoteStartBlock = uint64(msg.ResponseConflict.ConflictRelayData0.Request.RelaySession.Epoch) epochBlocks, err := k.epochstorageKeeper.EpochBlocks(ctx, uint64(ctx.BlockHeight())) if err != nil { return nil, utils.LavaError(ctx, logger, "response_conflict_detection", map[string]string{"client": msg.Creator, "provider0": msg.ResponseConflict.ConflictRelayData0.Request.RelaySession.Provider, "provider1": msg.ResponseConflict.ConflictRelayData1.Request.RelaySession.Provider}, "Simulation: could not get epochblocks") @@ -70,7 +70,7 @@ func (k msgServer) Detection(goCtx context.Context, msg *types.MsgDetection) (*t conflictVote.VoteDeadline = voteDeadline conflictVote.ApiUrl = msg.ResponseConflict.ConflictRelayData0.Request.RelayData.ApiUrl conflictVote.ClientAddress = msg.Creator - conflictVote.ChainID = msg.ResponseConflict.ConflictRelayData0.Request.RelaySession.ChainID + conflictVote.ChainID = msg.ResponseConflict.ConflictRelayData0.Request.RelaySession.SpecID conflictVote.RequestBlock = uint64(msg.ResponseConflict.ConflictRelayData0.Request.RelayData.RequestBlock) conflictVote.RequestData = msg.ResponseConflict.ConflictRelayData0.Request.RelayData.Data diff --git a/x/conflict/keeper/msg_server_detection_test.go b/x/conflict/keeper/msg_server_detection_test.go index fa26f315ae..70574197f3 100644 --- a/x/conflict/keeper/msg_server_detection_test.go +++ b/x/conflict/keeper/msg_server_detection_test.go @@ -100,8 +100,8 @@ func TestDetection(t *testing.T) { //changes to request1 according to test msg.ResponseConflict.ConflictRelayData1.Request.RelayData.ConnectionType += tt.ConnectionType msg.ResponseConflict.ConflictRelayData1.Request.RelayData.ApiUrl += tt.ApiUrl - msg.ResponseConflict.ConflictRelayData1.Request.RelaySession.BlockHeight += tt.BlockHeight - msg.ResponseConflict.ConflictRelayData1.Request.RelaySession.ChainID += tt.ChainID + msg.ResponseConflict.ConflictRelayData1.Request.RelaySession.Epoch += tt.BlockHeight + msg.ResponseConflict.ConflictRelayData1.Request.RelaySession.SpecID += tt.ChainID msg.ResponseConflict.ConflictRelayData1.Request.RelayData.Data = append(msg.ResponseConflict.ConflictRelayData1.Request.RelayData.Data, tt.Data...) 
msg.ResponseConflict.ConflictRelayData1.Request.RelayData.RequestBlock += tt.RequestBlock msg.ResponseConflict.ConflictRelayData1.Request.RelaySession.CuSum += tt.Cusum diff --git a/x/pairing/keeper/fixation_test.go b/x/pairing/keeper/fixation_test.go index 2a5e9e6f5a..5759e00025 100644 --- a/x/pairing/keeper/fixation_test.go +++ b/x/pairing/keeper/fixation_test.go @@ -105,9 +105,9 @@ func TestEpochPaymentDeletionWithMemoryShortening(t *testing.T) { Provider: ts.providers[0].address.String(), ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(1), - ChainID: ts.spec.Name, + SpecID: ts.spec.Name, CuSum: ts.spec.Apis[0].ComputeUnits * 10, - BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), + Epoch: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), RelayNum: 0, } diff --git a/x/pairing/keeper/msg_server_relay_payment.go b/x/pairing/keeper/msg_server_relay_payment.go index ab4da46ef0..082056f788 100644 --- a/x/pairing/keeper/msg_server_relay_payment.go +++ b/x/pairing/keeper/msg_server_relay_payment.go @@ -38,7 +38,7 @@ func (k msgServer) RelayPayment(goCtx context.Context, msg *types.MsgRelayPaymen } for _, relay := range msg.Relays { - if relay.BlockHeight > ctx.BlockHeight() { + if relay.Epoch > ctx.BlockHeight() { return errorLogAndFormat("relay_future_block", map[string]string{"blockheight": string(relay.Sig)}, "relay request for a block in the future") } @@ -59,17 +59,17 @@ func (k msgServer) RelayPayment(goCtx context.Context, msg *types.MsgRelayPaymen } // TODO: add support for spec changes - spec, found := k.specKeeper.GetSpec(ctx, relay.ChainID) + spec, found := k.specKeeper.GetSpec(ctx, relay.SpecID) if !found || !spec.Enabled { - return errorLogAndFormat("relay_payment_spec", map[string]string{"chainID": relay.ChainID}, "invalid spec ID specified in proof") + return errorLogAndFormat("relay_payment_spec", map[string]string{"chainID": relay.SpecID}, "invalid spec ID specified in proof") } isValidPairing, userStake, thisProviderIndex, err := k.Keeper.ValidatePairingForClient( ctx, - relay.ChainID, + relay.SpecID, clientAddr, providerAddr, - uint64(relay.BlockHeight), + uint64(relay.Epoch), ) if err != nil { details := map[string]string{"client": clientAddr.String(), "provider": providerAddr.String(), "error": err.Error()} @@ -80,20 +80,20 @@ func (k msgServer) RelayPayment(goCtx context.Context, msg *types.MsgRelayPaymen return errorLogAndFormat("relay_payment_pairing", details, "invalid pairing claim on proof of relay") } - epochStart, _, err := k.epochStorageKeeper.GetEpochStartForBlock(ctx, uint64(relay.BlockHeight)) + epochStart, _, err := k.epochStorageKeeper.GetEpochStartForBlock(ctx, uint64(relay.Epoch)) if err != nil { - details := map[string]string{"epoch": strconv.FormatUint(epochStart, 10), "block": strconv.FormatUint(uint64(relay.BlockHeight), 10), "error": err.Error()} + details := map[string]string{"epoch": strconv.FormatUint(epochStart, 10), "block": strconv.FormatUint(uint64(relay.Epoch), 10), "error": err.Error()} return errorLogAndFormat("relay_payment_epoch_start", details, "problem getting epoch start") } payReliability := false // validate data reliability - vrfStoreKey := VRFKey{ChainID: relay.ChainID, Epoch: epochStart, Consumer: clientAddr.String()} + vrfStoreKey := VRFKey{ChainID: relay.SpecID, Epoch: epochStart, Consumer: clientAddr.String()} if vrfData, ok := dataReliabilityStore[vrfStoreKey]; ok { delete(dataReliabilityStore, vrfStoreKey) details := map[string]string{"client": clientAddr.String(), "provider": providerAddr.String()} if 
!spec.DataReliabilityEnabled { - details["chainID"] = relay.ChainID + details["chainID"] = relay.SpecID return errorLogAndFormat("relay_payment_data_reliability_disabled", details, "compares_hashes false for spec and reliability was received") } @@ -115,10 +115,10 @@ func (k msgServer) RelayPayment(goCtx context.Context, msg *types.MsgRelayPaymen // check this other provider is indeed legitimate isValidPairing, _, _, err := k.Keeper.ValidatePairingForClient( ctx, - relay.ChainID, + relay.SpecID, clientAddr, otherProviderAddress, - uint64(relay.BlockHeight), + uint64(relay.Epoch), ) if err != nil { details["error"] = err.Error() @@ -142,7 +142,7 @@ func (k msgServer) RelayPayment(goCtx context.Context, msg *types.MsgRelayPaymen return errorLogAndFormat("relay_data_reliability_vrf_proof", details, "invalid vrf proof by consumer, result doesn't correspond to proof") } - providersCount, err := k.ServicersToPairCount(ctx, uint64(relay.BlockHeight)) + providersCount, err := k.ServicersToPairCount(ctx, uint64(relay.Epoch)) if err != nil { details["error"] = err.Error() return errorLogAndFormat("relay_payment_reliability_servicerstopaircount", details, err.Error()) @@ -165,7 +165,7 @@ func (k msgServer) RelayPayment(goCtx context.Context, msg *types.MsgRelayPaymen } // this prevents double spend attacks, and tracks the CU per session a client can use - totalCUInEpochForUserProvider, err := k.Keeper.AddEpochPayment(ctx, relay.ChainID, epochStart, clientAddr, providerAddr, relay.CuSum, strconv.FormatUint(relay.SessionId, 16)) + totalCUInEpochForUserProvider, err := k.Keeper.AddEpochPayment(ctx, relay.SpecID, epochStart, clientAddr, providerAddr, relay.CuSum, strconv.FormatUint(relay.SessionId, 16)) if err != nil { // double spending on user detected! details := map[string]string{ @@ -205,7 +205,7 @@ func (k msgServer) RelayPayment(goCtx context.Context, msg *types.MsgRelayPaymen if len(msg.DescriptionString) > 20 { msg.DescriptionString = msg.DescriptionString[:20] } - details := map[string]string{"chainID": fmt.Sprintf(relay.ChainID), "client": clientAddr.String(), "provider": providerAddr.String(), "CU": strconv.FormatUint(relay.CuSum, 10), "BasePay": rewardCoins.String(), "totalCUInEpoch": strconv.FormatUint(totalCUInEpochForUserProvider, 10), "uniqueIdentifier": strconv.FormatUint(relay.SessionId, 10), "descriptionString": msg.DescriptionString} + details := map[string]string{"chainID": fmt.Sprintf(relay.SpecID), "client": clientAddr.String(), "provider": providerAddr.String(), "CU": strconv.FormatUint(relay.CuSum, 10), "BasePay": rewardCoins.String(), "totalCUInEpoch": strconv.FormatUint(totalCUInEpochForUserProvider, 10), "uniqueIdentifier": strconv.FormatUint(relay.SessionId, 10), "descriptionString": msg.DescriptionString} if relay.QoSReport != nil { QoS, err := relay.QoSReport.ComputeQoS() @@ -224,7 +224,7 @@ func (k msgServer) RelayPayment(goCtx context.Context, msg *types.MsgRelayPaymen amountToBurnClient := k.Keeper.BurnCoinsPerCU(ctx).MulInt64(int64(relay.CuSum)) burnAmount := sdk.Coin{Amount: amountToBurnClient.TruncateInt(), Denom: epochstoragetypes.TokenDenom} - burnSucceeded, err2 := k.BurnClientStake(ctx, relay.ChainID, clientAddr, burnAmount, false) + burnSucceeded, err2 := k.BurnClientStake(ctx, relay.SpecID, clientAddr, burnAmount, false) if err2 != nil { details["amountToBurn"] = burnAmount.String() @@ -276,7 +276,7 @@ func (k msgServer) RelayPayment(goCtx context.Context, msg *types.MsgRelayPaymen } // update provider payment storage with complainer's CU - err = 
k.updateProviderPaymentStorageWithComplainerCU(ctx, relay.UnresponsiveProviders, logger, epochStart, relay.ChainID, relay.CuSum, servicersToPair, clientAddr) + err = k.updateProviderPaymentStorageWithComplainerCU(ctx, relay.UnresponsiveProviders, logger, epochStart, relay.SpecID, relay.CuSum, servicersToPair, clientAddr) if err != nil { utils.LogLavaEvent(ctx, logger, types.UnresponsiveProviderUnstakeFailedEventName, map[string]string{"err:": err.Error()}, "Error Unresponsive Providers could not unstake") } diff --git a/x/pairing/keeper/msg_server_relay_payment_gov_test.go b/x/pairing/keeper/msg_server_relay_payment_gov_test.go index f2fcb40d23..46a352b230 100644 --- a/x/pairing/keeper/msg_server_relay_payment_gov_test.go +++ b/x/pairing/keeper/msg_server_relay_payment_gov_test.go @@ -68,9 +68,9 @@ func TestRelayPaymentGovQosWeightChange(t *testing.T) { Provider: ts.providers[0].address.String(), ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(ti), - ChainID: ts.spec.Name, + SpecID: ts.spec.Name, CuSum: ts.spec.Apis[0].ComputeUnits * 10, - BlockHeight: int64(tt.epoch), + Epoch: int64(tt.epoch), RelayNum: 0, QoSReport: badQoS, } @@ -170,9 +170,9 @@ func TestRelayPaymentGovEpochBlocksDecrease(t *testing.T) { Provider: ts.providers[0].address.String(), ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(ti), - ChainID: ts.spec.Name, + SpecID: ts.spec.Name, CuSum: ts.spec.Apis[0].ComputeUnits * 10, - BlockHeight: int64(tt.epoch), + Epoch: int64(tt.epoch), RelayNum: 0, } @@ -249,9 +249,9 @@ func TestRelayPaymentGovEpochBlocksIncrease(t *testing.T) { Provider: ts.providers[0].address.String(), ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(ti), - ChainID: ts.spec.Name, + SpecID: ts.spec.Name, CuSum: ts.spec.Apis[0].ComputeUnits * 10, - BlockHeight: int64(tt.epoch), + Epoch: int64(tt.epoch), RelayNum: 0, } @@ -333,9 +333,9 @@ func TestRelayPaymentGovEpochToSaveDecrease(t *testing.T) { Provider: ts.providers[0].address.String(), ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(ti), - ChainID: ts.spec.Name, + SpecID: ts.spec.Name, CuSum: ts.spec.Apis[0].ComputeUnits * 10, - BlockHeight: int64(tt.epoch), + Epoch: int64(tt.epoch), RelayNum: 0, } @@ -406,9 +406,9 @@ func TestRelayPaymentGovEpochToSaveIncrease(t *testing.T) { Provider: ts.providers[0].address.String(), ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(ti), - ChainID: ts.spec.Name, + SpecID: ts.spec.Name, CuSum: ts.spec.Apis[0].ComputeUnits * 10, - BlockHeight: int64(tt.epoch), + Epoch: int64(tt.epoch), RelayNum: 0, } @@ -496,9 +496,9 @@ func TestRelayPaymentGovStakeToMaxCUListMaxCUDecrease(t *testing.T) { Provider: ts.providers[0].address.String(), ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(ti), - ChainID: ts.spec.Name, + SpecID: ts.spec.Name, CuSum: uint64(250001), // the relayRequest costs 250001 (more than the previous limit, and less than in the new limit). This should influence the validity of the request - BlockHeight: int64(tt.epoch), + Epoch: int64(tt.epoch), RelayNum: 0, } @@ -587,9 +587,9 @@ func TestRelayPaymentGovStakeToMaxCUListStakeThresholdIncrease(t *testing.T) { Provider: ts.providers[0].address.String(), ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(ti), - ChainID: ts.spec.Name, + SpecID: ts.spec.Name, CuSum: uint64(200000), // the relayRequest costs 200000 (less than the previous limit, and more than in the new limit). 
This should influence the validity of the request - BlockHeight: int64(tt.epoch), + Epoch: int64(tt.epoch), RelayNum: 0, } @@ -680,9 +680,9 @@ func TestRelayPaymentGovEpochBlocksMultipleChanges(t *testing.T) { Provider: ts.providers[0].address.String(), ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(ti), - ChainID: ts.spec.Name, + SpecID: ts.spec.Name, CuSum: ts.spec.Apis[0].ComputeUnits * 10, - BlockHeight: int64(tt.paymentEpoch), + Epoch: int64(tt.paymentEpoch), RelayNum: 0, } @@ -806,9 +806,9 @@ func TestStakePaymentUnstake(t *testing.T) { Provider: ts.providers[0].address.String(), ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(1), - ChainID: ts.spec.Name, + SpecID: ts.spec.Name, CuSum: uint64(10000), - BlockHeight: int64(sdk.UnwrapSDKContext(ts.ctx).BlockHeight()), + Epoch: int64(sdk.UnwrapSDKContext(ts.ctx).BlockHeight()), RelayNum: 0, } @@ -880,9 +880,9 @@ func TestRelayPaymentMemoryTransferAfterEpochChangeWithGovParamChange(t *testing Provider: ts.providers[0].address.String(), ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(1), - ChainID: ts.spec.Name, + SpecID: ts.spec.Name, CuSum: uint64(10000), - BlockHeight: int64(epochAfterEpochBlocksChanged), + Epoch: int64(epochAfterEpochBlocksChanged), RelayNum: 0, } @@ -923,7 +923,7 @@ func TestRelayPaymentMemoryTransferAfterEpochChangeWithGovParamChange(t *testing // Helper function to verify the relay payment objects that are saved on-chain after getting payment from a relay request func verifyRelayPaymentObjects(t *testing.T, ts *testStruct, relayRequest *pairingtypes.RelaySession, objectExists bool) { // Get EpochPayment struct from current epoch and perform basic verifications - epochPayments, found, epochPaymentKey := ts.keepers.Pairing.GetEpochPaymentsFromBlock(sdk.UnwrapSDKContext(ts.ctx), uint64(relayRequest.GetBlockHeight())) + epochPayments, found, epochPaymentKey := ts.keepers.Pairing.GetEpochPaymentsFromBlock(sdk.UnwrapSDKContext(ts.ctx), uint64(relayRequest.GetEpoch())) if objectExists { require.Equal(t, true, found) require.Equal(t, epochPaymentKey, epochPayments.GetIndex()) @@ -933,7 +933,7 @@ func verifyRelayPaymentObjects(t *testing.T, ts *testStruct, relayRequest *pairi } // Get the providerPaymentStorageKey - providerPaymentStorageKey := ts.keepers.Pairing.GetProviderPaymentStorageKey(sdk.UnwrapSDKContext(ts.ctx), ts.spec.Name, uint64(relayRequest.GetBlockHeight()), ts.providers[0].address) + providerPaymentStorageKey := ts.keepers.Pairing.GetProviderPaymentStorageKey(sdk.UnwrapSDKContext(ts.ctx), ts.spec.Name, uint64(relayRequest.GetEpoch()), ts.providers[0].address) // Get the providerPaymentStorage struct from epochPayments providerPaymentStorageFromEpochPayments := pairingtypes.ProviderPaymentStorage{} @@ -944,7 +944,7 @@ func verifyRelayPaymentObjects(t *testing.T, ts *testStruct, relayRequest *pairi } } require.NotEmpty(t, providerPaymentStorageFromEpochPayments.GetIndex()) - require.Equal(t, uint64(relayRequest.GetBlockHeight()), providerPaymentStorageFromEpochPayments.GetEpoch()) + require.Equal(t, uint64(relayRequest.GetEpoch()), providerPaymentStorageFromEpochPayments.GetEpoch()) // Get the UniquePaymentStorageClientProvider key uniquePaymentStorageClientProviderKey := ts.keepers.Pairing.EncodeUniquePaymentKey(sdk.UnwrapSDKContext(ts.ctx), ts.clients[0].address, ts.providers[0].address, strconv.FormatUint(relayRequest.SessionId, 16), ts.spec.Name) @@ -958,13 +958,13 @@ func verifyRelayPaymentObjects(t *testing.T, ts *testStruct, relayRequest *pairi } } 
require.NotEmpty(t, uniquePaymentStorageClientProviderFromProviderPaymentStorage.GetIndex()) - require.Equal(t, uint64(relayRequest.GetBlockHeight()), uniquePaymentStorageClientProviderFromProviderPaymentStorage.GetBlock()) + require.Equal(t, uint64(relayRequest.GetEpoch()), uniquePaymentStorageClientProviderFromProviderPaymentStorage.GetBlock()) require.Equal(t, relayRequest.GetCuSum(), uniquePaymentStorageClientProviderFromProviderPaymentStorage.GetUsedCU()) // when checking CU, the client may be trying to use a relay request with more CU than his MaxCU (determined by StakeThreshold) - clientStakeEntry, err := ts.keepers.Epochstorage.GetStakeEntryForClientEpoch(sdk.UnwrapSDKContext(ts.ctx), relayRequest.GetChainID(), ts.clients[0].address, uint64(relayRequest.GetBlockHeight())) + clientStakeEntry, err := ts.keepers.Epochstorage.GetStakeEntryForClientEpoch(sdk.UnwrapSDKContext(ts.ctx), relayRequest.GetSpecID(), ts.clients[0].address, uint64(relayRequest.GetEpoch())) require.Nil(t, err) - clientMaxCU, err := ts.keepers.Pairing.ClientMaxCUProviderForBlock(sdk.UnwrapSDKContext(ts.ctx), uint64(relayRequest.GetBlockHeight()), clientStakeEntry) + clientMaxCU, err := ts.keepers.Pairing.ClientMaxCUProviderForBlock(sdk.UnwrapSDKContext(ts.ctx), uint64(relayRequest.GetEpoch()), clientStakeEntry) require.Nil(t, err) if clientMaxCU < relayRequest.CuSum { require.Equal(t, relayRequest.GetCuSum(), clientMaxCU) @@ -975,12 +975,12 @@ func verifyRelayPaymentObjects(t *testing.T, ts *testStruct, relayRequest *pairi // Get the providerPaymentStorage struct directly providerPaymentStorage, found := ts.keepers.Pairing.GetProviderPaymentStorage(sdk.UnwrapSDKContext(ts.ctx), providerPaymentStorageKey) require.Equal(t, true, found) - require.Equal(t, uint64(relayRequest.GetBlockHeight()), providerPaymentStorage.GetEpoch()) + require.Equal(t, uint64(relayRequest.GetEpoch()), providerPaymentStorage.GetEpoch()) // Get one of the UniquePaymentStorageClientProvider struct directly uniquePaymentStorageClientProvider, found := ts.keepers.Pairing.GetUniquePaymentStorageClientProvider(sdk.UnwrapSDKContext(ts.ctx), uniquePaymentStorageClientProviderKey) require.Equal(t, true, found) - require.Equal(t, uint64(relayRequest.GetBlockHeight()), uniquePaymentStorageClientProvider.GetBlock()) + require.Equal(t, uint64(relayRequest.GetEpoch()), uniquePaymentStorageClientProvider.GetBlock()) if clientMaxCU < relayRequest.CuSum { require.Equal(t, relayRequest.GetCuSum(), clientMaxCU) diff --git a/x/pairing/keeper/msg_server_relay_payment_test.go b/x/pairing/keeper/msg_server_relay_payment_test.go index 628427da43..c7378a3bd6 100644 --- a/x/pairing/keeper/msg_server_relay_payment_test.go +++ b/x/pairing/keeper/msg_server_relay_payment_test.go @@ -149,9 +149,9 @@ func TestRelayPaymentMemoryTransferAfterEpochChange(t *testing.T) { Provider: ts.providers[0].address.String(), ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: sessionCounter, - ChainID: ts.spec.Name, + SpecID: ts.spec.Name, CuSum: ts.spec.Apis[0].ComputeUnits * 10, - BlockHeight: int64(firstEpoch), + Epoch: int64(firstEpoch), RelayNum: 0, } @@ -216,9 +216,9 @@ func TestRelayPaymentBlockHeight(t *testing.T) { Provider: ts.providers[0].address.String(), ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(1), - ChainID: ts.spec.Name, + SpecID: ts.spec.Name, CuSum: ts.spec.Apis[0].ComputeUnits * 10, - BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight() + tt.blockTime, + Epoch: sdk.UnwrapSDKContext(ts.ctx).BlockHeight() + tt.blockTime, RelayNum: 0, } @@ -278,9 
+278,9 @@ func TestRelayPaymentOverUse(t *testing.T) { Provider: ts.providers[0].address.String(), ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(1), - ChainID: ts.spec.Name, + SpecID: ts.spec.Name, CuSum: maxcu * 2, - BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), + Epoch: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), RelayNum: 0, } @@ -334,9 +334,9 @@ func TestRelayPaymentNotUnstakingProviderForUnresponsivenessIfNoEpochInformation Provider: ts.providers[0].address.String(), ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(1), - ChainID: ts.spec.Name, + SpecID: ts.spec.Name, CuSum: ts.spec.Apis[0].ComputeUnits * 10, - BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), + Epoch: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), RelayNum: 0, UnresponsiveProviders: unresponsiveProvidersData, // create the complaint } @@ -386,9 +386,9 @@ func TestRelayPaymentUnstakingProviderForUnresponsivenessWithBadDataInput(t *tes Provider: ts.providers[0].address.String(), ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(1), - ChainID: ts.spec.Name, + SpecID: ts.spec.Name, CuSum: ts.spec.Apis[0].ComputeUnits * 10, - BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), + Epoch: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), RelayNum: 0, UnresponsiveProviders: unresponsiveProvidersData[clientIndex], // create the complaint } @@ -422,9 +422,9 @@ func TestRelayPaymentNotUnstakingProviderForUnresponsivenessBecauseOfServices(t Provider: ts.providers[1].address.String(), ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(1), - ChainID: ts.spec.Name, + SpecID: ts.spec.Name, CuSum: ts.spec.Apis[0].ComputeUnits * 10, - BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), + Epoch: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), RelayNum: 0, } @@ -446,9 +446,9 @@ func TestRelayPaymentNotUnstakingProviderForUnresponsivenessBecauseOfServices(t Provider: ts.providers[0].address.String(), ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(1), - ChainID: ts.spec.Name, + SpecID: ts.spec.Name, CuSum: ts.spec.Apis[0].ComputeUnits * 10, - BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), + Epoch: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), RelayNum: 0, UnresponsiveProviders: unresponsiveProvidersData, // create the complaint } @@ -484,9 +484,9 @@ func TestRelayPaymentDoubleSpending(t *testing.T) { Provider: ts.providers[0].address.String(), ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(1), - ChainID: ts.spec.Name, + SpecID: ts.spec.Name, CuSum: cuSum, - BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), + Epoch: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), RelayNum: 0, } @@ -529,9 +529,9 @@ func TestRelayPaymentDataModification(t *testing.T) { Provider: ts.providers[0].address.String(), ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(1), - ChainID: ts.spec.Name, + SpecID: ts.spec.Name, CuSum: ts.spec.Apis[0].ComputeUnits * 10, - BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), + Epoch: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), RelayNum: 0, } @@ -581,9 +581,9 @@ func TestRelayPaymentDelayedDoubleSpending(t *testing.T) { Provider: ts.providers[0].address.String(), ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(1), - ChainID: ts.spec.Name, + SpecID: ts.spec.Name, CuSum: ts.spec.Apis[0].ComputeUnits * 10, - BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), + Epoch: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), RelayNum: 0, } @@ -666,9 +666,9 @@ func TestRelayPaymentOldEpochs(t *testing.T) { 
Provider: ts.providers[0].address.String(), ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: tt.sid, - ChainID: ts.spec.Name, + SpecID: ts.spec.Name, CuSum: cuSum, - BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight() - int64(blocksInEpoch)*tt.epoch, + Epoch: sdk.UnwrapSDKContext(ts.ctx).BlockHeight() - int64(blocksInEpoch)*tt.epoch, RelayNum: 0, } @@ -735,9 +735,9 @@ func TestRelayPaymentQoS(t *testing.T) { Provider: ts.providers[0].address.String(), ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(1), - ChainID: ts.spec.Name, + SpecID: ts.spec.Name, CuSum: cuSum, - BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), + Epoch: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), RelayNum: 0, QoSReport: QoS, } @@ -820,9 +820,9 @@ func TestRelayPaymentDataReliability(t *testing.T) { Provider: ts.providers[0].address.String(), ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(1), - ChainID: ts.spec.Name, + SpecID: ts.spec.Name, CuSum: cuSum, - BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), + Epoch: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), RelayNum: 0, QoSReport: QoS, } @@ -852,7 +852,7 @@ func TestRelayPaymentDataReliability(t *testing.T) { index0, err = utils.GetIndexForVrf(vrfRes0, uint32(ts.keepers.Pairing.ServicersToPairCountRaw(sdk.UnwrapSDKContext(ts.ctx))), ts.spec.ReliabilityThreshold) require.Nil(t, err) - providers, err = ts.keepers.Pairing.GetPairingForClient(sdk.UnwrapSDKContext(ts.ctx), relaySession.ChainID, ts.clients[0].address) + providers, err = ts.keepers.Pairing.GetPairingForClient(sdk.UnwrapSDKContext(ts.ctx), relaySession.SpecID, ts.clients[0].address) require.Nil(t, err) if providers[index0].Address != ts.providers[0].address.String() { @@ -863,8 +863,8 @@ func TestRelayPaymentDataReliability(t *testing.T) { } vrf_res0, vrf_proof0 := utils.ProveVrfOnRelay(relayRequest.RelayData, relayReply, ts.clients[0].vrfSk, false, currentEpoch) dataReliability0 := &types.VRFData{ - ChainID: relayRequest.RelaySession.ChainID, - Epoch: relayRequest.RelaySession.BlockHeight, + ChainID: relayRequest.RelaySession.SpecID, + Epoch: relayRequest.RelaySession.Epoch, Differentiator: false, VrfValue: vrf_res0, VrfProof: vrf_proof0, @@ -908,9 +908,9 @@ func TestRelayPaymentDataReliability(t *testing.T) { Provider: providers[index0].Address, ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(1), - ChainID: ts.spec.Name, + SpecID: ts.spec.Name, CuSum: cuSum, - BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), + Epoch: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), RelayNum: 0, QoSReport: QoSDR, } @@ -962,9 +962,9 @@ func TestRelayPaymentDataReliabilityWrongProvider(t *testing.T) { Provider: ts.providers[0].address.String(), ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(1), - ChainID: ts.spec.Name, + SpecID: ts.spec.Name, CuSum: cuSum, - BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), + Epoch: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), RelayNum: 0, QoSReport: QoS, } @@ -994,7 +994,7 @@ GetWrongProvider: index0, _ = utils.GetIndexForVrf(vrfRes0, uint32(ts.keepers.Pairing.ServicersToPairCountRaw(sdk.UnwrapSDKContext(ts.ctx))), ts.spec.ReliabilityThreshold) index1, _ := utils.GetIndexForVrf(vrfRes1, uint32(ts.keepers.Pairing.ServicersToPairCountRaw(sdk.UnwrapSDKContext(ts.ctx))), ts.spec.ReliabilityThreshold) - providers, err = ts.keepers.Pairing.GetPairingForClient(sdk.UnwrapSDKContext(ts.ctx), relaySession.ChainID, ts.clients[0].address) + providers, err = 
ts.keepers.Pairing.GetPairingForClient(sdk.UnwrapSDKContext(ts.ctx), relaySession.SpecID, ts.clients[0].address) require.Nil(t, err) // two providers returned by GetIndexForVrf and the provider getting tested need 1 more to perform this test properly require.Greater(t, len(providers), 3) @@ -1018,8 +1018,8 @@ GetWrongProvider: } vrf_res0, vrf_proof0 := utils.ProveVrfOnRelay(relayRequest.RelayData, relayReply, ts.clients[0].vrfSk, false, currentEpoch) dataReliability0 := &types.VRFData{ - ChainID: relayRequest.RelaySession.ChainID, - Epoch: relayRequest.RelaySession.BlockHeight, + ChainID: relayRequest.RelaySession.SpecID, + Epoch: relayRequest.RelaySession.Epoch, Differentiator: false, VrfValue: vrf_res0, VrfProof: vrf_proof0, @@ -1036,9 +1036,9 @@ GetWrongProvider: Provider: providers[wrongProviderIndex].Address, ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(1), - ChainID: ts.spec.Name, + SpecID: ts.spec.Name, CuSum: cuSum, - BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), + Epoch: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), RelayNum: 0, QoSReport: QoSDR, } @@ -1076,9 +1076,9 @@ func TestRelayPaymentDataReliabilityBelowReliabilityThreshold(t *testing.T) { Provider: ts.providers[0].address.String(), ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(1), - ChainID: ts.spec.Name, + SpecID: ts.spec.Name, CuSum: cuSum, - BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), + Epoch: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), RelayNum: 0, QoSReport: QoS, } @@ -1106,8 +1106,8 @@ func TestRelayPaymentDataReliabilityBelowReliabilityThreshold(t *testing.T) { require.Equal(t, index1, int64(-1)) vrf_res0, vrf_proof0 := utils.ProveVrfOnRelay(relayRequest.RelayData, relayReply, ts.clients[0].vrfSk, false, currentEpoch) dataReliability0 := &types.VRFData{ - ChainID: relayRequest.RelaySession.ChainID, - Epoch: relayRequest.RelaySession.BlockHeight, + ChainID: relayRequest.RelaySession.SpecID, + Epoch: relayRequest.RelaySession.Epoch, Differentiator: false, VrfValue: vrf_res0, VrfProof: vrf_proof0, @@ -1126,9 +1126,9 @@ func TestRelayPaymentDataReliabilityBelowReliabilityThreshold(t *testing.T) { Provider: provider.address.String(), ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(1), - ChainID: ts.spec.Name, + SpecID: ts.spec.Name, CuSum: cuSum, - BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), + Epoch: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), RelayNum: 0, QoSReport: QoSDR, } @@ -1165,9 +1165,9 @@ func TestRelayPaymentDataReliabilityDifferentClientSign(t *testing.T) { Provider: ts.providers[0].address.String(), ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(1), - ChainID: ts.spec.Name, + SpecID: ts.spec.Name, CuSum: cuSum, - BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), + Epoch: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), RelayNum: 0, QoSReport: QoS, } @@ -1193,7 +1193,7 @@ func TestRelayPaymentDataReliabilityDifferentClientSign(t *testing.T) { index0, _ = utils.GetIndexForVrf(vrfRes0, uint32(ts.keepers.Pairing.ServicersToPairCountRaw(sdk.UnwrapSDKContext(ts.ctx))), ts.spec.ReliabilityThreshold) - providers, err = ts.keepers.Pairing.GetPairingForClient(sdk.UnwrapSDKContext(ts.ctx), relaySession.ChainID, ts.clients[0].address) + providers, err = ts.keepers.Pairing.GetPairingForClient(sdk.UnwrapSDKContext(ts.ctx), relaySession.SpecID, ts.clients[0].address) require.Nil(t, err) if providers[index0].Address != ts.providers[0].address.String() { @@ -1204,8 +1204,8 @@ func TestRelayPaymentDataReliabilityDifferentClientSign(t 
*testing.T) { vrf_res0, vrf_proof0 := utils.ProveVrfOnRelay(relayRequest.RelayData, relayReply, ts.clients[0].vrfSk, false, currentEpoch) dataReliability0 := &types.VRFData{ - ChainID: relayRequest.RelaySession.ChainID, - Epoch: relayRequest.RelaySession.BlockHeight, + ChainID: relayRequest.RelaySession.SpecID, + Epoch: relayRequest.RelaySession.Epoch, Differentiator: false, VrfValue: vrf_res0, VrfProof: vrf_proof0, @@ -1222,9 +1222,9 @@ func TestRelayPaymentDataReliabilityDifferentClientSign(t *testing.T) { Provider: providers[index0].Address, ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(1), - ChainID: ts.spec.Name, + SpecID: ts.spec.Name, CuSum: cuSum, - BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), + Epoch: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), RelayNum: 0, QoSReport: QoSDR, } @@ -1261,9 +1261,9 @@ func TestRelayPaymentDataReliabilityDoubleSpendDifferentEpoch(t *testing.T) { Provider: ts.providers[0].address.String(), ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(1), - ChainID: ts.spec.Name, + SpecID: ts.spec.Name, CuSum: cuSum, - BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), + Epoch: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), RelayNum: 0, QoSReport: QoS, } @@ -1289,7 +1289,7 @@ func TestRelayPaymentDataReliabilityDoubleSpendDifferentEpoch(t *testing.T) { index0, _ = utils.GetIndexForVrf(vrfRes0, uint32(ts.keepers.Pairing.ServicersToPairCountRaw(sdk.UnwrapSDKContext(ts.ctx))), ts.spec.ReliabilityThreshold) - providers, err = ts.keepers.Pairing.GetPairingForClient(sdk.UnwrapSDKContext(ts.ctx), relaySession.ChainID, ts.clients[0].address) + providers, err = ts.keepers.Pairing.GetPairingForClient(sdk.UnwrapSDKContext(ts.ctx), relaySession.SpecID, ts.clients[0].address) require.Nil(t, err) if providers[index0].Address != ts.providers[0].address.String() { @@ -1301,8 +1301,8 @@ func TestRelayPaymentDataReliabilityDoubleSpendDifferentEpoch(t *testing.T) { vrf_res0, vrf_proof0 := utils.ProveVrfOnRelay(relayRequest.RelayData, relayReply, ts.clients[0].vrfSk, false, currentEpoch) dataReliability0 := &types.VRFData{ - ChainID: relayRequest.RelaySession.ChainID, - Epoch: relayRequest.RelaySession.BlockHeight, + ChainID: relayRequest.RelaySession.SpecID, + Epoch: relayRequest.RelaySession.Epoch, Differentiator: false, VrfValue: vrf_res0, VrfProof: vrf_proof0, @@ -1319,9 +1319,9 @@ func TestRelayPaymentDataReliabilityDoubleSpendDifferentEpoch(t *testing.T) { Provider: providers[index0].Address, ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(1), - ChainID: ts.spec.Name, + SpecID: ts.spec.Name, CuSum: cuSum, - BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), + Epoch: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), RelayNum: 0, QoSReport: QoSDR, } @@ -1338,7 +1338,7 @@ func TestRelayPaymentDataReliabilityDoubleSpendDifferentEpoch(t *testing.T) { // Advance Epoch and set block height and resign the tx ts.ctx = testkeeper.AdvanceEpoch(ts.ctx, ts.keepers) - relayRequestWithDataReliability0.BlockHeight = sdk.UnwrapSDKContext(ts.ctx).BlockHeight() + relayRequestWithDataReliability0.Epoch = sdk.UnwrapSDKContext(ts.ctx).BlockHeight() relayRequestWithDataReliability0.SessionId = uint64(2) relayRequestWithDataReliability0.Sig, err = sigs.SignRelay(ts.clients[0].secretKey, *relayRequestWithDataReliability0) require.Nil(t, err) @@ -1389,9 +1389,9 @@ func TestEpochPaymentDeletion(t *testing.T) { Provider: ts.providers[0].address.String(), ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(1), - ChainID: ts.spec.Name, + SpecID: 
ts.spec.Name, CuSum: ts.spec.Apis[0].ComputeUnits * 10, - BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), + Epoch: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), RelayNum: 0, } diff --git a/x/pairing/keeper/unresponsive_provider_test.go b/x/pairing/keeper/unresponsive_provider_test.go index df02355ba2..46d02e902a 100644 --- a/x/pairing/keeper/unresponsive_provider_test.go +++ b/x/pairing/keeper/unresponsive_provider_test.go @@ -60,9 +60,9 @@ func TestUnresponsivenessStressTest(t *testing.T) { Provider: providerAddress, ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(0), - ChainID: ts.spec.Name, + SpecID: ts.spec.Name, CuSum: ts.spec.Apis[0].ComputeUnits*10 + uint64(clientIndex), - BlockHeight: relayEpoch, + Epoch: relayEpoch, RelayNum: 0, UnresponsiveProviders: unresponsiveDataList[clientIndex%unresponsiveProviderAmount], // create the complaint } @@ -146,9 +146,9 @@ func TestUnstakingProviderForUnresponsiveness(t *testing.T) { Provider: ts.providers[0].address.String(), ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(0), - ChainID: ts.spec.Name, + SpecID: ts.spec.Name, CuSum: ts.spec.Apis[0].ComputeUnits*10 + uint64(clientIndex), - BlockHeight: relayEpoch, + Epoch: relayEpoch, RelayNum: 0, UnresponsiveProviders: unresponsiveProvidersData, // create the complaint } @@ -245,9 +245,9 @@ func TestUnstakingProviderForUnresponsivenessContinueComplainingAfterUnstake(t * Provider: ts.providers[0].address.String(), ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(0), - ChainID: ts.spec.Name, + SpecID: ts.spec.Name, CuSum: ts.spec.Apis[0].ComputeUnits * 10, - BlockHeight: relayEpoch, + Epoch: relayEpoch, RelayNum: 0, UnresponsiveProviders: unresponsiveProvidersData, // create the complaint } @@ -293,9 +293,9 @@ func TestUnstakingProviderForUnresponsivenessContinueComplainingAfterUnstake(t * Provider: ts.providers[0].address.String(), ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(2), - ChainID: ts.spec.Name, + SpecID: ts.spec.Name, CuSum: ts.spec.Apis[0].ComputeUnits * 10, - BlockHeight: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), + Epoch: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), RelayNum: 0, UnresponsiveProviders: unresponsiveProvidersData, // create the complaint } diff --git a/x/pairing/types/relay.pb.go b/x/pairing/types/relay.pb.go index a3dbfb2d04..2268e763cc 100644 --- a/x/pairing/types/relay.pb.go +++ b/x/pairing/types/relay.pb.go @@ -30,14 +30,14 @@ var _ = math.Inf const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package type RelaySession struct { - ChainID string `protobuf:"bytes,1,opt,name=chainID,proto3" json:"chainID,omitempty"` + SpecID string `protobuf:"bytes,1,opt,name=specID,proto3" json:"specID,omitempty"` ContentHash []byte `protobuf:"bytes,2,opt,name=content_hash,json=contentHash,proto3" json:"content_hash,omitempty"` SessionId uint64 `protobuf:"varint,3,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"` CuSum uint64 `protobuf:"varint,4,opt,name=cu_sum,json=cuSum,proto3" json:"cu_sum,omitempty"` Provider string `protobuf:"bytes,5,opt,name=provider,proto3" json:"provider,omitempty"` RelayNum uint64 `protobuf:"varint,6,opt,name=relay_num,json=relayNum,proto3" json:"relay_num,omitempty"` QoSReport *QualityOfServiceReport `protobuf:"bytes,7,opt,name=QoSReport,proto3" json:"QoSReport,omitempty"` - BlockHeight int64 `protobuf:"varint,8,opt,name=block_height,json=blockHeight,proto3" json:"block_height,omitempty"` + Epoch int64 `protobuf:"varint,8,opt,name=epoch,proto3" 
json:"epoch,omitempty"` UnresponsiveProviders []byte `protobuf:"bytes,9,opt,name=unresponsive_providers,json=unresponsiveProviders,proto3" json:"unresponsive_providers,omitempty"` LavaChainId string `protobuf:"bytes,10,opt,name=lava_chain_id,json=lavaChainId,proto3" json:"lava_chain_id,omitempty"` Sig []byte `protobuf:"bytes,11,opt,name=sig,proto3" json:"sig,omitempty"` @@ -77,9 +77,9 @@ func (m *RelaySession) XXX_DiscardUnknown() { var xxx_messageInfo_RelaySession proto.InternalMessageInfo -func (m *RelaySession) GetChainID() string { +func (m *RelaySession) GetSpecID() string { if m != nil { - return m.ChainID + return m.SpecID } return "" } @@ -126,9 +126,9 @@ func (m *RelaySession) GetQoSReport() *QualityOfServiceReport { return nil } -func (m *RelaySession) GetBlockHeight() int64 { +func (m *RelaySession) GetEpoch() int64 { if m != nil { - return m.BlockHeight + return m.Epoch } return 0 } @@ -617,71 +617,71 @@ func init() { func init() { proto.RegisterFile("pairing/relay.proto", fileDescriptor_10cd1bfeb9978acf) } var fileDescriptor_10cd1bfeb9978acf = []byte{ - // 1021 bytes of a gzipped FileDescriptorProto + // 1017 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0x41, 0x6f, 0x1b, 0x45, - 0x14, 0xce, 0xc6, 0x76, 0x6c, 0xbf, 0xdd, 0xa4, 0xd5, 0x34, 0x69, 0x97, 0x94, 0x3a, 0x66, 0x91, - 0xd2, 0x1c, 0xc0, 0x86, 0x20, 0x38, 0x20, 0x21, 0x51, 0x93, 0xd2, 0x04, 0x21, 0x9a, 0x8c, 0xa1, - 0x87, 0x5c, 0x56, 0xe3, 0xf5, 0x78, 0x3d, 0x64, 0xbd, 0xb3, 0x9d, 0xd9, 0xb5, 0x30, 0x07, 0x7e, - 0x43, 0x7f, 0x0b, 0x07, 0x7e, 0x00, 0x12, 0x52, 0xb9, 0xf5, 0x88, 0x38, 0x44, 0x28, 0x39, 0x70, - 0xe7, 0x17, 0xa0, 0x79, 0xbb, 0xeb, 0xb8, 0x91, 0x15, 0xa9, 0x52, 0x4f, 0x9e, 0xf9, 0xde, 0x9b, - 0x6f, 0xe6, 0x7d, 0xdf, 0xdb, 0x27, 0xc3, 0x9d, 0x84, 0x09, 0x25, 0xe2, 0xb0, 0xab, 0x78, 0xc4, - 0x66, 0x9d, 0x44, 0xc9, 0x54, 0x92, 0xcd, 0x88, 0x4d, 0x59, 0xcc, 0xd3, 0x8e, 0xf9, 0xed, 0x14, - 0x19, 0xdb, 0x9b, 0xa1, 0x0c, 0x25, 0x26, 0x74, 0xcd, 0x2a, 0xcf, 0xf5, 0xfe, 0xac, 0x80, 0x43, - 0xcd, 0xd9, 0x3e, 0xd7, 0x5a, 0xc8, 0x98, 0xb8, 0x50, 0x0f, 0xc6, 0x4c, 0xc4, 0x47, 0x07, 0xae, - 0xd5, 0xb6, 0xf6, 0x9a, 0xb4, 0xdc, 0x92, 0xf7, 0xc0, 0x09, 0x64, 0x9c, 0xf2, 0x38, 0xf5, 0xc7, - 0x4c, 0x8f, 0xdd, 0xd5, 0xb6, 0xb5, 0xe7, 0x50, 0xbb, 0xc0, 0x0e, 0x99, 0x1e, 0x93, 0x07, 0x00, - 0x3a, 0xe7, 0xf1, 0xc5, 0xd0, 0xad, 0xb4, 0xad, 0xbd, 0x2a, 0x6d, 0x16, 0xc8, 0xd1, 0x90, 0x6c, - 0xc1, 0x5a, 0x90, 0xf9, 0x3a, 0x9b, 0xb8, 0x55, 0x0c, 0xd5, 0x82, 0xac, 0x9f, 0x4d, 0xc8, 0x36, - 0x34, 0x12, 0x25, 0xa7, 0x62, 0xc8, 0x95, 0x5b, 0xc3, 0x3b, 0xe7, 0x7b, 0x72, 0x1f, 0x9a, 0x58, - 0x9a, 0x1f, 0x67, 0x13, 0x77, 0x0d, 0x4f, 0x35, 0x10, 0xf8, 0x2e, 0x9b, 0x90, 0x6f, 0xa0, 0x79, - 0x22, 0xfb, 0x94, 0x27, 0x52, 0xa5, 0x6e, 0xbd, 0x6d, 0xed, 0xd9, 0xfb, 0x1f, 0x74, 0x96, 0x15, - 0xdf, 0x39, 0xc9, 0x58, 0x24, 0xd2, 0xd9, 0xd3, 0x51, 0x9f, 0xab, 0xa9, 0x08, 0x78, 0x7e, 0x86, - 0x5e, 0x1d, 0x37, 0xd5, 0x0d, 0x22, 0x19, 0x9c, 0xf9, 0x63, 0x2e, 0xc2, 0x71, 0xea, 0x36, 0xda, - 0xd6, 0x5e, 0x85, 0xda, 0x88, 0x1d, 0x22, 0x44, 0x3e, 0x85, 0xbb, 0x59, 0xac, 0xb8, 0x4e, 0x64, - 0xac, 0xc5, 0x94, 0xfb, 0xe5, 0x23, 0xb5, 0xdb, 0x44, 0x29, 0xb6, 0x16, 0xa3, 0xc7, 0x65, 0x90, - 0x78, 0xb0, 0x6e, 0xde, 0xe2, 0xa3, 0x8e, 0x46, 0x17, 0xc0, 0x1a, 0x6d, 0x03, 0x7e, 0x85, 0xda, - 0x0e, 0xc9, 0x6d, 0xa8, 0x68, 0x11, 0xba, 0x36, 0xf2, 0x98, 0x25, 0xf9, 0x18, 0x6a, 0x03, 0x36, - 0x0c, 0xb9, 0xeb, 0x60, 0x5d, 0xf7, 0x97, 0xd7, 0xd5, 0x33, 0x29, 0x34, 0xcf, 0xf4, 0xfe, 0xb0, - 0xe0, 0x36, 0x7a, 0x79, 
0xac, 0xc4, 0x94, 0xa5, 0xfc, 0x80, 0xa5, 0x8c, 0x3c, 0x84, 0x5b, 0x81, - 0x8c, 0x63, 0x1e, 0xa4, 0xc6, 0x95, 0x74, 0x96, 0xf0, 0xc2, 0xd7, 0x8d, 0x2b, 0xf8, 0xfb, 0x59, - 0xc2, 0xc9, 0x3d, 0xa8, 0xb3, 0x44, 0xf8, 0x99, 0x8a, 0xd0, 0xd9, 0x26, 0x5d, 0x63, 0x89, 0xf8, - 0x41, 0x45, 0x84, 0x40, 0x75, 0xc8, 0x52, 0x86, 0x76, 0x3a, 0x14, 0xd7, 0xe4, 0x7d, 0x58, 0x57, - 0xfc, 0x79, 0xc6, 0x75, 0xea, 0xa3, 0x42, 0x68, 0x68, 0x85, 0x3a, 0x05, 0xd8, 0x33, 0x18, 0xf1, - 0xc0, 0x61, 0x89, 0x38, 0x8a, 0x53, 0xae, 0x46, 0x2c, 0xe0, 0x85, 0xb7, 0xaf, 0x61, 0x86, 0x5c, - 0xb3, 0x28, 0x45, 0x6b, 0x1d, 0x8a, 0x6b, 0xef, 0x5f, 0xab, 0xe8, 0x49, 0x9a, 0xb3, 0x91, 0x27, - 0xe6, 0x36, 0xd3, 0x04, 0x45, 0x2b, 0x61, 0x05, 0xf6, 0xbe, 0xb7, 0x5c, 0x93, 0xc5, 0x76, 0x36, - 0x2f, 0x5a, 0x68, 0xee, 0xc7, 0x00, 0x39, 0x11, 0x16, 0xb4, 0x8a, 0x2c, 0xbb, 0x37, 0xb0, 0x2c, - 0x08, 0x49, 0xf3, 0x3e, 0x44, 0x4d, 0x9f, 0xc0, 0x2d, 0x84, 0x78, 0x24, 0xd8, 0x40, 0x98, 0xc6, - 0x42, 0x71, 0xec, 0xfd, 0x07, 0xcb, 0xb9, 0x9e, 0xd1, 0xaf, 0x31, 0xff, 0xfa, 0x29, 0xef, 0x17, - 0xa8, 0xa1, 0x83, 0x46, 0xcf, 0x20, 0xf3, 0x59, 0x14, 0xc9, 0x80, 0xa5, 0x65, 0x85, 0x55, 0xea, - 0x04, 0xd9, 0xa3, 0x39, 0x46, 0x36, 0xa1, 0xc6, 0x13, 0x19, 0xe4, 0x5f, 0x5e, 0x85, 0xe6, 0x1b, - 0xf2, 0x0e, 0x34, 0xd0, 0x7e, 0x3f, 0x39, 0x2b, 0x2c, 0xaa, 0xe3, 0xfe, 0xf8, 0x8c, 0xec, 0x80, - 0x9d, 0x28, 0xf9, 0x23, 0x0f, 0x52, 0xdf, 0x74, 0x57, 0x15, 0xa3, 0x50, 0x40, 0x7d, 0x11, 0x7a, - 0xbf, 0x5b, 0x00, 0x85, 0xd2, 0x49, 0x34, 0x9b, 0x3b, 0x6d, 0x2d, 0x38, 0x5d, 0x74, 0xe6, 0xea, - 0x55, 0x67, 0x6e, 0x42, 0x2d, 0x96, 0x71, 0xc0, 0xf1, 0xb6, 0x75, 0x9a, 0x6f, 0xcc, 0xf7, 0x13, - 0xb1, 0xf4, 0x7a, 0x43, 0xd8, 0x39, 0x96, 0xf7, 0xc3, 0x67, 0x70, 0x6f, 0x24, 0x62, 0x16, 0x89, - 0x9f, 0xf9, 0x30, 0xcf, 0xd2, 0x38, 0x49, 0xb8, 0xc6, 0xd6, 0x70, 0xe8, 0xd6, 0x3c, 0x8c, 0x07, - 0xf4, 0x21, 0x06, 0x71, 0xaa, 0x88, 0xb0, 0x38, 0x51, 0x74, 0x4a, 0x53, 0x8b, 0x30, 0x4f, 0xf2, - 0x5e, 0xac, 0x42, 0xbd, 0x50, 0xf8, 0x86, 0xe9, 0xb5, 0x5c, 0xbc, 0x5d, 0xd8, 0x18, 0x8a, 0xd1, - 0x88, 0x2b, 0x1e, 0xa7, 0x82, 0xa5, 0x52, 0x61, 0x51, 0x0d, 0x7a, 0x0d, 0x35, 0x63, 0x68, 0xaa, - 0x46, 0xfe, 0x94, 0x45, 0x19, 0x2f, 0x74, 0x6c, 0x4c, 0xd5, 0xe8, 0x99, 0xd9, 0x97, 0xc1, 0x44, - 0x49, 0x39, 0x2a, 0x2a, 0x31, 0xc1, 0x63, 0xb3, 0x37, 0xba, 0x94, 0x73, 0x02, 0x4d, 0xc8, 0x9f, - 0x6f, 0x97, 0x58, 0x5f, 0x84, 0xa4, 0x0d, 0x36, 0x8b, 0x22, 0xf3, 0x7e, 0x53, 0x30, 0x0e, 0x32, - 0x87, 0x2e, 0x42, 0xe4, 0x5d, 0x68, 0x3e, 0xcf, 0xb8, 0x9a, 0x61, 0xbc, 0x91, 0x0b, 0x30, 0x07, - 0x4a, 0x8b, 0x9a, 0x73, 0x8b, 0xbc, 0x5f, 0x57, 0xe1, 0xee, 0xf2, 0x91, 0x47, 0x4e, 0xa1, 0x6e, - 0x3c, 0x89, 0x83, 0x59, 0xae, 0x50, 0xef, 0xcb, 0x97, 0xe7, 0x3b, 0x2b, 0x7f, 0x9f, 0xef, 0xec, - 0x86, 0x22, 0x1d, 0x67, 0x83, 0x4e, 0x20, 0x27, 0xdd, 0x40, 0xea, 0x89, 0xd4, 0xc5, 0xcf, 0x87, - 0x7a, 0x78, 0xd6, 0x35, 0x83, 0x43, 0x77, 0x0e, 0x78, 0xf0, 0xdf, 0xf9, 0xce, 0xc6, 0x8c, 0x4d, - 0xa2, 0xcf, 0xbd, 0x6f, 0x73, 0x1a, 0x8f, 0x96, 0x84, 0x44, 0x80, 0xc3, 0xa6, 0x4c, 0x44, 0xe5, - 0x47, 0x81, 0x73, 0xa4, 0xf7, 0xf8, 0x8d, 0x2f, 0xb8, 0x93, 0x5f, 0xb0, 0xc8, 0xe5, 0xd1, 0xd7, - 0xa8, 0xc9, 0x09, 0x54, 0xf5, 0x2c, 0x0e, 0xd0, 0xae, 0x66, 0xef, 0x8b, 0x37, 0xbe, 0xc2, 0xce, - 0xaf, 0x30, 0x1c, 0x1e, 0x45, 0xaa, 0xfd, 0xdf, 0x2c, 0xa8, 0xe3, 0xc7, 0xc0, 0x15, 0x79, 0x0a, - 0x35, 0x5c, 0x92, 0x9b, 0x66, 0x4c, 0x31, 0x9e, 0xb6, 0xdb, 0x37, 0xe6, 0x24, 0xd1, 0xcc, 0x5b, - 0x21, 0xa7, 0xb0, 0x91, 0xcf, 0xa5, 0x6c, 0xa0, 0x03, 0x25, 0x06, 0xfc, 0x6d, 0x31, 0x7f, 0x64, - 0xf5, 0x1e, 0xbd, 0xbc, 0x68, 0x59, 0xaf, 0x2e, 
0x5a, 0xd6, 0x3f, 0x17, 0x2d, 0xeb, 0xc5, 0x65, - 0x6b, 0xe5, 0xd5, 0x65, 0x6b, 0xe5, 0xaf, 0xcb, 0xd6, 0xca, 0xe9, 0xc3, 0x05, 0x3d, 0x0a, 0x26, - 0xfc, 0xed, 0xfe, 0xd4, 0x2d, 0xff, 0x38, 0xa0, 0x28, 0x83, 0x35, 0xfc, 0x37, 0xf0, 0xc9, 0xff, - 0x01, 0x00, 0x00, 0xff, 0xff, 0xc5, 0x73, 0x75, 0xdd, 0x50, 0x08, 0x00, 0x00, + 0x14, 0xce, 0x3a, 0x76, 0x6c, 0xbf, 0xdd, 0xa6, 0xd5, 0x34, 0x4d, 0x97, 0x94, 0x3a, 0x66, 0x91, + 0xd2, 0x1c, 0xc0, 0x86, 0x20, 0x38, 0x20, 0x21, 0x51, 0x93, 0xd2, 0x06, 0x21, 0x9a, 0x8c, 0xa1, + 0x87, 0x5c, 0x56, 0xe3, 0xf1, 0xd8, 0x1e, 0xb2, 0xde, 0xd9, 0xce, 0xec, 0x5a, 0x98, 0x03, 0xbf, + 0xa1, 0xbf, 0x85, 0x03, 0x07, 0x8e, 0x48, 0x48, 0x3d, 0xf6, 0x88, 0x38, 0x44, 0x28, 0x39, 0x70, + 0xe7, 0x17, 0xa0, 0x79, 0xbb, 0xeb, 0xb8, 0x51, 0x88, 0x54, 0x89, 0xd3, 0xce, 0xfb, 0xde, 0x9b, + 0xef, 0xed, 0x7b, 0xdf, 0xdb, 0x67, 0xc3, 0xed, 0x84, 0x49, 0x2d, 0xe3, 0x71, 0x57, 0x8b, 0x88, + 0xcd, 0x3b, 0x89, 0x56, 0xa9, 0x22, 0x1b, 0x11, 0x9b, 0xb1, 0x58, 0xa4, 0x1d, 0xfb, 0xec, 0x14, + 0x11, 0x5b, 0x1b, 0x63, 0x35, 0x56, 0x18, 0xd0, 0xb5, 0xa7, 0x3c, 0x36, 0xf8, 0x75, 0x15, 0x3c, + 0x6a, 0xef, 0xf6, 0x85, 0x31, 0x52, 0xc5, 0x64, 0x13, 0xd6, 0x4c, 0x22, 0xf8, 0xc1, 0xbe, 0xef, + 0xb4, 0x9d, 0xdd, 0x26, 0x2d, 0x2c, 0xf2, 0x0e, 0x78, 0x5c, 0xc5, 0xa9, 0x88, 0xd3, 0x70, 0xc2, + 0xcc, 0xc4, 0xaf, 0xb4, 0x9d, 0x5d, 0x8f, 0xba, 0x05, 0xf6, 0x84, 0x99, 0x09, 0xb9, 0x0f, 0x60, + 0x72, 0x96, 0x50, 0x0e, 0xfd, 0xd5, 0xb6, 0xb3, 0x5b, 0xa5, 0xcd, 0x02, 0x39, 0x18, 0x92, 0x3b, + 0xb0, 0xc6, 0xb3, 0xd0, 0x64, 0x53, 0xbf, 0x8a, 0xae, 0x1a, 0xcf, 0xfa, 0xd9, 0x94, 0x6c, 0x41, + 0x23, 0xd1, 0x6a, 0x26, 0x87, 0x42, 0xfb, 0x35, 0x4c, 0xb9, 0xb0, 0xc9, 0x3d, 0x68, 0x62, 0x61, + 0x61, 0x9c, 0x4d, 0xfd, 0x35, 0xbc, 0xd5, 0x40, 0xe0, 0x9b, 0x6c, 0x4a, 0xbe, 0x82, 0xe6, 0x91, + 0xea, 0x53, 0x91, 0x28, 0x9d, 0xfa, 0xf5, 0xb6, 0xb3, 0xeb, 0xee, 0xbd, 0xd7, 0xb9, 0xaa, 0xf4, + 0xce, 0x51, 0xc6, 0x22, 0x99, 0xce, 0x9f, 0x8e, 0xfa, 0x42, 0xcf, 0x24, 0x17, 0xf9, 0x1d, 0x7a, + 0x71, 0x9d, 0x6c, 0x40, 0x4d, 0x24, 0x8a, 0x4f, 0xfc, 0x46, 0xdb, 0xd9, 0x5d, 0xa5, 0xb9, 0x41, + 0x3e, 0x86, 0xcd, 0x2c, 0xd6, 0xc2, 0x24, 0x2a, 0x36, 0x72, 0x26, 0xc2, 0xf2, 0xbd, 0x8c, 0xdf, + 0xc4, 0xea, 0xef, 0x2c, 0x7b, 0x0f, 0x4b, 0x27, 0x09, 0xe0, 0x86, 0x4d, 0x1f, 0xf2, 0x09, 0x93, + 0xd8, 0x0a, 0xc0, 0xb2, 0x5c, 0x0b, 0x7e, 0x61, 0xb1, 0x83, 0x21, 0xb9, 0x05, 0xab, 0x46, 0x8e, + 0x7d, 0x17, 0x79, 0xec, 0x91, 0x7c, 0x08, 0xb5, 0x01, 0x1b, 0x8e, 0x85, 0xef, 0x61, 0x29, 0xf7, + 0xae, 0x2e, 0xa5, 0x67, 0x43, 0x68, 0x1e, 0x19, 0xfc, 0xee, 0xc0, 0x2d, 0x14, 0xef, 0x50, 0xcb, + 0x19, 0x4b, 0xc5, 0x3e, 0x4b, 0x19, 0x79, 0x00, 0x37, 0xb9, 0x8a, 0x63, 0xc1, 0x53, 0x2b, 0x44, + 0x3a, 0x4f, 0x44, 0xa1, 0xe4, 0xfa, 0x05, 0xfc, 0xed, 0x3c, 0x11, 0xe4, 0x2e, 0xd4, 0x59, 0x22, + 0xc3, 0x4c, 0x47, 0x28, 0x66, 0x93, 0xae, 0xb1, 0x44, 0x7e, 0xa7, 0x23, 0x42, 0xa0, 0x3a, 0x64, + 0x29, 0x43, 0x05, 0x3d, 0x8a, 0x67, 0xf2, 0x2e, 0xdc, 0xd0, 0xe2, 0x79, 0x26, 0x4c, 0x1a, 0x0e, + 0x22, 0xc5, 0x4f, 0x50, 0xc3, 0x55, 0xea, 0x15, 0x60, 0xcf, 0x62, 0x24, 0x00, 0x8f, 0x25, 0xf2, + 0x20, 0x4e, 0x85, 0x1e, 0x31, 0x2e, 0x0a, 0x39, 0x5f, 0xc3, 0x2c, 0xb9, 0x61, 0x51, 0x8a, 0x6a, + 0x7a, 0x14, 0xcf, 0xc1, 0xdf, 0x4e, 0x31, 0x84, 0x34, 0x67, 0x23, 0x8f, 0x6d, 0x36, 0xab, 0x7b, + 0x31, 0x3d, 0x58, 0x81, 0xbb, 0x17, 0x5c, 0xdd, 0x93, 0xe5, 0xf9, 0xb5, 0x6f, 0xb4, 0x34, 0xcd, + 0x8f, 0x00, 0x72, 0x22, 0x2c, 0xa8, 0x82, 0x2c, 0x3b, 0xd7, 0xb0, 0x2c, 0x35, 0x92, 0xe6, 0xa3, + 0x87, 0x3d, 0x7d, 0x0c, 0x37, 0x11, 0x12, 0x91, 0x64, 0x03, 0x69, 0x67, 0x09, 0x9b, 0xe3, 
0xee, + 0xdd, 0xbf, 0x9a, 0xeb, 0x19, 0xfd, 0x12, 0xe3, 0x2f, 0xdf, 0x0a, 0x7e, 0x82, 0x1a, 0x2a, 0x68, + 0xfb, 0xc9, 0xb3, 0x90, 0x45, 0x91, 0xe2, 0x2c, 0x2d, 0x2b, 0xac, 0x52, 0x8f, 0x67, 0x0f, 0x17, + 0xd8, 0xc5, 0x54, 0x56, 0x96, 0xa7, 0xf2, 0x2d, 0x68, 0xa0, 0xfc, 0x61, 0x72, 0x52, 0x48, 0x54, + 0x47, 0xfb, 0xf0, 0x84, 0x6c, 0x83, 0x9b, 0x68, 0xf5, 0xbd, 0xe0, 0x69, 0x68, 0xa7, 0xab, 0x8a, + 0x5e, 0x28, 0xa0, 0xbe, 0x1c, 0x07, 0xbf, 0x39, 0x00, 0x45, 0xa7, 0x93, 0x68, 0xbe, 0x50, 0xda, + 0x59, 0x52, 0xba, 0x98, 0xcc, 0xca, 0xc5, 0x64, 0x6e, 0x40, 0x2d, 0x56, 0x31, 0x17, 0x98, 0xed, + 0x06, 0xcd, 0x0d, 0xbb, 0x10, 0x22, 0x96, 0x5e, 0x1e, 0x08, 0x37, 0xc7, 0xf2, 0x79, 0xf8, 0x04, + 0xee, 0x8e, 0x64, 0xcc, 0x22, 0xf9, 0xa3, 0x18, 0xe6, 0x51, 0x06, 0x97, 0x87, 0x30, 0x38, 0x1a, + 0x1e, 0xbd, 0xb3, 0x70, 0xe3, 0x05, 0xf3, 0x04, 0x9d, 0xb8, 0x48, 0xe4, 0xb8, 0xb8, 0x51, 0x4c, + 0x4a, 0xd3, 0xc8, 0x71, 0x1e, 0x14, 0xbc, 0xa8, 0x40, 0xbd, 0xe8, 0x30, 0xf1, 0xa1, 0x8e, 0x9f, + 0xd9, 0x62, 0x5f, 0x95, 0xe6, 0x7f, 0x34, 0x6f, 0x07, 0xd6, 0x87, 0x72, 0x34, 0x12, 0x5a, 0xc4, + 0xa9, 0x64, 0xa9, 0xd2, 0x58, 0x54, 0x83, 0x5e, 0x42, 0xed, 0xe6, 0x99, 0xe9, 0x51, 0x38, 0x63, + 0x51, 0x26, 0x8a, 0x3e, 0x36, 0x66, 0x7a, 0xf4, 0xcc, 0xda, 0xa5, 0x33, 0xd1, 0x4a, 0x8d, 0x8a, + 0x4a, 0xac, 0xf3, 0xd0, 0xda, 0xb6, 0x2f, 0xe5, 0x9e, 0x40, 0x11, 0xf2, 0xd7, 0x77, 0x4b, 0xac, + 0x2f, 0xc7, 0xa4, 0x0d, 0x2e, 0x8b, 0x22, 0xfb, 0xfe, 0xb6, 0x60, 0xdc, 0x5d, 0x1e, 0x5d, 0x86, + 0xc8, 0xdb, 0xd0, 0x7c, 0x9e, 0x09, 0x3d, 0x47, 0x7f, 0x23, 0x6f, 0xc0, 0x02, 0x28, 0x25, 0x6a, + 0x2e, 0x24, 0x0a, 0x7e, 0xae, 0xc0, 0xe6, 0xd5, 0x5b, 0x8e, 0x1c, 0x43, 0xdd, 0x6a, 0x12, 0xf3, + 0x79, 0xde, 0xa1, 0xde, 0xe7, 0x2f, 0x4f, 0xb7, 0x57, 0xfe, 0x3c, 0xdd, 0xde, 0x19, 0xcb, 0x74, + 0x92, 0x0d, 0x3a, 0x5c, 0x4d, 0xbb, 0x5c, 0x99, 0xa9, 0x32, 0xc5, 0xe3, 0x7d, 0x33, 0x3c, 0xe9, + 0xda, 0xc5, 0x61, 0x3a, 0xfb, 0x82, 0xff, 0x73, 0xba, 0xbd, 0x3e, 0x67, 0xd3, 0xe8, 0xd3, 0xe0, + 0xeb, 0x9c, 0x26, 0xa0, 0x25, 0x21, 0x91, 0xe0, 0xb1, 0x19, 0x93, 0x51, 0xf9, 0x51, 0xe0, 0x1e, + 0xe9, 0x3d, 0x7a, 0xe3, 0x04, 0xb7, 0xf3, 0x04, 0xcb, 0x5c, 0x01, 0x7d, 0x8d, 0x9a, 0x1c, 0x41, + 0xd5, 0xcc, 0x63, 0x8e, 0x72, 0x35, 0x7b, 0x9f, 0xbd, 0x71, 0x0a, 0x37, 0x4f, 0x61, 0x39, 0x02, + 0x8a, 0x54, 0x7b, 0xbf, 0x38, 0x50, 0xc7, 0x8f, 0x41, 0x68, 0xf2, 0x14, 0x6a, 0x78, 0x24, 0xd7, + 0xed, 0x98, 0x62, 0x3d, 0x6d, 0xb5, 0xaf, 0x8d, 0x49, 0xa2, 0x79, 0xb0, 0x42, 0x8e, 0x61, 0x3d, + 0xdf, 0x4b, 0xd9, 0xc0, 0x70, 0x2d, 0x07, 0xe2, 0xff, 0x62, 0xfe, 0xc0, 0xe9, 0x3d, 0x7c, 0x79, + 0xd6, 0x72, 0x5e, 0x9d, 0xb5, 0x9c, 0xbf, 0xce, 0x5a, 0xce, 0x8b, 0xf3, 0xd6, 0xca, 0xab, 0xf3, + 0xd6, 0xca, 0x1f, 0xe7, 0xad, 0x95, 0xe3, 0x07, 0x4b, 0xfd, 0x28, 0x98, 0xf0, 0xd9, 0xfd, 0xa1, + 0x5b, 0xfe, 0x53, 0xc0, 0xa6, 0x0c, 0xd6, 0xf0, 0xe7, 0xff, 0xa3, 0x7f, 0x03, 0x00, 0x00, 0xff, + 0xff, 0x34, 0xe5, 0xf5, 0x3c, 0x41, 0x08, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
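// Illustrative sketch, not part of the generated code above: after this rename a
// relay session is populated with SpecID where ChainID used to go and Epoch where
// BlockHeight used to go. The field values mirror the test hunks earlier in this
// patch; the variable and package names are assumed from those hunks.
relaySession := &types.RelaySession{
    Provider:    ts.providers[0].address.String(),
    ContentHash: []byte(ts.spec.Apis[0].Name),
    SessionId:   uint64(1),
    SpecID:      ts.spec.Name,                               // formerly ChainID
    CuSum:       ts.spec.Apis[0].ComputeUnits * 10,
    Epoch:       sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), // formerly BlockHeight
    RelayNum:    0,
}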
@@ -881,8 +881,8 @@ func (m *RelaySession) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x4a } - if m.BlockHeight != 0 { - i = encodeVarintRelay(dAtA, i, uint64(m.BlockHeight)) + if m.Epoch != 0 { + i = encodeVarintRelay(dAtA, i, uint64(m.Epoch)) i-- dAtA[i] = 0x40 } @@ -927,10 +927,10 @@ func (m *RelaySession) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x12 } - if len(m.ChainID) > 0 { - i -= len(m.ChainID) - copy(dAtA[i:], m.ChainID) - i = encodeVarintRelay(dAtA, i, uint64(len(m.ChainID))) + if len(m.SpecID) > 0 { + i -= len(m.SpecID) + copy(dAtA[i:], m.SpecID) + i = encodeVarintRelay(dAtA, i, uint64(len(m.SpecID))) i-- dAtA[i] = 0xa } @@ -1324,7 +1324,7 @@ func (m *RelaySession) Size() (n int) { } var l int _ = l - l = len(m.ChainID) + l = len(m.SpecID) if l > 0 { n += 1 + l + sovRelay(uint64(l)) } @@ -1349,8 +1349,8 @@ func (m *RelaySession) Size() (n int) { l = m.QoSReport.Size() n += 1 + l + sovRelay(uint64(l)) } - if m.BlockHeight != 0 { - n += 1 + sovRelay(uint64(m.BlockHeight)) + if m.Epoch != 0 { + n += 1 + sovRelay(uint64(m.Epoch)) } l = len(m.UnresponsiveProviders) if l > 0 { @@ -1573,7 +1573,7 @@ func (m *RelaySession) Unmarshal(dAtA []byte) error { switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ChainID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SpecID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1601,7 +1601,7 @@ func (m *RelaySession) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ChainID = string(dAtA[iNdEx:postIndex]) + m.SpecID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -1764,9 +1764,9 @@ func (m *RelaySession) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 8: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field BlockHeight", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Epoch", wireType) } - m.BlockHeight = 0 + m.Epoch = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRelay @@ -1776,7 +1776,7 @@ func (m *RelaySession) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.BlockHeight |= int64(b&0x7F) << shift + m.Epoch |= int64(b&0x7F) << shift if b < 0x80 { break } From 796f8bacb21f63b2760f997b56f157ef0d3a4a3e Mon Sep 17 00:00:00 2001 From: omer mishael Date: Mon, 13 Mar 2023 18:18:21 +0200 Subject: [PATCH 101/123] added some fields to badge --- proto/pairing/relay.proto | 3 +- x/pairing/types/relay.pb.go | 174 +++++++++++++++++++++++------------- 2 files changed, 115 insertions(+), 62 deletions(-) diff --git a/proto/pairing/relay.proto b/proto/pairing/relay.proto index db2d54bfe5..38b7644903 100644 --- a/proto/pairing/relay.proto +++ b/proto/pairing/relay.proto @@ -43,7 +43,8 @@ message Badge { uint64 cu_allocation =1; int64 epoch = 2; bytes badge_pk = 3; - bytes project_sig = 4; + string spec_id = 4; + bytes project_sig = 5; } message RelayReply { diff --git a/x/pairing/types/relay.pb.go b/x/pairing/types/relay.pb.go index 2268e763cc..b4efacca75 100644 --- a/x/pairing/types/relay.pb.go +++ b/x/pairing/types/relay.pb.go @@ -309,7 +309,8 @@ type Badge struct { CuAllocation uint64 `protobuf:"varint,1,opt,name=cu_allocation,json=cuAllocation,proto3" json:"cu_allocation,omitempty"` Epoch int64 `protobuf:"varint,2,opt,name=epoch,proto3" json:"epoch,omitempty"` BadgePk []byte `protobuf:"bytes,3,opt,name=badge_pk,json=badgePk,proto3" json:"badge_pk,omitempty"` - 
ProjectSig []byte `protobuf:"bytes,4,opt,name=project_sig,json=projectSig,proto3" json:"project_sig,omitempty"` + SpecId string `protobuf:"bytes,4,opt,name=spec_id,json=specId,proto3" json:"spec_id,omitempty"` + ProjectSig []byte `protobuf:"bytes,5,opt,name=project_sig,json=projectSig,proto3" json:"project_sig,omitempty"` } func (m *Badge) Reset() { *m = Badge{} } @@ -366,6 +367,13 @@ func (m *Badge) GetBadgePk() []byte { return nil } +func (m *Badge) GetSpecId() string { + if m != nil { + return m.SpecId + } + return "" +} + func (m *Badge) GetProjectSig() []byte { if m != nil { return m.ProjectSig @@ -617,71 +625,72 @@ func init() { func init() { proto.RegisterFile("pairing/relay.proto", fileDescriptor_10cd1bfeb9978acf) } var fileDescriptor_10cd1bfeb9978acf = []byte{ - // 1017 bytes of a gzipped FileDescriptorProto + // 1029 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0x41, 0x6f, 0x1b, 0x45, 0x14, 0xce, 0x3a, 0x76, 0x6c, 0xbf, 0xdd, 0xa6, 0xd5, 0x34, 0x4d, 0x97, 0x94, 0x3a, 0x66, 0x91, 0xd2, 0x1c, 0xc0, 0x86, 0x20, 0x38, 0x20, 0x21, 0x51, 0x93, 0xd2, 0x06, 0x21, 0x9a, 0x8c, 0xa1, - 0x87, 0x5c, 0x56, 0xe3, 0xf1, 0xd8, 0x1e, 0xb2, 0xde, 0xd9, 0xce, 0xec, 0x5a, 0x98, 0x03, 0xbf, - 0xa1, 0xbf, 0x85, 0x03, 0x07, 0x8e, 0x48, 0x48, 0x3d, 0xf6, 0x88, 0x38, 0x44, 0x28, 0x39, 0x70, - 0xe7, 0x17, 0xa0, 0x79, 0xbb, 0xeb, 0xb8, 0x51, 0x88, 0x54, 0x89, 0xd3, 0xce, 0xfb, 0xde, 0x9b, - 0xef, 0xed, 0x7b, 0xdf, 0xdb, 0x67, 0xc3, 0xed, 0x84, 0x49, 0x2d, 0xe3, 0x71, 0x57, 0x8b, 0x88, - 0xcd, 0x3b, 0x89, 0x56, 0xa9, 0x22, 0x1b, 0x11, 0x9b, 0xb1, 0x58, 0xa4, 0x1d, 0xfb, 0xec, 0x14, - 0x11, 0x5b, 0x1b, 0x63, 0x35, 0x56, 0x18, 0xd0, 0xb5, 0xa7, 0x3c, 0x36, 0xf8, 0x75, 0x15, 0x3c, - 0x6a, 0xef, 0xf6, 0x85, 0x31, 0x52, 0xc5, 0x64, 0x13, 0xd6, 0x4c, 0x22, 0xf8, 0xc1, 0xbe, 0xef, - 0xb4, 0x9d, 0xdd, 0x26, 0x2d, 0x2c, 0xf2, 0x0e, 0x78, 0x5c, 0xc5, 0xa9, 0x88, 0xd3, 0x70, 0xc2, + 0x87, 0x5c, 0x56, 0xe3, 0xf1, 0xd8, 0x1e, 0xb2, 0xde, 0xd9, 0xce, 0xec, 0x5a, 0x98, 0x5f, 0xd1, + 0x1b, 0xff, 0x83, 0x03, 0x07, 0x8e, 0x48, 0x48, 0x3d, 0xf6, 0x88, 0x38, 0x44, 0x28, 0x39, 0x70, + 0xe7, 0x17, 0xa0, 0x79, 0xbb, 0xeb, 0xb8, 0x51, 0x88, 0x54, 0x89, 0xd3, 0xce, 0x7c, 0xf3, 0xe6, + 0x9b, 0x79, 0xef, 0xfb, 0xe6, 0x69, 0xe1, 0x76, 0xc2, 0xa4, 0x96, 0xf1, 0xb8, 0xab, 0x45, 0xc4, + 0xe6, 0x9d, 0x44, 0xab, 0x54, 0x91, 0x8d, 0x88, 0xcd, 0x58, 0x2c, 0xd2, 0x8e, 0xfd, 0x76, 0x8a, + 0x88, 0xad, 0x8d, 0xb1, 0x1a, 0x2b, 0x0c, 0xe8, 0xda, 0x51, 0x1e, 0x1b, 0xfc, 0xba, 0x0a, 0x1e, + 0xb5, 0x7b, 0xfb, 0xc2, 0x18, 0xa9, 0x62, 0xb2, 0x09, 0x6b, 0x26, 0x11, 0xfc, 0x60, 0xdf, 0x77, + 0xda, 0xce, 0x6e, 0x93, 0x16, 0x33, 0xf2, 0x0e, 0x78, 0x5c, 0xc5, 0xa9, 0x88, 0xd3, 0x70, 0xc2, 0xcc, 0xc4, 0xaf, 0xb4, 0x9d, 0x5d, 0x8f, 0xba, 0x05, 0xf6, 0x84, 0x99, 0x09, 0xb9, 0x0f, 0x60, 0x72, 0x96, 0x50, 0x0e, 0xfd, 0xd5, 0xb6, 0xb3, 0x5b, 0xa5, 0xcd, 0x02, 0x39, 0x18, 0x92, 0x3b, - 0xb0, 0xc6, 0xb3, 0xd0, 0x64, 0x53, 0xbf, 0x8a, 0xae, 0x1a, 0xcf, 0xfa, 0xd9, 0x94, 0x6c, 0x41, - 0x23, 0xd1, 0x6a, 0x26, 0x87, 0x42, 0xfb, 0x35, 0x4c, 0xb9, 0xb0, 0xc9, 0x3d, 0x68, 0x62, 0x61, - 0x61, 0x9c, 0x4d, 0xfd, 0x35, 0xbc, 0xd5, 0x40, 0xe0, 0x9b, 0x6c, 0x4a, 0xbe, 0x82, 0xe6, 0x91, - 0xea, 0x53, 0x91, 0x28, 0x9d, 0xfa, 0xf5, 0xb6, 0xb3, 0xeb, 0xee, 0xbd, 0xd7, 0xb9, 0xaa, 0xf4, - 0xce, 0x51, 0xc6, 0x22, 0x99, 0xce, 0x9f, 0x8e, 0xfa, 0x42, 0xcf, 0x24, 0x17, 0xf9, 0x1d, 0x7a, - 0x71, 0x9d, 0x6c, 0x40, 0x4d, 0x24, 0x8a, 0x4f, 0xfc, 0x46, 0xdb, 0xd9, 0x5d, 0xa5, 0xb9, 0x41, - 0x3e, 0x86, 0xcd, 0x2c, 0xd6, 0xc2, 
0x24, 0x2a, 0x36, 0x72, 0x26, 0xc2, 0xf2, 0xbd, 0x8c, 0xdf, - 0xc4, 0xea, 0xef, 0x2c, 0x7b, 0x0f, 0x4b, 0x27, 0x09, 0xe0, 0x86, 0x4d, 0x1f, 0xf2, 0x09, 0x93, - 0xd8, 0x0a, 0xc0, 0xb2, 0x5c, 0x0b, 0x7e, 0x61, 0xb1, 0x83, 0x21, 0xb9, 0x05, 0xab, 0x46, 0x8e, - 0x7d, 0x17, 0x79, 0xec, 0x91, 0x7c, 0x08, 0xb5, 0x01, 0x1b, 0x8e, 0x85, 0xef, 0x61, 0x29, 0xf7, - 0xae, 0x2e, 0xa5, 0x67, 0x43, 0x68, 0x1e, 0x19, 0xfc, 0xee, 0xc0, 0x2d, 0x14, 0xef, 0x50, 0xcb, - 0x19, 0x4b, 0xc5, 0x3e, 0x4b, 0x19, 0x79, 0x00, 0x37, 0xb9, 0x8a, 0x63, 0xc1, 0x53, 0x2b, 0x44, - 0x3a, 0x4f, 0x44, 0xa1, 0xe4, 0xfa, 0x05, 0xfc, 0xed, 0x3c, 0x11, 0xe4, 0x2e, 0xd4, 0x59, 0x22, - 0xc3, 0x4c, 0x47, 0x28, 0x66, 0x93, 0xae, 0xb1, 0x44, 0x7e, 0xa7, 0x23, 0x42, 0xa0, 0x3a, 0x64, - 0x29, 0x43, 0x05, 0x3d, 0x8a, 0x67, 0xf2, 0x2e, 0xdc, 0xd0, 0xe2, 0x79, 0x26, 0x4c, 0x1a, 0x0e, - 0x22, 0xc5, 0x4f, 0x50, 0xc3, 0x55, 0xea, 0x15, 0x60, 0xcf, 0x62, 0x24, 0x00, 0x8f, 0x25, 0xf2, - 0x20, 0x4e, 0x85, 0x1e, 0x31, 0x2e, 0x0a, 0x39, 0x5f, 0xc3, 0x2c, 0xb9, 0x61, 0x51, 0x8a, 0x6a, - 0x7a, 0x14, 0xcf, 0xc1, 0xdf, 0x4e, 0x31, 0x84, 0x34, 0x67, 0x23, 0x8f, 0x6d, 0x36, 0xab, 0x7b, - 0x31, 0x3d, 0x58, 0x81, 0xbb, 0x17, 0x5c, 0xdd, 0x93, 0xe5, 0xf9, 0xb5, 0x6f, 0xb4, 0x34, 0xcd, - 0x8f, 0x00, 0x72, 0x22, 0x2c, 0xa8, 0x82, 0x2c, 0x3b, 0xd7, 0xb0, 0x2c, 0x35, 0x92, 0xe6, 0xa3, - 0x87, 0x3d, 0x7d, 0x0c, 0x37, 0x11, 0x12, 0x91, 0x64, 0x03, 0x69, 0x67, 0x09, 0x9b, 0xe3, 0xee, - 0xdd, 0xbf, 0x9a, 0xeb, 0x19, 0xfd, 0x12, 0xe3, 0x2f, 0xdf, 0x0a, 0x7e, 0x82, 0x1a, 0x2a, 0x68, - 0xfb, 0xc9, 0xb3, 0x90, 0x45, 0x91, 0xe2, 0x2c, 0x2d, 0x2b, 0xac, 0x52, 0x8f, 0x67, 0x0f, 0x17, - 0xd8, 0xc5, 0x54, 0x56, 0x96, 0xa7, 0xf2, 0x2d, 0x68, 0xa0, 0xfc, 0x61, 0x72, 0x52, 0x48, 0x54, - 0x47, 0xfb, 0xf0, 0x84, 0x6c, 0x83, 0x9b, 0x68, 0xf5, 0xbd, 0xe0, 0x69, 0x68, 0xa7, 0xab, 0x8a, - 0x5e, 0x28, 0xa0, 0xbe, 0x1c, 0x07, 0xbf, 0x39, 0x00, 0x45, 0xa7, 0x93, 0x68, 0xbe, 0x50, 0xda, - 0x59, 0x52, 0xba, 0x98, 0xcc, 0xca, 0xc5, 0x64, 0x6e, 0x40, 0x2d, 0x56, 0x31, 0x17, 0x98, 0xed, - 0x06, 0xcd, 0x0d, 0xbb, 0x10, 0x22, 0x96, 0x5e, 0x1e, 0x08, 0x37, 0xc7, 0xf2, 0x79, 0xf8, 0x04, - 0xee, 0x8e, 0x64, 0xcc, 0x22, 0xf9, 0xa3, 0x18, 0xe6, 0x51, 0x06, 0x97, 0x87, 0x30, 0x38, 0x1a, - 0x1e, 0xbd, 0xb3, 0x70, 0xe3, 0x05, 0xf3, 0x04, 0x9d, 0xb8, 0x48, 0xe4, 0xb8, 0xb8, 0x51, 0x4c, - 0x4a, 0xd3, 0xc8, 0x71, 0x1e, 0x14, 0xbc, 0xa8, 0x40, 0xbd, 0xe8, 0x30, 0xf1, 0xa1, 0x8e, 0x9f, - 0xd9, 0x62, 0x5f, 0x95, 0xe6, 0x7f, 0x34, 0x6f, 0x07, 0xd6, 0x87, 0x72, 0x34, 0x12, 0x5a, 0xc4, - 0xa9, 0x64, 0xa9, 0xd2, 0x58, 0x54, 0x83, 0x5e, 0x42, 0xed, 0xe6, 0x99, 0xe9, 0x51, 0x38, 0x63, - 0x51, 0x26, 0x8a, 0x3e, 0x36, 0x66, 0x7a, 0xf4, 0xcc, 0xda, 0xa5, 0x33, 0xd1, 0x4a, 0x8d, 0x8a, - 0x4a, 0xac, 0xf3, 0xd0, 0xda, 0xb6, 0x2f, 0xe5, 0x9e, 0x40, 0x11, 0xf2, 0xd7, 0x77, 0x4b, 0xac, - 0x2f, 0xc7, 0xa4, 0x0d, 0x2e, 0x8b, 0x22, 0xfb, 0xfe, 0xb6, 0x60, 0xdc, 0x5d, 0x1e, 0x5d, 0x86, - 0xc8, 0xdb, 0xd0, 0x7c, 0x9e, 0x09, 0x3d, 0x47, 0x7f, 0x23, 0x6f, 0xc0, 0x02, 0x28, 0x25, 0x6a, - 0x2e, 0x24, 0x0a, 0x7e, 0xae, 0xc0, 0xe6, 0xd5, 0x5b, 0x8e, 0x1c, 0x43, 0xdd, 0x6a, 0x12, 0xf3, - 0x79, 0xde, 0xa1, 0xde, 0xe7, 0x2f, 0x4f, 0xb7, 0x57, 0xfe, 0x3c, 0xdd, 0xde, 0x19, 0xcb, 0x74, - 0x92, 0x0d, 0x3a, 0x5c, 0x4d, 0xbb, 0x5c, 0x99, 0xa9, 0x32, 0xc5, 0xe3, 0x7d, 0x33, 0x3c, 0xe9, - 0xda, 0xc5, 0x61, 0x3a, 0xfb, 0x82, 0xff, 0x73, 0xba, 0xbd, 0x3e, 0x67, 0xd3, 0xe8, 0xd3, 0xe0, - 0xeb, 0x9c, 0x26, 0xa0, 0x25, 0x21, 0x91, 0xe0, 0xb1, 0x19, 0x93, 0x51, 0xf9, 0x51, 0xe0, 0x1e, - 0xe9, 0x3d, 0x7a, 0xe3, 0x04, 0xb7, 0xf3, 0x04, 0xcb, 0x5c, 
0x01, 0x7d, 0x8d, 0x9a, 0x1c, 0x41, - 0xd5, 0xcc, 0x63, 0x8e, 0x72, 0x35, 0x7b, 0x9f, 0xbd, 0x71, 0x0a, 0x37, 0x4f, 0x61, 0x39, 0x02, - 0x8a, 0x54, 0x7b, 0xbf, 0x38, 0x50, 0xc7, 0x8f, 0x41, 0x68, 0xf2, 0x14, 0x6a, 0x78, 0x24, 0xd7, - 0xed, 0x98, 0x62, 0x3d, 0x6d, 0xb5, 0xaf, 0x8d, 0x49, 0xa2, 0x79, 0xb0, 0x42, 0x8e, 0x61, 0x3d, - 0xdf, 0x4b, 0xd9, 0xc0, 0x70, 0x2d, 0x07, 0xe2, 0xff, 0x62, 0xfe, 0xc0, 0xe9, 0x3d, 0x7c, 0x79, - 0xd6, 0x72, 0x5e, 0x9d, 0xb5, 0x9c, 0xbf, 0xce, 0x5a, 0xce, 0x8b, 0xf3, 0xd6, 0xca, 0xab, 0xf3, - 0xd6, 0xca, 0x1f, 0xe7, 0xad, 0x95, 0xe3, 0x07, 0x4b, 0xfd, 0x28, 0x98, 0xf0, 0xd9, 0xfd, 0xa1, - 0x5b, 0xfe, 0x53, 0xc0, 0xa6, 0x0c, 0xd6, 0xf0, 0xe7, 0xff, 0xa3, 0x7f, 0x03, 0x00, 0x00, 0xff, - 0xff, 0x34, 0xe5, 0xf5, 0x3c, 0x41, 0x08, 0x00, 0x00, + 0xb0, 0xc6, 0xb3, 0xd0, 0x64, 0x53, 0xbf, 0x8a, 0x4b, 0x35, 0x9e, 0xf5, 0xb3, 0x29, 0xd9, 0x82, + 0x46, 0xa2, 0xd5, 0x4c, 0x0e, 0x85, 0xf6, 0x6b, 0x78, 0xe4, 0x62, 0x4e, 0xee, 0x41, 0x13, 0x13, + 0x0b, 0xe3, 0x6c, 0xea, 0xaf, 0xe1, 0xae, 0x06, 0x02, 0xdf, 0x64, 0x53, 0xf2, 0x15, 0x34, 0x8f, + 0x54, 0x9f, 0x8a, 0x44, 0xe9, 0xd4, 0xaf, 0xb7, 0x9d, 0x5d, 0x77, 0xef, 0xbd, 0xce, 0x55, 0xa9, + 0x77, 0x8e, 0x32, 0x16, 0xc9, 0x74, 0xfe, 0x74, 0xd4, 0x17, 0x7a, 0x26, 0xb9, 0xc8, 0xf7, 0xd0, + 0x8b, 0xed, 0x64, 0x03, 0x6a, 0x22, 0x51, 0x7c, 0xe2, 0x37, 0xda, 0xce, 0xee, 0x2a, 0xcd, 0x27, + 0xe4, 0x63, 0xd8, 0xcc, 0x62, 0x2d, 0x4c, 0xa2, 0x62, 0x23, 0x67, 0x22, 0x2c, 0xef, 0x65, 0xfc, + 0x26, 0x66, 0x7f, 0x67, 0x79, 0xf5, 0xb0, 0x5c, 0x24, 0x01, 0xdc, 0xb0, 0xc7, 0x87, 0x7c, 0xc2, + 0x24, 0x96, 0x02, 0x30, 0x2d, 0xd7, 0x82, 0x5f, 0x58, 0xec, 0x60, 0x48, 0x6e, 0xc1, 0xaa, 0x91, + 0x63, 0xdf, 0x45, 0x1e, 0x3b, 0x24, 0x1f, 0x42, 0x6d, 0xc0, 0x86, 0x63, 0xe1, 0x7b, 0x98, 0xca, + 0xbd, 0xab, 0x53, 0xe9, 0xd9, 0x10, 0x9a, 0x47, 0x06, 0xbf, 0x3b, 0x70, 0x0b, 0xc5, 0x3b, 0xd4, + 0x72, 0xc6, 0x52, 0xb1, 0xcf, 0x52, 0x46, 0x1e, 0xc0, 0x4d, 0xae, 0xe2, 0x58, 0xf0, 0xd4, 0x0a, + 0x91, 0xce, 0x13, 0x51, 0x28, 0xb9, 0x7e, 0x01, 0x7f, 0x3b, 0x4f, 0x04, 0xb9, 0x0b, 0x75, 0x96, + 0xc8, 0x30, 0xd3, 0x11, 0x8a, 0xd9, 0xa4, 0x6b, 0x2c, 0x91, 0xdf, 0xe9, 0x88, 0x10, 0xa8, 0x0e, + 0x59, 0xca, 0x50, 0x41, 0x8f, 0xe2, 0x98, 0xbc, 0x0b, 0x37, 0xb4, 0x78, 0x9e, 0x09, 0x93, 0x86, + 0x83, 0x48, 0xf1, 0x13, 0xd4, 0x70, 0x95, 0x7a, 0x05, 0xd8, 0xb3, 0x18, 0x09, 0xc0, 0x63, 0x89, + 0x3c, 0x88, 0x53, 0xa1, 0x47, 0x8c, 0x8b, 0x42, 0xce, 0xd7, 0x30, 0x4b, 0x6e, 0x58, 0x94, 0xa2, + 0x9a, 0x1e, 0xc5, 0x71, 0xf0, 0xb7, 0x53, 0x98, 0x90, 0xe6, 0x6c, 0xe4, 0xb1, 0x3d, 0xcd, 0xea, + 0x5e, 0xb8, 0x07, 0x33, 0x70, 0xf7, 0x82, 0xab, 0x6b, 0xb2, 0xec, 0x5f, 0x7b, 0xa3, 0x25, 0x37, + 0x3f, 0x02, 0xc8, 0x89, 0x30, 0xa1, 0x0a, 0xb2, 0xec, 0x5c, 0xc3, 0xb2, 0x54, 0x48, 0x9a, 0x5b, + 0x0f, 0x6b, 0xfa, 0x18, 0x6e, 0x22, 0x24, 0x22, 0xc9, 0x06, 0xd2, 0x7a, 0x09, 0x8b, 0xe3, 0xee, + 0xdd, 0xbf, 0x9a, 0xeb, 0x19, 0xfd, 0x12, 0xe3, 0x2f, 0xef, 0x0a, 0x7e, 0x72, 0xa0, 0x86, 0x12, + 0xda, 0x82, 0xf2, 0x2c, 0x64, 0x51, 0xa4, 0x38, 0x4b, 0xcb, 0x14, 0xab, 0xd4, 0xe3, 0xd9, 0xc3, + 0x05, 0x76, 0x61, 0xcb, 0xca, 0xb2, 0x2d, 0xdf, 0x82, 0x06, 0xea, 0x1f, 0x26, 0x27, 0x85, 0x46, + 0x75, 0x9c, 0x1f, 0x9e, 0x58, 0x4d, 0xed, 0x7b, 0xb5, 0xa6, 0xab, 0x2e, 0x3d, 0xdf, 0x21, 0xd9, + 0x06, 0x37, 0xd1, 0xea, 0x7b, 0xc1, 0xd3, 0xd0, 0xfa, 0xae, 0x86, 0xdb, 0xa0, 0x80, 0xfa, 0x72, + 0x1c, 0xfc, 0xe6, 0x00, 0x14, 0x1a, 0x24, 0xd1, 0x7c, 0xe1, 0x01, 0x67, 0xc9, 0x03, 0x85, 0x67, + 0x2b, 0x17, 0x9e, 0xdd, 0x80, 0x5a, 0xac, 0x62, 0x2e, 0xf0, 0x1a, 0x37, 0x68, 0x3e, 0xb1, 0xad, + 0x22, 0x62, 0xe9, 0x65, 0xab, 
0xb8, 0x39, 0x96, 0x3b, 0xe5, 0x13, 0xb8, 0x3b, 0x92, 0x31, 0x8b, + 0xe4, 0x8f, 0x62, 0x98, 0x47, 0x19, 0x6c, 0x2b, 0xc2, 0x14, 0x57, 0xbb, 0xb3, 0x58, 0xc6, 0x0d, + 0xe6, 0x09, 0x2e, 0x62, 0x8b, 0x91, 0xe3, 0x62, 0x47, 0xe1, 0xa1, 0xa6, 0x91, 0xe3, 0x3c, 0x28, + 0x78, 0x51, 0x81, 0x7a, 0x51, 0x7b, 0xe2, 0x43, 0x1d, 0x1f, 0xe0, 0xa2, 0x93, 0x95, 0xd3, 0xff, + 0xa8, 0xea, 0x0e, 0xac, 0x0f, 0xe5, 0x68, 0x24, 0xb4, 0x88, 0x53, 0xc9, 0x52, 0xa5, 0x31, 0xa9, + 0x06, 0xbd, 0x84, 0xda, 0x9e, 0x34, 0xd3, 0xa3, 0x70, 0xc6, 0xa2, 0x4c, 0x60, 0x6a, 0x1e, 0x6d, + 0xcc, 0xf4, 0xe8, 0x99, 0x9d, 0x97, 0x8b, 0x89, 0x56, 0x6a, 0x54, 0x64, 0x62, 0x17, 0x0f, 0xed, + 0xdc, 0xd6, 0xa5, 0xec, 0x20, 0x28, 0x42, 0x7e, 0x7d, 0xb7, 0xc4, 0xfa, 0x72, 0x4c, 0xda, 0xe0, + 0xb2, 0x28, 0xb2, 0xf7, 0xb7, 0x09, 0x63, 0x57, 0xf3, 0xe8, 0x32, 0x44, 0xde, 0x86, 0xe6, 0xf3, + 0x4c, 0xe8, 0x39, 0xae, 0x37, 0xf2, 0x02, 0x2c, 0x80, 0x52, 0xa2, 0xe6, 0x42, 0xa2, 0xe0, 0xe7, + 0x0a, 0x6c, 0x5e, 0xdd, 0xff, 0xc8, 0x31, 0xd4, 0xad, 0x26, 0x31, 0x9f, 0xe7, 0x15, 0xea, 0x7d, + 0xfe, 0xf2, 0x74, 0x7b, 0xe5, 0xcf, 0xd3, 0xed, 0x9d, 0xb1, 0x4c, 0x27, 0xd9, 0xa0, 0xc3, 0xd5, + 0xb4, 0xcb, 0x95, 0x99, 0x2a, 0x53, 0x7c, 0xde, 0x37, 0xc3, 0x93, 0xae, 0x6d, 0x29, 0xa6, 0xb3, + 0x2f, 0xf8, 0x3f, 0xa7, 0xdb, 0xeb, 0x73, 0x36, 0x8d, 0x3e, 0x0d, 0xbe, 0xce, 0x69, 0x02, 0x5a, + 0x12, 0x12, 0x09, 0x1e, 0x9b, 0x31, 0x19, 0x95, 0xcf, 0x05, 0x3b, 0x4c, 0xef, 0xd1, 0x1b, 0x1f, + 0x70, 0x3b, 0x3f, 0x60, 0x99, 0x2b, 0xa0, 0xaf, 0x51, 0x93, 0x23, 0xa8, 0x9a, 0x79, 0xcc, 0x51, + 0xae, 0x66, 0xef, 0xb3, 0x37, 0x3e, 0xc2, 0xcd, 0x8f, 0xb0, 0x1c, 0x01, 0x45, 0xaa, 0xbd, 0x5f, + 0x1c, 0xa8, 0xe3, 0x63, 0x10, 0x9a, 0x3c, 0x85, 0x1a, 0x0e, 0xc9, 0x75, 0xdd, 0xa7, 0x68, 0x5c, + 0x5b, 0xed, 0x6b, 0x63, 0x92, 0x68, 0x1e, 0xac, 0x90, 0x63, 0x58, 0xcf, 0x3b, 0x56, 0x36, 0x30, + 0x5c, 0xcb, 0x81, 0xf8, 0xbf, 0x98, 0x3f, 0x70, 0x7a, 0x0f, 0x5f, 0x9e, 0xb5, 0x9c, 0x57, 0x67, + 0x2d, 0xe7, 0xaf, 0xb3, 0x96, 0xf3, 0xe2, 0xbc, 0xb5, 0xf2, 0xea, 0xbc, 0xb5, 0xf2, 0xc7, 0x79, + 0x6b, 0xe5, 0xf8, 0xc1, 0x52, 0x3d, 0x0a, 0x26, 0xfc, 0x76, 0x7f, 0xe8, 0x96, 0xff, 0x10, 0x58, + 0x94, 0xc1, 0x1a, 0xfe, 0x18, 0x7c, 0xf4, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd8, 0x4d, 0x5d, + 0xac, 0x5b, 0x08, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
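// Illustrative sketch, not part of the generated code above: with spec_id on wire
// tag 4 a badge now records which spec it was granted for, and project_sig moves
// to tag 5. Field names come from the Badge struct above; epoch, badgePk and
// projectSig are placeholder values used only for this example.
badge := types.Badge{
    CuAllocation: 10000,
    Epoch:        epoch,
    BadgePk:      badgePk,
    SpecId:       "ETH1",     // new in this patch (wire tag 4)
    ProjectSig:   projectSig, // moved from tag 4 to tag 5
}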
@@ -1084,6 +1093,13 @@ func (m *Badge) MarshalToSizedBuffer(dAtA []byte) (int, error) { copy(dAtA[i:], m.ProjectSig) i = encodeVarintRelay(dAtA, i, uint64(len(m.ProjectSig))) i-- + dAtA[i] = 0x2a + } + if len(m.SpecId) > 0 { + i -= len(m.SpecId) + copy(dAtA[i:], m.SpecId) + i = encodeVarintRelay(dAtA, i, uint64(len(m.SpecId))) + i-- dAtA[i] = 0x22 } if len(m.BadgePk) > 0 { @@ -1440,6 +1456,10 @@ func (m *Badge) Size() (n int) { if l > 0 { n += 1 + l + sovRelay(uint64(l)) } + l = len(m.SpecId) + if l > 0 { + n += 1 + l + sovRelay(uint64(l)) + } l = len(m.ProjectSig) if l > 0 { n += 1 + l + sovRelay(uint64(l)) @@ -2431,6 +2451,38 @@ func (m *Badge) Unmarshal(dAtA []byte) error { } iNdEx = postIndex case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SpecId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRelay + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRelay + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRelay + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SpecId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ProjectSig", wireType) } From 2dd573dadddb9ba29751b9cbea2fa6e8c4192a0e Mon Sep 17 00:00:00 2001 From: omer mishael Date: Tue, 14 Mar 2023 07:38:06 +0200 Subject: [PATCH 102/123] added chain id verification to relays --- protocol/lavaprotocol/request_builder.go | 17 +++++++++-------- protocol/rpcconsumer/rpcconsumer.go | 7 ++++--- protocol/rpcconsumer/rpcconsumer_server.go | 8 ++++++-- protocol/rpcprovider/rpcprovider.go | 7 ++++--- protocol/rpcprovider/rpcprovider_server.go | 8 +++++++- x/pairing/keeper/msg_server_relay_payment.go | 5 ++++- 6 files changed, 34 insertions(+), 18 deletions(-) diff --git a/protocol/lavaprotocol/request_builder.go b/protocol/lavaprotocol/request_builder.go index 35c4df8cf4..82d2f181d7 100644 --- a/protocol/lavaprotocol/request_builder.go +++ b/protocol/lavaprotocol/request_builder.go @@ -53,7 +53,7 @@ func NewRelayData(connectionType string, apiUrl string, data []byte, requestBloc } } -func ConstructRelaySession(relayRequestData *pairingtypes.RelayPrivateData, chainID string, providerPublicAddress string, consumerSession *lavasession.SingleConsumerSession, epoch int64, reportedProviders []byte) *pairingtypes.RelaySession { +func ConstructRelaySession(lavaChainID string, relayRequestData *pairingtypes.RelayPrivateData, chainID string, providerPublicAddress string, consumerSession *lavasession.SingleConsumerSession, epoch int64, reportedProviders []byte) *pairingtypes.RelaySession { return &pairingtypes.RelaySession{ SpecID: chainID, ContentHash: sigs.CalculateContentHashForRelayData(relayRequestData), @@ -64,12 +64,12 @@ func ConstructRelaySession(relayRequestData *pairingtypes.RelayPrivateData, chai QoSReport: consumerSession.QoSInfo.LastQoSReport, Epoch: epoch, UnresponsiveProviders: reportedProviders, - LavaChainId: "FIXMEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE IF IM HERE ITS A BUG", + LavaChainId: lavaChainID, Sig: nil, } } -func dataReliabilityRelaySession(relayRequestData *pairingtypes.RelayPrivateData, chainID string, providerPublicAddress string, epoch int64) *pairingtypes.RelaySession { +func 
dataReliabilityRelaySession(lavaChainID string, relayRequestData *pairingtypes.RelayPrivateData, chainID string, providerPublicAddress string, epoch int64) *pairingtypes.RelaySession { return &pairingtypes.RelaySession{ SpecID: chainID, ContentHash: sigs.CalculateContentHashForRelayData(relayRequestData), @@ -80,15 +80,16 @@ func dataReliabilityRelaySession(relayRequestData *pairingtypes.RelayPrivateData QoSReport: nil, Epoch: epoch, UnresponsiveProviders: nil, - LavaChainId: "FIXMEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE IF IM HERE ITS A BUG", + LavaChainId: lavaChainID, Sig: nil, } } -func ConstructRelayRequest(ctx context.Context, privKey *btcec.PrivateKey, chainID string, relayRequestData *pairingtypes.RelayPrivateData, providerPublicAddress string, consumerSession *lavasession.SingleConsumerSession, epoch int64, reportedProviders []byte) (*pairingtypes.RelayRequest, error) { +func ConstructRelayRequest(ctx context.Context, privKey *btcec.PrivateKey, lavaChainID string, chainID string, relayRequestData *pairingtypes.RelayPrivateData, providerPublicAddress string, consumerSession *lavasession.SingleConsumerSession, epoch int64, reportedProviders []byte) (*pairingtypes.RelayRequest, error) { + relayRequest := &pairingtypes.RelayRequest{ RelayData: relayRequestData, - RelaySession: ConstructRelaySession(relayRequestData, chainID, providerPublicAddress, consumerSession, epoch, reportedProviders), + RelaySession: ConstructRelaySession(lavaChainID, relayRequestData, chainID, providerPublicAddress, consumerSession, epoch, reportedProviders), DataReliability: nil, } sig, err := sigs.SignRelay(privKey, *relayRequest.RelaySession) @@ -153,14 +154,14 @@ func NewVRFData(differentiator bool, vrf_res []byte, vrf_proof []byte, request * return dataReliability } -func ConstructDataReliabilityRelayRequest(ctx context.Context, vrfData *pairingtypes.VRFData, privKey *btcec.PrivateKey, chainID string, relayRequestData *pairingtypes.RelayPrivateData, providerPublicAddress string, epoch int64, reportedProviders []byte) (*pairingtypes.RelayRequest, error) { +func ConstructDataReliabilityRelayRequest(ctx context.Context, lavaChainID string, vrfData *pairingtypes.VRFData, privKey *btcec.PrivateKey, chainID string, relayRequestData *pairingtypes.RelayPrivateData, providerPublicAddress string, epoch int64, reportedProviders []byte) (*pairingtypes.RelayRequest, error) { if relayRequestData.RequestBlock < 0 { return nil, utils.LavaFormatError("tried to construct data reliability relay with invalid request block, need to specify exactly what block is required", nil, &map[string]string{"requested_common_data": fmt.Sprintf("%+v", relayRequestData), "epoch": strconv.FormatInt(epoch, 10), "chainID": chainID}) } relayRequest := &pairingtypes.RelayRequest{ RelayData: relayRequestData, - RelaySession: dataReliabilityRelaySession(relayRequestData, chainID, providerPublicAddress, epoch), + RelaySession: dataReliabilityRelaySession(lavaChainID, relayRequestData, chainID, providerPublicAddress, epoch), DataReliability: vrfData, } sig, err := sigs.SignRelay(privKey, *relayRequest.RelaySession) diff --git a/protocol/rpcconsumer/rpcconsumer.go b/protocol/rpcconsumer/rpcconsumer.go index 88198d1578..1678341f29 100644 --- a/protocol/rpcconsumer/rpcconsumer.go +++ b/protocol/rpcconsumer/rpcconsumer.go @@ -58,7 +58,7 @@ func (rpcc *RPCConsumer) Start(ctx context.Context, txFactory tx.Factory, client } rpcc.consumerStateTracker = consumerStateTracker rpcc.rpcConsumerServers = make(map[string]*RPCConsumerServer, len(rpcEndpoints)) - + 
lavaChainID := clientCtx.ChainID keyName, err := sigs.GetKeyName(clientCtx) if err != nil { utils.LavaFormatFatal("failed getting key name from clientCtx", err, nil) @@ -92,7 +92,7 @@ func (rpcc *RPCConsumer) Start(ctx context.Context, txFactory tx.Factory, client consumerStateTracker.RegisterFinalizationConsensusForUpdates(ctx, finalizationConsensus) rpcc.rpcConsumerServers[key] = &RPCConsumerServer{} utils.LavaFormatInfo("RPCConsumer Listening", &map[string]string{"endpoints": rpcEndpoint.String()}) - rpcc.rpcConsumerServers[key].ServeRPCRequests(ctx, rpcEndpoint, rpcc.consumerStateTracker, chainParser, finalizationConsensus, consumerSessionManager, requiredResponses, privKey, vrf_sk, cache) + rpcc.rpcConsumerServers[key].ServeRPCRequests(ctx, rpcEndpoint, rpcc.consumerStateTracker, chainParser, finalizationConsensus, consumerSessionManager, requiredResponses, privKey, vrf_sk, lavaChainID, cache) } signalChan := make(chan os.Signal, 1) @@ -208,7 +208,8 @@ rpcconsumer 127.0.0.1:3333 COS3 tendermintrpc 127.0.0.1:3334 COS3 rest `, return utils.LavaFormatError("failed to start pprof HTTP server", err, nil) } } - txFactory := tx.NewFactoryCLI(clientCtx, cmd.Flags()).WithChainID(networkChainId) + clientCtx = clientCtx.WithChainID(networkChainId) + txFactory := tx.NewFactoryCLI(clientCtx, cmd.Flags()) rpcConsumer := RPCConsumer{} requiredResponses := 1 // TODO: handle secure flag, for a majority between providers utils.LavaFormatInfo("lavad Binary Version: "+version.Version, nil) diff --git a/protocol/rpcconsumer/rpcconsumer_server.go b/protocol/rpcconsumer/rpcconsumer_server.go index e38261bdc8..fe7186b71a 100644 --- a/protocol/rpcconsumer/rpcconsumer_server.go +++ b/protocol/rpcconsumer/rpcconsumer_server.go @@ -37,6 +37,7 @@ type RPCConsumerServer struct { requiredResponses int finalizationConsensus *lavaprotocol.FinalizationConsensus VrfSk vrf.PrivateKey + lavaChainID string } type ConsumerTxSender interface { @@ -51,6 +52,7 @@ func (rpccs *RPCConsumerServer) ServeRPCRequests(ctx context.Context, listenEndp requiredResponses int, privKey *btcec.PrivateKey, vrfSk vrf.PrivateKey, + lavaChainID string, cache *performance.Cache, // optional ) (err error) { rpccs.consumerSessionManager = consumerSessionManager @@ -63,6 +65,7 @@ func (rpccs *RPCConsumerServer) ServeRPCRequests(ctx context.Context, listenEndp if err != nil { utils.LavaFormatFatal("failed creating RPCConsumer logs", err, nil) } + rpccs.lavaChainID = lavaChainID rpccs.rpcConsumerLogs = pLogs rpccs.privKey = privKey rpccs.chainParser = chainParser @@ -179,7 +182,8 @@ func (rpccs *RPCConsumerServer) sendRelayToProvider( } privKey := rpccs.privKey chainID := rpccs.listenEndpoint.ChainID - relayRequest, err := lavaprotocol.ConstructRelayRequest(ctx, privKey, chainID, relayRequestData, providerPublicAddress, singleConsumerSession, int64(epoch), reportedProviders) + lavaChainID := rpccs.lavaChainID + relayRequest, err := lavaprotocol.ConstructRelayRequest(ctx, privKey, lavaChainID, chainID, relayRequestData, providerPublicAddress, singleConsumerSession, int64(epoch), reportedProviders) if err != nil { return relayResult, err } @@ -361,7 +365,7 @@ func (rpccs *RPCConsumerServer) sendDataReliabilityRelayIfApplicable(ctx context reportedProviders = nil utils.LavaFormatError("failed reading reported providers for epoch", err, &map[string]string{"epoch": strconv.FormatInt(epoch, 10)}) } - reliabilityRequest, err := lavaprotocol.ConstructDataReliabilityRelayRequest(ctx, vrfData, rpccs.privKey, rpccs.listenEndpoint.ChainID, 
relayResult.Request.RelayData, providerAddress, epoch, reportedProviders) + reliabilityRequest, err := lavaprotocol.ConstructDataReliabilityRelayRequest(ctx, rpccs.lavaChainID, vrfData, rpccs.privKey, rpccs.listenEndpoint.ChainID, relayResult.Request.RelayData, providerAddress, epoch, reportedProviders) if err != nil { return nil, utils.LavaFormatError("failed creating data reliability relay", err, &map[string]string{"relayRequestData": fmt.Sprintf("%+v", relayResult.Request.RelayData)}) } diff --git a/protocol/rpcprovider/rpcprovider.go b/protocol/rpcprovider/rpcprovider.go index c122f9881d..ef95475ed9 100644 --- a/protocol/rpcprovider/rpcprovider.go +++ b/protocol/rpcprovider/rpcprovider.go @@ -99,7 +99,7 @@ func (rpcp *RPCProvider) Start(ctx context.Context, txFactory tx.Factory, client utils.LavaFormatFatal("failed getting private key from key name", err, &map[string]string{"keyName": keyName}) } clientKey, _ := clientCtx.Keyring.Key(keyName) - + lavaChainID := clientCtx.ChainID var addr sdk.AccAddress err = addr.Unmarshal(clientKey.GetPubKey().Address()) if err != nil { @@ -157,7 +157,7 @@ func (rpcp *RPCProvider) Start(ctx context.Context, txFactory tx.Factory, client &map[string]string{"key": key}) } rpcp.rpcProviderServers[key] = rpcProviderServer - rpcProviderServer.ServeRPCRequests(ctx, rpcProviderEndpoint, chainParser, rewardServer, providerSessionManager, reliabilityManager, privKey, cache, chainProxy, providerStateTracker, addr) + rpcProviderServer.ServeRPCRequests(ctx, rpcProviderEndpoint, chainParser, rewardServer, providerSessionManager, reliabilityManager, privKey, cache, chainProxy, providerStateTracker, addr, lavaChainID) // set up grpc listener var listener *ProviderListener @@ -292,7 +292,8 @@ rpcprovider 127.0.0.1:3333 COS3 tendermintrpc "wss://www.node-path.com:80,https: if err != nil { return err } - txFactory := tx.NewFactoryCLI(clientCtx, cmd.Flags()).WithChainID(networkChainId) + clientCtx = clientCtx.WithChainID(networkChainId) + txFactory := tx.NewFactoryCLI(clientCtx, cmd.Flags()) logLevel, err := cmd.Flags().GetString(flags.FlagLogLevel) if err != nil { utils.LavaFormatFatal("failed to read log level flag", err, nil) diff --git a/protocol/rpcprovider/rpcprovider_server.go b/protocol/rpcprovider/rpcprovider_server.go index f3128cfdb6..5d7935fb96 100644 --- a/protocol/rpcprovider/rpcprovider_server.go +++ b/protocol/rpcprovider/rpcprovider_server.go @@ -37,6 +37,7 @@ type RPCProviderServer struct { rpcProviderEndpoint *lavasession.RPCProviderEndpoint stateTracker StateTrackerInf providerAddress sdk.AccAddress + lavaChainID string } type ReliabilityManagerInf interface { @@ -68,6 +69,7 @@ func (rpcps *RPCProviderServer) ServeRPCRequests( cache *performance.Cache, chainProxy chainlib.ChainProxy, stateTracker StateTrackerInf, providerAddress sdk.AccAddress, + lavaChainID string, ) { rpcps.cache = cache rpcps.chainProxy = chainProxy @@ -79,6 +81,7 @@ func (rpcps *RPCProviderServer) ServeRPCRequests( rpcps.rpcProviderEndpoint = rpcProviderEndpoint rpcps.stateTracker = stateTracker rpcps.providerAddress = providerAddress + rpcps.lavaChainID = lavaChainID } // function used to handle relay requests from a consumer, it is called by a provider_listener by calling RegisterReceiver @@ -361,7 +364,10 @@ func (rpcps *RPCProviderServer) verifyRelayRequestMetaData(requestSession *pairi return utils.LavaFormatError("request had the wrong provider", nil, &map[string]string{"providerAddress": providerAddress, "request_provider": requestSession.Provider}) } if 
requestSession.SpecID != rpcps.rpcProviderEndpoint.ChainID { - return utils.LavaFormatError("request had the wrong chainID", nil, &map[string]string{"request_chainID": requestSession.SpecID, "chainID": rpcps.rpcProviderEndpoint.ChainID}) + return utils.LavaFormatError("request had the wrong specID", nil, &map[string]string{"request_specID": requestSession.SpecID, "chainID": rpcps.rpcProviderEndpoint.ChainID}) + } + if requestSession.LavaChainId != rpcps.lavaChainID { + return utils.LavaFormatError("request had the wrong lava chain ID", nil, &map[string]string{"request_lavaChainID": requestSession.LavaChainId, "lava chain id": rpcps.lavaChainID}) } return nil } diff --git a/x/pairing/keeper/msg_server_relay_payment.go b/x/pairing/keeper/msg_server_relay_payment.go index 082056f788..48231a6484 100644 --- a/x/pairing/keeper/msg_server_relay_payment.go +++ b/x/pairing/keeper/msg_server_relay_payment.go @@ -23,7 +23,7 @@ const ( func (k msgServer) RelayPayment(goCtx context.Context, msg *types.MsgRelayPayment) (*types.MsgRelayPaymentResponse, error) { ctx := sdk.UnwrapSDKContext(goCtx) logger := k.Logger(ctx) - + lavaChainID := ctx.BlockHeader().ChainID creator, err := sdk.AccAddressFromBech32(msg.Creator) if err != nil { return nil, err @@ -38,6 +38,9 @@ func (k msgServer) RelayPayment(goCtx context.Context, msg *types.MsgRelayPaymen } for _, relay := range msg.Relays { + if relay.LavaChainId != lavaChainID { + return errorLogAndFormat("relay_payment_lava_chain_id", map[string]string{"relay.LavaChainId": relay.LavaChainId, "expected_ChainID": lavaChainID}, "relay request for the wrong lava chain") + } if relay.Epoch > ctx.BlockHeight() { return errorLogAndFormat("relay_future_block", map[string]string{"blockheight": string(relay.Sig)}, "relay request for a block in the future") } From 48651c25a7b5700ff7bdb72b679dafef3f4ab14d Mon Sep 17 00:00:00 2001 From: Ran Mishael Date: Tue, 14 Mar 2023 18:44:13 +0100 Subject: [PATCH 103/123] Fixing VRF issue with rpcprovider --- docs/static/openapi.yml | 40 +-- proto/pairing/relay.proto | 16 +- protocol/lavaprotocol/request_builder.go | 2 + protocol/lavaprotocol/response_builder.go | 9 - protocol/lavaprotocol/reuqest_builder_test.go | 38 +++ protocol/lavasession/common.go | 1 + .../lavasession/consumer_session_manager.go | 12 +- .../consumer_session_manager_test.go | 40 +-- protocol/lavasession/consumer_types.go | 4 +- protocol/lavasession/errors.go | 34 +-- .../lavasession/provider_session_manager.go | 39 ++- .../provider_session_manager_test.go | 10 +- protocol/lavasession/provider_types.go | 13 +- .../rpcprovider/rewardserver/reward_server.go | 9 +- protocol/rpcprovider/rpcprovider_server.go | 124 ++++++---- protocol/statetracker/pairing_updater.go | 12 +- relayer/sentry/sentry.go | 32 +-- relayer/sigs/sigs.go | 12 + testutil/e2e/e2e.go | 2 +- x/pairing/keeper/msg_server_relay_payment.go | 6 +- x/pairing/keeper/pairing_test.go | 53 ++++ x/pairing/types/relay.pb.go | 233 ++++++++++++------ 22 files changed, 488 insertions(+), 253 deletions(-) create mode 100644 protocol/lavaprotocol/reuqest_builder_test.go diff --git a/docs/static/openapi.yml b/docs/static/openapi.yml index 8aeeef1c12..1dd8b05c00 100644 --- a/docs/static/openapi.yml +++ b/docs/static/openapi.yml @@ -30241,16 +30241,6 @@ paths: in: query required: false type: boolean - - name: pagination.reverse - description: >- - reverse is set to true if results are to be returned in the - descending order. 
- - - Since: cosmos-sdk 0.43 - in: query - required: false - type: boolean tags: - Query '/lavanet/lava/pairing/epoch_payments/{index}': @@ -30582,16 +30572,6 @@ paths: in: query required: false type: boolean - - name: pagination.reverse - description: >- - reverse is set to true if results are to be returned in the - descending order. - - - Since: cosmos-sdk 0.43 - in: query - required: false - type: boolean tags: - Query '/lavanet/lava/pairing/provider_payment_storage/{index}': @@ -30927,16 +30907,6 @@ paths: in: query required: false type: boolean - - name: pagination.reverse - description: >- - reverse is set to true if results are to be returned in the - descending order. - - - Since: cosmos-sdk 0.43 - in: query - required: false - type: boolean tags: - Query '/lavanet/lava/pairing/unique_payment_storage_client_provider/{index}': @@ -54236,6 +54206,11 @@ definitions: sig: type: string format: byte + chainID: + type: string + epoch: + type: string + format: int64 QoSReport: type: object properties: @@ -54274,6 +54249,11 @@ definitions: sig: type: string format: byte + chainID: + type: string + epoch: + type: string + format: int64 lavanet.lava.epochstorage.Endpoint: type: object properties: diff --git a/proto/pairing/relay.proto b/proto/pairing/relay.proto index 37c20ba8ff..4b1c5c905a 100644 --- a/proto/pairing/relay.proto +++ b/proto/pairing/relay.proto @@ -39,13 +39,15 @@ message RelayReply { } message VRFData { - bool differentiator = 1; - bytes vrf_value =2; - bytes vrf_proof =3; - bytes provider_sig = 4; - bytes allDataHash = 5; - bytes queryHash = 6; //we only need it for payment later - bytes sig =7; + string chainID = 1; + int64 epoch = 2; + bool differentiator = 3; + bytes vrf_value = 4; + bytes vrf_proof = 5; + bytes provider_sig = 6; + bytes allDataHash = 7; + bytes queryHash = 8; //we only need it for payment later + bytes sig = 9; } message QualityOfServiceReport{ diff --git a/protocol/lavaprotocol/request_builder.go b/protocol/lavaprotocol/request_builder.go index bf2202187e..1cfd25cb69 100644 --- a/protocol/lavaprotocol/request_builder.go +++ b/protocol/lavaprotocol/request_builder.go @@ -115,6 +115,8 @@ func DataReliabilityThresholdToSession(vrfs [][]byte, uniqueIdentifiers []bool, func NewVRFData(differentiator bool, vrf_res []byte, vrf_proof []byte, request *pairingtypes.RelayRequest, reply *pairingtypes.RelayReply) *pairingtypes.VRFData { dataReliability := &pairingtypes.VRFData{ + ChainID: request.ChainID, + Epoch: request.BlockHeight, Differentiator: differentiator, VrfValue: vrf_res, VrfProof: vrf_proof, diff --git a/protocol/lavaprotocol/response_builder.go b/protocol/lavaprotocol/response_builder.go index 2c85be1af8..c50a73d443 100644 --- a/protocol/lavaprotocol/response_builder.go +++ b/protocol/lavaprotocol/response_builder.go @@ -13,7 +13,6 @@ import ( conflicttypes "github.com/lavanet/lava/x/conflict/types" pairingtypes "github.com/lavanet/lava/x/pairing/types" spectypes "github.com/lavanet/lava/x/spec/types" - tenderbytes "github.com/tendermint/tendermint/libs/bytes" ) func SignRelayResponse(consumerAddress sdk.AccAddress, request pairingtypes.RelayRequest, pkey *btcSecp256k1.PrivateKey, reply *pairingtypes.RelayReply, signDataReliability bool) (*pairingtypes.RelayReply, error) { @@ -40,14 +39,6 @@ func SignRelayResponse(consumerAddress sdk.AccAddress, request pairingtypes.Rela return reply, nil } -func ExtractSignerAddress(in *pairingtypes.RelayRequest) (tenderbytes.HexBytes, error) { - pubKey, err := sigs.RecoverPubKeyFromRelay(*in) - if err != nil { - 
return nil, err - } - return pubKey.Address(), nil -} - func VerifyRelayReply(reply *pairingtypes.RelayReply, relayRequest *pairingtypes.RelayRequest, addr string) error { serverKey, err := sigs.RecoverPubKeyFromRelayReply(reply, relayRequest) if err != nil { diff --git a/protocol/lavaprotocol/reuqest_builder_test.go b/protocol/lavaprotocol/reuqest_builder_test.go new file mode 100644 index 0000000000..8914e6b921 --- /dev/null +++ b/protocol/lavaprotocol/reuqest_builder_test.go @@ -0,0 +1,38 @@ +package lavaprotocol + +import ( + "context" + "testing" + + "github.com/lavanet/lava/protocol/lavasession" + "github.com/lavanet/lava/relayer/sigs" + pairingtypes "github.com/lavanet/lava/x/pairing/types" + "github.com/stretchr/testify/require" +) + +func TestSignAndExtract(t *testing.T) { + ctx := context.Background() + sk, address := sigs.GenerateFloatingKey() + chainId := "LAV1" + epoch := int64(100) + singleConsumerSession := &lavasession.SingleConsumerSession{ + CuSum: 20, + LatestRelayCu: 10, // set by GetSession cuNeededForSession + QoSInfo: lavasession.QoSReport{LastQoSReport: &pairingtypes.QualityOfServiceReport{}}, + SessionId: 123, + Client: nil, + RelayNum: 1, + LatestBlock: epoch, + Endpoint: nil, + BlockListed: false, // if session lost sync we blacklist it. + ConsecutiveNumberOfFailures: 0, // number of times this session has failed + } + commonData := NewRelayRequestCommonData(chainId, "GET", "stub_url", []byte("stub_data"), 10, "tendermintrpc") + relay, err := ConstructRelayRequest(ctx, sk, chainId, commonData, "lava@stubProviderAddress", singleConsumerSession, epoch, []byte("stubbytes")) + require.Nil(t, err) + + // check signature + extractedConsumerAddress, err := sigs.ExtractSignerAddress(relay) + require.Nil(t, err) + require.Equal(t, extractedConsumerAddress, address) +} diff --git a/protocol/lavasession/common.go b/protocol/lavasession/common.go index ea6e67b6e6..c8ad1dc83d 100644 --- a/protocol/lavasession/common.go +++ b/protocol/lavasession/common.go @@ -18,6 +18,7 @@ const ( DataReliabilityCuSum = 0 GeolocationFlag = "geolocation" TendermintUnsubscribeAll = "unsubscribe_all" + IndexNotFound = -15 ) var AvailabilityPercentage sdk.Dec = sdk.NewDecWithPrec(5, 2) // TODO move to params pairing diff --git a/protocol/lavasession/consumer_session_manager.go b/protocol/lavasession/consumer_session_manager.go index c561084fd6..3936278b1b 100644 --- a/protocol/lavasession/consumer_session_manager.go +++ b/protocol/lavasession/consumer_session_manager.go @@ -24,7 +24,7 @@ type ConsumerSessionManager struct { currentEpoch uint64 numberOfResets uint64 // pairingAddresses for Data reliability - pairingAddresses []string // contains all addresses from the initial pairing. + pairingAddresses map[uint64]string // contains all addresses from the initial pairing. and the keys are the vrf indexes pairingAddressesLength uint64 validAddresses []string // contains all addresses that are currently valid @@ -40,7 +40,7 @@ func (csm *ConsumerSessionManager) RPCEndpoint() RPCEndpoint { } // Update the provider pairing list for the ConsumerSessionManager -func (csm *ConsumerSessionManager) UpdateAllProviders(epoch uint64, pairingList []*ConsumerSessionsWithProvider) error { +func (csm *ConsumerSessionManager) UpdateAllProviders(epoch uint64, pairingList map[uint64]*ConsumerSessionsWithProvider) error { pairingListLength := len(pairingList) csm.lock.Lock() // start by locking the class lock. 
@@ -54,7 +54,7 @@ func (csm *ConsumerSessionManager) UpdateAllProviders(epoch uint64, pairingList // Reset States // csm.validAddresses length is reset in setValidAddressesToDefaultValue - csm.pairingAddresses = make([]string, pairingListLength) + csm.pairingAddresses = make(map[uint64]string, 0) csm.addedToPurgeAndReport = make(map[string]struct{}, 0) csm.pairingAddressesLength = uint64(pairingListLength) csm.numberOfResets = 0 @@ -74,7 +74,11 @@ func (csm *ConsumerSessionManager) UpdateAllProviders(epoch uint64, pairingList func (csm *ConsumerSessionManager) setValidAddressesToDefaultValue() { csm.validAddresses = make([]string, len(csm.pairingAddresses)) - copy(csm.validAddresses, csm.pairingAddresses) + index := 0 + for _, provider := range csm.pairingAddresses { + csm.validAddresses[index] = provider + index++ + } } // reads cs.currentEpoch atomically diff --git a/protocol/lavasession/consumer_session_manager_test.go b/protocol/lavasession/consumer_session_manager_test.go index e705d59611..02b4c4177b 100644 --- a/protocol/lavasession/consumer_session_manager_test.go +++ b/protocol/lavasession/consumer_session_manager_test.go @@ -44,20 +44,20 @@ func createGRPCServer(t *testing.T) *grpc.Server { return s } -func createPairingList() []*ConsumerSessionsWithProvider { - cswpList := make([]*ConsumerSessionsWithProvider, 0) +func createPairingList(providerPrefixAddress string) map[uint64]*ConsumerSessionsWithProvider { + cswpList := make(map[uint64]*ConsumerSessionsWithProvider, 0) pairingEndpoints := make([]*Endpoint, 1) // we need a grpc server to connect to. so we use the public rpc endpoint for now. pairingEndpoints[0] = &Endpoint{NetworkAddress: grpcListener, Enabled: true, Client: nil, ConnectionRefusals: 0} for p := 0; p < numberOfProviders; p++ { - cswpList = append(cswpList, &ConsumerSessionsWithProvider{ - PublicLavaAddress: "provider" + strconv.Itoa(p), + cswpList[uint64(p)] = &ConsumerSessionsWithProvider{ + PublicLavaAddress: "provider" + providerPrefixAddress + strconv.Itoa(p), Endpoints: pairingEndpoints, Sessions: map[int64]*SingleConsumerSession{}, MaxComputeUnits: 200, ReliabilitySent: false, PairingEpoch: firstEpochHeight, - }) + } } return cswpList } @@ -68,7 +68,7 @@ func TestHappyFlow(t *testing.T) { defer s.Stop() // stop the server when finished. ctx := context.Background() csm := CreateConsumerSessionManager() - pairingList := createPairingList() + pairingList := createPairingList("") err := csm.UpdateAllProviders(firstEpochHeight, pairingList) // update the providers. require.Nil(t, err) cs, epoch, _, _, err := csm.GetSession(ctx, cuForFirstRequest, nil) // get a session @@ -89,7 +89,7 @@ func TestPairingReset(t *testing.T) { defer s.Stop() // stop the server when finished. ctx := context.Background() csm := CreateConsumerSessionManager() - pairingList := createPairingList() + pairingList := createPairingList("") err := csm.UpdateAllProviders(firstEpochHeight, pairingList) // update the providers. require.Nil(t, err) csm.validAddresses = []string{} // set valid addresses to zero @@ -113,7 +113,7 @@ func TestPairingResetWithFailures(t *testing.T) { defer s.Stop() // stop the server when finished. ctx := context.Background() csm := CreateConsumerSessionManager() - pairingList := createPairingList() + pairingList := createPairingList("") err := csm.UpdateAllProviders(firstEpochHeight, pairingList) // update the providers. 
require.Nil(t, err) for { @@ -143,7 +143,7 @@ func TestPairingResetWithMultipleFailures(t *testing.T) { defer s.Stop() // stop the server when finished. ctx := context.Background() csm := CreateConsumerSessionManager() - pairingList := createPairingList() + pairingList := createPairingList("") err := csm.UpdateAllProviders(firstEpochHeight, pairingList) // update the providers. require.Nil(t, err) for numberOfResets := 0; numberOfResets < numberOfResetsToTest; numberOfResets++ { @@ -188,7 +188,7 @@ func TestSuccessAndFailureOfSessionWithUpdatePairingsInTheMiddle(t *testing.T) { defer s.Stop() // stop the server when finished. ctx := context.Background() csm := CreateConsumerSessionManager() - pairingList := createPairingList() + pairingList := createPairingList("") err := csm.UpdateAllProviders(firstEpochHeight, pairingList) // update the providers. require.Nil(t, err) @@ -232,7 +232,7 @@ func TestSuccessAndFailureOfSessionWithUpdatePairingsInTheMiddle(t *testing.T) { } } - err = csm.UpdateAllProviders(secondEpochHeight, pairingList[0:(numberOfProviders/2)]) // update the providers. with half of them + err = csm.UpdateAllProviders(secondEpochHeight, createPairingList("test2")) // update the providers. with half of them require.Nil(t, err) for j := numberOfAllowedSessionsPerConsumer / 2; j < numberOfAllowedSessionsPerConsumer; j++ { @@ -285,7 +285,7 @@ func TestHappyFlowMultiThreaded(t *testing.T) { defer s.Stop() // stop the server when finished. ctx := context.Background() csm := CreateConsumerSessionManager() - pairingList := createPairingList() + pairingList := createPairingList("") err := csm.UpdateAllProviders(firstEpochHeight, pairingList) // update the providers. require.Nil(t, err) ch1 := make(chan int) @@ -333,7 +333,7 @@ func TestHappyFlowMultiThreadedWithUpdateSession(t *testing.T) { defer s.Stop() // stop the server when finished. ctx := context.Background() csm := CreateConsumerSessionManager() - pairingList := createPairingList() + pairingList := createPairingList("") err := csm.UpdateAllProviders(firstEpochHeight, pairingList) // update the providers. require.Nil(t, err) ch1 := make(chan int) @@ -350,7 +350,7 @@ func TestHappyFlowMultiThreadedWithUpdateSession(t *testing.T) { if len(all_chs) == parallelGoRoutines { // at half of the go routines launch the swap. go func() { utils.LavaFormatInfo(fmt.Sprintf("#### UPDATING PROVIDERS ####"), nil) - err := csm.UpdateAllProviders(secondEpochHeight, pairingList[0:(numberOfProviders/2)]) // update the providers. with half of them + err := csm.UpdateAllProviders(secondEpochHeight, createPairingList("test2")) // update the providers. with half of them require.Nil(t, err) }() } @@ -390,7 +390,7 @@ func TestSessionFailureAndGetReportedProviders(t *testing.T) { defer s.Stop() // stop the server when finished. ctx := context.Background() csm := CreateConsumerSessionManager() - pairingList := createPairingList() + pairingList := createPairingList("") err := csm.UpdateAllProviders(firstEpochHeight, pairingList) // update the providers. require.Nil(t, err) cs, epoch, _, _, err := csm.GetSession(ctx, cuForFirstRequest, nil) // get a session @@ -426,7 +426,7 @@ func TestSessionFailureEpochMisMatch(t *testing.T) { defer s.Stop() // stop the server when finished. ctx := context.Background() csm := CreateConsumerSessionManager() - pairingList := createPairingList() + pairingList := createPairingList("") err := csm.UpdateAllProviders(firstEpochHeight, pairingList) // update the providers. 
require.Nil(t, err) cs, epoch, _, _, err := csm.GetSession(ctx, cuForFirstRequest, nil) // get a sesssion @@ -444,7 +444,7 @@ func TestSessionFailureEpochMisMatch(t *testing.T) { func TestAllProvidersEndpointsDisabled(t *testing.T) { ctx := context.Background() csm := CreateConsumerSessionManager() - pairingList := createPairingList() + pairingList := createPairingList("") err := csm.UpdateAllProviders(firstEpochHeight, pairingList) // update the providers. require.Nil(t, err) cs, _, _, _, err := csm.GetSession(ctx, cuForFirstRequest, nil) // get a session @@ -455,7 +455,7 @@ func TestAllProvidersEndpointsDisabled(t *testing.T) { func TestUpdateAllProviders(t *testing.T) { csm := CreateConsumerSessionManager() - pairingList := createPairingList() + pairingList := createPairingList("") err := csm.UpdateAllProviders(firstEpochHeight, pairingList) require.Nil(t, err) require.Equal(t, len(csm.validAddresses), numberOfProviders) // checking there are 2 valid addresses @@ -469,7 +469,7 @@ func TestUpdateAllProviders(t *testing.T) { func TestUpdateAllProvidersWithSameEpoch(t *testing.T) { csm := CreateConsumerSessionManager() - pairingList := createPairingList() + pairingList := createPairingList("") err := csm.UpdateAllProviders(firstEpochHeight, pairingList) require.Nil(t, err) err = csm.UpdateAllProviders(firstEpochHeight, pairingList) @@ -488,7 +488,7 @@ func TestGetSession(t *testing.T) { defer s.Stop() // stop the server when finished. ctx := context.Background() csm := CreateConsumerSessionManager() - pairingList := createPairingList() + pairingList := createPairingList("") err := csm.UpdateAllProviders(firstEpochHeight, pairingList) require.Nil(t, err) cs, epoch, _, _, err := csm.GetSession(ctx, cuForFirstRequest, nil) diff --git a/protocol/lavasession/consumer_types.go b/protocol/lavasession/consumer_types.go index fef70c8e04..f906871869 100644 --- a/protocol/lavasession/consumer_types.go +++ b/protocol/lavasession/consumer_types.go @@ -22,7 +22,7 @@ type ignoredProviders struct { currentEpoch uint64 } -type qoSInfo struct { +type QoSReport struct { LastQoSReport *pairingtypes.QualityOfServiceReport LatencyScoreList []sdk.Dec SyncScoreSum int64 @@ -34,7 +34,7 @@ type qoSInfo struct { type SingleConsumerSession struct { CuSum uint64 LatestRelayCu uint64 // set by GetSession cuNeededForSession - QoSInfo qoSInfo + QoSInfo QoSReport SessionId int64 Client *ConsumerSessionsWithProvider lock utils.LavaMutex diff --git a/protocol/lavasession/errors.go b/protocol/lavasession/errors.go index daf5b83d89..c7d7eb5d05 100644 --- a/protocol/lavasession/errors.go +++ b/protocol/lavasession/errors.go @@ -28,20 +28,22 @@ var ( // Consumer Side Errors ) var ( // Provider Side Errors - InvalidEpochError = sdkerrors.New("InvalidEpoch Error", 881, "Requested Epoch Is Too Old") - NewSessionWithRelayNumError = sdkerrors.New("NewSessionWithRelayNum Error", 882, "Requested Session With Relay Number Is Invalid") - ConsumerIsBlockListed = sdkerrors.New("ConsumerIsBlockListed Error", 883, "This Consumer Is Blocked.") - ConsumerNotRegisteredYet = sdkerrors.New("ConsumerNotActive Error", 884, "This Consumer Is Not Currently In The Pool.") - SessionDoesNotExist = sdkerrors.New("SessionDoesNotExist Error", 885, "This Session Id Does Not Exist.") - MaximumCULimitReachedByConsumer = sdkerrors.New("MaximumCULimitReachedByConsumer Error", 886, "Consumer reached maximum cu limit") - ProviderConsumerCuMisMatch = sdkerrors.New("ProviderConsumerCuMisMatch Error", 887, "Provider and Consumer disagree on total cu for session") - 
RelayNumberMismatch = sdkerrors.New("RelayNumberMismatch Error", 888, "Provider and Consumer disagree on relay number for session") - SubscriptionInitiationError = sdkerrors.New("SubscriptionInitiationError Error", 889, "Provider failed initiating subscription") - EpochIsNotRegisteredError = sdkerrors.New("EpochIsNotRegisteredError Error", 890, "Epoch is not registered in provider session manager") - ConsumerIsNotRegisteredError = sdkerrors.New("ConsumerIsNotRegisteredError Error", 891, "Consumer is not registered in provider session manager") - SubscriptionAlreadyExistsError = sdkerrors.New("SubscriptionAlreadyExists Error", 892, "Subscription already exists in single provider session") - DataReliabilitySessionAlreadyUsedError = sdkerrors.New("DataReliabilitySessionAlreadyUsed Error", 893, "Data Reliability Session already used by this consumer in this epoch") - DataReliabilityCuSumMisMatchError = sdkerrors.New("DataReliabilityCuSumMisMatch Error", 894, "Data Reliability Cu sum mismatch error") - DataReliabilityRelayNumberMisMatchError = sdkerrors.New("DataReliabilityRelayNumberMisMatch Error", 895, "Data Reliability RelayNumber mismatch error") - SubscriptionPointerIsNilError = sdkerrors.New("SubscriptionPointerIsNil Error", 896, "Trying to unsubscribe a nil pointer.") + InvalidEpochError = sdkerrors.New("InvalidEpoch Error", 881, "Requested Epoch Is Too Old") + NewSessionWithRelayNumError = sdkerrors.New("NewSessionWithRelayNum Error", 882, "Requested Session With Relay Number Is Invalid") + ConsumerIsBlockListed = sdkerrors.New("ConsumerIsBlockListed Error", 883, "This Consumer Is Blocked.") + ConsumerNotRegisteredYet = sdkerrors.New("ConsumerNotActive Error", 884, "This Consumer Is Not Currently In The Pool.") + SessionDoesNotExist = sdkerrors.New("SessionDoesNotExist Error", 885, "This Session Id Does Not Exist.") + MaximumCULimitReachedByConsumer = sdkerrors.New("MaximumCULimitReachedByConsumer Error", 886, "Consumer reached maximum cu limit") + ProviderConsumerCuMisMatch = sdkerrors.New("ProviderConsumerCuMisMatch Error", 887, "Provider and Consumer disagree on total cu for session") + RelayNumberMismatch = sdkerrors.New("RelayNumberMismatch Error", 888, "Provider and Consumer disagree on relay number for session") + SubscriptionInitiationError = sdkerrors.New("SubscriptionInitiationError Error", 889, "Provider failed initiating subscription") + EpochIsNotRegisteredError = sdkerrors.New("EpochIsNotRegisteredError Error", 890, "Epoch is not registered in provider session manager") + ConsumerIsNotRegisteredError = sdkerrors.New("ConsumerIsNotRegisteredError Error", 891, "Consumer is not registered in provider session manager") + SubscriptionAlreadyExistsError = sdkerrors.New("SubscriptionAlreadyExists Error", 892, "Subscription already exists in single provider session") + DataReliabilitySessionAlreadyUsedError = sdkerrors.New("DataReliabilitySessionAlreadyUsed Error", 893, "Data Reliability Session already used by this consumer in this epoch") + DataReliabilityCuSumMisMatchError = sdkerrors.New("DataReliabilityCuSumMisMatch Error", 894, "Data Reliability Cu sum mismatch error") + DataReliabilityRelayNumberMisMatchError = sdkerrors.New("DataReliabilityRelayNumberMisMatch Error", 895, "Data Reliability RelayNumber mismatch error") + SubscriptionPointerIsNilError = sdkerrors.New("SubscriptionPointerIsNil Error", 896, "Trying to unsubscribe a nil pointer.") + CouldNotFindIndexAsConsumerNotYetRegisteredError = sdkerrors.New("CouldNotFindIndexAsConsumerNotYetRegistered Error", 897, 
"fetching provider index from psm failed") + ProviderIndexMisMatchError = sdkerrors.New("ProviderIndexMisMatch Error", 898, "provider index mismatch") ) diff --git a/protocol/lavasession/provider_session_manager.go b/protocol/lavasession/provider_session_manager.go index 7785906ee2..2c2edc1ed5 100644 --- a/protocol/lavasession/provider_session_manager.go +++ b/protocol/lavasession/provider_session_manager.go @@ -23,6 +23,25 @@ func (psm *ProviderSessionManager) atomicWriteBlockedEpoch(epoch uint64) { atomic.StoreUint64(&psm.blockedEpochHeight, epoch) } +func (psm *ProviderSessionManager) GetProviderIndexWithConsumer(epoch uint64, consumerAddress string) (int64, error) { + providerSessionWithConsumer, err := psm.IsActiveConsumer(epoch, consumerAddress) + if err != nil { + // if consumer not active maybe it has a DR session. so check there as well + psm.lock.RLock() + defer psm.lock.RUnlock() + drSessionData, found := psm.dataReliabilitySessionsWithAllConsumers[epoch] + if found { + drProviderSessionWithConsumer, foundDrSession := drSessionData.sessionMap[consumerAddress] + if foundDrSession { + return drProviderSessionWithConsumer.atomicReadProviderIndex(), nil + } + } + // we didn't find the consumer in both maps + return IndexNotFound, CouldNotFindIndexAsConsumerNotYetRegisteredError + } + return providerSessionWithConsumer.atomicReadProviderIndex(), nil +} + // reads cs.BlockedEpoch atomically func (psm *ProviderSessionManager) atomicReadBlockedEpoch() (epoch uint64) { return atomic.LoadUint64(&psm.blockedEpochHeight) @@ -61,7 +80,7 @@ func (psm *ProviderSessionManager) getSingleSessionFromProviderSessionWithConsum return singleProviderSession, err } -func (psm *ProviderSessionManager) getOrCreateDataReliabilitySessionWithConsumer(address string, epoch uint64, sessionId uint64) (providerSessionWithConsumer *ProviderSessionsWithConsumer, err error) { +func (psm *ProviderSessionManager) getOrCreateDataReliabilitySessionWithConsumer(address string, epoch uint64, sessionId uint64, selfProviderIndex int64) (providerSessionWithConsumer *ProviderSessionsWithConsumer, err error) { if mapOfDataReliabilitySessionsWithConsumer, consumerFoundInEpoch := psm.dataReliabilitySessionsWithAllConsumers[epoch]; consumerFoundInEpoch { if providerSessionWithConsumer, consumerAddressFound := mapOfDataReliabilitySessionsWithConsumer.sessionMap[address]; consumerAddressFound { if providerSessionWithConsumer.atomicReadConsumerBlocked() == blockListedConsumer { // we atomic read block listed so we dont need to lock the provider. (double lock is always a bad idea.) @@ -69,6 +88,10 @@ func (psm *ProviderSessionManager) getOrCreateDataReliabilitySessionWithConsumer utils.LavaFormatWarning("getActiveConsumer", ConsumerIsBlockListed, &map[string]string{"RequestedEpoch": strconv.FormatUint(epoch, 10), "ConsumerAddress": address}) return nil, ConsumerIsBlockListed } + // validate self index is the same. + if selfProviderIndex != providerSessionWithConsumer.atomicReadProviderIndex() { + return nil, ProviderIndexMisMatchError + } return providerSessionWithConsumer, nil // no error } } else { @@ -77,13 +100,13 @@ func (psm *ProviderSessionManager) getOrCreateDataReliabilitySessionWithConsumer } // If we got here, we need to create a new instance for this consumer address. 
- providerSessionWithConsumer = NewProviderSessionsWithConsumer(address, nil, isDataReliabilityPSWC) + providerSessionWithConsumer = NewProviderSessionsWithConsumer(address, nil, isDataReliabilityPSWC, selfProviderIndex) psm.dataReliabilitySessionsWithAllConsumers[epoch].sessionMap[address] = providerSessionWithConsumer return providerSessionWithConsumer, nil } // GetDataReliabilitySession fetches a data reliability session -func (psm *ProviderSessionManager) GetDataReliabilitySession(address string, epoch uint64, sessionId uint64, relayNumber uint64) (*SingleProviderSession, error) { +func (psm *ProviderSessionManager) GetDataReliabilitySession(address string, epoch uint64, sessionId uint64, relayNumber uint64, selfProviderIndex int64) (*SingleProviderSession, error) { // validate Epoch if !psm.IsValidEpoch(epoch) { // fast checking to see if epoch is even relevant utils.LavaFormatError("GetSession", InvalidEpochError, &map[string]string{"RequestedEpoch": strconv.FormatUint(epoch, 10)}) @@ -101,7 +124,7 @@ func (psm *ProviderSessionManager) GetDataReliabilitySession(address string, epo } // validate active consumer. - providerSessionWithConsumer, err := psm.getOrCreateDataReliabilitySessionWithConsumer(address, epoch, sessionId) + providerSessionWithConsumer, err := psm.getOrCreateDataReliabilitySessionWithConsumer(address, epoch, sessionId, selfProviderIndex) if err != nil { return nil, utils.LavaFormatError("getOrCreateDataReliabilitySessionWithConsumer Failed", err, &map[string]string{"relayNumber": strconv.FormatUint(relayNumber, 10), "DataReliabilityRelayNumber": strconv.Itoa(DataReliabilityRelayNumber)}) } @@ -135,7 +158,7 @@ func (psm *ProviderSessionManager) GetSession(address string, epoch uint64, sess return psm.getSingleSessionFromProviderSessionWithConsumer(providerSessionWithConsumer, sessionId, epoch, relayNumber) } -func (psm *ProviderSessionManager) registerNewConsumer(consumerAddr string, epoch uint64, maxCuForConsumer uint64) (*ProviderSessionsWithConsumer, error) { +func (psm *ProviderSessionManager) registerNewConsumer(consumerAddr string, epoch uint64, maxCuForConsumer uint64, selfProviderIndex int64) (*ProviderSessionsWithConsumer, error) { psm.lock.Lock() defer psm.lock.Unlock() if !psm.IsValidEpoch(epoch) { // checking again because we are now locked and epoch cant change now. 
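Illustrative note (not part of the patch): the hunks above thread a selfProviderIndex through the data reliability session path, and GetProviderIndexWithConsumer together with the new CouldNotFindIndexAsConsumerNotYetRegisteredError lets the provider reuse an index it already stored for a consumer. A sketch of the intended caller pattern follows; resolveSelfProviderIndex, the package name, and the verifyPairing callback (standing in for the state tracker's VerifyPairing query) are assumptions of this example, while the lavasession calls and errors are the ones introduced in this patch.

// Sketch only: resolve the provider's own pairing index before serving a
// data reliability relay, falling back to an on-chain pairing check when the
// consumer has not registered any session in this epoch yet.
package pairingexample

import (
	"errors"

	"github.com/lavanet/lava/protocol/lavasession"
)

func resolveSelfProviderIndex(
	psm *lavasession.ProviderSessionManager,
	epoch uint64,
	consumerAddr string,
	verifyPairing func() (valid bool, index int64, err error), // assumed callback wrapping the state tracker's VerifyPairing
) (int64, error) {
	// fast path: the consumer already registered this epoch, so the index was
	// stored with its sessions and can be read back from the session manager
	selfProviderIndex, err := psm.GetProviderIndexWithConsumer(epoch, consumerAddr)
	if err == nil {
		return selfProviderIndex, nil
	}
	// only this error means "consumer not registered yet"; anything else is unexpected
	if !lavasession.CouldNotFindIndexAsConsumerNotYetRegisteredError.Is(err) {
		return lavasession.IndexNotFound, err
	}
	// slow path: query the chain, which validates the pairing and returns the index
	valid, index, err := verifyPairing()
	if err != nil {
		return lavasession.IndexNotFound, err
	}
	if !valid {
		return lavasession.IndexNotFound, errors.New("consumer is not paired with this provider for the requested epoch")
	}
	return index, nil
}

The provider server later in this patch compares the resolved index against the VRF-derived index before calling GetDataReliabilitySession, which in turn rejects an index that disagrees with one already stored for that consumer (ProviderIndexMisMatchError).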
@@ -151,17 +174,17 @@ func (psm *ProviderSessionManager) registerNewConsumer(consumerAddr string, epoc providerSessionWithConsumer, foundAddressInMap := mapOfProviderSessionsWithConsumer.sessionMap[consumerAddr] if !foundAddressInMap { - providerSessionWithConsumer = NewProviderSessionsWithConsumer(consumerAddr, &ProviderSessionsEpochData{MaxComputeUnits: maxCuForConsumer}, notDataReliabilityPSWC) + providerSessionWithConsumer = NewProviderSessionsWithConsumer(consumerAddr, &ProviderSessionsEpochData{MaxComputeUnits: maxCuForConsumer}, notDataReliabilityPSWC, selfProviderIndex) mapOfProviderSessionsWithConsumer.sessionMap[consumerAddr] = providerSessionWithConsumer } return providerSessionWithConsumer, nil } -func (psm *ProviderSessionManager) RegisterProviderSessionWithConsumer(consumerAddress string, epoch uint64, sessionId uint64, relayNumber uint64, maxCuForConsumer uint64) (*SingleProviderSession, error) { +func (psm *ProviderSessionManager) RegisterProviderSessionWithConsumer(consumerAddress string, epoch uint64, sessionId uint64, relayNumber uint64, maxCuForConsumer uint64, selfProviderIndex int64) (*SingleProviderSession, error) { providerSessionWithConsumer, err := psm.IsActiveConsumer(epoch, consumerAddress) if err != nil { if ConsumerNotRegisteredYet.Is(err) { - providerSessionWithConsumer, err = psm.registerNewConsumer(consumerAddress, epoch, maxCuForConsumer) + providerSessionWithConsumer, err = psm.registerNewConsumer(consumerAddress, epoch, maxCuForConsumer, selfProviderIndex) if err != nil { return nil, utils.LavaFormatError("RegisterProviderSessionWithConsumer Failed to registerNewSession", err, nil) } diff --git a/protocol/lavasession/provider_session_manager_test.go b/protocol/lavasession/provider_session_manager_test.go index 3de5b6a4db..17b638853f 100644 --- a/protocol/lavasession/provider_session_manager_test.go +++ b/protocol/lavasession/provider_session_manager_test.go @@ -19,6 +19,7 @@ const ( maxCu = uint64(150) epoch2 = testNumberOfBlocksKeptInMemory + epoch1 consumerOneAddress = "consumer1" + selfProviderIndex = int64(1) ) func initProviderSessionManager() *ProviderSessionManager { @@ -45,7 +46,7 @@ func prepareSession(t *testing.T) (*ProviderSessionManager, *SingleProviderSessi require.True(t, ConsumerNotRegisteredYet.Is(err)) // expect session to be missing, so we need to register it for the first time - sps, err = psm.RegisterProviderSessionWithConsumer(consumerOneAddress, epoch1, sessionId, relayNumber, maxCu) + sps, err = psm.RegisterProviderSessionWithConsumer(consumerOneAddress, epoch1, sessionId, relayNumber, maxCu, selfProviderIndex) // validate session was added require.NotEmpty(t, psm.sessionsWithAllConsumers) @@ -57,6 +58,7 @@ func prepareSession(t *testing.T) (*ProviderSessionManager, *SingleProviderSessi // validate session was prepared successfully require.Nil(t, err) + require.Equal(t, sps.userSessionsParent.atomicReadProviderIndex(), selfProviderIndex) require.Equal(t, relayCu, sps.LatestRelayCu) require.Equal(t, sps.CuSum, relayCu) require.Equal(t, sps.SessionID, sessionId) @@ -70,7 +72,7 @@ func prepareDRSession(t *testing.T) (*ProviderSessionManager, *SingleProviderSes psm := initProviderSessionManager() // get data reliability session - sps, err := psm.GetDataReliabilitySession(consumerOneAddress, epoch1, dataReliabilitySessionId, relayNumber) + sps, err := psm.GetDataReliabilitySession(consumerOneAddress, epoch1, dataReliabilitySessionId, relayNumber, selfProviderIndex) // validate results require.Nil(t, err) @@ -255,7 +257,7 @@ func 
TestPSMDataReliabilityTwicePerEpoch(t *testing.T) { require.Equal(t, epoch1, sps.PairingEpoch) // try to get a data reliability session again. - sps, err := psm.GetDataReliabilitySession(consumerOneAddress, epoch1, dataReliabilitySessionId, relayNumber) + sps, err := psm.GetDataReliabilitySession(consumerOneAddress, epoch1, dataReliabilitySessionId, relayNumber, selfProviderIndex) // validate we cant get more than one data reliability session per epoch (might change in the future) require.Error(t, err) @@ -293,7 +295,7 @@ func TestPSMDataReliabilityRetryAfterFailure(t *testing.T) { require.Equal(t, epoch1, sps.PairingEpoch) // try to get a data reliability session again. - sps, err := psm.GetDataReliabilitySession(consumerOneAddress, epoch1, dataReliabilitySessionId, relayNumber) + sps, err := psm.GetDataReliabilitySession(consumerOneAddress, epoch1, dataReliabilitySessionId, relayNumber, selfProviderIndex) // validate we can get a data reliability session if we failed before require.Nil(t, err) diff --git a/protocol/lavasession/provider_types.go b/protocol/lavasession/provider_types.go index 435e15788f..3c1300ebe7 100644 --- a/protocol/lavasession/provider_types.go +++ b/protocol/lavasession/provider_types.go @@ -99,6 +99,7 @@ type ProviderSessionsWithConsumer struct { epochData *ProviderSessionsEpochData Lock sync.RWMutex isDataReliability uint32 // 0 is false, 1 is true. set to uint so we can atomically read + selfProviderIndex int64 } type SingleProviderSession struct { @@ -111,24 +112,30 @@ type SingleProviderSession struct { PairingEpoch uint64 } -func NewProviderSessionsWithConsumer(consumerAddr string, epochData *ProviderSessionsEpochData, isDataReliability uint32) *ProviderSessionsWithConsumer { +func NewProviderSessionsWithConsumer(consumerAddr string, epochData *ProviderSessionsEpochData, isDataReliability uint32, selfProviderIndex int64) *ProviderSessionsWithConsumer { pswc := &ProviderSessionsWithConsumer{ Sessions: map[uint64]*SingleProviderSession{}, isBlockListed: 0, consumerAddr: consumerAddr, epochData: epochData, isDataReliability: isDataReliability, + selfProviderIndex: selfProviderIndex, } return pswc } +// reads the selfProviderIndex data atomically for DR +func (pswc *ProviderSessionsWithConsumer) atomicReadProviderIndex() int64 { + return atomic.LoadInt64(&pswc.selfProviderIndex) +} + // reads the isDataReliability data atomically -func (pswc *ProviderSessionsWithConsumer) atomicReadIsDataReliability() uint32 { // rename to blocked consumer not blocked epoch +func (pswc *ProviderSessionsWithConsumer) atomicReadIsDataReliability() uint32 { return atomic.LoadUint32(&pswc.isDataReliability) } // reads cs.BlockedEpoch atomically, notBlockListedConsumer = 0, blockListedConsumer = 1 -func (pswc *ProviderSessionsWithConsumer) atomicWriteConsumerBlocked(blockStatus uint32) { // rename to blocked consumer not blocked epoch +func (pswc *ProviderSessionsWithConsumer) atomicWriteConsumerBlocked(blockStatus uint32) { atomic.StoreUint32(&pswc.isBlockListed, blockStatus) } diff --git a/protocol/rpcprovider/rewardserver/reward_server.go b/protocol/rpcprovider/rewardserver/reward_server.go index 8066831658..277882e8c1 100644 --- a/protocol/rpcprovider/rewardserver/reward_server.go +++ b/protocol/rpcprovider/rewardserver/reward_server.go @@ -9,8 +9,8 @@ import ( "sync/atomic" sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/lavanet/lava/protocol/lavaprotocol" "github.com/lavanet/lava/protocol/lavasession" + "github.com/lavanet/lava/relayer/sigs" "github.com/lavanet/lava/utils" 
pairingtypes "github.com/lavanet/lava/x/pairing/types" terderminttypes "github.com/tendermint/tendermint/abci/types" @@ -135,16 +135,11 @@ func (rws *RewardServer) sendRewardsClaim(ctx context.Context, epoch uint64) err return err } for _, relay := range rewardsToClaim { - consumerBytes, err := lavaprotocol.ExtractSignerAddress(relay) + consumerAddr, err := sigs.ExtractSignerAddress(relay) if err != nil { utils.LavaFormatError("invalid consumer address extraction from relay", err, &map[string]string{"relay": fmt.Sprintf("%+v", relay)}) continue } - consumerAddr, err := sdk.AccAddressFromHex(consumerBytes.String()) - if err != nil { - utils.LavaFormatError("invalid consumer address extraction from relay", err, &map[string]string{"relay": fmt.Sprintf("%+v", relay), "consumerBytes": consumerBytes.String()}) - continue - } expectedPay := PaymentRequest{ChainID: relay.ChainID, CU: relay.CuSum, BlockHeightDeadline: relay.BlockHeight, Amount: sdk.Coin{}, Client: consumerAddr, UniqueIdentifier: relay.SessionId, Description: strconv.FormatUint(rws.serverID, 10)} rws.addExpectedPayment(expectedPay) rws.updateCUServiced(relay.CuSum) diff --git a/protocol/rpcprovider/rpcprovider_server.go b/protocol/rpcprovider/rpcprovider_server.go index ac503ea756..fd20cdd0d7 100644 --- a/protocol/rpcprovider/rpcprovider_server.go +++ b/protocol/rpcprovider/rpcprovider_server.go @@ -84,14 +84,20 @@ func (rpcps *RPCProviderServer) ServeRPCRequests( // function used to handle relay requests from a consumer, it is called by a provider_listener by calling RegisterReceiver func (rpcps *RPCProviderServer) Relay(ctx context.Context, request *pairingtypes.RelayRequest) (*pairingtypes.RelayReply, error) { utils.LavaFormatDebug("Provider got relay request", &map[string]string{ - "request.SessionId": strconv.FormatUint(request.SessionId, 10), - "request.relayNumber": strconv.FormatUint(request.RelayNum, 10), - "request.cu": strconv.FormatUint(request.CuSum, 10), + "request": fmt.Sprintf("%+v", request), }) + + // we need a shallow copy of the request for the payment proof, when data reliability is enabled it changes the request.blockNumber, + // that causes the signature to change. + requestCopy := request.ShallowCopy() + + // Init relay relaySession, consumerAddress, chainMessage, err := rpcps.initRelay(ctx, request) if err != nil { return nil, rpcps.handleRelayErrorStatus(err) } + + // Try sending relay reply, err := rpcps.TryRelay(ctx, request, consumerAddress, chainMessage) if err != nil { // failed to send relay. we need to adjust session state. cuSum and relayNumber. @@ -104,12 +110,16 @@ func (rpcps *RPCProviderServer) Relay(ctx context.Context, request *pairingtypes "request.userAddr": consumerAddress.String(), }) } else { + + // On successful relay relayError := rpcps.providerSessionManager.OnSessionDone(relaySession) if relayError != nil { err = sdkerrors.Wrapf(relayError, "OnSession Done failure: "+err.Error()) } else { if request.DataReliability == nil { - err = rpcps.SendProof(ctx, relaySession, request, consumerAddress) + // SendProof gets the request copy, as in the case of data reliability enabled the request.blockNumber is changed. + // Therefore the signature changes, so we need the original copy to extract the address from it. 
+ err = rpcps.SendProof(ctx, relaySession, requestCopy, consumerAddress) if err != nil { return nil, err } @@ -200,7 +210,7 @@ func (rpcps *RPCProviderServer) RelaySubscribe(request *pairingtypes.RelayReques func (rpcps *RPCProviderServer) SendProof(ctx context.Context, relaySession *lavasession.SingleProviderSession, request *pairingtypes.RelayRequest, consumerAddress sdk.AccAddress) error { epoch := relaySession.PairingEpoch - storedCU, updatedWithProof := rpcps.rewardServer.SendNewProof(ctx, request.ShallowCopy(), epoch, consumerAddress.String()) + storedCU, updatedWithProof := rpcps.rewardServer.SendNewProof(ctx, request, epoch, consumerAddress.String()) if !updatedWithProof && storedCU > request.CuSum { rpcps.providerSessionManager.UpdateSessionCU(consumerAddress.String(), epoch, request.SessionId, storedCU) err := utils.LavaFormatError("Cu in relay smaller than existing proof", lavasession.ProviderConsumerCuMisMatch, &map[string]string{"existing_proof_cu": strconv.FormatUint(storedCU, 10)}) @@ -298,32 +308,71 @@ func (rpcps *RPCProviderServer) verifyRelaySession(ctx context.Context, request return nil, nil, utils.LavaFormatError("did not pass relay validation", err, nil) } // check signature - consumerBytes, err := lavaprotocol.ExtractSignerAddress(request) + extractedConsumerAddress, err = sigs.ExtractSignerAddress(request) if err != nil { return nil, nil, utils.LavaFormatError("extract signer address from relay", err, nil) } - extractedConsumerAddress, err = sdk.AccAddressFromHex(consumerBytes.String()) - if err != nil { - return nil, nil, utils.LavaFormatError("get relay consumer address", err, nil) - } // handle non data reliability relays + consumerAddressString := extractedConsumerAddress.String() if request.DataReliability == nil { - singleProviderSession, err = rpcps.getSingleProviderSession(ctx, request, extractedConsumerAddress.String()) + singleProviderSession, err = rpcps.getSingleProviderSession(ctx, request, consumerAddressString) return singleProviderSession, extractedConsumerAddress, err } + // try to fetch the selfProviderIndex from the already registered consumers. 
+ selfProviderIndex, errFindingIndex := rpcps.providerSessionManager.GetProviderIndexWithConsumer(uint64(request.BlockHeight), consumerAddressString) + // validate the error is the expected type this error is valid and + // just indicates this consumer has not registered yet and we need to fetch the index from the blockchain + if errFindingIndex != nil && !lavasession.CouldNotFindIndexAsConsumerNotYetRegisteredError.Is(errFindingIndex) { + return nil, nil, utils.LavaFormatError("GetProviderIndexWithConsumer got an unexpected Error", errFindingIndex, nil) + } + // data reliability session verifications - err = rpcps.verifyDataReliabilityRelayRequest(ctx, request, extractedConsumerAddress) + vrfIndex, err := rpcps.verifyDataReliabilityRelayRequest(ctx, request, extractedConsumerAddress) if err != nil { return nil, nil, utils.LavaFormatError("failed data reliability validation", err, nil) } - dataReliabilitySingleProviderSession, err := rpcps.providerSessionManager.GetDataReliabilitySession(extractedConsumerAddress.String(), uint64(request.BlockHeight), request.SessionId, request.RelayNum) + + // in case we didnt find selfProviderIndex as consumer is not registered we are sending VerifyPairing to fetch the index and add it to the PSM + if errFindingIndex != nil { + var validPairing bool + var verifyPairingError error + // verify pairing for DR session + validPairing, selfProviderIndex, verifyPairingError = rpcps.stateTracker.VerifyPairing(ctx, consumerAddressString, rpcps.providerAddress.String(), uint64(request.BlockHeight), request.ChainID) + if verifyPairingError != nil { + return nil, nil, utils.LavaFormatError("Failed to VerifyPairing after verifyRelaySession for GetDataReliabilitySession", verifyPairingError, &map[string]string{"sessionID": strconv.FormatUint(request.SessionId, 10), "consumer": consumerAddressString, "provider": rpcps.providerAddress.String(), "relayNum": strconv.FormatUint(request.RelayNum, 10)}) + } + if !validPairing { + return nil, nil, utils.LavaFormatError("VerifyPairing, this consumer address is not valid with this provider for GetDataReliabilitySession", nil, &map[string]string{"epoch": strconv.FormatInt(request.BlockHeight, 10), "sessionID": strconv.FormatUint(request.SessionId, 10), "consumer": consumerAddressString, "provider": rpcps.providerAddress.String(), "relayNum": strconv.FormatUint(request.RelayNum, 10)}) + } + } + + // validate the provider index with the vrfIndex + if selfProviderIndex != vrfIndex { + dataReliabilityMarshalled, err := json.Marshal(request.DataReliability) + if err != nil { + dataReliabilityMarshalled = []byte{} + } + return nil, nil, utils.LavaFormatError("Provider identified invalid vrfIndex in data reliability request, the given index and self index are different", nil, + &map[string]string{ + "requested epoch": strconv.FormatInt(request.BlockHeight, 10), "userAddr": consumerAddressString, + "dataReliability": string(dataReliabilityMarshalled), "relayEpochStart": strconv.FormatInt(request.BlockHeight, 10), + "vrfIndex": strconv.FormatInt(vrfIndex, 10), + "self Index": strconv.FormatInt(selfProviderIndex, 10), + "vrf_chainId": request.DataReliability.ChainID, + "vrf_epoch": strconv.FormatInt(request.DataReliability.Epoch, 10), + }) + } + utils.LavaFormatInfo("Simulation: server got valid DataReliability request", nil) + + // Fetch the DR session! 
+ dataReliabilitySingleProviderSession, err := rpcps.providerSessionManager.GetDataReliabilitySession(consumerAddressString, uint64(request.BlockHeight), request.SessionId, request.RelayNum, selfProviderIndex) if err != nil { if lavasession.DataReliabilityAlreadySentThisEpochError.Is(err) { return nil, nil, err } - return nil, nil, utils.LavaFormatError("failed to get a provider data reliability session", err, &map[string]string{"sessionID": strconv.FormatUint(request.SessionId, 10), "consumer": extractedConsumerAddress.String(), "epoch": strconv.FormatInt(request.BlockHeight, 10)}) + return nil, nil, utils.LavaFormatError("failed to get a provider data reliability session", err, &map[string]string{"sessionID": strconv.FormatUint(request.SessionId, 10), "consumer": consumerAddressString, "epoch": strconv.FormatInt(request.BlockHeight, 10)}) } return dataReliabilitySingleProviderSession, extractedConsumerAddress, nil } @@ -334,7 +383,7 @@ func (rpcps *RPCProviderServer) getSingleProviderSession(ctx context.Context, re if err != nil { if lavasession.ConsumerNotRegisteredYet.Is(err) { - valid, _, verifyPairingError := rpcps.stateTracker.VerifyPairing(ctx, consumerAddressString, rpcps.providerAddress.String(), uint64(request.BlockHeight), request.ChainID) + valid, selfProviderIndex, verifyPairingError := rpcps.stateTracker.VerifyPairing(ctx, consumerAddressString, rpcps.providerAddress.String(), uint64(request.BlockHeight), request.ChainID) if verifyPairingError != nil { return nil, utils.LavaFormatError("Failed to VerifyPairing after ConsumerNotRegisteredYet", verifyPairingError, &map[string]string{"sessionID": strconv.FormatUint(request.SessionId, 10), "consumer": consumerAddressString, "provider": rpcps.providerAddress.String(), "relayNum": strconv.FormatUint(request.RelayNum, 10)}) } @@ -346,7 +395,7 @@ func (rpcps *RPCProviderServer) getSingleProviderSession(ctx context.Context, re return nil, utils.LavaFormatError("ConsumerNotRegisteredYet: GetVrfPkAndMaxCuForUser failed", getVrfAndMaxCuError, &map[string]string{"epoch": strconv.FormatInt(request.BlockHeight, 10), "sessionID": strconv.FormatUint(request.SessionId, 10), "consumer": consumerAddressString, "provider": rpcps.providerAddress.String(), "relayNum": strconv.FormatUint(request.RelayNum, 10)}) } // After validating the consumer we can register it with provider session manager. 
- singleProviderSession, err = rpcps.providerSessionManager.RegisterProviderSessionWithConsumer(consumerAddressString, uint64(request.BlockHeight), request.SessionId, request.RelayNum, maxCuForConsumer) + singleProviderSession, err = rpcps.providerSessionManager.RegisterProviderSessionWithConsumer(consumerAddressString, uint64(request.BlockHeight), request.SessionId, request.RelayNum, maxCuForConsumer, selfProviderIndex) if err != nil { return nil, utils.LavaFormatError("Failed to RegisterProviderSessionWithConsumer", err, &map[string]string{"sessionID": strconv.FormatUint(request.SessionId, 10), "consumer": consumerAddressString, "relayNum": strconv.FormatUint(request.RelayNum, 10)}) } @@ -368,42 +417,42 @@ func (rpcps *RPCProviderServer) verifyRelayRequestMetaData(request *pairingtypes return nil } -func (rpcps *RPCProviderServer) verifyDataReliabilityRelayRequest(ctx context.Context, request *pairingtypes.RelayRequest, consumerAddress sdk.AccAddress) error { +func (rpcps *RPCProviderServer) verifyDataReliabilityRelayRequest(ctx context.Context, request *pairingtypes.RelayRequest, consumerAddress sdk.AccAddress) (int64, error) { if request.CuSum != lavasession.DataReliabilityCuSum { - return utils.LavaFormatError("request's CU sum is not equal to the data reliability CU sum", nil, &map[string]string{"cuSum": strconv.FormatUint(request.CuSum, 10), "DataReliabilityCuSum": strconv.Itoa(lavasession.DataReliabilityCuSum)}) + return lavasession.IndexNotFound, utils.LavaFormatError("request's CU sum is not equal to the data reliability CU sum", nil, &map[string]string{"cuSum": strconv.FormatUint(request.CuSum, 10), "DataReliabilityCuSum": strconv.Itoa(lavasession.DataReliabilityCuSum)}) } vrf_pk, _, err := rpcps.stateTracker.GetVrfPkAndMaxCuForUser(ctx, consumerAddress.String(), request.ChainID, uint64(request.BlockHeight)) if err != nil { - return utils.LavaFormatError("failed to get vrfpk and maxCURes for data reliability!", err, &map[string]string{ + return lavasession.IndexNotFound, utils.LavaFormatError("failed to get vrfpk and maxCURes for data reliability!", err, &map[string]string{ "userAddr": consumerAddress.String(), }) } // data reliability is not session dependant, its always sent with sessionID 0 and if not we don't care if vrf_pk == nil { - return utils.LavaFormatError("dataReliability Triggered with vrf_pk == nil", nil, + return lavasession.IndexNotFound, utils.LavaFormatError("dataReliability Triggered with vrf_pk == nil", nil, &map[string]string{"requested epoch": strconv.FormatInt(request.BlockHeight, 10), "userAddr": consumerAddress.String()}) } // verify the providerSig is indeed a signature by a valid provider on this query valid, index, err := rpcps.VerifyReliabilityAddressSigning(ctx, consumerAddress, request) if err != nil { - return utils.LavaFormatError("VerifyReliabilityAddressSigning invalid", err, + return lavasession.IndexNotFound, utils.LavaFormatError("VerifyReliabilityAddressSigning invalid", err, &map[string]string{"requested epoch": strconv.FormatInt(request.BlockHeight, 10), "userAddr": consumerAddress.String(), "dataReliability": fmt.Sprintf("%v", request.DataReliability)}) } if !valid { - return utils.LavaFormatError("invalid DataReliability Provider signing", nil, + return lavasession.IndexNotFound, utils.LavaFormatError("invalid DataReliability Provider signing", nil, &map[string]string{"requested epoch": strconv.FormatInt(request.BlockHeight, 10), "userAddr": consumerAddress.String(), "dataReliability": fmt.Sprintf("%v", request.DataReliability)}) } // 
verify data reliability fields correspond to the right vrf valid = utils.VerifyVrfProof(request, *vrf_pk, uint64(request.BlockHeight)) if !valid { - return utils.LavaFormatError("invalid DataReliability fields, VRF wasn't verified with provided proof", nil, + return lavasession.IndexNotFound, utils.LavaFormatError("invalid DataReliability fields, VRF wasn't verified with provided proof", nil, &map[string]string{"requested epoch": strconv.FormatInt(request.BlockHeight, 10), "userAddr": consumerAddress.String(), "dataReliability": fmt.Sprintf("%v", request.DataReliability)}) } _, dataReliabilityThreshold := rpcps.chainParser.DataReliabilityParams() providersCount, err := rpcps.stateTracker.GetProvidersCountForConsumer(ctx, consumerAddress.String(), uint64(request.BlockHeight), request.ChainID) if err != nil { - return utils.LavaFormatError("VerifyReliabilityAddressSigning failed fetching providers count for consumer", err, &map[string]string{"chainID": request.ChainID, "consumer": consumerAddress.String(), "epoch": strconv.FormatInt(request.BlockHeight, 10)}) + return lavasession.IndexNotFound, utils.LavaFormatError("VerifyReliabilityAddressSigning failed fetching providers count for consumer", err, &map[string]string{"chainID": request.ChainID, "consumer": consumerAddress.String(), "epoch": strconv.FormatInt(request.BlockHeight, 10)}) } vrfIndex, vrfErr := utils.GetIndexForVrf(request.DataReliability.VrfValue, providersCount, dataReliabilityThreshold) if vrfErr != nil { @@ -411,29 +460,20 @@ func (rpcps *RPCProviderServer) verifyDataReliabilityRelayRequest(ctx context.Co if err != nil { dataReliabilityMarshalled = []byte{} } - return utils.LavaFormatError("Provider identified vrf value in data reliability request does not meet threshold", vrfErr, + return lavasession.IndexNotFound, utils.LavaFormatError("Provider identified vrf value in data reliability request does not meet threshold", vrfErr, &map[string]string{ "requested epoch": strconv.FormatInt(request.BlockHeight, 10), "userAddr": consumerAddress.String(), "dataReliability": string(dataReliabilityMarshalled), "relayEpochStart": strconv.FormatInt(request.BlockHeight, 10), - "vrfIndex": strconv.FormatInt(vrfIndex, 10), - "self Index": strconv.FormatInt(index, 10), + "vrfIndex": strconv.FormatInt(vrfIndex, 10), + "self Index": strconv.FormatInt(index, 10), + "request.DataReliability.VrfValue": string(request.DataReliability.VrfValue), + "providerAddress": rpcps.providerAddress.String(), + "chainId": rpcps.rpcProviderEndpoint.ChainID, }) } - if index != vrfIndex { - dataReliabilityMarshalled, err := json.Marshal(request.DataReliability) - if err != nil { - dataReliabilityMarshalled = []byte{} - } - return utils.LavaFormatError("Provider identified invalid vrfIndex in data reliability request, the given index and self index are different", nil, - &map[string]string{ - "requested epoch": strconv.FormatInt(request.BlockHeight, 10), "userAddr": consumerAddress.String(), - "dataReliability": string(dataReliabilityMarshalled), "relayEpochStart": strconv.FormatInt(request.BlockHeight, 10), - "vrfIndex": strconv.FormatInt(vrfIndex, 10), - "self Index": strconv.FormatInt(index, 10), - }) - } - utils.LavaFormatInfo("Simulation: server got valid DataReliability request", nil) - return nil + + // return the vrfIndex for verification + return vrfIndex, nil } func (rpcps *RPCProviderServer) VerifyReliabilityAddressSigning(ctx context.Context, consumer sdk.AccAddress, request *pairingtypes.RelayRequest) (valid bool, index int64, err error) { diff --git 
a/protocol/statetracker/pairing_updater.go b/protocol/statetracker/pairing_updater.go index 495109b250..d95ee9f0ed 100644 --- a/protocol/statetracker/pairing_updater.go +++ b/protocol/statetracker/pairing_updater.go @@ -65,7 +65,7 @@ func (pu *PairingUpdater) Update(latestBlock int64) { } for _, consumerSessionManager := range consumerSessionManagerList { // same pairing for all apiInterfaces, they pick the right endpoints from inside using our filter function - err := pu.updateConsummerSessionManager(ctx, pairingList, consumerSessionManager, epoch) + err = pu.updateConsummerSessionManager(ctx, pairingList, consumerSessionManager, epoch) if err != nil { utils.LavaFormatError("failed updating consumer session manager", err, &map[string]string{"chainID": chainID, "apiInterface": consumerSessionManager.RPCEndpoint().ApiInterface, "pairingListLen": strconv.Itoa(len(pairingList))}) continue @@ -90,10 +90,10 @@ func (pu *PairingUpdater) updateConsummerSessionManager(ctx context.Context, pai return } -func (pu *PairingUpdater) filterPairingListByEndpoint(ctx context.Context, pairingList []epochstoragetypes.StakeEntry, rpcEndpoint lavasession.RPCEndpoint, epoch uint64) (filteredList []*lavasession.ConsumerSessionsWithProvider, err error) { +func (pu *PairingUpdater) filterPairingListByEndpoint(ctx context.Context, pairingList []epochstoragetypes.StakeEntry, rpcEndpoint lavasession.RPCEndpoint, epoch uint64) (filteredList map[uint64]*lavasession.ConsumerSessionsWithProvider, err error) { // go over stake entries, and filter endpoints that match geolocation and api interface - pairing := []*lavasession.ConsumerSessionsWithProvider{} - for _, provider := range pairingList { + pairing := map[uint64]*lavasession.ConsumerSessionsWithProvider{} + for providerIdx, provider := range pairingList { // // Sanity providerEndpoints := provider.GetEndpoints() @@ -125,14 +125,14 @@ func (pu *PairingUpdater) filterPairingListByEndpoint(ctx context.Context, pairi pairingEndpoints[idx] = endp } - pairing = append(pairing, &lavasession.ConsumerSessionsWithProvider{ + pairing[uint64(providerIdx)] = &lavasession.ConsumerSessionsWithProvider{ PublicLavaAddress: provider.Address, Endpoints: pairingEndpoints, Sessions: map[int64]*lavasession.SingleConsumerSession{}, MaxComputeUnits: maxcu, ReliabilitySent: false, PairingEpoch: epoch, - }) + } } if len(pairing) == 0 { return nil, utils.LavaFormatError("Failed getting pairing for consumer, pairing is empty", err, &map[string]string{"apiInterface": rpcEndpoint.ApiInterface, "ChainID": rpcEndpoint.ChainID, "geolocation": strconv.FormatUint(rpcEndpoint.Geolocation, 10)}) diff --git a/relayer/sentry/sentry.go b/relayer/sentry/sentry.go index 6f875f1e8c..395f0080a8 100755 --- a/relayer/sentry/sentry.go +++ b/relayer/sentry/sentry.go @@ -169,14 +169,14 @@ func (s *Sentry) SetupConsumerSessionManager(ctx context.Context, consumerSessio utils.LavaFormatInfo("Setting up ConsumerSessionManager", nil) s.consumerSessionManager = consumerSessionManager // Get pairing for the first time, for clients - pairingList, err := s.getPairing(ctx) - if err != nil { - utils.LavaFormatFatal("Failed getting pairing for consumer in initialization", err, &map[string]string{"Address": s.Acc}) - } - err = s.consumerSessionManager.UpdateAllProviders(s.GetCurrentEpochHeight(), pairingList) - if err != nil { - utils.LavaFormatFatal("Failed UpdateAllProviders", err, &map[string]string{"Address": s.Acc}) - } + // pairingList, err := s.getPairing(ctx) + // if err != nil { + // utils.LavaFormatFatal("Failed 
getting pairing for consumer in initialization", err, &map[string]string{"Address": s.Acc}) + // } + // err = s.consumerSessionManager.UpdateAllProviders(s.GetCurrentEpochHeight(), pairingList) + // if err != nil { + // utils.LavaFormatFatal("Failed UpdateAllProviders", err, &map[string]string{"Address": s.Acc}) + // } return nil } @@ -654,14 +654,14 @@ func (s *Sentry) Start(ctx context.Context) { // // Update pairing if s.isUser { - pairingList, err := s.getPairing(ctx) - if err != nil { - utils.LavaFormatFatal("Failed getting pairing for consumer in initialization", err, &map[string]string{"Address": s.Acc}) - } - err = s.consumerSessionManager.UpdateAllProviders(s.GetCurrentEpochHeight(), pairingList) - if err != nil { - utils.LavaFormatFatal("Failed UpdateAllProviders", err, &map[string]string{"Address": s.Acc}) - } + // pairingList, err := s.getPairing(ctx) + // if err != nil { + // utils.LavaFormatFatal("Failed getting pairing for consumer in initialization", err, &map[string]string{"Address": s.Acc}) + // } + // err = s.consumerSessionManager.UpdateAllProviders(s.GetCurrentEpochHeight(), pairingList) + // if err != nil { + // utils.LavaFormatFatal("Failed UpdateAllProviders", err, &map[string]string{"Address": s.Acc}) + // } } s.clearAuthResponseCache(data.Block.Height) // TODO: Remove this after provider session manager is fully functional diff --git a/relayer/sigs/sigs.go b/relayer/sigs/sigs.go index 51a54297f2..1081f09142 100644 --- a/relayer/sigs/sigs.go +++ b/relayer/sigs/sigs.go @@ -206,6 +206,18 @@ func RecoverProviderPubKeyFromVrfDataAndQuery(request *pairingtypes.RelayRequest return RecoverProviderPubKeyFromQueryAndAllDataHash(request, request.DataReliability.AllDataHash, request.DataReliability.ProviderSig) } +func ExtractSignerAddress(in *pairingtypes.RelayRequest) (sdk.AccAddress, error) { + pubKey, err := RecoverPubKeyFromRelay(*in) + if err != nil { + return nil, err + } + extractedConsumerAddress, err := sdk.AccAddressFromHex(pubKey.Address().String()) + if err != nil { + return nil, utils.LavaFormatError("get relay consumer address", err, nil) + } + return extractedConsumerAddress, nil +} + func RecoverPubKeyFromRelay(in pairingtypes.RelayRequest) (secp256k1.PubKey, error) { signature := in.Sig in.Sig = []byte{} diff --git a/testutil/e2e/e2e.go b/testutil/e2e/e2e.go index de0aabd95a..f4d2aaa281 100644 --- a/testutil/e2e/e2e.go +++ b/testutil/e2e/e2e.go @@ -516,7 +516,7 @@ func tendermintURITests(rpcURL string, testDuration time.Duration) error { "%s/status": true, "%s/block?height=1": true, "%s/blockchain?minHeight=0&maxHeight=10": true, - "%s/dial_peers?persistent=true&unconditional=true&private=true": false, // this is a rpc affecting query and is not available on the spec so it should fail + // "%s/dial_peers?persistent=true&unconditional=true&private=true": false, // this is a rpc affecting query and is not available on the spec so it should fail } for start := time.Now(); time.Since(start) < testDuration; { for api, noFail := range mostImportantApisToTest { diff --git a/x/pairing/keeper/msg_server_relay_payment.go b/x/pairing/keeper/msg_server_relay_payment.go index d697ac676d..86704000e0 100644 --- a/x/pairing/keeper/msg_server_relay_payment.go +++ b/x/pairing/keeper/msg_server_relay_payment.go @@ -36,14 +36,10 @@ func (k msgServer) RelayPayment(goCtx context.Context, msg *types.MsgRelayPaymen return errorLogAndFormat("relay_future_block", map[string]string{"blockheight": string(relay.Sig)}, "relay request for a block in the future") } - pubKey, err := 
sigs.RecoverPubKeyFromRelay(*relay) + clientAddr, err := sigs.ExtractSignerAddress(relay) if err != nil { return errorLogAndFormat("relay_payment_sig", map[string]string{"sig": string(relay.Sig)}, "recover PubKey from relay failed") } - clientAddr, err := sdk.AccAddressFromHex(pubKey.Address().String()) - if err != nil { - return errorLogAndFormat("relay_payment_user_addr", map[string]string{"user": pubKey.Address().String()}, "invalid user address in relay msg") - } providerAddr, err := sdk.AccAddressFromBech32(relay.Provider) if err != nil { return errorLogAndFormat("relay_payment_addr", map[string]string{"provider": relay.Provider, "creator": msg.Creator}, "invalid provider address in relay msg") diff --git a/x/pairing/keeper/pairing_test.go b/x/pairing/keeper/pairing_test.go index 80089b3315..52ff9d401a 100644 --- a/x/pairing/keeper/pairing_test.go +++ b/x/pairing/keeper/pairing_test.go @@ -105,6 +105,59 @@ func TestPairingUniqueness(t *testing.T) { } } +func TestValidatePairingDeterminism(t *testing.T) { + servers, keepers, ctx := testkeeper.InitAllKeepers(t) + + // init keepers state + spec := common.CreateMockSpec() + keepers.Spec.SetSpec(sdk.UnwrapSDKContext(ctx), spec) + + ctx = testkeeper.AdvanceEpoch(ctx, keepers) + + var balance int64 = 10000 + stake := balance / 10 + + consumer1 := common.CreateNewAccount(ctx, *keepers, balance) + common.StakeAccount(t, ctx, *keepers, *servers, consumer1, spec, stake, false) + consumer2 := common.CreateNewAccount(ctx, *keepers, balance) + common.StakeAccount(t, ctx, *keepers, *servers, consumer2, spec, stake, false) + + providers := []common.Account{} + for i := 1; i <= 10; i++ { + provider := common.CreateNewAccount(ctx, *keepers, balance) + common.StakeAccount(t, ctx, *keepers, *servers, provider, spec, stake, true) + providers = append(providers, provider) + } + + ctx = testkeeper.AdvanceEpoch(ctx, keepers) + + // test that 2 different clients get different pairings + pairedProviders, err := keepers.Pairing.GetPairingForClient(sdk.UnwrapSDKContext(ctx), spec.Index, consumer1.Addr) + require.Nil(t, err) + verifyPairingOncurrentBlock := uint64(sdk.UnwrapSDKContext(ctx).BlockHeight()) + testAllProviders := func() { + for idx, provider := range pairedProviders { + providerAddress, err := sdk.AccAddressFromBech32(provider.Address) + require.Nil(t, err) + valid, _, foundIndex, errPairing := keepers.Pairing.ValidatePairingForClient(sdk.UnwrapSDKContext(ctx), spec.Index, consumer1.Addr, providerAddress, verifyPairingOncurrentBlock) + require.Nil(t, errPairing) + require.Equal(t, idx, foundIndex, "Failed ValidatePairingForClient", provider, uint64(sdk.UnwrapSDKContext(ctx).BlockHeight())) + require.True(t, valid) + } + } + startBlock := uint64(sdk.UnwrapSDKContext(ctx).BlockHeight()) + for i := startBlock; i < startBlock+(func() uint64 { + blockToSave, err := keepers.Epochstorage.BlocksToSave(sdk.UnwrapSDKContext(ctx), i) + require.Nil(t, err) + return blockToSave + + })(); i++ { + ctx = testkeeper.AdvanceBlock(ctx, keepers) + testAllProviders() + } + +} + // Test that verifies that new get-pairing return values (CurrentEpoch, TimeLeftToNextPairing, SpecLastUpdatedBlock) is working properly func TestGetPairing(t *testing.T) { // BLOCK_TIME = 30sec (testutil/keeper/keepers_init.go) diff --git a/x/pairing/types/relay.pb.go b/x/pairing/types/relay.pb.go index cdd1fce482..91a8312b96 100644 --- a/x/pairing/types/relay.pb.go +++ b/x/pairing/types/relay.pb.go @@ -270,13 +270,15 @@ func (m *RelayReply) GetSigBlocks() []byte { } type VRFData struct { - 
Differentiator bool `protobuf:"varint,1,opt,name=differentiator,proto3" json:"differentiator,omitempty"` - VrfValue []byte `protobuf:"bytes,2,opt,name=vrf_value,json=vrfValue,proto3" json:"vrf_value,omitempty"` - VrfProof []byte `protobuf:"bytes,3,opt,name=vrf_proof,json=vrfProof,proto3" json:"vrf_proof,omitempty"` - ProviderSig []byte `protobuf:"bytes,4,opt,name=provider_sig,json=providerSig,proto3" json:"provider_sig,omitempty"` - AllDataHash []byte `protobuf:"bytes,5,opt,name=allDataHash,proto3" json:"allDataHash,omitempty"` - QueryHash []byte `protobuf:"bytes,6,opt,name=queryHash,proto3" json:"queryHash,omitempty"` - Sig []byte `protobuf:"bytes,7,opt,name=sig,proto3" json:"sig,omitempty"` + ChainID string `protobuf:"bytes,1,opt,name=chainID,proto3" json:"chainID,omitempty"` + Epoch int64 `protobuf:"varint,2,opt,name=epoch,proto3" json:"epoch,omitempty"` + Differentiator bool `protobuf:"varint,3,opt,name=differentiator,proto3" json:"differentiator,omitempty"` + VrfValue []byte `protobuf:"bytes,4,opt,name=vrf_value,json=vrfValue,proto3" json:"vrf_value,omitempty"` + VrfProof []byte `protobuf:"bytes,5,opt,name=vrf_proof,json=vrfProof,proto3" json:"vrf_proof,omitempty"` + ProviderSig []byte `protobuf:"bytes,6,opt,name=provider_sig,json=providerSig,proto3" json:"provider_sig,omitempty"` + AllDataHash []byte `protobuf:"bytes,7,opt,name=allDataHash,proto3" json:"allDataHash,omitempty"` + QueryHash []byte `protobuf:"bytes,8,opt,name=queryHash,proto3" json:"queryHash,omitempty"` + Sig []byte `protobuf:"bytes,9,opt,name=sig,proto3" json:"sig,omitempty"` } func (m *VRFData) Reset() { *m = VRFData{} } @@ -312,6 +314,20 @@ func (m *VRFData) XXX_DiscardUnknown() { var xxx_messageInfo_VRFData proto.InternalMessageInfo +func (m *VRFData) GetChainID() string { + if m != nil { + return m.ChainID + } + return "" +} + +func (m *VRFData) GetEpoch() int64 { + if m != nil { + return m.Epoch + } + return 0 +} + func (m *VRFData) GetDifferentiator() bool { if m != nil { return m.Differentiator @@ -410,59 +426,60 @@ func init() { func init() { proto.RegisterFile("pairing/relay.proto", fileDescriptor_10cd1bfeb9978acf) } var fileDescriptor_10cd1bfeb9978acf = []byte{ - // 819 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0x41, 0x6f, 0x1b, 0x45, - 0x14, 0xf6, 0x26, 0x4e, 0x6c, 0x3f, 0x3b, 0x0e, 0x9a, 0x26, 0xed, 0x2a, 0x50, 0xc7, 0x2c, 0x52, - 0xeb, 0x03, 0xd8, 0xa8, 0x08, 0x0e, 0x48, 0x48, 0x60, 0x05, 0x68, 0x10, 0xa2, 0xcd, 0x18, 0x7a, - 0xc8, 0x65, 0x35, 0x5e, 0x8f, 0xd7, 0xa3, 0x8e, 0x67, 0xb6, 0x33, 0xbb, 0x16, 0xcb, 0xaf, 0xe0, - 0xb7, 0x70, 0x80, 0x3b, 0xa7, 0x1e, 0x7b, 0x44, 0x1c, 0xa2, 0x2a, 0xf9, 0x07, 0xfc, 0x02, 0x34, - 0x6f, 0x77, 0x13, 0xb7, 0xb5, 0x90, 0x2a, 0x71, 0xda, 0x79, 0xdf, 0x7b, 0xef, 0x7b, 0x7e, 0xdf, - 0x7b, 0x33, 0x86, 0x5b, 0x09, 0x13, 0x46, 0xa8, 0x78, 0x64, 0xb8, 0x64, 0xf9, 0x30, 0x31, 0x3a, - 0xd5, 0xe4, 0x40, 0xb2, 0x15, 0x53, 0x3c, 0x1d, 0xba, 0xef, 0xb0, 0x8c, 0x38, 0x3a, 0x88, 0x75, - 0xac, 0x31, 0x60, 0xe4, 0x4e, 0x45, 0x6c, 0xf0, 0x47, 0x1d, 0x3a, 0xd4, 0xe5, 0x52, 0xfe, 0x2c, - 0xe3, 0x36, 0x25, 0x3e, 0x34, 0xa2, 0x05, 0x13, 0xea, 0xf4, 0xc4, 0xf7, 0xfa, 0xde, 0xa0, 0x45, - 0x2b, 0x93, 0xdc, 0x87, 0xfd, 0x48, 0x2b, 0xc5, 0xa3, 0x54, 0x68, 0x15, 0xa6, 0x79, 0xc2, 0xfd, - 0x2d, 0x8c, 0xe8, 0xde, 0xc0, 0x3f, 0xe6, 0x09, 0x27, 0x77, 0xa0, 0xc1, 0x12, 0x11, 0x66, 0x46, - 0xfa, 0xdb, 0x18, 0xb0, 0xcb, 0x12, 0xf1, 0x93, 0x91, 0xe4, 0x2e, 0x80, 0xe5, 0xd6, 0xba, 0x74, - 0x31, 0xf3, 0xeb, 0x7d, 0x6f, 0x50, 0xa7, 0xad, 
0x12, 0x39, 0x9d, 0x91, 0x43, 0xd8, 0x8d, 0xb2, - 0xd0, 0x66, 0x4b, 0x7f, 0x07, 0x5d, 0x3b, 0x51, 0x36, 0xc9, 0x96, 0x84, 0x40, 0x7d, 0xc6, 0x52, - 0xe6, 0xef, 0xf6, 0xbd, 0x41, 0x87, 0xe2, 0x99, 0xbc, 0x03, 0xdb, 0x56, 0xc4, 0x7e, 0x03, 0x21, - 0x77, 0x24, 0x47, 0xd0, 0x4c, 0x8c, 0x5e, 0x89, 0x19, 0x37, 0x7e, 0x13, 0xab, 0x5e, 0xdb, 0xe4, - 0x7d, 0xe8, 0x4c, 0xa5, 0x8e, 0x9e, 0x86, 0x0b, 0x2e, 0xe2, 0x45, 0xea, 0xb7, 0xfa, 0xde, 0x60, - 0x9b, 0xb6, 0x11, 0x7b, 0x88, 0x10, 0x79, 0x17, 0x5a, 0x28, 0x61, 0xa8, 0xb2, 0xa5, 0x0f, 0x58, - 0xbe, 0x89, 0xc0, 0x0f, 0xd9, 0x92, 0x7c, 0x00, 0x7b, 0xa6, 0x90, 0x27, 0xc4, 0x1c, 0xbf, 0x8d, - 0x04, 0x9d, 0x12, 0x1c, 0x3b, 0x8c, 0x7c, 0x0b, 0xfb, 0x27, 0x2c, 0x65, 0x94, 0x4b, 0xc1, 0xa6, - 0x42, 0x8a, 0x34, 0xf7, 0x3b, 0x7d, 0x6f, 0xd0, 0x7e, 0x70, 0x77, 0xb8, 0x69, 0x1e, 0xc3, 0x27, - 0xf4, 0x1b, 0x8c, 0x7f, 0x3d, 0x8b, 0x7c, 0x07, 0xad, 0x33, 0x3d, 0xa1, 0x3c, 0xd1, 0x26, 0xf5, - 0xf7, 0x90, 0xe2, 0xc3, 0xcd, 0x14, 0x67, 0x19, 0x73, 0x19, 0x8f, 0xe6, 0x13, 0x6e, 0x56, 0x22, - 0xe2, 0x45, 0x0e, 0xbd, 0x49, 0x27, 0x9f, 0xc2, 0xed, 0x4c, 0x19, 0x6e, 0x13, 0xad, 0xac, 0x58, - 0xf1, 0xb0, 0x92, 0xc4, 0xfa, 0x5d, 0x94, 0xee, 0x70, 0xdd, 0xfb, 0xb8, 0x72, 0x92, 0x00, 0x3a, - 0x2c, 0x11, 0xa7, 0x2a, 0xe5, 0x66, 0xce, 0x22, 0xee, 0xef, 0xa3, 0xa0, 0xaf, 0x60, 0xc1, 0x9f, - 0x1e, 0x40, 0xb9, 0x39, 0x89, 0xcc, 0xaf, 0xa7, 0xe4, 0xbd, 0x39, 0xa5, 0xad, 0x9b, 0x29, 0x1d, - 0xc0, 0x8e, 0xd2, 0x2a, 0xe2, 0xb8, 0x18, 0x7b, 0xb4, 0x30, 0xdc, 0x7c, 0x24, 0x4b, 0x6f, 0xe4, - 0xad, 0x17, 0xf3, 0x29, 0xb0, 0x42, 0xdd, 0xcf, 0xe0, 0xce, 0x5c, 0x28, 0x26, 0xc5, 0x2f, 0x7c, - 0x56, 0x44, 0xd9, 0x70, 0xc1, 0xec, 0x82, 0x5b, 0x5c, 0x96, 0x0e, 0x3d, 0xbc, 0x76, 0x63, 0x82, - 0x7d, 0x88, 0x4e, 0x5c, 0x39, 0x11, 0x97, 0x19, 0xe5, 0x0a, 0xb5, 0xac, 0x88, 0x8b, 0xa0, 0xe0, - 0xa5, 0x07, 0x8d, 0x72, 0x10, 0xe4, 0x1e, 0x74, 0x67, 0x62, 0x3e, 0xe7, 0x86, 0xab, 0x54, 0xb0, - 0x54, 0x1b, 0xec, 0xa5, 0x49, 0x5f, 0x43, 0xdd, 0xaa, 0xac, 0xcc, 0x3c, 0x5c, 0x31, 0x99, 0xf1, - 0xb2, 0xb7, 0xe6, 0xca, 0xcc, 0x9f, 0x38, 0xbb, 0x72, 0x26, 0x46, 0xeb, 0x39, 0x36, 0x59, 0x38, - 0x1f, 0x3b, 0xdb, 0xf5, 0x59, 0x0d, 0x20, 0x74, 0xc2, 0xd4, 0xd1, 0xdf, 0xae, 0xb0, 0x89, 0x88, - 0x49, 0x1f, 0xda, 0x4c, 0x4a, 0xf7, 0x7b, 0x5c, 0x03, 0x65, 0x6f, 0xeb, 0x10, 0x79, 0x0f, 0x5a, - 0xcf, 0x32, 0x6e, 0x72, 0xf4, 0x97, 0x0d, 0x5d, 0x03, 0x6f, 0x5e, 0x8c, 0xe0, 0xb7, 0x2d, 0xb8, - 0xbd, 0x79, 0x51, 0xc8, 0x39, 0x34, 0x9c, 0xc6, 0x2a, 0xca, 0x8b, 0xbb, 0x3e, 0xfe, 0xf2, 0xf9, - 0xc5, 0x71, 0xed, 0xef, 0x8b, 0xe3, 0x7b, 0xb1, 0x48, 0x17, 0xd9, 0x74, 0x18, 0xe9, 0xe5, 0x28, - 0xd2, 0x76, 0xa9, 0x6d, 0xf9, 0xf9, 0xc8, 0xce, 0x9e, 0x8e, 0xdc, 0xd5, 0xb7, 0xc3, 0x13, 0x1e, - 0xfd, 0x73, 0x71, 0xdc, 0xcd, 0xd9, 0x52, 0x7e, 0x1e, 0x7c, 0x5f, 0xd0, 0x04, 0xb4, 0x22, 0x24, - 0x02, 0x3a, 0x6c, 0xc5, 0x84, 0xac, 0xee, 0x02, 0x3e, 0x15, 0xe3, 0xaf, 0xdf, 0xba, 0xc0, 0xad, - 0xa2, 0xc0, 0x3a, 0x57, 0x40, 0x5f, 0xa1, 0x26, 0x67, 0x50, 0xb7, 0xb9, 0x8a, 0x8a, 0xc7, 0x66, - 0xfc, 0xc5, 0x5b, 0x97, 0x68, 0x17, 0x25, 0x1c, 0x47, 0x40, 0x91, 0xea, 0xc1, 0xef, 0x1e, 0x34, - 0x70, 0xb9, 0xb9, 0x21, 0x8f, 0x60, 0x07, 0x8f, 0x24, 0xd8, 0x7c, 0x0b, 0xd7, 0x9f, 0xcf, 0xa3, - 0xfe, 0x7f, 0xc6, 0x24, 0x32, 0x0f, 0x6a, 0xe4, 0x1c, 0xba, 0x68, 0x4f, 0xb2, 0xa9, 0x8d, 0x8c, - 0x98, 0xf2, 0xff, 0x8b, 0xf9, 0x63, 0x6f, 0xfc, 0xd5, 0xf3, 0xcb, 0x9e, 0xf7, 0xe2, 0xb2, 0xe7, - 0xbd, 0xbc, 0xec, 0x79, 0xbf, 0x5e, 0xf5, 0x6a, 0x2f, 0xae, 0x7a, 0xb5, 0xbf, 0xae, 0x7a, 0xb5, - 0xf3, 0xfb, 0x6b, 0x7a, 0x94, 0x4c, 0xf8, 0x1d, 0xfd, 0x3c, 0xaa, 0xfe, 
0x44, 0x50, 0x94, 0xe9, - 0x2e, 0xfe, 0x33, 0x7c, 0xf2, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x28, 0x1d, 0x4a, 0xdd, 0x5c, - 0x06, 0x00, 0x00, + // 836 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0x4f, 0x6f, 0x1b, 0x45, + 0x14, 0xf7, 0x26, 0x76, 0x6c, 0x3f, 0x6f, 0x1c, 0x34, 0x4d, 0xda, 0x55, 0xa0, 0x8e, 0x59, 0xa4, + 0xd6, 0x07, 0xb0, 0x51, 0x11, 0x1c, 0x90, 0x90, 0xc0, 0x0a, 0xd0, 0x20, 0x44, 0x9b, 0x31, 0xf4, + 0x90, 0xcb, 0x6a, 0xbc, 0x1e, 0xaf, 0x47, 0x5d, 0xef, 0x6c, 0x67, 0x76, 0x2d, 0x96, 0x4f, 0xd1, + 0xcf, 0xc2, 0x01, 0xee, 0x9c, 0x7a, 0xec, 0x11, 0x71, 0x88, 0x50, 0xf2, 0x0d, 0xf8, 0x04, 0x68, + 0xde, 0xec, 0x3a, 0x6e, 0xb1, 0x2a, 0x55, 0xe2, 0x34, 0xf3, 0x7e, 0xef, 0xdf, 0xce, 0xef, 0xf7, + 0x66, 0x16, 0x6e, 0xa5, 0x4c, 0x28, 0x91, 0x44, 0x23, 0xc5, 0x63, 0x56, 0x0c, 0x53, 0x25, 0x33, + 0x49, 0x0e, 0x63, 0xb6, 0x62, 0x09, 0xcf, 0x86, 0x66, 0x1d, 0x96, 0x11, 0xc7, 0x87, 0x91, 0x8c, + 0x24, 0x06, 0x8c, 0xcc, 0xce, 0xc6, 0xfa, 0xbf, 0xd7, 0xc1, 0xa5, 0x26, 0x97, 0xf2, 0x67, 0x39, + 0xd7, 0x19, 0xf1, 0xa0, 0x19, 0x2e, 0x98, 0x48, 0xce, 0x4e, 0x3d, 0xa7, 0xef, 0x0c, 0xda, 0xb4, + 0x32, 0xc9, 0x7d, 0x38, 0x08, 0x65, 0x92, 0xf0, 0x30, 0x13, 0x32, 0x09, 0xb2, 0x22, 0xe5, 0xde, + 0x0e, 0x46, 0x74, 0x6f, 0xe0, 0x1f, 0x8b, 0x94, 0x93, 0x3b, 0xd0, 0x64, 0xa9, 0x08, 0x72, 0x15, + 0x7b, 0xbb, 0x18, 0xb0, 0xc7, 0x52, 0xf1, 0x93, 0x8a, 0xc9, 0x5d, 0x00, 0xcd, 0xb5, 0x36, 0xe9, + 0x62, 0xe6, 0xd5, 0xfb, 0xce, 0xa0, 0x4e, 0xdb, 0x25, 0x72, 0x36, 0x23, 0x47, 0xb0, 0x17, 0xe6, + 0x81, 0xce, 0x97, 0x5e, 0x03, 0x5d, 0x8d, 0x30, 0x9f, 0xe4, 0x4b, 0x42, 0xa0, 0x3e, 0x63, 0x19, + 0xf3, 0xf6, 0xfa, 0xce, 0xc0, 0xa5, 0xb8, 0x27, 0xef, 0xc0, 0xae, 0x16, 0x91, 0xd7, 0x44, 0xc8, + 0x6c, 0xc9, 0x31, 0xb4, 0x52, 0x25, 0x57, 0x62, 0xc6, 0x95, 0xd7, 0xc2, 0xae, 0x6b, 0x9b, 0xbc, + 0x0f, 0xee, 0x34, 0x96, 0xe1, 0xd3, 0x60, 0xc1, 0x45, 0xb4, 0xc8, 0xbc, 0x76, 0xdf, 0x19, 0xec, + 0xd2, 0x0e, 0x62, 0x0f, 0x11, 0x22, 0xef, 0x42, 0x1b, 0x29, 0x0c, 0x92, 0x7c, 0xe9, 0x01, 0xb6, + 0x6f, 0x21, 0xf0, 0x43, 0xbe, 0x24, 0x1f, 0xc0, 0xbe, 0xb2, 0xf4, 0x04, 0x98, 0xe3, 0x75, 0xb0, + 0x80, 0x5b, 0x82, 0x63, 0x83, 0x91, 0x6f, 0xe1, 0xe0, 0x94, 0x65, 0x8c, 0xf2, 0x58, 0xb0, 0xa9, + 0x88, 0x45, 0x56, 0x78, 0x6e, 0xdf, 0x19, 0x74, 0x1e, 0xdc, 0x1d, 0x6e, 0xd3, 0x63, 0xf8, 0x84, + 0x7e, 0x83, 0xf1, 0xaf, 0x67, 0x91, 0xef, 0xa0, 0x7d, 0x2e, 0x27, 0x94, 0xa7, 0x52, 0x65, 0xde, + 0x3e, 0x96, 0xf8, 0x70, 0x7b, 0x89, 0xf3, 0x9c, 0x99, 0x8c, 0x47, 0xf3, 0x09, 0x57, 0x2b, 0x11, + 0x72, 0x9b, 0x43, 0x6f, 0xd2, 0xc9, 0xa7, 0x70, 0x3b, 0x4f, 0x14, 0xd7, 0xa9, 0x4c, 0xb4, 0x58, + 0xf1, 0xa0, 0xa2, 0x44, 0x7b, 0x5d, 0xa4, 0xee, 0x68, 0xd3, 0xfb, 0xb8, 0x72, 0x12, 0x1f, 0x5c, + 0x96, 0x8a, 0xb3, 0x24, 0xe3, 0x6a, 0xce, 0x42, 0xee, 0x1d, 0x20, 0xa1, 0xaf, 0x60, 0xfe, 0x1f, + 0x0e, 0x40, 0x39, 0x39, 0x69, 0x5c, 0xac, 0x55, 0x72, 0xfe, 0xab, 0xd2, 0xce, 0x8d, 0x4a, 0x87, + 0xd0, 0x48, 0x64, 0x12, 0x72, 0x1c, 0x8c, 0x7d, 0x6a, 0x0d, 0xa3, 0x4f, 0xcc, 0xb2, 0x1b, 0x7a, + 0xeb, 0x56, 0x1f, 0x8b, 0x59, 0x76, 0x3f, 0x83, 0x3b, 0x73, 0x91, 0xb0, 0x58, 0xfc, 0xc2, 0x67, + 0x36, 0x4a, 0x07, 0x0b, 0xa6, 0x17, 0x5c, 0xe3, 0xb0, 0xb8, 0xf4, 0x68, 0xed, 0xc6, 0x04, 0xfd, + 0x10, 0x9d, 0x38, 0x72, 0x22, 0x2a, 0x33, 0xca, 0x11, 0x6a, 0x6b, 0x11, 0xd9, 0x20, 0xff, 0xf9, + 0x0e, 0x34, 0x4b, 0x21, 0xde, 0x30, 0xf9, 0x87, 0xd0, 0xe0, 0xa9, 0x0c, 0x17, 0x78, 0x92, 0x5d, + 0x6a, 0x0d, 0x72, 0x0f, 0xba, 0x33, 0x31, 0x9f, 0x73, 0xc5, 0x93, 0x4c, 0xb0, 0x4c, 0x2a, 0x3c, + 0x54, 0x8b, 0xbe, 0x86, 0x9a, 
0xd1, 0x5a, 0xa9, 0x79, 0xb0, 0x62, 0x71, 0xce, 0xf1, 0x68, 0x2e, + 0x6d, 0xad, 0xd4, 0xfc, 0x89, 0xb1, 0x2b, 0x67, 0xaa, 0xa4, 0x9c, 0x97, 0x27, 0x31, 0xce, 0xc7, + 0xc6, 0x36, 0xbc, 0x54, 0x82, 0x05, 0x86, 0x48, 0xfb, 0xf9, 0x9d, 0x0a, 0x9b, 0x88, 0x88, 0xf4, + 0xa1, 0xc3, 0xe2, 0xd8, 0x7c, 0xbf, 0x39, 0x70, 0x79, 0x21, 0x36, 0x21, 0xf2, 0x1e, 0xb4, 0x9f, + 0xe5, 0x5c, 0x15, 0xe8, 0x6f, 0x59, 0x02, 0xd6, 0x40, 0x25, 0x51, 0x7b, 0x2d, 0x91, 0xff, 0xeb, + 0x0e, 0xdc, 0xde, 0x3e, 0x58, 0xe4, 0x02, 0x9a, 0x46, 0x93, 0x24, 0x2c, 0x2c, 0x43, 0xe3, 0x2f, + 0x5f, 0x5c, 0x9e, 0xd4, 0xfe, 0xba, 0x3c, 0xb9, 0x17, 0x89, 0x6c, 0x91, 0x4f, 0x87, 0xa1, 0x5c, + 0x8e, 0x42, 0xa9, 0x97, 0x52, 0x97, 0xcb, 0x47, 0x7a, 0xf6, 0x74, 0x64, 0x9e, 0x0a, 0x3d, 0x3c, + 0xe5, 0xe1, 0x3f, 0x97, 0x27, 0xdd, 0x82, 0x2d, 0xe3, 0xcf, 0xfd, 0xef, 0x6d, 0x19, 0x9f, 0x56, + 0x05, 0x89, 0x00, 0x97, 0xad, 0x98, 0x88, 0xab, 0xbb, 0x83, 0x4f, 0xcb, 0xf8, 0xeb, 0xb7, 0x6e, + 0x70, 0xcb, 0x36, 0xd8, 0xac, 0xe5, 0xd3, 0x57, 0x4a, 0x93, 0x73, 0xa8, 0xeb, 0x22, 0x09, 0xed, + 0xe3, 0x34, 0xfe, 0xe2, 0xad, 0x5b, 0x74, 0x6c, 0x0b, 0x53, 0xc3, 0xa7, 0x58, 0xea, 0xc1, 0x6f, + 0x0e, 0x34, 0xf1, 0x32, 0x70, 0x45, 0x1e, 0x41, 0x03, 0xb7, 0xc4, 0xdf, 0x7e, 0x6b, 0x37, 0x9f, + 0xdb, 0xe3, 0xfe, 0x1b, 0x63, 0xd2, 0xb8, 0xf0, 0x6b, 0xe4, 0x02, 0xba, 0x68, 0x4f, 0xf2, 0xa9, + 0x0e, 0x95, 0x98, 0xf2, 0xff, 0xab, 0xf2, 0xc7, 0xce, 0xf8, 0xab, 0x17, 0x57, 0x3d, 0xe7, 0xe5, + 0x55, 0xcf, 0xf9, 0xfb, 0xaa, 0xe7, 0x3c, 0xbf, 0xee, 0xd5, 0x5e, 0x5e, 0xf7, 0x6a, 0x7f, 0x5e, + 0xf7, 0x6a, 0x17, 0xf7, 0x37, 0xf8, 0x28, 0x2b, 0xe1, 0x3a, 0xfa, 0x79, 0x54, 0xfd, 0x74, 0x90, + 0x94, 0xe9, 0x1e, 0xfe, 0x49, 0x3e, 0xf9, 0x37, 0x00, 0x00, 0xff, 0xff, 0xb3, 0x0e, 0x07, 0x7d, + 0x8c, 0x06, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
@@ -823,42 +840,42 @@ func (m *VRFData) MarshalToSizedBuffer(dAtA []byte) (int, error) { copy(dAtA[i:], m.Sig) i = encodeVarintRelay(dAtA, i, uint64(len(m.Sig))) i-- - dAtA[i] = 0x3a + dAtA[i] = 0x4a } if len(m.QueryHash) > 0 { i -= len(m.QueryHash) copy(dAtA[i:], m.QueryHash) i = encodeVarintRelay(dAtA, i, uint64(len(m.QueryHash))) i-- - dAtA[i] = 0x32 + dAtA[i] = 0x42 } if len(m.AllDataHash) > 0 { i -= len(m.AllDataHash) copy(dAtA[i:], m.AllDataHash) i = encodeVarintRelay(dAtA, i, uint64(len(m.AllDataHash))) i-- - dAtA[i] = 0x2a + dAtA[i] = 0x3a } if len(m.ProviderSig) > 0 { i -= len(m.ProviderSig) copy(dAtA[i:], m.ProviderSig) i = encodeVarintRelay(dAtA, i, uint64(len(m.ProviderSig))) i-- - dAtA[i] = 0x22 + dAtA[i] = 0x32 } if len(m.VrfProof) > 0 { i -= len(m.VrfProof) copy(dAtA[i:], m.VrfProof) i = encodeVarintRelay(dAtA, i, uint64(len(m.VrfProof))) i-- - dAtA[i] = 0x1a + dAtA[i] = 0x2a } if len(m.VrfValue) > 0 { i -= len(m.VrfValue) copy(dAtA[i:], m.VrfValue) i = encodeVarintRelay(dAtA, i, uint64(len(m.VrfValue))) i-- - dAtA[i] = 0x12 + dAtA[i] = 0x22 } if m.Differentiator { i-- @@ -868,7 +885,19 @@ func (m *VRFData) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0 } i-- - dAtA[i] = 0x8 + dAtA[i] = 0x18 + } + if m.Epoch != 0 { + i = encodeVarintRelay(dAtA, i, uint64(m.Epoch)) + i-- + dAtA[i] = 0x10 + } + if len(m.ChainID) > 0 { + i -= len(m.ChainID) + copy(dAtA[i:], m.ChainID) + i = encodeVarintRelay(dAtA, i, uint64(len(m.ChainID))) + i-- + dAtA[i] = 0xa } return len(dAtA) - i, nil } @@ -1038,6 +1067,13 @@ func (m *VRFData) Size() (n int) { } var l int _ = l + l = len(m.ChainID) + if l > 0 { + n += 1 + l + sovRelay(uint64(l)) + } + if m.Epoch != 0 { + n += 1 + sovRelay(uint64(m.Epoch)) + } if m.Differentiator { n += 2 } @@ -1822,6 +1858,57 @@ func (m *VRFData) Unmarshal(dAtA []byte) error { } switch fieldNum { case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRelay + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRelay + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRelay + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChainID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Epoch", wireType) + } + m.Epoch = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRelay + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Epoch |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Differentiator", wireType) } @@ -1841,7 +1928,7 @@ func (m *VRFData) Unmarshal(dAtA []byte) error { } } m.Differentiator = bool(v != 0) - case 2: + case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field VrfValue", wireType) } @@ -1875,7 +1962,7 @@ func (m *VRFData) Unmarshal(dAtA []byte) error { m.VrfValue = []byte{} } iNdEx = postIndex - case 3: + case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field VrfProof", wireType) } @@ -1909,7 +1996,7 @@ func (m *VRFData) Unmarshal(dAtA []byte) error { m.VrfProof = 
[]byte{} } iNdEx = postIndex - case 4: + case 6: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ProviderSig", wireType) } @@ -1943,7 +2030,7 @@ func (m *VRFData) Unmarshal(dAtA []byte) error { m.ProviderSig = []byte{} } iNdEx = postIndex - case 5: + case 7: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field AllDataHash", wireType) } @@ -1977,7 +2064,7 @@ func (m *VRFData) Unmarshal(dAtA []byte) error { m.AllDataHash = []byte{} } iNdEx = postIndex - case 6: + case 8: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field QueryHash", wireType) } @@ -2011,7 +2098,7 @@ func (m *VRFData) Unmarshal(dAtA []byte) error { m.QueryHash = []byte{} } iNdEx = postIndex - case 7: + case 9: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Sig", wireType) } From 706edb3cc9d5b0aefed4d183027ef395074b41ef Mon Sep 17 00:00:00 2001 From: Ran Mishael Date: Tue, 14 Mar 2023 19:15:19 +0100 Subject: [PATCH 104/123] panic protection --- protocol/rpcconsumer/rpcconsumer_server.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/protocol/rpcconsumer/rpcconsumer_server.go b/protocol/rpcconsumer/rpcconsumer_server.go index c5d0006131..0f493fbe38 100644 --- a/protocol/rpcconsumer/rpcconsumer_server.go +++ b/protocol/rpcconsumer/rpcconsumer_server.go @@ -313,6 +313,12 @@ func (rpccs *RPCConsumerServer) sendDataReliabilityRelayIfApplicable(ctx context // send the data reliability relay message with the lavaprotocol grpc service // check validity of the data reliability response with the lavaprotocol package // compare results for both relays, if there is a difference send a detection tx with both requests and both responses + + // validate relayResult is not nil + if relayResult == nil || relayResult.Reply == nil || relayResult.Request == nil { + return utils.LavaFormatError("sendDataReliabilityRelayIfApplicable relayResult nil check", nil, &map[string]string{"relayResult": fmt.Sprintf("%#v", relayResult), "relayRequestCommonData": fmt.Sprintf("%#v", relayRequestCommonData)}) + } + specCategory := chainMessage.GetInterface().Category if !specCategory.Deterministic || !relayResult.Finalized { return nil // disabled for this spec and requested block so no data reliability messages From 1b219a8e385774fc9a35147a57bfcdf41d760aa6 Mon Sep 17 00:00:00 2001 From: omer mishael Date: Wed, 15 Mar 2023 12:14:10 +0200 Subject: [PATCH 105/123] finished merging --- protocol/rpcconsumer/rpcconsumer_server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/protocol/rpcconsumer/rpcconsumer_server.go b/protocol/rpcconsumer/rpcconsumer_server.go index 21c73ee206..b175c3b8fd 100644 --- a/protocol/rpcconsumer/rpcconsumer_server.go +++ b/protocol/rpcconsumer/rpcconsumer_server.go @@ -319,7 +319,7 @@ func (rpccs *RPCConsumerServer) sendDataReliabilityRelayIfApplicable(ctx context // validate relayResult is not nil if relayResult == nil || relayResult.Reply == nil || relayResult.Request == nil { - return utils.LavaFormatError("sendDataReliabilityRelayIfApplicable relayResult nil check", nil, &map[string]string{"relayResult": fmt.Sprintf("%#v", relayResult), "relayRequestCommonData": fmt.Sprintf("%#v", relayRequestCommonData)}) + return utils.LavaFormatError("sendDataReliabilityRelayIfApplicable relayResult nil check", nil, &map[string]string{"relayResult": fmt.Sprintf("%#v", relayResult)}) } specCategory := chainMessage.GetInterface().Category From ae9a7f123dd537b454e07ef995d2e0ec3e28267a Mon Sep 17 00:00:00 2001 From: omer 
mishael Date: Wed, 15 Mar 2023 12:42:37 +0200 Subject: [PATCH 106/123] lint + fix --- .golangci.yml | 1 + protocol/chainlib/chainproxy/common.go | 2 ++ protocol/chainlib/rest.go | 1 - protocol/chaintracker/chain_tracker.go | 3 +-- protocol/lavaprotocol/request_builder.go | 1 - protocol/lavaprotocol/reuqest_builder_test.go | 8 +++--- protocol/lavasession/provider_types.go | 26 ++++++++++--------- protocol/rpcprovider/provider_listener.go | 10 +++---- .../rpcprovider/reliabilitymanager/errors.go | 4 +-- .../reliabilitymanager/reliability_manager.go | 3 --- .../rpcprovider/rewardserver/reward_server.go | 6 ++--- protocol/rpcprovider/rpcprovider_server.go | 2 -- .../statetracker/provider_state_tracker.go | 1 + protocol/statetracker/state_query.go | 3 +-- relayer/sentry/sentry.go | 20 +++++++------- testutil/e2e/e2e.go | 1 - 16 files changed, 42 insertions(+), 50 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 1c4d31fe08..ce259eb2b0 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -5,6 +5,7 @@ run: skip-files: - "relayer/chainproxy/thirdparty/*" - "relayer/chainproxy/grpc.go" + - "protocol/chainlib/chainproxy/rpcInterfaceMessages/grpcMessage.go" - "protocol/chainlib/chainproxy/thirdparty/*" - "protocol/chainlib/grpc.go" diff --git a/protocol/chainlib/chainproxy/common.go b/protocol/chainlib/chainproxy/common.go index 28894682fe..8a2bf5462c 100644 --- a/protocol/chainlib/chainproxy/common.go +++ b/protocol/chainlib/chainproxy/common.go @@ -22,9 +22,11 @@ type DefaultRPCInput struct { func (dri DefaultRPCInput) GetParams() interface{} { return nil } + func (dri DefaultRPCInput) GetResult() json.RawMessage { return dri.Result } + func (dri DefaultRPCInput) ParseBlock(inp string) (int64, error) { return parser.ParseDefaultBlockParameter(inp) } diff --git a/protocol/chainlib/rest.go b/protocol/chainlib/rest.go index 13e5492300..6918999e3c 100644 --- a/protocol/chainlib/rest.go +++ b/protocol/chainlib/rest.go @@ -79,7 +79,6 @@ func (apip *RestChainParser) ParseMsg(url string, data []byte, connectionType st Msg: nil, Path: url + string(data), } - } // TODO fix requested block diff --git a/protocol/chaintracker/chain_tracker.go b/protocol/chaintracker/chain_tracker.go index c6656f918e..f569c4e97e 100644 --- a/protocol/chaintracker/chain_tracker.go +++ b/protocol/chaintracker/chain_tracker.go @@ -158,7 +158,7 @@ func (cs *ChainTracker) fetchAllPreviousBlocks(ctx context.Context, latestBlock if blocksQueueLen < cs.blocksToSave { return utils.LavaFormatError("fetchAllPreviousBlocks didn't save enough blocks in Chain Tracker", nil, &map[string]string{"blocksQueueLen": strconv.FormatUint(blocksQueueLen, 10)}) } - utils.LavaFormatDebug("Chain Tracker Updated block hashes", &map[string]string{"latest_block": strconv.FormatInt(latestBlock, 10), "latestHash": latestHash, "blocksQueueLen": strconv.FormatUint(blocksQueueLen, 10), "blocksQueried": strconv.FormatInt(int64(int64(cs.blocksToSave)-blocksCopied), 10), "blocksKept": strconv.FormatInt(blocksCopied, 10), "ChainID": cs.endpoint.ChainID, "ApiInterface": cs.endpoint.ApiInterface}) + utils.LavaFormatDebug("Chain Tracker Updated block hashes", &map[string]string{"latest_block": strconv.FormatInt(latestBlock, 10), "latestHash": latestHash, "blocksQueueLen": strconv.FormatUint(blocksQueueLen, 10), "blocksQueried": strconv.FormatInt(int64(cs.blocksToSave)-blocksCopied, 10), "blocksKept": strconv.FormatInt(blocksCopied, 10), "ChainID": cs.endpoint.ChainID, "ApiInterface": cs.endpoint.ApiInterface}) return nil } @@ -237,7 +237,6 @@ func (cs 
*ChainTracker) fetchAllPreviousBlocksIfNecessary(ctx context.Context) ( return utils.LavaFormatError("could not fetchLatestBlock Hash in ChainTracker", err, &map[string]string{"block": strconv.FormatInt(newLatestBlock, 10), "endpoint": cs.endpoint.String()}) } if gotNewBlock || forked { - // utils.LavaFormatDebug("ChainTracker should update state", &map[string]string{"gotNewBlock": fmt.Sprintf("%t", gotNewBlock), "forked": fmt.Sprintf("%t", forked), "newLatestBlock": strconv.FormatInt(newLatestBlock, 10), "currentBlock": strconv.FormatInt(cs.GetLatestBlockNum(), 10)}) prev_latest := cs.GetLatestBlockNum() cs.fetchAllPreviousBlocks(ctx, newLatestBlock) if gotNewBlock { diff --git a/protocol/lavaprotocol/request_builder.go b/protocol/lavaprotocol/request_builder.go index 82d2f181d7..ec1b270a54 100644 --- a/protocol/lavaprotocol/request_builder.go +++ b/protocol/lavaprotocol/request_builder.go @@ -86,7 +86,6 @@ func dataReliabilityRelaySession(lavaChainID string, relayRequestData *pairingty } func ConstructRelayRequest(ctx context.Context, privKey *btcec.PrivateKey, lavaChainID string, chainID string, relayRequestData *pairingtypes.RelayPrivateData, providerPublicAddress string, consumerSession *lavasession.SingleConsumerSession, epoch int64, reportedProviders []byte) (*pairingtypes.RelayRequest, error) { - relayRequest := &pairingtypes.RelayRequest{ RelayData: relayRequestData, RelaySession: ConstructRelaySession(lavaChainID, relayRequestData, chainID, providerPublicAddress, consumerSession, epoch, reportedProviders), diff --git a/protocol/lavaprotocol/reuqest_builder_test.go b/protocol/lavaprotocol/reuqest_builder_test.go index 8914e6b921..ec104677ac 100644 --- a/protocol/lavaprotocol/reuqest_builder_test.go +++ b/protocol/lavaprotocol/reuqest_builder_test.go @@ -13,7 +13,7 @@ import ( func TestSignAndExtract(t *testing.T) { ctx := context.Background() sk, address := sigs.GenerateFloatingKey() - chainId := "LAV1" + specId := "LAV1" epoch := int64(100) singleConsumerSession := &lavasession.SingleConsumerSession{ CuSum: 20, @@ -27,12 +27,12 @@ func TestSignAndExtract(t *testing.T) { BlockListed: false, // if session lost sync we blacklist it. ConsecutiveNumberOfFailures: 0, // number of times this session has failed } - commonData := NewRelayRequestCommonData(chainId, "GET", "stub_url", []byte("stub_data"), 10, "tendermintrpc") - relay, err := ConstructRelayRequest(ctx, sk, chainId, commonData, "lava@stubProviderAddress", singleConsumerSession, epoch, []byte("stubbytes")) + relayRequestData := NewRelayData("GET", "stub_url", []byte("stub_data"), 10, "tendermintrpc") + relay, err := ConstructRelayRequest(ctx, sk, "lava", specId, relayRequestData, "lava@stubProviderAddress", singleConsumerSession, epoch, []byte("stubbytes")) require.Nil(t, err) // check signature - extractedConsumerAddress, err := sigs.ExtractSignerAddress(relay) + extractedConsumerAddress, err := sigs.ExtractSignerAddress(relay.RelaySession) require.Nil(t, err) require.Equal(t, extractedConsumerAddress, address) } diff --git a/protocol/lavasession/provider_types.go b/protocol/lavasession/provider_types.go index 3c1300ebe7..43c7f7cab8 100644 --- a/protocol/lavasession/provider_types.go +++ b/protocol/lavasession/provider_types.go @@ -248,9 +248,10 @@ func (sps *SingleProviderSession) PrepareDataReliabilitySessionForUsage(relayReq sps.LatestRelayCu = DataReliabilityCuSum // 1. update latest sps.CuSum = relayRequestTotalCU // 2. 
update CuSum, if consumer wants to pay more, let it sps.RelayNum += 1 - utils.LavaFormatDebug("PrepareDataReliabilitySessionForUsage", &map[string]string{"relayRequestTotalCU": strconv.FormatUint(relayRequestTotalCU, 10), - "sps.LatestRelayCu": strconv.FormatUint(sps.LatestRelayCu, 10), - "sps.RelayNum": strconv.FormatUint(sps.RelayNum, 10), + utils.LavaFormatDebug("PrepareDataReliabilitySessionForUsage", &map[string]string{ + "relayRequestTotalCU": strconv.FormatUint(relayRequestTotalCU, 10), + "sps.LatestRelayCu": strconv.FormatUint(sps.LatestRelayCu, 10), + "sps.RelayNum": strconv.FormatUint(sps.RelayNum, 10), }) return nil } @@ -291,11 +292,12 @@ func (sps *SingleProviderSession) PrepareSessionForUsage(cuFromSpec uint64, rela sps.LatestRelayCu = cuToAdd // 1. update latest sps.CuSum += cuToAdd // 2. update CuSum, if consumer wants to pay more, let it sps.RelayNum = relayNumber // 3. update RelayNum, we already verified relayNum is valid in GetSession. - utils.LavaFormatDebug("Before Update Normal PrepareSessionForUsage", &map[string]string{"relayRequestTotalCU": strconv.FormatUint(relayRequestTotalCU, 10), - "sps.LatestRelayCu": strconv.FormatUint(sps.LatestRelayCu, 10), - "sps.RelayNum": strconv.FormatUint(sps.RelayNum, 10), - "sps.CuSum": strconv.FormatUint(sps.CuSum, 10), - "sps.sessionId": strconv.FormatUint(sps.SessionID, 10), + utils.LavaFormatDebug("Before Update Normal PrepareSessionForUsage", &map[string]string{ + "relayRequestTotalCU": strconv.FormatUint(relayRequestTotalCU, 10), + "sps.LatestRelayCu": strconv.FormatUint(sps.LatestRelayCu, 10), + "sps.RelayNum": strconv.FormatUint(sps.RelayNum, 10), + "sps.CuSum": strconv.FormatUint(sps.CuSum, 10), + "sps.sessionId": strconv.FormatUint(sps.SessionID, 10), }) return nil } @@ -329,8 +331,8 @@ func (sps *SingleProviderSession) validateAndSubUsedCU(currentCU uint64) error { } func (sps *SingleProviderSession) onDataReliabilitySessionFailure() error { - sps.CuSum = sps.CuSum - sps.LatestRelayCu - sps.RelayNum = sps.RelayNum - 1 + sps.CuSum -= sps.LatestRelayCu + sps.RelayNum -= 1 sps.LatestRelayCu = 0 return nil } @@ -347,8 +349,8 @@ func (sps *SingleProviderSession) onSessionFailure() error { return sps.onDataReliabilitySessionFailure() } - sps.CuSum = sps.CuSum - sps.LatestRelayCu - sps.RelayNum = sps.RelayNum - 1 + sps.CuSum -= sps.LatestRelayCu + sps.RelayNum -= 1 sps.validateAndSubUsedCU(sps.LatestRelayCu) sps.LatestRelayCu = 0 return nil diff --git a/protocol/rpcprovider/provider_listener.go b/protocol/rpcprovider/provider_listener.go index bd8920e1fe..d7d2bccbc0 100644 --- a/protocol/rpcprovider/provider_listener.go +++ b/protocol/rpcprovider/provider_listener.go @@ -2,12 +2,11 @@ package rpcprovider import ( "context" - "strings" - "sync" - "errors" "net" "net/http" + "strings" + "sync" "github.com/lavanet/lava/protocol/lavasession" @@ -70,16 +69,15 @@ func NewProviderListener(ctx context.Context, networkAddress string) *ProviderLi wrappedServer.ServeHTTP(resp, req) } - httpServer := http.Server{ + pl.httpServer = http.Server{ Handler: h2c.NewHandler(http.HandlerFunc(handler), &http2.Server{}), } - pl.httpServer = httpServer relayServer := &relayServer{relayReceivers: map[string]RelayReceiver{}} pl.relayServer = relayServer pairingtypes.RegisterRelayerServer(grpcServer, relayServer) go func() { utils.LavaFormatInfo("New provider listener active", &map[string]string{"address": networkAddress}) - if err := httpServer.Serve(lis); !errors.Is(err, http.ErrServerClosed) { + if err := pl.httpServer.Serve(lis); !errors.Is(err, 
http.ErrServerClosed) { utils.LavaFormatFatal("provider failed to serve", err, &map[string]string{"Address": lis.Addr().String()}) } utils.LavaFormatInfo("listener closed server", &map[string]string{"address": networkAddress}) diff --git a/protocol/rpcprovider/reliabilitymanager/errors.go b/protocol/rpcprovider/reliabilitymanager/errors.go index 5fe437d437..3c3534e1f5 100644 --- a/protocol/rpcprovider/reliabilitymanager/errors.go +++ b/protocol/rpcprovider/reliabilitymanager/errors.go @@ -4,6 +4,4 @@ import ( sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" ) -var ( - NoVoteDeadline = sdkerrors.New("Not Connected Error", 800, "No Connection To grpc server") -) +var NoVoteDeadline = sdkerrors.New("Not Connected Error", 800, "No Connection To grpc server") diff --git a/protocol/rpcprovider/reliabilitymanager/reliability_manager.go b/protocol/rpcprovider/reliabilitymanager/reliability_manager.go index 2a4045198b..c953a4d2de 100644 --- a/protocol/rpcprovider/reliabilitymanager/reliability_manager.go +++ b/protocol/rpcprovider/reliabilitymanager/reliability_manager.go @@ -39,7 +39,6 @@ type ReliabilityManager struct { } func (rm *ReliabilityManager) VoteHandler(voteParams *VoteParams, nodeHeight uint64) { - // got a vote event, handle the cases here voteID := voteParams.VoteID voteDeadline := voteParams.VoteDeadline @@ -108,7 +107,6 @@ func (rm *ReliabilityManager) VoteHandler(voteParams *VoteParams, nodeHeight uin return } reply, _, _, err := rm.chainProxy.SendNodeMsg(ctx, nil, chainMessage) - if err != nil { utils.LavaFormatError("vote relay send has failed", err, &map[string]string{"ApiURL": voteParams.ApiURL, "RequestData": string(voteParams.RequestData)}) @@ -233,7 +231,6 @@ func BuildVoteParamsFromDetectionEvent(event terderminttypes.Event) (*VoteParams requestBlock, err := strconv.ParseUint(num_str, 10, 64) if err != nil { return nil, utils.LavaFormatError("vote requested block could not be parsed", err, &map[string]string{"requested block": num_str, "voteID": voteID}) - } num_str, ok = attributes["voteDeadline"] if !ok { diff --git a/protocol/rpcprovider/rewardserver/reward_server.go b/protocol/rpcprovider/rewardserver/reward_server.go index 5b750ef6c7..54a62be50f 100644 --- a/protocol/rpcprovider/rewardserver/reward_server.go +++ b/protocol/rpcprovider/rewardserver/reward_server.go @@ -191,7 +191,7 @@ func (rws *RewardServer) identifyMissingPayments(ctx context.Context) (missingPa "total CU serviced": strconv.FormatUint(rws.cUServiced(), 10), "total CU that got paid": strconv.FormatUint(rws.paidCU(), 10), }) - return + return missingPayments, err } func (rws *RewardServer) cUServiced() uint64 { @@ -236,7 +236,7 @@ func (rws *RewardServer) gatherRewardsForClaim(ctx context.Context, currentEpoch } activeEpochThreshold := currentEpoch - blockDistanceForEpochValidity for epoch, epochRewards := range rws.rewards { - if lavasession.IsEpochValidForUse(epoch, uint64(activeEpochThreshold)) { + if lavasession.IsEpochValidForUse(epoch, activeEpochThreshold) { // Epoch is still active so we don't claim the rewards yet. 
continue } @@ -255,7 +255,7 @@ func (rws *RewardServer) gatherRewardsForClaim(ctx context.Context, currentEpoch delete(rws.rewards, epoch) } } - return + return rewardsForClaim, dataReliabilityProofs, errRet } func (rws *RewardServer) SubscribeStarted(consumer string, epoch uint64, subscribeID string) { diff --git a/protocol/rpcprovider/rpcprovider_server.go b/protocol/rpcprovider/rpcprovider_server.go index 894cc6316f..fb08c2b3b0 100644 --- a/protocol/rpcprovider/rpcprovider_server.go +++ b/protocol/rpcprovider/rpcprovider_server.go @@ -114,7 +114,6 @@ func (rpcps *RPCProviderServer) Relay(ctx context.Context, request *pairingtypes "request.userAddr": consumerAddress.String(), }) } else { - // On successful relay relayError := rpcps.providerSessionManager.OnSessionDone(relaySession) if relayError != nil { @@ -386,7 +385,6 @@ func (rpcps *RPCProviderServer) getSingleProviderSession(ctx context.Context, re singleProviderSession, err := rpcps.providerSessionManager.GetSession(consumerAddressString, uint64(request.Epoch), request.SessionId, request.RelayNum) if err != nil { if lavasession.ConsumerNotRegisteredYet.Is(err) { - valid, selfProviderIndex, verifyPairingError := rpcps.stateTracker.VerifyPairing(ctx, consumerAddressString, rpcps.providerAddress.String(), uint64(request.Epoch), request.SpecID) if verifyPairingError != nil { return nil, utils.LavaFormatError("Failed to VerifyPairing after ConsumerNotRegisteredYet", verifyPairingError, &map[string]string{"sessionID": strconv.FormatUint(request.SessionId, 10), "consumer": consumerAddressString, "provider": rpcps.providerAddress.String(), "relayNum": strconv.FormatUint(request.RelayNum, 10)}) diff --git a/protocol/statetracker/provider_state_tracker.go b/protocol/statetracker/provider_state_tracker.go index 22268e7725..b3f06ad9e7 100644 --- a/protocol/statetracker/provider_state_tracker.go +++ b/protocol/statetracker/provider_state_tracker.go @@ -83,6 +83,7 @@ func (pst *ProviderStateTracker) TxRelayPayment(ctx context.Context, relayReques func (pst *ProviderStateTracker) SendVoteReveal(voteID string, vote *reliabilitymanager.VoteData) error { return pst.txSender.SendVoteReveal(voteID, vote) } + func (pst *ProviderStateTracker) SendVoteCommitment(voteID string, vote *reliabilitymanager.VoteData) error { return pst.txSender.SendVoteCommitment(voteID, vote) } diff --git a/protocol/statetracker/state_query.go b/protocol/statetracker/state_query.go index cd4eda5886..992725044f 100644 --- a/protocol/statetracker/state_query.go +++ b/protocol/statetracker/state_query.go @@ -157,7 +157,6 @@ func (psq *ProviderStateQuery) CurrentEpochStart(ctx context.Context) (uint64, e } details := epochDetails.GetEpochDetails() return details.StartBlock, nil - } func (psq *ProviderStateQuery) PaymentEvents(ctx context.Context, latestBlock int64) (payments []*rewardserver.PaymentRequest, err error) { @@ -225,7 +224,7 @@ func (psq *ProviderStateQuery) VoteEvents(ctx context.Context, latestBlock int64 utils.LavaFormatDebug("conflict_vote_resolved_event", &map[string]string{"voteID": voteID}) } } - return + return votes, err } func (psq *ProviderStateQuery) VerifyPairing(ctx context.Context, consumerAddress string, providerAddress string, epoch uint64, chainID string) (valid bool, index int64, err error) { diff --git a/relayer/sentry/sentry.go b/relayer/sentry/sentry.go index 62687e7419..7a190ff225 100755 --- a/relayer/sentry/sentry.go +++ b/relayer/sentry/sentry.go @@ -653,16 +653,16 @@ func (s *Sentry) Start(ctx context.Context) { } // // Update pairing - if 
s.isUser { - // pairingList, err := s.getPairing(ctx) - // if err != nil { - // utils.LavaFormatFatal("Failed getting pairing for consumer in initialization", err, &map[string]string{"Address": s.Acc}) - // } - // err = s.consumerSessionManager.UpdateAllProviders(s.GetCurrentEpochHeight(), pairingList) - // if err != nil { - // utils.LavaFormatFatal("Failed UpdateAllProviders", err, &map[string]string{"Address": s.Acc}) - // } - } + // if s.isUser { + // pairingList, err := s.getPairing(ctx) + // if err != nil { + // utils.LavaFormatFatal("Failed getting pairing for consumer in initialization", err, &map[string]string{"Address": s.Acc}) + // } + // err = s.consumerSessionManager.UpdateAllProviders(s.GetCurrentEpochHeight(), pairingList) + // if err != nil { + // utils.LavaFormatFatal("Failed UpdateAllProviders", err, &map[string]string{"Address": s.Acc}) + // } + // } s.clearAuthResponseCache(data.Block.Height) // TODO: Remove this after provider session manager is fully functional } diff --git a/testutil/e2e/e2e.go b/testutil/e2e/e2e.go index 1528fda95c..acbcefd1c6 100644 --- a/testutil/e2e/e2e.go +++ b/testutil/e2e/e2e.go @@ -233,7 +233,6 @@ func (lt *lavaTest) startJSONRPCProvider(ctx context.Context) { go func(idx int) { lt.listenCmdCommand(cmd, "startJSONRPCProvider process returned unexpectedly, provider idx:"+strconv.Itoa(idx), "startJSONRPCProvider") }(idx) - } // validate all providers are up for idx := 0; idx < len(providerCommands); idx++ { From 78c2875c22ef1efbf39a268e24f07fc5c7a7c5a0 Mon Sep 17 00:00:00 2001 From: omer mishael Date: Wed, 15 Mar 2023 14:09:01 +0200 Subject: [PATCH 107/123] fixed data reliability miss protection to surface problems and fix unitests --- x/pairing/keeper/msg_server_relay_payment.go | 3 + .../keeper/msg_server_relay_payment_test.go | 286 +++--------------- 2 files changed, 52 insertions(+), 237 deletions(-) diff --git a/x/pairing/keeper/msg_server_relay_payment.go b/x/pairing/keeper/msg_server_relay_payment.go index e0c8479359..f84098f459 100644 --- a/x/pairing/keeper/msg_server_relay_payment.go +++ b/x/pairing/keeper/msg_server_relay_payment.go @@ -280,6 +280,9 @@ func (k msgServer) RelayPayment(goCtx context.Context, msg *types.MsgRelayPaymen utils.LogLavaEvent(ctx, logger, types.UnresponsiveProviderUnstakeFailedEventName, map[string]string{"err:": err.Error()}, "Error Unresponsive Providers could not unstake") } } + if len(dataReliabilityStore) > 0 { + return nil, utils.LavaError(ctx, k.Logger(ctx), "invalid relay payment with unused data reliability proofs", map[string]string{"dataReliabilityProofs": fmt.Sprintf("%+v", dataReliabilityStore)}, "didn't find a usage match for each relay") + } return &types.MsgRelayPaymentResponse{}, nil } diff --git a/x/pairing/keeper/msg_server_relay_payment_test.go b/x/pairing/keeper/msg_server_relay_payment_test.go index c7378a3bd6..1e0b7bbf71 100644 --- a/x/pairing/keeper/msg_server_relay_payment_test.go +++ b/x/pairing/keeper/msg_server_relay_payment_test.go @@ -145,15 +145,9 @@ func TestRelayPaymentMemoryTransferAfterEpochChange(t *testing.T) { } // Create relay request that was done in the first epoch. 
Change session ID each iteration to avoid double spending error (provider asks reward for the same transaction twice) - relaySession := &types.RelaySession{ - Provider: ts.providers[0].address.String(), - ContentHash: []byte(ts.spec.Apis[0].Name), - SessionId: sessionCounter, - SpecID: ts.spec.Name, - CuSum: ts.spec.Apis[0].ComputeUnits * 10, - Epoch: int64(firstEpoch), - RelayNum: 0, - } + relaySession := buildRelayRequest(ts.ctx, ts.providers[0].address.String(), []byte(ts.spec.Apis[0].Name), ts.spec.Apis[0].ComputeUnits*10, ts.spec.Name, nil) + relaySession.Epoch = int64(firstEpoch) + relaySession.SessionId = sessionCounter // Sign and send the payment requests sig, err := sigs.SignRelay(ts.clients[0].secretKey, *relaySession) @@ -212,15 +206,8 @@ func TestRelayPaymentBlockHeight(t *testing.T) { require.Nil(t, err) ts.ctx = testkeeper.AdvanceEpoch(ts.ctx, ts.keepers) - relaySession := &types.RelaySession{ - Provider: ts.providers[0].address.String(), - ContentHash: []byte(ts.spec.Apis[0].Name), - SessionId: uint64(1), - SpecID: ts.spec.Name, - CuSum: ts.spec.Apis[0].ComputeUnits * 10, - Epoch: sdk.UnwrapSDKContext(ts.ctx).BlockHeight() + tt.blockTime, - RelayNum: 0, - } + relaySession := buildRelayRequest(ts.ctx, ts.providers[0].address.String(), []byte(ts.spec.Apis[0].Name), ts.spec.Apis[0].ComputeUnits*10, ts.spec.Name, nil) + relaySession.Epoch = sdk.UnwrapSDKContext(ts.ctx).BlockHeight() + tt.blockTime sig, err := sigs.SignRelay(ts.clients[0].secretKey, *relaySession) relaySession.Sig = sig @@ -274,16 +261,7 @@ func TestRelayPaymentOverUse(t *testing.T) { maxcu, err := ts.keepers.Pairing.GetAllowedCUForBlock(sdk.UnwrapSDKContext(ts.ctx), uint64(sdk.UnwrapSDKContext(ts.ctx).BlockHeight()), entry) require.Nil(t, err) - relaySession := &types.RelaySession{ - Provider: ts.providers[0].address.String(), - ContentHash: []byte(ts.spec.Apis[0].Name), - SessionId: uint64(1), - SpecID: ts.spec.Name, - CuSum: maxcu * 2, - Epoch: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), - RelayNum: 0, - } - + relaySession := buildRelayRequest(ts.ctx, ts.providers[0].address.String(), []byte(ts.spec.Apis[0].Name), maxcu*2, ts.spec.Name, nil) sig, err := sigs.SignRelay(ts.clients[0].secretKey, *relaySession) relaySession.Sig = sig require.Nil(t, err) @@ -330,17 +308,8 @@ func TestRelayPaymentNotUnstakingProviderForUnresponsivenessIfNoEpochInformation var Relays []*types.RelaySession for clientIndex := 0; clientIndex < testClientAmount; clientIndex++ { // testing testClientAmount of complaints - relaySession := &types.RelaySession{ - Provider: ts.providers[0].address.String(), - ContentHash: []byte(ts.spec.Apis[0].Name), - SessionId: uint64(1), - SpecID: ts.spec.Name, - CuSum: ts.spec.Apis[0].ComputeUnits * 10, - Epoch: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), - RelayNum: 0, - UnresponsiveProviders: unresponsiveProvidersData, // create the complaint - } - + relaySession := buildRelayRequest(ts.ctx, ts.providers[0].address.String(), []byte(ts.spec.Apis[0].Name), ts.spec.Apis[0].ComputeUnits*10, ts.spec.Name, nil) + relaySession.UnresponsiveProviders = unresponsiveProvidersData // create the complaint sig, err := sigs.SignRelay(ts.clients[clientIndex].secretKey, *relaySession) relaySession.Sig = sig require.Nil(t, err) @@ -382,16 +351,8 @@ func TestRelayPaymentUnstakingProviderForUnresponsivenessWithBadDataInput(t *tes var Relays []*types.RelaySession var totalCu uint64 for clientIndex := 0; clientIndex < testClientAmount; clientIndex++ { // testing testClientAmount of complaints - relaySession := 
&types.RelaySession{ - Provider: ts.providers[0].address.String(), - ContentHash: []byte(ts.spec.Apis[0].Name), - SessionId: uint64(1), - SpecID: ts.spec.Name, - CuSum: ts.spec.Apis[0].ComputeUnits * 10, - Epoch: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), - RelayNum: 0, - UnresponsiveProviders: unresponsiveProvidersData[clientIndex], // create the complaint - } + relaySession := buildRelayRequest(ts.ctx, ts.providers[0].address.String(), []byte(ts.spec.Apis[0].Name), ts.spec.Apis[0].ComputeUnits*10, ts.spec.Name, nil) + relaySession.UnresponsiveProviders = unresponsiveProvidersData[clientIndex] totalCu += relaySession.CuSum sig, err := sigs.SignRelay(ts.clients[clientIndex].secretKey, *relaySession) @@ -418,15 +379,7 @@ func TestRelayPaymentNotUnstakingProviderForUnresponsivenessBecauseOfServices(t var RelaysForUnresponsiveProviderInFirstTwoEpochs []*types.RelaySession for i := 0; i < 2; i++ { // move to epoch 3 so we can check enough epochs in the past - relaySession := &types.RelaySession{ - Provider: ts.providers[1].address.String(), - ContentHash: []byte(ts.spec.Apis[0].Name), - SessionId: uint64(1), - SpecID: ts.spec.Name, - CuSum: ts.spec.Apis[0].ComputeUnits * 10, - Epoch: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), - RelayNum: 0, - } + relaySession := buildRelayRequest(ts.ctx, ts.providers[1].address.String(), []byte(ts.spec.Apis[0].Name), ts.spec.Apis[0].ComputeUnits*10, ts.spec.Name, nil) sig, err := sigs.SignRelay(ts.clients[i].secretKey, *relaySession) relaySession.Sig = sig @@ -442,16 +395,8 @@ func TestRelayPaymentNotUnstakingProviderForUnresponsivenessBecauseOfServices(t var Relays []*types.RelaySession for clientIndex := 0; clientIndex < testClientAmount; clientIndex++ { // testing testClientAmount of complaints - relaySession := &types.RelaySession{ - Provider: ts.providers[0].address.String(), - ContentHash: []byte(ts.spec.Apis[0].Name), - SessionId: uint64(1), - SpecID: ts.spec.Name, - CuSum: ts.spec.Apis[0].ComputeUnits * 10, - Epoch: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), - RelayNum: 0, - UnresponsiveProviders: unresponsiveProvidersData, // create the complaint - } + relaySession := buildRelayRequest(ts.ctx, ts.providers[0].address.String(), []byte(ts.spec.Apis[0].Name), ts.spec.Apis[0].ComputeUnits*10, ts.spec.Name, nil) + relaySession.UnresponsiveProviders = unresponsiveProvidersData sig, err := sigs.SignRelay(ts.clients[clientIndex].secretKey, *relaySession) relaySession.Sig = sig @@ -480,15 +425,7 @@ func TestRelayPaymentDoubleSpending(t *testing.T) { ts.ctx = testkeeper.AdvanceEpoch(ts.ctx, ts.keepers) cuSum := ts.spec.GetApis()[0].ComputeUnits * 10 - relaySession := &types.RelaySession{ - Provider: ts.providers[0].address.String(), - ContentHash: []byte(ts.spec.Apis[0].Name), - SessionId: uint64(1), - SpecID: ts.spec.Name, - CuSum: cuSum, - Epoch: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), - RelayNum: 0, - } + relaySession := buildRelayRequest(ts.ctx, ts.providers[0].address.String(), []byte(ts.spec.Apis[0].Name), ts.spec.Apis[0].ComputeUnits*10, ts.spec.Name, nil) sig, err := sigs.SignRelay(ts.clients[0].secretKey, *relaySession) relaySession.Sig = sig @@ -525,15 +462,7 @@ func TestRelayPaymentDataModification(t *testing.T) { require.Nil(t, err) ts.ctx = testkeeper.AdvanceEpoch(ts.ctx, ts.keepers) - relaySession := &types.RelaySession{ - Provider: ts.providers[0].address.String(), - ContentHash: []byte(ts.spec.Apis[0].Name), - SessionId: uint64(1), - SpecID: ts.spec.Name, - CuSum: ts.spec.Apis[0].ComputeUnits * 10, - Epoch: 
sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), - RelayNum: 0, - } + relaySession := buildRelayRequest(ts.ctx, ts.providers[0].address.String(), []byte(ts.spec.Apis[0].Name), ts.spec.Apis[0].ComputeUnits*10, ts.spec.Name, nil) sig, err := sigs.SignRelay(ts.clients[0].secretKey, *relaySession) relaySession.Sig = sig @@ -577,15 +506,7 @@ func TestRelayPaymentDelayedDoubleSpending(t *testing.T) { require.Nil(t, err) ts.ctx = testkeeper.AdvanceEpoch(ts.ctx, ts.keepers) - relaySession := &types.RelaySession{ - Provider: ts.providers[0].address.String(), - ContentHash: []byte(ts.spec.Apis[0].Name), - SessionId: uint64(1), - SpecID: ts.spec.Name, - CuSum: ts.spec.Apis[0].ComputeUnits * 10, - Epoch: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), - RelayNum: 0, - } + relaySession := buildRelayRequest(ts.ctx, ts.providers[0].address.String(), []byte(ts.spec.Apis[0].Name), ts.spec.Apis[0].ComputeUnits*10, ts.spec.Name, nil) sig, err := sigs.SignRelay(ts.clients[0].secretKey, *relaySession) relaySession.Sig = sig @@ -662,15 +583,9 @@ func TestRelayPaymentOldEpochs(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { cuSum := ts.spec.Apis[0].ComputeUnits * 10 - relaySession := &types.RelaySession{ - Provider: ts.providers[0].address.String(), - ContentHash: []byte(ts.spec.Apis[0].Name), - SessionId: tt.sid, - SpecID: ts.spec.Name, - CuSum: cuSum, - Epoch: sdk.UnwrapSDKContext(ts.ctx).BlockHeight() - int64(blocksInEpoch)*tt.epoch, - RelayNum: 0, - } + relaySession := buildRelayRequest(ts.ctx, ts.providers[0].address.String(), []byte(ts.spec.Apis[0].Name), cuSum, ts.spec.Name, nil) + relaySession.SessionId = tt.sid + relaySession.Epoch = sdk.UnwrapSDKContext(ts.ctx).BlockHeight() - int64(blocksInEpoch)*tt.epoch sig, err := sigs.SignRelay(ts.clients[0].secretKey, *relaySession) relaySession.Sig = sig @@ -731,17 +646,7 @@ func TestRelayPaymentQoS(t *testing.T) { cuSum := ts.spec.Apis[0].ComputeUnits * 10 QoS := &types.QualityOfServiceReport{Latency: tt.latency, Availability: tt.availability, Sync: tt.sync} - relaySession := &types.RelaySession{ - Provider: ts.providers[0].address.String(), - ContentHash: []byte(ts.spec.Apis[0].Name), - SessionId: uint64(1), - SpecID: ts.spec.Name, - CuSum: cuSum, - Epoch: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), - RelayNum: 0, - QoSReport: QoS, - } - QoS.ComputeQoS() + relaySession := buildRelayRequest(ts.ctx, ts.providers[0].address.String(), []byte(ts.spec.Apis[0].Name), cuSum, ts.spec.Name, QoS) sig, err := sigs.SignRelay(ts.clients[0].secretKey, *relaySession) relaySession.Sig = sig require.Nil(t, err) @@ -816,18 +721,7 @@ func TestRelayPaymentDataReliability(t *testing.T) { cuSum := ts.spec.Apis[0].ComputeUnits * 10 QoS := &types.QualityOfServiceReport{Latency: sdk.NewDecWithPrec(1, 0), Availability: sdk.NewDecWithPrec(1, 0), Sync: sdk.NewDecWithPrec(1, 0)} - relaySession := &types.RelaySession{ - Provider: ts.providers[0].address.String(), - ContentHash: []byte(ts.spec.Apis[0].Name), - SessionId: uint64(1), - SpecID: ts.spec.Name, - CuSum: cuSum, - Epoch: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), - RelayNum: 0, - QoSReport: QoS, - } - QoS.ComputeQoS() - + relaySession := buildRelayRequest(ts.ctx, ts.providers[0].address.String(), []byte(ts.spec.Apis[0].Name), cuSum, ts.spec.Name, QoS) relaySession.Sig, err = sigs.SignRelay(ts.clients[0].secretKey, *relaySession) require.Nil(t, err) @@ -904,20 +798,9 @@ func TestRelayPaymentDataReliability(t *testing.T) { } QoSDR := &types.QualityOfServiceReport{Latency: sdk.NewDecWithPrec(1, 0), Availability: 
sdk.NewDecWithPrec(1, 0), Sync: sdk.NewDecWithPrec(1, 0)} - relayRequestWithDataReliability0 := &types.RelaySession{ - Provider: providers[index0].Address, - ContentHash: []byte(ts.spec.Apis[0].Name), - SessionId: uint64(1), - SpecID: ts.spec.Name, - CuSum: cuSum, - Epoch: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), - RelayNum: 0, - QoSReport: QoSDR, - } - QoSDR.ComputeQoS() + relayRequestWithDataReliability0 := buildRelayRequest(ts.ctx, providers[index0].Address, []byte(ts.spec.Apis[0].Name), cuSum, ts.spec.Name, QoSDR) relayRequestWithDataReliability0.Sig, err = sigs.SignRelay(ts.clients[0].secretKey, *relayRequestWithDataReliability0) require.Nil(t, err) - provider := ts.getProvider(providers[index0].Address) relaysRequests := []*types.RelaySession{relayRequestWithDataReliability0} dataReliabilities := []*types.VRFData{dataReliability0} @@ -939,6 +822,24 @@ func TestRelayPaymentDataReliability(t *testing.T) { } } +func buildRelayRequest(ctx context.Context, provider string, contentHash []byte, cuSum uint64, spec string, QoSDR *types.QualityOfServiceReport) *types.RelaySession { + relaySession := &types.RelaySession{ + Provider: provider, + ContentHash: contentHash, + SessionId: uint64(1), + SpecID: spec, + CuSum: cuSum, + Epoch: sdk.UnwrapSDKContext(ctx).BlockHeight(), + RelayNum: 0, + QoSReport: QoSDR, + LavaChainId: sdk.UnwrapSDKContext(ctx).BlockHeader().ChainID, + } + if QoSDR != nil { + QoSDR.ComputeQoS() + } + return relaySession +} + // client sends data reliability to a different provider collaborating to get more rewards func TestRelayPaymentDataReliabilityWrongProvider(t *testing.T) { ts := setupForPaymentTest(t) @@ -958,18 +859,7 @@ func TestRelayPaymentDataReliabilityWrongProvider(t *testing.T) { cuSum := ts.spec.Apis[0].ComputeUnits * 10 QoS := &types.QualityOfServiceReport{Latency: sdk.NewDecWithPrec(1, 0), Availability: sdk.NewDecWithPrec(1, 0), Sync: sdk.NewDecWithPrec(1, 0)} - relaySession := &types.RelaySession{ - Provider: ts.providers[0].address.String(), - ContentHash: []byte(ts.spec.Apis[0].Name), - SessionId: uint64(1), - SpecID: ts.spec.Name, - CuSum: cuSum, - Epoch: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), - RelayNum: 0, - QoSReport: QoS, - } - QoS.ComputeQoS() - + relaySession := buildRelayRequest(ts.ctx, ts.providers[0].address.String(), []byte(ts.spec.Apis[0].Name), cuSum, ts.spec.Name, QoS) relaySession.Sig, err = sigs.SignRelay(ts.clients[0].secretKey, *relaySession) require.Nil(t, err) @@ -1032,17 +922,7 @@ GetWrongProvider: require.Nil(t, err) QoSDR := &types.QualityOfServiceReport{Latency: sdk.NewDecWithPrec(1, 0), Availability: sdk.NewDecWithPrec(1, 0), Sync: sdk.NewDecWithPrec(1, 0)} - relayRequestWithDataReliability0 := &types.RelaySession{ - Provider: providers[wrongProviderIndex].Address, - ContentHash: []byte(ts.spec.Apis[0].Name), - SessionId: uint64(1), - SpecID: ts.spec.Name, - CuSum: cuSum, - Epoch: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), - RelayNum: 0, - QoSReport: QoSDR, - } - QoSDR.ComputeQoS() + relayRequestWithDataReliability0 := buildRelayRequest(ts.ctx, providers[wrongProviderIndex].Address, []byte(ts.spec.Apis[0].Name), cuSum, ts.spec.Name, QoSDR) relayRequestWithDataReliability0.Sig, err = sigs.SignRelay(ts.clients[0].secretKey, *relayRequestWithDataReliability0) require.Nil(t, err) @@ -1072,17 +952,7 @@ func TestRelayPaymentDataReliabilityBelowReliabilityThreshold(t *testing.T) { cuSum := ts.spec.Apis[0].ComputeUnits * 10 QoS := &types.QualityOfServiceReport{Latency: sdk.NewDecWithPrec(1, 0), Availability: sdk.NewDecWithPrec(1, 
0), Sync: sdk.NewDecWithPrec(1, 0)} - relaySession := &types.RelaySession{ - Provider: ts.providers[0].address.String(), - ContentHash: []byte(ts.spec.Apis[0].Name), - SessionId: uint64(1), - SpecID: ts.spec.Name, - CuSum: cuSum, - Epoch: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), - RelayNum: 0, - QoSReport: QoS, - } - QoS.ComputeQoS() + relaySession := buildRelayRequest(ts.ctx, ts.providers[0].address.String(), []byte(ts.spec.Apis[0].Name), cuSum, ts.spec.Name, QoS) relaySession.Sig, err = sigs.SignRelay(ts.clients[0].secretKey, *relaySession) require.Nil(t, err) @@ -1122,17 +992,7 @@ func TestRelayPaymentDataReliabilityBelowReliabilityThreshold(t *testing.T) { // make all providers send a datareliability payment request. Everyone should fail for _, provider := range ts.providers { QoSDR := &types.QualityOfServiceReport{Latency: sdk.NewDecWithPrec(1, 0), Availability: sdk.NewDecWithPrec(1, 0), Sync: sdk.NewDecWithPrec(1, 0)} - relayRequestWithDataReliability0 := &types.RelaySession{ - Provider: provider.address.String(), - ContentHash: []byte(ts.spec.Apis[0].Name), - SessionId: uint64(1), - SpecID: ts.spec.Name, - CuSum: cuSum, - Epoch: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), - RelayNum: 0, - QoSReport: QoSDR, - } - QoSDR.ComputeQoS() + relayRequestWithDataReliability0 := buildRelayRequest(ts.ctx, provider.address.String(), []byte(ts.spec.Apis[0].Name), cuSum, ts.spec.Name, QoSDR) relayRequestWithDataReliability0.Sig, err = sigs.SignRelay(ts.clients[0].secretKey, *relayRequestWithDataReliability0) require.Nil(t, err) @@ -1161,16 +1021,7 @@ func TestRelayPaymentDataReliabilityDifferentClientSign(t *testing.T) { cuSum := ts.spec.Apis[0].ComputeUnits * 10 QoS := &types.QualityOfServiceReport{Latency: sdk.NewDecWithPrec(1, 0), Availability: sdk.NewDecWithPrec(1, 0), Sync: sdk.NewDecWithPrec(1, 0)} - relaySession := &types.RelaySession{ - Provider: ts.providers[0].address.String(), - ContentHash: []byte(ts.spec.Apis[0].Name), - SessionId: uint64(1), - SpecID: ts.spec.Name, - CuSum: cuSum, - Epoch: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), - RelayNum: 0, - QoSReport: QoS, - } + relaySession := buildRelayRequest(ts.ctx, ts.providers[0].address.String(), []byte(ts.spec.Apis[0].Name), cuSum, ts.spec.Name, QoS) QoS.ComputeQoS() relaySession.Sig, err = sigs.SignRelay(ts.clients[0].secretKey, *relaySession) require.Nil(t, err) @@ -1218,17 +1069,7 @@ func TestRelayPaymentDataReliabilityDifferentClientSign(t *testing.T) { require.Nil(t, err) QoSDR := &types.QualityOfServiceReport{Latency: sdk.NewDecWithPrec(1, 0), Availability: sdk.NewDecWithPrec(1, 0), Sync: sdk.NewDecWithPrec(1, 0)} - relayRequestWithDataReliability0 := &types.RelaySession{ - Provider: providers[index0].Address, - ContentHash: []byte(ts.spec.Apis[0].Name), - SessionId: uint64(1), - SpecID: ts.spec.Name, - CuSum: cuSum, - Epoch: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), - RelayNum: 0, - QoSReport: QoSDR, - } - QoSDR.ComputeQoS() + relayRequestWithDataReliability0 := buildRelayRequest(ts.ctx, providers[index0].Address, []byte(ts.spec.Apis[0].Name), cuSum, ts.spec.Name, QoSDR) relayRequestWithDataReliability0.Sig, err = sigs.SignRelay(ts.clients[1].secretKey, *relayRequestWithDataReliability0) require.Nil(t, err) @@ -1257,17 +1098,7 @@ func TestRelayPaymentDataReliabilityDoubleSpendDifferentEpoch(t *testing.T) { cuSum := ts.spec.Apis[0].ComputeUnits * 10 QoS := &types.QualityOfServiceReport{Latency: sdk.NewDecWithPrec(1, 0), Availability: sdk.NewDecWithPrec(1, 0), Sync: sdk.NewDecWithPrec(1, 0)} - relaySession := 
&types.RelaySession{ - Provider: ts.providers[0].address.String(), - ContentHash: []byte(ts.spec.Apis[0].Name), - SessionId: uint64(1), - SpecID: ts.spec.Name, - CuSum: cuSum, - Epoch: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), - RelayNum: 0, - QoSReport: QoS, - } - QoS.ComputeQoS() + relaySession := buildRelayRequest(ts.ctx, ts.providers[0].address.String(), []byte(ts.spec.Apis[0].Name), cuSum, ts.spec.Name, QoS) relaySession.Sig, err = sigs.SignRelay(ts.clients[0].secretKey, *relaySession) require.Nil(t, err) @@ -1315,17 +1146,7 @@ func TestRelayPaymentDataReliabilityDoubleSpendDifferentEpoch(t *testing.T) { require.Nil(t, err) QoSDR := &types.QualityOfServiceReport{Latency: sdk.NewDecWithPrec(1, 0), Availability: sdk.NewDecWithPrec(1, 0), Sync: sdk.NewDecWithPrec(1, 0)} - relayRequestWithDataReliability0 := &types.RelaySession{ - Provider: providers[index0].Address, - ContentHash: []byte(ts.spec.Apis[0].Name), - SessionId: uint64(1), - SpecID: ts.spec.Name, - CuSum: cuSum, - Epoch: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), - RelayNum: 0, - QoSReport: QoSDR, - } - QoSDR.ComputeQoS() + relayRequestWithDataReliability0 := buildRelayRequest(ts.ctx, providers[index0].Address, []byte(ts.spec.Apis[0].Name), cuSum, ts.spec.Name, QoSDR) relayRequestWithDataReliability0.Sig, err = sigs.SignRelay(ts.clients[0].secretKey, *relayRequestWithDataReliability0) require.Nil(t, err) @@ -1384,16 +1205,7 @@ func TestEpochPaymentDeletion(t *testing.T) { require.Nil(t, err) ts.ctx = testkeeper.AdvanceEpoch(ts.ctx, ts.keepers) - - relaySession := &types.RelaySession{ - Provider: ts.providers[0].address.String(), - ContentHash: []byte(ts.spec.Apis[0].Name), - SessionId: uint64(1), - SpecID: ts.spec.Name, - CuSum: ts.spec.Apis[0].ComputeUnits * 10, - Epoch: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), - RelayNum: 0, - } + relaySession := buildRelayRequest(ts.ctx, ts.providers[0].address.String(), []byte(ts.spec.Apis[0].Name), ts.spec.Apis[0].ComputeUnits*10, ts.spec.Name, nil) sig, err := sigs.SignRelay(ts.clients[0].secretKey, *relaySession) relaySession.Sig = sig From ad10d22d2d72dd0c781d464a50cc004582674573 Mon Sep 17 00:00:00 2001 From: omer mishael Date: Wed, 15 Mar 2023 17:14:13 +0200 Subject: [PATCH 108/123] fix unitests failing --- config/rpcprovider.yml | 8 ++++---- protocol/chainlib/common_test.go | 3 ++- protocol/chainlib/grpc_test.go | 15 +++++++++++---- 3 files changed, 17 insertions(+), 9 deletions(-) diff --git a/config/rpcprovider.yml b/config/rpcprovider.yml index c09e250881..bec02bdbe9 100644 --- a/config/rpcprovider.yml +++ b/config/rpcprovider.yml @@ -13,7 +13,7 @@ endpoints: chain-id: LAV1 network-address: 127.0.0.1:2221 node-url: http://127.0.0.1:1317 - # - api-interface: jsonrpc - # chain-id: ETH1 - # network-address: 127.0.0.1:2221 - # node-url: wss://ethereum-rpc.com/ws/ \ No newline at end of file + - api-interface: jsonrpc + chain-id: ETH1 + network-address: 127.0.0.1:2221 + node-url: wss://ethereum-rpc.com/ws/ \ No newline at end of file diff --git a/protocol/chainlib/common_test.go b/protocol/chainlib/common_test.go index dd6c1aa880..12dbac177d 100644 --- a/protocol/chainlib/common_test.go +++ b/protocol/chainlib/common_test.go @@ -5,6 +5,7 @@ import ( "io/ioutil" "net/http/httptest" "testing" + "time" "github.com/gofiber/fiber/v2" "github.com/gofiber/websocket/v2" @@ -167,7 +168,7 @@ func TestExtractDappIDFromWebsocketConnection(t *testing.T) { defer func() { app.Shutdown() }() - + time.Sleep(time.Millisecond * 20) // let the server go up for _, testCase := range testCases { 
testCase := testCase diff --git a/protocol/chainlib/grpc_test.go b/protocol/chainlib/grpc_test.go index 9cac4b1ae1..f1fbcb036f 100644 --- a/protocol/chainlib/grpc_test.go +++ b/protocol/chainlib/grpc_test.go @@ -1,6 +1,7 @@ package chainlib import ( + "strings" "sync" "testing" "time" @@ -8,6 +9,7 @@ import ( "github.com/lavanet/lava/protocol/chainlib/chainproxy/rpcInterfaceMessages" spectypes "github.com/lavanet/lava/x/spec/types" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestGRPCChainParser_Spec(t *testing.T) { @@ -79,7 +81,9 @@ func TestGRPCGetSupportedApi(t *testing.T) { } _, err = apip.getSupportedApi("API2") assert.Error(t, err) - assert.Equal(t, "GRPC api not supported", err.Error()) + errorData, _, found := strings.Cut(err.Error(), " --") + require.True(t, found) + assert.Equal(t, "GRPC api not supported", errorData) // Test case 3: Returns error if the API is disabled apip = &GrpcChainParser{ @@ -88,7 +92,9 @@ func TestGRPCGetSupportedApi(t *testing.T) { } _, err = apip.getSupportedApi("API1") assert.Error(t, err) - assert.Equal(t, "api is disabled", err.Error()) + errorData, _, found = strings.Cut(err.Error(), " --") + require.True(t, found) + assert.Equal(t, "GRPC api is disabled", errorData) } func TestGRPCParseMessage(t *testing.T) { @@ -114,6 +120,7 @@ func TestGRPCParseMessage(t *testing.T) { Msg: []byte("test message"), Path: "API1", } - - assert.Equal(t, grpcMessage, msg.GetRPCMessage()) + grpcMsg, ok := msg.GetRPCMessage().(*rpcInterfaceMessages.GrpcMessage) + require.True(t, ok) + assert.Equal(t, grpcMessage, *grpcMsg) } From 9620e404bfe07be451c8e028c38a2635bc4bee23 Mon Sep 17 00:00:00 2001 From: omer mishael Date: Wed, 15 Mar 2023 17:17:48 +0200 Subject: [PATCH 109/123] go mod tidy --- go.sum | 21 --------------------- 1 file changed, 21 deletions(-) diff --git a/go.sum b/go.sum index c9d4ca44f4..062982f37e 100644 --- a/go.sum +++ b/go.sum @@ -147,8 +147,6 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= -github.com/andrew-d/go-termutil v0.0.0-20150726205930-009166a695a2 h1:axBiC50cNZOs7ygH5BgQp4N+aYrZ2DNpWZ1KG3VOSOM= -github.com/andrew-d/go-termutil v0.0.0-20150726205930-009166a695a2/go.mod h1:jnzFpU88PccN/tPPhCpnNU8mZphvKxYM9lLNkd8e+os= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY= github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= @@ -224,8 +222,6 @@ github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= -github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= -github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= 
github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= @@ -785,8 +781,6 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2 h1:gDLXvp5S9izjldquuoAhDzccbskOL6tDC5jMSyx3zxE= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2/go.mod h1:7pdNwVWBBHGiCxa9lAszqCJMbfTISJ7oMftp8+UGV08= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= github.com/gtank/merlin v0.1.1-0.20191105220539-8318aed1a79f/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= @@ -902,16 +896,7 @@ github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52Cu github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/jpillora/ansi v1.0.2 h1:+Ei5HCAH0xsrQRCT2PDr4mq9r4Gm4tg+arNdXRkB22s= -github.com/jpillora/ansi v1.0.2/go.mod h1:D2tT+6uzJvN1nBVQILYWkIdq7zG+b5gcFN5WI/VyjMY= -github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/jpillora/chisel v1.7.7 h1:eLbzoX+ekDhVmF5CpSJD01NtH/w7QMYeaFCIFbzn9ns= -github.com/jpillora/chisel v1.7.7/go.mod h1:X3ZzJDlOSlkMLVY3DMsdrd03rMtugLYk2IOUhvX0SXo= -github.com/jpillora/requestlog v1.0.0 h1:bg++eJ74T7DYL3DlIpiwknrtfdUA9oP/M4fL+PpqnyA= -github.com/jpillora/requestlog v1.0.0/go.mod h1:HTWQb7QfDc2jtHnWe2XEIEeJB7gJPnVdpNn52HXPvy8= -github.com/jpillora/sizestr v1.0.0 h1:4tr0FLxs1Mtq3TnsLDV+GYUWG7Q26a6s+tV5Zfw2ygw= -github.com/jpillora/sizestr v1.0.0/go.mod h1:bUhLv4ctkknatr6gR42qPxirmd5+ds1u7mzD+MZ33f0= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -1401,7 +1386,6 @@ github.com/tendermint/btcd v0.1.1/go.mod h1:DC6/m53jtQzr/NFmMNEu0rxf18/ktVoVtMrn github.com/tendermint/crypto v0.0.0-20191022145703-50d29ede1e15 h1:hqAk8riJvK4RMWx1aInLzndwxKalgi5rTqgfXxOxbEI= github.com/tendermint/crypto v0.0.0-20191022145703-50d29ede1e15/go.mod h1:z4YtwM70uOnk8h0pjJYlj3zdYwi9l03By6iAIF5j/Pk= github.com/tendermint/fundraising v0.3.1-0.20220613014523-03b4a2d4481a h1:DIxap6r3z89JLoaLp6TTtt8XS7Zgfy4XACfG6b+4plE= -github.com/tendermint/fundraising v0.3.1-0.20220613014523-03b4a2d4481a/go.mod h1:oJFZUZ/GsACtkYeWScKpHLdqMUThNWpMAi/G47LJUi4= github.com/tendermint/go-amino v0.16.0 h1:GyhmgQKvqF82e2oZeuMSp9JTN0N09emoSZlb2lyGa2E= github.com/tendermint/go-amino v0.16.0/go.mod 
h1:TQU0M1i/ImAo+tYpZi73AU3V/dKeCoMC9Sphe2ZwGME= github.com/tendermint/spn v0.2.1-0.20220708132853-26a17f03c072 h1:J7+gbosE+lUg/m6wGNHs8xRM5ugU3FbdLWwaNg5b9kw= @@ -1424,8 +1408,6 @@ github.com/tklauser/numcpus v0.4.0 h1:E53Dm1HjH1/R2/aoCtXtPgzmElmn51aOkhCFSuZq// github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce h1:fb190+cK2Xz/dvi9Hv8eCYJYvIGUTN2/KLq1pT6CjEc= -github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4= github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31/go.mod h1:onvgF043R+lC5RZ8IT9rBXDaEDnpnw/Cl+HFiw+v/7Q= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs= @@ -1540,7 +1522,6 @@ golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= @@ -1653,7 +1634,6 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= @@ -1804,7 +1784,6 @@ golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0 
h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= From 20458847a8c5f297c26ffa2c523306593228d514 Mon Sep 17 00:00:00 2001 From: Ran Mishael Date: Thu, 16 Mar 2023 10:56:11 +0100 Subject: [PATCH 110/123] merge main --- go.sum | 2 ++ 1 file changed, 2 insertions(+) diff --git a/go.sum b/go.sum index 582600deb8..a21d940c27 100644 --- a/go.sum +++ b/go.sum @@ -781,6 +781,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2 h1:gDLXvp5S9izjldquuoAhDzccbskOL6tDC5jMSyx3zxE= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2/go.mod h1:7pdNwVWBBHGiCxa9lAszqCJMbfTISJ7oMftp8+UGV08= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= github.com/gtank/merlin v0.1.1-0.20191105220539-8318aed1a79f/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= From b067b7bd753df4cd394c482b87a851d142c7e241 Mon Sep 17 00:00:00 2001 From: Ran Mishael Date: Thu, 16 Mar 2023 11:14:32 +0100 Subject: [PATCH 111/123] make E2E great again --- .github/workflows/e2e.yml | 26 +- docs/static/openapi.yml | 3717 ++++++++++++++++++++++++++++++++----- go.mod | 1 - go.sum | 4 - testutil/e2e/e2e.go | 64 +- 5 files changed, 3275 insertions(+), 537 deletions(-) diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index c6b3bd68be..46274f5c21 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -103,14 +103,14 @@ jobs: continue-on-error: true run: cat testutil/e2e/logs/04_jsonConsumer_errors.log - - name: Tendermint Provider All Logs + - name: Lava Provider All Logs if: always() - run: grep "" testutil/e2e/logs/05_tendermintProvider* --exclude="*errors*" + run: grep "" testutil/e2e/logs/05_LavaProvider* --exclude="*errors*" - - name: Tendermint Provider Error Only Logs + - name: Lava Provider Error Only Logs if: always() continue-on-error: true - run: grep "" testutil/e2e/logs/05_tendermintProvider* --include="*errors*" + run: grep "" testutil/e2e/logs/05_LavaProvider* --include="*errors*" - name: Lava over Lava All Logs if: always() @@ -121,24 +121,6 @@ jobs: continue-on-error: true run: cat testutil/e2e/logs/07_lavaOverLava_errors.log - - name: Rest Provider All Logs - if: always() - run: grep "" testutil/e2e/logs/08_restProvider* --exclude="*errors*" - - - name: Rest Provider Error Only Logs - if: always() - continue-on-error: true - run: grep "" testutil/e2e/logs/08_restProvider* --include="*errors*" - - - name: GRPC Provider All Logs - if: always() - run: grep "" testutil/e2e/logs/10_grpcProvider* --exclude="*errors*" - - - name: GRPC Provider Error Only Logs - if: always() - continue-on-error: true - run: grep "" testutil/e2e/logs/10_grpcProvider* --include="*errors*" - - name: RPCConsumer Consumer All Logs if: always() run: cat testutil/e2e/logs/06_RPCConsumer.log diff --git a/docs/static/openapi.yml b/docs/static/openapi.yml index eb28895dee..8dcc324c01 100644 --- a/docs/static/openapi.yml +++ b/docs/static/openapi.yml @@ -470,7 +470,6 @@ paths: type: object 
properties: account: - description: account defines the account of the corresponding address. type: object properties: '@type': @@ -531,6 +530,114 @@ paths: used with implementation specific semantics. additionalProperties: {} + description: >- + `Any` contains an arbitrary serialized protocol buffer message + along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in + the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default + use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the last + '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding a + field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } description: >- QueryAccountResponse is the response type for the Query/Account RPC method. @@ -1464,13 +1571,20 @@ paths: type: object properties: balance: - description: balance is the balance of the coin. type: object properties: denom: type: string amount: type: string + description: >- + Coin defines a token with a denomination and an amount. + + + NOTE: The amount field is an Int which implements the custom + method + + signatures required by gogoproto. description: >- QueryBalanceResponse is the response type for the Query/Balance RPC method. @@ -1704,9 +1818,6 @@ paths: type: object properties: metadata: - description: >- - metadata describes and provides all the client information for - the requested token. type: object properties: description: @@ -1774,6 +1885,9 @@ paths: Since: cosmos-sdk 0.43 + description: |- + Metadata represents a struct that describes + a basic token. description: >- QueryDenomMetadataResponse is the response type for the Query/DenomMetadata RPC @@ -2135,13 +2249,20 @@ paths: type: object properties: amount: - description: amount is the supply of the coin. type: object properties: denom: type: string amount: type: string + description: >- + Coin defines a token with a denomination and an amount. 
+ + + NOTE: The amount field is an Int which implements the custom + method + + signatures required by gogoproto. description: >- QuerySupplyOfResponse is the response type for the Query/SupplyOf RPC method. @@ -6229,7 +6350,6 @@ paths: type: object properties: evidence: - description: evidence returns the requested evidence. type: object properties: '@type': @@ -6290,6 +6410,114 @@ paths: used with implementation specific semantics. additionalProperties: {} + description: >- + `Any` contains an arbitrary serialized protocol buffer message + along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in + the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default + use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the last + '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding a + field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } description: >- QueryEvidenceResponse is the response type for the Query/Evidence RPC method. @@ -9131,7 +9359,6 @@ paths: type: object properties: deposit: - description: deposit defines the requested deposit. type: object properties: proposal_id: @@ -9156,6 +9383,11 @@ paths: custom method signatures required by gogoproto. + description: >- + Deposit defines an amount deposited by an account address to + an active + + proposal. description: >- QueryDepositResponse is the response type for the Query/Deposit RPC method. @@ -9937,7 +10169,6 @@ paths: type: object properties: vote: - description: vote defined the queried vote. type: object properties: proposal_id: @@ -9995,6 +10226,11 @@ paths: Since: cosmos-sdk 0.43 title: 'Since: cosmos-sdk 0.43' + description: >- + Vote defines a vote on a governance proposal. + + A Vote consists of a proposal ID, the voter, and the vote + option. description: >- QueryVoteResponse is the response type for the Query/Vote RPC method. 
@@ -10623,9 +10859,6 @@ paths: type: object properties: val_signing_info: - title: >- - val_signing_info is the signing info of requested val cons - address type: object properties: address: @@ -10675,6 +10908,9 @@ paths: monitoring their liveness activity. + title: >- + val_signing_info is the signing info of requested val cons + address title: >- QuerySigningInfoResponse is the response type for the Query/SigningInfo RPC @@ -11812,9 +12048,6 @@ paths: operator_address defines the address of the validator's operator; bech encoded in JSON. consensus_pubkey: - description: >- - consensus_pubkey is the consensus public key of the - validator, as a Protobuf Any. type: object properties: '@type': @@ -11876,6 +12109,119 @@ paths: used with implementation specific semantics. additionalProperties: {} + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any + values in the form + + of utility functions or additional generated methods of + the Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and + the unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will + yield type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the + regular + + representation of the deserialized, embedded message, + with an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a + custom JSON + + representation, that representation will be embedded + adding a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message + [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } jailed: type: boolean description: >- @@ -12285,7 +12631,6 @@ paths: type: object properties: validator: - description: validator defines the the validator info. type: object properties: operator_address: @@ -12294,9 +12639,6 @@ paths: operator_address defines the address of the validator's operator; bech encoded in JSON. consensus_pubkey: - description: >- - consensus_pubkey is the consensus public key of the - validator, as a Protobuf Any. type: object properties: '@type': @@ -12358,6 +12700,117 @@ paths: used with implementation specific semantics. 
additionalProperties: {} + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any + values in the form + + of utility functions or additional generated methods of + the Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and + the unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will + yield type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a + custom JSON + + representation, that representation will be embedded + adding a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } jailed: type: boolean description: >- @@ -12459,6 +12912,29 @@ paths: description: >- min_self_delegation is the validator's self declared minimum self delegation. + description: >- + Validator defines a validator, together with the total amount + of the + + Validator's bond shares and their exchange rate to coins. + Slashing results in + + a decrease in the exchange rate, allowing correct calculation + of future + + undelegations without iterating over delegators. When coins + are delegated to + + this validator, the validator is credited with a delegation + whose number of + + bond shares is based on the amount of coins delegated divided + by the current + + exchange rate. Voting power can be calculated as total bonded + shares + + multiplied by exchange rate. description: |- QueryDelegatorValidatorResponse response type for the Query/DelegatorValidator RPC method. @@ -12773,9 +13249,6 @@ paths: operator_address defines the address of the validator's operator; bech encoded in JSON. consensus_pubkey: - description: >- - consensus_pubkey is the consensus public key of the - validator, as a Protobuf Any. type: object properties: '@type': @@ -12837,6 +13310,120 @@ paths: used with implementation specific semantics. additionalProperties: {} + description: >- + `Any` contains an arbitrary serialized protocol + buffer message along with a + + URL that describes the type of the serialized + message. 
+ + + Protobuf library provides support to pack/unpack Any + values in the form + + of utility functions or additional generated methods + of the Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will + by default use + + 'type.googleapis.com/full.type.name' as the type URL + and the unpack + + methods only use the fully qualified type name after + the last '/' + + in the type URL, for example "foo.bar.com/x/y.z" + will yield type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the + regular + + representation of the deserialized, embedded + message, with an + + additional field `@type` which contains the type + URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a + custom JSON + + representation, that representation will be embedded + adding a field + + `value` which holds the custom JSON in addition to + the `@type` + + field. Example (for message + [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } jailed: type: boolean description: >- @@ -13615,9 +14202,6 @@ paths: operator_address defines the address of the validator's operator; bech encoded in JSON. consensus_pubkey: - description: >- - consensus_pubkey is the consensus public key of the - validator, as a Protobuf Any. type: object properties: '@type': @@ -13679,6 +14263,119 @@ paths: used with implementation specific semantics. additionalProperties: {} + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any + values in the form + + of utility functions or additional generated methods of + the Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... 
+ } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and + the unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will + yield type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the + regular + + representation of the deserialized, embedded message, + with an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a + custom JSON + + representation, that representation will be embedded + adding a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message + [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } jailed: type: boolean description: >- @@ -14086,7 +14783,6 @@ paths: type: object properties: validator: - description: validator defines the the validator info. type: object properties: operator_address: @@ -14095,9 +14791,6 @@ paths: operator_address defines the address of the validator's operator; bech encoded in JSON. consensus_pubkey: - description: >- - consensus_pubkey is the consensus public key of the - validator, as a Protobuf Any. type: object properties: '@type': @@ -14159,6 +14852,117 @@ paths: used with implementation specific semantics. additionalProperties: {} + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any + values in the form + + of utility functions or additional generated methods of + the Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and + the unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will + yield type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. 
+ Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a + custom JSON + + representation, that representation will be embedded + adding a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } jailed: type: boolean description: >- @@ -14260,6 +15064,29 @@ paths: description: >- min_self_delegation is the validator's self declared minimum self delegation. + description: >- + Validator defines a validator, together with the total amount + of the + + Validator's bond shares and their exchange rate to coins. + Slashing results in + + a decrease in the exchange rate, allowing correct calculation + of future + + undelegations without iterating over delegators. When coins + are delegated to + + this validator, the validator is credited with a delegation + whose number of + + bond shares is based on the amount of coins delegated divided + by the current + + exchange rate. Voting power can be calculated as total bonded + shares + + multiplied by exchange rate. title: >- QueryValidatorResponse is response type for the Query/Validator RPC method @@ -14797,9 +15624,6 @@ paths: type: object properties: delegation_response: - description: >- - delegation_responses defines the delegation info of a - delegation. type: object properties: delegation: @@ -14841,6 +15665,12 @@ paths: custom method signatures required by gogoproto. + description: >- + DelegationResponse is equivalent to Delegation except that it + contains a + + balance in addition to shares which is more suitable for + client responses. description: >- QueryDelegationResponse is response type for the Query/Delegation RPC method. @@ -15055,7 +15885,6 @@ paths: type: object properties: unbond: - description: unbond defines the unbonding information of a delegation. type: object properties: delegator_address: @@ -15100,6 +15929,11 @@ paths: entries are the unbonding delegation entries. unbonding delegation entries + description: >- + UnbondingDelegation stores all of a single delegator's + unbonding bonds + + for a single validator in an time-ordered list. description: >- QueryDelegationResponse is response type for the Query/UnbondingDelegation @@ -16203,7 +17037,6 @@ paths: type: object properties: tx_response: - description: tx_response is the queried TxResponses. type: object properties: height: @@ -16290,7 +17123,6 @@ paths: format: int64 description: Amount of gas consumed by transaction. tx: - description: The request transaction bytes. type: object properties: '@type': @@ -16352,6 +17184,117 @@ paths: used with implementation specific semantics. additionalProperties: {} + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any + values in the form + + of utility functions or additional generated methods of + the Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... 
+ if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and + the unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will + yield type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a + custom JSON + + representation, that representation will be embedded + adding a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } timestamp: type: string description: >- @@ -16409,6 +17352,11 @@ paths: Since: cosmos-sdk 0.42.11, 0.44.5, 0.45 + description: >- + TxResponse defines a structure containing relevant tx data and + metadata. The + + tags are stringified and the log is JSON decoded. description: |- BroadcastTxResponse is the response type for the Service.BroadcastTx method. @@ -17374,13 +18322,6 @@ paths: such as a git commit that validators could automatically upgrade to upgraded_client_state: - description: >- - Deprecated: UpgradedClientState field has been deprecated. - IBC upgrade logic has been - - moved to the IBC module in the sub module 02-client. - - If this field is not empty, an error will be thrown. type: object properties: '@type': @@ -17442,6 +18383,117 @@ paths: used with implementation specific semantics. additionalProperties: {} + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any + values in the form + + of utility functions or additional generated methods of + the Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... 
+ } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and + the unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will + yield type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a + custom JSON + + representation, that representation will be embedded + adding a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } description: >- QueryCurrentPlanResponse is the response type for the Query/CurrentPlan RPC @@ -18693,9 +19745,6 @@ paths: type: object properties: denom_trace: - description: >- - denom_trace returns the requested denomination trace - information. type: object properties: path: @@ -18708,6 +19757,11 @@ paths: base_denom: type: string description: base denomination of the relayed fungible token. + description: >- + DenomTrace contains the base denomination for ICS20 fungible + tokens and the + + source tracing information path. description: >- QueryDenomTraceResponse is the response type for the Query/DenomTrace RPC @@ -19855,7 +20909,6 @@ paths: type: string title: client identifier client_state: - title: client state type: object properties: '@type': @@ -20028,6 +21081,7 @@ paths: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + title: client state description: >- IdentifiedClientState defines a client state with an additional client @@ -20280,7 +21334,6 @@ paths: type: object properties: consensus_state: - title: consensus state associated with the channel type: object properties: '@type': @@ -20449,6 +21502,7 @@ paths: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + title: consensus state associated with the channel client_id: type: string title: client ID associated with the consensus state @@ -23571,7 +24625,6 @@ paths: type: string title: client identifier client_state: - title: client state type: object properties: '@type': @@ -23746,6 +24799,7 @@ paths: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + title: client state description: >- IdentifiedClientState defines a client state with an additional client @@ -24030,7 +25084,6 @@ paths: type: object properties: client_state: - title: client state associated with the request identifier type: object properties: '@type': @@ -24199,6 +25252,7 @@ paths: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + title: client state associated with the request identifier proof: type: string format: byte @@ -24690,7 +25744,6 @@ paths: gets reset consensus_state: - title: consensus state type: object properties: '@type': @@ -24865,6 +25918,7 @@ paths: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + title: consensus state description: >- ConsensusStateWithHeight defines a consensus state with an additional height @@ -25156,9 
+26210,6 @@ paths: type: object properties: consensus_state: - title: >- - consensus state associated with the client identifier at the - given height type: object properties: '@type': @@ -25327,6 +26378,9 @@ paths: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + title: >- + consensus state associated with the client identifier at the + given height proof: type: string format: byte @@ -25590,7 +26644,6 @@ paths: type: object properties: upgraded_client_state: - title: client state associated with the request identifier type: object properties: '@type': @@ -25759,6 +26812,7 @@ paths: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + title: client state associated with the request identifier description: |- QueryUpgradedClientStateResponse is the response type for the Query/UpgradedClientState RPC method. @@ -25960,7 +27014,6 @@ paths: type: object properties: upgraded_consensus_state: - title: Consensus state associated with the request identifier type: object properties: '@type': @@ -26129,6 +27182,7 @@ paths: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + title: Consensus state associated with the request identifier description: |- QueryUpgradedConsensusStateResponse is the response type for the Query/UpgradedConsensusState RPC method. @@ -27315,7 +28369,6 @@ paths: type: string title: client identifier client_state: - title: client state type: object properties: '@type': @@ -27488,6 +28541,7 @@ paths: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + title: client state description: >- IdentifiedClientState defines a client state with an additional client @@ -27735,7 +28789,6 @@ paths: type: object properties: consensus_state: - title: consensus state associated with the channel type: object properties: '@type': @@ -27904,6 +28957,7 @@ paths: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + title: consensus state associated with the channel client_id: type: string title: client ID associated with the consensus state @@ -28620,282 +29674,262 @@ paths: in: query required: false type: boolean - - name: pagination.reverse - description: >- - reverse is set to true if results are to be returned in the - descending order. - - - Since: cosmos-sdk 0.43 - in: query - required: false - type: boolean - tags: - - Query - '/lavanet/lava/epochstorage/fixated_params/{index}': - get: - summary: Queries a FixatedParams by index. - operationId: LavanetLavaEpochstorageFixatedParams - responses: - '200': - description: A successful response. - schema: - type: object - properties: - fixatedParams: - type: object - properties: - index: - type: string - parameter: - type: string - format: byte - fixationBlock: - type: string - format: uint64 - default: - description: An unexpected error response. - schema: - type: object - properties: - code: - type: integer - format: int32 - message: - type: string - details: - type: array - items: - type: object - properties: - '@type': - type: string - additionalProperties: {} - parameters: - - name: index - in: path - required: true - type: string - tags: - - Query - /lavanet/lava/epochstorage/params: - get: - summary: Parameters queries the parameters of the module. - operationId: LavanetLavaEpochstorageParams - responses: - '200': - description: A successful response. - schema: - type: object - properties: - params: - description: params holds all the parameters of this module. 
- type: object - properties: - unstakeHoldBlocks: - type: string - format: uint64 - epochBlocks: - type: string - format: uint64 - epochsToSave: - type: string - format: uint64 - latestParamChange: - type: string - format: uint64 - unstakeHoldBlocksStatic: - type: string - format: uint64 - description: >- - QueryParamsResponse is response type for the Query/Params RPC - method. - default: - description: An unexpected error response. - schema: - type: object - properties: - code: - type: integer - format: int32 - message: - type: string - details: - type: array - items: - type: object - properties: - '@type': - type: string - additionalProperties: {} - tags: - - Query - /lavanet/lava/epochstorage/stake_storage: - get: - summary: Queries a list of StakeStorage items. - operationId: LavanetLavaEpochstorageStakeStorageAll - responses: - '200': - description: A successful response. - schema: - type: object - properties: - stakeStorage: - type: array - items: - type: object - properties: - index: - type: string - stakeEntries: - type: array - items: - type: object - properties: - stake: - type: object - properties: - denom: - type: string - amount: - type: string - description: >- - Coin defines a token with a denomination and an - amount. - - - NOTE: The amount field is an Int which implements - the custom method - - signatures required by gogoproto. - address: - type: string - stake_applied_block: - type: string - format: uint64 - endpoints: - type: array - items: - type: object - properties: - iPPORT: - type: string - useType: - type: string - geolocation: - type: string - format: uint64 - geolocation: - type: string - format: uint64 - chain: - type: string - vrfpk: - type: string - moniker: - type: string - epochBlockHash: - type: string - format: byte - pagination: - type: object - properties: - next_key: - type: string - format: byte - title: |- - next_key is the key to be passed to PageRequest.key to - query the next page most efficiently - total: - type: string - format: uint64 - title: >- - total is total number of results available if - PageRequest.count_total - - was set, its value is undefined otherwise - description: >- - PageResponse is to be embedded in gRPC response messages where - the - - corresponding request message has used PageRequest. - - message SomeResponse { - repeated Bar results = 1; - PageResponse page = 2; - } - default: - description: An unexpected error response. - schema: - type: object - properties: - code: - type: integer - format: int32 - message: - type: string - details: - type: array - items: - type: object - properties: - '@type': - type: string - additionalProperties: {} - parameters: - - name: pagination.key - description: |- - key is a value returned in PageResponse.next_key to begin - querying the next page most efficiently. Only one of offset or key - should be set. - in: query - required: false - type: string - format: byte - - name: pagination.offset - description: >- - offset is a numeric offset that can be used when key is unavailable. - - It is less efficient than using key. Only one of offset or key - should - - be set. - in: query - required: false - type: string - format: uint64 - - name: pagination.limit - description: >- - limit is the total number of results to be returned in the result - page. - - If left empty it will default to a value to be set by each app. 
- in: query - required: false - type: string - format: uint64 - - name: pagination.count_total - description: >- - count_total is set to true to indicate that the result set should - include - - a count of the total number of items available for pagination in - UIs. - - count_total is only respected when offset is used. It is ignored - when key - - is set. - in: query - required: false - type: boolean - - name: pagination.reverse + tags: + - Query + '/lavanet/lava/epochstorage/fixated_params/{index}': + get: + summary: Queries a FixatedParams by index. + operationId: LavanetLavaEpochstorageFixatedParams + responses: + '200': + description: A successful response. + schema: + type: object + properties: + fixatedParams: + type: object + properties: + index: + type: string + parameter: + type: string + format: byte + fixationBlock: + type: string + format: uint64 + default: + description: An unexpected error response. + schema: + type: object + properties: + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + '@type': + type: string + additionalProperties: {} + parameters: + - name: index + in: path + required: true + type: string + tags: + - Query + /lavanet/lava/epochstorage/params: + get: + summary: Parameters queries the parameters of the module. + operationId: LavanetLavaEpochstorageParams + responses: + '200': + description: A successful response. + schema: + type: object + properties: + params: + description: params holds all the parameters of this module. + type: object + properties: + unstakeHoldBlocks: + type: string + format: uint64 + epochBlocks: + type: string + format: uint64 + epochsToSave: + type: string + format: uint64 + latestParamChange: + type: string + format: uint64 + unstakeHoldBlocksStatic: + type: string + format: uint64 + description: >- + QueryParamsResponse is response type for the Query/Params RPC + method. + default: + description: An unexpected error response. + schema: + type: object + properties: + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + '@type': + type: string + additionalProperties: {} + tags: + - Query + /lavanet/lava/epochstorage/stake_storage: + get: + summary: Queries a list of StakeStorage items. + operationId: LavanetLavaEpochstorageStakeStorageAll + responses: + '200': + description: A successful response. + schema: + type: object + properties: + stakeStorage: + type: array + items: + type: object + properties: + index: + type: string + stakeEntries: + type: array + items: + type: object + properties: + stake: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + Coin defines a token with a denomination and an + amount. + + + NOTE: The amount field is an Int which implements + the custom method + + signatures required by gogoproto. 
+ address: + type: string + stake_applied_block: + type: string + format: uint64 + endpoints: + type: array + items: + type: object + properties: + iPPORT: + type: string + useType: + type: string + geolocation: + type: string + format: uint64 + geolocation: + type: string + format: uint64 + chain: + type: string + vrfpk: + type: string + moniker: + type: string + epochBlockHash: + type: string + format: byte + pagination: + type: object + properties: + next_key: + type: string + format: byte + title: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + PageResponse is to be embedded in gRPC response messages where + the + + corresponding request message has used PageRequest. + + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } + default: + description: An unexpected error response. + schema: + type: object + properties: + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + '@type': + type: string + additionalProperties: {} + parameters: + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset description: >- - reverse is set to true if results are to be returned in the - descending order. + offset is a numeric offset that can be used when key is unavailable. + It is less efficient than using key. Only one of offset or key + should - Since: cosmos-sdk 0.43 + be set. + in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. + in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in + UIs. + + count_total is only respected when offset is used. It is ignored + when key + + is set. in: query required: false type: boolean @@ -29187,6 +30221,16 @@ paths: in: query required: false type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean tags: - Query '/lavanet/lava/pairing/epoch_payments/{index}': @@ -29518,6 +30562,16 @@ paths: in: query required: false type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean tags: - Query '/lavanet/lava/pairing/provider_payment_storage/{index}': @@ -29655,7 +30709,7 @@ paths: in: path required: true type: string - - name: showFrozenProviders + - name: showFrozen in: query required: false type: boolean @@ -29857,6 +30911,16 @@ paths: in: query required: false type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. 
+ + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean tags: - Query '/lavanet/lava/pairing/unique_payment_storage_client_provider/{index}': @@ -30154,10 +31218,6 @@ paths: properties: index: type: string - duration: - type: string - format: uint64 - title: duration of the plan's subscription in months block: type: string format: uint64 @@ -30921,6 +31981,16 @@ paths: in: query required: false type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean tags: - Query '/lavanet/lava/spec/spec/{ChainID}': @@ -31486,6 +32556,16 @@ paths: in: query required: false type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean tags: - Query '/lavanet/lava/spec/spec_raw/{ChainID}': @@ -31750,27 +32830,49 @@ paths: properties: creator: type: string + title: creator pays for the subscription consumer: type: string + title: consumer uses the subscription block: type: string format: uint64 + title: when the subscription was created plan_index: type: string + title: index (name) of plan plan_block: type: string format: uint64 - is_yearly: - type: boolean - expiry_time: + title: when the plan was created + duration_total: type: string format: uint64 - usedCU: + title: total requested duration in months + duration_left: + type: string + format: uint64 + title: remaining duration in months + month_expiry_time: + type: string + format: uint64 + title: expiry time of current month + prev_expiry_block: + type: string + format: uint64 + title: when previous month expired + month_cu_total: + type: string + format: uint64 + title: CU allowance during current month + month_cu_left: type: string format: uint64 - remainingCU: + title: CU remaining during current month + prev_cu_left: type: string format: uint64 + title: CU remaining for previous month default: description: An unexpected error response. schema: @@ -31856,7 +32958,6 @@ definitions: type: object properties: account: - description: account defines the account of the corresponding address. type: object properties: '@type': @@ -31912,6 +33013,107 @@ definitions: used with implementation specific semantics. additionalProperties: {} + description: >- + `Any` contains an arbitrary serialized protocol buffer message along + with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in the + form + + of utility functions or additional generated methods of the Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... 
+ } + + The pack methods provided by protobuf library will by default use + + 'type.googleapis.com/full.type.name' as the type URL and the unpack + + methods only use the fully qualified type name after the last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } description: >- QueryAccountResponse is the response type for the Query/Account RPC method. @@ -32895,13 +34097,17 @@ definitions: type: object properties: balance: - description: balance is the balance of the coin. type: object properties: denom: type: string amount: type: string + description: |- + Coin defines a token with a denomination and an amount. + + NOTE: The amount field is an Int which implements the custom method + signatures required by gogoproto. description: >- QueryBalanceResponse is the response type for the Query/Balance RPC method. @@ -32909,9 +34115,6 @@ definitions: type: object properties: metadata: - description: >- - metadata describes and provides all the client information for the - requested token. type: object properties: description: @@ -32974,6 +34177,9 @@ definitions: Since: cosmos-sdk 0.43 + description: |- + Metadata represents a struct that describes + a basic token. description: >- QueryDenomMetadataResponse is the response type for the Query/DenomMetadata RPC @@ -33147,13 +34353,17 @@ definitions: type: object properties: amount: - description: amount is the supply of the coin. type: object properties: denom: type: string amount: type: string + description: |- + Coin defines a token with a denomination and an amount. + + NOTE: The amount field is an Int which implements the custom method + signatures required by gogoproto. description: >- QuerySupplyOfResponse is the response type for the Query/SupplyOf RPC method. @@ -38119,7 +39329,6 @@ definitions: type: object properties: evidence: - description: evidence returns the requested evidence. type: object properties: '@type': @@ -38175,6 +39384,107 @@ definitions: used with implementation specific semantics. additionalProperties: {} + description: >- + `Any` contains an arbitrary serialized protocol buffer message along + with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in the + form + + of utility functions or additional generated methods of the Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... 
+ if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default use + + 'type.googleapis.com/full.type.name' as the type URL and the unpack + + methods only use the fully qualified type name after the last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } description: >- QueryEvidenceResponse is the response type for the Query/Evidence RPC method. @@ -38869,7 +40179,6 @@ definitions: type: object properties: deposit: - description: deposit defines the requested deposit. type: object properties: proposal_id: @@ -38894,6 +40203,9 @@ definitions: method signatures required by gogoproto. + description: |- + Deposit defines an amount deposited by an account address to an active + proposal. description: >- QueryDepositResponse is the response type for the Query/Deposit RPC method. @@ -39544,7 +40856,6 @@ definitions: type: object properties: vote: - description: vote defined the queried vote. type: object properties: proposal_id: @@ -39599,6 +40910,9 @@ definitions: Since: cosmos-sdk 0.43 title: 'Since: cosmos-sdk 0.43' + description: |- + Vote defines a vote on a governance proposal. + A Vote consists of a proposal ID, the voter, and the vote option. description: QueryVoteResponse is the response type for the Query/Vote RPC method. cosmos.gov.v1beta1.QueryVotesResponse: type: object @@ -39970,7 +41284,6 @@ definitions: type: object properties: val_signing_info: - title: val_signing_info is the signing info of requested val cons address type: object properties: address: @@ -40017,6 +41330,7 @@ definitions: their liveness activity. + title: val_signing_info is the signing info of requested val cons address title: >- QuerySigningInfoResponse is the response type for the Query/SigningInfo RPC @@ -40386,9 +41700,6 @@ definitions: operator_address defines the address of the validator's operator; bech encoded in JSON. consensus_pubkey: - description: >- - consensus_pubkey is the consensus public key of the validator, - as a Protobuf Any. type: object properties: '@type': @@ -40449,6 +41760,112 @@ definitions: used with implementation specific semantics. additionalProperties: {} + description: >- + `Any` contains an arbitrary serialized protocol buffer message + along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in + the form + + of utility functions or additional generated methods of the Any + type. + + + Example 1: Pack and unpack a message in C++. 
+ + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default + use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the last + '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a + field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } jailed: type: boolean description: >- @@ -40634,7 +42051,6 @@ definitions: type: object properties: delegation_response: - description: delegation_responses defines the delegation info of a delegation. type: object properties: delegation: @@ -40676,6 +42092,12 @@ definitions: method signatures required by gogoproto. + description: >- + DelegationResponse is equivalent to Delegation except that it contains + a + + balance in addition to shares which is more suitable for client + responses. description: >- QueryDelegationResponse is response type for the Query/Delegation RPC method. @@ -40832,7 +42254,6 @@ definitions: type: object properties: validator: - description: validator defines the the validator info. type: object properties: operator_address: @@ -40841,9 +42262,6 @@ definitions: operator_address defines the address of the validator's operator; bech encoded in JSON. consensus_pubkey: - description: >- - consensus_pubkey is the consensus public key of the validator, as - a Protobuf Any. type: object properties: '@type': @@ -40902,6 +42320,110 @@ definitions: used with implementation specific semantics. additionalProperties: {} + description: >- + `Any` contains an arbitrary serialized protocol buffer message + along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in the + form + + of utility functions or additional generated methods of the Any + type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) 
+ any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a + field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } jailed: type: boolean description: >- @@ -40993,6 +42515,27 @@ definitions: description: >- min_self_delegation is the validator's self declared minimum self delegation. + description: >- + Validator defines a validator, together with the total amount of the + + Validator's bond shares and their exchange rate to coins. Slashing + results in + + a decrease in the exchange rate, allowing correct calculation of + future + + undelegations without iterating over delegators. When coins are + delegated to + + this validator, the validator is credited with a delegation whose + number of + + bond shares is based on the amount of coins delegated divided by the + current + + exchange rate. Voting power can be calculated as total bonded shares + + multiplied by exchange rate. description: |- QueryDelegatorValidatorResponse response type for the Query/DelegatorValidator RPC method. @@ -41010,9 +42553,6 @@ definitions: operator_address defines the address of the validator's operator; bech encoded in JSON. consensus_pubkey: - description: >- - consensus_pubkey is the consensus public key of the validator, - as a Protobuf Any. type: object properties: '@type': @@ -41073,6 +42613,112 @@ definitions: used with implementation specific semantics. additionalProperties: {} + description: >- + `Any` contains an arbitrary serialized protocol buffer message + along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in + the form + + of utility functions or additional generated methods of the Any + type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... 
+ } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default + use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the last + '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a + field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } jailed: type: boolean description: >- @@ -41313,9 +42959,6 @@ definitions: operator_address defines the address of the validator's operator; bech encoded in JSON. consensus_pubkey: - description: >- - consensus_pubkey is the consensus public key of the - validator, as a Protobuf Any. type: object properties: '@type': @@ -41377,6 +43020,117 @@ definitions: used with implementation specific semantics. additionalProperties: {} + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. 
Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } jailed: type: boolean description: >- @@ -41682,7 +43436,6 @@ definitions: type: object properties: unbond: - description: unbond defines the unbonding information of a delegation. type: object properties: delegator_address: @@ -41721,6 +43474,9 @@ definitions: entries are the unbonding delegation entries. unbonding delegation entries + description: |- + UnbondingDelegation stores all of a single delegator's unbonding bonds + for a single validator in an time-ordered list. description: |- QueryDelegationResponse is response type for the Query/UnbondingDelegation RPC method. @@ -41802,7 +43558,6 @@ definitions: type: object properties: validator: - description: validator defines the the validator info. type: object properties: operator_address: @@ -41811,9 +43566,6 @@ definitions: operator_address defines the address of the validator's operator; bech encoded in JSON. consensus_pubkey: - description: >- - consensus_pubkey is the consensus public key of the validator, as - a Protobuf Any. type: object properties: '@type': @@ -41872,6 +43624,110 @@ definitions: used with implementation specific semantics. additionalProperties: {} + description: >- + `Any` contains an arbitrary serialized protocol buffer message + along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in the + form + + of utility functions or additional generated methods of the Any + type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a + field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } jailed: type: boolean description: >- @@ -41963,6 +43819,27 @@ definitions: description: >- min_self_delegation is the validator's self declared minimum self delegation. 
+ description: >- + Validator defines a validator, together with the total amount of the + + Validator's bond shares and their exchange rate to coins. Slashing + results in + + a decrease in the exchange rate, allowing correct calculation of + future + + undelegations without iterating over delegators. When coins are + delegated to + + this validator, the validator is credited with a delegation whose + number of + + bond shares is based on the amount of coins delegated divided by the + current + + exchange rate. Voting power can be calculated as total bonded shares + + multiplied by exchange rate. title: QueryValidatorResponse is response type for the Query/Validator RPC method cosmos.staking.v1beta1.QueryValidatorUnbondingDelegationsResponse: type: object @@ -42052,9 +43929,6 @@ definitions: operator_address defines the address of the validator's operator; bech encoded in JSON. consensus_pubkey: - description: >- - consensus_pubkey is the consensus public key of the validator, - as a Protobuf Any. type: object properties: '@type': @@ -42115,6 +43989,112 @@ definitions: used with implementation specific semantics. additionalProperties: {} + description: >- + `Any` contains an arbitrary serialized protocol buffer message + along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in + the form + + of utility functions or additional generated methods of the Any + type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default + use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the last + '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a + field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } jailed: type: boolean description: >- @@ -42544,9 +44524,6 @@ definitions: operator_address defines the address of the validator's operator; bech encoded in JSON. consensus_pubkey: - description: >- - consensus_pubkey is the consensus public key of the validator, as a - Protobuf Any. 
type: object properties: '@type': @@ -42602,6 +44579,107 @@ definitions: used with implementation specific semantics. additionalProperties: {} + description: >- + `Any` contains an arbitrary serialized protocol buffer message along + with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in the + form + + of utility functions or additional generated methods of the Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default use + + 'type.googleapis.com/full.type.name' as the type URL and the unpack + + methods only use the fully qualified type name after the last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } jailed: type: boolean description: >- @@ -42928,7 +45006,6 @@ definitions: format: int64 description: Amount of gas consumed by transaction. tx: - description: The request transaction bytes. type: object properties: '@type': @@ -42984,6 +45061,107 @@ definitions: used with implementation specific semantics. additionalProperties: {} + description: >- + `Any` contains an arbitrary serialized protocol buffer message along + with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in the + form + + of utility functions or additional generated methods of the Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... 
+ } + + The pack methods provided by protobuf library will by default use + + 'type.googleapis.com/full.type.name' as the type URL and the unpack + + methods only use the fully qualified type name after the last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } timestamp: type: string description: >- @@ -43095,7 +45273,6 @@ definitions: signer_infos: type: array items: - type: object $ref: '#/definitions/cosmos.tx.v1beta1.SignerInfo' description: >- signer_infos defines the signing modes for the required signers. The @@ -43223,7 +45400,6 @@ definitions: type: object properties: tx_response: - description: tx_response is the queried TxResponses. type: object properties: height: @@ -43308,7 +45484,6 @@ definitions: format: int64 description: Amount of gas consumed by transaction. tx: - description: The request transaction bytes. type: object properties: '@type': @@ -43367,6 +45542,110 @@ definitions: used with implementation specific semantics. additionalProperties: {} + description: >- + `Any` contains an arbitrary serialized protocol buffer message + along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in the + form + + of utility functions or additional generated methods of the Any + type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. 
Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a + field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } timestamp: type: string description: >- @@ -43423,6 +45702,11 @@ definitions: Since: cosmos-sdk 0.42.11, 0.44.5, 0.45 + description: >- + TxResponse defines a structure containing relevant tx data and + metadata. The + + tags are stringified and the log is JSON decoded. description: |- BroadcastTxResponse is the response type for the Service.BroadcastTx method. @@ -43486,7 +45770,6 @@ definitions: txs: type: array items: - type: object $ref: '#/definitions/cosmos.tx.v1beta1.Tx' description: txs are the transactions in the block. block_id: @@ -44089,7 +46372,6 @@ definitions: $ref: '#/definitions/cosmos.tx.v1beta1.Tx' description: tx is the queried transaction. tx_response: - description: tx_response is the queried TxResponses. type: object properties: height: @@ -44174,7 +46456,6 @@ definitions: format: int64 description: Amount of gas consumed by transaction. tx: - description: The request transaction bytes. type: object properties: '@type': @@ -44233,6 +46514,110 @@ definitions: used with implementation specific semantics. additionalProperties: {} + description: >- + `Any` contains an arbitrary serialized protocol buffer message + along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in the + form + + of utility functions or additional generated methods of the Any + type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. 
Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a + field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } timestamp: type: string description: >- @@ -44289,6 +46674,11 @@ definitions: Since: cosmos-sdk 0.42.11, 0.44.5, 0.45 + description: >- + TxResponse defines a structure containing relevant tx data and + metadata. The + + tags are stringified and the log is JSON decoded. description: GetTxResponse is the response type for the Service.GetTx method. cosmos.tx.v1beta1.GetTxsEventResponse: type: object @@ -44296,7 +46686,6 @@ definitions: txs: type: array items: - type: object $ref: '#/definitions/cosmos.tx.v1beta1.Tx' description: txs is the list of queried transactions. tx_responses: @@ -44386,7 +46775,6 @@ definitions: format: int64 description: Amount of gas consumed by transaction. tx: - description: The request transaction bytes. type: object properties: '@type': @@ -44447,6 +46835,112 @@ definitions: used with implementation specific semantics. additionalProperties: {} + description: >- + `Any` contains an arbitrary serialized protocol buffer message + along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in + the form + + of utility functions or additional generated methods of the Any + type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default + use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the last + '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a + field + + `value` which holds the custom JSON in addition to the `@type` + + field. 
Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } timestamp: type: string description: >- @@ -44609,7 +47103,6 @@ definitions: mode_infos: type: array items: - type: object $ref: '#/definitions/cosmos.tx.v1beta1.ModeInfo' title: |- mode_infos is the corresponding modes of the signers of the multisig @@ -44680,14 +47173,6 @@ definitions: type: object properties: public_key: - description: >- - public_key is the public key of the signer. It is optional for - accounts - - that already exist in state. If unset, the verifier can use the - required \ - - signer address for this position and lookup the public key. type: object properties: '@type': @@ -44743,6 +47228,107 @@ definitions: used with implementation specific semantics. additionalProperties: {} + description: >- + `Any` contains an arbitrary serialized protocol buffer message along + with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in the + form + + of utility functions or additional generated methods of the Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default use + + 'type.googleapis.com/full.type.name' as the type URL and the unpack + + methods only use the fully qualified type name after the last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } mode_info: $ref: '#/definitions/cosmos.tx.v1beta1.ModeInfo' title: |- @@ -46074,13 +48660,6 @@ definitions: Any application specific upgrade info to be included on-chain such as a git commit that validators could automatically upgrade to upgraded_client_state: - description: >- - Deprecated: UpgradedClientState field has been deprecated. IBC upgrade - logic has been - - moved to the IBC module in the sub module 02-client. - - If this field is not empty, an error will be thrown. type: object properties: '@type': @@ -46136,6 +48715,107 @@ definitions: used with implementation specific semantics. 
additionalProperties: {} + description: >- + `Any` contains an arbitrary serialized protocol buffer message along + with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in the + form + + of utility functions or additional generated methods of the Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default use + + 'type.googleapis.com/full.type.name' as the type URL and the unpack + + methods only use the fully qualified type name after the last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } description: >- Plan specifies information about a planned upgrade and when it should occur. @@ -46204,13 +48884,6 @@ definitions: such as a git commit that validators could automatically upgrade to upgraded_client_state: - description: >- - Deprecated: UpgradedClientState field has been deprecated. IBC - upgrade logic has been - - moved to the IBC module in the sub module 02-client. - - If this field is not empty, an error will be thrown. type: object properties: '@type': @@ -46269,6 +48942,110 @@ definitions: used with implementation specific semantics. additionalProperties: {} + description: >- + `Any` contains an arbitrary serialized protocol buffer message + along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in the + form + + of utility functions or additional generated methods of the Any + type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... 
+ + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a + field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } description: >- QueryCurrentPlanResponse is the response type for the Query/CurrentPlan RPC @@ -46435,7 +49212,6 @@ definitions: type: object properties: denom_trace: - description: denom_trace returns the requested denomination trace information. type: object properties: path: @@ -46448,6 +49224,11 @@ definitions: base_denom: type: string description: base denomination of the relayed fungible token. + description: >- + DenomTrace contains the base denomination for ICS20 fungible tokens + and the + + source tracing information path. description: |- QueryDenomTraceResponse is the response type for the Query/DenomTrace RPC method. @@ -46899,7 +49680,6 @@ definitions: type: string title: client identifier client_state: - title: client state type: object properties: '@type': @@ -47062,6 +49842,7 @@ definitions: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + title: client state description: |- IdentifiedClientState defines a client state with an additional client identifier field. @@ -47104,7 +49885,6 @@ definitions: type: object properties: consensus_state: - title: consensus state associated with the channel type: object properties: '@type': @@ -47261,6 +50041,7 @@ definitions: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + title: consensus state associated with the channel client_id: type: string title: client ID associated with the consensus state @@ -48153,7 +50934,6 @@ definitions: type: string title: client identifier client_state: - title: client state type: object properties: '@type': @@ -48310,6 +51090,7 @@ definitions: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + title: client state description: |- IdentifiedClientState defines a client state with an additional client identifier field. 
@@ -48345,7 +51126,6 @@ definitions: gets reset consensus_state: - title: consensus state type: object properties: '@type': @@ -48502,6 +51282,7 @@ definitions: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + title: consensus state description: >- ConsensusStateWithHeight defines a consensus state with an additional height @@ -48551,7 +51332,6 @@ definitions: type: object properties: client_state: - title: client state associated with the request identifier type: object properties: '@type': @@ -48708,6 +51488,7 @@ definitions: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + title: client state associated with the request identifier proof: type: string format: byte @@ -48759,7 +51540,6 @@ definitions: type: string title: client identifier client_state: - title: client state type: object properties: '@type': @@ -48926,6 +51706,7 @@ definitions: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + title: client state description: >- IdentifiedClientState defines a client state with an additional client @@ -48977,9 +51758,6 @@ definitions: type: object properties: consensus_state: - title: >- - consensus state associated with the client identifier at the given - height type: object properties: '@type': @@ -49136,6 +51914,9 @@ definitions: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + title: >- + consensus state associated with the client identifier at the given + height proof: type: string format: byte @@ -49211,7 +51992,6 @@ definitions: gets reset consensus_state: - title: consensus state type: object properties: '@type': @@ -49378,6 +52158,7 @@ definitions: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + title: consensus state description: >- ConsensusStateWithHeight defines a consensus state with an additional height @@ -49417,7 +52198,6 @@ definitions: type: object properties: upgraded_client_state: - title: client state associated with the request identifier type: object properties: '@type': @@ -49574,6 +52354,7 @@ definitions: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + title: client state associated with the request identifier description: |- QueryUpgradedClientStateResponse is the response type for the Query/UpgradedClientState RPC method. @@ -49581,7 +52362,6 @@ definitions: type: object properties: upgraded_consensus_state: - title: Consensus state associated with the request identifier type: object properties: '@type': @@ -49738,6 +52518,7 @@ definitions: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + title: Consensus state associated with the request identifier description: |- QueryUpgradedConsensusStateResponse is the response type for the Query/UpgradedConsensusState RPC method. @@ -50024,7 +52805,6 @@ definitions: type: string title: client identifier client_state: - title: client state type: object properties: '@type': @@ -50187,6 +52967,7 @@ definitions: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + title: client state description: |- IdentifiedClientState defines a client state with an additional client identifier field. 
@@ -50229,7 +53010,6 @@ definitions: type: object properties: consensus_state: - title: consensus state associated with the channel type: object properties: '@type': @@ -50386,6 +53166,7 @@ definitions: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + title: consensus state associated with the channel client_id: type: string title: client ID associated with the consensus state @@ -51481,7 +54262,7 @@ definitions: relay_session: type: object properties: - chainID: + specID: type: string content_hash: type: string @@ -51507,7 +54288,7 @@ definitions: type: string sync: type: string - block_height: + epoch: type: string format: int64 unresponsive_providers: @@ -51530,6 +54311,8 @@ definitions: badge_pk: type: string format: byte + spec_id: + type: string project_sig: type: string format: byte @@ -51980,6 +54763,8 @@ definitions: badge_pk: type: string format: byte + spec_id: + type: string project_sig: type: string format: byte @@ -52001,7 +54786,7 @@ definitions: type: array items: type: string - lavanet.lava.pairing.MsgFreezeResponse: + lavanet.lava.pairing.MsgFreezeProviderResponse: type: object lavanet.lava.pairing.MsgRelayPaymentResponse: type: object @@ -52009,7 +54794,7 @@ definitions: type: object lavanet.lava.pairing.MsgStakeProviderResponse: type: object - lavanet.lava.pairing.MsgUnfreezeResponse: + lavanet.lava.pairing.MsgUnfreezeProviderResponse: type: object lavanet.lava.pairing.MsgUnstakeClientResponse: type: object @@ -52572,7 +55357,7 @@ definitions: lavanet.lava.pairing.RelaySession: type: object properties: - chainID: + specID: type: string content_hash: type: string @@ -52598,7 +55383,7 @@ definitions: type: string sync: type: string - block_height: + epoch: type: string format: int64 unresponsive_providers: @@ -52621,6 +55406,8 @@ definitions: badge_pk: type: string format: byte + spec_id: + type: string project_sig: type: string format: byte @@ -52643,10 +55430,6 @@ definitions: properties: index: type: string - duration: - type: string - format: uint64 - title: duration of the plan's subscription in months block: type: string format: uint64 @@ -52738,10 +55521,6 @@ definitions: properties: index: type: string - duration: - type: string - format: uint64 - title: duration of the plan's subscription in months block: type: string format: uint64 @@ -54103,7 +56882,7 @@ definitions: type: array items: type: string - lavanet.lava.subscription.MsgSubscribeResponse: + lavanet.lava.subscription.MsgBuyResponse: type: object lavanet.lava.subscription.Params: type: object @@ -54116,27 +56895,49 @@ definitions: properties: creator: type: string + title: creator pays for the subscription consumer: type: string + title: consumer uses the subscription block: type: string format: uint64 + title: when the subscription was created plan_index: type: string + title: index (name) of plan plan_block: type: string format: uint64 - is_yearly: - type: boolean - expiry_time: + title: when the plan was created + duration_total: type: string format: uint64 - usedCU: + title: total requested duration in months + duration_left: + type: string + format: uint64 + title: remaining duration in months + month_expiry_time: + type: string + format: uint64 + title: expiry time of current month + prev_expiry_block: + type: string + format: uint64 + title: when previous month expired + month_cu_total: type: string format: uint64 - remainingCU: + title: CU allowance during current month + month_cu_left: type: string format: uint64 + title: CU remaining during current month + 
prev_cu_left: + type: string + format: uint64 + title: CU remaining for previous month lavanet.lava.subscription.QueryParamsResponse: type: object properties: @@ -54149,24 +56950,46 @@ definitions: properties: creator: type: string + title: creator pays for the subscription consumer: type: string + title: consumer uses the subscription block: type: string format: uint64 + title: when the subscription was created plan_index: type: string + title: index (name) of plan plan_block: type: string format: uint64 - is_yearly: - type: boolean - expiry_time: + title: when the plan was created + duration_total: type: string format: uint64 - usedCU: + title: total requested duration in months + duration_left: + type: string + format: uint64 + title: remaining duration in months + month_expiry_time: + type: string + format: uint64 + title: expiry time of current month + prev_expiry_block: + type: string + format: uint64 + title: when previous month expired + month_cu_total: + type: string + format: uint64 + title: CU allowance during current month + month_cu_left: type: string format: uint64 - remainingCU: + title: CU remaining during current month + prev_cu_left: type: string format: uint64 + title: CU remaining for previous month diff --git a/go.mod b/go.mod index c05fd8e7fe..bcc34dc524 100644 --- a/go.mod +++ b/go.mod @@ -49,7 +49,6 @@ require ( github.com/ghodss/yaml v1.0.0 // indirect github.com/gogo/googleapis v1.4.0 // indirect github.com/golang/glog v1.0.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2 // indirect github.com/pelletier/go-toml/v2 v2.0.5 // indirect golang.org/x/mod v0.7.0 // indirect golang.org/x/tools v0.2.0 // indirect diff --git a/go.sum b/go.sum index a21d940c27..8626d7221e 100644 --- a/go.sum +++ b/go.sum @@ -781,8 +781,6 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2 h1:gDLXvp5S9izjldquuoAhDzccbskOL6tDC5jMSyx3zxE= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2/go.mod h1:7pdNwVWBBHGiCxa9lAszqCJMbfTISJ7oMftp8+UGV08= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= github.com/gtank/merlin v0.1.1-0.20191105220539-8318aed1a79f/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= @@ -1076,8 +1074,6 @@ github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxzi github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= github.com/neilotoole/errgroup v0.1.5/go.mod h1:Q2nLGf+594h0CLBs/Mbg6qOr7GtqDK7C2S41udRnToE= -github.com/newrelic/go-agent/v3 v3.20.3 h1:hUBAMq/Y2Y9as5/yxQbf0zNde/X7w58cWZkm2flZIaw= -github.com/newrelic/go-agent/v3 v3.20.3/go.mod h1:rT6ZUxJc5rQbWLyCtjqQCOcfb01lKRFbc1yMQkcboWM= github.com/newrelic/go-agent/v3 v3.20.4 h1:fkxr0oUEYrPeXyfJC0D0BwDs1FYMe4NgUSqnzqPESI0= github.com/newrelic/go-agent/v3 v3.20.4/go.mod h1:rT6ZUxJc5rQbWLyCtjqQCOcfb01lKRFbc1yMQkcboWM= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod 
h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= diff --git a/testutil/e2e/e2e.go b/testutil/e2e/e2e.go index acbcefd1c6..15aef95f1f 100644 --- a/testutil/e2e/e2e.go +++ b/testutil/e2e/e2e.go @@ -430,7 +430,7 @@ func (lt *lavaTest) startLavaProviders(ctx context.Context) { lt.commands[logName] = cmd go func(idx int) { - lt.listenCmdCommand(cmd, "startTendermintProvider process returned unexpectedly, provider idx:"+strconv.Itoa(idx), "startTendermintProvider") + lt.listenCmdCommand(cmd, "startLavaProviders process returned unexpectedly, provider idx:"+strconv.Itoa(idx), "startLavaProviders") }(idx) } @@ -559,37 +559,6 @@ func (lt *lavaTest) lavaOverLava(ctx context.Context) { lt.checkStakeLava(5, 5, 1, checkedSpecsE2ELOL, "Lava Over Lava Test OK") } -func (lt *lavaTest) startRESTProvider(rpcURL string, ctx context.Context) { - providerCommands := []string{ - lt.lavadPath + " server 127.0.0.1 2271 " + rpcURL + " LAV1 rest --from servicer6 --geolocation 1 --log_level debug", - lt.lavadPath + " server 127.0.0.1 2272 " + rpcURL + " LAV1 rest --from servicer7 --geolocation 1 --log_level debug", - lt.lavadPath + " server 127.0.0.1 2273 " + rpcURL + " LAV1 rest --from servicer8 --geolocation 1 --log_level debug", - lt.lavadPath + " server 127.0.0.1 2274 " + rpcURL + " LAV1 rest --from servicer9 --geolocation 1 --log_level debug", - lt.lavadPath + " server 127.0.0.1 2275 " + rpcURL + " LAV1 rest --from servicer10 --geolocation 1 --log_level debug", - } - - for idx, providerCommand := range providerCommands { - logName := "08_restProvider_" + fmt.Sprintf("%02d", idx) - lt.logs[logName] = new(bytes.Buffer) - cmd := exec.CommandContext(ctx, "", "") - cmd.Path = lt.lavadPath - cmd.Args = strings.Split(providerCommand, " ") - cmd.Stdout = lt.logs[logName] - cmd.Stderr = lt.logs[logName] - - err := cmd.Start() - if err != nil { - panic(err) - } - lt.commands[logName] = cmd - - go func(idx int) { - lt.listenCmdCommand(cmd, "startRESTProvider process returned unexpectedly, provider idx:"+strconv.Itoa(idx), "startRESTProvider") - }(idx) - } - utils.LavaFormatInfo("startRESTProvider OK", nil) -} - func (lt *lavaTest) checkRESTConsumer(rpcURL string, timeout time.Duration) { for start := time.Now(); time.Since(start) < timeout; { utils.LavaFormatInfo("Waiting REST Consumer", nil) @@ -650,37 +619,6 @@ func getRequest(url string) ([]byte, error) { return body, nil } -func (lt *lavaTest) startGRPCProvider(rpcURL string, ctx context.Context) { - providerCommands := []string{ - lt.lavadPath + " server 127.0.0.1 2281 " + rpcURL + " LAV1 grpc --from servicer6 --geolocation 1 --log_level debug", - lt.lavadPath + " server 127.0.0.1 2282 " + rpcURL + " LAV1 grpc --from servicer7 --geolocation 1 --log_level debug", - lt.lavadPath + " server 127.0.0.1 2283 " + rpcURL + " LAV1 grpc --from servicer8 --geolocation 1 --log_level debug", - lt.lavadPath + " server 127.0.0.1 2284 " + rpcURL + " LAV1 grpc --from servicer9 --geolocation 1 --log_level debug", - lt.lavadPath + " server 127.0.0.1 2285 " + rpcURL + " LAV1 grpc --from servicer10 --geolocation 1 --log_level debug", - } - - for idx, providerCommand := range providerCommands { - logName := "10_grpcProvider_" + fmt.Sprintf("%02d", idx) - lt.logs[logName] = new(bytes.Buffer) - cmd := exec.CommandContext(ctx, "", "") - cmd.Path = lt.lavadPath - cmd.Args = strings.Split(providerCommand, " ") - cmd.Stdout = lt.logs[logName] - cmd.Stderr = lt.logs[logName] - - err := cmd.Start() - if err != nil { - panic(err) - } - lt.commands[logName] = cmd - - go func(idx int) { - 
lt.listenCmdCommand(cmd, "startGRPCProvider process returned unexpectedly, provider idx:"+strconv.Itoa(idx), "startGRPCProvider") - }(idx) - } - utils.LavaFormatInfo("startGRPCProvider OK", nil) -} - func (lt *lavaTest) checkGRPCConsumer(rpcURL string, timeout time.Duration) { for start := time.Now(); time.Since(start) < timeout; { utils.LavaFormatInfo("Waiting GRPC Consumer", nil) From 8ab8491152c395544f1a56493b3275b95e6e3e5a Mon Sep 17 00:00:00 2001 From: Ran Mishael Date: Thu, 16 Mar 2023 11:40:48 +0100 Subject: [PATCH 112/123] fix tests --- .github/workflows/e2e.yml | 4 ++-- x/pairing/keeper/msg_server_freeze_test.go | 20 ++++++++------------ 2 files changed, 10 insertions(+), 14 deletions(-) diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index 46274f5c21..7ff906ca51 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -87,12 +87,12 @@ jobs: - name: JSON Provider All Logs if: always() - run: grep "" testutil/e2e/logs/03_jsonProvider* --exclude="*errors*" + run: grep "" testutil/e2e/logs/03_EthProvider* --exclude="*errors*" - name: JSON Provider Error Only Logs if: always() continue-on-error: true - run: grep "" testutil/e2e/logs/03_jsonProvider* --include="*errors*" + run: grep "" testutil/e2e/logs/03_EthProvider* --include="*errors*" - name: JSON Consumer All Logs if: always() diff --git a/x/pairing/keeper/msg_server_freeze_test.go b/x/pairing/keeper/msg_server_freeze_test.go index f51b2e66c7..a6883534f8 100644 --- a/x/pairing/keeper/msg_server_freeze_test.go +++ b/x/pairing/keeper/msg_server_freeze_test.go @@ -242,24 +242,20 @@ func TestPaymentFrozen(t *testing.T) { require.NotEqual(t, providerToFreeze.Address, provider.Address) } - relayRequest := &types.RelayRequest{ - Provider: providerToFreeze.Address, - ApiUrl: "", - Data: []byte(ts.spec.Apis[0].Name), - SessionId: uint64(1), - ChainID: ts.spec.Name, - CuSum: ts.spec.Apis[0].ComputeUnits * 10, - BlockHeight: blockForPaymentBeforeFreeze, - RelayNum: 0, - RequestBlock: -1, - DataReliability: nil, + relayRequest := &types.RelaySession{ + Provider: providerToFreeze.Address, + SessionId: uint64(1), + SpecID: ts.spec.Name, + CuSum: ts.spec.Apis[0].ComputeUnits * 10, + Epoch: blockForPaymentBeforeFreeze, + RelayNum: 0, } sig, err := sigs.SignRelay(ts.clients[0].secretKey, *relayRequest) relayRequest.Sig = sig require.Nil(t, err) - var Relays []*types.RelayRequest + var Relays []*types.RelaySession Relays = append(Relays, relayRequest) _, err = ts.servers.PairingServer.RelayPayment(ts.ctx, &types.MsgRelayPayment{Creator: providerToFreeze.Address, Relays: Relays}) From 027003275267562bee6c70b5434365451c90ed31 Mon Sep 17 00:00:00 2001 From: omer mishael Date: Thu, 16 Mar 2023 10:46:09 +0200 Subject: [PATCH 113/123] merge fixes --- docs/static/openapi.yml | 424 +++++++++++------- go.mod | 1 - go.sum | 2 - scripts/init_chain_commands.sh | 4 +- testutil/common/common.go | 33 +- .../keeper/msg_server_relay_payment_test.go | 64 +-- x/pairing/keeper/pairing_subscription_test.go | 62 +-- x/pairing/keeper/pairing_test.go | 2 +- 8 files changed, 318 insertions(+), 274 deletions(-) diff --git a/docs/static/openapi.yml b/docs/static/openapi.yml index 8b31d53582..3dfab40735 100644 --- a/docs/static/openapi.yml +++ b/docs/static/openapi.yml @@ -29674,6 +29674,16 @@ paths: in: query required: false type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. 
+ + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean tags: - Query '/lavanet/lava/epochstorage/fixated_params/{index}': @@ -29933,6 +29943,16 @@ paths: in: query required: false type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean tags: - Query '/lavanet/lava/epochstorage/stake_storage/{index}': @@ -30333,16 +30353,16 @@ paths: type: string moniker: type: string - currentEpoch: + current_epoch: type: string format: uint64 - timeLeftToNextPairing: + time_left_to_next_pairing: type: string format: uint64 - specLastUpdatedBlock: + spec_last_updated_block: type: string format: uint64 - blockOfNextPairing: + block_of_next_pairing: type: string format: uint64 default: @@ -31075,10 +31095,10 @@ paths: index: type: string format: int64 - pairedProviders: + paired_providers: type: string format: uint64 - cuPerEpoch: + cu_per_epoch: type: string format: uint64 default: @@ -31119,100 +31139,10 @@ paths: format: uint64 tags: - Query - /lavanet/lava/plans/params: - get: - summary: Parameters queries the parameters of the module. - operationId: LavanetLavaPlansParams - responses: - '200': - description: A successful response. - schema: - type: object - properties: - params: - description: params holds all the parameters of this module. - type: object - description: >- - QueryParamsResponse is response type for the Query/Params RPC - method. - default: - description: An unexpected error response. - schema: - type: object - properties: - code: - type: integer - format: int32 - message: - type: string - details: - type: array - items: - type: object - properties: - '@type': - type: string - additionalProperties: {} - tags: - - Query - /lavanet/lava/plans/show_all_plans: + '/lavanet/lava/plans/info/{plan_index}': get: - summary: Queries a list of ShowAllPlans items. - operationId: LavanetLavaPlansShowAllPlans - responses: - '200': - description: A successful response. - schema: - type: object - properties: - plans_info: - type: array - items: - type: object - properties: - index: - type: string - name: - type: string - price: - type: object - properties: - denom: - type: string - amount: - type: string - description: >- - Coin defines a token with a denomination and an amount. - - - NOTE: The amount field is an Int which implements the - custom method - - signatures required by gogoproto. - default: - description: An unexpected error response. - schema: - type: object - properties: - code: - type: integer - format: int32 - message: - type: string - details: - type: array - items: - type: object - properties: - '@type': - type: string - additionalProperties: {} - tags: - - Query - '/lavanet/lava/plans/show_plan_info/{plan_index}': - get: - summary: Queries a list of ShowPlanInfo items. - operationId: LavanetLavaPlansShowPlanInfo + summary: Queries an Info item. + operationId: LavanetLavaPlansInfo responses: '200': description: A successful response. @@ -31229,6 +31159,7 @@ paths: format: uint64 title: the epoch that this plan was created price: + title: plan price (in ulava) type: object properties: denom: @@ -31243,7 +31174,6 @@ paths: custom method signatures required by gogoproto. - title: plan price (in ulava) compute_units: type: string format: uint64 @@ -31298,10 +31228,64 @@ paths: type: string tags: - Query - /lavanet/lava/projects/params: + /lavanet/lava/plans/list: + get: + summary: Queries a list of List items. 
+ operationId: LavanetLavaPlansList + responses: + '200': + description: A successful response. + schema: + type: object + properties: + plans_info: + type: array + items: + type: object + properties: + index: + type: string + name: + type: string + price: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + Coin defines a token with a denomination and an amount. + + + NOTE: The amount field is an Int which implements the + custom method + + signatures required by gogoproto. + default: + description: An unexpected error response. + schema: + type: object + properties: + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + '@type': + type: string + additionalProperties: {} + tags: + - Query + /lavanet/lava/plans/params: get: summary: Parameters queries the parameters of the module. - operationId: LavanetLavaProjectsParams + operationId: LavanetLavaPlansParams responses: '200': description: A successful response. @@ -31334,10 +31318,10 @@ paths: additionalProperties: {} tags: - Query - '/lavanet/lava/projects/show_developers_project/{developer}': + '/lavanet/lava/projects/developer/{developer}': get: summary: Queries a list of ShowDevelopersProject items. - operationId: LavanetLavaProjectsShowDevelopersProject + operationId: LavanetLavaProjectsDeveloper responses: '200': description: A successful response. @@ -31440,10 +31424,10 @@ paths: type: string tags: - Query - '/lavanet/lava/projects/show_project/{project}': + '/lavanet/lava/projects/info/{project}': get: summary: Queries a list of ShowProject items. - operationId: LavanetLavaProjectsShowProject + operationId: LavanetLavaProjectsInfo responses: '200': description: A successful response. @@ -31546,6 +31530,42 @@ paths: type: string tags: - Query + /lavanet/lava/projects/params: + get: + summary: Parameters queries the parameters of the module. + operationId: LavanetLavaProjectsParams + responses: + '200': + description: A successful response. + schema: + type: object + properties: + params: + description: params holds all the parameters of this module. + type: object + description: >- + QueryParamsResponse is response type for the Query/Params RPC + method. + default: + description: An unexpected error response. + schema: + type: object + properties: + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + '@type': + type: string + additionalProperties: {} + tags: + - Query /lavanet/lava/spec/params: get: summary: Parameters queries the parameters of the module. @@ -32834,10 +32854,10 @@ paths: type: string tags: - Query - '/lavanet/lava/subscription/current_subscription/{consumer}': + '/lavanet/lava/subscription/current/{consumer}': get: - summary: Queries a list of CurrentSubscription items. - operationId: LavanetLavaSubscriptionCurrentSubscription + summary: Queries a list of Current items. + operationId: LavanetLavaSubscriptionCurrent responses: '200': description: A successful response. 
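Aside on the route renames in the hunks above: the REST query paths change (for example `/lavanet/lava/plans/show_all_plans` becomes `/lavanet/lava/plans/list`, `/lavanet/lava/projects/show_project/{project}` becomes `/lavanet/lava/projects/info/{project}`, and `/lavanet/lava/subscription/current_subscription/{consumer}` becomes `/lavanet/lava/subscription/current/{consumer}`). A minimal sketch of a client hitting the renamed routes follows; only the paths come from this patch — the base URL (the default Cosmos SDK API address on localhost:1317) and the consumer address are assumptions, not something this diff defines.

package main

import (
	"fmt"
	"io"
	"net/http"
)

// queryLava fetches one of the renamed REST routes from a local node.
// Base URL is an assumption (default Cosmos SDK API listen address);
// the paths themselves are taken from the openapi.yml changes above.
func queryLava(path string) (string, error) {
	resp, err := http.Get("http://localhost:1317" + path)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	return string(body), nil
}

func main() {
	// previously /lavanet/lava/plans/show_all_plans
	plans, err := queryLava("/lavanet/lava/plans/list")
	if err != nil {
		fmt.Println("plans query failed:", err)
		return
	}
	fmt.Println(plans)

	// previously /lavanet/lava/subscription/current_subscription/{consumer};
	// the address below is a placeholder, not a real account.
	sub, err := queryLava("/lavanet/lava/subscription/current/" + "<consumer-address>")
	if err != nil {
		fmt.Println("subscription query failed:", err)
		return
	}
	fmt.Println(sub)
}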
@@ -55110,16 +55130,16 @@ definitions: type: string moniker: type: string - currentEpoch: + current_epoch: type: string format: uint64 - timeLeftToNextPairing: + time_left_to_next_pairing: type: string format: uint64 - specLastUpdatedBlock: + spec_last_updated_block: type: string format: uint64 - blockOfNextPairing: + block_of_next_pairing: type: string format: uint64 lavanet.lava.pairing.QueryGetProviderPaymentStorageResponse: @@ -55352,12 +55372,90 @@ definitions: index: type: string format: int64 - pairedProviders: + paired_providers: + type: string + format: uint64 + cu_per_epoch: + type: string + format: uint64 + lavanet.lava.pairing.RelayPrivateData: + type: object + properties: + connection_type: + type: string + api_url: + type: string + title: >- + some relays have associated urls that are filled with params + ('/block/{height}') + data: + type: string + format: byte + request_block: + type: string + format: int64 + apiInterface: + type: string + salt: + type: string + format: byte + lavanet.lava.pairing.RelaySession: + type: object + properties: + specID: + type: string + content_hash: + type: string + format: byte + session_id: + type: string + format: uint64 + cu_sum: type: string format: uint64 - cuPerEpoch: + title: total compute unit used including this relay + provider: + type: string + relay_num: type: string format: uint64 + QoSReport: + type: object + properties: + latency: + type: string + availability: + type: string + sync: + type: string + epoch: + type: string + format: int64 + unresponsive_providers: + type: string + format: byte + lava_chain_id: + type: string + sig: + type: string + format: byte + badge: + type: object + properties: + cu_allocation: + type: string + format: uint64 + epoch: + type: string + format: int64 + badge_pk: + type: string + format: byte + spec_id: + type: string + project_sig: + type: string + format: byte lavanet.lava.pairing.UniquePaymentStorageClientProvider: type: object properties: @@ -55382,6 +55480,7 @@ definitions: format: uint64 title: the epoch that this plan was created price: + title: plan price (in ulava) type: object properties: denom: @@ -55393,7 +55492,6 @@ definitions: NOTE: The amount field is an Int which implements the custom method signatures required by gogoproto. - title: plan price (in ulava) compute_units: type: string format: uint64 @@ -55423,41 +55521,7 @@ definitions: type: string format: uint64 title: discount for buying the plan for a year - lavanet.lava.plans.QueryParamsResponse: - type: object - properties: - params: - description: params holds all the parameters of this module. - type: object - description: QueryParamsResponse is response type for the Query/Params RPC method. - lavanet.lava.plans.QueryShowAllPlansResponse: - type: object - properties: - plans_info: - type: array - items: - type: object - properties: - index: - type: string - name: - type: string - price: - type: object - properties: - denom: - type: string - amount: - type: string - description: >- - Coin defines a token with a denomination and an amount. - - - NOTE: The amount field is an Int which implements the custom - method - - signatures required by gogoproto. - lavanet.lava.plans.QueryShowPlanInfoResponse: + lavanet.lava.plans.QueryInfoResponse: type: object properties: plan_info: @@ -55470,6 +55534,7 @@ definitions: format: uint64 title: the epoch that this plan was created price: + title: plan price (in ulava) type: object properties: denom: @@ -55484,7 +55549,6 @@ definitions: method signatures required by gogoproto. 
- title: plan price (in ulava) compute_units: type: string format: uint64 @@ -55514,7 +55578,41 @@ definitions: type: string format: uint64 title: discount for buying the plan for a year - lavanet.lava.plans.showAllPlansInfoStruct: + lavanet.lava.plans.QueryListResponse: + type: object + properties: + plans_info: + type: array + items: + type: object + properties: + index: + type: string + name: + type: string + price: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + Coin defines a token with a denomination and an amount. + + + NOTE: The amount field is an Int which implements the custom + method + + signatures required by gogoproto. + lavanet.lava.plans.QueryParamsResponse: + type: object + properties: + params: + description: params holds all the parameters of this module. + type: object + description: QueryParamsResponse is response type for the Query/Params RPC method. + lavanet.lava.plans.listInfoStruct: type: object properties: index: @@ -55672,14 +55770,7 @@ definitions: - DEVELOPER default: NONE title: 'bitmap, must only be power of 2' - lavanet.lava.projects.QueryParamsResponse: - type: object - properties: - params: - description: params holds all the parameters of this module. - type: object - description: QueryParamsResponse is response type for the Query/Params RPC method. - lavanet.lava.projects.QueryShowDevelopersProjectResponse: + lavanet.lava.projects.QueryDeveloperResponse: type: object properties: project: @@ -55751,7 +55842,7 @@ definitions: used_cu: type: string format: uint64 - lavanet.lava.projects.QueryShowProjectResponse: + lavanet.lava.projects.QueryInfoResponse: type: object properties: project: @@ -55823,6 +55914,13 @@ definitions: used_cu: type: string format: uint64 + lavanet.lava.projects.QueryParamsResponse: + type: object + properties: + params: + description: params holds all the parameters of this module. + type: object + description: QueryParamsResponse is response type for the Query/Params RPC method. lavanet.lava.spec.ApiInterface: type: object properties: @@ -56852,7 +56950,7 @@ definitions: lavanet.lava.subscription.Params: type: object description: Params defines the parameters for the module. 
- lavanet.lava.subscription.QueryCurrentSubscriptionResponse: + lavanet.lava.subscription.QueryCurrentResponse: type: object properties: sub: diff --git a/go.mod b/go.mod index c05fd8e7fe..bcc34dc524 100644 --- a/go.mod +++ b/go.mod @@ -49,7 +49,6 @@ require ( github.com/ghodss/yaml v1.0.0 // indirect github.com/gogo/googleapis v1.4.0 // indirect github.com/golang/glog v1.0.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2 // indirect github.com/pelletier/go-toml/v2 v2.0.5 // indirect golang.org/x/mod v0.7.0 // indirect golang.org/x/tools v0.2.0 // indirect diff --git a/go.sum b/go.sum index 83ff3be0d1..8626d7221e 100644 --- a/go.sum +++ b/go.sum @@ -781,8 +781,6 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2 h1:gDLXvp5S9izjldquuoAhDzccbskOL6tDC5jMSyx3zxE= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2/go.mod h1:7pdNwVWBBHGiCxa9lAszqCJMbfTISJ7oMftp8+UGV08= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= github.com/gtank/merlin v0.1.1-0.20191105220539-8318aed1a79f/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= diff --git a/scripts/init_chain_commands.sh b/scripts/init_chain_commands.sh index 429956b019..508ae8b659 100755 --- a/scripts/init_chain_commands.sh +++ b/scripts/init_chain_commands.sh @@ -6,11 +6,11 @@ source $__dir/useful_commands.sh killall screen screen -wipe GASPRICE="0.000000001ulava" -lavad tx gov submit-proposal spec-add ./cookbook/spec_add_ibc.json,./cookbook/spec_add_cosmoswasm.json,./cookbook/spec_add_cosmossdk.json,./cookbook/spec_add_cosmossdk_full.json,./cookbook/spec_add_ethereum.json,./cookbook/spec_add_cosmoshub.json -y --from alice --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx gov submit-proposal spec-add ./cookbook/spec_add_ibc.json,./cookbook/spec_add_cosmoswasm.json,./cookbook/spec_add_cosmossdk.json,./cookbook/spec_add_cosmossdk_full.json,./cookbook/spec_add_ethereum.json -y --from alice --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE lavad tx gov vote 1 yes -y --from alice --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE sleep 4 -lavad tx gov submit-proposal spec-add ./cookbook/spec_add_lava.json,./cookbook/spec_add_osmosis.json,./cookbook/spec_add_fantom.json,./cookbook/spec_add_celo.json,./cookbook/spec_add_optimism.json,./cookbook/spec_add_arbitrum.json -y --from alice --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx gov submit-proposal spec-add ./cookbook/spec_add_cosmoshub.json,./cookbook/spec_add_lava.json,./cookbook/spec_add_osmosis.json,./cookbook/spec_add_fantom.json,./cookbook/spec_add_celo.json,./cookbook/spec_add_optimism.json,./cookbook/spec_add_arbitrum.json -y --from alice --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE lavad tx gov vote 2 yes -y --from alice --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE sleep 4 diff --git a/testutil/common/common.go b/testutil/common/common.go index d779d56517..26f82b8247 100644 --- a/testutil/common/common.go +++ 
b/testutil/common/common.go @@ -81,25 +81,22 @@ func StakeAccount(t *testing.T, ctx context.Context, keepers testkeeper.Keepers, } } -func CreateRelay(t *testing.T, provider Account, consumer Account, data []byte, seassionID uint64, chainID string, cuSum uint64, blockHeight int64, relayNum uint64, requestBlock int64, dataReliability *types.VRFData) types.RelayRequest { - relayRequest := &types.RelayRequest{ - Provider: provider.Addr.String(), - ApiUrl: "", - Data: data, - SessionId: seassionID, - ChainID: chainID, - CuSum: cuSum, - BlockHeight: blockHeight, - RelayNum: relayNum, - RequestBlock: requestBlock, - DataReliability: nil, +func BuildRelayRequest(ctx context.Context, provider string, contentHash []byte, cuSum uint64, spec string, QoSDR *types.QualityOfServiceReport) *types.RelaySession { + relaySession := &types.RelaySession{ + Provider: provider, + ContentHash: contentHash, + SessionId: uint64(1), + SpecID: spec, + CuSum: cuSum, + Epoch: sdk.UnwrapSDKContext(ctx).BlockHeight(), + RelayNum: 0, + QoSReport: QoSDR, + LavaChainId: sdk.UnwrapSDKContext(ctx).BlockHeader().ChainID, } - - sig, err := sigs.SignRelay(consumer.SK, *relayRequest) - relayRequest.Sig = sig - require.Nil(t, err) - - return *relayRequest + if QoSDR != nil { + QoSDR.ComputeQoS() + } + return relaySession } func CreateMsgDetection(ctx context.Context, consumer Account, provider0 Account, provider1 Account, spec spectypes.Spec) (conflicttypes.MsgDetection, error) { diff --git a/x/pairing/keeper/msg_server_relay_payment_test.go b/x/pairing/keeper/msg_server_relay_payment_test.go index d04d35449d..078e6c1d73 100644 --- a/x/pairing/keeper/msg_server_relay_payment_test.go +++ b/x/pairing/keeper/msg_server_relay_payment_test.go @@ -138,7 +138,7 @@ func TestRelayPaymentMemoryTransferAfterEpochChange(t *testing.T) { } // Create relay request that was done in the first epoch. 
Change session ID each iteration to avoid double spending error (provider asks reward for the same transaction twice) - relaySession := buildRelayRequest(ts.ctx, ts.providers[0].Addr.String(), []byte(ts.spec.Apis[0].Name), ts.spec.Apis[0].ComputeUnits*10, ts.spec.Name, nil) + relaySession := common.BuildRelayRequest(ts.ctx, ts.providers[0].Addr.String(), []byte(ts.spec.Apis[0].Name), ts.spec.Apis[0].ComputeUnits*10, ts.spec.Name, nil) relaySession.Epoch = int64(firstEpoch) relaySession.SessionId = sessionCounter @@ -203,7 +203,7 @@ func TestRelayPaymentBlockHeight(t *testing.T) { require.Nil(t, err) ts.ctx = testkeeper.AdvanceEpoch(ts.ctx, ts.keepers) - relaySession := buildRelayRequest(ts.ctx, ts.providers[0].Addr.String(), []byte(ts.spec.Apis[0].Name), ts.spec.Apis[0].ComputeUnits*10, ts.spec.Name, nil) + relaySession := common.BuildRelayRequest(ts.ctx, ts.providers[0].Addr.String(), []byte(ts.spec.Apis[0].Name), ts.spec.Apis[0].ComputeUnits*10, ts.spec.Name, nil) relaySession.Epoch = sdk.UnwrapSDKContext(ts.ctx).BlockHeight() + tt.blockTime sig, err := sigs.SignRelay(ts.clients[0].SK, *relaySession) @@ -258,7 +258,7 @@ func TestRelayPaymentOverUse(t *testing.T) { maxcu, err := ts.keepers.Pairing.GetAllowedCUForBlock(sdk.UnwrapSDKContext(ts.ctx), uint64(sdk.UnwrapSDKContext(ts.ctx).BlockHeight()), entry) require.Nil(t, err) - relaySession := buildRelayRequest(ts.ctx, ts.providers[0].Addr.String(), []byte(ts.spec.Apis[0].Name), maxcu*2, ts.spec.Name, nil) + relaySession := common.BuildRelayRequest(ts.ctx, ts.providers[0].Addr.String(), []byte(ts.spec.Apis[0].Name), maxcu*2, ts.spec.Name, nil) sig, err := sigs.SignRelay(ts.clients[0].SK, *relaySession) relaySession.Sig = sig require.Nil(t, err) @@ -305,7 +305,7 @@ func TestRelayPaymentNotUnstakingProviderForUnresponsivenessIfNoEpochInformation var Relays []*types.RelaySession for clientIndex := 0; clientIndex < testClientAmount; clientIndex++ { // testing testClientAmount of complaints - relaySession := buildRelayRequest(ts.ctx, ts.providers[0].Addr.String(), []byte(ts.spec.Apis[0].Name), ts.spec.Apis[0].ComputeUnits*10, ts.spec.Name, nil) + relaySession := common.BuildRelayRequest(ts.ctx, ts.providers[0].Addr.String(), []byte(ts.spec.Apis[0].Name), ts.spec.Apis[0].ComputeUnits*10, ts.spec.Name, nil) relaySession.UnresponsiveProviders = unresponsiveProvidersData // create the complaint sig, err := sigs.SignRelay(ts.clients[clientIndex].SK, *relaySession) relaySession.Sig = sig @@ -348,7 +348,7 @@ func TestRelayPaymentUnstakingProviderForUnresponsivenessWithBadDataInput(t *tes var Relays []*types.RelaySession var totalCu uint64 for clientIndex := 0; clientIndex < testClientAmount; clientIndex++ { // testing testClientAmount of complaints - relaySession := buildRelayRequest(ts.ctx, ts.providers[0].Addr.String(), []byte(ts.spec.Apis[0].Name), ts.spec.Apis[0].ComputeUnits*10, ts.spec.Name, nil) + relaySession := common.BuildRelayRequest(ts.ctx, ts.providers[0].Addr.String(), []byte(ts.spec.Apis[0].Name), ts.spec.Apis[0].ComputeUnits*10, ts.spec.Name, nil) relaySession.UnresponsiveProviders = unresponsiveProvidersData[clientIndex] totalCu += relaySession.CuSum @@ -376,7 +376,7 @@ func TestRelayPaymentNotUnstakingProviderForUnresponsivenessBecauseOfServices(t var RelaysForUnresponsiveProviderInFirstTwoEpochs []*types.RelaySession for i := 0; i < 2; i++ { // move to epoch 3 so we can check enough epochs in the past - relaySession := buildRelayRequest(ts.ctx, ts.providers[1].Addr.String(), []byte(ts.spec.Apis[0].Name), 
ts.spec.Apis[0].ComputeUnits*10, ts.spec.Name, nil) + relaySession := common.BuildRelayRequest(ts.ctx, ts.providers[1].Addr.String(), []byte(ts.spec.Apis[0].Name), ts.spec.Apis[0].ComputeUnits*10, ts.spec.Name, nil) sig, err := sigs.SignRelay(ts.clients[i].SK, *relaySession) relaySession.Sig = sig @@ -392,7 +392,7 @@ func TestRelayPaymentNotUnstakingProviderForUnresponsivenessBecauseOfServices(t var Relays []*types.RelaySession for clientIndex := 0; clientIndex < testClientAmount; clientIndex++ { // testing testClientAmount of complaints - relaySession := buildRelayRequest(ts.ctx, ts.providers[0].Addr.String(), []byte(ts.spec.Apis[0].Name), ts.spec.Apis[0].ComputeUnits*10, ts.spec.Name, nil) + relaySession := common.BuildRelayRequest(ts.ctx, ts.providers[0].Addr.String(), []byte(ts.spec.Apis[0].Name), ts.spec.Apis[0].ComputeUnits*10, ts.spec.Name, nil) relaySession.UnresponsiveProviders = unresponsiveProvidersData sig, err := sigs.SignRelay(ts.clients[clientIndex].SK, *relaySession) @@ -422,7 +422,7 @@ func TestRelayPaymentDoubleSpending(t *testing.T) { ts.ctx = testkeeper.AdvanceEpoch(ts.ctx, ts.keepers) cuSum := ts.spec.GetApis()[0].ComputeUnits * 10 - relaySession := buildRelayRequest(ts.ctx, ts.providers[0].Addr.String(), []byte(ts.spec.Apis[0].Name), ts.spec.Apis[0].ComputeUnits*10, ts.spec.Name, nil) + relaySession := common.BuildRelayRequest(ts.ctx, ts.providers[0].Addr.String(), []byte(ts.spec.Apis[0].Name), ts.spec.Apis[0].ComputeUnits*10, ts.spec.Name, nil) sig, err := sigs.SignRelay(ts.clients[0].SK, *relaySession) relaySession.Sig = sig @@ -459,7 +459,7 @@ func TestRelayPaymentDataModification(t *testing.T) { require.Nil(t, err) ts.ctx = testkeeper.AdvanceEpoch(ts.ctx, ts.keepers) - relaySession := buildRelayRequest(ts.ctx, ts.providers[0].Addr.String(), []byte(ts.spec.Apis[0].Name), ts.spec.Apis[0].ComputeUnits*10, ts.spec.Name, nil) + relaySession := common.BuildRelayRequest(ts.ctx, ts.providers[0].Addr.String(), []byte(ts.spec.Apis[0].Name), ts.spec.Apis[0].ComputeUnits*10, ts.spec.Name, nil) sig, err := sigs.SignRelay(ts.clients[0].SK, *relaySession) relaySession.Sig = sig @@ -503,7 +503,7 @@ func TestRelayPaymentDelayedDoubleSpending(t *testing.T) { require.Nil(t, err) ts.ctx = testkeeper.AdvanceEpoch(ts.ctx, ts.keepers) - relaySession := buildRelayRequest(ts.ctx, ts.providers[0].Addr.String(), []byte(ts.spec.Apis[0].Name), ts.spec.Apis[0].ComputeUnits*10, ts.spec.Name, nil) + relaySession := common.BuildRelayRequest(ts.ctx, ts.providers[0].Addr.String(), []byte(ts.spec.Apis[0].Name), ts.spec.Apis[0].ComputeUnits*10, ts.spec.Name, nil) sig, err := sigs.SignRelay(ts.clients[0].SK, *relaySession) relaySession.Sig = sig @@ -580,7 +580,7 @@ func TestRelayPaymentOldEpochs(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { cuSum := ts.spec.Apis[0].ComputeUnits * 10 - relaySession := buildRelayRequest(ts.ctx, ts.providers[0].Addr.String(), []byte(ts.spec.Apis[0].Name), cuSum, ts.spec.Name, nil) + relaySession := common.BuildRelayRequest(ts.ctx, ts.providers[0].Addr.String(), []byte(ts.spec.Apis[0].Name), cuSum, ts.spec.Name, nil) relaySession.SessionId = tt.sid relaySession.Epoch = sdk.UnwrapSDKContext(ts.ctx).BlockHeight() - int64(blocksInEpoch)*tt.epoch @@ -643,7 +643,7 @@ func TestRelayPaymentQoS(t *testing.T) { cuSum := ts.spec.Apis[0].ComputeUnits * 10 QoS := &types.QualityOfServiceReport{Latency: tt.latency, Availability: tt.availability, Sync: tt.sync} - relaySession := buildRelayRequest(ts.ctx, ts.providers[0].Addr.String(), 
[]byte(ts.spec.Apis[0].Name), cuSum, ts.spec.Name, QoS) + relaySession := common.BuildRelayRequest(ts.ctx, ts.providers[0].Addr.String(), []byte(ts.spec.Apis[0].Name), cuSum, ts.spec.Name, QoS) sig, err := sigs.SignRelay(ts.clients[0].SK, *relaySession) relaySession.Sig = sig require.Nil(t, err) @@ -718,7 +718,7 @@ func TestRelayPaymentDataReliability(t *testing.T) { cuSum := ts.spec.Apis[0].ComputeUnits * 10 QoS := &types.QualityOfServiceReport{Latency: sdk.NewDecWithPrec(1, 0), Availability: sdk.NewDecWithPrec(1, 0), Sync: sdk.NewDecWithPrec(1, 0)} - relaySession := buildRelayRequest(ts.ctx, ts.providers[0].Addr.String(), []byte(ts.spec.Apis[0].Name), cuSum, ts.spec.Name, QoS) + relaySession := common.BuildRelayRequest(ts.ctx, ts.providers[0].Addr.String(), []byte(ts.spec.Apis[0].Name), cuSum, ts.spec.Name, QoS) relaySession.Sig, err = sigs.SignRelay(ts.clients[0].SK, *relaySession) require.Nil(t, err) @@ -795,7 +795,7 @@ func TestRelayPaymentDataReliability(t *testing.T) { } QoSDR := &types.QualityOfServiceReport{Latency: sdk.NewDecWithPrec(1, 0), Availability: sdk.NewDecWithPrec(1, 0), Sync: sdk.NewDecWithPrec(1, 0)} - relayRequestWithDataReliability0 := buildRelayRequest(ts.ctx, providers[index0].Address, []byte(ts.spec.Apis[0].Name), cuSum, ts.spec.Name, QoSDR) + relayRequestWithDataReliability0 := common.BuildRelayRequest(ts.ctx, providers[index0].Address, []byte(ts.spec.Apis[0].Name), cuSum, ts.spec.Name, QoSDR) relayRequestWithDataReliability0.Sig, err = sigs.SignRelay(ts.clients[0].SK, *relayRequestWithDataReliability0) require.Nil(t, err) provider := ts.getProvider(providers[index0].Address) @@ -819,24 +819,6 @@ func TestRelayPaymentDataReliability(t *testing.T) { } } -func buildRelayRequest(ctx context.Context, provider string, contentHash []byte, cuSum uint64, spec string, QoSDR *types.QualityOfServiceReport) *types.RelaySession { - relaySession := &types.RelaySession{ - Provider: provider, - ContentHash: contentHash, - SessionId: uint64(1), - SpecID: spec, - CuSum: cuSum, - Epoch: sdk.UnwrapSDKContext(ctx).BlockHeight(), - RelayNum: 0, - QoSReport: QoSDR, - LavaChainId: sdk.UnwrapSDKContext(ctx).BlockHeader().ChainID, - } - if QoSDR != nil { - QoSDR.ComputeQoS() - } - return relaySession -} - // client sends data reliability to a different provider collaborating to get more rewards func TestRelayPaymentDataReliabilityWrongProvider(t *testing.T) { ts := setupForPaymentTest(t) @@ -856,7 +838,7 @@ func TestRelayPaymentDataReliabilityWrongProvider(t *testing.T) { cuSum := ts.spec.Apis[0].ComputeUnits * 10 QoS := &types.QualityOfServiceReport{Latency: sdk.NewDecWithPrec(1, 0), Availability: sdk.NewDecWithPrec(1, 0), Sync: sdk.NewDecWithPrec(1, 0)} - relaySession := buildRelayRequest(ts.ctx, ts.providers[0].Addr.String(), []byte(ts.spec.Apis[0].Name), cuSum, ts.spec.Name, QoS) + relaySession := common.BuildRelayRequest(ts.ctx, ts.providers[0].Addr.String(), []byte(ts.spec.Apis[0].Name), cuSum, ts.spec.Name, QoS) relaySession.Sig, err = sigs.SignRelay(ts.clients[0].SK, *relaySession) require.Nil(t, err) @@ -919,7 +901,7 @@ GetWrongProvider: require.Nil(t, err) QoSDR := &types.QualityOfServiceReport{Latency: sdk.NewDecWithPrec(1, 0), Availability: sdk.NewDecWithPrec(1, 0), Sync: sdk.NewDecWithPrec(1, 0)} - relayRequestWithDataReliability0 := buildRelayRequest(ts.ctx, providers[wrongProviderIndex].Address, []byte(ts.spec.Apis[0].Name), cuSum, ts.spec.Name, QoSDR) + relayRequestWithDataReliability0 := common.BuildRelayRequest(ts.ctx, providers[wrongProviderIndex].Address, 
[]byte(ts.spec.Apis[0].Name), cuSum, ts.spec.Name, QoSDR) relayRequestWithDataReliability0.Sig, err = sigs.SignRelay(ts.clients[0].SK, *relayRequestWithDataReliability0) require.Nil(t, err) @@ -949,7 +931,7 @@ func TestRelayPaymentDataReliabilityBelowReliabilityThreshold(t *testing.T) { cuSum := ts.spec.Apis[0].ComputeUnits * 10 QoS := &types.QualityOfServiceReport{Latency: sdk.NewDecWithPrec(1, 0), Availability: sdk.NewDecWithPrec(1, 0), Sync: sdk.NewDecWithPrec(1, 0)} - relaySession := buildRelayRequest(ts.ctx, ts.providers[0].Addr.String(), []byte(ts.spec.Apis[0].Name), cuSum, ts.spec.Name, QoS) + relaySession := common.BuildRelayRequest(ts.ctx, ts.providers[0].Addr.String(), []byte(ts.spec.Apis[0].Name), cuSum, ts.spec.Name, QoS) relaySession.Sig, err = sigs.SignRelay(ts.clients[0].SK, *relaySession) require.Nil(t, err) @@ -989,7 +971,7 @@ func TestRelayPaymentDataReliabilityBelowReliabilityThreshold(t *testing.T) { // make all providers send a datareliability payment request. Everyone should fail for _, provider := range ts.providers { QoSDR := &types.QualityOfServiceReport{Latency: sdk.NewDecWithPrec(1, 0), Availability: sdk.NewDecWithPrec(1, 0), Sync: sdk.NewDecWithPrec(1, 0)} - relayRequestWithDataReliability0 := buildRelayRequest(ts.ctx, provider.Addr.String(), []byte(ts.spec.Apis[0].Name), cuSum, ts.spec.Name, QoSDR) + relayRequestWithDataReliability0 := common.BuildRelayRequest(ts.ctx, provider.Addr.String(), []byte(ts.spec.Apis[0].Name), cuSum, ts.spec.Name, QoSDR) relayRequestWithDataReliability0.Sig, err = sigs.SignRelay(ts.clients[0].SK, *relayRequestWithDataReliability0) require.Nil(t, err) @@ -1018,7 +1000,7 @@ func TestRelayPaymentDataReliabilityDifferentClientSign(t *testing.T) { cuSum := ts.spec.Apis[0].ComputeUnits * 10 QoS := &types.QualityOfServiceReport{Latency: sdk.NewDecWithPrec(1, 0), Availability: sdk.NewDecWithPrec(1, 0), Sync: sdk.NewDecWithPrec(1, 0)} - relaySession := buildRelayRequest(ts.ctx, ts.providers[0].Addr.String(), []byte(ts.spec.Apis[0].Name), cuSum, ts.spec.Name, QoS) + relaySession := common.BuildRelayRequest(ts.ctx, ts.providers[0].Addr.String(), []byte(ts.spec.Apis[0].Name), cuSum, ts.spec.Name, QoS) QoS.ComputeQoS() relaySession.Sig, err = sigs.SignRelay(ts.clients[0].SK, *relaySession) require.Nil(t, err) @@ -1066,7 +1048,7 @@ func TestRelayPaymentDataReliabilityDifferentClientSign(t *testing.T) { require.Nil(t, err) QoSDR := &types.QualityOfServiceReport{Latency: sdk.NewDecWithPrec(1, 0), Availability: sdk.NewDecWithPrec(1, 0), Sync: sdk.NewDecWithPrec(1, 0)} - relayRequestWithDataReliability0 := buildRelayRequest(ts.ctx, providers[index0].Address, []byte(ts.spec.Apis[0].Name), cuSum, ts.spec.Name, QoSDR) + relayRequestWithDataReliability0 := common.BuildRelayRequest(ts.ctx, providers[index0].Address, []byte(ts.spec.Apis[0].Name), cuSum, ts.spec.Name, QoSDR) relayRequestWithDataReliability0.Sig, err = sigs.SignRelay(ts.clients[1].SK, *relayRequestWithDataReliability0) require.Nil(t, err) @@ -1095,7 +1077,7 @@ func TestRelayPaymentDataReliabilityDoubleSpendDifferentEpoch(t *testing.T) { cuSum := ts.spec.Apis[0].ComputeUnits * 10 QoS := &types.QualityOfServiceReport{Latency: sdk.NewDecWithPrec(1, 0), Availability: sdk.NewDecWithPrec(1, 0), Sync: sdk.NewDecWithPrec(1, 0)} - relaySession := buildRelayRequest(ts.ctx, ts.providers[0].Addr.String(), []byte(ts.spec.Apis[0].Name), cuSum, ts.spec.Name, QoS) + relaySession := common.BuildRelayRequest(ts.ctx, ts.providers[0].Addr.String(), []byte(ts.spec.Apis[0].Name), cuSum, ts.spec.Name, QoS) 
relaySession.Sig, err = sigs.SignRelay(ts.clients[0].SK, *relaySession) require.Nil(t, err) @@ -1143,7 +1125,7 @@ func TestRelayPaymentDataReliabilityDoubleSpendDifferentEpoch(t *testing.T) { require.Nil(t, err) QoSDR := &types.QualityOfServiceReport{Latency: sdk.NewDecWithPrec(1, 0), Availability: sdk.NewDecWithPrec(1, 0), Sync: sdk.NewDecWithPrec(1, 0)} - relayRequestWithDataReliability0 := buildRelayRequest(ts.ctx, providers[index0].Address, []byte(ts.spec.Apis[0].Name), cuSum, ts.spec.Name, QoSDR) + relayRequestWithDataReliability0 := common.BuildRelayRequest(ts.ctx, providers[index0].Address, []byte(ts.spec.Apis[0].Name), cuSum, ts.spec.Name, QoSDR) relayRequestWithDataReliability0.Sig, err = sigs.SignRelay(ts.clients[0].SK, *relayRequestWithDataReliability0) require.Nil(t, err) @@ -1202,7 +1184,7 @@ func TestEpochPaymentDeletion(t *testing.T) { require.Nil(t, err) ts.ctx = testkeeper.AdvanceEpoch(ts.ctx, ts.keepers) - relaySession := buildRelayRequest(ts.ctx, ts.providers[0].Addr.String(), []byte(ts.spec.Apis[0].Name), ts.spec.Apis[0].ComputeUnits*10, ts.spec.Name, nil) + relaySession := common.BuildRelayRequest(ts.ctx, ts.providers[0].Addr.String(), []byte(ts.spec.Apis[0].Name), ts.spec.Apis[0].ComputeUnits*10, ts.spec.Name, nil) sig, err := sigs.SignRelay(ts.clients[0].SK, *relaySession) relaySession.Sig = sig diff --git a/x/pairing/keeper/pairing_subscription_test.go b/x/pairing/keeper/pairing_subscription_test.go index b387609c36..0f39fb63ec 100644 --- a/x/pairing/keeper/pairing_subscription_test.go +++ b/x/pairing/keeper/pairing_subscription_test.go @@ -4,6 +4,7 @@ import ( "testing" sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/lavanet/lava/relayer/sigs" "github.com/lavanet/lava/testutil/common" testkeeper "github.com/lavanet/lava/testutil/keeper" "github.com/lavanet/lava/x/pairing/types" @@ -78,22 +79,11 @@ func TestRelayPaymentSubscription(t *testing.T) { for i, tt := range tests { t.Run(tt.name, func(t *testing.T) { - - relayRequest := common.CreateRelay( - t, - *ts.providers[0], - consumer, - []byte(ts.spec.Apis[0].Name), - uint64(i), - ts.spec.Name, - tt.cu, - sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), - 0, - -1, - nil, - ) - - _, err = ts.servers.PairingServer.RelayPayment(ts.ctx, &types.MsgRelayPayment{Creator: ts.providers[0].Addr.String(), Relays: []*types.RelayRequest{&relayRequest}}) + relayRequest := common.BuildRelayRequest(ts.ctx, ts.providers[0].Addr.String(), []byte(ts.spec.Apis[0].Name), tt.cu, ts.spec.Name, nil) + relayRequest.SessionId = uint64(i) + relayRequest.Sig, err = sigs.SignRelay(ts.clients[0].SK, *relayRequest) + require.Nil(t, err) + _, err = ts.servers.PairingServer.RelayPayment(ts.ctx, &types.MsgRelayPayment{Creator: ts.providers[0].Addr.String(), Relays: []*types.RelaySession{relayRequest}}) require.Equal(t, tt.valid, err == nil) }) } @@ -124,41 +114,21 @@ func TestRelayPaymentSubscriptionCU(t *testing.T) { i := 0 for ; uint64(i) < ts.plan.ComputeUnits/ts.plan.ComputeUnitsPerEpoch; i++ { - relayRequest := common.CreateRelay( - t, - *ts.providers[0], - consumer, - []byte(ts.spec.Apis[0].Name), - uint64(i), - ts.spec.Name, - ts.plan.ComputeUnitsPerEpoch, - sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), - 0, - -1, - nil, - ) - - _, err = ts.servers.PairingServer.RelayPayment(ts.ctx, &types.MsgRelayPayment{Creator: ts.providers[0].Addr.String(), Relays: []*types.RelayRequest{&relayRequest}}) + relayRequest := common.BuildRelayRequest(ts.ctx, ts.providers[0].Addr.String(), []byte(ts.spec.Apis[0].Name), ts.plan.ComputeUnitsPerEpoch, 
ts.spec.Name, nil) + relayRequest.SessionId = uint64(i) + relayRequest.Sig, err = sigs.SignRelay(consumer.SK, *relayRequest) + require.Nil(t, err) + _, err = ts.servers.PairingServer.RelayPayment(ts.ctx, &types.MsgRelayPayment{Creator: ts.providers[0].Addr.String(), Relays: []*types.RelaySession{relayRequest}}) require.Nil(t, err) ts.ctx = testkeeper.AdvanceEpoch(ts.ctx, ts.keepers) } //last iteration should finish the plan quota - relayRequest := common.CreateRelay( - t, - *ts.providers[0], - consumer, - []byte(ts.spec.Apis[0].Name), - uint64(i+1), - ts.spec.Name, - ts.plan.ComputeUnitsPerEpoch, - sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), - 0, - -1, - nil, - ) - - _, err = ts.servers.PairingServer.RelayPayment(ts.ctx, &types.MsgRelayPayment{Creator: ts.providers[0].Addr.String(), Relays: []*types.RelayRequest{&relayRequest}}) + relayRequest := common.BuildRelayRequest(ts.ctx, ts.providers[0].Addr.String(), []byte(ts.spec.Apis[0].Name), ts.plan.ComputeUnitsPerEpoch, ts.spec.Name, nil) + relayRequest.SessionId = uint64(i + 1) + relayRequest.Sig, err = sigs.SignRelay(consumer.SK, *relayRequest) + require.Nil(t, err) + _, err = ts.servers.PairingServer.RelayPayment(ts.ctx, &types.MsgRelayPayment{Creator: ts.providers[0].Addr.String(), Relays: []*types.RelaySession{relayRequest}}) require.NotNil(t, err) } diff --git a/x/pairing/keeper/pairing_test.go b/x/pairing/keeper/pairing_test.go index d22dcc05e4..a9f696de2b 100644 --- a/x/pairing/keeper/pairing_test.go +++ b/x/pairing/keeper/pairing_test.go @@ -139,7 +139,7 @@ func TestValidatePairingDeterminism(t *testing.T) { for idx, provider := range pairedProviders { providerAddress, err := sdk.AccAddressFromBech32(provider.Address) require.Nil(t, err) - valid, _, foundIndex, errPairing := keepers.Pairing.ValidatePairingForClient(sdk.UnwrapSDKContext(ctx), spec.Index, consumer1.Addr, providerAddress, verifyPairingOncurrentBlock) + valid, _, foundIndex, _, _, _, errPairing := keepers.Pairing.ValidatePairingForClient(sdk.UnwrapSDKContext(ctx), spec.Index, consumer1.Addr, providerAddress, verifyPairingOncurrentBlock) require.Nil(t, errPairing) require.Equal(t, idx, foundIndex, "Failed ValidatePairingForClient", provider, uint64(sdk.UnwrapSDKContext(ctx).BlockHeight())) require.True(t, valid) From 5e382169c9e47a69d9a2c8d9d291ba0e0a68484f Mon Sep 17 00:00:00 2001 From: omer mishael Date: Thu, 16 Mar 2023 11:56:19 +0200 Subject: [PATCH 114/123] fix unitest merge problem --- x/pairing/keeper/pairing_subscription_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/x/pairing/keeper/pairing_subscription_test.go b/x/pairing/keeper/pairing_subscription_test.go index 0f39fb63ec..620a616b3c 100644 --- a/x/pairing/keeper/pairing_subscription_test.go +++ b/x/pairing/keeper/pairing_subscription_test.go @@ -81,10 +81,10 @@ func TestRelayPaymentSubscription(t *testing.T) { t.Run(tt.name, func(t *testing.T) { relayRequest := common.BuildRelayRequest(ts.ctx, ts.providers[0].Addr.String(), []byte(ts.spec.Apis[0].Name), tt.cu, ts.spec.Name, nil) relayRequest.SessionId = uint64(i) - relayRequest.Sig, err = sigs.SignRelay(ts.clients[0].SK, *relayRequest) + relayRequest.Sig, err = sigs.SignRelay(consumer.SK, *relayRequest) require.Nil(t, err) _, err = ts.servers.PairingServer.RelayPayment(ts.ctx, &types.MsgRelayPayment{Creator: ts.providers[0].Addr.String(), Relays: []*types.RelaySession{relayRequest}}) - require.Equal(t, tt.valid, err == nil) + require.Equal(t, tt.valid, err == nil, "results incorrect for usage of %d err == nil: %t", tt.cu, err 
== nil) }) } } From 09fb3151254ea49c51055414f4168f9badda3f04 Mon Sep 17 00:00:00 2001 From: omer mishael Date: Thu, 16 Mar 2023 12:39:18 +0200 Subject: [PATCH 115/123] fixed lint, added debug print --- protocol/lavasession/consumer_types.go | 3 ++- testutil/common/common.go | 8 ++++---- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/protocol/lavasession/consumer_types.go b/protocol/lavasession/consumer_types.go index 55dc5a0c82..b4d2389e89 100644 --- a/protocol/lavasession/consumer_types.go +++ b/protocol/lavasession/consumer_types.go @@ -340,11 +340,12 @@ func (cs *SingleConsumerSession) CalculateQoS(cu uint64, latency time.Duration, cs.QoSInfo.LastQoSReport.Sync = sdk.NewDec(cs.QoSInfo.SyncScoreSum).QuoInt64(cs.QoSInfo.TotalSyncScore) if sdk.OneDec().GT(cs.QoSInfo.LastQoSReport.Sync) { - utils.LavaFormatInfo("QoS Sync report", + utils.LavaFormatDebug("QoS Sync report", &map[string]string{ "Sync": cs.QoSInfo.LastQoSReport.Sync.String(), "block diff": strconv.FormatInt(blockHeightDiff, 10), "sync score": strconv.FormatInt(cs.QoSInfo.SyncScoreSum, 10) + "/" + strconv.FormatInt(cs.QoSInfo.TotalSyncScore, 10), + "session_id": strconv.FormatInt(blockHeightDiff, 10), }) } } diff --git a/testutil/common/common.go b/testutil/common/common.go index 26f82b8247..897584deb4 100644 --- a/testutil/common/common.go +++ b/testutil/common/common.go @@ -81,7 +81,7 @@ func StakeAccount(t *testing.T, ctx context.Context, keepers testkeeper.Keepers, } } -func BuildRelayRequest(ctx context.Context, provider string, contentHash []byte, cuSum uint64, spec string, QoSDR *types.QualityOfServiceReport) *types.RelaySession { +func BuildRelayRequest(ctx context.Context, provider string, contentHash []byte, cuSum uint64, spec string, qos *types.QualityOfServiceReport) *types.RelaySession { relaySession := &types.RelaySession{ Provider: provider, ContentHash: contentHash, @@ -90,11 +90,11 @@ func BuildRelayRequest(ctx context.Context, provider string, contentHash []byte, CuSum: cuSum, Epoch: sdk.UnwrapSDKContext(ctx).BlockHeight(), RelayNum: 0, - QoSReport: QoSDR, + QoSReport: qos, LavaChainId: sdk.UnwrapSDKContext(ctx).BlockHeader().ChainID, } - if QoSDR != nil { - QoSDR.ComputeQoS() + if qos != nil { + qos.ComputeQoS() } return relaySession } From 111d5c30ef5a31ad2d8aaee2666a20b9259bb3fb Mon Sep 17 00:00:00 2001 From: omer mishael Date: Thu, 16 Mar 2023 13:11:10 +0200 Subject: [PATCH 116/123] removed deprecated relayer code --- cmd/lavad/main.go | 61 - protocol/chainlib/chain_fetcher.go | 2 +- protocol/chainlib/chainlib.go | 4 +- protocol/chainlib/chainproxy/common.go | 2 +- .../chainproxy/rpcInterfaceMessages/common.go | 2 +- .../rpcInterfaceMessages/grpcMessage.go | 2 +- .../rpcInterfaceMessages/jsonRPCmessage.go | 2 +- .../rpcInterfaceMessages/restMessage.go | 2 +- .../tendermintRPCMessage.go | 2 +- protocol/chainlib/common.go | 2 +- protocol/chainlib/grpc.go | 2 +- protocol/chainlib/jsonRPC.go | 4 +- protocol/chainlib/rest.go | 2 +- protocol/chainlib/tendermintRPC.go | 4 +- protocol/common/conf.go | 1 + protocol/common/rpcconsumerlogs.go | 4 +- protocol/lavaprotocol/request_builder.go | 2 +- protocol/lavaprotocol/response_builder.go | 2 +- protocol/lavaprotocol/reuqest_builder_test.go | 2 +- {relayer => protocol}/metrics/analytics.go | 0 .../metrics/metricsService.go | 0 {relayer => protocol}/metrics/metrics_test.go | 0 {relayer => protocol}/parser/misc.go | 0 {relayer => protocol}/parser/parser.go | 0 {relayer => protocol}/parser/parser_test.go | 0 {relayer => protocol}/performance/cache.go | 0 
{relayer => protocol}/performance/common.go | 0 {relayer => protocol}/performance/errors.go | 0 .../performance/pprofServer.go | 0 protocol/rpcconsumer/rpcconsumer.go | 9 +- protocol/rpcconsumer/rpcconsumer_server.go | 4 +- .../reliabilitymanager/reliability_manager.go | 2 +- .../rpcprovider/rewardserver/reward_server.go | 2 +- protocol/rpcprovider/rpcprovider.go | 9 +- protocol/rpcprovider/rpcprovider_server.go | 4 +- relayer/chainproxy/chainproxy.go | 389 ---- relayer/chainproxy/chainproxyErrors.go | 7 - relayer/chainproxy/connector.go | 334 ---- relayer/chainproxy/connector_test.go | 101 -- relayer/chainproxy/grpc.go | 463 ----- relayer/chainproxy/jsonRPC.go | 503 ------ relayer/chainproxy/portalLogs.go | 182 -- relayer/chainproxy/rest.go | 404 ----- relayer/chainproxy/rpcclient/client.go | 683 -------- relayer/chainproxy/rpcclient/doc.go | 109 -- relayer/chainproxy/rpcclient/endpoints.go | 52 - relayer/chainproxy/rpcclient/errors.go | 103 -- relayer/chainproxy/rpcclient/handler.go | 435 ----- relayer/chainproxy/rpcclient/http.go | 298 ---- relayer/chainproxy/rpcclient/inproc.go | 33 - relayer/chainproxy/rpcclient/ipc.go | 56 - relayer/chainproxy/rpcclient/ipc_js.go | 38 - relayer/chainproxy/rpcclient/ipc_unix.go | 57 - relayer/chainproxy/rpcclient/ipc_windows.go | 49 - relayer/chainproxy/rpcclient/json.go | 366 ---- relayer/chainproxy/rpcclient/metrics.go | 39 - relayer/chainproxy/rpcclient/server.go | 190 -- relayer/chainproxy/rpcclient/service.go | 267 --- relayer/chainproxy/rpcclient/stdio.go | 66 - relayer/chainproxy/rpcclient/subscription.go | 369 ---- relayer/chainproxy/rpcclient/types.go | 265 --- relayer/chainproxy/rpcclient/types_test.go | 155 -- relayer/chainproxy/rpcclient/websocket.go | 324 ---- relayer/chainproxy/tendermintRPC.go | 715 -------- relayer/chainsentry/chainSentryErrors.go | 7 - relayer/chainsentry/chainsentry.go | 190 -- relayer/readme.md | 68 - relayer/sentry/sentry.go | 1557 ----------------- relayer/sentry/tx.go | 117 -- relayer/test_client.go | 107 -- relayer/testclients/aptos_tests.go | 96 - relayer/testclients/coshub_tests.go | 124 -- relayer/testclients/ethereum_tests.go | 146 -- relayer/testclients/juno_tests.go | 112 -- relayer/testclients/lava_tests.go | 89 - relayer/testclients/osmosis_tests.go | 118 -- relayer/testclients/polygon_tests.go | 54 - relayer/testclients/starknet_tests.go | 39 - relayer/testclients/terra_tests.go | 74 - relayer/testclients/test_utils.go | 36 - testutil/common/common.go | 2 +- {relayer => utils}/sigs/sigs.go | 0 x/conflict/keeper/conflict.go | 2 +- .../keeper/msg_server_detection_test.go | 2 +- x/conflict/keeper/vote_test.go | 2 +- x/pairing/keeper/fixation_test.go | 2 +- x/pairing/keeper/msg_server_freeze_test.go | 2 +- x/pairing/keeper/msg_server_relay_payment.go | 2 +- .../msg_server_relay_payment_gov_test.go | 2 +- .../keeper/msg_server_relay_payment_test.go | 2 +- .../keeper/msg_server_stake_client_test.go | 2 +- .../keeper/msg_server_stake_provider_test.go | 2 +- .../keeper/msg_server_unstake_client_test.go | 2 +- x/pairing/keeper/pairing_subscription_test.go | 2 +- .../keeper/unresponsive_provider_test.go | 2 +- x/subscription/keeper/epoch_start_test.go | 2 +- x/subscription/keeper/subscription_test.go | 2 +- 97 files changed, 52 insertions(+), 10100 deletions(-) rename {relayer => protocol}/metrics/analytics.go (100%) rename {relayer => protocol}/metrics/metricsService.go (100%) rename {relayer => protocol}/metrics/metrics_test.go (100%) rename {relayer => protocol}/parser/misc.go (100%) rename {relayer => 
protocol}/parser/parser.go (100%) rename {relayer => protocol}/parser/parser_test.go (100%) rename {relayer => protocol}/performance/cache.go (100%) rename {relayer => protocol}/performance/common.go (100%) rename {relayer => protocol}/performance/errors.go (100%) rename {relayer => protocol}/performance/pprofServer.go (100%) delete mode 100644 relayer/chainproxy/chainproxy.go delete mode 100644 relayer/chainproxy/chainproxyErrors.go delete mode 100644 relayer/chainproxy/connector.go delete mode 100644 relayer/chainproxy/connector_test.go delete mode 100644 relayer/chainproxy/grpc.go delete mode 100644 relayer/chainproxy/jsonRPC.go delete mode 100644 relayer/chainproxy/portalLogs.go delete mode 100644 relayer/chainproxy/rest.go delete mode 100755 relayer/chainproxy/rpcclient/client.go delete mode 100755 relayer/chainproxy/rpcclient/doc.go delete mode 100755 relayer/chainproxy/rpcclient/endpoints.go delete mode 100755 relayer/chainproxy/rpcclient/errors.go delete mode 100755 relayer/chainproxy/rpcclient/handler.go delete mode 100755 relayer/chainproxy/rpcclient/http.go delete mode 100755 relayer/chainproxy/rpcclient/inproc.go delete mode 100755 relayer/chainproxy/rpcclient/ipc.go delete mode 100755 relayer/chainproxy/rpcclient/ipc_js.go delete mode 100755 relayer/chainproxy/rpcclient/ipc_unix.go delete mode 100755 relayer/chainproxy/rpcclient/ipc_windows.go delete mode 100755 relayer/chainproxy/rpcclient/json.go delete mode 100755 relayer/chainproxy/rpcclient/metrics.go delete mode 100755 relayer/chainproxy/rpcclient/server.go delete mode 100755 relayer/chainproxy/rpcclient/service.go delete mode 100755 relayer/chainproxy/rpcclient/stdio.go delete mode 100755 relayer/chainproxy/rpcclient/subscription.go delete mode 100755 relayer/chainproxy/rpcclient/types.go delete mode 100755 relayer/chainproxy/rpcclient/types_test.go delete mode 100755 relayer/chainproxy/rpcclient/websocket.go delete mode 100644 relayer/chainproxy/tendermintRPC.go delete mode 100644 relayer/chainsentry/chainSentryErrors.go delete mode 100644 relayer/chainsentry/chainsentry.go delete mode 100644 relayer/readme.md delete mode 100755 relayer/sentry/sentry.go delete mode 100644 relayer/sentry/tx.go delete mode 100644 relayer/test_client.go delete mode 100644 relayer/testclients/aptos_tests.go delete mode 100644 relayer/testclients/coshub_tests.go delete mode 100644 relayer/testclients/ethereum_tests.go delete mode 100644 relayer/testclients/juno_tests.go delete mode 100644 relayer/testclients/lava_tests.go delete mode 100644 relayer/testclients/osmosis_tests.go delete mode 100644 relayer/testclients/polygon_tests.go delete mode 100644 relayer/testclients/starknet_tests.go delete mode 100644 relayer/testclients/terra_tests.go delete mode 100644 relayer/testclients/test_utils.go rename {relayer => utils}/sigs/sigs.go (100%) diff --git a/cmd/lavad/main.go b/cmd/lavad/main.go index b4423b7ad1..422626abe4 100644 --- a/cmd/lavad/main.go +++ b/cmd/lavad/main.go @@ -1,26 +1,16 @@ package main import ( - "context" "os" - "strconv" - "strings" _ "net/http/pprof" - "github.com/cosmos/cosmos-sdk/client" - "github.com/cosmos/cosmos-sdk/client/flags" - "github.com/cosmos/cosmos-sdk/client/tx" "github.com/cosmos/cosmos-sdk/server" svrcmd "github.com/cosmos/cosmos-sdk/server/cmd" "github.com/lavanet/lava/app" "github.com/lavanet/lava/cmd/lavad/cmd" "github.com/lavanet/lava/protocol/rpcconsumer" "github.com/lavanet/lava/protocol/rpcprovider" - "github.com/lavanet/lava/relayer" - "github.com/lavanet/lava/relayer/sentry" - 
"github.com/lavanet/lava/utils" - "github.com/spf13/cobra" ) const ( @@ -30,63 +20,12 @@ const ( func main() { rootCmd, _ := cmd.NewRootCmd() - cmdTestClient := &cobra.Command{ - Use: "test_client [chain-id] [api-interface] [duration-seconds]", - Short: "test client", - Long: `test client`, - Args: cobra.RangeArgs(2, 3), - RunE: func(cmd *cobra.Command, args []string) error { - utils.LavaFormatInfo("Test consumer process started", &map[string]string{"args": strings.Join(args, ",")}) - clientCtx, err := client.GetClientTxContext(cmd) - if err != nil { - return err - } - - chainID := args[0] - - apiInterface := args[1] - - // if duration is not set, set duration value to 1 so tests runs atleast once - duration := int64(1) - if len(args) == 3 { - duration, err = strconv.ParseInt(args[2], 10, 64) - if err != nil { - return err - } - } - ctx := context.Background() - logLevel, err := cmd.Flags().GetString(flags.FlagLogLevel) - if err != nil { - utils.LavaFormatFatal("failed to read log level flag", err, nil) - } - utils.LoggingLevel(logLevel) - - networkChainId, err := cmd.Flags().GetString(flags.FlagChainID) - if err != nil { - return err - } - txFactory := tx.NewFactoryCLI(clientCtx, cmd.Flags()).WithChainID(networkChainId) - - relayer.TestClient(ctx, txFactory, clientCtx, chainID, apiInterface, duration, cmd.Flags()) - - return nil - }, - } // rpc consumer cobra command cmdRPCConsumer := rpcconsumer.CreateRPCConsumerCobraCommand() // rpc provider cobra command cmdRPCProvider := rpcprovider.CreateRPCProviderCobraCommand() - // Test Client command flags - flags.AddTxFlagsToCmd(cmdTestClient) - cmdTestClient.Flags().String(flags.FlagChainID, app.Name, "network chain id") - cmdTestClient.Flags().Uint64(sentry.GeolocationFlag, 0, "geolocation to run from") - cmdTestClient.MarkFlagRequired(sentry.GeolocationFlag) - cmdTestClient.MarkFlagRequired(flags.FlagFrom) - cmdTestClient.Flags().Bool("secure", false, "secure sends reliability on every message") - rootCmd.AddCommand(cmdTestClient) - // Add RPC Consumer Command rootCmd.AddCommand(cmdRPCConsumer) // Add RPC Provider Command diff --git a/protocol/chainlib/chain_fetcher.go b/protocol/chainlib/chain_fetcher.go index 799628e21a..cd9569c8e3 100644 --- a/protocol/chainlib/chain_fetcher.go +++ b/protocol/chainlib/chain_fetcher.go @@ -8,7 +8,7 @@ import ( "github.com/cosmos/cosmos-sdk/client" "github.com/lavanet/lava/protocol/chainlib/chainproxy" "github.com/lavanet/lava/protocol/lavasession" - "github.com/lavanet/lava/relayer/parser" + "github.com/lavanet/lava/protocol/parser" "github.com/lavanet/lava/utils" "github.com/lavanet/lava/x/pairing/types" spectypes "github.com/lavanet/lava/x/spec/types" diff --git a/protocol/chainlib/chainlib.go b/protocol/chainlib/chainlib.go index 7a8ac1e924..d88c7d9fb5 100644 --- a/protocol/chainlib/chainlib.go +++ b/protocol/chainlib/chainlib.go @@ -8,8 +8,8 @@ import ( "github.com/lavanet/lava/protocol/chainlib/chainproxy/rpcclient" "github.com/lavanet/lava/protocol/common" "github.com/lavanet/lava/protocol/lavasession" - "github.com/lavanet/lava/relayer/metrics" - "github.com/lavanet/lava/relayer/parser" + "github.com/lavanet/lava/protocol/metrics" + "github.com/lavanet/lava/protocol/parser" pairingtypes "github.com/lavanet/lava/x/pairing/types" spectypes "github.com/lavanet/lava/x/spec/types" ) diff --git a/protocol/chainlib/chainproxy/common.go b/protocol/chainlib/chainproxy/common.go index 8a2bf5462c..3c5239069c 100644 --- a/protocol/chainlib/chainproxy/common.go +++ b/protocol/chainlib/chainproxy/common.go @@ -3,7 
+3,7 @@ package chainproxy import ( "encoding/json" - "github.com/lavanet/lava/relayer/parser" + "github.com/lavanet/lava/protocol/parser" ) const ( diff --git a/protocol/chainlib/chainproxy/rpcInterfaceMessages/common.go b/protocol/chainlib/chainproxy/rpcInterfaceMessages/common.go index 8e82548173..e5472705d9 100644 --- a/protocol/chainlib/chainproxy/rpcInterfaceMessages/common.go +++ b/protocol/chainlib/chainproxy/rpcInterfaceMessages/common.go @@ -3,7 +3,7 @@ package rpcInterfaceMessages import ( "encoding/json" - "github.com/lavanet/lava/relayer/parser" + "github.com/lavanet/lava/protocol/parser" ) type ParsableRPCInput struct { diff --git a/protocol/chainlib/chainproxy/rpcInterfaceMessages/grpcMessage.go b/protocol/chainlib/chainproxy/rpcInterfaceMessages/grpcMessage.go index 902915f133..e573b76714 100644 --- a/protocol/chainlib/chainproxy/rpcInterfaceMessages/grpcMessage.go +++ b/protocol/chainlib/chainproxy/rpcInterfaceMessages/grpcMessage.go @@ -11,7 +11,7 @@ import ( "github.com/jhump/protoreflect/desc" "github.com/jhump/protoreflect/dynamic" "github.com/jhump/protoreflect/grpcreflect" - "github.com/lavanet/lava/relayer/parser" + "github.com/lavanet/lava/protocol/parser" "github.com/lavanet/lava/utils" "google.golang.org/grpc/codes" ) diff --git a/protocol/chainlib/chainproxy/rpcInterfaceMessages/jsonRPCmessage.go b/protocol/chainlib/chainproxy/rpcInterfaceMessages/jsonRPCmessage.go index 6cda5b06b2..cbaf717b21 100644 --- a/protocol/chainlib/chainproxy/rpcInterfaceMessages/jsonRPCmessage.go +++ b/protocol/chainlib/chainproxy/rpcInterfaceMessages/jsonRPCmessage.go @@ -5,7 +5,7 @@ import ( sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" "github.com/lavanet/lava/protocol/chainlib/chainproxy/rpcclient" - "github.com/lavanet/lava/relayer/parser" + "github.com/lavanet/lava/protocol/parser" "github.com/lavanet/lava/utils" ) diff --git a/protocol/chainlib/chainproxy/rpcInterfaceMessages/restMessage.go b/protocol/chainlib/chainproxy/rpcInterfaceMessages/restMessage.go index 739572eb20..ada97de925 100644 --- a/protocol/chainlib/chainproxy/rpcInterfaceMessages/restMessage.go +++ b/protocol/chainlib/chainproxy/rpcInterfaceMessages/restMessage.go @@ -3,7 +3,7 @@ package rpcInterfaceMessages import ( "encoding/json" - "github.com/lavanet/lava/relayer/parser" + "github.com/lavanet/lava/protocol/parser" ) type RestMessage struct { diff --git a/protocol/chainlib/chainproxy/rpcInterfaceMessages/tendermintRPCMessage.go b/protocol/chainlib/chainproxy/rpcInterfaceMessages/tendermintRPCMessage.go index b83bb4280c..889033091a 100644 --- a/protocol/chainlib/chainproxy/rpcInterfaceMessages/tendermintRPCMessage.go +++ b/protocol/chainlib/chainproxy/rpcInterfaceMessages/tendermintRPCMessage.go @@ -7,7 +7,7 @@ import ( "github.com/lavanet/lava/protocol/chainlib/chainproxy" "github.com/lavanet/lava/protocol/chainlib/chainproxy/rpcclient" - "github.com/lavanet/lava/relayer/parser" + "github.com/lavanet/lava/protocol/parser" "github.com/lavanet/lava/utils" tenderminttypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) diff --git a/protocol/chainlib/common.go b/protocol/chainlib/common.go index cfdc45cc00..42f1d41ec9 100644 --- a/protocol/chainlib/common.go +++ b/protocol/chainlib/common.go @@ -12,7 +12,7 @@ import ( "github.com/gofiber/fiber/v2" "github.com/gofiber/websocket/v2" common "github.com/lavanet/lava/protocol/common" - "github.com/lavanet/lava/relayer/parser" + "github.com/lavanet/lava/protocol/parser" "github.com/lavanet/lava/utils" spectypes "github.com/lavanet/lava/x/spec/types" ) 
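For readers tracking this refactor, the sketch below distills the relay-construction pattern the updated pairing keeper tests follow after the switch to common.BuildRelayRequest and the relocation of the sigs package to utils/sigs. It is an illustrative helper rather than code from the patch: the buildSignedRelay name, its parameter list, and the keeper_test package clause are assumptions, while the calls to common.BuildRelayRequest and sigs.SignRelay follow the signatures visible in the diffs in this series.

package keeper_test

import (
	"context"

	"github.com/btcsuite/btcd/btcec"
	"github.com/lavanet/lava/testutil/common"
	"github.com/lavanet/lava/utils/sigs"
	"github.com/lavanet/lava/x/pairing/types"
)

// buildSignedRelay is a sketch (not part of the patch) of the pattern the updated keeper
// tests use: build a RelaySession with the shared test helper, then sign it with the
// consumer's secret key via the relocated utils/sigs package.
func buildSignedRelay(ctx context.Context, consumerSK *btcec.PrivateKey, provider string,
	apiName string, specName string, cu uint64, sessionID uint64,
) (*types.RelaySession, error) {
	// Build the bare relay session; the QoS report argument is optional and left nil here.
	relaySession := common.BuildRelayRequest(ctx, provider, []byte(apiName), cu, specName, nil)
	relaySession.SessionId = sessionID

	// Sign the session with the consumer key, as the updated tests do.
	sig, err := sigs.SignRelay(consumerSK, *relaySession)
	if err != nil {
		return nil, err
	}
	relaySession.Sig = sig
	return relaySession, nil
}

A caller would then submit the result through the pairing server, e.g. with Relays: []*types.RelaySession{relaySession} in a MsgRelayPayment, matching the test flow shown in the hunks above.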
diff --git a/protocol/chainlib/grpc.go b/protocol/chainlib/grpc.go index da3eee273b..ed7db6fb37 100644 --- a/protocol/chainlib/grpc.go +++ b/protocol/chainlib/grpc.go @@ -25,7 +25,7 @@ import ( "github.com/lavanet/lava/protocol/chainlib/chainproxy/thirdparty" "github.com/lavanet/lava/protocol/common" "github.com/lavanet/lava/protocol/lavasession" - "github.com/lavanet/lava/relayer/metrics" + "github.com/lavanet/lava/protocol/metrics" "github.com/lavanet/lava/utils" pairingtypes "github.com/lavanet/lava/x/pairing/types" spectypes "github.com/lavanet/lava/x/spec/types" diff --git a/protocol/chainlib/jsonRPC.go b/protocol/chainlib/jsonRPC.go index 71ea01f0d4..ebb7dc614e 100644 --- a/protocol/chainlib/jsonRPC.go +++ b/protocol/chainlib/jsonRPC.go @@ -16,8 +16,8 @@ import ( "github.com/lavanet/lava/protocol/chainlib/chainproxy/rpcInterfaceMessages" "github.com/lavanet/lava/protocol/common" "github.com/lavanet/lava/protocol/lavasession" - "github.com/lavanet/lava/relayer/metrics" - "github.com/lavanet/lava/relayer/parser" + "github.com/lavanet/lava/protocol/metrics" + "github.com/lavanet/lava/protocol/parser" "github.com/lavanet/lava/protocol/chainlib/chainproxy" "github.com/lavanet/lava/protocol/chainlib/chainproxy/rpcclient" diff --git a/protocol/chainlib/rest.go b/protocol/chainlib/rest.go index 6918999e3c..4ab866ef4b 100644 --- a/protocol/chainlib/rest.go +++ b/protocol/chainlib/rest.go @@ -21,7 +21,7 @@ import ( "github.com/gofiber/fiber/v2" "github.com/gofiber/fiber/v2/middleware/favicon" "github.com/lavanet/lava/protocol/common" - "github.com/lavanet/lava/relayer/metrics" + "github.com/lavanet/lava/protocol/metrics" spectypes "github.com/lavanet/lava/x/spec/types" ) diff --git a/protocol/chainlib/tendermintRPC.go b/protocol/chainlib/tendermintRPC.go index 53ffc5e729..f2dd9d9644 100644 --- a/protocol/chainlib/tendermintRPC.go +++ b/protocol/chainlib/tendermintRPC.go @@ -18,8 +18,8 @@ import ( "github.com/lavanet/lava/protocol/chainlib/chainproxy/rpcclient" "github.com/lavanet/lava/protocol/common" "github.com/lavanet/lava/protocol/lavasession" - "github.com/lavanet/lava/relayer/metrics" - "github.com/lavanet/lava/relayer/parser" + "github.com/lavanet/lava/protocol/metrics" + "github.com/lavanet/lava/protocol/parser" "github.com/lavanet/lava/utils" pairingtypes "github.com/lavanet/lava/x/pairing/types" spectypes "github.com/lavanet/lava/x/spec/types" diff --git a/protocol/common/conf.go b/protocol/common/conf.go index 038c6e8af5..6fccd3c6fd 100644 --- a/protocol/common/conf.go +++ b/protocol/common/conf.go @@ -10,6 +10,7 @@ import ( const ( EndpointsConfigName = "endpoints" SaveConfigFlagName = "save-conf" + GeolocationFlag = "geolocation" ) func ParseEndpointArgs(endpoint_strings []string, yaml_config_properties []string, endpointsConfigName string) (viper_endpoints *viper.Viper, err error) { diff --git a/protocol/common/rpcconsumerlogs.go b/protocol/common/rpcconsumerlogs.go index 481b2c23a5..c210f703cd 100644 --- a/protocol/common/rpcconsumerlogs.go +++ b/protocol/common/rpcconsumerlogs.go @@ -10,8 +10,8 @@ import ( "github.com/gofiber/fiber/v2" "github.com/gofiber/websocket/v2" "github.com/joho/godotenv" - "github.com/lavanet/lava/relayer/metrics" - "github.com/lavanet/lava/relayer/parser" + "github.com/lavanet/lava/protocol/metrics" + "github.com/lavanet/lava/protocol/parser" "github.com/lavanet/lava/utils" "github.com/newrelic/go-agent/v3/newrelic" "google.golang.org/grpc/metadata" diff --git a/protocol/lavaprotocol/request_builder.go b/protocol/lavaprotocol/request_builder.go index 
ec1b270a54..73440b03ec 100644 --- a/protocol/lavaprotocol/request_builder.go +++ b/protocol/lavaprotocol/request_builder.go @@ -12,8 +12,8 @@ import ( "github.com/btcsuite/btcd/btcec" "github.com/lavanet/lava/protocol/chainlib" "github.com/lavanet/lava/protocol/lavasession" - "github.com/lavanet/lava/relayer/sigs" "github.com/lavanet/lava/utils" + "github.com/lavanet/lava/utils/sigs" conflicttypes "github.com/lavanet/lava/x/conflict/types" pairingtypes "github.com/lavanet/lava/x/pairing/types" spectypes "github.com/lavanet/lava/x/spec/types" diff --git a/protocol/lavaprotocol/response_builder.go b/protocol/lavaprotocol/response_builder.go index 78e010e551..f5221f9770 100644 --- a/protocol/lavaprotocol/response_builder.go +++ b/protocol/lavaprotocol/response_builder.go @@ -8,8 +8,8 @@ import ( btcSecp256k1 "github.com/btcsuite/btcd/btcec" sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/lavanet/lava/relayer/sigs" "github.com/lavanet/lava/utils" + "github.com/lavanet/lava/utils/sigs" conflicttypes "github.com/lavanet/lava/x/conflict/types" pairingtypes "github.com/lavanet/lava/x/pairing/types" spectypes "github.com/lavanet/lava/x/spec/types" diff --git a/protocol/lavaprotocol/reuqest_builder_test.go b/protocol/lavaprotocol/reuqest_builder_test.go index ec104677ac..aff74eecdf 100644 --- a/protocol/lavaprotocol/reuqest_builder_test.go +++ b/protocol/lavaprotocol/reuqest_builder_test.go @@ -5,7 +5,7 @@ import ( "testing" "github.com/lavanet/lava/protocol/lavasession" - "github.com/lavanet/lava/relayer/sigs" + "github.com/lavanet/lava/utils/sigs" pairingtypes "github.com/lavanet/lava/x/pairing/types" "github.com/stretchr/testify/require" ) diff --git a/relayer/metrics/analytics.go b/protocol/metrics/analytics.go similarity index 100% rename from relayer/metrics/analytics.go rename to protocol/metrics/analytics.go diff --git a/relayer/metrics/metricsService.go b/protocol/metrics/metricsService.go similarity index 100% rename from relayer/metrics/metricsService.go rename to protocol/metrics/metricsService.go diff --git a/relayer/metrics/metrics_test.go b/protocol/metrics/metrics_test.go similarity index 100% rename from relayer/metrics/metrics_test.go rename to protocol/metrics/metrics_test.go diff --git a/relayer/parser/misc.go b/protocol/parser/misc.go similarity index 100% rename from relayer/parser/misc.go rename to protocol/parser/misc.go diff --git a/relayer/parser/parser.go b/protocol/parser/parser.go similarity index 100% rename from relayer/parser/parser.go rename to protocol/parser/parser.go diff --git a/relayer/parser/parser_test.go b/protocol/parser/parser_test.go similarity index 100% rename from relayer/parser/parser_test.go rename to protocol/parser/parser_test.go diff --git a/relayer/performance/cache.go b/protocol/performance/cache.go similarity index 100% rename from relayer/performance/cache.go rename to protocol/performance/cache.go diff --git a/relayer/performance/common.go b/protocol/performance/common.go similarity index 100% rename from relayer/performance/common.go rename to protocol/performance/common.go diff --git a/relayer/performance/errors.go b/protocol/performance/errors.go similarity index 100% rename from relayer/performance/errors.go rename to protocol/performance/errors.go diff --git a/relayer/performance/pprofServer.go b/protocol/performance/pprofServer.go similarity index 100% rename from relayer/performance/pprofServer.go rename to protocol/performance/pprofServer.go diff --git a/protocol/rpcconsumer/rpcconsumer.go b/protocol/rpcconsumer/rpcconsumer.go 
index 1678341f29..51e93729cd 100644 --- a/protocol/rpcconsumer/rpcconsumer.go +++ b/protocol/rpcconsumer/rpcconsumer.go @@ -21,11 +21,10 @@ import ( "github.com/lavanet/lava/protocol/common" "github.com/lavanet/lava/protocol/lavaprotocol" "github.com/lavanet/lava/protocol/lavasession" + "github.com/lavanet/lava/protocol/performance" "github.com/lavanet/lava/protocol/statetracker" - "github.com/lavanet/lava/relayer/performance" - "github.com/lavanet/lava/relayer/sentry" - "github.com/lavanet/lava/relayer/sigs" "github.com/lavanet/lava/utils" + "github.com/lavanet/lava/utils/sigs" conflicttypes "github.com/lavanet/lava/x/conflict/types" "github.com/spf13/cobra" "github.com/spf13/viper" @@ -239,8 +238,8 @@ rpcconsumer 127.0.0.1:3333 COS3 tendermintrpc 127.0.0.1:3334 COS3 rest `, flags.AddTxFlagsToCmd(cmdRPCConsumer) cmdRPCConsumer.MarkFlagRequired(flags.FlagFrom) cmdRPCConsumer.Flags().String(flags.FlagChainID, app.Name, "network chain id") - cmdRPCConsumer.Flags().Uint64(sentry.GeolocationFlag, 0, "geolocation to run from") - cmdRPCConsumer.MarkFlagRequired(sentry.GeolocationFlag) + cmdRPCConsumer.Flags().Uint64(common.GeolocationFlag, 0, "geolocation to run from") + cmdRPCConsumer.MarkFlagRequired(common.GeolocationFlag) cmdRPCConsumer.Flags().Bool("secure", false, "secure sends reliability on every message") cmdRPCConsumer.Flags().String(performance.PprofAddressFlagName, "", "pprof server address, used for code profiling") cmdRPCConsumer.Flags().String(performance.CacheFlagName, "", "address for a cache server to improve performance") diff --git a/protocol/rpcconsumer/rpcconsumer_server.go b/protocol/rpcconsumer/rpcconsumer_server.go index b175c3b8fd..d1fe2eaf43 100644 --- a/protocol/rpcconsumer/rpcconsumer_server.go +++ b/protocol/rpcconsumer/rpcconsumer_server.go @@ -13,8 +13,8 @@ import ( "github.com/lavanet/lava/protocol/common" "github.com/lavanet/lava/protocol/lavaprotocol" "github.com/lavanet/lava/protocol/lavasession" - "github.com/lavanet/lava/relayer/metrics" - "github.com/lavanet/lava/relayer/performance" + "github.com/lavanet/lava/protocol/metrics" + "github.com/lavanet/lava/protocol/performance" "github.com/lavanet/lava/utils" conflicttypes "github.com/lavanet/lava/x/conflict/types" pairingtypes "github.com/lavanet/lava/x/pairing/types" diff --git a/protocol/rpcprovider/reliabilitymanager/reliability_manager.go b/protocol/rpcprovider/reliabilitymanager/reliability_manager.go index c953a4d2de..3c08b534bf 100644 --- a/protocol/rpcprovider/reliabilitymanager/reliability_manager.go +++ b/protocol/rpcprovider/reliabilitymanager/reliability_manager.go @@ -10,8 +10,8 @@ import ( "github.com/lavanet/lava/protocol/chainlib" "github.com/lavanet/lava/protocol/chaintracker" - "github.com/lavanet/lava/relayer/sigs" "github.com/lavanet/lava/utils" + "github.com/lavanet/lava/utils/sigs" conflicttypes "github.com/lavanet/lava/x/conflict/types" terderminttypes "github.com/tendermint/tendermint/abci/types" "golang.org/x/exp/slices" diff --git a/protocol/rpcprovider/rewardserver/reward_server.go b/protocol/rpcprovider/rewardserver/reward_server.go index 54a62be50f..b5bb56d29a 100644 --- a/protocol/rpcprovider/rewardserver/reward_server.go +++ b/protocol/rpcprovider/rewardserver/reward_server.go @@ -10,8 +10,8 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" "github.com/lavanet/lava/protocol/lavasession" - "github.com/lavanet/lava/relayer/sigs" "github.com/lavanet/lava/utils" + "github.com/lavanet/lava/utils/sigs" pairingtypes "github.com/lavanet/lava/x/pairing/types" terderminttypes 
"github.com/tendermint/tendermint/abci/types" ) diff --git a/protocol/rpcprovider/rpcprovider.go b/protocol/rpcprovider/rpcprovider.go index 8ca1761b09..2027369eb5 100644 --- a/protocol/rpcprovider/rpcprovider.go +++ b/protocol/rpcprovider/rpcprovider.go @@ -21,13 +21,12 @@ import ( "github.com/lavanet/lava/protocol/chaintracker" "github.com/lavanet/lava/protocol/common" "github.com/lavanet/lava/protocol/lavasession" + "github.com/lavanet/lava/protocol/performance" "github.com/lavanet/lava/protocol/rpcprovider/reliabilitymanager" "github.com/lavanet/lava/protocol/rpcprovider/rewardserver" "github.com/lavanet/lava/protocol/statetracker" - "github.com/lavanet/lava/relayer/performance" - "github.com/lavanet/lava/relayer/sentry" - "github.com/lavanet/lava/relayer/sigs" "github.com/lavanet/lava/utils" + "github.com/lavanet/lava/utils/sigs" pairingtypes "github.com/lavanet/lava/x/pairing/types" "github.com/spf13/cobra" "github.com/spf13/viper" @@ -345,8 +344,8 @@ rpcprovider 127.0.0.1:3333 COS3 tendermintrpc "wss://www.node-path.com:80,https: cmdRPCProvider.MarkFlagRequired(flags.FlagFrom) cmdRPCProvider.Flags().Bool(common.SaveConfigFlagName, false, "save cmd args to a config file") cmdRPCProvider.Flags().String(flags.FlagChainID, app.Name, "network chain id") - cmdRPCProvider.Flags().Uint64(sentry.GeolocationFlag, 0, "geolocation to run from") - cmdRPCProvider.MarkFlagRequired(sentry.GeolocationFlag) + cmdRPCProvider.Flags().Uint64(common.GeolocationFlag, 0, "geolocation to run from") + cmdRPCProvider.MarkFlagRequired(common.GeolocationFlag) cmdRPCProvider.Flags().String(performance.PprofAddressFlagName, "", "pprof server address, used for code profiling") cmdRPCProvider.Flags().String(performance.CacheFlagName, "", "address for a cache server to improve performance") cmdRPCProvider.Flags().Uint(chainproxy.ParallelConnectionsFlag, chainproxy.NumberOfParallelConnections, "parallel connections") diff --git a/protocol/rpcprovider/rpcprovider_server.go b/protocol/rpcprovider/rpcprovider_server.go index fb08c2b3b0..8d31dd21cb 100644 --- a/protocol/rpcprovider/rpcprovider_server.go +++ b/protocol/rpcprovider/rpcprovider_server.go @@ -18,9 +18,9 @@ import ( "github.com/lavanet/lava/protocol/chaintracker" "github.com/lavanet/lava/protocol/lavaprotocol" "github.com/lavanet/lava/protocol/lavasession" - "github.com/lavanet/lava/relayer/performance" - "github.com/lavanet/lava/relayer/sigs" + "github.com/lavanet/lava/protocol/performance" "github.com/lavanet/lava/utils" + "github.com/lavanet/lava/utils/sigs" pairingtypes "github.com/lavanet/lava/x/pairing/types" spectypes "github.com/lavanet/lava/x/spec/types" "google.golang.org/grpc/codes" diff --git a/relayer/chainproxy/chainproxy.go b/relayer/chainproxy/chainproxy.go deleted file mode 100644 index 89b0ed536b..0000000000 --- a/relayer/chainproxy/chainproxy.go +++ /dev/null @@ -1,389 +0,0 @@ -package chainproxy - -import ( - "context" - "encoding/json" - "fmt" - "net/url" - "time" - - "github.com/lavanet/lava/relayer/metrics" - "github.com/spf13/pflag" - - "github.com/btcsuite/btcd/btcec" - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/gofiber/fiber/v2" - "github.com/gofiber/websocket/v2" - "github.com/lavanet/lava/protocol/lavasession" - "github.com/lavanet/lava/relayer/chainproxy/rpcclient" - "github.com/lavanet/lava/relayer/performance" - "github.com/lavanet/lava/relayer/sentry" - "github.com/lavanet/lava/relayer/sigs" - "github.com/lavanet/lava/utils" - pairingtypes "github.com/lavanet/lava/x/pairing/types" - spectypes 
"github.com/lavanet/lava/x/spec/types" -) - -const ( - DefaultTimeout = 10 * time.Second - TimePerCU = uint64(100 * time.Millisecond) - ContextUserValueKeyDappID = "dappID" - MinimumTimePerRelayDelay = time.Second - AverageWorldLatency = 200 * time.Millisecond - LavaErrorCode = 555 - InternalErrorString = "Internal Error" - dataReliabilityContextMultiplier = 20 -) - -type NodeMessage interface { - GetServiceApi() *spectypes.ServiceApi - GetInterface() *spectypes.ApiInterface - Send(ctx context.Context, ch chan interface{}) (relayReply *pairingtypes.RelayReply, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) - RequestedBlock() int64 - GetMsg() interface{} - GetExtraContextTimeout() time.Duration -} - -type ChainProxy interface { - Start(context.Context) error - GetSentry() *sentry.Sentry - ParseMsg(string, []byte, string) (NodeMessage, error) - PortalStart(context.Context, *btcec.PrivateKey, string) - FetchLatestBlockNum(ctx context.Context) (int64, error) - FetchBlockHashByNum(ctx context.Context, blockNum int64) (string, error) - GetConsumerSessionManager() *lavasession.ConsumerSessionManager - SetCache(*performance.Cache) - GetCache() *performance.Cache -} - -func GetChainProxy(nodeUrl string, nConns uint, sentry *sentry.Sentry, pLogs *PortalLogs, flagSet *pflag.FlagSet) (ChainProxy, error) { - consumerSessionManagerInstance := &lavasession.ConsumerSessionManager{} - switch sentry.ApiInterface { - case spectypes.APIInterfaceJsonRPC: - return NewJrpcChainProxy(nodeUrl, nConns, sentry, consumerSessionManagerInstance, pLogs), nil - case spectypes.APIInterfaceTendermintRPC: - return NewtendermintRpcChainProxy(nodeUrl, nConns, sentry, consumerSessionManagerInstance, pLogs, flagSet), nil - case spectypes.APIInterfaceRest: - return NewRestChainProxy(nodeUrl, sentry, consumerSessionManagerInstance, pLogs), nil - case spectypes.APIInterfaceGrpc: - return NewGrpcChainProxy(nodeUrl, nConns, sentry, consumerSessionManagerInstance, pLogs), nil - } - return nil, fmt.Errorf("chain proxy for apiInterface (%s) not found", sentry.ApiInterface) -} - -func VerifyRelayReply(reply *pairingtypes.RelayReply, relayRequest *pairingtypes.RelayRequest, addr string, comparesHashes bool) error { - serverKey, err := sigs.RecoverPubKeyFromRelayReply(reply, relayRequest) - if err != nil { - return err - } - serverAddr, err := sdk.AccAddressFromHex(serverKey.Address().String()) - if err != nil { - return err - } - if serverAddr.String() != addr { - return fmt.Errorf("server address mismatch in reply (%s) (%s)", serverAddr.String(), addr) - } - - if comparesHashes { - strAdd, err := sdk.AccAddressFromBech32(addr) - if err != nil { - return err - } - serverKey, err = sigs.RecoverPubKeyFromResponseFinalizationData(reply, relayRequest, strAdd) - if err != nil { - return err - } - - serverAddr, err = sdk.AccAddressFromHex(serverKey.Address().String()) - if err != nil { - return err - } - - if serverAddr.String() != strAdd.String() { - return fmt.Errorf("server address mismatch in reply sigblocks (%s) (%s)", serverAddr.String(), strAdd.String()) - } - } - return nil -} - -// Client requests and queries -func SendRelay( - ctx context.Context, - cp ChainProxy, - privKey *btcec.PrivateKey, - url string, - req string, - connectionType string, - dappID string, - analytics *metrics.RelayMetrics, -) (*pairingtypes.RelayReply, *pairingtypes.Relayer_RelaySubscribeClient, error) { - // Unmarshal request - nodeMsg, err := cp.ParseMsg(url, []byte(req), connectionType) - if err != nil { - return nil, nil, 
err - } - isSubscription := nodeMsg.GetInterface().Category.Subscription - blockHeight := int64(-1) // to sync reliability blockHeight in case it changes - requestedBlock := int64(0) - // Get Session. we get session here so we can use the epoch in the callbacks - singleConsumerSession, epoch, providerPublicAddress, reportedProviders, err := cp.GetConsumerSessionManager().GetSession(ctx, nodeMsg.GetServiceApi().ComputeUnits, nil) - if err != nil { - return nil, nil, err - } - relayTimeout := getTimePerCu(singleConsumerSession.LatestRelayCu) + AverageWorldLatency + nodeMsg.GetExtraContextTimeout() - // consumerSession is locked here. - - callback_send_relay := func(consumerSession *lavasession.SingleConsumerSession) (*pairingtypes.RelayReply, *pairingtypes.Relayer_RelaySubscribeClient, *pairingtypes.RelayRequest, time.Duration, bool, error) { - // client session is locked here - blockHeight = int64(epoch) // epochs heights only - - // we need to apply CuSum and relay number that we plan to add in the relay request. even if we didn't yet apply them to the consumerSession. - relayRequest := &pairingtypes.RelayRequest{ - RelaySession: &pairingtypes.RelaySession{ - SessionId: uint64(consumerSession.SessionId), - Provider: providerPublicAddress, - SpecID: cp.GetSentry().ChainID, - Epoch: blockHeight, - RelayNum: consumerSession.RelayNum + lavasession.RelayNumberIncrement, // increment the relay number. which will be applied when session is returned properly - QoSReport: consumerSession.QoSInfo.LastQoSReport, - UnresponsiveProviders: reportedProviders, - CuSum: consumerSession.CuSum + consumerSession.LatestRelayCu, // add the latestRelayCu which will be applied when session is returned properly - }, - RelayData: &pairingtypes.RelayPrivateData{ - ConnectionType: connectionType, - Data: []byte(req), - RequestBlock: nodeMsg.RequestedBlock(), - ApiUrl: url, - }, - DataReliability: nil, - } - - sig, err := sigs.SignRelay(privKey, *relayRequest.RelaySession) - if err != nil { - return nil, nil, nil, 0, false, err - } - relayRequest.RelaySession.Sig = sig - c := *consumerSession.Endpoint.Client - - connectCtx, cancel := context.WithTimeout(ctx, relayTimeout) - defer cancel() - - var replyServer pairingtypes.Relayer_RelaySubscribeClient - var reply *pairingtypes.RelayReply - - relaySentTime := time.Now() - if isSubscription { - replyServer, err = c.RelaySubscribe(ctx, relayRequest) - } else { - cache := cp.GetCache() - reply, err = cache.GetEntry(ctx, relayRequest, cp.GetSentry().ApiInterface, nil, cp.GetSentry().ChainID, false) // caching in the portal doesn't care about hashes, and we don't have data on finalization yet - if err != nil || reply == nil { - if performance.NotConnectedError.Is(err) { - utils.LavaFormatError("cache not connected", err, nil) - } - reply, err = c.Relay(connectCtx, relayRequest) - } else { - // Info was fetched from cache, so we need to change the state - // so we can return here, no need to update anything and calculate as this info was fetched from the cache - return reply, nil, relayRequest, 0, true, nil - } - } - currentLatency := time.Since(relaySentTime) - - if analytics != nil { - analytics.Latency = currentLatency.Milliseconds() - analytics.ComputeUnits = relayRequest.RelaySession.CuSum - } - - if err != nil { - return nil, nil, nil, 0, false, err - } - - if !isSubscription { - // update relay request requestedBlock to the provided one in case it was arbitrary - sentry.UpdateRequestedBlock(relayRequest, reply) - finalized := 
cp.GetSentry().IsFinalizedBlock(relayRequest.RelayData.RequestBlock, reply.LatestBlock) - err = VerifyRelayReply(reply, relayRequest, providerPublicAddress, cp.GetSentry().GetSpecDataReliabilityEnabled()) - if err != nil { - return nil, nil, nil, 0, false, err - } - requestedBlock = relayRequest.RelayData.RequestBlock - cache := cp.GetCache() - // TODO: response sanity, check its under an expected format add that format to spec - err := cache.SetEntry(ctx, relayRequest, cp.GetSentry().ApiInterface, nil, cp.GetSentry().ChainID, dappID, reply, finalized) // caching in the portal doesn't care about hashes - if err != nil && !performance.NotInitialisedError.Is(err) { - utils.LavaFormatWarning("error updating cache with new entry", err, nil) - } - return reply, nil, relayRequest, currentLatency, false, nil - } - // isSubscription - return reply, &replyServer, relayRequest, currentLatency, false, nil - } - - callback_send_reliability := func(consumerSession *lavasession.SingleConsumerSession, dataReliability *pairingtypes.VRFData, providerAddress string) (*pairingtypes.RelayReply, *pairingtypes.RelayRequest, time.Duration, time.Duration, error) { - // client session is locked here - sentry := cp.GetSentry() - if blockHeight < 0 { - return nil, nil, 0, 0, fmt.Errorf("expected callback_send_relay to be called first and set blockHeight") - } - - relayRequest := &pairingtypes.RelayRequest{ - RelaySession: &pairingtypes.RelaySession{ - SessionId: lavasession.DataReliabilitySessionId, // sessionID for reliability is 0 - Provider: providerAddress, - SpecID: sentry.ChainID, - Epoch: blockHeight, - RelayNum: 0, // consumerSession.RelayNum == 0 - QoSReport: nil, - UnresponsiveProviders: reportedProviders, - CuSum: lavasession.DataReliabilityCuSum, // consumerSession.CuSum == 0 - }, - RelayData: &pairingtypes.RelayPrivateData{ - ConnectionType: connectionType, - Data: []byte(req), - RequestBlock: requestedBlock, - ApiUrl: url, - }, - DataReliability: dataReliability, - } - - sig, err := sigs.SignRelay(privKey, *relayRequest.RelaySession) - if err != nil { - return nil, nil, 0, 0, err - } - relayRequest.RelaySession.Sig = sig - - sig, err = sigs.SignVRFData(privKey, relayRequest.DataReliability) - if err != nil { - return nil, nil, 0, 0, err - } - relayRequest.DataReliability.Sig = sig - c := *consumerSession.Endpoint.Client - relaySentTime := time.Now() - // create a new context for data reliability, it needs to be a new Background context because the ctx might be canceled by the user. 
- drTimeout := (getTimePerCu(consumerSession.LatestRelayCu) + AverageWorldLatency) * dataReliabilityContextMultiplier - connectCtxDataReliability, cancel := context.WithTimeout(context.Background(), drTimeout) - defer cancel() - - reply, err := c.Relay(connectCtxDataReliability, relayRequest) - if err != nil { - return nil, nil, 0, 0, err - } - currentLatency := time.Since(relaySentTime) - err = VerifyRelayReply(reply, relayRequest, providerAddress, cp.GetSentry().GetSpecDataReliabilityEnabled()) - if err != nil { - return nil, nil, 0, 0, err - } - - return reply, relayRequest, currentLatency, drTimeout, nil - } - - reply, replyServer, relayLatency, isCachedResult, firstSessionError := cp.GetSentry().SendRelay(ctx, singleConsumerSession, epoch, providerPublicAddress, callback_send_relay, callback_send_reliability, nodeMsg.GetInterface().Category) - if firstSessionError != nil { - // on session failure here - errReport := cp.GetConsumerSessionManager().OnSessionFailure(singleConsumerSession, firstSessionError) - if errReport != nil { - return nil, nil, fmt.Errorf("original error: %v, onSessionFailure: %v", firstSessionError, errReport) - } - // Retry - originalProviderAddress := providerPublicAddress - singleConsumerSession, epoch, providerPublicAddress, reportedProviders, err = cp.GetConsumerSessionManager().GetSessionFromAllExcept(ctx, map[string]struct{}{providerPublicAddress: {}}, nodeMsg.GetServiceApi().ComputeUnits, epoch) - if err != nil { - return nil, nil, utils.LavaFormatError("relay_retry_attempt - Failed to get a second session from a different provider", nil, &map[string]string{"Original Error": firstSessionError.Error(), "GetSessionFromAllExcept Error": err.Error(), "ChainID": cp.GetSentry().ChainID, "Original_Provider_Address": originalProviderAddress}) - } - var secondSessionError error - reply, replyServer, relayLatency, isCachedResult, secondSessionError = cp.GetSentry().SendRelay(ctx, singleConsumerSession, epoch, providerPublicAddress, callback_send_relay, callback_send_reliability, nodeMsg.GetInterface().Category) - if secondSessionError != nil { - errReport = cp.GetConsumerSessionManager().OnSessionFailure(singleConsumerSession, secondSessionError) - if errReport != nil { - return nil, nil, fmt.Errorf("original error: %v, onSessionFailure: %v", firstSessionError, errReport) - } - // compare error1 with error2 - if secondSessionError.Error() != firstSessionError.Error() { - return nil, nil, utils.LavaFormatError("relay_retry_attempt - Received two different errors from different providers", nil, &map[string]string{"firstSessionError": firstSessionError.Error(), "secondSessionError": secondSessionError.Error(), "firstProviderAddr": originalProviderAddress, "secondProviderAddr": providerPublicAddress}) - } else { - // if both errors are the same, just return the first error. - return nil, nil, firstSessionError - } - } - // retry attempt succeeded! 
can continue normally - } - if !isSubscription { - if isCachedResult { - err = cp.GetConsumerSessionManager().OnSessionUnUsed(singleConsumerSession) - return reply, replyServer, err - } - latestBlock := reply.LatestBlock - expectedBH, numOfProviders := cp.GetSentry().ExpectedBlockHeight() - err = cp.GetConsumerSessionManager().OnSessionDone(singleConsumerSession, epoch, latestBlock, nodeMsg.GetServiceApi().ComputeUnits, relayLatency, singleConsumerSession.CalculateExpectedLatency(relayTimeout), expectedBH, numOfProviders, cp.GetSentry().GetProvidersCount()) // session done successfully - } else { - err = cp.GetConsumerSessionManager().OnSessionDoneIncreaseRelayAndCu(singleConsumerSession) // session done successfully - } - if replyServer == nil && reply.Data == nil && err == nil { - return nil, nil, utils.LavaFormatError("invalid handling of an error reply Data is nil & error is nil", nil, nil) - } - - return reply, replyServer, err -} - -func constructFiberCallbackWithHeaderAndParameterExtraction(callbackToBeCalled fiber.Handler, isMetricEnabled bool) fiber.Handler { - webSocketCallback := callbackToBeCalled - handler := func(c *fiber.Ctx) error { - dappId := ExtractDappIDFromFiberContext(c) - c.Locals("dappId", dappId) - if isMetricEnabled { - c.Locals(RefererHeaderKey, c.Get(RefererHeaderKey, "")) - } - return webSocketCallback(c) // uses external dappID - } - return handler -} - -func ExtractDappIDFromWebsocketConnection(c *websocket.Conn) string { - dappId, ok := c.Locals("dappId").(string) - if !ok { - dappId = "NoDappID" - } - return dappId -} - -func ExtractDappIDFromFiberContext(c *fiber.Ctx) (dappID string) { - dappID = c.Params("dappId") - if dappID == "" { - dappID = "NoDappID" - } - return dappID -} - -func getTimePerCu(cu uint64) time.Duration { - return time.Duration(cu*TimePerCU) + MinimumTimePerRelayDelay -} - -func addAttributeToError(key string, value string, errorMessage string) string { - return errorMessage + fmt.Sprintf(`, "%v": "%v"`, key, value) -} - -func convertToJsonError(errorMsg string) string { - jsonResponse, err := json.Marshal(fiber.Map{ - "error": errorMsg, - }) - if err != nil { - return `{"error": "Failed to marshal error response to json"}` - } - - return string(jsonResponse) -} - -// rpc default endpoint should be websocket. 
otherwise return an error -func verifyRPCendpoint(endpoint string) { - u, err := url.Parse(endpoint) - if err != nil { - utils.LavaFormatFatal("unparsable url", err, &map[string]string{"url": endpoint}) - } - switch u.Scheme { - case "ws", "wss": - return - default: - utils.LavaFormatWarning("URL scheme should be websocket (ws/wss), got: "+u.Scheme, nil, nil) - } -} diff --git a/relayer/chainproxy/chainproxyErrors.go b/relayer/chainproxy/chainproxyErrors.go deleted file mode 100644 index 569966eb09..0000000000 --- a/relayer/chainproxy/chainproxyErrors.go +++ /dev/null @@ -1,7 +0,0 @@ -package chainproxy - -import ( - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" -) - -var ErrFailedToConvertMessage = sdkerrors.New("RPC error", 1000, "failed to convert a message") diff --git a/relayer/chainproxy/connector.go b/relayer/chainproxy/connector.go deleted file mode 100644 index 8fa75d62a7..0000000000 --- a/relayer/chainproxy/connector.go +++ /dev/null @@ -1,334 +0,0 @@ -package chainproxy - -// -// Right now this is only for Ethereum -// TODO: make this into a proper connection pool that supports -// the chainproxy interface - -import ( - "context" - "errors" - "log" - "strconv" - "sync" - "time" - - "github.com/lavanet/lava/relayer/chainproxy/rpcclient" - "github.com/lavanet/lava/utils" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" -) - -const ( - DialTimeout = 500 * time.Millisecond - ParallelConnectionsFlag = "parallel-connections" - MaximumNumberOfParallelConnectionsAttempts = 10 -) - -var NumberOfParallelConnections uint = 10 - -type Connector struct { - lock utils.LavaMutex - freeClients []*rpcclient.Client - usedClients int - addr string -} - -func NewConnector(ctx context.Context, nConns uint, addr string) *Connector { - NumberOfParallelConnections = nConns // set number of parallel connections requested by user (or default.) 
- utils.LavaFormatInfo("Setting Number of Parallel Connections", &map[string]string{"nConns": strconv.FormatUint(uint64(NumberOfParallelConnections), 10)}) - connector := &Connector{ - freeClients: make([]*rpcclient.Client, 0, nConns), - addr: addr, - } - reachedClientLimit := false - - for i := uint(0); i < nConns; i++ { - if reachedClientLimit { - break - } - var rpcClient *rpcclient.Client - var err error - numberOfConnectionAttempts := 0 - for { - numberOfConnectionAttempts += 1 - if numberOfConnectionAttempts > MaximumNumberOfParallelConnectionsAttempts { - utils.LavaFormatError("Reached maximum number of parallel connections attempts, consider decreasing number of connections", - nil, &map[string]string{"Number of parallel connections": strconv.FormatUint(uint64(nConns), 10), "Currently Connected": strconv.FormatUint(uint64(len(connector.freeClients)), 10)}, - ) - reachedClientLimit = true - break - } - if ctx.Err() != nil { - connector.Close() - return nil - } - nctx, cancel := context.WithTimeout(ctx, DialTimeout) - rpcClient, err = rpcclient.DialContext(nctx, addr) - if err != nil { - utils.LavaFormatWarning("Could not connect to the node, retrying", err, &map[string]string{ - "Current Number Of Connections": strconv.FormatUint(uint64(i), 10), - "Number Of Attempts Remaining": strconv.Itoa(numberOfConnectionAttempts), - }) - cancel() - continue - } - cancel() - break - } - connector.freeClients = append(connector.freeClients, rpcClient) - } - utils.LavaFormatInfo("Number of parallel connections created: "+strconv.Itoa(len(connector.freeClients)), nil) - if len(connector.freeClients) == 0 { - utils.LavaFormatFatal("Could not create any connections to the node check address", nil, &map[string]string{"address": addr}) - } - go connector.connectorLoop(ctx) - return connector -} - -func (connector *Connector) connectorLoop(ctx context.Context) { - <-ctx.Done() - log.Println("connectorLoop ctx.Done") - connector.Close() -} - -func (connector *Connector) Close() { - for { - connector.lock.Lock() - log.Println("Connector closing", len(connector.freeClients)) - for i := 0; i < len(connector.freeClients); i++ { - connector.freeClients[i].Close() - } - connector.freeClients = []*rpcclient.Client{} - - if connector.usedClients > 0 { - log.Println("Connector closing, waiting for in use clients", connector.usedClients) - connector.lock.Unlock() - time.Sleep(100 * time.Millisecond) - } else { - connector.lock.Unlock() - break - } - } -} - -func (connector *Connector) increaseNumberOfClients(ctx context.Context, numberOfFreeClients int) { - utils.LavaFormatDebug("increasing number of clients", &map[string]string{"numberOfFreeClients": strconv.Itoa(numberOfFreeClients)}) - var rpcClient *rpcclient.Client - var err error - for connectionAttempt := 0; connectionAttempt < MaximumNumberOfParallelConnectionsAttempts; connectionAttempt++ { - nctx, cancel := context.WithTimeout(ctx, DialTimeout) - rpcClient, err = rpcclient.DialContext(nctx, connector.addr) - if err != nil { - utils.LavaFormatDebug( - "increaseNumberOfClients, Could not connect to the node, retrying", - &map[string]string{"err": err.Error(), "Number Of Attempts": strconv.Itoa(connectionAttempt)}) - cancel() - continue - } - cancel() - - connector.lock.Lock() // add connection to free list. 
- defer connector.lock.Unlock() - connector.freeClients = append(connector.freeClients, rpcClient) - return - } - utils.LavaFormatDebug("Failed increasing number of clients", nil) -} - -func (connector *Connector) GetRpc(ctx context.Context, block bool) (*rpcclient.Client, error) { - connector.lock.Lock() - defer connector.lock.Unlock() - numberOfFreeClients := len(connector.freeClients) - if numberOfFreeClients <= connector.usedClients { // if we reached half of the free clients start creating new connections - go connector.increaseNumberOfClients(ctx, numberOfFreeClients) // increase asynchronously the free list. - } - - if numberOfFreeClients == 0 { - if !block { - return nil, errors.New("out of clients") - } else { - for { - connector.lock.Unlock() - // if we reached 0 connections we need to create more connections - // before sleeping, increase asynchronously the free list. - go connector.increaseNumberOfClients(ctx, numberOfFreeClients) - time.Sleep(50 * time.Millisecond) - connector.lock.Lock() - numberOfFreeClients = len(connector.freeClients) - if numberOfFreeClients != 0 { - break - } - } - } - } - - ret := connector.freeClients[0] - connector.freeClients = connector.freeClients[1:] - connector.usedClients++ - - return ret, nil -} - -func (connector *Connector) ReturnRpc(rpc *rpcclient.Client) { - connector.lock.Lock() - defer connector.lock.Unlock() - - connector.usedClients-- - if len(connector.freeClients) > (connector.usedClients + int(NumberOfParallelConnections) /* the number we started with */) { - rpc.Close() // close connection - return // return without appending back to decrease idle connections - } - connector.freeClients = append(connector.freeClients, rpc) -} - -type GRPCConnector struct { - lock sync.RWMutex - freeClients []*grpc.ClientConn - usedClients int - addr string -} - -func NewGRPCConnector(ctx context.Context, nConns uint, addr string) *GRPCConnector { - connector := &GRPCConnector{ - freeClients: make([]*grpc.ClientConn, 0, nConns), - addr: addr, - } - - NumberOfParallelConnections = nConns // set number of parallel connections requested by user (or default.) 
- utils.LavaFormatInfo("Setting Number of Parallel Connections", &map[string]string{"nConns": strconv.FormatUint(uint64(NumberOfParallelConnections), 10)}) - reachedClientLimit := false - - for i := uint(0); i < nConns; i++ { - if reachedClientLimit { - break - } - var grpcClient *grpc.ClientConn - var err error - numberOfConnectionAttempts := 0 - for { - numberOfConnectionAttempts += 1 - if numberOfConnectionAttempts > MaximumNumberOfParallelConnectionsAttempts { - utils.LavaFormatError("Reached maximum number of parallel connections attempts, consider decreasing number of connections", - nil, &map[string]string{"Number of parallel connections": strconv.FormatUint(uint64(nConns), 10), "Currently Connected": strconv.FormatUint(uint64(len(connector.freeClients)), 10)}, - ) - reachedClientLimit = true - break - } - if ctx.Err() != nil { - connector.Close() - return nil - } - nctx, cancel := context.WithTimeout(ctx, DialTimeout) - grpcClient, err = grpc.DialContext(nctx, addr, grpc.WithBlock(), grpc.WithTransportCredentials(insecure.NewCredentials())) - if err != nil { - utils.LavaFormatWarning("Could not connect to the client, retrying", err, nil) - cancel() - continue - } - cancel() - break - } - connector.freeClients = append(connector.freeClients, grpcClient) - } - if len(connector.freeClients) == 0 { - utils.LavaFormatFatal("Could not create any connections to the node check address", nil, &map[string]string{"address": addr}) - } - go connector.connectorLoop(ctx) - return connector -} - -func (connector *GRPCConnector) increaseNumberOfClients(ctx context.Context, numberOfFreeClients int) { - utils.LavaFormatDebug("increasing number of clients", &map[string]string{"numberOfFreeClients": strconv.Itoa(numberOfFreeClients)}) - var grpcClient *grpc.ClientConn - var err error - for connectionAttempt := 0; connectionAttempt < MaximumNumberOfParallelConnectionsAttempts; connectionAttempt++ { - nctx, cancel := context.WithTimeout(ctx, DialTimeout) - grpcClient, err = grpc.DialContext(nctx, connector.addr, grpc.WithBlock(), grpc.WithTransportCredentials(insecure.NewCredentials())) - if err != nil { - utils.LavaFormatDebug("increaseNumberOfClients, Could not connect to the node, retrying", &map[string]string{"err": err.Error(), "Number Of Attempts": strconv.Itoa(connectionAttempt)}) - cancel() - continue - } - cancel() - - connector.lock.Lock() // add connection to free list. - defer connector.lock.Unlock() - connector.freeClients = append(connector.freeClients, grpcClient) - return - } - utils.LavaFormatDebug("increasing number of clients failed", nil) -} - -func (connector *GRPCConnector) GetRpc(ctx context.Context, block bool) (*grpc.ClientConn, error) { - connector.lock.Lock() - defer connector.lock.Unlock() - - numberOfFreeClients := len(connector.freeClients) - if numberOfFreeClients <= connector.usedClients { // if we reached half of the free clients start creating new connections - go connector.increaseNumberOfClients(ctx, numberOfFreeClients) // increase asynchronously the free list. - } - - if numberOfFreeClients == 0 { - if !block { - return nil, errors.New("out of clients") - } else { - for { - connector.lock.Unlock() - // if we reached 0 connections we need to create more connections - // before sleeping, increase asynchronously the free list. 
- go connector.increaseNumberOfClients(ctx, numberOfFreeClients) - time.Sleep(50 * time.Millisecond) - connector.lock.Lock() - numberOfFreeClients = len(connector.freeClients) - if numberOfFreeClients != 0 { - break - } - } - } - } - - ret := connector.freeClients[0] - connector.freeClients = connector.freeClients[1:] - connector.usedClients++ - - return ret, nil -} - -func (connector *GRPCConnector) ReturnRpc(rpc *grpc.ClientConn) { - connector.lock.Lock() - defer connector.lock.Unlock() - - connector.usedClients-- - if len(connector.freeClients) > (connector.usedClients + int(NumberOfParallelConnections) /* the number we started with */) { - rpc.Close() // close connection - return // return without appending back to decrease idle connections - } - connector.freeClients = append(connector.freeClients, rpc) -} - -func (connector *GRPCConnector) connectorLoop(ctx context.Context) { - <-ctx.Done() - log.Println("connectorLoop ctx.Done") - connector.Close() -} - -func (connector *GRPCConnector) Close() { - for { - connector.lock.Lock() - log.Println("Connector closing", len(connector.freeClients)) - for i := 0; i < len(connector.freeClients); i++ { - connector.freeClients[i].Close() - } - connector.freeClients = []*grpc.ClientConn{} - - if connector.usedClients > 0 { - log.Println("Connector closing, waiting for in use clients", connector.usedClients) - connector.lock.Unlock() - time.Sleep(100 * time.Millisecond) - } else { - connector.lock.Unlock() - break - } - } -} diff --git a/relayer/chainproxy/connector_test.go b/relayer/chainproxy/connector_test.go deleted file mode 100644 index 1c384a3625..0000000000 --- a/relayer/chainproxy/connector_test.go +++ /dev/null @@ -1,101 +0,0 @@ -package chainproxy - -import ( - "context" - "log" - "net" - "net/http" - "net/rpc" - "testing" - "time" - - "github.com/lavanet/lava/relayer/chainproxy/rpcclient" - "github.com/stretchr/testify/require" - "google.golang.org/grpc" -) - -const ( - listenerAddress = "localhost:1234" - listenerAddressTcp = "http://localhost:1234" - numberOfClients = 5 -) - -type Args struct{} - -type TimeServer int64 - -func (t *TimeServer) GiveServerTime(args *Args, reply *int64) error { - // Set the value at the pointer got from the client - *reply = time.Now().Unix() - return nil -} - -func createGRPCServer(t *testing.T) *grpc.Server { - lis, err := net.Listen("tcp", listenerAddress) - require.Nil(t, err) - s := grpc.NewServer() - go s.Serve(lis) // serve in a different thread - return s -} - -func createRPCServer(t *testing.T) net.Listener { - timeserver := new(TimeServer) - // Register the timeserver object upon which the GiveServerTime - // function will be called from the RPC server (from the client) - rpc.Register(timeserver) - // Registers an HTTP handler for RPC messages - rpc.HandleHTTP() - // Start listening for the requests on port 1234 - listener, err := net.Listen("tcp", listenerAddress) - if err != nil { - log.Fatal("Listener error: ", err) - } - // Serve accepts incoming HTTP connections on the listener l, creating - // a new service goroutine for each. The service goroutines read requests - // and then call handler to reply to them - go http.Serve(listener, nil) - - return listener -} - -func TestConnector(t *testing.T) { - listener := createRPCServer(t) // create a grpcServer so we can connect to its endpoint and validate everything works. 
- defer listener.Close() - ctx := context.Background() - conn := NewConnector(ctx, numberOfClients, listenerAddressTcp) - require.Equal(t, len(conn.freeClients), numberOfClients) - increasedClients := numberOfClients * 2 // increase to double the number of clients - rpcList := make([]*rpcclient.Client, increasedClients) - for i := 0; i < increasedClients; i++ { - rpc, err := conn.GetRpc(ctx, true) - require.Nil(t, err) - rpcList[i] = rpc - } - require.Equal(t, conn.usedClients, increasedClients) // checking we have used clients - for i := 0; i < increasedClients; i++ { - conn.ReturnRpc(rpcList[i]) - } - require.Equal(t, conn.usedClients, 0) // checking we dont have clients used - require.Equal(t, len(conn.freeClients), increasedClients) // checking we cleaned clients -} - -func TestConnectorGrpc(t *testing.T) { - server := createGRPCServer(t) // create a grpcServer so we can connect to its endpoint and validate everything works. - defer server.Stop() - ctx := context.Background() - conn := NewGRPCConnector(ctx, numberOfClients, listenerAddress) - require.Equal(t, len(conn.freeClients), numberOfClients) - increasedClients := numberOfClients * 2 // increase to double the number of clients - rpcList := make([]*grpc.ClientConn, increasedClients) - for i := 0; i < increasedClients; i++ { - rpc, err := conn.GetRpc(ctx, true) - require.Nil(t, err) - rpcList[i] = rpc - } - require.Equal(t, conn.usedClients, increasedClients) // checking we have used clients - for i := 0; i < increasedClients; i++ { - conn.ReturnRpc(rpcList[i]) - } - require.Equal(t, conn.usedClients, 0) // checking we dont have clients used - require.Equal(t, len(conn.freeClients), increasedClients) // checking we cleaned clients -} diff --git a/relayer/chainproxy/grpc.go b/relayer/chainproxy/grpc.go deleted file mode 100644 index efbbfb52e7..0000000000 --- a/relayer/chainproxy/grpc.go +++ /dev/null @@ -1,463 +0,0 @@ -package chainproxy - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "net" - "net/http" - "strings" - "time" - - "github.com/lavanet/lava/relayer/metrics" - - "github.com/btcsuite/btcd/btcec" - "github.com/fullstorydev/grpcurl" - "github.com/golang/protobuf/proto" - "github.com/jhump/protoreflect/desc" - "github.com/jhump/protoreflect/dynamic" - "github.com/jhump/protoreflect/grpcreflect" - "github.com/lavanet/lava/protocol/chainlib/chainproxy/thirdparty" - "github.com/lavanet/lava/protocol/lavasession" - "github.com/lavanet/lava/relayer/chainproxy/rpcclient" - "github.com/lavanet/lava/relayer/parser" - "github.com/lavanet/lava/relayer/performance" - "github.com/lavanet/lava/relayer/sentry" - "github.com/lavanet/lava/utils" - pairingtypes "github.com/lavanet/lava/x/pairing/types" - spectypes "github.com/lavanet/lava/x/spec/types" - "github.com/pkg/errors" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" - reflectionpbo "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" - "google.golang.org/grpc/status" -) - -type GrpcMessage struct { - methodDesc *desc.MethodDescriptor - formatter grpcurl.Formatter - - cp *GrpcChainProxy - serviceApi *spectypes.ServiceApi - apiInterface *spectypes.ApiInterface - path string - msg interface{} - requestedBlock int64 - connectionType string - Result json.RawMessage - extendContextTimeout time.Duration -} - -type GrpcChainProxy struct { - conn *GRPCConnector - nConns uint - nodeUrl string - sentry *sentry.Sentry - csm *lavasession.ConsumerSessionManager - portalLogs *PortalLogs - chainID string - cache 
*performance.Cache -} - -func (r *GrpcMessage) GetExtraContextTimeout() time.Duration { - return r.extendContextTimeout -} - -func (r *GrpcMessage) GetMsg() interface{} { - return r.msg -} - -func NewGrpcChainProxy(nodeUrl string, nConns uint, sentry *sentry.Sentry, csm *lavasession.ConsumerSessionManager, pLogs *PortalLogs) ChainProxy { - nodeUrl = strings.TrimSuffix(nodeUrl, "/") - return &GrpcChainProxy{ - nodeUrl: nodeUrl, - nConns: nConns, - sentry: sentry, - csm: csm, - portalLogs: pLogs, - chainID: sentry.GetChainID(), - cache: nil, - } -} - -func (m GrpcMessage) GetParams() interface{} { - return m.msg -} - -func (m GrpcMessage) GetResult() json.RawMessage { - msgFactory := dynamic.NewMessageFactoryWithDefaults() - msg := msgFactory.NewMessage(m.methodDesc.GetOutputType()) - if err := proto.Unmarshal(m.Result, msg); err != nil { - utils.LavaFormatError("failed to unmarshal GetResult", err, nil) - return m.Result - } - - s, err := m.formatter(msg) - if err != nil { - utils.LavaFormatError("m.formatter(msg)", err, nil) - return m.Result - } - - return []byte(s) -} - -func (m GrpcMessage) ParseBlock(inp string) (int64, error) { - return parser.ParseDefaultBlockParameter(inp) -} - -func (nm *GrpcMessage) RequestedBlock() int64 { - return nm.requestedBlock -} - -func (nm *GrpcMessage) GetServiceApi() *spectypes.ServiceApi { - return nm.serviceApi -} - -func (nm *GrpcMessage) GetInterface() *spectypes.ApiInterface { - return nm.apiInterface -} - -func (cp *GrpcChainProxy) GetConsumerSessionManager() *lavasession.ConsumerSessionManager { - return cp.csm -} - -func (cp *GrpcChainProxy) NewMessage(path string, data []byte, connectionType string) (*GrpcMessage, error) { - // - // Check api is supported and save it in nodeMsg - serviceApi, err := cp.getSupportedApi(path) - if err != nil { - return nil, utils.LavaFormatError("failed to get supported api in NewMessage", err, &map[string]string{"path": path}) - } - - var apiInterface *spectypes.ApiInterface = nil - for i := range serviceApi.ApiInterfaces { - if serviceApi.ApiInterfaces[i].Type == connectionType { - apiInterface = &serviceApi.ApiInterfaces[i] - break - } - } - if apiInterface == nil { - return nil, fmt.Errorf("could not find the interface %s in the service %s", connectionType, serviceApi.Name) - } - - nodeMsg := &GrpcMessage{ - cp: cp, - serviceApi: serviceApi, - apiInterface: apiInterface, - path: path, - msg: data, - } - - return nodeMsg, nil -} - -func (cp *GrpcChainProxy) FetchBlockHashByNum(ctx context.Context, blockNum int64) (string, error) { - serviceApi, ok := cp.GetSentry().GetSpecApiByTag(spectypes.GET_BLOCK_BY_NUM) - if !ok { - return "", errors.New(spectypes.GET_BLOCKNUM + " tag function not found") - } - - var nodeMsg NodeMessage - var err error - if serviceApi.GetParsing().FunctionTemplate != "" { - nodeMsg, err = cp.ParseMsg(serviceApi.Name, []byte(fmt.Sprintf(serviceApi.GetParsing().FunctionTemplate, blockNum)), "") - } else { - nodeMsg, err = cp.NewMessage(serviceApi.Name, nil, "") - } - - if err != nil { - return "", err - } - - _, _, _, err = nodeMsg.Send(ctx, nil) - if err != nil { - return "", err - } - - blockData, err := parser.ParseMessageResponse((nodeMsg.(*GrpcMessage)), serviceApi.Parsing.ResultParsing) - if err != nil { - return "", err - } - - // blockData is an interface array with the parsed result in index 0. - // we know to expect a string result for a hash. 
- ret, ok := blockData[spectypes.DEFAULT_PARSED_RESULT_INDEX].(string) - if !ok { - return "", utils.LavaFormatError("Failed to Convert blockData[spectypes.DEFAULT_PARSED_RESULT_INDEX].(string)", nil, &map[string]string{"blockData": fmt.Sprintf("%v", blockData[spectypes.DEFAULT_PARSED_RESULT_INDEX])}) - } - return ret, nil -} - -func (cp *GrpcChainProxy) FetchLatestBlockNum(ctx context.Context) (int64, error) { - serviceApi, ok := cp.GetSentry().GetSpecApiByTag(spectypes.GET_BLOCKNUM) - if !ok { - return spectypes.NOT_APPLICABLE, errors.New(spectypes.GET_BLOCKNUM + " tag function not found") - } - - params := make(json.RawMessage, 0) - nodeMsg, err := cp.NewMessage(serviceApi.GetName(), params, "") - if err != nil { - return spectypes.NOT_APPLICABLE, utils.LavaFormatError("new Message creation Failed at FetchLatestBlockNum", err, nil) - } - - _, _, _, err = nodeMsg.Send(ctx, nil) - if err != nil { - return spectypes.NOT_APPLICABLE, utils.LavaFormatError("Message send Failed at FetchLatestBlockNum", err, nil) - } - - blocknum, err := parser.ParseBlockFromReply(nodeMsg, serviceApi.Parsing.ResultParsing) - if err != nil { - return spectypes.NOT_APPLICABLE, err - } - - return blocknum, nil -} - -func (cp *GrpcChainProxy) GetSentry() *sentry.Sentry { - return cp.sentry -} - -func (cp *GrpcChainProxy) Start(ctx context.Context) error { - cp.conn = NewGRPCConnector(ctx, cp.nConns, cp.nodeUrl) - if cp.conn == nil { - return utils.LavaFormatError("g_conn == nil", nil, nil) - } - - return nil -} - -func (cp *GrpcChainProxy) getSupportedApi(path string) (*spectypes.ServiceApi, error) { - if api, ok := cp.sentry.MatchSpecApiByName(path); ok { - if !api.Enabled { - return nil, fmt.Errorf("gRPC Api is disabled %s ", path) - } - return &api, nil - } - return nil, fmt.Errorf("gRPC Api not supported %s ", path) -} - -func (cp *GrpcChainProxy) ParseMsg(path string, data []byte, connectionType string) (NodeMessage, error) { - // Check API is supported and save it in nodeMsg. 
- serviceApi, err := cp.getSupportedApi(path) - if err != nil { - return nil, utils.LavaFormatError("failed to getSupportedApi gRPC", err, nil) - } - - var apiInterface *spectypes.ApiInterface = nil - for i := range serviceApi.ApiInterfaces { - if serviceApi.ApiInterfaces[i].Type == connectionType { - apiInterface = &serviceApi.ApiInterfaces[i] - break - } - } - if apiInterface == nil { - return nil, fmt.Errorf("could not find the interface %s in the service %s", connectionType, serviceApi.Name) - } - - var extraTimeout time.Duration - if apiInterface.Category.HangingApi { - extraTimeout = time.Duration(cp.sentry.GetAverageBlockTime()) * time.Millisecond - } - - nodeMsg := &GrpcMessage{ - cp: cp, - serviceApi: serviceApi, - apiInterface: apiInterface, - path: path, - msg: data, - connectionType: connectionType, - extendContextTimeout: extraTimeout, - } - - return nodeMsg, nil -} - -func (cp *GrpcChainProxy) SetCache(cache *performance.Cache) { - cp.cache = cache -} - -func (cp *GrpcChainProxy) GetCache() *performance.Cache { - return cp.cache -} - -func (cp *GrpcChainProxy) PortalStart(ctx context.Context, privKey *btcec.PrivateKey, listenAddr string) { - utils.LavaFormatInfo("gRPC PortalStart", nil) - - lis, err := net.Listen("tcp", listenAddr) - if err != nil { - utils.LavaFormatFatal("provider failure setting up listener", err, &map[string]string{"listenAddr": listenAddr}) - } - apiInterface := cp.GetSentry().ApiInterface - sendRelayCallback := func(ctx context.Context, method string, reqBody []byte) ([]byte, error) { - msgSeed := cp.portalLogs.GetMessageSeed() - utils.LavaFormatInfo("GRPC Got Relay: "+method, nil) - metadataValues, _ := metadata.FromIncomingContext(ctx) - var relayReply *pairingtypes.RelayReply - metricsData := metrics.NewRelayAnalytics("NoDappID", cp.chainID, apiInterface) - relayReply, _, err = SendRelay(ctx, cp, privKey, method, string(reqBody), "", "NoDappID", metricsData) - go cp.portalLogs.AddMetricForGrpc(metricsData, err, &metadataValues) - if err != nil { - errMasking := cp.portalLogs.GetUniqueGuidResponseForError(err, msgSeed) - cp.portalLogs.LogRequestAndResponse("http in/out", true, method, string(reqBody), "", errMasking, msgSeed, err) - return nil, utils.LavaFormatError("Failed to SendRelay", fmt.Errorf(errMasking), nil) - } - cp.portalLogs.LogRequestAndResponse("http in/out", false, method, string(reqBody), "", "", msgSeed, nil) - return relayReply.Data, nil - } - - _, httpServer, err := thirdparty.RegisterServer(cp.chainID, sendRelayCallback) - if err != nil { - utils.LavaFormatFatal("provider failure RegisterServer", err, &map[string]string{"listenAddr": listenAddr}) - } - - utils.LavaFormatInfo("Server listening", &map[string]string{"Address": lis.Addr().String()}) - - if err := httpServer.Serve(lis); !errors.Is(err, http.ErrServerClosed) { - utils.LavaFormatFatal("Portal failed to serve", err, &map[string]string{"Address": lis.Addr().String(), "ChainID": cp.sentry.GetChainID()}) - } -} - -func descriptorSourceFromServer(refClient *grpcreflect.Client) grpcurl.DescriptorSource { - return ServerSource{Client: refClient} -} - -func (nm *GrpcMessage) Send(ctx context.Context, ch chan interface{}) (relayReply *pairingtypes.RelayReply, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) { - if ch != nil { - return nil, "", nil, utils.LavaFormatError("Subscribe is not allowed on rest", nil, nil) - } - conn, err := nm.cp.conn.GetRpc(ctx, true) - if err != nil { - return nil, "", nil, utils.LavaFormatError("grpc get connection 
failed ", err, nil) - } - defer nm.cp.conn.ReturnRpc(conn) - - connectCtx, cancel := context.WithTimeout(ctx, DefaultTimeout+nm.GetExtraContextTimeout()) - defer cancel() - - cl := grpcreflect.NewClient(ctx, reflectionpbo.NewServerReflectionClient(conn)) - descriptorSource := descriptorSourceFromServer(cl) - svc, methodName := ParseSymbol(nm.path) - var descriptor desc.Descriptor - if descriptor, err = descriptorSource.FindSymbol(svc); err != nil { - return nil, "", nil, utils.LavaFormatError("descriptorSource.FindSymbol", err, &map[string]string{"svc": svc, "methodName": methodName}) - } - - serviceDescriptor, ok := descriptor.(*desc.ServiceDescriptor) - if !ok { - return nil, "", nil, utils.LavaFormatError("serviceDescriptor, ok := descriptor.(*desc.ServiceDescriptor)", err, &map[string]string{"descriptor": fmt.Sprintf("%v", descriptor)}) - } - methodDescriptor := serviceDescriptor.FindMethodByName(methodName) - if methodDescriptor == nil { - return nil, "", nil, utils.LavaFormatError("serviceDescriptor.FindMethodByName returned nil", err, &map[string]string{"methodName": methodName}) - } - nm.methodDesc = methodDescriptor - msgFactory := dynamic.NewMessageFactoryWithDefaults() - - var reader io.Reader - msg := msgFactory.NewMessage(methodDescriptor.GetInputType()) - formatMessage := false - switch v := nm.msg.(type) { - case []byte: - if len(v) > 0 { - reader = bytes.NewReader(v) - formatMessage = true - } - default: - return nil, "", nil, utils.LavaFormatError("Unsupported type for gRPC msg", nil, &map[string]string{"type": fmt.Sprintf("%T", v)}) - } - - rp, formatter, err := grpcurl.RequestParserAndFormatter(grpcurl.FormatJSON, descriptorSource, reader, grpcurl.FormatOptions{ - EmitJSONDefaultFields: false, - IncludeTextSeparator: false, - AllowUnknownFields: true, - }) - if err != nil { - return nil, "", nil, utils.LavaFormatError("Failed to create formatter", err, nil) - } - nm.formatter = formatter - if formatMessage { - err = rp.Next(msg) - if err != nil { - return nil, "", nil, utils.LavaFormatError("rp.Next(msg) Failed", err, nil) - } - } - - response := msgFactory.NewMessage(methodDescriptor.GetOutputType()) - err = grpc.Invoke(connectCtx, nm.path, msg, response, conn) - if err != nil { - return nil, "", nil, utils.LavaFormatError("Invoke Failed", err, &map[string]string{"Method": nm.path, "msg": fmt.Sprintf("%s", nm.msg)}) - } - - var respBytes []byte - respBytes, err = proto.Marshal(response) - if err != nil { - return nil, "", nil, utils.LavaFormatError("proto.Marshal(response) Failed", err, nil) - } - - nm.Result = respBytes - reply := &pairingtypes.RelayReply{ - Data: respBytes, - } - return reply, "", nil, nil -} - -type ServerSource struct { - Client *grpcreflect.Client -} - -func (ss ServerSource) ListServices() ([]string, error) { - svcs, err := ss.Client.ListServices() - return svcs, ReflectionSupport(err) -} - -func (ss ServerSource) FindSymbol(fullyQualifiedName string) (desc.Descriptor, error) { - file, err := ss.Client.FileContainingSymbol(fullyQualifiedName) - if err != nil { - return nil, ReflectionSupport(err) - } - d := file.FindSymbol(fullyQualifiedName) - if d == nil { - return nil, utils.LavaFormatError("Symbol not found", fmt.Errorf("missing symbol: %s", fullyQualifiedName), nil) - } - return d, nil -} - -func (ss ServerSource) AllExtensionsForType(typeName string) ([]*desc.FieldDescriptor, error) { - var exts []*desc.FieldDescriptor - nums, err := ss.Client.AllExtensionNumbersForType(typeName) - if err != nil { - return nil, ReflectionSupport(err) - } - 
for _, fieldNum := range nums { - ext, err := ss.Client.ResolveExtension(typeName, fieldNum) - if err != nil { - return nil, ReflectionSupport(err) - } - exts = append(exts, ext) - } - return exts, nil -} - -func ReflectionSupport(err error) error { - if err == nil { - return nil - } - if stat, ok := status.FromError(err); ok && stat.Code() == codes.Unimplemented { - return utils.LavaFormatError("server does not support the reflection API", err, nil) - } - return err -} - -func ParseSymbol(svcAndMethod string) (string, string) { - pos := strings.LastIndex(svcAndMethod, "/") - if pos < 0 { - pos = strings.LastIndex(svcAndMethod, ".") - if pos < 0 { - return "", "" - } - } - return svcAndMethod[:pos], svcAndMethod[pos+1:] -} diff --git a/relayer/chainproxy/jsonRPC.go b/relayer/chainproxy/jsonRPC.go deleted file mode 100644 index 0f8fac5025..0000000000 --- a/relayer/chainproxy/jsonRPC.go +++ /dev/null @@ -1,503 +0,0 @@ -package chainproxy - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "net/http" - "strconv" - "strings" - "time" - - "github.com/lavanet/lava/relayer/metrics" - - "github.com/btcsuite/btcd/btcec" - "github.com/gofiber/fiber/v2" - "github.com/gofiber/fiber/v2/middleware/favicon" - "github.com/gofiber/websocket/v2" - "github.com/lavanet/lava/protocol/lavasession" - "github.com/lavanet/lava/relayer/chainproxy/rpcclient" - "github.com/lavanet/lava/relayer/parser" - "github.com/lavanet/lava/relayer/performance" - "github.com/lavanet/lava/relayer/sentry" - "github.com/lavanet/lava/utils" - pairingtypes "github.com/lavanet/lava/x/pairing/types" - spectypes "github.com/lavanet/lava/x/spec/types" -) - -type JsonrpcMessage struct { - Version string `json:"jsonrpc,omitempty"` - ID json.RawMessage `json:"id,omitempty"` - Method string `json:"method,omitempty"` - Params interface{} `json:"params,omitempty"` - Error *rpcclient.JsonError `json:"error,omitempty"` - Result json.RawMessage `json:"result,omitempty"` -} - -type JrpcMessage struct { - cp *JrpcChainProxy - serviceApi *spectypes.ServiceApi - apiInterface *spectypes.ApiInterface - msg *JsonrpcMessage - requestedBlock int64 - extendContextTimeout time.Duration -} - -func (r *JrpcMessage) GetExtraContextTimeout() time.Duration { - return r.extendContextTimeout -} - -func (j *JrpcMessage) GetMsg() interface{} { - return j.msg -} - -func (j *JrpcMessage) setMessageResult(result json.RawMessage) { - j.msg.Result = result -} - -func convertMsg(rpcMsg *rpcclient.JsonrpcMessage) (*JsonrpcMessage, error) { - // Return an error if the message was not sent - if rpcMsg == nil { - return nil, ErrFailedToConvertMessage - } - - msg := &JsonrpcMessage{ - Version: rpcMsg.Version, - ID: rpcMsg.ID, - Method: rpcMsg.Method, - Error: rpcMsg.Error, - Result: rpcMsg.Result, - } - - if rpcMsg.Params != nil { - msg.Params = rpcMsg.Params - } - - return msg, nil -} - -type JrpcChainProxy struct { - conn *Connector - nConns uint - nodeUrl string - sentry *sentry.Sentry - csm *lavasession.ConsumerSessionManager - portalLogs *PortalLogs - cache *performance.Cache -} - -func NewJrpcChainProxy(nodeUrl string, nConns uint, sentry *sentry.Sentry, csm *lavasession.ConsumerSessionManager, pLogs *PortalLogs) ChainProxy { - if nodeUrl != "" { // provider process - verifyRPCendpoint(nodeUrl) - } - return &JrpcChainProxy{ - nodeUrl: nodeUrl, - nConns: nConns, - sentry: sentry, - csm: csm, - portalLogs: pLogs, - cache: nil, - } -} - -func (cp *JrpcChainProxy) SetCache(cache *performance.Cache) { - cp.cache = cache -} - -func (cp *JrpcChainProxy) GetCache() 
*performance.Cache { - return cp.cache -} - -func (cp *JrpcChainProxy) GetConsumerSessionManager() *lavasession.ConsumerSessionManager { - return cp.csm -} - -func (cp *JrpcChainProxy) FetchLatestBlockNum(ctx context.Context) (int64, error) { - serviceApi, ok := cp.GetSentry().GetSpecApiByTag(spectypes.GET_BLOCKNUM) - if !ok { - return spectypes.NOT_APPLICABLE, errors.New(spectypes.GET_BLOCKNUM + " tag function not found") - } - - params := []interface{}{} - nodeMsg, err := cp.NewMessage(&serviceApi, spectypes.LATEST_BLOCK, params) - if err != nil { - return spectypes.NOT_APPLICABLE, err - } - - _, _, _, err = nodeMsg.Send(ctx, nil) - if err != nil { - return spectypes.NOT_APPLICABLE, utils.LavaFormatError("Error On Send FetchLatestBlockNum", err, &map[string]string{"nodeUrl": cp.nodeUrl}) - } - - blocknum, err := parser.ParseBlockFromReply(nodeMsg.msg, serviceApi.Parsing.ResultParsing) - if err != nil { - return spectypes.NOT_APPLICABLE, utils.LavaFormatError("Failed To Parse FetchLatestBlockNum", err, &map[string]string{ - "nodeUrl": cp.nodeUrl, - "Method": nodeMsg.msg.Method, - "Response": string(nodeMsg.msg.Result), - }) - } - - return blocknum, nil -} - -func (cp *JrpcChainProxy) FetchBlockHashByNum(ctx context.Context, blockNum int64) (string, error) { - serviceApi, ok := cp.GetSentry().GetSpecApiByTag(spectypes.GET_BLOCK_BY_NUM) - if !ok { - return "", errors.New(spectypes.GET_BLOCK_BY_NUM + " tag function not found") - } - - var nodeMsg NodeMessage - var err error - if serviceApi.GetParsing().FunctionTemplate != "" { - nodeMsg, err = cp.ParseMsg("", []byte(fmt.Sprintf(serviceApi.GetParsing().FunctionTemplate, blockNum)), http.MethodGet) - } else { - params := make([]interface{}, 0) - params = append(params, blockNum) - nodeMsg, err = cp.NewMessage(&serviceApi, spectypes.LATEST_BLOCK, params) - } - - if err != nil { - return "", err - } - - _, _, _, err = nodeMsg.Send(ctx, nil) - if err != nil { - return "", utils.LavaFormatError("Error On Send FetchBlockHashByNum", err, &map[string]string{"nodeUrl": cp.nodeUrl}) - } - // log.Println("%s", reply) - msgParsed, ok := nodeMsg.GetMsg().(*JsonrpcMessage) - if !ok { - return "", fmt.Errorf("FetchBlockHashByNum - nodeMsg.GetMsg().(*JsonrpcMessage) - type assertion failed, type:" + fmt.Sprintf("%s", nodeMsg.GetMsg())) - } - blockData, err := parser.ParseMessageResponse(msgParsed, serviceApi.Parsing.ResultParsing) - if err != nil { - return "", err - } - - // blockData is an interface array with the parsed result in index 0. - // we know to expect a string result for a hash. 
- parsedIndexString, ok := blockData[spectypes.DEFAULT_PARSED_RESULT_INDEX].(string) - if !ok { - return "", fmt.Errorf("FetchBlockHashByNum - blockData[spectypes.DEFAULT_PARSED_RESULT_INDEX].(string) - type assertion failed, type:" + fmt.Sprintf("%s", blockData[spectypes.DEFAULT_PARSED_RESULT_INDEX])) - } - return parsedIndexString, nil -} - -func (cp JsonrpcMessage) GetParams() interface{} { - return cp.Params -} - -func (cp JsonrpcMessage) GetResult() json.RawMessage { - return cp.Result -} - -func (cp JsonrpcMessage) ParseBlock(inp string) (int64, error) { - return parser.ParseDefaultBlockParameter(inp) -} - -func (cp *JrpcChainProxy) GetSentry() *sentry.Sentry { - return cp.sentry -} - -func (cp *JrpcChainProxy) Start(ctx context.Context) error { - cp.conn = NewConnector(ctx, cp.nConns, cp.nodeUrl) - if cp.conn == nil { - return errors.New("g_conn == nil") - } - - return nil -} - -func (cp *JrpcChainProxy) getSupportedApi(name string) (*spectypes.ServiceApi, error) { - if api, ok := cp.sentry.GetSpecApiByName(name); ok { - if !api.Enabled { - return nil, errors.New("api is disabled") - } - return &api, nil - } - - return nil, errors.New("JRPC api not supported") -} - -func (cp *JrpcChainProxy) ParseMsg(path string, data []byte, connectionType string) (NodeMessage, error) { - // connectionType is currently only used in rest API. - // Unmarshal request - var msg JsonrpcMessage - err := json.Unmarshal(data, &msg) - if err != nil { - return nil, err - } - // - // Check api is supported and save it in nodeMsg - serviceApi, err := cp.getSupportedApi(msg.Method) - if err != nil { - return nil, utils.LavaFormatError("getSupportedApi failed", err, &map[string]string{"method": msg.Method}) - } - - var apiInterface *spectypes.ApiInterface = nil - for i := range serviceApi.ApiInterfaces { - if serviceApi.ApiInterfaces[i].Type == connectionType { - apiInterface = &serviceApi.ApiInterfaces[i] - break - } - } - if apiInterface == nil { - return nil, fmt.Errorf("could not find the interface %s in the service %s", connectionType, serviceApi.Name) - } - - requestedBlock, err := parser.ParseBlockFromParams(msg, serviceApi.BlockParsing) - if err != nil { - return nil, err - } - - var extraTimeout time.Duration - if apiInterface.Category.HangingApi { - extraTimeout = time.Duration(cp.sentry.GetAverageBlockTime()) * time.Millisecond - } - - nodeMsg := &JrpcMessage{ - cp: cp, - serviceApi: serviceApi, - apiInterface: apiInterface, - msg: &msg, - requestedBlock: requestedBlock, - extendContextTimeout: extraTimeout, - } - return nodeMsg, nil -} - -func (cp *JrpcChainProxy) NewMessage(serviceApi *spectypes.ServiceApi, requestedBlock int64, params []interface{}) (*JrpcMessage, error) { - method := serviceApi.GetName() - serviceApi, err := cp.getSupportedApi(method) - if err != nil { - return nil, err - } - - nodeMsg := &JrpcMessage{ - cp: cp, - serviceApi: serviceApi, - requestedBlock: requestedBlock, - msg: &JsonrpcMessage{ - Version: "2.0", - ID: []byte("1"), // TODO:: use ids - Method: method, - Params: params, - }, - } - return nodeMsg, nil -} - -func (cp *JrpcChainProxy) PortalStart(ctx context.Context, privKey *btcec.PrivateKey, listenAddr string) { - // - // Setup HTTP Server - app := fiber.New(fiber.Config{}) - - app.Use(favicon.New()) - - app.Use("/ws/:dappId", func(c *fiber.Ctx) error { - cp.portalLogs.LogStartTransaction("jsonRpc-WebSocket") - // IsWebSocketUpgrade returns true if the client - // requested upgrade to the WebSocket protocol. 
- if websocket.IsWebSocketUpgrade(c) { - c.Locals("allowed", true) - return c.Next() - } - return fiber.ErrUpgradeRequired - }) - - chainID := cp.GetSentry().ChainID - apiInterface := cp.GetSentry().ApiInterface - - webSocketCallback := websocket.New(func(c *websocket.Conn) { - var ( - mt int - msg []byte - err error - ) - msgSeed := cp.portalLogs.GetMessageSeed() - for { - if mt, msg, err = c.ReadMessage(); err != nil { - cp.portalLogs.AnalyzeWebSocketErrorAndWriteMessage(c, mt, err, msgSeed, msg, spectypes.APIInterfaceJsonRPC) - break - } - dappID := ExtractDappIDFromWebsocketConnection(c) - utils.LavaFormatInfo("ws in <<<", &map[string]string{"seed": msgSeed, "msg": string(msg), "dappID": dappID}) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() // incase there's a problem make sure to cancel the connection - metricsData := metrics.NewRelayAnalytics(dappID, chainID, apiInterface) - reply, replyServer, err := SendRelay(ctx, cp, privKey, "", string(msg), http.MethodGet, dappID, metricsData) - go cp.portalLogs.AddMetricForWebSocket(metricsData, err, c) - if err != nil { - cp.portalLogs.AnalyzeWebSocketErrorAndWriteMessage(c, mt, err, msgSeed, msg, spectypes.APIInterfaceJsonRPC) - continue - } - // If subscribe the first reply would contain the RPC ID that can be used for disconnect. - if replyServer != nil { - var reply pairingtypes.RelayReply - err = (*replyServer).RecvMsg(&reply) // this reply contains the RPC ID - if err != nil { - cp.portalLogs.AnalyzeWebSocketErrorAndWriteMessage(c, mt, err, msgSeed, msg, spectypes.APIInterfaceJsonRPC) - continue - } - - if err = c.WriteMessage(mt, reply.Data); err != nil { - cp.portalLogs.AnalyzeWebSocketErrorAndWriteMessage(c, mt, err, msgSeed, msg, spectypes.APIInterfaceJsonRPC) - continue - } - cp.portalLogs.LogRequestAndResponse("jsonrpc ws msg", false, "ws", c.LocalAddr().String(), string(msg), string(reply.Data), msgSeed, nil) - for { - err = (*replyServer).RecvMsg(&reply) - if err != nil { - cp.portalLogs.AnalyzeWebSocketErrorAndWriteMessage(c, mt, err, msgSeed, msg, spectypes.APIInterfaceJsonRPC) - break - } - - // If portal cant write to the client - if err = c.WriteMessage(mt, reply.Data); err != nil { - cancel() - cp.portalLogs.AnalyzeWebSocketErrorAndWriteMessage(c, mt, err, msgSeed, msg, spectypes.APIInterfaceJsonRPC) - // break - } - - cp.portalLogs.LogRequestAndResponse("jsonrpc ws msg", false, "ws", c.LocalAddr().String(), string(msg), string(reply.Data), msgSeed, nil) - } - } else { - if err = c.WriteMessage(mt, reply.Data); err != nil { - cp.portalLogs.AnalyzeWebSocketErrorAndWriteMessage(c, mt, err, msgSeed, msg, spectypes.APIInterfaceJsonRPC) - continue - } - cp.portalLogs.LogRequestAndResponse("jsonrpc ws msg", false, "ws", c.LocalAddr().String(), string(msg), string(reply.Data), msgSeed, nil) - } - } - }) - websocketCallbackWithDappID := constructFiberCallbackWithHeaderAndParameterExtraction(webSocketCallback, cp.portalLogs.StoreMetricData) - app.Get("/ws/:dappId", websocketCallbackWithDappID) - app.Get("/:dappId/websocket", websocketCallbackWithDappID) // catching http://HOST:PORT/1/websocket requests. 
- - app.Post("/:dappId/*", func(c *fiber.Ctx) error { - cp.portalLogs.LogStartTransaction("jsonRpc-http post") - msgSeed := cp.portalLogs.GetMessageSeed() - dappID := ExtractDappIDFromFiberContext(c) - metricsData := metrics.NewRelayAnalytics(dappID, chainID, apiInterface) - utils.LavaFormatInfo("in <<<", &map[string]string{"seed": msgSeed, "msg": string(c.Body()), "dappID": dappID}) - - reply, _, err := SendRelay(ctx, cp, privKey, "", string(c.Body()), http.MethodGet, dappID, metricsData) - go cp.portalLogs.AddMetricForHttp(metricsData, err, c.GetReqHeaders()) - if err != nil { - // Get unique GUID response - errMasking := cp.portalLogs.GetUniqueGuidResponseForError(err, msgSeed) - - // Log request and response - cp.portalLogs.LogRequestAndResponse("jsonrpc http", true, "POST", c.Request().URI().String(), string(c.Body()), errMasking, msgSeed, err) - - // Set status to internal error - c.Status(fiber.StatusInternalServerError) - - // Construct json response - response := convertToJsonError(errMasking) - - // Return error json response - return c.SendString(response) - } - // Log request and response - cp.portalLogs.LogRequestAndResponse("jsonrpc http", - false, - "POST", - c.Request().URI().String(), - string(c.Body()), - string(reply.Data), - msgSeed, - nil, - ) - - // Return json response - return c.SendString(string(reply.Data)) - }) - - // Go - err := app.Listen(listenAddr) - if err != nil { - utils.LavaFormatError("app.Listen(listenAddr)", err, nil) - } -} - -func (nm *JrpcMessage) GetServiceApi() *spectypes.ServiceApi { - return nm.serviceApi -} - -func (nm *JrpcMessage) GetInterface() *spectypes.ApiInterface { - return nm.apiInterface -} - -func (nm *JrpcMessage) RequestedBlock() int64 { - return nm.requestedBlock -} - -func (nm *JrpcMessage) Send(ctx context.Context, ch chan interface{}) (relayReply *pairingtypes.RelayReply, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) { - // Get node - rpc, err := nm.cp.conn.GetRpc(ctx, true) - if err != nil { - return nil, "", nil, err - } - defer nm.cp.conn.ReturnRpc(rpc) - - // Call our node - var rpcMessage *rpcclient.JsonrpcMessage - var replyMessage *JsonrpcMessage - var sub *rpcclient.ClientSubscription - if ch != nil { - sub, rpcMessage, err = rpc.Subscribe(context.Background(), nm.msg.ID, nm.msg.Method, ch, nm.msg.Params) - } else { - connectCtx, cancel := context.WithTimeout(ctx, getTimePerCu(nm.serviceApi.ComputeUnits)+nm.GetExtraContextTimeout()) - defer cancel() - rpcMessage, err = rpc.CallContext(connectCtx, nm.msg.ID, nm.msg.Method, nm.msg.Params) - } - - var replyMsg JsonrpcMessage - // the error check here would only wrap errors not from the rpc - if err != nil { - if strings.Contains(err.Error(), context.DeadlineExceeded.Error()) { - // Not an rpc error, return provider error without disclosing the endpoint address - return nil, "", nil, utils.LavaFormatError("Failed Sending Message", context.DeadlineExceeded, nil) - } - replyMsg = JsonrpcMessage{ - Version: nm.msg.Version, - ID: nm.msg.ID, - } - replyMsg.Error = &rpcclient.JsonError{ - Code: 1, - Message: fmt.Sprintf("%s", err), - } - // this later causes returning an error - } else { - replyMessage, err = convertMsg(rpcMessage) - if err != nil { - return nil, "", nil, utils.LavaFormatError("jsonRPC error", err, nil) - } - - nm.msg = replyMessage - replyMsg = *replyMessage - } - - data, err := json.Marshal(replyMsg) - if err != nil { - nm.msg.Result = []byte(fmt.Sprintf("%s", err)) - return nil, "", nil, err - } - - reply := 
&pairingtypes.RelayReply{ - Data: data, - } - - if ch != nil { - subscriptionID, err = strconv.Unquote(string(replyMsg.Result)) - if err != nil { - return nil, "", nil, utils.LavaFormatError("Subscription failed", err, nil) - } - } - - return reply, subscriptionID, sub, err -} diff --git a/relayer/chainproxy/portalLogs.go b/relayer/chainproxy/portalLogs.go deleted file mode 100644 index 7f0e12c82b..0000000000 --- a/relayer/chainproxy/portalLogs.go +++ /dev/null @@ -1,182 +0,0 @@ -package chainproxy - -import ( - "encoding/json" - "math/rand" - "os" - "strconv" - "strings" - - "google.golang.org/grpc/metadata" - - "github.com/lavanet/lava/relayer/metrics" - - "github.com/gofiber/websocket/v2" - "github.com/joho/godotenv" - "github.com/lavanet/lava/relayer/parser" - "github.com/lavanet/lava/utils" - "github.com/newrelic/go-agent/v3/newrelic" -) - -var ReturnMaskedErrors = "false" - -const ( - webSocketCloseMessage = "websocket: close 1005 (no status)" - RefererHeaderKey = "Referer" -) - -type PortalLogs struct { - newRelicApplication *newrelic.Application - MetricService *metrics.MetricService - StoreMetricData bool - excludeMetricsReferrers string -} - -func NewPortalLogs() (*PortalLogs, error) { - err := godotenv.Load() - if err != nil { - utils.LavaFormatInfo("New relic missing environment file", nil) - return &PortalLogs{}, nil - } - - newRelicAppName := os.Getenv("NEW_RELIC_APP_NAME") - newRelicLicenseKey := os.Getenv("NEW_RELIC_LICENSE_KEY") - if newRelicAppName == "" || newRelicLicenseKey == "" { - utils.LavaFormatInfo("New relic missing environment variables", nil) - return &PortalLogs{}, nil - } - newRelicApplication, err := newrelic.NewApplication( - newrelic.ConfigAppName(newRelicAppName), - newrelic.ConfigLicense(newRelicLicenseKey), - newrelic.ConfigFromEnvironment(), - func(cfg *newrelic.Config) { - // Set specific Config fields inside a custom ConfigOption. - sMaxSamplesStored, ok := os.LookupEnv("NEW_RELIC_TRANSACTION_EVENTS_MAX_SAMPLES_STORED") - if ok { - maxSamplesStored, err := strconv.Atoi(sMaxSamplesStored) - if err != nil { - cfg.TransactionEvents.MaxSamplesStored = maxSamplesStored - } - } - }, - ) - portal := &PortalLogs{newRelicApplication: newRelicApplication, StoreMetricData: false} - isMetricEnabled, _ := strconv.ParseBool(os.Getenv("IS_METRICS_ENABLED")) - if isMetricEnabled { - portal.StoreMetricData = true - portal.MetricService = metrics.NewMetricService() - } - return portal, err -} - -func (pl *PortalLogs) GetMessageSeed() string { - return "GUID_" + strconv.Itoa(rand.Intn(10000000000)) -} - -// Input will be masked with a random GUID if returnMaskedErrors is set to true -func (pl *PortalLogs) GetUniqueGuidResponseForError(responseError error, msgSeed string) string { - type ErrorData struct { - Error_GUID string `json:"Error_GUID"` - Error string `json:"Error,omitempty"` - } - - data := ErrorData{ - Error_GUID: msgSeed, - } - if ReturnMaskedErrors == "false" { - data.Error = responseError.Error() - } - - utils.LavaFormatError("UniqueGuidResponseForError", responseError, &map[string]string{"msgSeed": msgSeed}) - - ret, _ := json.Marshal(data) - - return string(ret) -} - -// Websocket healthy disconnections throw "websocket: close 1005 (no status)" error, -// We dont want to alert error monitoring for that purpses. 
-func (pl *PortalLogs) AnalyzeWebSocketErrorAndWriteMessage(c *websocket.Conn, mt int, err error, msgSeed string, msg []byte, rpcType string) { - if err != nil { - if err.Error() == webSocketCloseMessage { - utils.LavaFormatInfo("Websocket connection closed by the user, "+err.Error(), nil) - return - } - pl.LogRequestAndResponse(rpcType+" ws msg", true, "ws", c.LocalAddr().String(), string(msg), "", msgSeed, err) - - type ErrorResponse struct { - ErrorReceived string `json:"Error_Received"` - } - - jsonResponse, _ := json.Marshal(ErrorResponse{ - ErrorReceived: pl.GetUniqueGuidResponseForError(err, msgSeed), - }) - - c.WriteMessage(mt, jsonResponse) - } -} - -func (pl *PortalLogs) LogRequestAndResponse(module string, hasError bool, method string, path string, req string, resp string, msgSeed string, err error) { - if hasError && err != nil { - utils.LavaFormatError(module, err, &map[string]string{"GUID": msgSeed, "request": req, "response": parser.CapStringLen(resp), "method": method, "path": path, "HasError": strconv.FormatBool(hasError)}) - return - } - utils.LavaFormatDebug(module, &map[string]string{"GUID": msgSeed, "request": req, "response": parser.CapStringLen(resp), "method": method, "path": path, "HasError": strconv.FormatBool(hasError)}) -} - -func (pl *PortalLogs) LogStartTransaction(name string) { - if pl.newRelicApplication != nil { - txn := pl.newRelicApplication.StartTransaction(name) - defer txn.End() - } -} - -func (pl *PortalLogs) AddMetricForHttp(data *metrics.RelayMetrics, err error, headers map[string]string) { - if pl.StoreMetricData && pl.shouldCountMetricForHttp(headers) { - data.Success = err == nil - pl.MetricService.SendData(*data) - } -} - -func (pl *PortalLogs) AddMetricForWebSocket(data *metrics.RelayMetrics, err error, c *websocket.Conn) { - if pl.StoreMetricData && pl.shouldCountMetricForWebSocket(c) { - data.Success = err == nil - pl.MetricService.SendData(*data) - } -} - -func (pl *PortalLogs) AddMetricForGrpc(data *metrics.RelayMetrics, err error, metadataValues *metadata.MD) { - if pl.StoreMetricData && pl.shouldCountMetricForGrpc(metadataValues) { - data.Success = err == nil - pl.MetricService.SendData(*data) - } -} - -func (pl *PortalLogs) shouldCountMetricForHttp(headers map[string]string) bool { - refererHeaderValue := headers[RefererHeaderKey] - return pl.shouldCountMetrics(refererHeaderValue) -} - -func (pl *PortalLogs) shouldCountMetricForWebSocket(c *websocket.Conn) bool { - refererHeaderValue, isHeaderFound := c.Locals(RefererHeaderKey).(string) - if !isHeaderFound { - return true - } - return pl.shouldCountMetrics(refererHeaderValue) -} - -func (pl *PortalLogs) shouldCountMetricForGrpc(metadataValues *metadata.MD) bool { - if metadataValues != nil { - refererHeaderValue := metadataValues.Get(RefererHeaderKey) - result := len(refererHeaderValue) > 0 && pl.shouldCountMetrics(refererHeaderValue[0]) - return !result - } - return true -} - -func (pl *PortalLogs) shouldCountMetrics(refererHeaderValue string) bool { - if len(pl.excludeMetricsReferrers) > 0 && len(refererHeaderValue) > 0 { - return !strings.Contains(refererHeaderValue, pl.excludeMetricsReferrers) - } - return true -} diff --git a/relayer/chainproxy/rest.go b/relayer/chainproxy/rest.go deleted file mode 100644 index 920e1d6373..0000000000 --- a/relayer/chainproxy/rest.go +++ /dev/null @@ -1,404 +0,0 @@ -package chainproxy - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "strings" - "time" - - "github.com/lavanet/lava/relayer/metrics" - - 
"github.com/btcsuite/btcd/btcec" - "github.com/gofiber/fiber/v2" - "github.com/gofiber/fiber/v2/middleware/favicon" - "github.com/lavanet/lava/protocol/lavasession" - "github.com/lavanet/lava/relayer/chainproxy/rpcclient" - "github.com/lavanet/lava/relayer/parser" - "github.com/lavanet/lava/relayer/performance" - "github.com/lavanet/lava/relayer/sentry" - "github.com/lavanet/lava/utils" - pairingtypes "github.com/lavanet/lava/x/pairing/types" - spectypes "github.com/lavanet/lava/x/spec/types" -) - -type RestMessage struct { - cp *RestChainProxy - serviceApi *spectypes.ServiceApi - path string - msg []byte - requestedBlock int64 - Result json.RawMessage - apiInterface *spectypes.ApiInterface - extendContextTimeout time.Duration -} - -type RestChainProxy struct { - nodeUrl string - sentry *sentry.Sentry - csm *lavasession.ConsumerSessionManager - portalLogs *PortalLogs - cache *performance.Cache -} - -func (r *RestMessage) GetExtraContextTimeout() time.Duration { - return r.extendContextTimeout -} - -func (r *RestMessage) GetMsg() interface{} { - return r.msg -} - -func NewRestChainProxy(nodeUrl string, sentry *sentry.Sentry, csm *lavasession.ConsumerSessionManager, pLogs *PortalLogs) ChainProxy { - nodeUrl = strings.TrimSuffix(nodeUrl, "/") - return &RestChainProxy{ - nodeUrl: nodeUrl, - sentry: sentry, - csm: csm, - portalLogs: pLogs, - } -} - -func (cp *RestChainProxy) GetConsumerSessionManager() *lavasession.ConsumerSessionManager { - return cp.csm -} - -func (cp *RestChainProxy) NewMessage(path string, data []byte, connectionType string) (*RestMessage, error) { - // - // Check api is supported an save it in nodeMsg - serviceApi, err := cp.getSupportedApi(path) - if err != nil { - return nil, err - } - - var apiInterface *spectypes.ApiInterface = nil - for i := range serviceApi.ApiInterfaces { - if serviceApi.ApiInterfaces[i].Type == connectionType { - apiInterface = &serviceApi.ApiInterfaces[i] - break - } - } - if apiInterface == nil { - return nil, fmt.Errorf("could not find the interface %s in the service %s", connectionType, serviceApi.Name) - } - - nodeMsg := &RestMessage{ - cp: cp, - serviceApi: serviceApi, - apiInterface: apiInterface, - path: path, - msg: data, - } - - return nodeMsg, nil -} - -func (m RestMessage) GetParams() interface{} { - retArr := make([]interface{}, 0) - retArr = append(retArr, m.msg) - return retArr -} - -func (m RestMessage) GetResult() json.RawMessage { - return m.Result -} - -func (m RestMessage) ParseBlock(inp string) (int64, error) { - return parser.ParseDefaultBlockParameter(inp) -} - -func (cp *RestChainProxy) SetCache(cache *performance.Cache) { - cp.cache = cache -} - -func (cp *RestChainProxy) GetCache() *performance.Cache { - return cp.cache -} - -func (cp *RestChainProxy) FetchBlockHashByNum(ctx context.Context, blockNum int64) (string, error) { - serviceApi, ok := cp.GetSentry().GetSpecApiByTag(spectypes.GET_BLOCK_BY_NUM) - if !ok { - return "", errors.New(spectypes.GET_BLOCKNUM + " tag function not found") - } - - var nodeMsg NodeMessage - var err error - if serviceApi.GetParsing().FunctionTemplate != "" { - nodeMsg, err = cp.ParseMsg(fmt.Sprintf(serviceApi.GetParsing().FunctionTemplate, blockNum), nil, http.MethodGet) - } else { - nodeMsg, err = cp.NewMessage(serviceApi.Name, nil, http.MethodGet) - } - - if err != nil { - return "", err - } - - _, _, _, err = nodeMsg.Send(ctx, nil) - if err != nil { - return "", utils.LavaFormatError("Error On Send FetchBlockHashByNum", err, &map[string]string{"nodeUrl": cp.nodeUrl}) - } - - blockData, err 
:= parser.ParseMessageResponse((nodeMsg.(*RestMessage)), serviceApi.Parsing.ResultParsing) - if err != nil { - return "", err - } - - // blockData is an interface array with the parsed result in index 0. - // we know to expect a string result for a hash. - parsedIndexString, ok := blockData[spectypes.DEFAULT_PARSED_RESULT_INDEX].(string) - if !ok { - return "", fmt.Errorf("FetchBlockHashByNum - blockData[spectypes.DEFAULT_PARSED_RESULT_INDEX].(string) - type assertion failed, type:" + fmt.Sprintf("%s", blockData[spectypes.DEFAULT_PARSED_RESULT_INDEX])) - } - return parsedIndexString, nil -} - -func (cp *RestChainProxy) FetchLatestBlockNum(ctx context.Context) (int64, error) { - serviceApi, ok := cp.GetSentry().GetSpecApiByTag(spectypes.GET_BLOCKNUM) - if !ok { - return spectypes.NOT_APPLICABLE, errors.New(spectypes.GET_BLOCKNUM + " tag function not found") - } - - params := []byte{} - nodeMsg, err := cp.NewMessage(serviceApi.GetName(), params, http.MethodGet) - if err != nil { - return spectypes.NOT_APPLICABLE, err - } - - _, _, _, err = nodeMsg.Send(ctx, nil) - if err != nil { - return spectypes.NOT_APPLICABLE, utils.LavaFormatError("Error On Send FetchLatestBlockNum", err, &map[string]string{"nodeUrl": cp.nodeUrl}) - } - - blocknum, err := parser.ParseBlockFromReply(nodeMsg, serviceApi.Parsing.ResultParsing) - if err != nil { - return spectypes.NOT_APPLICABLE, utils.LavaFormatError("Failed To Parse FetchLatestBlockNum", err, &map[string]string{ - "nodeUrl": cp.nodeUrl, - "Method": nodeMsg.path, - "Response": string(nodeMsg.Result), - }) - } - - return blocknum, nil -} - -func (cp *RestChainProxy) GetSentry() *sentry.Sentry { - return cp.sentry -} - -func (cp *RestChainProxy) Start(context.Context) error { - return nil -} - -func (cp *RestChainProxy) getSupportedApi(path string) (*spectypes.ServiceApi, error) { - path = strings.SplitN(path, "?", 2)[0] - if api, ok := cp.sentry.MatchSpecApiByName(path); ok { - if !api.Enabled { - return nil, fmt.Errorf("REST Api is disabled %s ", path) - } - return &api, nil - } - return nil, fmt.Errorf("REST Api not supported %s ", path) -} - -func (cp *RestChainProxy) ParseMsg(path string, data []byte, connectionType string) (NodeMessage, error) { - // - // Check api is supported an save it in nodeMsg - serviceApi, err := cp.getSupportedApi(path) - if err != nil { - return nil, err - } - - var apiInterface *spectypes.ApiInterface = nil - for i := range serviceApi.ApiInterfaces { - if serviceApi.ApiInterfaces[i].Type == connectionType { - apiInterface = &serviceApi.ApiInterfaces[i] - break - } - } - if apiInterface == nil { - return nil, fmt.Errorf("could not find the interface %s in the service %s", connectionType, serviceApi.Name) - } - - var extraTimeout time.Duration - if apiInterface.Category.HangingApi { - extraTimeout = time.Duration(cp.sentry.GetAverageBlockTime()) * time.Millisecond - } - - // data contains the query string - nodeMsg := &RestMessage{ - cp: cp, - serviceApi: serviceApi, - path: path, - msg: data, - apiInterface: apiInterface, // POST,GET etc.. 
- extendContextTimeout: extraTimeout, - } - - return nodeMsg, nil -} - -func (cp *RestChainProxy) PortalStart(ctx context.Context, privKey *btcec.PrivateKey, listenAddr string) { - // - // Setup HTTP Server - app := fiber.New(fiber.Config{}) - - app.Use(favicon.New()) - - chainID := cp.GetSentry().ChainID - apiInterface := cp.GetSentry().ApiInterface - // Catch Post - app.Post("/:dappId/*", func(c *fiber.Ctx) error { - cp.portalLogs.LogStartTransaction("rest-http") - - msgSeed := cp.portalLogs.GetMessageSeed() - - path := "/" + c.Params("*") - - // TODO: handle contentType, in case its not application/json currently we set it to application/json in the Send() method - // contentType := string(c.Context().Request.Header.ContentType()) - dappID := ExtractDappIDFromFiberContext(c) - metricsData := metrics.NewRelayAnalytics(dappID, chainID, apiInterface) - utils.LavaFormatInfo("in <<<", &map[string]string{"path": path, "dappID": dappID, "msgSeed": msgSeed}) - requestBody := string(c.Body()) - reply, _, err := SendRelay(ctx, cp, privKey, path, requestBody, http.MethodPost, dappID, metricsData) - go cp.portalLogs.AddMetricForHttp(metricsData, err, c.GetReqHeaders()) - if err != nil { - // Get unique GUID response - errMasking := cp.portalLogs.GetUniqueGuidResponseForError(err, msgSeed) - - // Log request and response - cp.portalLogs.LogRequestAndResponse("http in/out", true, http.MethodPost, path, requestBody, errMasking, msgSeed, err) - - // Set status to internal error - c.Status(fiber.StatusInternalServerError) - - // Construct json response - response := convertToJsonError(errMasking) - - // Return error json response - return c.SendString(response) - } - // Log request and response - cp.portalLogs.LogRequestAndResponse("http in/out", false, http.MethodPost, path, requestBody, string(reply.Data), msgSeed, nil) - - // Return json response - return c.SendString(string(reply.Data)) - }) - - // - // Catch the others - app.Use("/:dappId/*", func(c *fiber.Ctx) error { - cp.portalLogs.LogStartTransaction("rest-http") - msgSeed := cp.portalLogs.GetMessageSeed() - - query := "?" 
+ string(c.Request().URI().QueryString()) - path := "/" + c.Params("*") - dappID := ExtractDappIDFromFiberContext(c) - utils.LavaFormatInfo("in <<<", &map[string]string{"path": path, "dappID": dappID, "msgSeed": msgSeed}) - analytics := metrics.NewRelayAnalytics(dappID, chainID, apiInterface) - - reply, _, err := SendRelay(ctx, cp, privKey, path, query, http.MethodGet, dappID, analytics) - go cp.portalLogs.AddMetricForHttp(analytics, err, c.GetReqHeaders()) - if err != nil { - // Get unique GUID response - errMasking := cp.portalLogs.GetUniqueGuidResponseForError(err, msgSeed) - - // Log request and response - cp.portalLogs.LogRequestAndResponse("http in/out", true, http.MethodGet, path, "", errMasking, msgSeed, err) - - // Set status to internal error - c.Status(fiber.StatusInternalServerError) - - // Construct json response - response := convertToJsonError(errMasking) - - // Return error json response - return c.SendString(response) - } - // Log request and response - cp.portalLogs.LogRequestAndResponse("http in/out", false, http.MethodGet, path, "", string(reply.Data), msgSeed, nil) - - // Return json response - return c.SendString(string(reply.Data)) - }) - // - // Go - err := app.Listen(listenAddr) - if err != nil { - utils.LavaFormatError("app.Listen(listenAddr)", err, nil) - } -} - -func (nm *RestMessage) RequestedBlock() int64 { - return nm.requestedBlock -} - -func (nm *RestMessage) GetServiceApi() *spectypes.ServiceApi { - return nm.serviceApi -} - -func (nm *RestMessage) GetInterface() *spectypes.ApiInterface { - return nm.apiInterface -} - -func (nm *RestMessage) Send(ctx context.Context, ch chan interface{}) (relayReply *pairingtypes.RelayReply, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) { - if ch != nil { - return nil, "", nil, utils.LavaFormatError("Subscribe is not allowed on rest", nil, nil) - } - httpClient := http.Client{ - Timeout: getTimePerCu(nm.serviceApi.ComputeUnits), - } - - var connectionTypeSelected string = http.MethodGet - // if ConnectionType is default value or empty we will choose http.MethodGet otherwise choosing the header type provided - if nm.apiInterface.Type != "" { - connectionTypeSelected = nm.apiInterface.Type - } - - msgBuffer := bytes.NewBuffer(nm.msg) - url := nm.cp.nodeUrl + nm.path - // Only get calls uses query params the rest uses the body - if connectionTypeSelected == http.MethodGet { - url += string(nm.msg) - } - - connectCtx, cancel := context.WithTimeout(ctx, getTimePerCu(nm.serviceApi.ComputeUnits)+nm.GetExtraContextTimeout()) - defer cancel() - - req, err := http.NewRequestWithContext(connectCtx, connectionTypeSelected, url, msgBuffer) - if err != nil { - nm.Result = []byte(fmt.Sprintf("%s", err)) - return nil, "", nil, err - } - - // setting the content-type to be application/json instead of Go's default http.DefaultClient - if connectionTypeSelected == http.MethodPost || connectionTypeSelected == http.MethodPut { - req.Header.Set("Content-Type", "application/json") - } - res, err := httpClient.Do(req) - if err != nil { - nm.Result = []byte(fmt.Sprintf("%s", err)) - return nil, "", nil, err - } - - if res.Body != nil { - defer res.Body.Close() - } - - body, err := io.ReadAll(res.Body) - if err != nil { - nm.Result = []byte(fmt.Sprintf("%s", err)) - return nil, "", nil, err - } - - reply := &pairingtypes.RelayReply{ - Data: body, - } - nm.Result = body - - return reply, "", nil, nil -} diff --git a/relayer/chainproxy/rpcclient/client.go b/relayer/chainproxy/rpcclient/client.go deleted file mode 
100755 index 2e31564af8..0000000000 --- a/relayer/chainproxy/rpcclient/client.go +++ /dev/null @@ -1,683 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package rpcclient - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "net/url" - "reflect" - "strconv" - "sync/atomic" - "time" - - "github.com/ethereum/go-ethereum/log" -) - -var ( - ErrClientQuit = errors.New("client is closed") - ErrSubscriptionQueueOverflow = errors.New("subscription queue overflow") - errClientReconnected = errors.New("client reconnected") - errDead = errors.New("connection lost") -) - -const ( - // Timeouts - defaultDialTimeout = 10 * time.Second // used if context has no deadline - subscribeTimeout = 5 * time.Second // overall timeout eth_subscribe, rpc_modules calls -) - -const ( - // Subscriptions are removed when the subscriber cannot keep up. - // - // This can be worked around by supplying a channel with sufficiently sized buffer, - // but this can be inconvenient and hard to explain in the docs. Another issue with - // buffered channels is that the buffer is static even though it might not be needed - // most of the time. - // - // The approach taken here is to maintain a per-subscription linked list buffer - // shrinks on demand. If the buffer reaches the size below, the subscription is - // dropped. - maxClientSubscriptionBuffer = 20000 -) - -// BatchElem is an element in a batch request. -type BatchElem struct { - Method string - Args []interface{} - // The result is unmarshaled into this field. Result must be set to a - // non-nil pointer value of the desired type, otherwise the response will be - // discarded. - Result interface{} - // Error is set if the server returns an error for this request, or if - // unmarshaling into Result fails. It is not set for I/O errors. - Error error -} - -// Client represents a connection to an RPC server. -type Client struct { - idgen func() ID // for subscriptions - isHTTP bool // connection type: http, ws or ipc - services *serviceRegistry - - idCounter uint32 - - // This function, if non-nil, is called when the connection is lost. - reconnectFunc reconnectFunc - - // writeConn is used for writing to the connection on the caller's goroutine. It should - // only be accessed outside of dispatch, with the write lock held. The write lock is - // taken by sending on reqInit and released by sending on reqSent. 
- writeConn jsonWriter - - // for dispatch - close chan struct{} - closing chan struct{} // closed when client is quitting - didClose chan struct{} // closed when client quits - reconnected chan ServerCodec // where write/reconnect sends the new connection - readOp chan readOp // read messages - readErr chan error // errors from read - reqInit chan *requestOp // register response IDs, takes write lock - reqSent chan error // signals write completion, releases write lock - reqTimeout chan *requestOp // removes response IDs when call timeout expires -} - -type reconnectFunc func(ctx context.Context) (ServerCodec, error) - -type clientContextKey struct{} - -type clientConn struct { - codec ServerCodec - handler *handler -} - -func (c *Client) newClientConn(conn ServerCodec) *clientConn { - ctx := context.Background() - ctx = context.WithValue(ctx, clientContextKey{}, c) - ctx = context.WithValue(ctx, peerInfoContextKey{}, conn.peerInfo()) - handler := newHandler(ctx, conn, c.idgen, c.services) - return &clientConn{conn, handler} -} - -func (cc *clientConn) close(err error, inflightReq *requestOp) { - cc.handler.close(err, inflightReq) - cc.codec.close() -} - -type readOp struct { - msgs []*JsonrpcMessage - batch bool -} - -type requestOp struct { - ids []json.RawMessage - err error - resp chan *JsonrpcMessage // receives up to len(ids) responses - sub *ClientSubscription // only set for EthSubscribe requests - subId string // this is only set for tendermint subscription -} - -func (op *requestOp) wait(ctx context.Context, c *Client) (*JsonrpcMessage, error) { - select { - case <-ctx.Done(): - // Send the timeout to dispatch so it can remove the request IDs. - if !c.isHTTP { - select { - case c.reqTimeout <- op: - case <-c.closing: - } - } - return nil, ctx.Err() - case resp := <-op.resp: - return resp, op.err - } -} - -// Dial creates a new client for the given URL. -// -// The currently supported URL schemes are "http", "https", "ws" and "wss". If rawurl is a -// file name with no URL scheme, a local socket connection is established using UNIX -// domain sockets on supported platforms and named pipes on Windows. If you want to -// configure transport options, use DialHTTP, DialWebsocket or DialIPC instead. -// -// For websocket connections, the origin is set to the local host name. -// -// The client reconnects automatically if the connection is lost. -func Dial(rawurl string) (*Client, error) { - return DialContext(context.Background(), rawurl) -} - -// DialContext creates a new RPC client, just like Dial. -// -// The context is used to cancel or time out the initial connection establishment. It does -// not affect subsequent interactions with the client. -func DialContext(ctx context.Context, rawurl string) (*Client, error) { - u, err := url.Parse(rawurl) - if err != nil { - return nil, err - } - switch u.Scheme { - case "http", "https": - return DialHTTP(rawurl) - case "ws", "wss": - return DialWebsocket(ctx, rawurl, "") - case "stdio": - return DialStdIO(ctx) - case "": - return DialIPC(ctx, rawurl) - default: - return nil, fmt.Errorf("no known transport for URL scheme %q", u.Scheme) - } -} - -// ClientFromContext retrieves the client from the context, if any. This can be used to perform -// 'reverse calls' in a handler method. 
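For orientation (an editorial sketch, not part of the patch): the DialContext switch above selects a transport purely from the URL scheme, with an empty scheme falling through to a local socket or named pipe. The standalone Go program below reduces that dispatch to its essence; the endpoint strings are placeholders.

	package main

	import (
		"fmt"
		"net/url"
	)

	// transportFor mirrors the scheme dispatch in DialContext: http(s) and ws(s)
	// map to their transports, and an empty scheme means a local socket / named pipe.
	func transportFor(rawurl string) (string, error) {
		u, err := url.Parse(rawurl)
		if err != nil {
			return "", err
		}
		switch u.Scheme {
		case "http", "https":
			return "http", nil
		case "ws", "wss":
			return "websocket", nil
		case "":
			return "ipc", nil
		default:
			return "", fmt.Errorf("no known transport for URL scheme %q", u.Scheme)
		}
	}

	func main() {
		for _, endpoint := range []string{"https://node.example:8545", "wss://node.example:8546", "/tmp/node.ipc"} {
			transport, err := transportFor(endpoint)
			fmt.Println(endpoint, "->", transport, err)
		}
	}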
-func ClientFromContext(ctx context.Context) (*Client, bool) { - client, ok := ctx.Value(clientContextKey{}).(*Client) - return client, ok -} - -func newClient(initctx context.Context, connect reconnectFunc) (*Client, error) { - conn, err := connect(initctx) - if err != nil { - return nil, err - } - c := initClient(conn, randomIDGenerator(), new(serviceRegistry)) - c.reconnectFunc = connect - return c, nil -} - -func initClient(conn ServerCodec, idgen func() ID, services *serviceRegistry) *Client { - _, isHTTP := conn.(*httpConn) - c := &Client{ - isHTTP: isHTTP, - idgen: idgen, - services: services, - writeConn: conn, - close: make(chan struct{}), - closing: make(chan struct{}), - didClose: make(chan struct{}), - reconnected: make(chan ServerCodec), - readOp: make(chan readOp), - readErr: make(chan error), - reqInit: make(chan *requestOp), - reqSent: make(chan error, 1), - reqTimeout: make(chan *requestOp), - } - if !isHTTP { - go c.dispatch(conn) - } - return c -} - -// RegisterName creates a service for the given receiver type under the given name. When no -// methods on the given receiver match the criteria to be either a RPC method or a -// subscription an error is returned. Otherwise a new service is created and added to the -// service collection this client provides to the server. -func (c *Client) RegisterName(name string, receiver interface{}) error { - return c.services.registerName(name, receiver) -} - -func (c *Client) nextID() json.RawMessage { - id := atomic.AddUint32(&c.idCounter, 1) - return strconv.AppendUint(nil, uint64(id), 10) -} - -// Close closes the client, aborting any in-flight requests. -func (c *Client) Close() { - if c.isHTTP { - return - } - select { - case c.close <- struct{}{}: - <-c.didClose - case <-c.didClose: - } -} - -// SetHeader adds a custom HTTP header to the client's requests. -// This method only works for clients using HTTP, it doesn't have -// any effect for clients using another transport. -func (c *Client) SetHeader(key, value string) { - if !c.isHTTP { - return - } - conn, ok := c.writeConn.(*httpConn) - if !ok { - panic("SetHeader - c.writeConn.(*httpConn) - type assertion failed") - } - conn.mu.Lock() - conn.headers.Set(key, value) - conn.mu.Unlock() -} - -// CallContext performs a JSON-RPC call with the given arguments. If the context is -// canceled before the call has successfully returned, CallContext returns immediately. -// -// The result must be a pointer so that package json can unmarshal into it. You -// can also pass nil, in which case the result is ignored. -func (c *Client) CallContext(ctx context.Context, id json.RawMessage, method string, params interface{}) (*JsonrpcMessage, error) { - var msg *JsonrpcMessage - var err error - switch p := params.(type) { - case []interface{}: - msg, err = c.newMessageArrayWithID(method, id, p) - case map[string]interface{}: - msg, err = c.newMessageMapWithID(method, id, p) - case nil: - msg, err = c.newMessageArrayWithID(method, id, (make([]interface{}, 0))) // in case of nil, we will send it as an empty array. - default: - return nil, fmt.Errorf("%s unknown parameters type %s", p, reflect.TypeOf(p)) - } - - if err != nil { - return nil, err - } - - op := &requestOp{ids: []json.RawMessage{msg.ID}, resp: make(chan *JsonrpcMessage, 1)} - - if c.isHTTP { - err = c.sendHTTP(ctx, op, msg) - } else { - err = c.send(ctx, op, msg) - } - if err != nil { - return nil, err - } - - // dispatch has accepted the request and will close the channel when it quits. 
- - resp, err := op.wait(ctx, c) - if err != nil { - return nil, err - } - - return resp, nil -} - -// BatchCall sends all given requests as a single batch and waits for the server -// to return a response for all of them. -// -// In contrast to Call, BatchCall only returns I/O errors. Any error specific to -// a request is reported through the Error field of the corresponding BatchElem. -// -// Note that batch calls may not be executed atomically on the server side. -func (c *Client) BatchCall(b []BatchElem) error { - ctx := context.Background() - return c.BatchCallContext(ctx, b) -} - -// BatchCallContext sends all given requests as a single batch and waits for the server -// to return a response for all of them. The wait duration is bounded by the -// context's deadline. -// -// In contrast to CallContext, BatchCallContext only returns errors that have occurred -// while sending the request. Any error specific to a request is reported through the -// Error field of the corresponding BatchElem. -// -// Note that batch calls may not be executed atomically on the server side. -func (c *Client) BatchCallContext(ctx context.Context, b []BatchElem) error { - var ( - msgs = make([]*JsonrpcMessage, len(b)) - byID = make(map[string]int, len(b)) - ) - op := &requestOp{ - ids: make([]json.RawMessage, len(b)), - resp: make(chan *JsonrpcMessage, len(b)), - } - for i, elem := range b { - msg, err := c.newMessageArray(elem.Method, elem.Args...) - if err != nil { - return err - } - msgs[i] = msg - op.ids[i] = msg.ID - byID[string(msg.ID)] = i - } - - var err error - if c.isHTTP { - err = c.sendBatchHTTP(ctx, op, msgs) - } else { - err = c.send(ctx, op, msgs) - } - - // Wait for all responses to come back. - for n := 0; n < len(b) && err == nil; n++ { - var resp *JsonrpcMessage - resp, err = op.wait(ctx, c) - if err != nil { - break - } - // Find the element corresponding to this response. - // The element is guaranteed to be present because dispatch - // only sends valid IDs to our channel. - elem := &b[byID[string(resp.ID)]] - if resp.Error != nil { - elem.Error = resp.Error - continue - } - - elem.Error = json.Unmarshal(resp.Result, elem.Result) - } - return err -} - -// Notify sends a notification, i.e. a method call that doesn't expect a response. -func (c *Client) Notify(ctx context.Context, method string, args ...interface{}) error { - op := new(requestOp) - msg, err := c.newMessageArray(method, args...) - if err != nil { - return err - } - msg.ID = nil - - if c.isHTTP { - return c.sendHTTP(ctx, op, msg) - } - return c.send(ctx, op, msg) -} - -// Subscribe calls the "_subscribe" method with the given arguments, -// registering a subscription. Server notifications for the subscription are -// sent to the given channel. The element type of the channel must match the -// expected type of content returned by the subscription. -// -// The context argument cancels the RPC request that sets up the subscription but has no -// effect on the subscription after Subscribe has returned. -// -// Slow subscribers will be dropped eventually. Client buffers up to 20000 notifications -// before considering the subscriber dead. The subscription Err channel will receive -// ErrSubscriptionQueueOverflow. Use a sufficiently large buffer on the channel or ensure -// that the channel usually has at least one reader to prevent this issue. 
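A hedged usage sketch for the batch API deleted above. DialContext, Close, BatchElem and BatchCall are used exactly as declared in the removed code; the import path is the one that existed before this deletion, and the endpoint and method names are placeholders. The point it illustrates: BatchCall only returns transport errors, while per-request failures must be read from each BatchElem.Error.

	package main

	import (
		"context"
		"fmt"

		"github.com/lavanet/lava/relayer/chainproxy/rpcclient" // pre-deletion path
	)

	func main() {
		client, err := rpcclient.DialContext(context.Background(), "http://127.0.0.1:8545")
		if err != nil {
			panic(err)
		}
		defer client.Close()

		// Result must be a non-nil pointer, otherwise the response is discarded.
		batch := []rpcclient.BatchElem{
			{Method: "eth_blockNumber", Result: new(string)},
			{Method: "eth_chainId", Result: new(string)},
		}
		if err := client.BatchCall(batch); err != nil {
			panic(err) // I/O failure for the whole batch
		}
		for _, elem := range batch {
			// elem.Error carries any per-request JSON-RPC or unmarshal error
			fmt.Println(elem.Method, *elem.Result.(*string), elem.Error)
		}
	}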
-func (c *Client) Subscribe(ctx context.Context, id json.RawMessage, method string, channel interface{}, params interface{}) (*ClientSubscription, *JsonrpcMessage, error) { - // Check type of channel first. - chanVal := reflect.ValueOf(channel) - if chanVal.Kind() != reflect.Chan || chanVal.Type().ChanDir()&reflect.SendDir == 0 { - panic(fmt.Sprintf("channel argument of Subscribe has type %T, need writable channel", channel)) - } - if chanVal.IsNil() { - panic("channel given to Subscribe must not be nil") - } - if c.isHTTP { - return nil, nil, ErrNotificationsUnsupported - } - var msg *JsonrpcMessage - var err error - var subId string - var ok bool - switch p := params.(type) { - case []interface{}: - msg, err = c.newMessageArrayWithID(method, id, p) - case map[string]interface{}: - msg, err = c.newMessageMapWithID(method, id, p) - subId, ok = p["query"].(string) - if !ok { - return nil, nil, fmt.Errorf("Subscribe - p['query'].(string) - type assertion failed") - } - default: - return nil, nil, fmt.Errorf("%s unknown parameters type %s", p, reflect.TypeOf(p)) - } - if err != nil { - return nil, nil, err - } - - op := &requestOp{ - ids: []json.RawMessage{msg.ID}, - resp: make(chan *JsonrpcMessage), - sub: newClientSubscription(c, method, chanVal), - subId: subId, - } - - // Send the subscription request. - // The arrival and validity of the response is signaled on sub.quit. - if err := c.send(ctx, op, msg); err != nil { - return nil, nil, err - } - resp, err := op.wait(ctx, c) - if err != nil { - return nil, nil, err - } - - return op.sub, resp, nil -} - -func (c *Client) newMessageArrayWithID(method string, id json.RawMessage, paramsIn interface{}) (*JsonrpcMessage, error) { - var msg *JsonrpcMessage - if id == nil { - msg = &JsonrpcMessage{Version: vsn, ID: c.nextID(), Method: method} - } else { - msg = &JsonrpcMessage{Version: vsn, ID: id, Method: method} - } - if paramsIn != nil { // prevent sending "params":null - var err error - if msg.Params, err = json.Marshal(paramsIn); err != nil { - return nil, err - } - } - return msg, nil -} - -func (c *Client) newMessageArray(method string, paramsIn ...interface{}) (*JsonrpcMessage, error) { - msg := &JsonrpcMessage{Version: vsn, ID: c.nextID(), Method: method} - if paramsIn != nil { // prevent sending "params":null - var err error - if msg.Params, err = json.Marshal(paramsIn); err != nil { - return nil, err - } - } - return msg, nil -} - -func (c *Client) newMessageMapWithID(method string, id json.RawMessage, paramsIn map[string]interface{}) (*JsonrpcMessage, error) { - var msg *JsonrpcMessage - if id == nil { - msg = &JsonrpcMessage{Version: vsn, ID: c.nextID(), Method: method} - } else { - msg = &JsonrpcMessage{Version: vsn, ID: id, Method: method} - } - if paramsIn != nil { // prevent sending "params":null - var err error - if msg.Params, err = json.Marshal(paramsIn); err != nil { - return nil, err - } - - // test this too: - // - // var paramsMap = make(map[string]json.RawMessage, len(paramsIn)) - // for name, value := range paramsIn { - // valueJSON, err := json.Marshal(value) - // if err != nil { - // return nil, err - // } - // paramsMap[name] = valueJSON - // } - // if msg.Params, err = json.Marshal(paramsMap); err != nil { - // return nil, err - // } - } - return msg, nil -} - -// send registers op with the dispatch loop, then sends msg on the connection. -// if sending fails, op is deregistered. 
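A rough sketch of driving the Subscribe call shown above over a websocket transport. The Subscribe signature and the map-parameter rule (the "query" entry doubles as the subscription key) come from the deleted code; the endpoint and query are placeholders, and the channel element type as well as the ClientSubscription methods (Err, Unsubscribe) are assumptions carried over from the upstream go-ethereum client this package was forked from.

	package main

	import (
		"context"
		"fmt"

		"github.com/lavanet/lava/relayer/chainproxy/rpcclient" // pre-deletion path
	)

	func main() {
		// Subscriptions are rejected on HTTP, so dial a websocket endpoint (placeholder).
		client, err := rpcclient.DialContext(context.Background(), "ws://127.0.0.1:26657/websocket")
		if err != nil {
			panic(err)
		}
		defer client.Close()

		// Buffered channel: slow readers are dropped after ~20000 queued notifications.
		notifications := make(chan interface{}, 128) // element type is an assumption
		params := map[string]interface{}{"query": "tm.event = 'NewBlock'"}
		sub, firstReply, err := client.Subscribe(context.Background(), nil, "subscribe", notifications, params)
		if err != nil {
			panic(err)
		}
		_ = sub // Err()/Unsubscribe() assumed to match the upstream API
		fmt.Printf("subscription accepted: %+v\n", firstReply)

		for msg := range notifications {
			fmt.Printf("notification: %+v\n", msg)
		}
	}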
-func (c *Client) send(ctx context.Context, op *requestOp, msg interface{}) error { - select { - case c.reqInit <- op: - err := c.write(ctx, msg, false) - c.reqSent <- err - return err - case <-ctx.Done(): - // This can happen if the client is overloaded or unable to keep up with - // subscription notifications. - return ctx.Err() - case <-c.closing: - return ErrClientQuit - } -} - -func (c *Client) write(ctx context.Context, msg interface{}, retry bool) error { - if c.writeConn == nil { - // The previous write failed. Try to establish a new connection. - if err := c.reconnect(ctx); err != nil { - return err - } - } - err := c.writeConn.writeJSON(ctx, msg) - if err != nil { - c.writeConn = nil - if !retry { - return c.write(ctx, msg, true) - } - } - return err -} - -func (c *Client) reconnect(ctx context.Context) error { - if c.reconnectFunc == nil { - return errDead - } - - if _, ok := ctx.Deadline(); !ok { - var cancel func() - ctx, cancel = context.WithTimeout(ctx, defaultDialTimeout) - defer cancel() - } - newconn, err := c.reconnectFunc(ctx) - if err != nil { - log.Trace("RPC client reconnect failed", "err", err) - return err - } - select { - case c.reconnected <- newconn: - c.writeConn = newconn - return nil - case <-c.didClose: - newconn.close() - return ErrClientQuit - } -} - -// dispatch is the main loop of the client. -// It sends read messages to waiting calls to Call and BatchCall -// and subscription notifications to registered subscriptions. -func (c *Client) dispatch(codec ServerCodec) { - var ( - lastOp *requestOp // tracks last send operation - reqInitLock = c.reqInit // nil while the send lock is held - conn = c.newClientConn(codec) - reading = true - ) - defer func() { - close(c.closing) - if reading { - conn.close(ErrClientQuit, nil) - c.drainRead() - } - close(c.didClose) - }() - - // Spawn the initial read loop. - go c.read(codec) - - for { - select { - case <-c.close: - return - - // Read path: - case op := <-c.readOp: - if op.batch { - conn.handler.handleBatch(op.msgs) - } else { - conn.handler.handleMsg(op.msgs[0]) - } - - case err := <-c.readErr: - conn.handler.log.Debug("RPC connection read error", "err", err) - conn.close(err, lastOp) - reading = false - - // Reconnect: - case newcodec := <-c.reconnected: - log.Debug("RPC client reconnected", "reading", reading, "conn", newcodec.remoteAddr()) - if reading { - // Wait for the previous read loop to exit. This is a rare case which - // happens if this loop isn't notified in time after the connection breaks. - // In those cases the caller will notice first and reconnect. Closing the - // handler terminates all waiting requests (closing op.resp) except for - // lastOp, which will be transferred to the new handler. - conn.close(errClientReconnected, lastOp) - c.drainRead() - } - go c.read(newcodec) - reading = true - conn = c.newClientConn(newcodec) - // Re-register the in-flight request on the new handler - // because that's where it will be sent. - conn.handler.addRequestOp(lastOp) - - // Send path: - case op := <-reqInitLock: - // Stop listening for further requests until the current one has been sent. - reqInitLock = nil - lastOp = op - conn.handler.addRequestOp(op) - - case err := <-c.reqSent: - if err != nil { - // Remove response handlers for the last send. When the read loop - // goes down, it will signal all other current operations. - conn.handler.removeRequestOp(lastOp) - } - // Let the next request in. 
- reqInitLock = c.reqInit - lastOp = nil - - case op := <-c.reqTimeout: - conn.handler.removeRequestOp(op) - } - } -} - -// drainRead drops read messages until an error occurs. -func (c *Client) drainRead() { - for { - select { - case <-c.readOp: - case <-c.readErr: - return - } - } -} - -// read decodes RPC messages from a codec, feeding them into dispatch. -func (c *Client) read(codec ServerCodec) { - for { - msgs, batch, err := codec.readBatch() - if _, ok := err.(*json.SyntaxError); ok { - codec.writeJSON(context.Background(), errorMessage(&parseError{err.Error()})) - } - if err != nil { - c.readErr <- err - return - } - c.readOp <- readOp{msgs, batch} - } -} diff --git a/relayer/chainproxy/rpcclient/doc.go b/relayer/chainproxy/rpcclient/doc.go deleted file mode 100755 index 7d592556a1..0000000000 --- a/relayer/chainproxy/rpcclient/doc.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -/* -package rpcclient implements bi-directional JSON-RPC 2.0 on multiple transports. - -It provides access to the exported methods of an object across a network or other I/O -connection. After creating a server or client instance, objects can be registered to make -them visible as 'services'. Exported methods that follow specific conventions can be -called remotely. It also has support for the publish/subscribe pattern. - -# RPC Methods - -Methods that satisfy the following criteria are made available for remote access: - - - method must be exported - - method returns 0, 1 (response or error) or 2 (response and error) values - -An example method: - - func (s *CalcService) Add(a, b int) (int, error) - -When the returned error isn't nil the returned integer is ignored and the error is sent -back to the client. Otherwise the returned integer is sent back to the client. - -Optional arguments are supported by accepting pointer values as arguments. E.g. if we want -to do the addition in an optional finite field we can accept a mod argument as pointer -value. - - func (s *CalcService) Add(a, b int, mod *int) (int, error) - -This RPC method can be called with 2 integers and a null value as third argument. In that -case the mod argument will be nil. Or it can be called with 3 integers, in that case mod -will be pointing to the given third argument. Since the optional argument is the last -argument the RPC package will also accept 2 integers as arguments. It will pass the mod -argument as nil to the RPC method. - -The server offers the ServeCodec method which accepts a ServerCodec instance. It will read -requests from the codec, process the request and sends the response back to the client -using the codec. The server can execute requests concurrently. Responses can be sent back -to the client out of order. 
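Before the package's own server example that follows, a standalone sketch (not part of the patch) of the optional-argument convention described above: a trailing pointer parameter arrives as nil when the caller omits it, so the method can tell "absent" apart from a zero value.

	package main

	import "fmt"

	type CalcService struct{}

	// Add satisfies the RPC method criteria above: exported, returning a result
	// plus an error. The optional modulus is a pointer so callers may omit it.
	func (s *CalcService) Add(a, b int, mod *int) (int, error) {
		sum := a + b
		if mod == nil {
			return sum, nil
		}
		return sum % *mod, nil
	}

	func main() {
		s := new(CalcService)
		plain, _ := s.Add(2, 3, nil) // third argument omitted by the caller
		m := 4
		modded, _ := s.Add(2, 3, &m)
		fmt.Println(plain, modded) // 5 1
	}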
- -An example server which uses the JSON codec: - - type CalculatorService struct {} - - func (s *CalculatorService) Add(a, b int) int { - return a + b - } - - func (s *CalculatorService) Div(a, b int) (int, error) { - if b == 0 { - return 0, errors.New("divide by zero") - } - return a/b, nil - } - - calculator := new(CalculatorService) - server := NewServer() - server.RegisterName("calculator", calculator) - l, _ := net.ListenUnix("unix", &net.UnixAddr{Net: "unix", Name: "/tmp/calculator.sock"}) - server.ServeListener(l) - -# Subscriptions - -The package also supports the publish subscribe pattern through the use of subscriptions. -A method that is considered eligible for notifications must satisfy the following -criteria: - - - method must be exported - - first method argument type must be context.Context - - method must have return types (rpc.Subscription, error) - -An example method: - - func (s *BlockChainService) NewBlocks(ctx context.Context) (rpc.Subscription, error) { - ... - } - -When the service containing the subscription method is registered to the server, for -example under the "blockchain" namespace, a subscription is created by calling the -"blockchain_subscribe" method. - -Subscriptions are deleted when the user sends an unsubscribe request or when the -connection which was used to create the subscription is closed. This can be initiated by -the client and server. The server will close the connection for any write error. - -For more information about subscriptions, see https://github.com/ethereum/go-ethereum/wiki/RPC-PUB-SUB. - -# Reverse Calls - -In any method handler, an instance of rpc.Client can be accessed through the -ClientFromContext method. Using this client instance, server-to-client method calls can be -performed on the RPC connection. -*/ -package rpcclient diff --git a/relayer/chainproxy/rpcclient/endpoints.go b/relayer/chainproxy/rpcclient/endpoints.go deleted file mode 100755 index e84e376b74..0000000000 --- a/relayer/chainproxy/rpcclient/endpoints.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package rpcclient - -import ( - "net" - "strings" - - "github.com/ethereum/go-ethereum/log" -) - -// StartIPCEndpoint starts an IPC endpoint. -func StartIPCEndpoint(ipcEndpoint string, apis []API) (net.Listener, *Server, error) { - // Register all the APIs exposed by the services. 
- var ( - handler = NewServer() - regMap = make(map[string]struct{}) - registered []string - ) - for _, api := range apis { - if err := handler.RegisterName(api.Namespace, api.Service); err != nil { - log.Info("IPC registration failed", "namespace", api.Namespace, "error", err) - return nil, nil, err - } - if _, ok := regMap[api.Namespace]; !ok { - registered = append(registered, api.Namespace) - regMap[api.Namespace] = struct{}{} - } - } - log.Debug("IPCs registered", "namespaces", strings.Join(registered, ",")) - // All APIs registered, start the IPC listener. - listener, err := ipcListen(ipcEndpoint) - if err != nil { - return nil, nil, err - } - go handler.ServeListener(listener) - return listener, handler, nil -} diff --git a/relayer/chainproxy/rpcclient/errors.go b/relayer/chainproxy/rpcclient/errors.go deleted file mode 100755 index ff3b344f99..0000000000 --- a/relayer/chainproxy/rpcclient/errors.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package rpcclient - -import "fmt" - -// HTTPError is returned by client operations when the HTTP status code of the -// response is not a 2xx status. -type HTTPError struct { - StatusCode int - Status string - Body []byte -} - -func (err HTTPError) Error() string { - if len(err.Body) == 0 { - return err.Status - } - return fmt.Sprintf("%v: %s", err.Status, err.Body) -} - -// Error wraps RPC errors, which contain an error code in addition to the message. -type Error interface { - Error() string // returns the message - ErrorCode() int // returns the code -} - -// A DataError contains some data in addition to the error message. -type DataError interface { - Error() string // returns the message - ErrorData() interface{} // returns the error data -} - -// Error types defined below are the built-in JSON-RPC errors. - -var ( - _ Error = new(methodNotFoundError) - _ Error = new(subscriptionNotFoundError) - _ Error = new(parseError) - _ Error = new(invalidRequestError) - _ Error = new(invalidMessageError) - _ Error = new(invalidParamsError) -) - -const defaultErrorCode = -32000 - -type methodNotFoundError struct{ method string } - -func (e *methodNotFoundError) ErrorCode() int { return -32601 } - -func (e *methodNotFoundError) Error() string { - return fmt.Sprintf("the method %s does not exist/is not available", e.method) -} - -type subscriptionNotFoundError struct{ namespace, subscription string } - -func (e *subscriptionNotFoundError) ErrorCode() int { return -32601 } - -func (e *subscriptionNotFoundError) Error() string { - return fmt.Sprintf("no %q subscription in %s namespace", e.subscription, e.namespace) -} - -// Invalid JSON was received by the server. 
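A hedged sketch of how a caller separates the error kinds defined above: HTTPError for non-2xx transport responses versus the Error field carried inside a JSON-RPC reply. CallContext, HTTPError and the exported JsonrpcMessage fields are used as they appear in the deleted code; the import path is the pre-deletion one and the endpoint and method are placeholders.

	package main

	import (
		"context"
		"errors"
		"fmt"

		"github.com/lavanet/lava/relayer/chainproxy/rpcclient" // pre-deletion path
	)

	func main() {
		client, err := rpcclient.DialContext(context.Background(), "http://127.0.0.1:8545")
		if err != nil {
			panic(err)
		}
		defer client.Close()

		resp, err := client.CallContext(context.Background(), nil, "eth_blockNumber", nil)
		var httpErr rpcclient.HTTPError
		switch {
		case errors.As(err, &httpErr):
			// non-2xx status: code, status line and body survive on the error value
			fmt.Println("http error:", httpErr.StatusCode, string(httpErr.Body))
		case err != nil:
			fmt.Println("transport error:", err)
		case resp.Error != nil:
			// error reported inside the JSON-RPC envelope
			fmt.Println("rpc error:", resp.Error)
		default:
			fmt.Println("result:", string(resp.Result))
		}
	}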
-type parseError struct{ message string } - -func (e *parseError) ErrorCode() int { return -32700 } - -func (e *parseError) Error() string { return e.message } - -// received message isn't a valid request -type invalidRequestError struct{ message string } - -func (e *invalidRequestError) ErrorCode() int { return -32600 } - -func (e *invalidRequestError) Error() string { return e.message } - -// received message is invalid -type invalidMessageError struct{ message string } - -func (e *invalidMessageError) ErrorCode() int { return -32700 } - -func (e *invalidMessageError) Error() string { return e.message } - -// unable to decode supplied params, or an invalid number of parameters -type invalidParamsError struct{ message string } - -func (e *invalidParamsError) ErrorCode() int { return -32602 } - -func (e *invalidParamsError) Error() string { return e.message } diff --git a/relayer/chainproxy/rpcclient/handler.go b/relayer/chainproxy/rpcclient/handler.go deleted file mode 100755 index ce34fc4395..0000000000 --- a/relayer/chainproxy/rpcclient/handler.go +++ /dev/null @@ -1,435 +0,0 @@ -// Copyright 2019 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package rpcclient - -import ( - "context" - "encoding/json" - "reflect" - "strconv" - "strings" - "sync" - "time" - - "github.com/ethereum/go-ethereum/log" - "github.com/lavanet/lava/utils" -) - -// handler handles JSON-RPC messages. There is one handler per connection. Note that -// handler is not safe for concurrent use. Message handling never blocks indefinitely -// because RPCs are processed on background goroutines launched by handler. -// -// The entry points for incoming messages are: -// -// h.handleMsg(message) -// h.handleBatch(message) -// -// Outgoing calls use the requestOp struct. Register the request before sending it -// on the connection: -// -// op := &requestOp{ids: ...} -// h.addRequestOp(op) -// -// Now send the request, then wait for the reply to be delivered through handleMsg: -// -// if err := op.wait(...); err != nil { -// h.removeRequestOp(op) // timeout, etc. 
-// } -type handler struct { - reg *serviceRegistry - unsubscribeCb *callback - idgen func() ID // subscription ID generator - respWait map[string]*requestOp // active client requests - clientSubs map[string]*ClientSubscription // active client subscriptions - callWG sync.WaitGroup // pending call goroutines - rootCtx context.Context // canceled by close() - cancelRoot func() // cancel function for rootCtx - conn jsonWriter // where responses will be sent - log log.Logger - allowSubscribe bool - - subLock utils.LavaMutex - serverSubs map[ID]*Subscription -} - -type callProc struct { - ctx context.Context - notifiers []*Notifier -} - -func newHandler(connCtx context.Context, conn jsonWriter, idgen func() ID, reg *serviceRegistry) *handler { - rootCtx, cancelRoot := context.WithCancel(connCtx) - h := &handler{ - reg: reg, - idgen: idgen, - conn: conn, - respWait: make(map[string]*requestOp), - clientSubs: make(map[string]*ClientSubscription), - rootCtx: rootCtx, - cancelRoot: cancelRoot, - allowSubscribe: true, - serverSubs: make(map[ID]*Subscription), - log: log.Root(), - } - if conn.remoteAddr() != "" { - h.log = h.log.New("conn", conn.remoteAddr()) - } - h.unsubscribeCb = newCallback(reflect.Value{}, reflect.ValueOf(h.unsubscribe)) - return h -} - -// handleBatch executes all messages in a batch and returns the responses. -func (h *handler) handleBatch(msgs []*JsonrpcMessage) { - // Emit error response for empty batches: - if len(msgs) == 0 { - h.startCallProc(func(cp *callProc) { - h.conn.writeJSON(cp.ctx, errorMessage(&invalidRequestError{"empty batch"})) - }) - return - } - - // Handle non-call messages first: - calls := make([]*JsonrpcMessage, 0, len(msgs)) - for _, msg := range msgs { - if handled := h.handleImmediate(msg); !handled { - calls = append(calls, msg) - } - } - if len(calls) == 0 { - return - } - // Process calls on a goroutine because they may block indefinitely: - h.startCallProc(func(cp *callProc) { - answers := make([]*JsonrpcMessage, 0, len(msgs)) - for _, msg := range calls { - if answer := h.handleCallMsg(cp, msg); answer != nil { - answers = append(answers, answer) - } - } - h.addSubscriptions(cp.notifiers) - if len(answers) > 0 { - h.conn.writeJSON(cp.ctx, answers) - } - for _, n := range cp.notifiers { - n.activate() - } - }) -} - -// handleMsg handles a single message. -func (h *handler) handleMsg(msg *JsonrpcMessage) { - if ok := h.handleImmediate(msg); ok { - return - } - h.startCallProc(func(cp *callProc) { - answer := h.handleCallMsg(cp, msg) - h.addSubscriptions(cp.notifiers) - if answer != nil { - h.conn.writeJSON(cp.ctx, answer) - } - for _, n := range cp.notifiers { - n.activate() - } - }) -} - -// close cancels all requests except for inflightReq and waits for -// call goroutines to shut down. -func (h *handler) close(err error, inflightReq *requestOp) { - h.cancelAllRequests(err, inflightReq) - h.callWG.Wait() - h.cancelRoot() - h.cancelServerSubscriptions(err) -} - -// addRequestOp registers a request operation. -func (h *handler) addRequestOp(op *requestOp) { - for _, id := range op.ids { - h.respWait[string(id)] = op - } -} - -// removeRequestOps stops waiting for the given request IDs. -func (h *handler) removeRequestOp(op *requestOp) { - for _, id := range op.ids { - delete(h.respWait, string(id)) - } -} - -// cancelAllRequests unblocks and removes pending requests and active subscriptions. 
-func (h *handler) cancelAllRequests(err error, inflightReq *requestOp) { - didClose := make(map[*requestOp]bool) - if inflightReq != nil { - didClose[inflightReq] = true - } - - for id, op := range h.respWait { - // Remove the op so that later calls will not close op.resp again. - delete(h.respWait, id) - - if !didClose[op] { - op.err = err - close(op.resp) - didClose[op] = true - } - } - for id, sub := range h.clientSubs { - delete(h.clientSubs, id) - sub.close(err) - } -} - -func (h *handler) addSubscriptions(nn []*Notifier) { - h.subLock.Lock() - defer h.subLock.Unlock() - - for _, n := range nn { - if sub := n.takeSubscription(); sub != nil { - h.serverSubs[sub.ID] = sub - } - } -} - -// cancelServerSubscriptions removes all subscriptions and closes their error channels. -func (h *handler) cancelServerSubscriptions(err error) { - h.subLock.Lock() - defer h.subLock.Unlock() - - for id, s := range h.serverSubs { - s.err <- err - close(s.err) - delete(h.serverSubs, id) - } -} - -// startCallProc runs fn in a new goroutine and starts tracking it in the h.calls wait group. -func (h *handler) startCallProc(fn func(*callProc)) { - h.callWG.Add(1) - go func() { - ctx, cancel := context.WithCancel(h.rootCtx) - defer h.callWG.Done() - defer cancel() - fn(&callProc{ctx: ctx}) - }() -} - -// handleImmediate executes non-call messages. It returns false if the message is a -// call or requires a reply. -func (h *handler) handleImmediate(msg *JsonrpcMessage) bool { - start := time.Now() - switch { - case msg.isTendermintNotification(): - h.handleSubscriptionResultTendermint(msg) - return true - case msg.isEthereumNotification(): - if strings.HasSuffix(msg.Method, notificationMethodSuffix) { - h.handleSubscriptionResultEthereum(msg) - return true - } - return false - case msg.isResponse(): - h.handleResponse(msg) - h.log.Trace("Handled RPC response", "reqid", idForLog{msg.ID}, "duration", time.Since(start)) - return true - default: - return false - } -} - -// handleSubscriptionResult processes subscription notifications. -func (h *handler) handleSubscriptionResultEthereum(msg *JsonrpcMessage) { - var result ethereumSubscriptionResult - if err := json.Unmarshal(msg.Params, &result); err != nil { - h.log.Debug("Dropping invalid subscription message") - return - } - if h.clientSubs[result.ID] != nil { - h.clientSubs[result.ID].deliver(msg) - } -} - -func (h *handler) handleSubscriptionResultTendermint(msg *JsonrpcMessage) { - var result tendermintSubscriptionResult - if err := json.Unmarshal(msg.Result, &result); err != nil { - h.log.Debug("Dropping invalid subscription message") - return - } - if h.clientSubs[result.Query] != nil { - h.clientSubs[result.Query].deliver(msg) - } -} - -// handleResponse processes method call responses. -func (h *handler) handleResponse(msg *JsonrpcMessage) { - op := h.respWait[string(msg.ID)] - if op == nil { - h.log.Debug("Unsolicited RPC response", "reqid", idForLog{msg.ID}) - return - } - delete(h.respWait, string(msg.ID)) - // For normal responses, just forward the reply to Call/BatchCall. - op.resp <- msg - if op.sub == nil { - return - } - // For subscription responses, start the subscription if the server - // indicates success. EthSubscribe gets unblocked in either case through - // the op.resp channel. 
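A toy, dependency-free sketch of the correlation step handleResponse performs above: pending requests are indexed by the string form of the JSON-RPC ID, and an arriving response is removed from the wait map and pushed onto that request's channel. The types here are simplified stand-ins, not the package's own.

	package main

	import (
		"encoding/json"
		"fmt"
	)

	type message struct {
		ID     json.RawMessage
		Result json.RawMessage
	}

	type pendingRequest struct{ resp chan *message }

	func main() {
		// respWait mirrors handler.respWait: waiters keyed by string(msg.ID).
		respWait := map[string]*pendingRequest{}

		op := &pendingRequest{resp: make(chan *message, 1)}
		respWait["1"] = op

		// A response arrives: look up the waiter, remove it so it cannot be
		// signaled twice, then forward the message to the blocked caller.
		incoming := &message{ID: json.RawMessage(`1`), Result: json.RawMessage(`"0x10"`)}
		if waiter, ok := respWait[string(incoming.ID)]; ok {
			delete(respWait, string(incoming.ID))
			waiter.resp <- incoming
		}

		fmt.Println("delivered:", string((<-op.resp).Result))
	}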
- defer close(op.resp) - if msg.Error != nil { - op.err = msg.Error - return - } - - if op.subId != "" { - go op.sub.run() - h.clientSubs[op.subId] = op.sub - } else if op.err = json.Unmarshal(msg.Result, &op.sub.subid); op.err == nil { - go op.sub.run() - h.clientSubs[op.sub.subid] = op.sub - } -} - -// handleCallMsg executes a call message and returns the answer. -func (h *handler) handleCallMsg(ctx *callProc, msg *JsonrpcMessage) *JsonrpcMessage { - start := time.Now() - switch { - case msg.isEthereumNotification(), msg.isTendermintNotification(): - h.handleCall(ctx, msg) - h.log.Debug("Served "+msg.Method, "duration", time.Since(start)) - return nil - case msg.isCall(): - resp := h.handleCall(ctx, msg) - var ctx []interface{} - ctx = append(ctx, "reqid", idForLog{msg.ID}, "duration", time.Since(start)) - if resp.Error != nil { - ctx = append(ctx, "err", resp.Error.Message) - if resp.Error.Data != nil { - ctx = append(ctx, "errdata", resp.Error.Data) - } - h.log.Warn("Served "+msg.Method, ctx...) - } else { - h.log.Debug("Served "+msg.Method, ctx...) - } - return resp - case msg.hasValidID(): - return msg.errorResponse(&invalidRequestError{"invalid request"}) - default: - return errorMessage(&invalidRequestError{"invalid request"}) - } -} - -// handleCall processes method calls. -func (h *handler) handleCall(cp *callProc, msg *JsonrpcMessage) *JsonrpcMessage { - if msg.isSubscribe() { - return h.handleSubscribe(cp, msg) - } - var callb *callback - if msg.isUnsubscribe() { - callb = h.unsubscribeCb - } else { - callb = h.reg.callback(msg.Method) - } - if callb == nil { - return msg.errorResponse(&methodNotFoundError{method: msg.Method}) - } - args, err := parsePositionalArguments(msg.Params, callb.argTypes) - if err != nil { - return msg.errorResponse(&invalidParamsError{err.Error()}) - } - start := time.Now() - answer := h.runMethod(cp.ctx, msg, callb, args) - - // Collect the statistics for RPC calls if metrics is enabled. - // We only care about pure rpc call. Filter out subscription. - if callb != h.unsubscribeCb { - rpcRequestGauge.Inc(1) - if answer.Error != nil { - failedRequestGauge.Inc(1) - } else { - successfulRequestGauge.Inc(1) - } - rpcServingTimer.UpdateSince(start) - newRPCServingTimer(msg.Method, answer.Error == nil).UpdateSince(start) - } - return answer -} - -// handleSubscribe processes *_subscribe method calls. -func (h *handler) handleSubscribe(cp *callProc, msg *JsonrpcMessage) *JsonrpcMessage { - if !h.allowSubscribe { - return msg.errorResponse(ErrNotificationsUnsupported) - } - - // Subscription method name is first argument. - name, err := parseSubscriptionName(msg.Params) - if err != nil { - return msg.errorResponse(&invalidParamsError{err.Error()}) - } - namespace := msg.namespace() - callb := h.reg.subscription(namespace, name) - if callb == nil { - return msg.errorResponse(&subscriptionNotFoundError{namespace, name}) - } - - // Parse subscription name arg too, but remove it before calling the callback. - argTypes := append([]reflect.Type{stringType}, callb.argTypes...) - args, err := parsePositionalArguments(msg.Params, argTypes) - if err != nil { - return msg.errorResponse(&invalidParamsError{err.Error()}) - } - args = args[1:] - - // Install notifier in context so the subscription handler can find it. - n := &Notifier{h: h, namespace: namespace} - cp.notifiers = append(cp.notifiers, n) - ctx := context.WithValue(cp.ctx, notifierKey{}, n) - - return h.runMethod(ctx, msg, callb, args) -} - -// runMethod runs the Go callback for an RPC method. 
-func (h *handler) runMethod(ctx context.Context, msg *JsonrpcMessage, callb *callback, args []reflect.Value) *JsonrpcMessage { - result, err := callb.call(ctx, msg.Method, args) - if err != nil { - return msg.errorResponse(err) - } - return msg.response(result) -} - -// unsubscribe is the callback function for all *_unsubscribe calls. -func (h *handler) unsubscribe(ctx context.Context, id ID) (bool, error) { - h.subLock.Lock() - defer h.subLock.Unlock() - - s := h.serverSubs[id] - if s == nil { - return false, ErrSubscriptionNotFound - } - close(s.err) - delete(h.serverSubs, id) - return true, nil -} - -type idForLog struct{ json.RawMessage } - -func (id idForLog) String() string { - if s, err := strconv.Unquote(string(id.RawMessage)); err == nil { - return s - } - return string(id.RawMessage) -} diff --git a/relayer/chainproxy/rpcclient/http.go b/relayer/chainproxy/rpcclient/http.go deleted file mode 100755 index 261effc67a..0000000000 --- a/relayer/chainproxy/rpcclient/http.go +++ /dev/null @@ -1,298 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package rpcclient - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "mime" - "net/http" - "net/url" - "sync" - "time" - - "github.com/lavanet/lava/utils" -) - -const ( - maxRequestContentLength = 1024 * 1024 * 5 - contentType = "application/json" -) - -// https://www.jsonrpc.org/historical/json-rpc-over-http.html#id13 -var acceptedContentTypes = []string{contentType, "application/json-rpc", "application/jsonrequest"} - -type httpConn struct { - client *http.Client - url string - closeOnce sync.Once - closeCh chan interface{} - mu utils.LavaMutex // protects headers - headers http.Header -} - -// httpConn implements ServerCodec, but it is treated specially by Client -// and some methods don't work. The panic() stubs here exist to ensure -// this special treatment is correct. - -func (hc *httpConn) writeJSON(context.Context, interface{}) error { - panic("writeJSON called on httpConn") -} - -func (hc *httpConn) peerInfo() PeerInfo { - panic("peerInfo called on httpConn") -} - -func (hc *httpConn) remoteAddr() string { - return hc.url -} - -func (hc *httpConn) readBatch() ([]*JsonrpcMessage, bool, error) { - <-hc.closeCh - return nil, false, io.EOF -} - -func (hc *httpConn) close() { - hc.closeOnce.Do(func() { close(hc.closeCh) }) -} - -func (hc *httpConn) closed() <-chan interface{} { - return hc.closeCh -} - -// HTTPTimeouts represents the configuration params for the HTTP RPC server. -type HTTPTimeouts struct { - // ReadTimeout is the maximum duration for reading the entire - // request, including the body. 
- // - // Because ReadTimeout does not let Handlers make per-request - // decisions on each request body's acceptable deadline or - // upload rate, most users will prefer to use - // ReadHeaderTimeout. It is valid to use them both. - ReadTimeout time.Duration - - // WriteTimeout is the maximum duration before timing out - // writes of the response. It is reset whenever a new - // request's header is read. Like ReadTimeout, it does not - // let Handlers make decisions on a per-request basis. - WriteTimeout time.Duration - - // IdleTimeout is the maximum amount of time to wait for the - // next request when keep-alives are enabled. If IdleTimeout - // is zero, the value of ReadTimeout is used. If both are - // zero, ReadHeaderTimeout is used. - IdleTimeout time.Duration -} - -// DefaultHTTPTimeouts represents the default timeout values used if further -// configuration is not provided. -var DefaultHTTPTimeouts = HTTPTimeouts{ - ReadTimeout: 30 * time.Second, - WriteTimeout: 30 * time.Second, - IdleTimeout: 120 * time.Second, -} - -// DialHTTPWithClient creates a new RPC client that connects to an RPC server over HTTP -// using the provided HTTP Client. -func DialHTTPWithClient(endpoint string, client *http.Client) (*Client, error) { - // Sanity check URL so we don't end up with a client that will fail every request. - _, err := url.Parse(endpoint) - if err != nil { - return nil, err - } - - initctx := context.Background() - headers := make(http.Header, 2) - headers.Set("accept", contentType) - headers.Set("content-type", contentType) - return newClient(initctx, func(context.Context) (ServerCodec, error) { - hc := &httpConn{ - client: client, - headers: headers, - url: endpoint, - closeCh: make(chan interface{}), - } - return hc, nil - }) -} - -// DialHTTP creates a new RPC client that connects to an RPC server over HTTP. 
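A usage sketch for the HTTP transport above: DialHTTPWithClient accepts a caller-tuned http.Client, and SetHeader (HTTP only) attaches extra request headers. The signatures come from the deleted code; the import path is the pre-deletion one, and the endpoint, header and method values are placeholders.

	package main

	import (
		"context"
		"fmt"
		"net/http"
		"time"

		"github.com/lavanet/lava/relayer/chainproxy/rpcclient" // pre-deletion path
	)

	func main() {
		httpClient := &http.Client{Timeout: 15 * time.Second}
		client, err := rpcclient.DialHTTPWithClient("http://127.0.0.1:8545", httpClient)
		if err != nil {
			panic(err)
		}
		defer client.Close()

		// Only meaningful on HTTP connections; it has no effect for ws/ipc transports.
		client.SetHeader("x-api-key", "placeholder-key")

		resp, err := client.CallContext(context.Background(), nil, "eth_blockNumber", nil)
		if err != nil {
			panic(err)
		}
		fmt.Println(string(resp.Result))
	}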
-func DialHTTP(endpoint string) (*Client, error) { - return DialHTTPWithClient(endpoint, new(http.Client)) -} - -func (c *Client) sendHTTP(ctx context.Context, op *requestOp, msg interface{}) error { - hc, ok := c.writeConn.(*httpConn) - if !ok { - return fmt.Errorf("sendHTTP - c.writeConn.(*httpConn) - type assertion failed" + fmt.Sprintf("%s", c.writeConn)) - } - respBody, err := hc.doRequest(ctx, msg) - if err != nil { - return err - } - defer respBody.Close() - - var respmsg JsonrpcMessage - if err := json.NewDecoder(respBody).Decode(&respmsg); err != nil { - return err - } - op.resp <- &respmsg - return nil -} - -func (c *Client) sendBatchHTTP(ctx context.Context, op *requestOp, msgs []*JsonrpcMessage) error { - hc, ok := c.writeConn.(*httpConn) - if !ok { - return fmt.Errorf("sendBatchHTTP - c.writeConn.(*httpConn) - type assertion failed, type:" + fmt.Sprintf("%s", c.writeConn)) - } - respBody, err := hc.doRequest(ctx, msgs) - if err != nil { - return err - } - defer respBody.Close() - var respmsgs []JsonrpcMessage - if err := json.NewDecoder(respBody).Decode(&respmsgs); err != nil { - return err - } - for i := 0; i < len(respmsgs); i++ { - op.resp <- &respmsgs[i] - } - return nil -} - -func (hc *httpConn) doRequest(ctx context.Context, msg interface{}) (io.ReadCloser, error) { - body, err := json.Marshal(msg) - if err != nil { - return nil, err - } - req, err := http.NewRequestWithContext(ctx, "POST", hc.url, io.NopCloser(bytes.NewReader(body))) - if err != nil { - return nil, err - } - req.ContentLength = int64(len(body)) - req.GetBody = func() (io.ReadCloser, error) { return io.NopCloser(bytes.NewReader(body)), nil } - - // set headers - hc.mu.Lock() - req.Header = hc.headers.Clone() - hc.mu.Unlock() - - // do request - resp, err := hc.client.Do(req) - if err != nil { - return nil, err - } - if resp.StatusCode < 200 || resp.StatusCode >= 300 { - var buf bytes.Buffer - var body []byte - if _, err := buf.ReadFrom(resp.Body); err == nil { - body = buf.Bytes() - } - - return nil, HTTPError{ - Status: resp.Status, - StatusCode: resp.StatusCode, - Body: body, - } - } - return resp.Body, nil -} - -// httpServerConn turns a HTTP connection into a Conn. -type httpServerConn struct { - io.Reader - io.Writer - r *http.Request -} - -func newHTTPServerConn(r *http.Request, w http.ResponseWriter) ServerCodec { - body := io.LimitReader(r.Body, maxRequestContentLength) - conn := &httpServerConn{Reader: body, Writer: w, r: r} - return NewCodec(conn) -} - -// Close does nothing and always returns nil. -func (t *httpServerConn) Close() error { return nil } - -// RemoteAddr returns the peer address of the underlying connection. -func (t *httpServerConn) RemoteAddr() string { - return t.r.RemoteAddr -} - -// SetWriteDeadline does nothing and always returns nil. -func (t *httpServerConn) SetWriteDeadline(time.Time) error { return nil } - -// ServeHTTP serves JSON-RPC requests over HTTP. -func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { - // Permit dumb empty requests for remote health-checks (AWS) - if r.Method == http.MethodGet && r.ContentLength == 0 && r.URL.RawQuery == "" { - w.WriteHeader(http.StatusOK) - return - } - if code, err := validateRequest(r); err != nil { - http.Error(w, err.Error(), code) - return - } - - // Create request-scoped context. 
- connInfo := PeerInfo{Transport: "http", RemoteAddr: r.RemoteAddr} - connInfo.HTTP.Version = r.Proto - connInfo.HTTP.Host = r.Host - connInfo.HTTP.Origin = r.Header.Get("Origin") - connInfo.HTTP.UserAgent = r.Header.Get("User-Agent") - ctx := r.Context() - ctx = context.WithValue(ctx, peerInfoContextKey{}, connInfo) - - // All checks passed, create a codec that reads directly from the request body - // until EOF, writes the response to w, and orders the server to process a - // single request. - w.Header().Set("content-type", contentType) - codec := newHTTPServerConn(r, w) - defer codec.close() - s.serveSingleRequest(ctx, codec) -} - -// validateRequest returns a non-zero response code and error message if the -// request is invalid. -func validateRequest(r *http.Request) (int, error) { - if r.Method == http.MethodPut || r.Method == http.MethodDelete { - return http.StatusMethodNotAllowed, errors.New("method not allowed") - } - if r.ContentLength > maxRequestContentLength { - err := fmt.Errorf("content length too large (%d>%d)", r.ContentLength, maxRequestContentLength) - return http.StatusRequestEntityTooLarge, err - } - // Allow OPTIONS (regardless of content-type) - if r.Method == http.MethodOptions { - return 0, nil - } - // Check content-type - if mt, _, err := mime.ParseMediaType(r.Header.Get("content-type")); err == nil { - for _, accepted := range acceptedContentTypes { - if accepted == mt { - return 0, nil - } - } - } - // Invalid content-type - err := fmt.Errorf("invalid content type, only %s is supported", contentType) - return http.StatusUnsupportedMediaType, err -} diff --git a/relayer/chainproxy/rpcclient/inproc.go b/relayer/chainproxy/rpcclient/inproc.go deleted file mode 100755 index bfc4966ba2..0000000000 --- a/relayer/chainproxy/rpcclient/inproc.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package rpcclient - -import ( - "context" - "net" -) - -// DialInProc attaches an in-process connection to the given RPC server. -func DialInProc(handler *Server) *Client { - initctx := context.Background() - c, _ := newClient(initctx, func(context.Context) (ServerCodec, error) { - p1, p2 := net.Pipe() - go handler.ServeCodec(NewCodec(p1), 0) - return NewCodec(p2), nil - }) - return c -} diff --git a/relayer/chainproxy/rpcclient/ipc.go b/relayer/chainproxy/rpcclient/ipc.go deleted file mode 100755 index 74a2daa3dd..0000000000 --- a/relayer/chainproxy/rpcclient/ipc.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. 
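Because the ServeHTTP method above makes *Server a standard http.Handler, the JSON-RPC server can be mounted directly on a net/http mux. A hedged sketch: NewServer and RegisterName appear in the deleted package's doc.go example and endpoints.go; the service, path and listen address here are placeholders.

	package main

	import (
		"net/http"

		"github.com/lavanet/lava/relayer/chainproxy/rpcclient" // pre-deletion path
	)

	type EchoService struct{}

	// Say follows the exported-method convention: one result and one error return.
	func (s *EchoService) Say(msg string) (string, error) { return msg, nil }

	func main() {
		server := rpcclient.NewServer()
		if err := server.RegisterName("echo", new(EchoService)); err != nil {
			panic(err)
		}

		mux := http.NewServeMux()
		mux.Handle("/", server) // GET with an empty body answers 200 for health checks; calls go over POST
		_ = http.ListenAndServe("127.0.0.1:8552", mux)
	}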
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package rpcclient - -import ( - "context" - "net" - - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/p2p/netutil" -) - -// ServeListener accepts connections on l, serving JSON-RPC on them. -func (s *Server) ServeListener(l net.Listener) error { - for { - conn, err := l.Accept() - if netutil.IsTemporaryError(err) { - log.Warn("RPC accept error", "err", err) - continue - } else if err != nil { - return err - } - log.Trace("Accepted RPC connection", "conn", conn.RemoteAddr()) - go s.ServeCodec(NewCodec(conn), 0) - } -} - -// DialIPC create a new IPC client that connects to the given endpoint. On Unix it assumes -// the endpoint is the full path to a unix socket, and Windows the endpoint is an -// identifier for a named pipe. -// -// The context is used for the initial connection establishment. It does not -// affect subsequent interactions with the client. -func DialIPC(ctx context.Context, endpoint string) (*Client, error) { - return newClient(ctx, func(ctx context.Context) (ServerCodec, error) { - conn, err := newIPCConnection(ctx, endpoint) - if err != nil { - return nil, err - } - return NewCodec(conn), err - }) -} diff --git a/relayer/chainproxy/rpcclient/ipc_js.go b/relayer/chainproxy/rpcclient/ipc_js.go deleted file mode 100755 index cfb8cd223d..0000000000 --- a/relayer/chainproxy/rpcclient/ipc_js.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -//go:build js -// +build js - -package rpcclient - -import ( - "context" - "errors" - "net" -) - -var errNotSupported = errors.New("rpc: not supported") - -// ipcListen will create a named pipe on the given endpoint. -func ipcListen(endpoint string) (net.Listener, error) { - return nil, errNotSupported -} - -// newIPCConnection will connect to a named pipe with the given endpoint as name. 
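A short sketch of the IPC dial path above; the socket path and method are placeholders (on Windows the endpoint would be a named-pipe name, as the platform files below show), and the import path is the pre-deletion one.

	package main

	import (
		"context"
		"fmt"
		"time"

		"github.com/lavanet/lava/relayer/chainproxy/rpcclient" // pre-deletion path
	)

	func main() {
		// The deadline only bounds the initial connection, not later calls.
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()

		client, err := rpcclient.DialIPC(ctx, "/tmp/node.ipc")
		if err != nil {
			panic(err)
		}
		defer client.Close()

		resp, err := client.CallContext(context.Background(), nil, "rpc_modules", nil)
		if err != nil {
			panic(err)
		}
		fmt.Println(string(resp.Result))
	}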
-func newIPCConnection(ctx context.Context, endpoint string) (net.Conn, error) { - return nil, errNotSupported -} diff --git a/relayer/chainproxy/rpcclient/ipc_unix.go b/relayer/chainproxy/rpcclient/ipc_unix.go deleted file mode 100755 index e97625bb87..0000000000 --- a/relayer/chainproxy/rpcclient/ipc_unix.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -//go:build darwin || dragonfly || freebsd || linux || nacl || netbsd || openbsd || solaris -// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris - -package rpcclient - -import ( - "context" - "fmt" - "net" - "os" - "path/filepath" - - "github.com/ethereum/go-ethereum/log" -) - -var max_path_size_rpc_client = 108 // C.max_socket_path_size() - -// ipcListen will create a Unix socket on the given endpoint. -func ipcListen(endpoint string) (net.Listener, error) { - if len(endpoint) > max_path_size_rpc_client { - log.Warn(fmt.Sprintf("The ipc endpoint is longer than %d characters. ", max_path_size_rpc_client), - "endpoint", endpoint) - } - - // Ensure the IPC path exists and remove any previous leftover - if err := os.MkdirAll(filepath.Dir(endpoint), 0o751); err != nil { - return nil, err - } - os.Remove(endpoint) - l, err := net.Listen("unix", endpoint) - if err != nil { - return nil, err - } - os.Chmod(endpoint, 0o600) - return l, nil -} - -// newIPCConnection will connect to a Unix socket on the given endpoint. -func newIPCConnection(ctx context.Context, endpoint string) (net.Conn, error) { - return new(net.Dialer).DialContext(ctx, "unix", endpoint) -} diff --git a/relayer/chainproxy/rpcclient/ipc_windows.go b/relayer/chainproxy/rpcclient/ipc_windows.go deleted file mode 100755 index 8cc3cf1744..0000000000 --- a/relayer/chainproxy/rpcclient/ipc_windows.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -//go:build windows -// +build windows - -package rpcclient - -import ( - "context" - "net" - "time" - - "gopkg.in/natefinch/npipe.v2" -) - -// This is used if the dialing context has no deadline. 
It is much smaller than the -// defaultDialTimeout because named pipes are local and there is no need to wait so long. -const defaultPipeDialTimeout = 2 * time.Second - -// ipcListen will create a named pipe on the given endpoint. -func ipcListen(endpoint string) (net.Listener, error) { - return npipe.Listen(endpoint) -} - -// newIPCConnection will connect to a named pipe with the given endpoint as name. -func newIPCConnection(ctx context.Context, endpoint string) (net.Conn, error) { - timeout := defaultPipeDialTimeout - if deadline, ok := ctx.Deadline(); ok { - timeout = deadline.Sub(time.Now()) - if timeout < 0 { - timeout = 0 - } - } - return npipe.DialTimeout(endpoint, timeout) -} diff --git a/relayer/chainproxy/rpcclient/json.go b/relayer/chainproxy/rpcclient/json.go deleted file mode 100755 index b1ce854d39..0000000000 --- a/relayer/chainproxy/rpcclient/json.go +++ /dev/null @@ -1,366 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package rpcclient - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "reflect" - "strings" - "sync" - "time" - - "github.com/lavanet/lava/utils" -) - -const ( - vsn = "2.0" - serviceMethodSeparator = "_" - subscribeMethodSuffix = "_subscribe" - unsubscribeMethodSuffix = "_unsubscribe" - notificationMethodSuffix = "_subscription" - - defaultWriteTimeout = 10 * time.Second // used if context has no deadline -) - -var null = json.RawMessage("null") - -type ethereumSubscriptionResult struct { - ID string `json:"subscription"` - Result json.RawMessage `json:"result,omitempty"` -} - -type tendermintSubscriptionResult struct { - Query string `json:"query"` -} - -// A value of this type can a JSON-RPC request, notification, successful response or -// error response. Which one it is depends on the fields. 
-type JsonrpcMessage struct { - Version string `json:"jsonrpc,omitempty"` - ID json.RawMessage `json:"id,omitempty"` - Method string `json:"method,omitempty"` - Params json.RawMessage `json:"params,omitempty"` - Error *JsonError `json:"error,omitempty"` - Result json.RawMessage `json:"result,omitempty"` -} - -type tendermintSubscribeReply struct { - Query string `json:"query"` -} - -func (msg *JsonrpcMessage) isEthereumNotification() bool { - return msg.ID == nil && msg.Method != "" -} - -func (msg *JsonrpcMessage) isTendermintNotification() bool { - var result tendermintSubscribeReply - err := json.Unmarshal(msg.Result, &result) - if err == nil && result.Query != "" { - return true - } - return false -} - -func (msg *JsonrpcMessage) isCall() bool { - return msg.hasValidID() && msg.Method != "" -} - -func (msg *JsonrpcMessage) isResponse() bool { - return msg.hasValidID() && msg.Method == "" && msg.Params == nil && (msg.Result != nil || msg.Error != nil) -} - -func (msg *JsonrpcMessage) hasValidID() bool { - return len(msg.ID) > 0 && msg.ID[0] != '{' && msg.ID[0] != '[' -} - -func (msg *JsonrpcMessage) isSubscribe() bool { - return strings.HasSuffix(msg.Method, subscribeMethodSuffix) -} - -func (msg *JsonrpcMessage) isUnsubscribe() bool { - return strings.HasSuffix(msg.Method, unsubscribeMethodSuffix) -} - -func (msg *JsonrpcMessage) namespace() string { - elem := strings.SplitN(msg.Method, serviceMethodSeparator, 2) - return elem[0] -} - -func (msg *JsonrpcMessage) String() string { - b, _ := json.Marshal(msg) - return string(b) -} - -func (msg *JsonrpcMessage) errorResponse(err error) *JsonrpcMessage { - resp := errorMessage(err) - resp.ID = msg.ID - return resp -} - -func (msg *JsonrpcMessage) response(result interface{}) *JsonrpcMessage { - enc, err := json.Marshal(result) - if err != nil { - // TODO: wrap with 'internal server error' - return msg.errorResponse(err) - } - return &JsonrpcMessage{Version: vsn, ID: msg.ID, Result: enc} -} - -func errorMessage(err error) *JsonrpcMessage { - msg := &JsonrpcMessage{Version: vsn, ID: null, Error: &JsonError{ - Code: defaultErrorCode, - Message: err.Error(), - }} - ec, ok := err.(Error) - if ok { - msg.Error.Code = ec.ErrorCode() - } - de, ok := err.(DataError) - if ok { - msg.Error.Data = de.ErrorData() - } - return msg -} - -type JsonError struct { - Code int `json:"code"` - Message string `json:"message"` - Data interface{} `json:"data,omitempty"` -} - -func (err *JsonError) Error() string { - if err.Message == "" { - return fmt.Sprintf("json-rpc error %d", err.Code) - } - return err.Message -} - -func (err *JsonError) ErrorCode() int { - return err.Code -} - -func (err *JsonError) ErrorData() interface{} { - return err.Data -} - -// Conn is a subset of the methods of net.Conn which are sufficient for ServerCodec. -type Conn interface { - io.ReadWriteCloser - SetWriteDeadline(time.Time) error -} - -type deadlineCloser interface { - io.Closer - SetWriteDeadline(time.Time) error -} - -// ConnRemoteAddr wraps the RemoteAddr operation, which returns a description -// of the peer address of a connection. If a Conn also implements ConnRemoteAddr, this -// description is used in log messages. -type ConnRemoteAddr interface { - RemoteAddr() string -} - -// jsonCodec reads and writes JSON-RPC messages to the underlying connection. It also has -// support for parsing arguments and serializing (result) objects. 
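The JsonrpcMessage type removed above distinguishes calls, responses, and notifications purely by which fields are populated. The following standalone sketch shows that field-based classification; the local message type and classify helper are simplified stand-ins (the Error field and the tendermint variant are omitted).

package main

import (
	"encoding/json"
	"fmt"
)

// message is a trimmed-down stand-in for the removed JsonrpcMessage type.
type message struct {
	Version string          `json:"jsonrpc,omitempty"`
	ID      json.RawMessage `json:"id,omitempty"`
	Method  string          `json:"method,omitempty"`
	Params  json.RawMessage `json:"params,omitempty"`
	Result  json.RawMessage `json:"result,omitempty"`
}

// classify applies the same field-based rules the removed helpers used:
// a call has an ID and a method, a response has an ID and a result but no
// method, and a notification has a method but no ID.
func classify(m message) string {
	hasID := len(m.ID) > 0 && m.ID[0] != '{' && m.ID[0] != '['
	switch {
	case hasID && m.Method != "":
		return "call"
	case hasID && m.Method == "" && m.Result != nil:
		return "response"
	case m.ID == nil && m.Method != "":
		return "notification"
	default:
		return "invalid"
	}
}

func main() {
	for _, raw := range []string{
		`{"jsonrpc":"2.0","id":1,"method":"eth_blockNumber","params":[]}`,
		`{"jsonrpc":"2.0","id":1,"result":"0x10"}`,
		`{"jsonrpc":"2.0","method":"eth_subscription","params":{"subscription":"0xcd0c"}}`,
	} {
		var m message
		if err := json.Unmarshal([]byte(raw), &m); err != nil {
			fmt.Println("bad message:", err)
			continue
		}
		fmt.Println(classify(m))
	}
}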
-type jsonCodec struct { - remote string - closer sync.Once // close closed channel once - closeCh chan interface{} // closed on Close - decode func(v interface{}) error // decoder to allow multiple transports - encMu utils.LavaMutex // guards the encoder - encode func(v interface{}) error // encoder to allow multiple transports - conn deadlineCloser -} - -// NewFuncCodec creates a codec which uses the given functions to read and write. If conn -// implements ConnRemoteAddr, log messages will use it to include the remote address of -// the connection. -func NewFuncCodec(conn deadlineCloser, encode, decode func(v interface{}) error) ServerCodec { - codec := &jsonCodec{ - closeCh: make(chan interface{}), - encode: encode, - decode: decode, - conn: conn, - } - if ra, ok := conn.(ConnRemoteAddr); ok { - codec.remote = ra.RemoteAddr() - } - return codec -} - -// NewCodec creates a codec on the given connection. If conn implements ConnRemoteAddr, log -// messages will use it to include the remote address of the connection. -func NewCodec(conn Conn) ServerCodec { - enc := json.NewEncoder(conn) - dec := json.NewDecoder(conn) - dec.UseNumber() - return NewFuncCodec(conn, enc.Encode, dec.Decode) -} - -func (c *jsonCodec) peerInfo() PeerInfo { - // This returns "ipc" because all other built-in transports have a separate codec type. - return PeerInfo{Transport: "ipc", RemoteAddr: c.remote} -} - -func (c *jsonCodec) remoteAddr() string { - return c.remote -} - -func (c *jsonCodec) readBatch() (messages []*JsonrpcMessage, batch bool, err error) { - // Decode the next JSON object in the input stream. - // This verifies basic syntax, etc. - var rawmsg json.RawMessage - if err := c.decode(&rawmsg); err != nil { - return nil, false, err - } - messages, batch = parseMessage(rawmsg) - for i, msg := range messages { - if msg == nil { - // Message is JSON 'null'. Replace with zero value so it - // will be treated like any other invalid message. - messages[i] = new(JsonrpcMessage) - } - } - return messages, batch, nil -} - -func (c *jsonCodec) writeJSON(ctx context.Context, v interface{}) error { - c.encMu.Lock() - defer c.encMu.Unlock() - - deadline, ok := ctx.Deadline() - if !ok { - deadline = time.Now().Add(defaultWriteTimeout) - } - c.conn.SetWriteDeadline(deadline) - return c.encode(v) -} - -func (c *jsonCodec) close() { - c.closer.Do(func() { - close(c.closeCh) - c.conn.Close() - }) -} - -// Closed returns a channel which will be closed when Close is called -func (c *jsonCodec) closed() <-chan interface{} { - return c.closeCh -} - -// parseMessage parses raw bytes as a (batch of) JSON-RPC message(s). There are no error -// checks in this function because the raw message has already been syntax-checked when it -// is called. Any non-JSON-RPC messages in the input return the zero value of -// jsonrpcMessage. 
-func parseMessage(raw json.RawMessage) ([]*JsonrpcMessage, bool) { - if !isBatch(raw) { - msgs := []*JsonrpcMessage{{}} - json.Unmarshal(raw, &msgs[0]) - return msgs, false - } - dec := json.NewDecoder(bytes.NewReader(raw)) - dec.Token() // skip '[' - var msgs []*JsonrpcMessage - for dec.More() { - msgs = append(msgs, new(JsonrpcMessage)) - dec.Decode(&msgs[len(msgs)-1]) - } - return msgs, true -} - -// isBatch returns true when the first non-whitespace characters is '[' -func isBatch(raw json.RawMessage) bool { - for _, c := range raw { - // skip insignificant whitespace (http://www.ietf.org/rfc/rfc4627.txt) - if c == 0x20 || c == 0x09 || c == 0x0a || c == 0x0d { - continue - } - return c == '[' - } - return false -} - -// parsePositionalArguments tries to parse the given args to an array of values with the -// given types. It returns the parsed values or an error when the args could not be -// parsed. Missing optional arguments are returned as reflect.Zero values. -func parsePositionalArguments(rawArgs json.RawMessage, types []reflect.Type) ([]reflect.Value, error) { - dec := json.NewDecoder(bytes.NewReader(rawArgs)) - var args []reflect.Value - tok, err := dec.Token() - switch { - case err == io.EOF || tok == nil && err == nil: - // "params" is optional and may be empty. Also allow "params":null even though it's - // not in the spec because our own client used to send it. - case err != nil: - return nil, err - case tok == json.Delim('['): - // Read argument array. - if args, err = parseArgumentArray(dec, types); err != nil { - return nil, err - } - default: - return nil, errors.New("non-array args") - } - // Set any missing args to nil. - for i := len(args); i < len(types); i++ { - if types[i].Kind() != reflect.Ptr { - return nil, fmt.Errorf("missing value for required argument %d", i) - } - args = append(args, reflect.Zero(types[i])) - } - return args, nil -} - -func parseArgumentArray(dec *json.Decoder, types []reflect.Type) ([]reflect.Value, error) { - args := make([]reflect.Value, 0, len(types)) - for i := 0; dec.More(); i++ { - if i >= len(types) { - return args, fmt.Errorf("too many arguments, want at most %d", len(types)) - } - argval := reflect.New(types[i]) - if err := dec.Decode(argval.Interface()); err != nil { - return args, fmt.Errorf("invalid argument %d: %v", i, err) - } - if argval.IsNil() && types[i].Kind() != reflect.Ptr { - return args, fmt.Errorf("missing value for required argument %d", i) - } - args = append(args, argval.Elem()) - } - // Read end of args array. - _, err := dec.Token() - return args, err -} - -// parseSubscriptionName extracts the subscription name from an encoded argument array. -func parseSubscriptionName(rawArgs json.RawMessage) (string, error) { - dec := json.NewDecoder(bytes.NewReader(rawArgs)) - if tok, _ := dec.Token(); tok != json.Delim('[') { - return "", errors.New("non-array args") - } - v, _ := dec.Token() - method, ok := v.(string) - if !ok { - return "", errors.New("expected subscription name as first argument") - } - return method, nil -} diff --git a/relayer/chainproxy/rpcclient/metrics.go b/relayer/chainproxy/rpcclient/metrics.go deleted file mode 100755 index 9403259c52..0000000000 --- a/relayer/chainproxy/rpcclient/metrics.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2020 The go-ethereum Authors -// This file is part of the go-ethereum library. 
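parseMessage, removed above, decides between a single message and a batch by peeking at the first significant byte of the payload. A minimal standalone sketch of that technique follows; splitBatch is an illustrative helper, not part of the original code.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

// isBatch reports whether the first significant byte of the payload is '[',
// the same cheap test the removed parseMessage relied on.
func isBatch(raw json.RawMessage) bool {
	for _, c := range raw {
		if c == 0x20 || c == 0x09 || c == 0x0a || c == 0x0d {
			continue // insignificant JSON whitespace
		}
		return c == '['
	}
	return false
}

// splitBatch decodes either a single object or an array of objects into a slice.
func splitBatch(raw json.RawMessage) ([]json.RawMessage, bool) {
	if !isBatch(raw) {
		return []json.RawMessage{raw}, false
	}
	dec := json.NewDecoder(bytes.NewReader(raw))
	dec.Token() // consume the opening '['
	var msgs []json.RawMessage
	for dec.More() {
		var m json.RawMessage
		dec.Decode(&m)
		msgs = append(msgs, m)
	}
	return msgs, true
}

func main() {
	single := json.RawMessage(`{"jsonrpc":"2.0","id":1,"method":"ping"}`)
	batch := json.RawMessage(`  [{"id":1},{"id":2}]`)
	for _, raw := range []json.RawMessage{single, batch} {
		msgs, isB := splitBatch(raw)
		fmt.Printf("batch=%v messages=%d\n", isB, len(msgs))
	}
}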
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package rpcclient - -import ( - "fmt" - - "github.com/ethereum/go-ethereum/metrics" -) - -var ( - rpcRequestGauge = metrics.NewRegisteredGauge("rpc/requests", nil) - successfulRequestGauge = metrics.NewRegisteredGauge("rpc/success", nil) - failedRequestGauge = metrics.NewRegisteredGauge("rpc/failure", nil) - rpcServingTimer = metrics.NewRegisteredTimer("rpc/duration/all", nil) -) - -func newRPCServingTimer(method string, valid bool) metrics.Timer { - flag := "success" - if !valid { - flag = "failure" - } - m := fmt.Sprintf("rpc/duration/%s/%s", method, flag) - return metrics.GetOrRegisterTimer(m, nil) -} diff --git a/relayer/chainproxy/rpcclient/server.go b/relayer/chainproxy/rpcclient/server.go deleted file mode 100755 index ed5054080d..0000000000 --- a/relayer/chainproxy/rpcclient/server.go +++ /dev/null @@ -1,190 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package rpcclient - -import ( - "context" - "fmt" - "io" - "sync/atomic" - - mapset "github.com/deckarep/golang-set" - "github.com/ethereum/go-ethereum/log" -) - -const ( - MetadataApi = "rpc" - EngineApi = "engine" -) - -// CodecOption specifies which type of messages a codec supports. -// -// Deprecated: this option is no longer honored by Server. -type CodecOption int - -const ( - // OptionMethodInvocation is an indication that the codec supports RPC method calls - OptionMethodInvocation CodecOption = 1 << iota - - // OptionSubscriptions is an indication that the codec supports RPC notifications - OptionSubscriptions = 1 << iota // support pub sub -) - -// Server is an RPC server. -type Server struct { - services serviceRegistry - idgen func() ID - run int32 - codecs mapset.Set -} - -// NewServer creates a new server instance with no registered handlers. -func NewServer() *Server { - server := &Server{idgen: randomIDGenerator(), codecs: mapset.NewSet(), run: 1} - // Register the default service providing meta information about the RPC service such - // as the services and methods it offers. 
- rpcService := &RPCService{server} - server.RegisterName(MetadataApi, rpcService) - return server -} - -// RegisterName creates a service for the given receiver type under the given name. When no -// methods on the given receiver match the criteria to be either a RPC method or a -// subscription an error is returned. Otherwise a new service is created and added to the -// service collection this server provides to clients. -func (s *Server) RegisterName(name string, receiver interface{}) error { - return s.services.registerName(name, receiver) -} - -// ServeCodec reads incoming requests from codec, calls the appropriate callback and writes -// the response back using the given codec. It will block until the codec is closed or the -// server is stopped. In either case the codec is closed. -// -// Note that codec options are no longer supported. -func (s *Server) ServeCodec(codec ServerCodec, options CodecOption) { - defer codec.close() - - // Don't serve if server is stopped. - if atomic.LoadInt32(&s.run) == 0 { - return - } - - // Add the codec to the set so it can be closed by Stop. - s.codecs.Add(codec) - defer s.codecs.Remove(codec) - - c := initClient(codec, s.idgen, &s.services) - <-codec.closed() - c.Close() -} - -// serveSingleRequest reads and processes a single RPC request from the given codec. This -// is used to serve HTTP connections. Subscriptions and reverse calls are not allowed in -// this mode. -func (s *Server) serveSingleRequest(ctx context.Context, codec ServerCodec) { - // Don't serve if server is stopped. - if atomic.LoadInt32(&s.run) == 0 { - return - } - - h := newHandler(ctx, codec, s.idgen, &s.services) - h.allowSubscribe = false - defer h.close(io.EOF, nil) - - reqs, batch, err := codec.readBatch() - if err != nil { - if err != io.EOF { - codec.writeJSON(ctx, errorMessage(&invalidMessageError{"parse error"})) - } - return - } - if batch { - h.handleBatch(reqs) - } else { - h.handleMsg(reqs[0]) - } -} - -// Stop stops reading new requests, waits for stopPendingRequestTimeout to allow pending -// requests to finish, then closes all codecs which will cancel pending requests and -// subscriptions. -func (s *Server) Stop() { - if atomic.CompareAndSwapInt32(&s.run, 1, 0) { - log.Debug("RPC server shutting down") - s.codecs.Each(func(c interface{}) bool { - serverCodec, ok := c.(ServerCodec) - if !ok { - panic("(s *Server) Stop() - serverCodec, ok := c.(ServerCodec) - type assertion failed: " + fmt.Sprintf("%s", c)) - } - serverCodec.close() - return true - }) - } -} - -// RPCService gives meta information about the server. -// e.g. gives information about the loaded modules. -type RPCService struct { - server *Server -} - -// Modules returns the list of RPC services with their version number -func (s *RPCService) Modules() map[string]string { - s.server.services.mu.Lock() - defer s.server.services.mu.Unlock() - - modules := make(map[string]string) - for name := range s.server.services.services { - modules[name] = "1.0" - } - return modules -} - -// PeerInfo contains information about the remote end of the network connection. -// -// This is available within RPC method handlers through the context. Call -// PeerInfoFromContext to get information about the client connection related to -// the current method call. -type PeerInfo struct { - // Transport is name of the protocol used by the client. - // This can be "http", "ws" or "ipc". - Transport string - - // Address of client. This will usually contain the IP address and port. 
- RemoteAddr string - - // Addditional information for HTTP and WebSocket connections. - HTTP struct { - // Protocol version, i.e. "HTTP/1.1". This is not set for WebSocket. - Version string - // Header values sent by the client. - UserAgent string - Origin string - Host string - } -} - -type peerInfoContextKey struct{} - -// PeerInfoFromContext returns information about the client's network connection. -// Use this with the context passed to RPC method handler functions. -// -// The zero value is returned if no connection info is present in ctx. -func PeerInfoFromContext(ctx context.Context) PeerInfo { - info, _ := ctx.Value(peerInfoContextKey{}).(PeerInfo) - return info -} diff --git a/relayer/chainproxy/rpcclient/service.go b/relayer/chainproxy/rpcclient/service.go deleted file mode 100755 index 198d944226..0000000000 --- a/relayer/chainproxy/rpcclient/service.go +++ /dev/null @@ -1,267 +0,0 @@ -// Copyright 2019 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package rpcclient - -import ( - "context" - "errors" - "fmt" - "reflect" - "runtime" - "strings" - "unicode" - - "github.com/ethereum/go-ethereum/log" - "github.com/lavanet/lava/utils" -) - -var ( - contextType = reflect.TypeOf((*context.Context)(nil)).Elem() - errorType = reflect.TypeOf((*error)(nil)).Elem() - subscriptionType = reflect.TypeOf(Subscription{}) - stringType = reflect.TypeOf("") -) - -type serviceRegistry struct { - mu utils.LavaMutex - services map[string]service -} - -// service represents a registered object. 
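PeerInfoFromContext, removed above, retrieves connection metadata that the transport handlers stored under an unexported context key. A small sketch of that context-key pattern, using simplified stand-in types (peerInfo, peerInfoKey):

package main

import (
	"context"
	"fmt"
)

// peerInfo is a trimmed-down stand-in for the removed PeerInfo struct.
type peerInfo struct {
	Transport  string
	RemoteAddr string
}

// An unexported key type prevents collisions with other context values.
type peerInfoKey struct{}

func withPeerInfo(ctx context.Context, info peerInfo) context.Context {
	return context.WithValue(ctx, peerInfoKey{}, info)
}

// peerInfoFromContext returns the zero value when no info was attached,
// matching the behaviour of the removed PeerInfoFromContext.
func peerInfoFromContext(ctx context.Context) peerInfo {
	info, _ := ctx.Value(peerInfoKey{}).(peerInfo)
	return info
}

func main() {
	ctx := withPeerInfo(context.Background(), peerInfo{Transport: "http", RemoteAddr: "127.0.0.1:51234"})
	fmt.Printf("%+v\n", peerInfoFromContext(ctx))
	fmt.Printf("%+v\n", peerInfoFromContext(context.Background())) // zero value
}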
-type service struct { - name string // name for service - callbacks map[string]*callback // registered handlers - subscriptions map[string]*callback // available subscriptions/notifications -} - -// callback is a method callback which was registered in the server -type callback struct { - fn reflect.Value // the function - rcvr reflect.Value // receiver object of method, set if fn is method - argTypes []reflect.Type // input argument types - hasCtx bool // method's first argument is a context (not included in argTypes) - errPos int // err return idx, of -1 when method cannot return error - isSubscribe bool // true if this is a subscription callback -} - -func (r *serviceRegistry) registerName(name string, rcvr interface{}) error { - rcvrVal := reflect.ValueOf(rcvr) - if name == "" { - return fmt.Errorf("no service name for type %s", rcvrVal.Type().String()) - } - callbacks := suitableCallbacks(rcvrVal) - if len(callbacks) == 0 { - return fmt.Errorf("service %T doesn't have any suitable methods/subscriptions to expose", rcvr) - } - - r.mu.Lock() - defer r.mu.Unlock() - if r.services == nil { - r.services = make(map[string]service) - } - svc, ok := r.services[name] - if !ok { - svc = service{ - name: name, - callbacks: make(map[string]*callback), - subscriptions: make(map[string]*callback), - } - r.services[name] = svc - } - for name, cb := range callbacks { - if cb.isSubscribe { - svc.subscriptions[name] = cb - } else { - svc.callbacks[name] = cb - } - } - return nil -} - -// callback returns the callback corresponding to the given RPC method name. -func (r *serviceRegistry) callback(method string) *callback { - elem := strings.SplitN(method, serviceMethodSeparator, 2) - if len(elem) != 2 { - return nil - } - r.mu.Lock() - defer r.mu.Unlock() - return r.services[elem[0]].callbacks[elem[1]] -} - -// subscription returns a subscription callback in the given service. -func (r *serviceRegistry) subscription(service, name string) *callback { - r.mu.Lock() - defer r.mu.Unlock() - return r.services[service].subscriptions[name] -} - -// suitableCallbacks iterates over the methods of the given type. It determines if a method -// satisfies the criteria for a RPC callback or a subscription callback and adds it to the -// collection of callbacks. See server documentation for a summary of these criteria. -func suitableCallbacks(receiver reflect.Value) map[string]*callback { - typ := receiver.Type() - callbacks := make(map[string]*callback) - for m := 0; m < typ.NumMethod(); m++ { - method := typ.Method(m) - if method.PkgPath != "" { - continue // method not exported - } - cb := newCallback(receiver, method.Func) - if cb == nil { - continue // function invalid - } - name := formatName(method.Name) - callbacks[name] = cb - } - return callbacks -} - -// newCallback turns fn (a function) into a callback object. It returns nil if the function -// is unsuitable as an RPC callback. -func newCallback(receiver, fn reflect.Value) *callback { - fntype := fn.Type() - c := &callback{fn: fn, rcvr: receiver, errPos: -1, isSubscribe: isPubSub(fntype)} - // Determine parameter types. They must all be exported or builtin types. - c.makeArgTypes() - - // Verify return types. The function must return at most one error - // and/or one other non-error value. - outs := make([]reflect.Type, fntype.NumOut()) - for i := 0; i < fntype.NumOut(); i++ { - outs[i] = fntype.Out(i) - } - if len(outs) > 2 { - return nil - } - // If an error is returned, it must be the last returned value. 
- switch { - case len(outs) == 1 && isErrorType(outs[0]): - c.errPos = 0 - case len(outs) == 2: - if isErrorType(outs[0]) || !isErrorType(outs[1]) { - return nil - } - c.errPos = 1 - } - return c -} - -// makeArgTypes composes the argTypes list. -func (c *callback) makeArgTypes() { - fntype := c.fn.Type() - // Skip receiver and context.Context parameter (if present). - firstArg := 0 - if c.rcvr.IsValid() { - firstArg++ - } - if fntype.NumIn() > firstArg && fntype.In(firstArg) == contextType { - c.hasCtx = true - firstArg++ - } - // Add all remaining parameters. - c.argTypes = make([]reflect.Type, fntype.NumIn()-firstArg) - for i := firstArg; i < fntype.NumIn(); i++ { - c.argTypes[i-firstArg] = fntype.In(i) - } -} - -// call invokes the callback. -func (c *callback) call(ctx context.Context, method string, args []reflect.Value) (res interface{}, errRes error) { - // Create the argument slice. - fullargs := make([]reflect.Value, 0, 2+len(args)) - if c.rcvr.IsValid() { - fullargs = append(fullargs, c.rcvr) - } - if c.hasCtx { - fullargs = append(fullargs, reflect.ValueOf(ctx)) - } - fullargs = append(fullargs, args...) - - // Catch panic while running the callback. - defer func() { - if err := recover(); err != nil { - const size = 64 << 10 - buf := make([]byte, size) - buf = buf[:runtime.Stack(buf, false)] - log.Error("RPC method " + method + " crashed: " + fmt.Sprintf("%v\n%s", err, buf)) - errRes = errors.New("method handler crashed") - } - }() - // Run the callback. - results := c.fn.Call(fullargs) - if len(results) == 0 { - var ret interface{} - return ret, nil - } - if c.errPos >= 0 && !results[c.errPos].IsNil() { - // Method has returned non-nil error value. - err := results[c.errPos].Interface() - var ok bool - errRet, ok := err.(error) - if !ok { - return reflect.Value{}, fmt.Errorf("(c *callback) call - errRet, ok := err.(error) - type assertion failed" + fmt.Sprintf("%s", err)) - } - return reflect.Value{}, errRet - } - return results[0].Interface(), nil -} - -// Is t context.Context or *context.Context? -func isContextType(t reflect.Type) bool { - for t.Kind() == reflect.Ptr { - t = t.Elem() - } - return t == contextType -} - -// Does t satisfy the error interface? -func isErrorType(t reflect.Type) bool { - for t.Kind() == reflect.Ptr { - t = t.Elem() - } - return t.Implements(errorType) -} - -// Is t Subscription or *Subscription? -func isSubscriptionType(t reflect.Type) bool { - for t.Kind() == reflect.Ptr { - t = t.Elem() - } - return t == subscriptionType -} - -// isPubSub tests whether the given method has as as first argument a context.Context and -// returns the pair (Subscription, error). -func isPubSub(methodType reflect.Type) bool { - // numIn(0) is the receiver type - if methodType.NumIn() < 2 || methodType.NumOut() != 2 { - return false - } - return isContextType(methodType.In(1)) && - isSubscriptionType(methodType.Out(0)) && - isErrorType(methodType.Out(1)) -} - -// formatName converts to first character of name to lowercase. -func formatName(name string) string { - ret := []rune(name) - if len(ret) > 0 { - ret[0] = unicode.ToLower(ret[0]) - } - return string(ret) -} diff --git a/relayer/chainproxy/rpcclient/stdio.go b/relayer/chainproxy/rpcclient/stdio.go deleted file mode 100755 index d6b0b5ee11..0000000000 --- a/relayer/chainproxy/rpcclient/stdio.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. 
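suitableCallbacks and newCallback, removed above, use reflection to expose only exported methods that return at most one result plus a trailing error, and they lower-case the first letter of the method name. The sketch below applies a simplified version of those criteria; the demoService type and the "demo_" namespace are made up for illustration.

package main

import (
	"context"
	"fmt"
	"reflect"
	"unicode"
)

var errorType = reflect.TypeOf((*error)(nil)).Elem()

// demoService has one method that qualifies and one that does not.
type demoService struct{}

func (demoService) BlockNumber(ctx context.Context) (uint64, error) { return 42, nil }
func (demoService) badSignature() (int, int)                        { return 0, 0 } // unexported, so reflection on the struct type never lists it

// suitableMethods applies a simplified version of the removed criteria:
// exported name, at most two return values, and an error (if any) last.
func suitableMethods(receiver interface{}) []string {
	typ := reflect.TypeOf(receiver)
	var names []string
	for m := 0; m < typ.NumMethod(); m++ {
		method := typ.Method(m)
		if method.PkgPath != "" {
			continue // not exported (kept to mirror the removed check)
		}
		fn := method.Func.Type()
		if fn.NumOut() > 2 {
			continue
		}
		if fn.NumOut() == 2 && !fn.Out(1).Implements(errorType) {
			continue
		}
		// formatName: lower-case the first rune, as the removed code did.
		runes := []rune(method.Name)
		runes[0] = unicode.ToLower(runes[0])
		names = append(names, "demo_"+string(runes))
	}
	return names
}

func main() {
	fmt.Println(suitableMethods(demoService{})) // [demo_blockNumber]
}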
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package rpcclient - -import ( - "context" - "errors" - "io" - "net" - "os" - "time" -) - -// DialStdIO creates a client on stdin/stdout. -func DialStdIO(ctx context.Context) (*Client, error) { - return DialIO(ctx, os.Stdin, os.Stdout) -} - -// DialIO creates a client which uses the given IO channels -func DialIO(ctx context.Context, in io.Reader, out io.Writer) (*Client, error) { - return newClient(ctx, func(_ context.Context) (ServerCodec, error) { - return NewCodec(stdioConn{ - in: in, - out: out, - }), nil - }) -} - -type stdioConn struct { - in io.Reader - out io.Writer -} - -func (io stdioConn) Read(b []byte) (n int, err error) { - return io.in.Read(b) -} - -func (io stdioConn) Write(b []byte) (n int, err error) { - return io.out.Write(b) -} - -func (io stdioConn) Close() error { - return nil -} - -func (io stdioConn) RemoteAddr() string { - return "/dev/stdin" -} - -func (io stdioConn) SetWriteDeadline(t time.Time) error { - return &net.OpError{Op: "set", Net: "stdio", Source: nil, Addr: nil, Err: errors.New("deadline not supported")} -} diff --git a/relayer/chainproxy/rpcclient/subscription.go b/relayer/chainproxy/rpcclient/subscription.go deleted file mode 100755 index 35f8e3a4dc..0000000000 --- a/relayer/chainproxy/rpcclient/subscription.go +++ /dev/null @@ -1,369 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package rpcclient - -import ( - "container/list" - "context" - crand "crypto/rand" - "encoding/binary" - "encoding/hex" - "encoding/json" - "errors" - "fmt" - "math/rand" - "reflect" - "strings" - "sync" - "time" - - "github.com/lavanet/lava/utils" -) - -var ( - // ErrNotificationsUnsupported is returned when the connection doesn't support notifications - ErrNotificationsUnsupported = errors.New("notifications not supported") - // ErrSubscriptionNotFound is returned when the notification for the given id is not found - ErrSubscriptionNotFound = errors.New("subscription not found") -) - -var globalGen = randomIDGenerator() - -// ID defines a pseudo random number that is used to identify RPC subscriptions. -type ID string - -// NewID returns a new, random ID. 
-func NewID() ID { - return globalGen() -} - -// randomIDGenerator returns a function generates a random IDs. -func randomIDGenerator() func() ID { - buf := make([]byte, 8) - var seed int64 - if _, err := crand.Read(buf); err == nil { - seed = int64(binary.BigEndian.Uint64(buf)) - } else { - seed = int64(time.Now().Nanosecond()) - } - - var ( - mu utils.LavaMutex - rng = rand.New(rand.NewSource(seed)) - ) - return func() ID { - mu.Lock() - defer mu.Unlock() - id := make([]byte, 16) - rng.Read(id) - return encodeID(id) - } -} - -func encodeID(b []byte) ID { - id := hex.EncodeToString(b) - id = strings.TrimLeft(id, "0") - if id == "" { - id = "0" // ID's are RPC quantities, no leading zero's and 0 is 0x0. - } - return ID("0x" + id) -} - -type notifierKey struct{} - -// NotifierFromContext returns the Notifier value stored in ctx, if any. -func NotifierFromContext(ctx context.Context) (*Notifier, bool) { - n, ok := ctx.Value(notifierKey{}).(*Notifier) - return n, ok -} - -// Notifier is tied to a RPC connection that supports subscriptions. -// Server callbacks use the notifier to send notifications. -type Notifier struct { - h *handler - namespace string - - mu utils.LavaMutex - sub *Subscription - buffer []json.RawMessage - callReturned bool - activated bool -} - -// CreateSubscription returns a new subscription that is coupled to the -// RPC connection. By default subscriptions are inactive and notifications -// are dropped until the subscription is marked as active. This is done -// by the RPC server after the subscription ID is send to the client. -func (n *Notifier) CreateSubscription() *Subscription { - n.mu.Lock() - defer n.mu.Unlock() - - if n.sub != nil { - panic("can't create multiple subscriptions with Notifier") - } else if n.callReturned { - panic("can't create subscription after subscribe call has returned") - } - n.sub = &Subscription{ID: n.h.idgen(), namespace: n.namespace, err: make(chan error, 1)} - return n.sub -} - -// Notify sends a notification to the client with the given data as payload. -// If an error occurs the RPC connection is closed and the error is returned. -func (n *Notifier) Notify(id ID, data interface{}) error { - enc, err := json.Marshal(data) - if err != nil { - return err - } - - n.mu.Lock() - defer n.mu.Unlock() - - if n.sub == nil { - panic("can't Notify before subscription is created") - } else if n.sub.ID != id { - panic("Notify with wrong ID") - } - if n.activated { - return n.send(n.sub, enc) - } - n.buffer = append(n.buffer, enc) - return nil -} - -// Closed returns a channel that is closed when the RPC connection is closed. -// Deprecated: use subscription error channel -func (n *Notifier) Closed() <-chan interface{} { - return n.h.conn.closed() -} - -// takeSubscription returns the subscription (if one has been created). No subscription can -// be created after this call. -func (n *Notifier) takeSubscription() *Subscription { - n.mu.Lock() - defer n.mu.Unlock() - n.callReturned = true - return n.sub -} - -// activate is called after the subscription ID was sent to client. Notifications are -// buffered before activation. This prevents notifications being sent to the client before -// the subscription ID is sent to the client. 
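randomIDGenerator and encodeID, removed above, seed a private math/rand source from crypto/rand and render subscription IDs as 0x-prefixed hex quantities with leading zeros trimmed. A standalone sketch of the same scheme follows; unlike the removed code it omits the mutex, so it is not safe for concurrent use.

package main

import (
	crand "crypto/rand"
	"encoding/binary"
	"encoding/hex"
	"fmt"
	"math/rand"
	"strings"
	"time"
)

// encodeID renders the bytes as an RPC quantity: hex, no leading zeros, "0x0" for zero.
func encodeID(b []byte) string {
	id := strings.TrimLeft(hex.EncodeToString(b), "0")
	if id == "" {
		id = "0"
	}
	return "0x" + id
}

// newIDGenerator mirrors the removed randomIDGenerator: seed from crypto/rand
// when possible, fall back to the clock, then draw 16 pseudo-random bytes per ID.
func newIDGenerator() func() string {
	buf := make([]byte, 8)
	seed := time.Now().UnixNano()
	if _, err := crand.Read(buf); err == nil {
		seed = int64(binary.BigEndian.Uint64(buf))
	}
	rng := rand.New(rand.NewSource(seed))
	return func() string {
		id := make([]byte, 16)
		rng.Read(id)
		return encodeID(id)
	}
}

func main() {
	gen := newIDGenerator()
	fmt.Println(gen())                        // a fresh pseudo-random 128-bit quantity
	fmt.Println(encodeID([]byte{0x00, 0x00})) // 0x0
}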
-func (n *Notifier) activate() error { - n.mu.Lock() - defer n.mu.Unlock() - - for _, data := range n.buffer { - if err := n.send(n.sub, data); err != nil { - return err - } - } - n.activated = true - return nil -} - -func (n *Notifier) send(sub *Subscription, data json.RawMessage) error { - params, _ := json.Marshal(ðereumSubscriptionResult{ID: string(sub.ID), Result: data}) - ctx := context.Background() - return n.h.conn.writeJSON(ctx, &JsonrpcMessage{ - Version: vsn, - Method: n.namespace + notificationMethodSuffix, - Params: params, - }) -} - -// A Subscription is created by a notifier and tied to that notifier. The client can use -// this subscription to wait for an unsubscribe request for the client, see Err(). -type Subscription struct { - ID ID - namespace string - err chan error // closed on unsubscribe -} - -// Err returns a channel that is closed when the client send an unsubscribe request. -func (s *Subscription) Err() <-chan error { - return s.err -} - -// MarshalJSON marshals a subscription as its ID. -func (s *Subscription) MarshalJSON() ([]byte, error) { - return json.Marshal(s.ID) -} - -// ClientSubscription is a subscription established through the Client's Subscribe or -// EthSubscribe methods. -type ClientSubscription struct { - client *Client - etype reflect.Type - channel reflect.Value - namespace string - subid string - - // The in channel receives notification values from client dispatcher. - in chan *JsonrpcMessage - - // The error channel receives the error from the forwarding loop. - // It is closed by Unsubscribe. - err chan error - errOnce sync.Once - - // Closing of the subscription is requested by sending on 'quit'. This is handled by - // the forwarding loop, which closes 'forwardDone' when it has stopped sending to - // sub.channel. Finally, 'unsubDone' is closed after unsubscribing on the server side. - quit chan error - forwardDone chan struct{} - unsubDone chan struct{} -} - -// This is the sentinel value sent on sub.quit when Unsubscribe is called. -var errUnsubscribed = errors.New("unsubscribed") - -func newClientSubscription(c *Client, namespace string, channel reflect.Value) *ClientSubscription { - sub := &ClientSubscription{ - client: c, - namespace: namespace, - etype: channel.Type().Elem(), - channel: channel, - in: make(chan *JsonrpcMessage), - quit: make(chan error), - forwardDone: make(chan struct{}), - unsubDone: make(chan struct{}), - err: make(chan error, 1), - } - return sub -} - -// Err returns the subscription error channel. The intended use of Err is to schedule -// resubscription when the client connection is closed unexpectedly. -// -// The error channel receives a value when the subscription has ended due to an error. The -// received error is nil if Close has been called on the underlying client and no other -// error has occurred. -// -// The error channel is closed when Unsubscribe is called on the subscription. -func (sub *ClientSubscription) Err() <-chan error { - return sub.err -} - -// Unsubscribe unsubscribes the notification and closes the error channel. -// It can safely be called more than once. -func (sub *ClientSubscription) Unsubscribe() { - sub.errOnce.Do(func() { - select { - case sub.quit <- errUnsubscribed: - <-sub.unsubDone - case <-sub.unsubDone: - } - close(sub.err) - }) -} - -// deliver is called by the client's message dispatcher to send a notification value. 
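Notifier.Notify and Notifier.activate, removed above, buffer notifications until the subscription ID has been written to the client and only then flush them. A minimal sketch of that buffer-and-flush pattern, with a plain function standing in for the connection and the mutex omitted:

package main

import "fmt"

// notifier buffers payloads until activate is called, mirroring the
// removed Notifier's activated/buffer fields.
type notifier struct {
	send      func(string) error // where notifications ultimately go
	buffer    []string
	activated bool
}

func (n *notifier) notify(data string) error {
	if n.activated {
		return n.send(data)
	}
	n.buffer = append(n.buffer, data)
	return nil
}

// activate flushes everything buffered so far and switches to direct sends,
// so nothing reaches the client before the subscription ID has been sent.
func (n *notifier) activate() error {
	for _, data := range n.buffer {
		if err := n.send(data); err != nil {
			return err
		}
	}
	n.buffer = nil
	n.activated = true
	return nil
}

func main() {
	n := &notifier{send: func(s string) error { fmt.Println("sent:", s); return nil }}
	n.notify("newHeads #1") // buffered, client has not seen the subscription ID yet
	n.notify("newHeads #2") // buffered
	n.activate()            // both notifications are flushed now
	n.notify("newHeads #3") // delivered immediately
}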
-func (sub *ClientSubscription) deliver(result *JsonrpcMessage) (ok bool) { - select { - case sub.in <- result: - return true - case <-sub.forwardDone: - return false - } -} - -// close is called by the client's message dispatcher when the connection is closed. -func (sub *ClientSubscription) close(err error) { - select { - case sub.quit <- err: - case <-sub.forwardDone: - } -} - -// run is the forwarding loop of the subscription. It runs in its own goroutine and -// is launched by the client's handler after the subscription has been created. -func (sub *ClientSubscription) run() { - defer close(sub.unsubDone) - - _, err := sub.forward() - - // The client's dispatch loop won't be able to execute the unsubscribe call if it is - // blocked in sub.deliver() or sub.close(). Closing forwardDone unblocks them. - close(sub.forwardDone) - - // Send the error. - if err != nil { - if err == ErrClientQuit { - // ErrClientQuit gets here when Client.Close is called. This is reported as a - // nil error because it's not an error, but we can't close sub.err here. - err = nil - } - sub.err <- err - } -} - -// forward is the forwarding loop. It takes in RPC notifications and sends them -// on the subscription channel. -func (sub *ClientSubscription) forward() (unsubscribeServer bool, err error) { - cases := []reflect.SelectCase{ - {Dir: reflect.SelectRecv, Chan: reflect.ValueOf(sub.quit)}, - {Dir: reflect.SelectRecv, Chan: reflect.ValueOf(sub.in)}, - {Dir: reflect.SelectSend, Chan: sub.channel}, - } - buffer := list.New() - - for { - var chosen int - var recv reflect.Value - if buffer.Len() == 0 { - // Idle, omit send case. - chosen, recv, _ = reflect.Select(cases[:2]) - } else { - // Non-empty buffer, send the first queued item. - cases[2].Send = reflect.ValueOf(buffer.Front().Value) - chosen, recv, _ = reflect.Select(cases) - } - - switch chosen { - case 0: // <-sub.quit - if !recv.IsNil() { - var ok bool - err, ok = recv.Interface().(error) - if !ok { - return false, fmt.Errorf("(sub *ClientSubscription) forward() - recv.Interface().(error) - type assertion failed" + fmt.Sprintf("%s", recv.Interface())) - } - } - if err == errUnsubscribed { - // Exiting because Unsubscribe was called, unsubscribe on server. - return true, nil - } - return false, err - - case 1: // <-sub.in - msg, ok := recv.Interface().(*JsonrpcMessage) - if !ok { - return false, fmt.Errorf("(sub *ClientSubscription) forward() - recv.Interface().(*JsonrpcMessage) - type assertion failed" + fmt.Sprintf("%s", recv.Interface())) - } - if msg.Error != nil { - return true, err - } - if buffer.Len() == maxClientSubscriptionBuffer { - return true, ErrSubscriptionQueueOverflow - } - buffer.PushBack(msg) - - case 2: // sub.channel<- - cases[2].Send = reflect.Value{} // Don't hold onto the value. - buffer.Remove(buffer.Front()) - } - } -} diff --git a/relayer/chainproxy/rpcclient/types.go b/relayer/chainproxy/rpcclient/types.go deleted file mode 100755 index 9819876acd..0000000000 --- a/relayer/chainproxy/rpcclient/types.go +++ /dev/null @@ -1,265 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
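The ClientSubscription forwarding loop removed above never blocks the dispatcher: incoming notifications queue in an unbounded list, and the send case is only armed when the queue is non-empty. The following standalone sketch shows that reflect.Select technique; the string payloads and the forward helper are illustrative, and the real loop additionally handles unsubscribe and a buffer-overflow limit.

package main

import (
	"container/list"
	"fmt"
	"reflect"
)

// forward drains 'in' into 'out' without ever blocking the sender:
// values queue in an unbounded list and the send case is only armed
// when the queue is non-empty, mirroring the removed forwarding loop.
func forward(in <-chan string, out chan<- string, quit <-chan struct{}) {
	cases := []reflect.SelectCase{
		{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(quit)},
		{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(in)},
		{Dir: reflect.SelectSend, Chan: reflect.ValueOf(out)},
	}
	buffer := list.New()
	for {
		var chosen int
		var recv reflect.Value
		if buffer.Len() == 0 {
			chosen, recv, _ = reflect.Select(cases[:2]) // idle: omit the send case
		} else {
			cases[2].Send = reflect.ValueOf(buffer.Front().Value)
			chosen, recv, _ = reflect.Select(cases)
		}
		switch chosen {
		case 0: // quit
			return
		case 1: // new value from the dispatcher
			buffer.PushBack(recv.Interface())
		case 2: // delivered the head of the queue
			cases[2].Send = reflect.Value{} // don't hold onto the value
			buffer.Remove(buffer.Front())
		}
	}
}

func main() {
	in := make(chan string)
	out := make(chan string)
	quit := make(chan struct{})
	go forward(in, out, quit)
	in <- "a"
	in <- "b" // accepted even though nobody has read "a" yet
	fmt.Println(<-out, <-out)
	close(quit)
}

The unbounded queue is the design point: a slow subscriber delays its own deliveries but can never stall the client's dispatch loop.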
-// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package rpcclient - -import ( - "context" - "encoding/json" - "fmt" - "math" - "strconv" - "strings" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" - spectypes "github.com/lavanet/lava/x/spec/types" -) - -// API describes the set of methods offered over the RPC interface -type API struct { - Namespace string // namespace under which the rpc methods of Service are exposed - Version string // api version for DApp's - Service interface{} // receiver instance which holds the methods - Public bool // indication if the methods must be considered safe for public use - Authenticated bool // whether the api should only be available behind authentication. -} - -// ServerCodec implements reading, parsing and writing RPC messages for the server side of -// a RPC session. Implementations must be go-routine safe since the codec can be called in -// multiple go-routines concurrently. -type ServerCodec interface { - peerInfo() PeerInfo - readBatch() (msgs []*JsonrpcMessage, isBatch bool, err error) - close() - - jsonWriter -} - -// jsonWriter can write JSON messages to its underlying connection. -// Implementations must be safe for concurrent use. -type jsonWriter interface { - writeJSON(context.Context, interface{}) error - // Closed returns a channel which is closed when the connection is closed. - closed() <-chan interface{} - // RemoteAddr returns the peer address of the connection. - remoteAddr() string -} - -type BlockNumber int64 - -const ( - FinalizedBlockNumber = BlockNumber(spectypes.FINALIZED_BLOCK) - PendingBlockNumber = BlockNumber(spectypes.PENDING_BLOCK) - LatestBlockNumber = BlockNumber(spectypes.LATEST_BLOCK) - EarliestBlockNumber = BlockNumber(spectypes.EARLIEST_BLOCK) - SafeBlockNumber = BlockNumber(spectypes.SAFE_BLOCK) -) - -// UnmarshalJSON parses the given JSON fragment into a BlockNumber. It supports: -// - "latest", "earliest" or "pending" as string arguments -// - the block number -// Returned errors: -// - an invalid block number error when the given argument isn't a known strings -// - an out of range error when the given block number is either too little or too large -func (bn *BlockNumber) UnmarshalJSON(data []byte) error { - input := strings.TrimSpace(string(data)) - if len(input) >= 2 && input[0] == '"' && input[len(input)-1] == '"' { - input = input[1 : len(input)-1] - } - - switch input { - case "earliest": - *bn = EarliestBlockNumber - return nil - case "latest": - *bn = LatestBlockNumber - return nil - case "pending": - *bn = PendingBlockNumber - return nil - case "finalized": - *bn = FinalizedBlockNumber - return nil - case "safe": - *bn = SafeBlockNumber - return nil - } - - blckNum, err := hexutil.DecodeUint64(input) - if err != nil { - return err - } - if blckNum > math.MaxInt64 { - return fmt.Errorf("block number larger than int64") - } - *bn = BlockNumber(blckNum) - return nil -} - -// MarshalText implements encoding.TextMarshaler. 
It marshals: -// - "latest", "earliest" or "pending" as strings -// - other numbers as hex -func (bn BlockNumber) MarshalText() ([]byte, error) { - switch bn { - case EarliestBlockNumber: - return []byte("earliest"), nil - case LatestBlockNumber: - return []byte("latest"), nil - case PendingBlockNumber: - return []byte("pending"), nil - case FinalizedBlockNumber: - return []byte("finalized"), nil - case SafeBlockNumber: - return []byte("safe"), nil - default: - return hexutil.Uint64(bn).MarshalText() - } -} - -func (bn BlockNumber) Int64() int64 { - return (int64)(bn) -} - -type BlockNumberOrHash struct { - BlockNumber *BlockNumber `json:"blockNumber,omitempty"` - BlockHash *common.Hash `json:"blockHash,omitempty"` - RequireCanonical bool `json:"requireCanonical,omitempty"` -} - -func (bnh *BlockNumberOrHash) UnmarshalJSON(data []byte) error { - type erased BlockNumberOrHash - e := erased{} - err := json.Unmarshal(data, &e) - if err == nil { - if e.BlockNumber != nil && e.BlockHash != nil { - return fmt.Errorf("cannot specify both BlockHash and BlockNumber, choose one or the other") - } - bnh.BlockNumber = e.BlockNumber - bnh.BlockHash = e.BlockHash - bnh.RequireCanonical = e.RequireCanonical - return nil - } - var input string - err = json.Unmarshal(data, &input) - if err != nil { - return err - } - switch input { - case "earliest": - bn := EarliestBlockNumber - bnh.BlockNumber = &bn - return nil - case "latest": - bn := LatestBlockNumber - bnh.BlockNumber = &bn - return nil - case "pending": - bn := PendingBlockNumber - bnh.BlockNumber = &bn - return nil - case "finalized": - bn := FinalizedBlockNumber - bnh.BlockNumber = &bn - return nil - case "safe": - bn := SafeBlockNumber - bnh.BlockNumber = &bn - return nil - default: - if len(input) == 66 { - hash := common.Hash{} - err := hash.UnmarshalText([]byte(input)) - if err != nil { - return err - } - bnh.BlockHash = &hash - return nil - } else { - blckNum, err := hexutil.DecodeUint64(input) - if err != nil { - return err - } - if blckNum > math.MaxInt64 { - return fmt.Errorf("blocknumber too high") - } - bn := BlockNumber(blckNum) - bnh.BlockNumber = &bn - return nil - } - } -} - -func (bnh *BlockNumberOrHash) Number() (BlockNumber, bool) { - if bnh.BlockNumber != nil { - return *bnh.BlockNumber, true - } - return BlockNumber(0), false -} - -func (bnh *BlockNumberOrHash) String() string { - if bnh.BlockNumber != nil { - return strconv.Itoa(int(*bnh.BlockNumber)) - } - if bnh.BlockHash != nil { - return bnh.BlockHash.String() - } - return "nil" -} - -func (bnh *BlockNumberOrHash) Hash() (common.Hash, bool) { - if bnh.BlockHash != nil { - return *bnh.BlockHash, true - } - return common.Hash{}, false -} - -func BlockNumberOrHashWithNumber(blockNr BlockNumber) BlockNumberOrHash { - return BlockNumberOrHash{ - BlockNumber: &blockNr, - BlockHash: nil, - RequireCanonical: false, - } -} - -func BlockNumberOrHashWithHash(hash common.Hash, canonical bool) BlockNumberOrHash { - return BlockNumberOrHash{ - BlockNumber: nil, - BlockHash: &hash, - RequireCanonical: canonical, - } -} - -// DecimalOrHex unmarshals a non-negative decimal or hex parameter into a uint64. -type DecimalOrHex uint64 - -// UnmarshalJSON implements json.Unmarshaler. 
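DecimalOrHex, removed above, accepts either a decimal string or a 0x-prefixed hex string and parses both into a uint64, trying decimal first. A sketch of that fallback using only the standard library (the removed code used go-ethereum's hexutil for the hex branch):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseDecimalOrHex mimics the removed DecimalOrHex.UnmarshalJSON: strip
// surrounding quotes, try decimal first, then fall back to 0x-prefixed hex.
func parseDecimalOrHex(data string) (uint64, error) {
	input := strings.TrimSpace(data)
	if len(input) >= 2 && input[0] == '"' && input[len(input)-1] == '"' {
		input = input[1 : len(input)-1]
	}
	if v, err := strconv.ParseUint(input, 10, 64); err == nil {
		return v, nil
	}
	hexPart := strings.TrimPrefix(strings.ToLower(input), "0x")
	if hexPart == strings.ToLower(input) {
		return 0, fmt.Errorf("value %q is neither decimal nor 0x-prefixed hex", data)
	}
	return strconv.ParseUint(hexPart, 16, 64)
}

func main() {
	for _, in := range []string{`"1024"`, `"0x400"`, `"latest"`} {
		v, err := parseDecimalOrHex(in)
		fmt.Println(in, "->", v, err)
	}
}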
-func (dh *DecimalOrHex) UnmarshalJSON(data []byte) error { - input := strings.TrimSpace(string(data)) - if len(input) >= 2 && input[0] == '"' && input[len(input)-1] == '"' { - input = input[1 : len(input)-1] - } - - value, err := strconv.ParseUint(input, 10, 64) - if err != nil { - value, err = hexutil.DecodeUint64(input) - } - if err != nil { - return err - } - *dh = DecimalOrHex(value) - return nil -} diff --git a/relayer/chainproxy/rpcclient/types_test.go b/relayer/chainproxy/rpcclient/types_test.go deleted file mode 100755 index 2d34568d06..0000000000 --- a/relayer/chainproxy/rpcclient/types_test.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package rpcclient - -import ( - "encoding/json" - "reflect" - "testing" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/math" -) - -func TestBlockNumberJSONUnmarshal(t *testing.T) { - tests := []struct { - input string - mustFail bool - expected BlockNumber - }{ - 0: {`"0x"`, true, BlockNumber(0)}, - 1: {`"0x0"`, false, BlockNumber(0)}, - 2: {`"0X1"`, false, BlockNumber(1)}, - 3: {`"0x00"`, true, BlockNumber(0)}, - 4: {`"0x01"`, true, BlockNumber(0)}, - 5: {`"0x1"`, false, BlockNumber(1)}, - 6: {`"0x12"`, false, BlockNumber(18)}, - 7: {`"0x7fffffffffffffff"`, false, BlockNumber(math.MaxInt64)}, - 8: {`"0x8000000000000000"`, true, BlockNumber(0)}, - 9: {"0", true, BlockNumber(0)}, - 10: {`"ff"`, true, BlockNumber(0)}, - 11: {`"pending"`, false, PendingBlockNumber}, - 12: {`"latest"`, false, LatestBlockNumber}, - 13: {`"earliest"`, false, EarliestBlockNumber}, - 14: {`someString`, true, BlockNumber(0)}, - 15: {`""`, true, BlockNumber(0)}, - 16: {``, true, BlockNumber(0)}, - } - - for i, test := range tests { - var num BlockNumber - err := json.Unmarshal([]byte(test.input), &num) - if test.mustFail && err == nil { - t.Errorf("Test %d should fail", i) - continue - } - if !test.mustFail && err != nil { - t.Errorf("Test %d should pass but got err: %v", i, err) - continue - } - if num != test.expected { - t.Errorf("Test %d got unexpected value, want %d, got %d", i, test.expected, num) - } - } -} - -func TestBlockNumberOrHash_UnmarshalJSON(t *testing.T) { - tests := []struct { - input string - mustFail bool - expected BlockNumberOrHash - }{ - 0: {`"0x"`, true, BlockNumberOrHash{}}, - 1: {`"0x0"`, false, BlockNumberOrHashWithNumber(0)}, - 2: {`"0X1"`, false, BlockNumberOrHashWithNumber(1)}, - 3: {`"0x00"`, true, BlockNumberOrHash{}}, - 4: {`"0x01"`, true, BlockNumberOrHash{}}, - 5: {`"0x1"`, false, BlockNumberOrHashWithNumber(1)}, - 6: {`"0x12"`, false, BlockNumberOrHashWithNumber(18)}, - 7: {`"0x7fffffffffffffff"`, false, BlockNumberOrHashWithNumber(math.MaxInt64)}, - 8: {`"0x8000000000000000"`, true, BlockNumberOrHash{}}, - 9: {"0", true, BlockNumberOrHash{}}, - 
10: {`"ff"`, true, BlockNumberOrHash{}}, - 11: {`"pending"`, false, BlockNumberOrHashWithNumber(PendingBlockNumber)}, - 12: {`"latest"`, false, BlockNumberOrHashWithNumber(LatestBlockNumber)}, - 13: {`"earliest"`, false, BlockNumberOrHashWithNumber(EarliestBlockNumber)}, - 14: {`someString`, true, BlockNumberOrHash{}}, - 15: {`""`, true, BlockNumberOrHash{}}, - 16: {``, true, BlockNumberOrHash{}}, - 17: {`"0x0000000000000000000000000000000000000000000000000000000000000000"`, false, BlockNumberOrHashWithHash(common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"), false)}, - 18: {`{"blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000"}`, false, BlockNumberOrHashWithHash(common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"), false)}, - 19: {`{"blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000","requireCanonical":false}`, false, BlockNumberOrHashWithHash(common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"), false)}, - 20: {`{"blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000","requireCanonical":true}`, false, BlockNumberOrHashWithHash(common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"), true)}, - 21: {`{"blockNumber":"0x1"}`, false, BlockNumberOrHashWithNumber(1)}, - 22: {`{"blockNumber":"pending"}`, false, BlockNumberOrHashWithNumber(PendingBlockNumber)}, - 23: {`{"blockNumber":"latest"}`, false, BlockNumberOrHashWithNumber(LatestBlockNumber)}, - 24: {`{"blockNumber":"earliest"}`, false, BlockNumberOrHashWithNumber(EarliestBlockNumber)}, - 25: {`{"blockNumber":"0x1", "blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000"}`, true, BlockNumberOrHash{}}, - } - - for i, test := range tests { - var bnh BlockNumberOrHash - err := json.Unmarshal([]byte(test.input), &bnh) - if test.mustFail && err == nil { - t.Errorf("Test %d should fail", i) - continue - } - if !test.mustFail && err != nil { - t.Errorf("Test %d should pass but got err: %v", i, err) - continue - } - hash, hashOk := bnh.Hash() - expectedHash, expectedHashOk := test.expected.Hash() - num, numOk := bnh.Number() - expectedNum, expectedNumOk := test.expected.Number() - if bnh.RequireCanonical != test.expected.RequireCanonical || - hash != expectedHash || hashOk != expectedHashOk || - num != expectedNum || numOk != expectedNumOk { - t.Errorf("Test %d got unexpected value, want %v, got %v", i, test.expected, bnh) - } - } -} - -func TestBlockNumberOrHash_WithNumber_MarshalAndUnmarshal(t *testing.T) { - tests := []struct { - name string - number int64 - }{ - {"max", math.MaxInt64}, - {"pending", int64(PendingBlockNumber)}, - {"latest", int64(LatestBlockNumber)}, - {"earliest", int64(EarliestBlockNumber)}, - } - for _, test := range tests { - test := test - t.Run(test.name, func(t *testing.T) { - bnh := BlockNumberOrHashWithNumber(BlockNumber(test.number)) - marshalled, err := json.Marshal(bnh) - if err != nil { - t.Fatal("cannot marshal:", err) - } - var unmarshalled BlockNumberOrHash - err = json.Unmarshal(marshalled, &unmarshalled) - if err != nil { - t.Fatal("cannot unmarshal:", err) - } - if !reflect.DeepEqual(bnh, unmarshalled) { - t.Fatalf("wrong result: expected %v, got %v", bnh, unmarshalled) - } - }) - } -} diff --git a/relayer/chainproxy/rpcclient/websocket.go b/relayer/chainproxy/rpcclient/websocket.go deleted file mode 100755 index b82710e93a..0000000000 --- 
a/relayer/chainproxy/rpcclient/websocket.go +++ /dev/null @@ -1,324 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package rpcclient - -import ( - "context" - "encoding/base64" - "fmt" - "net/http" - "net/url" - "os" - "strings" - "sync" - "time" - - mapset "github.com/deckarep/golang-set" - "github.com/ethereum/go-ethereum/log" - "github.com/gorilla/websocket" -) - -const ( - wsReadBuffer = 1024 - wsWriteBuffer = 1024 - wsPingInterval = 60 * time.Second - wsPingWriteTimeout = 5 * time.Second - wsPongTimeout = 30 * time.Second - wsMessageSizeLimit = 15 * 1024 * 1024 -) - -var wsBufferPool = new(sync.Pool) - -// WebsocketHandler returns a handler that serves JSON-RPC to WebSocket connections. -// -// allowedOrigins should be a comma-separated list of allowed origin URLs. -// To allow connections with any origin, pass "*". -func (s *Server) WebsocketHandler(allowedOrigins []string) http.Handler { - upgrader := websocket.Upgrader{ - ReadBufferSize: wsReadBuffer, - WriteBufferSize: wsWriteBuffer, - WriteBufferPool: wsBufferPool, - CheckOrigin: wsHandshakeValidator(allowedOrigins), - } - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - conn, err := upgrader.Upgrade(w, r, nil) - if err != nil { - log.Debug("WebSocket upgrade failed", "err", err) - return - } - codec := newWebsocketCodec(conn, r.Host, r.Header) - s.ServeCodec(codec, 0) - }) -} - -// wsHandshakeValidator returns a handler that verifies the origin during the -// websocket upgrade process. When a '*' is specified as an allowed origins all -// connections are accepted. -func wsHandshakeValidator(allowedOrigins []string) func(*http.Request) bool { - origins := mapset.NewSet() - allowAllOrigins := false - - for _, origin := range allowedOrigins { - if origin == "*" { - allowAllOrigins = true - } - if origin != "" { - origins.Add(origin) - } - } - // allow localhost if no allowedOrigins are specified. - if len(origins.ToSlice()) == 0 { - origins.Add("http://localhost") - if hostname, err := os.Hostname(); err == nil { - origins.Add("http://" + hostname) - } - } - log.Debug(fmt.Sprintf("Allowed origin(s) for WS RPC interface %v", origins.ToSlice())) - - f := func(req *http.Request) bool { - // Skip origin verification if no Origin header is present. The origin check - // is supposed to protect against browser based attacks. Browsers always set - // Origin. Non-browser software can put anything in origin and checking it doesn't - // provide additional security. - if _, ok := req.Header["Origin"]; !ok { - return true - } - // Verify origin against allow list. 
- origin := strings.ToLower(req.Header.Get("Origin")) - if allowAllOrigins || originIsAllowed(origins, origin) { - return true - } - log.Warn("Rejected WebSocket connection", "origin", origin) - return false - } - - return f -} - -type wsHandshakeError struct { - err error - status string -} - -func (e wsHandshakeError) Error() string { - s := e.err.Error() - if e.status != "" { - s += " (HTTP status " + e.status + ")" - } - return s -} - -func originIsAllowed(allowedOrigins mapset.Set, browserOrigin string) bool { - it := allowedOrigins.Iterator() - for origin := range it.C { - originString, ok := origin.(string) - if !ok { - panic("originIsAllowed - origin.(string) - type assertion failed: " + fmt.Sprintf("%s", origin)) - } - if ruleAllowsOrigin(originString, browserOrigin) { - return true - } - } - return false -} - -func ruleAllowsOrigin(allowedOrigin string, browserOrigin string) bool { - var ( - allowedScheme, allowedHostname, allowedPort string - browserScheme, browserHostname, browserPort string - err error - ) - allowedScheme, allowedHostname, allowedPort, err = parseOriginURL(allowedOrigin) - if err != nil { - log.Warn("Error parsing allowed origin specification", "spec", allowedOrigin, "error", err) - return false - } - browserScheme, browserHostname, browserPort, err = parseOriginURL(browserOrigin) - if err != nil { - log.Warn("Error parsing browser 'Origin' field", "Origin", browserOrigin, "error", err) - return false - } - if allowedScheme != "" && allowedScheme != browserScheme { - return false - } - if allowedHostname != "" && allowedHostname != browserHostname { - return false - } - if allowedPort != "" && allowedPort != browserPort { - return false - } - return true -} - -func parseOriginURL(origin string) (string, string, string, error) { - parsedURL, err := url.Parse(strings.ToLower(origin)) - if err != nil { - return "", "", "", err - } - var scheme, hostname, port string - if strings.Contains(origin, "://") { - scheme = parsedURL.Scheme - hostname = parsedURL.Hostname() - port = parsedURL.Port() - } else { - scheme = "" - hostname = parsedURL.Scheme - port = parsedURL.Opaque - if hostname == "" { - hostname = origin - } - } - return scheme, hostname, port, nil -} - -// DialWebsocketWithDialer creates a new RPC client that communicates with a JSON-RPC server -// that is listening on the given endpoint using the provided dialer. -func DialWebsocketWithDialer(ctx context.Context, endpoint, origin string, dialer websocket.Dialer) (*Client, error) { - endpoint, header, err := wsClientHeaders(endpoint, origin) - if err != nil { - return nil, err - } - return newClient(ctx, func(ctx context.Context) (ServerCodec, error) { - conn, resp, err := dialer.DialContext(ctx, endpoint, header) - if err != nil { - hErr := wsHandshakeError{err: err} - if resp != nil { - hErr.status = resp.Status - } - return nil, hErr - } - return newWebsocketCodec(conn, endpoint, header), nil - }) -} - -// DialWebsocket creates a new RPC client that communicates with a JSON-RPC server -// that is listening on the given endpoint. -// -// The context is used for the initial connection establishment. It does not -// affect subsequent interactions with the client. 
-func DialWebsocket(ctx context.Context, endpoint, origin string) (*Client, error) { - dialer := websocket.Dialer{ - ReadBufferSize: wsReadBuffer, - WriteBufferSize: wsWriteBuffer, - WriteBufferPool: wsBufferPool, - } - return DialWebsocketWithDialer(ctx, endpoint, origin, dialer) -} - -func wsClientHeaders(endpoint, origin string) (string, http.Header, error) { - endpointURL, err := url.Parse(endpoint) - if err != nil { - return endpoint, nil, err - } - header := make(http.Header) - if origin != "" { - header.Add("origin", origin) - } - if endpointURL.User != nil { - b64auth := base64.StdEncoding.EncodeToString([]byte(endpointURL.User.String())) - header.Add("authorization", "Basic "+b64auth) - endpointURL.User = nil - } - return endpointURL.String(), header, nil -} - -type websocketCodec struct { - *jsonCodec - conn *websocket.Conn - info PeerInfo - - wg sync.WaitGroup - pingReset chan struct{} -} - -func newWebsocketCodec(conn *websocket.Conn, host string, req http.Header) ServerCodec { - conn.SetReadLimit(wsMessageSizeLimit) - conn.SetPongHandler(func(appData string) error { - conn.SetReadDeadline(time.Time{}) - return nil - }) - newCodec := NewFuncCodec(conn, conn.WriteJSON, conn.ReadJSON) - codec, ok := newCodec.(*jsonCodec) - if !ok { - panic("newWebsocketCodec - newCodec.(*jsonCodec) - type assertion failed, type:" + fmt.Sprintf("%s", newCodec)) - } - - wc := &websocketCodec{ - jsonCodec: codec, - conn: conn, - pingReset: make(chan struct{}, 1), - info: PeerInfo{ - Transport: "ws", - RemoteAddr: conn.RemoteAddr().String(), - }, - } - // Fill in connection details. - wc.info.HTTP.Host = host - wc.info.HTTP.Origin = req.Get("Origin") - wc.info.HTTP.UserAgent = req.Get("User-Agent") - // Start pinger. - wc.wg.Add(1) - go wc.pingLoop() - return wc -} - -func (wc *websocketCodec) close() { - wc.jsonCodec.close() - wc.wg.Wait() -} - -func (wc *websocketCodec) peerInfo() PeerInfo { - return wc.info -} - -func (wc *websocketCodec) writeJSON(ctx context.Context, v interface{}) error { - err := wc.jsonCodec.writeJSON(ctx, v) - if err == nil { - // Notify pingLoop to delay the next idle ping. - select { - case wc.pingReset <- struct{}{}: - default: - } - } - return err -} - -// pingLoop sends periodic ping frames when the connection is idle. 
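// Each successful writeJSON pushes into the buffered pingReset channel, which restarts the idle timer below; when the timer fires, the ping is written under encMu with a wsPingWriteTimeout write deadline, and a wsPongTimeout read deadline is armed until the pong handler installed in newWebsocketCodec clears it.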
-func (wc *websocketCodec) pingLoop() { - timer := time.NewTimer(wsPingInterval) - defer wc.wg.Done() - defer timer.Stop() - - for { - select { - case <-wc.closed(): - return - case <-wc.pingReset: - if !timer.Stop() { - <-timer.C - } - timer.Reset(wsPingInterval) - case <-timer.C: - wc.jsonCodec.encMu.Lock() - wc.conn.SetWriteDeadline(time.Now().Add(wsPingWriteTimeout)) - wc.conn.WriteMessage(websocket.PingMessage, nil) - wc.conn.SetReadDeadline(time.Now().Add(wsPongTimeout)) - wc.jsonCodec.encMu.Unlock() - timer.Reset(wsPingInterval) - } - } -} diff --git a/relayer/chainproxy/tendermintRPC.go b/relayer/chainproxy/tendermintRPC.go deleted file mode 100644 index 5af8c76e9b..0000000000 --- a/relayer/chainproxy/tendermintRPC.go +++ /dev/null @@ -1,715 +0,0 @@ -package chainproxy - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "reflect" - "strings" - "time" - - "github.com/lavanet/lava/relayer/metrics" - "github.com/spf13/pflag" - - "github.com/btcsuite/btcd/btcec" - "github.com/gofiber/fiber/v2" - "github.com/gofiber/fiber/v2/middleware/favicon" - "github.com/gofiber/websocket/v2" - "github.com/lavanet/lava/protocol/lavasession" - "github.com/lavanet/lava/relayer/chainproxy/rpcclient" - - "github.com/lavanet/lava/relayer/parser" - "github.com/lavanet/lava/relayer/sentry" - "github.com/lavanet/lava/utils" - pairingtypes "github.com/lavanet/lava/x/pairing/types" - spectypes "github.com/lavanet/lava/x/spec/types" - tenderminttypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" -) - -const TendermintProviderHttpEndpoint = "tendermint-http-endpoint" - -type TendemintRpcMessage struct { - JrpcMessage - cp *tendermintRpcChainProxy - path string -} - -type tendermintRpcChainProxy struct { - // embedding the jrpc chain proxy because the only diff is on parse message - JrpcChainProxy - httpUrl string -} - -func (m TendemintRpcMessage) GetParams() interface{} { - return m.msg.Params -} - -func (m TendemintRpcMessage) GetResult() json.RawMessage { - return m.msg.Result -} - -func (m TendemintRpcMessage) ParseBlock(inp string) (int64, error) { - return parser.ParseDefaultBlockParameter(inp) -} - -func (cp *tendermintRpcChainProxy) FetchLatestBlockNum(ctx context.Context) (int64, error) { - serviceApi, ok := cp.GetSentry().GetSpecApiByTag(spectypes.GET_BLOCKNUM) - if !ok { - return spectypes.NOT_APPLICABLE, errors.New(spectypes.GET_BLOCKNUM + " tag function not found") - } - - params := []interface{}{} - nodeMsg, err := cp.newMessage(&serviceApi, spectypes.LATEST_BLOCK, params, http.MethodGet) - if err != nil { - return spectypes.NOT_APPLICABLE, err - } - - _, _, _, err = nodeMsg.Send(ctx, nil) - if err != nil { - return spectypes.NOT_APPLICABLE, utils.LavaFormatError("Error On Send FetchLatestBlockNum", err, &map[string]string{"nodeUrl": cp.nodeUrl}) - } - - msgParsed, ok := nodeMsg.GetMsg().(*JsonrpcMessage) - if !ok { - return spectypes.NOT_APPLICABLE, fmt.Errorf("FetchLatestBlockNum - nodeMsg.GetMsg().(*JsonrpcMessage) - type assertion failed, type:" + fmt.Sprintf("%s", nodeMsg.GetMsg())) - } - blocknum, err := parser.ParseBlockFromReply(msgParsed, serviceApi.Parsing.ResultParsing) - if err != nil { - return spectypes.NOT_APPLICABLE, err - } - - return blocknum, nil -} - -func (cp *tendermintRpcChainProxy) GetConsumerSessionManager() *lavasession.ConsumerSessionManager { - return cp.csm -} - -func (cp *tendermintRpcChainProxy) FetchBlockHashByNum(ctx context.Context, blockNum int64) (string, error) { - serviceApi, ok := 
cp.GetSentry().GetSpecApiByTag(spectypes.GET_BLOCK_BY_NUM) - if !ok { - return "", errors.New(spectypes.GET_BLOCK_BY_NUM + " tag function not found") - } - - var nodeMsg NodeMessage - var err error - if serviceApi.GetParsing().FunctionTemplate != "" { - nodeMsg, err = cp.ParseMsg("", []byte(fmt.Sprintf(serviceApi.Parsing.FunctionTemplate, blockNum)), http.MethodGet) - } else { - params := make([]interface{}, 0) - params = append(params, blockNum) - nodeMsg, err = cp.newMessage(&serviceApi, spectypes.LATEST_BLOCK, params, http.MethodGet) - } - - if err != nil { - return "", err - } - - _, _, _, err = nodeMsg.Send(ctx, nil) - if err != nil { - return "", utils.LavaFormatError("Error On Send FetchBlockHashByNum", err, &map[string]string{"nodeUrl": cp.nodeUrl}) - } - - msg, ok := nodeMsg.GetMsg().(*JsonrpcMessage) - if !ok { - return "", fmt.Errorf("FetchBlockHashByNum - nodeMsg.GetMsg().(*JsonrpcMessage) - type assertion failed, type:" + fmt.Sprintf("%s", nodeMsg.GetMsg())) - } - blockData, err := parser.ParseMessageResponse(msg, serviceApi.Parsing.ResultParsing) - if err != nil { - return "", utils.LavaFormatError("Failed To Parse FetchLatestBlockNum", err, &map[string]string{ - "nodeUrl": cp.nodeUrl, - "Method": msg.Method, - "Response": string(msg.Result), - }) - } - - // blockData is an interface array with the parsed result in index 0. - // we know to expect a string result for a hash. - hash, ok := blockData[spectypes.DEFAULT_PARSED_RESULT_INDEX].(string) - if !ok { - return "", errors.New("hash not string parsable") - } - - return hash, nil -} - -func verifyTendermintNodeURL(nodeUrl string, flagSet *pflag.FlagSet) string { - var httpUrl string - if nodeUrl != "" { // provider process - // verifyRPCendpoint(nodeUrl) // verify websocket - var err error - httpUrl, err = flagSet.GetString(TendermintProviderHttpEndpoint) - if err != nil { - utils.LavaFormatFatal("Error fetching rpc provider flag.", err, nil) - } - if httpUrl == "" { - httpUrl = nodeUrl - // utils.LavaFormatFatal("http endpoint was not set for tendermint provider, please add the following flag: --"+TendermintProviderHttpEndpoint, err, nil) - } - } - return httpUrl -} - -func NewtendermintRpcChainProxy(nodeUrl string, nConns uint, sentry *sentry.Sentry, csm *lavasession.ConsumerSessionManager, pLogs *PortalLogs, flagSet *pflag.FlagSet) ChainProxy { - httpUrl := verifyTendermintNodeURL(nodeUrl, flagSet) - return &tendermintRpcChainProxy{ - JrpcChainProxy: JrpcChainProxy{ - nodeUrl: nodeUrl, - nConns: nConns, - sentry: sentry, - portalLogs: pLogs, - csm: csm, - }, - httpUrl: httpUrl, - } -} - -func (cp *tendermintRpcChainProxy) newMessage(serviceApi *spectypes.ServiceApi, requestedBlock int64, params []interface{}, connectionType string) (*TendemintRpcMessage, error) { - var apiInterface *spectypes.ApiInterface = nil - for i := range serviceApi.ApiInterfaces { - if serviceApi.ApiInterfaces[i].Type == connectionType { - apiInterface = &serviceApi.ApiInterfaces[i] - break - } - } - if apiInterface == nil { - return nil, fmt.Errorf("could not find the interface %s in the service %s", connectionType, serviceApi.Name) - } - - nodeMsg := &TendemintRpcMessage{ - JrpcMessage: JrpcMessage{ - serviceApi: serviceApi, - apiInterface: apiInterface, - msg: &JsonrpcMessage{ - Version: "2.0", - ID: []byte("1"), // TODO:: use ids - Method: serviceApi.GetName(), - Params: params, - }, - requestedBlock: requestedBlock, - }, - cp: cp, - } - return nodeMsg, nil -} - -func (cp *tendermintRpcChainProxy) ParseMsg(path string, data []byte, 
connectionType string) (NodeMessage, error) { - // connectionType is currently only used in rest api - // Unmarshal request - var msg JsonrpcMessage - if string(data) != "" { - // assuming jsonrpc - err := json.Unmarshal(data, &msg) - if err != nil { - return nil, err - } - } else { - // assuming URI - var parsedMethod string - idx := strings.Index(path, "?") - if idx == -1 { - parsedMethod = path - } else { - parsedMethod = path[0:idx] - } - - msg = JsonrpcMessage{ - ID: []byte("1"), - Version: "2.0", - Method: parsedMethod, - } - if strings.Contains(path[idx+1:], "=") { - params := make(map[string]interface{}) - rawParams := strings.Split(path[idx+1:], "&") // list with structure ['height=0x500',...] - for _, param := range rawParams { - splitParam := strings.Split(param, "=") - if len(splitParam) != 2 { - return nil, utils.LavaFormatError("Cannot parse query params", nil, &map[string]string{"params": param}) - } - params[splitParam[0]] = splitParam[1] - } - msg.Params = params - } else { - msg.Params = make(map[string]interface{}, 0) - } - } - - // Check api is supported and save it in nodeMsg - serviceApi, err := cp.getSupportedApi(msg.Method) - if err != nil { - return nil, utils.LavaFormatError("getSupportedApi failed", err, &map[string]string{"method": msg.Method}) - } - - // Extract default block parser - blockParser := serviceApi.BlockParsing - - // Find matched api interface by connection type - var apiInterface *spectypes.ApiInterface = nil - for i := range serviceApi.ApiInterfaces { - if serviceApi.ApiInterfaces[i].Type == connectionType { - apiInterface = &serviceApi.ApiInterfaces[i] - break - } - } - if apiInterface == nil { - return nil, fmt.Errorf("could not find the interface %s in the service %s", connectionType, serviceApi.Name) - } - - // Check if custom block parser exists in the api interface - // Use custom block parser only for URI calls - if apiInterface.GetOverwriteBlockParsing() != nil && path != "" { - blockParser = *apiInterface.GetOverwriteBlockParsing() - } - - // Fetch requested block, it is used for data reliability - requestedBlock, err := parser.ParseBlockFromParams(msg, blockParser) - if err != nil { - return nil, err - } - - var extraTimeout time.Duration - if apiInterface.Category.HangingApi { - extraTimeout = time.Duration(cp.sentry.GetAverageBlockTime()) * time.Millisecond - } - - nodeMsg := &TendemintRpcMessage{ - JrpcMessage: JrpcMessage{ - serviceApi: serviceApi, - apiInterface: apiInterface, - msg: &msg, - requestedBlock: requestedBlock, - extendContextTimeout: extraTimeout, - }, - path: path, - cp: cp, - } - return nodeMsg, nil -} - -func (cp *tendermintRpcChainProxy) PortalStart(ctx context.Context, privKey *btcec.PrivateKey, listenAddr string) { - // - // Setup HTTP Server - app := fiber.New(fiber.Config{}) - chainID := cp.GetSentry().ChainID - apiInterface := cp.GetSentry().ApiInterface - - app.Use(favicon.New()) - - app.Use("/ws/:dappId", func(c *fiber.Ctx) error { - cp.portalLogs.LogStartTransaction("tendermint-WebSocket") - // IsWebSocketUpgrade returns true if the client - // requested upgrade to the WebSocket protocol. 
- if websocket.IsWebSocketUpgrade(c) { - c.Locals("allowed", true) - return c.Next() - } - return fiber.ErrUpgradeRequired - }) - webSocketCallback := websocket.New(func(c *websocket.Conn) { - var ( - mt int - msg []byte - err error - ) - msgSeed := cp.portalLogs.GetMessageSeed() - for { - if mt, msg, err = c.ReadMessage(); err != nil { - cp.portalLogs.AnalyzeWebSocketErrorAndWriteMessage(c, mt, err, msgSeed, msg, "tendermint") - break - } - dappID := ExtractDappIDFromWebsocketConnection(c) - utils.LavaFormatInfo("ws in <<<", &map[string]string{"seed": msgSeed, "msg": string(msg), "dappID": dappID}) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() // incase there's a problem make sure to cancel the connection - metricsData := metrics.NewRelayAnalytics(dappID, chainID, apiInterface) - reply, replyServer, err := SendRelay(ctx, cp, privKey, "", string(msg), http.MethodGet, dappID, metricsData) - go cp.portalLogs.AddMetricForWebSocket(metricsData, err, c) - if err != nil { - cp.portalLogs.AnalyzeWebSocketErrorAndWriteMessage(c, mt, err, msgSeed, msg, "tendermint") - continue - } - // If subscribe the first reply would contain the RPC ID that can be used for disconnect. - if replyServer != nil { - var reply pairingtypes.RelayReply - err = (*replyServer).RecvMsg(&reply) // this reply contains the RPC ID - if err != nil { - cp.portalLogs.AnalyzeWebSocketErrorAndWriteMessage(c, mt, err, msgSeed, msg, "tendermint") - continue - } - - if err = c.WriteMessage(mt, reply.Data); err != nil { - cp.portalLogs.AnalyzeWebSocketErrorAndWriteMessage(c, mt, err, msgSeed, msg, "tendermint") - continue - } - cp.portalLogs.LogRequestAndResponse("tendermint ws", false, "ws", c.LocalAddr().String(), string(msg), string(reply.Data), msgSeed, nil) - for { - err = (*replyServer).RecvMsg(&reply) - if err != nil { - cp.portalLogs.AnalyzeWebSocketErrorAndWriteMessage(c, mt, err, msgSeed, msg, "tendermint") - break - } - - // If portal cant write to the client - if err = c.WriteMessage(mt, reply.Data); err != nil { - cancel() - cp.portalLogs.AnalyzeWebSocketErrorAndWriteMessage(c, mt, err, msgSeed, msg, "tendermint") - // break - } - cp.portalLogs.LogRequestAndResponse("tendermint ws", false, "ws", c.LocalAddr().String(), string(msg), string(reply.Data), msgSeed, nil) - } - } else { - if err = c.WriteMessage(mt, reply.Data); err != nil { - cp.portalLogs.AnalyzeWebSocketErrorAndWriteMessage(c, mt, err, msgSeed, msg, "tendermint") - continue - } - cp.portalLogs.LogRequestAndResponse("tendermint ws", false, "ws", c.LocalAddr().String(), string(msg), string(reply.Data), msgSeed, nil) - } - } - }) - websocketCallbackWithDappID := constructFiberCallbackWithHeaderAndParameterExtraction(webSocketCallback, cp.portalLogs.StoreMetricData) - app.Get("/ws/:dappId", websocketCallbackWithDappID) - app.Get("/:dappId/websocket", websocketCallbackWithDappID) // catching http://HOST:PORT/1/websocket requests. 
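The URI branch of ParseMsg above (exercised by the GET route registered below) treats everything before the first "?" as the JSON-RPC method and turns each "key=value" pair after it into an entry of the params map. A minimal standalone sketch of just that parsing step; the helper name parseURIQueryParams is illustrative and not part of this patch:

```go
package main

import (
	"fmt"
	"strings"
)

// parseURIQueryParams mirrors the URI branch of ParseMsg above: the text
// before '?' is the method, and each key=value pair after it becomes a
// params entry. The helper name is illustrative only.
func parseURIQueryParams(path string) (method string, params map[string]interface{}, err error) {
	params = map[string]interface{}{}
	idx := strings.Index(path, "?")
	if idx == -1 {
		return path, params, nil
	}
	method = path[:idx]
	query := path[idx+1:]
	if !strings.Contains(query, "=") {
		return method, params, nil
	}
	for _, raw := range strings.Split(query, "&") {
		kv := strings.Split(raw, "=")
		if len(kv) != 2 {
			return "", nil, fmt.Errorf("cannot parse query param: %q", raw)
		}
		params[kv[0]] = kv[1]
	}
	return method, params, nil
}

func main() {
	method, params, err := parseURIQueryParams("block?height=0x500")
	fmt.Println(method, params, err) // block map[height:0x500] <nil>
}
```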
- - app.Post("/:dappId/*", func(c *fiber.Ctx) error { - cp.portalLogs.LogStartTransaction("tendermint-WebSocket") - msgSeed := cp.portalLogs.GetMessageSeed() - dappID := ExtractDappIDFromFiberContext(c) - utils.LavaFormatInfo("in <<<", &map[string]string{"seed": msgSeed, "msg": string(c.Body()), "dappID": dappID}) - metricsData := metrics.NewRelayAnalytics(dappID, chainID, apiInterface) - reply, _, err := SendRelay(ctx, cp, privKey, "", string(c.Body()), http.MethodGet, dappID, metricsData) - go cp.portalLogs.AddMetricForHttp(metricsData, err, c.GetReqHeaders()) - if err != nil { - // Get unique GUID response - errMasking := cp.portalLogs.GetUniqueGuidResponseForError(err, msgSeed) - - // Log request and response - cp.portalLogs.LogRequestAndResponse("tendermint http in/out", true, "POST", c.Request().URI().String(), string(c.Body()), errMasking, msgSeed, err) - - // Set status to internal error - c.Status(fiber.StatusInternalServerError) - - // Construct json (tendermint) response - response := convertToTendermintError(errMasking, c.Body()) - - // Return error json response - return c.SendString(response) - } - // Log request and response - cp.portalLogs.LogRequestAndResponse("tendermint http in/out", false, "POST", c.Request().URI().String(), string(c.Body()), string(reply.Data), msgSeed, nil) - - // Return json response - return c.SendString(string(reply.Data)) - }) - - app.Get("/:dappId/*", func(c *fiber.Ctx) error { - cp.portalLogs.LogStartTransaction("tendermint-WebSocket") - - query := "?" + string(c.Request().URI().QueryString()) - path := c.Params("*") - dappID := ExtractDappIDFromFiberContext(c) - msgSeed := cp.portalLogs.GetMessageSeed() - utils.LavaFormatInfo("urirpc in <<<", &map[string]string{"seed": msgSeed, "msg": path, "dappID": dappID}) - metricsData := metrics.NewRelayAnalytics(dappID, chainID, apiInterface) - reply, _, err := SendRelay(ctx, cp, privKey, path+query, "", http.MethodGet, dappID, metricsData) - go cp.portalLogs.AddMetricForHttp(metricsData, err, c.GetReqHeaders()) - if err != nil { - // Get unique GUID response - errMasking := cp.portalLogs.GetUniqueGuidResponseForError(err, msgSeed) - - // Log request and response - cp.portalLogs.LogRequestAndResponse("tendermint http in/out", true, "GET", c.Request().URI().String(), "", errMasking, msgSeed, err) - - // Set status to internal error - c.Status(fiber.StatusInternalServerError) - - if string(c.Body()) != "" { - errMasking = addAttributeToError("recommendation", "For jsonRPC use POST", errMasking) - } - - // Construct json response - response := convertToJsonError(errMasking) - - // Return error json response - return c.SendString(response) - } - // Log request and response - cp.portalLogs.LogRequestAndResponse("tendermint http in/out", false, "GET", c.Request().URI().String(), "", string(reply.Data), msgSeed, nil) - - // Return json response - return c.SendString(string(reply.Data)) - }) - // - // Go - err := app.Listen(listenAddr) - if err != nil { - utils.LavaFormatError("app.Listen(listenAddr)", err, nil) - } -} - -func convertToTendermintError(errString string, inputInfo []byte) string { - var msg JsonrpcMessage - err := json.Unmarshal(inputInfo, &msg) - if err == nil { - id, errId := idFromRawMessage(msg.ID) - if errId != nil { - utils.LavaFormatError("error idFromRawMessage", errId, nil) - return InternalErrorString - } - res, merr := json.Marshal(&RPCResponse{ - JSONRPC: msg.Version, - ID: id, - Error: convertErrorToRPCError(errString, LavaErrorCode), - }) - if merr != nil { - 
utils.LavaFormatError("convertToTendermintError json.Marshal", merr, nil) - return InternalErrorString - } - return string(res) - } - utils.LavaFormatError("error convertToTendermintError", err, nil) - return InternalErrorString -} - -func getTendermintRPCError(jsonError *rpcclient.JsonError) (*tenderminttypes.RPCError, error) { - var rpcError *tenderminttypes.RPCError - if jsonError != nil { - errData, ok := (jsonError.Data).(string) - if !ok { - return nil, utils.LavaFormatError("(rpcMsg.Error.Data).(string) conversion failed", nil, &map[string]string{"data": fmt.Sprintf("%v", jsonError.Data)}) - } - rpcError = &tenderminttypes.RPCError{ - Code: jsonError.Code, - Message: jsonError.Message, - Data: errData, - } - } - return rpcError, nil -} - -func convertErrorToRPCError(errString string, code int) *tenderminttypes.RPCError { - var rpcError *tenderminttypes.RPCError - unmarshalError := json.Unmarshal([]byte(errString), &rpcError) - if unmarshalError != nil || (rpcError.Data == "" && rpcError.Message == "") { - utils.LavaFormatWarning("Failed unmarshalling error tendermintrpc", unmarshalError, &map[string]string{"err": errString}) - rpcError = &tenderminttypes.RPCError{ - Code: code, - Message: "Rpc Error", - Data: errString, - } - } - return rpcError -} - -type jsonrpcId interface { - isJSONRPCID() -} - -// JSONRPCStringID a wrapper for JSON-RPC string IDs -type JSONRPCStringID string - -func (JSONRPCStringID) isJSONRPCID() {} -func (id JSONRPCStringID) String() string { return string(id) } - -// JSONRPCIntID a wrapper for JSON-RPC integer IDs -type JSONRPCIntID int - -func (JSONRPCIntID) isJSONRPCID() {} -func (id JSONRPCIntID) String() string { return fmt.Sprintf("%d", id) } - -func idFromRawMessage(rawID json.RawMessage) (jsonrpcId, error) { - var idInterface interface{} - err := json.Unmarshal(rawID, &idInterface) - if err != nil { - return nil, utils.LavaFormatError("failed to unmarshal id from response", err, &map[string]string{"id": fmt.Sprintf("%v", rawID)}) - } - - switch id := idInterface.(type) { - case string: - return JSONRPCStringID(id), nil - case float64: - // json.Unmarshal uses float64 for all numbers - return JSONRPCIntID(int(id)), nil - default: - typ := reflect.TypeOf(id) - return nil, utils.LavaFormatError("failed to unmarshal id not a string or float", err, &map[string]string{"id": fmt.Sprintf("%v", rawID), "id type": fmt.Sprintf("%v", typ)}) - } -} - -type RPCResponse struct { - JSONRPC string `json:"jsonrpc"` - ID jsonrpcId `json:"id,omitempty"` - Result json.RawMessage `json:"result,omitempty"` - Error *tenderminttypes.RPCError `json:"error,omitempty"` -} - -func convertTendermintMsg(rpcMsg *rpcclient.JsonrpcMessage) (*RPCResponse, error) { - // Return an error if the message was not sent - if rpcMsg == nil { - return nil, ErrFailedToConvertMessage - } - rpcError, err := getTendermintRPCError(rpcMsg.Error) - if err != nil { - return nil, err - } - - jsonid, err := idFromRawMessage(rpcMsg.ID) - if err != nil { - return nil, err - } - msg := &RPCResponse{ - JSONRPC: rpcMsg.Version, - ID: jsonid, - Result: rpcMsg.Result, - Error: rpcError, - } - - return msg, nil -} - -// Send sends either Tendermint RPC or URI call depending on the type -func (nm *TendemintRpcMessage) Send(ctx context.Context, ch chan interface{}) (relayReply *pairingtypes.RelayReply, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) { - // If path exists then the call is URI - if nm.path != "" { - return nm.SendURI(ctx, ch) - } - - // Else do RPC call - return 
nm.SendRPC(ctx, ch) -} - -// SendURI sends URI HTTP call -func (nm *TendemintRpcMessage) SendURI(ctx context.Context, ch chan interface{}) (relayReply *pairingtypes.RelayReply, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) { - // check if the input channel is not nil - if ch != nil { - // return an error if the channel is not nil - return nil, "", nil, utils.LavaFormatError("Subscribe is not allowed on Tendermint URI", nil, nil) - } - - // create a new http client with a timeout set by the getTimePerCu function - httpClient := http.Client{ - Timeout: getTimePerCu(nm.serviceApi.ComputeUnits), - } - - // construct the url by concatenating the node url with the path variable - url := nm.cp.httpUrl + "/" + nm.path - - // create a new http request - connectCtx, cancel := context.WithTimeout(ctx, getTimePerCu(nm.serviceApi.ComputeUnits)+nm.GetExtraContextTimeout()) - defer cancel() - - req, err := http.NewRequestWithContext(connectCtx, http.MethodGet, url, nil) - if err != nil { - return nil, "", nil, err - } - - // send the http request and get the response - res, err := httpClient.Do(req) - if err != nil { - return nil, "", nil, err - } - - // close the response body - if res.Body != nil { - defer res.Body.Close() - } - - // read the response body - body, err := io.ReadAll(res.Body) - if err != nil { - return nil, "", nil, err - } - - // create a new relay reply struct with the response body as the data - reply := &pairingtypes.RelayReply{ - Data: body, - } - - return reply, "", nil, nil -} - -// SendRPC sends Tendermint HTTP or WebSockets call -func (nm *TendemintRpcMessage) SendRPC(ctx context.Context, ch chan interface{}) (relayReply *pairingtypes.RelayReply, subscriptionID string, relayReplyServer *rpcclient.ClientSubscription, err error) { - // Get rpc connection from the connection pool - rpc, err := nm.cp.conn.GetRpc(ctx, true) - if err != nil { - return nil, "", nil, err - } - - // return the rpc connection to the pool after the function completes - defer nm.cp.conn.ReturnRpc(rpc) - - // create variables for the rpc message and reply message - var rpcMessage *rpcclient.JsonrpcMessage - var replyMessage *RPCResponse - var sub *rpcclient.ClientSubscription - - // If ch is not nil do subscription - if ch != nil { - // subscribe to the rpc call if the channel is not nil - sub, rpcMessage, err = rpc.Subscribe(context.Background(), nm.msg.ID, nm.msg.Method, ch, nm.msg.Params) - } else { - // create a context with a timeout set by the getTimePerCu function - connectCtx, cancel := context.WithTimeout(ctx, getTimePerCu(nm.serviceApi.ComputeUnits)+nm.GetExtraContextTimeout()) - defer cancel() - // perform the rpc call - rpcMessage, err = rpc.CallContext(connectCtx, nm.msg.ID, nm.msg.Method, nm.msg.Params) - } - - var replyMsg *RPCResponse - // the error check here would only wrap errors not from the rpc - if err != nil { - if strings.Contains(err.Error(), context.DeadlineExceeded.Error()) { - // Not an rpc error, return provider error without disclosing the endpoint address - return nil, "", nil, utils.LavaFormatError("Failed Sending Message", context.DeadlineExceeded, nil) - } - id, idErr := idFromRawMessage(nm.msg.ID) - if idErr != nil { - return nil, "", nil, utils.LavaFormatError("Failed parsing ID when getting rpc error", idErr, nil) - } - replyMsg = &RPCResponse{ - JSONRPC: nm.msg.Version, - ID: id, - Error: convertErrorToRPCError(err.Error(), -1), // TODO: fetch error code from err. 
- } - } else { - replyMessage, err = convertTendermintMsg(rpcMessage) - if err != nil { - return nil, "", nil, utils.LavaFormatError("tendermingRPC error", err, nil) - } - - replyMsg = replyMessage - nm.msg.Result = replyMessage.Result - } - - // marshal the jsonrpc message to json - data, err := json.Marshal(replyMsg) - if err != nil { - nm.msg.Result = []byte(fmt.Sprintf("%s", err)) - return nil, "", nil, err - } - - // create a new relay reply struct - reply := &pairingtypes.RelayReply{ - Data: data, - } - - if ch != nil { - // get the params for the rpc call - params := nm.msg.Params - - paramsMap, ok := params.(map[string]interface{}) - if !ok { - return nil, "", nil, utils.LavaFormatError("unknown params type on tendermint subscribe", nil, nil) - } - subscriptionID, ok = paramsMap["query"].(string) - if !ok { - return nil, "", nil, utils.LavaFormatError("unknown subscriptionID type on tendermint subscribe", nil, nil) - } - } - - return reply, subscriptionID, sub, err -} diff --git a/relayer/chainsentry/chainSentryErrors.go b/relayer/chainsentry/chainSentryErrors.go deleted file mode 100644 index 78a267dd09..0000000000 --- a/relayer/chainsentry/chainSentryErrors.go +++ /dev/null @@ -1,7 +0,0 @@ -package chainsentry - -import ( - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" -) - -var ErrorFailedToFetchLatestBlock = sdkerrors.New("Error FailedToFetchLatestBlock", 1001, "Failed to fetch latest block from node") diff --git a/relayer/chainsentry/chainsentry.go b/relayer/chainsentry/chainsentry.go deleted file mode 100644 index 4c2bb13b97..0000000000 --- a/relayer/chainsentry/chainsentry.go +++ /dev/null @@ -1,190 +0,0 @@ -package chainsentry - -import ( - "context" - "log" - "strconv" - "sync/atomic" - "time" - - "github.com/cosmos/cosmos-sdk/client" - "github.com/lavanet/lava/relayer/chainproxy" - "github.com/lavanet/lava/relayer/sentry" - "github.com/lavanet/lava/utils" -) - -type ChainSentry struct { - chainProxy chainproxy.ChainProxy - finalizedBlockDistance int // the distance from the latest block to the latest finalized block - in eth its 7 - numFinalBlocks int // how many finalized blocks to keep - latestBlockNum int64 // uint? 
- ChainID string - - quit chan bool - // Spec blockQueueMu (rw mutex) - blockQueueMu utils.LavaMutex - blocksQueue []string // holds all past hashes up until latest block -} - -func (cs *ChainSentry) GetLatestBlockNum() int64 { - return atomic.LoadInt64(&cs.latestBlockNum) -} - -func (cs *ChainSentry) SetLatestBlockNum(value int64) { - atomic.StoreInt64(&cs.latestBlockNum, value) -} - -func (cs *ChainSentry) GetLatestBlockData(requestedBlock int64) (latestBlock int64, hashesRes map[int64]interface{}, requestedBlockHash string, err error) { - cs.blockQueueMu.Lock() - defer cs.blockQueueMu.Unlock() - - latestBlockNum := cs.GetLatestBlockNum() - if len(cs.blocksQueue) == 0 { - return latestBlockNum, nil, "", utils.LavaFormatError("chainSentry GetLatestBlockData had no blocks", nil, &map[string]string{"latestBlock": strconv.FormatInt(latestBlockNum, 10)}) - } - if len(cs.blocksQueue) < cs.numFinalBlocks { - return latestBlockNum, nil, "", utils.LavaFormatError("chainSentry GetLatestBlockData had too little blocks in queue", nil, &map[string]string{"numFinalBlocks": strconv.FormatInt(int64(cs.numFinalBlocks), 10), "blocksQueueLen": strconv.FormatInt(int64(len(cs.blocksQueue)), 10)}) - } - if requestedBlock < 0 { - requestedBlock = sentry.ReplaceRequestedBlock(requestedBlock, latestBlockNum) - } - hashes := make(map[int64]interface{}, len(cs.blocksQueue)) - - for indexInQueue := 0; indexInQueue < cs.numFinalBlocks+cs.finalizedBlockDistance; indexInQueue++ { - blockNum := latestBlockNum - int64(cs.finalizedBlockDistance) - int64(cs.numFinalBlocks) + int64(indexInQueue+1) - if blockNum < 0 { - continue - } - if indexInQueue < cs.numFinalBlocks { - // only return numFinalBlocks in the finalization guarantee - hashes[blockNum] = cs.blocksQueue[indexInQueue] - } - // keep iterating on the others to find a match for the request - // utils.LavaFormatDebug("blocksCompare", &map[string]string{"blocksQueue": fmt.Sprintf("%+v", blockNum), "requestedBlock": strconv.FormatInt(requestedBlock, 10), "indexInQueue": strconv.FormatUint(uint64(indexInQueue), 10)}) - if blockNum == requestedBlock { - requestedBlockHash = cs.blocksQueue[indexInQueue] - } - } - // utils.LavaFormatDebug("ChainSentry LatestBlockData", &map[string]string{"blocksQueue": fmt.Sprintf("%+v", cs.blocksQueue), "requestedBlock": strconv.FormatInt(requestedBlock, 10), "initialBlocknum": strconv.FormatInt(latestBlockNum-int64(cs.finalizedBlockDistance)-int64(cs.numFinalBlocks)+int64(0+1), 10)}) - return latestBlockNum, hashes, requestedBlockHash, nil -} - -func (cs *ChainSentry) fetchLatestBlockNum(ctx context.Context) (int64, error) { - return cs.chainProxy.FetchLatestBlockNum(ctx) -} - -func (cs *ChainSentry) fetchBlockHashByNum(ctx context.Context, blockNum int64) (string, error) { - return cs.chainProxy.FetchBlockHashByNum(ctx, blockNum) -} - -func (cs *ChainSentry) Init(ctx context.Context) error { - latestBlock, err := cs.fetchLatestBlockNum(ctx) - // TODO:: chekc if we have at least x blocknums before forloop - if err != nil { - return ErrorFailedToFetchLatestBlock.Wrapf("Chain Sentry Init failed, additional info: " + err.Error()) - } - - err = cs.fetchAllPreviousBlocks(ctx, latestBlock) - if err != nil { - return ErrorFailedToFetchLatestBlock.Wrapf("Chain Sentry Init failed to fetch all blocks, additional info: " + err.Error()) - } - return nil -} - -func (cs *ChainSentry) fetchAllPreviousBlocks(ctx context.Context, latestBlock int64) error { - tmpArr := []string{} - for i := latestBlock - 
int64(cs.finalizedBlockDistance+cs.numFinalBlocks) + 1; i <= latestBlock; i++ { // save all blocks from the past up until latest block - result, err := cs.fetchBlockHashByNum(ctx, i) - if err != nil { - utils.LavaFormatError("could not get block data in chainSentry", err, &map[string]string{"block": strconv.FormatInt(i, 10)}) - return err - } - - utils.LavaFormatDebug("ChainSentry read a block", &map[string]string{"block": strconv.FormatInt(i, 10), "result": result}) - tmpArr = append(tmpArr, result) // save entire block data for now - } - cs.blockQueueMu.Lock() - cs.SetLatestBlockNum(latestBlock) - cs.blocksQueue = tmpArr - blocksQueueLen := int64(len(cs.blocksQueue)) - cs.blockQueueMu.Unlock() - utils.LavaFormatInfo("ChainSentry Updated latest block", &map[string]string{"block": strconv.FormatInt(latestBlock, 10), "latestHash": cs.GetLatestBlockHash(), "blocksQueueLen": strconv.FormatInt(blocksQueueLen, 10)}) - return nil -} - -func (cs *ChainSentry) forkChangedOrGotNewBlock(ctx context.Context, latestBlock int64) (bool, error) { - if cs.latestBlockNum != latestBlock { - return true, nil - } - blockHash, err := cs.fetchBlockHashByNum(ctx, latestBlock) - if err != nil { - return true, utils.LavaFormatError("ChainSentry fetchBlockHashByNum failed", err, &map[string]string{"block": strconv.FormatInt(latestBlock, 10)}) - } - return cs.GetLatestBlockHash() != blockHash, nil -} - -func (cs *ChainSentry) catchupOnFinalizedBlocks(ctx context.Context) error { - latestBlock, err := cs.fetchLatestBlockNum(ctx) // get actual latest from chain - if err != nil { - return utils.LavaFormatError("error getting latestBlockNum on catchup", err, nil) - } - shouldFetchAgain, err := cs.forkChangedOrGotNewBlock(ctx, latestBlock) - if shouldFetchAgain || err != nil { - err := cs.fetchAllPreviousBlocks(ctx, latestBlock) - if err != nil { - return utils.LavaFormatError("error getting all previous blocks on catchup", err, nil) - } - } else { - utils.LavaFormatDebug("chainSentry skipped reading blocks because its up to date", &map[string]string{"latestHash": cs.GetLatestBlockHash()}) - } - return nil -} - -func (cs *ChainSentry) GetLatestBlockHash() string { - cs.blockQueueMu.Lock() - latestHash := cs.blocksQueue[len(cs.blocksQueue)-1] - cs.blockQueueMu.Unlock() - return latestHash -} - -func (cs *ChainSentry) Start(ctx context.Context) error { - // how often to query latest block. 
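// The poll interval below is the chain's average block time, as reported by the sentry, interpreted in milliseconds.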
- ticker := time.NewTicker( - time.Millisecond * time.Duration(cs.chainProxy.GetSentry().GetAverageBlockTime())) - - // Polls blocks and keeps a queue of them - go func() { - for { - select { - case <-ticker.C: - err := cs.catchupOnFinalizedBlocks(ctx) - if err != nil { - log.Println(err) - } - case <-cs.quit: - ticker.Stop() - return - } - } - }() - - return nil -} - -func (cs *ChainSentry) quitSentry(ctx context.Context) { - cs.quit <- true -} - -func NewChainSentry( - clientCtx client.Context, - cp chainproxy.ChainProxy, - chainID string, -) *ChainSentry { - return &ChainSentry{ - chainProxy: cp, - ChainID: chainID, - numFinalBlocks: int(cp.GetSentry().GetSpecBlocksInFinalizationProof()), - finalizedBlockDistance: int(cp.GetSentry().GetSpecBlockDistanceForFinalizedData()), - quit: make(chan bool), - } -} diff --git a/relayer/readme.md b/relayer/readme.md deleted file mode 100644 index cb2ac1a738..0000000000 --- a/relayer/readme.md +++ /dev/null @@ -1,68 +0,0 @@ -# Relayer - -## Compile protobuf - -```bash -# in lava folder -bash ./relayer/compile_proto.sh -``` - -## Run relayer server - -```bash -# in lava folder -lavad server 127.0.0.1 2222 wss://mainnet.infura.io/ws/v3/ 0 --from bob -``` - -## Run relayer test client - -```bash -# in lava folder -lavad test_client 0 --from alice -``` - -## Run portal server - -```bash -# in lava folder -lavad portal_server 127.0.0.1 3333 0 --from user2 -geth attach ws://127.0.0.1:3333/ws -``` -### debug -for a more verbose logging use the flag: --log_level debug -## Debug the relayer mutexes - -This flag turns on warnings for mutexes thay are locked for a long time -```bash -# in lava folder -DEBUG_MUTEX="true" make # make with this flag on -# Run any of the above with the compiled lavad -build/lavad server 127.0.0.1 2222 wss://mainnet.infura.io/ws/v3/ 0 --from bob -``` - -## Hide Portal Errors for consumer requests - -This flag will show only a unique identifier id for each error. 
- -If the flag is true: -``` -curl -X GET "http://127.0.0.1:3340/1/nbobo" -{"error": "unsupported api","more_information" Error guid: GUID2756376310285318670}% -``` - -If the flag is off -``` -curl -X GET "http://127.0.0.1:3340/1/nbobo" -{"error": "unsupported api","more_information" Error guid: GUID55979968042711362, Error: REST Api not supported /nbobo }% -``` - -To run the flag use the make file with the following command - -off: -``` -MASK_CONSUMER_LOGS="false"; make build -``` -on: -``` -MASK_CONSUMER_LOGS="false"; make build -``` \ No newline at end of file diff --git a/relayer/sentry/sentry.go b/relayer/sentry/sentry.go deleted file mode 100755 index 7a190ff225..0000000000 --- a/relayer/sentry/sentry.go +++ /dev/null @@ -1,1557 +0,0 @@ -package sentry - -import ( - "bytes" - "context" - "encoding/binary" - "encoding/json" - "fmt" - "math" - "math/rand" - "regexp" - "sort" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/coniks-sys/coniks-go/crypto/vrf" - "github.com/cosmos/cosmos-sdk/client" - "github.com/cosmos/cosmos-sdk/client/rpc" - "github.com/cosmos/cosmos-sdk/client/tx" - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/lavanet/lava/protocol/lavasession" - "github.com/lavanet/lava/relayer/sigs" - "github.com/lavanet/lava/utils" - conflicttypes "github.com/lavanet/lava/x/conflict/types" - epochstoragetypes "github.com/lavanet/lava/x/epochstorage/types" - pairingtypes "github.com/lavanet/lava/x/pairing/types" - spectypes "github.com/lavanet/lava/x/spec/types" - "github.com/spf13/pflag" - tendermintcrypto "github.com/tendermint/tendermint/crypto" - rpcclient "github.com/tendermint/tendermint/rpc/client" - ctypes "github.com/tendermint/tendermint/rpc/core/types" - tenderminttypes "github.com/tendermint/tendermint/types" - "golang.org/x/exp/slices" - grpc "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" -) - -const ( - maxRetries = 10 - providerWasntFound = -1 - findPairingFailedIndex = -1 - supportedNumberOfVRFs = 2 - GeolocationFlag = "geolocation" -) - -type VoteParams struct { - CloseVote bool - ChainID string - ApiURL string - RequestData []byte - RequestBlock uint64 - Voters []string - ConnectionType string -} - -func (vp *VoteParams) GetCloseVote() bool { - if vp == nil { - // default returns false - return false - } - return vp.CloseVote -} - -// Constants - -var AvailabilityPercentage sdk.Dec = sdk.NewDecWithPrec(5, 2) // TODO move to params pairing -const ( - MaxConsecutiveConnectionAttempts = 3 - PercentileToCalculateLatency = 0.9 - MinProvidersForSync = 0.6 - LatencyThresholdStatic = 1 * time.Second - LatencyThresholdSlope = 1 * time.Millisecond - StaleEpochDistance = 3 // relays done 3 epochs back are ready to be rewarded -) - -type PaymentRequest struct { - CU uint64 - BlockHeightDeadline int64 - Amount sdk.Coin - Client sdk.AccAddress - UniqueIdentifier uint64 -} - -type providerDataContainer struct { - // keep all data used to sign sigblocks - LatestFinalizedBlock int64 - LatestBlockTime time.Time - FinalizedBlocksHashes map[int64]string - SigBlocks []byte - SessionId uint64 - BlockHeight int64 - RelayNum uint64 - LatestBlock int64 - // TODO:: keep relay request for conflict reporting -} - -type ProviderHashesConsensus struct { - FinalizedBlocksHashes map[int64]string - agreeingProviders map[string]providerDataContainer -} - -type RewardHandler struct { - epochEventTriggered bool - delayRewardBy int - waitedBlocks int - blockHeight int64 -} - -type Sentry struct { - ClientCtx client.Context - rpcClient 
rpcclient.Client - specQueryClient spectypes.QueryClient - pairingQueryClient pairingtypes.QueryClient - epochStorageQueryClient epochstoragetypes.QueryClient - ChainID string - NewTransactionEvents <-chan ctypes.ResultEvent - NewBlockEvents <-chan ctypes.ResultEvent - isUser bool - Acc string // account address (bech32) - voteInitiationCb func(ctx context.Context, voteID string, voteDeadline uint64, voteParams *VoteParams) - newEpochCb func(epochHeight int64) - ApiInterface string - cmdFlags *pflag.FlagSet - serverID uint64 - authorizationCache map[uint64]map[string]*pairingtypes.QueryVerifyPairingResponse - authorizationCacheMutex sync.RWMutex - txFactory tx.Factory - geolocation uint64 - // - // expected payments storage - PaymentsMu sync.RWMutex - expectedPayments []PaymentRequest - receivedPayments []PaymentRequest - totalCUServiced uint64 - totalCUPaid uint64 - - // server Blocks To Save (atomic) - earliestSavedBlock uint64 - // Block storage (atomic) - blockHeight int64 - currentEpoch uint64 - prevEpoch uint64 - EpochSize uint64 - EpochBlocksOverlap uint64 - providersCount uint64 - // - // Spec storage (rw mutex) - specMu sync.RWMutex - specHash []byte - serverSpec spectypes.Spec - serverApis map[string]spectypes.ServiceApi - taggedApis map[string]spectypes.ServiceApi - - VrfSkMu utils.LavaMutex - VrfSk vrf.PrivateKey - - // every entry in providerHashesConsensus is conflicted with the other entries - providerHashesConsensus []ProviderHashesConsensus - prevEpochProviderHashesConsensus []ProviderHashesConsensus - providerDataContainersMu sync.RWMutex - - consumerSessionManager *lavasession.ConsumerSessionManager -} - -func (s *Sentry) SetupConsumerSessionManager(ctx context.Context, consumerSessionManager *lavasession.ConsumerSessionManager) error { - utils.LavaFormatInfo("Setting up ConsumerSessionManager", nil) - s.consumerSessionManager = consumerSessionManager - // Get pairing for the first time, for clients - // pairingList, err := s.getPairing(ctx) - // if err != nil { - // utils.LavaFormatFatal("Failed getting pairing for consumer in initialization", err, &map[string]string{"Address": s.Acc}) - // } - // err = s.consumerSessionManager.UpdateAllProviders(s.GetCurrentEpochHeight(), pairingList) - // if err != nil { - // utils.LavaFormatFatal("Failed UpdateAllProviders", err, &map[string]string{"Address": s.Acc}) - // } - return nil -} - -func (s *Sentry) FetchProvidersCount(ctx context.Context) error { - res, err := s.pairingQueryClient.Params(ctx, &pairingtypes.QueryParamsRequest{}) - if err != nil { - return err - } - atomic.StoreUint64(&s.providersCount, res.GetParams().ServicersToPairCount) - return nil -} - -func (s *Sentry) GetProvidersCount() uint64 { - return atomic.LoadUint64(&s.providersCount) -} - -func (s *Sentry) GetEpochSize() uint64 { - return atomic.LoadUint64(&s.EpochSize) -} - -func (s *Sentry) FetchEpochSize(ctx context.Context) error { - res, err := s.epochStorageQueryClient.Params(ctx, &epochstoragetypes.QueryParamsRequest{}) - if err != nil { - return err - } - atomic.StoreUint64(&s.EpochSize, res.GetParams().EpochBlocks) - - return nil -} - -func (s *Sentry) FetchOverlapSize(ctx context.Context) error { - res, err := s.pairingQueryClient.Params(ctx, &pairingtypes.QueryParamsRequest{}) - if err != nil { - return err - } - atomic.StoreUint64(&s.EpochBlocksOverlap, res.GetParams().EpochBlocksOverlap) - return nil -} - -func (s *Sentry) FetchEpochParams(ctx context.Context) error { - res, err := s.epochStorageQueryClient.EpochDetails(ctx, 
&epochstoragetypes.QueryGetEpochDetailsRequest{}) - if err != nil { - return err - } - earliestBlock := res.GetEpochDetails().EarliestStart - currentEpoch := res.GetEpochDetails().StartBlock - atomic.StoreUint64(&s.earliestSavedBlock, earliestBlock) - atomic.StoreUint64(&s.currentEpoch, currentEpoch) - return nil -} - -func (s *Sentry) getPairing(ctx context.Context) ([]*lavasession.ConsumerSessionsWithProvider, error) { - // - // sentry for server module does not need a pairing - if !s.isUser { - return nil, nil - } - - // - // Get - res, err := s.pairingQueryClient.GetPairing(ctx, &pairingtypes.QueryGetPairingRequest{ - ChainID: s.GetChainID(), - Client: s.Acc, - }) - if err != nil { - return nil, utils.LavaFormatError("Failed in get pairing query", err, &map[string]string{}) - } - - providers := res.GetProviders() - if len(providers) == 0 { - return nil, utils.LavaFormatError("no providers found in pairing, returned empty list", nil, &map[string]string{}) - } - - // - // Set - pairing := []*lavasession.ConsumerSessionsWithProvider{} - for _, provider := range providers { - // - // Sanity - providerEndpoints := provider.GetEndpoints() - if len(providerEndpoints) == 0 { - utils.LavaFormatError("skipping provider with no endoints", nil, &map[string]string{"Address": provider.Address, "ChainID": provider.Chain}) - continue - } - - relevantEndpoints := []epochstoragetypes.Endpoint{} - for _, endpoint := range providerEndpoints { - // only take into account endpoints that use the same api interface and the same geolocation - if endpoint.UseType == s.ApiInterface && endpoint.Geolocation == s.geolocation { - relevantEndpoints = append(relevantEndpoints, endpoint) - } - } - if len(relevantEndpoints) == 0 { - utils.LavaFormatError("skipping provider, No relevant endpoints for apiInterface", nil, &map[string]string{"Address": provider.Address, "ChainID": provider.Chain, "apiInterface": s.ApiInterface, "Endpoints": fmt.Sprintf("%v", providerEndpoints)}) - continue - } - - maxcu, err := s.GetMaxCUForUser(ctx, s.Acc, provider.Chain) - if err != nil { - return nil, utils.LavaFormatError("Failed getting max CU for user", err, &map[string]string{"Address": s.Acc, "ChainID": provider.Chain}) - } - // - pairingEndpoints := make([]*lavasession.Endpoint, len(relevantEndpoints)) - for idx, relevantEndpoint := range relevantEndpoints { - endp := &lavasession.Endpoint{NetworkAddress: relevantEndpoint.IPPORT, Enabled: true, Client: nil, ConnectionRefusals: 0} - pairingEndpoints[idx] = endp - } - - pairing = append(pairing, &lavasession.ConsumerSessionsWithProvider{ - PublicLavaAddress: provider.Address, - Endpoints: pairingEndpoints, - Sessions: map[int64]*lavasession.SingleConsumerSession{}, - MaxComputeUnits: maxcu, - ReliabilitySent: false, - PairingEpoch: s.GetCurrentEpochHeight(), - }) - } - if len(pairing) == 0 { - utils.LavaFormatError("Failed getting pairing for consumer, pairing is empty", err, &map[string]string{"Address": s.Acc, "ChainID": s.GetChainID(), "geolocation": strconv.FormatUint(s.geolocation, 10)}) - } - // replace previous pairing with new providers - return pairing, nil -} - -func (s *Sentry) GetSpecHash() []byte { - s.specMu.Lock() - defer s.specMu.Unlock() - return s.specHash -} - -func (s *Sentry) GetAllSpecNames(ctx context.Context) (map[string][]spectypes.ApiInterface, error) { - spec, err := s.specQueryClient.Spec(ctx, &spectypes.QueryGetSpecRequest{ - ChainID: s.ChainID, - }) - if err != nil { - return nil, utils.LavaFormatError("Failed Querying spec for chain", err, 
&map[string]string{"ChainID": s.ChainID}) - } - serverApis, _ := s.getServiceApis(spec) - allSpecNames := make(map[string][]spectypes.ApiInterface) - for _, api := range serverApis { - allSpecNames[api.Name] = api.ApiInterfaces - } - return allSpecNames, nil -} - -func (s *Sentry) getServiceApis(spec *spectypes.QueryGetSpecResponse) (retServerApis map[string]spectypes.ServiceApi, retTaggedApis map[string]spectypes.ServiceApi) { - serverApis := map[string]spectypes.ServiceApi{} - taggedApis := map[string]spectypes.ServiceApi{} - if spec.Spec.Enabled { - for _, api := range spec.Spec.Apis { - if !api.Enabled { - continue - } - // - // TODO: find a better spot for this (more optimized, precompile regex, etc) - for _, apiInterface := range api.ApiInterfaces { - if apiInterface.Interface != s.ApiInterface { - // spec will contain many api interfaces, we only need those that belong to the apiInterface of this sentry - continue - } - if apiInterface.Interface == spectypes.APIInterfaceRest { - re := regexp.MustCompile(`{[^}]+}`) - processedName := string(re.ReplaceAll([]byte(api.Name), []byte("replace-me-with-regex"))) - processedName = regexp.QuoteMeta(processedName) - processedName = strings.ReplaceAll(processedName, "replace-me-with-regex", `[^\/\s]+`) - serverApis[processedName] = api - } else { - serverApis[api.Name] = api - } - - if api.Parsing.GetFunctionTag() != "" { - taggedApis[api.Parsing.GetFunctionTag()] = api - } - } - } - } - return serverApis, taggedApis -} - -func (s *Sentry) getSpec(ctx context.Context) error { - // - // TODO: decide if it's fatal to not have spec (probably!) - spec, err := s.specQueryClient.Spec(ctx, &spectypes.QueryGetSpecRequest{ - ChainID: s.ChainID, - }) - if err != nil { - return utils.LavaFormatError("Failed Querying spec for chain", err, &map[string]string{"ChainID": s.ChainID}) - } - - // - // Check if updated - hash := tendermintcrypto.Sha256([]byte(spec.String())) // TODO: we use cheaper algo for speed - if bytes.Equal(s.specHash, hash) { - // spec for chain didnt change - return nil - } - s.specHash = hash - - // - // Update - utils.LavaFormatInfo("Sentry updated spec", &map[string]string{"ChainID": spec.Spec.Index, "spec name": spec.Spec.Name}) - serverApis, taggedApis := s.getServiceApis(spec) - - s.specMu.Lock() - defer s.specMu.Unlock() - s.serverSpec = spec.Spec - s.serverApis = serverApis - s.taggedApis = taggedApis - - return nil -} - -func (s *Sentry) Init(ctx context.Context) error { - // - // New client - err := s.rpcClient.Start() - if err != nil { - return err - } - - // - // Listen to new blocks - query := "tm.event = 'NewBlock'" - // - txs, err := s.rpcClient.Subscribe(ctx, "test-client", query) - if err != nil { - return utils.LavaFormatError("Failed subscribing to new blocks", err, &map[string]string{}) - } - s.NewBlockEvents = txs - - query = "tm.event = 'Tx'" - txs, err = s.rpcClient.Subscribe(ctx, "test-client", query) - if err != nil { - return utils.LavaFormatError("Failed subscribing to transactions", err, &map[string]string{}) - } - s.NewTransactionEvents = txs - // - // Get spec for the first time - err = s.getSpec(ctx) - if err != nil { - return utils.LavaFormatError("Failed getting spec in initialization", err, &map[string]string{}) - } - - s.SetPrevEpochHeight(0) - err = s.FetchChainParams(ctx) - if err != nil { - return err - } - - geolocation, err := s.cmdFlags.GetUint64(GeolocationFlag) - if err != nil { - utils.LavaFormatFatal("failed to read geolocation flag, required flag", err, nil) - } - if geolocation > 0 && 
(geolocation&(geolocation-1)) == 0 { - // geolocation is a power of 2 - s.geolocation = geolocation - } else { - // geolocation is not a power of 2 - utils.LavaFormatFatal("geolocation flag needs to set only one geolocation, 1<= rewardHandler.delayRewardBy { - utils.LavaFormatInfo("Asking for rewards", &map[string]string{"delayedBlocks": strconv.Itoa(rewardHandler.delayRewardBy)}) - go s.newEpochCb(rewardHandler.blockHeight) // Currently this is only askForRewards - rewardHandler = &RewardHandler{} // reset reward handler to default. - } - rewardHandler.waitedBlocks += 1 - } - } -} - -func (s *Sentry) FetchChainParams(ctx context.Context) error { - err := s.FetchEpochSize(ctx) - if err != nil { - return err - } - - err = s.FetchOverlapSize(ctx) - if err != nil { - return err - } - - err = s.FetchEpochParams(ctx) - if err != nil { - return err - } - - err = s.FetchProvidersCount(ctx) - if err != nil { - return err - } - - return nil -} - -func (s *Sentry) IdentifyMissingPayments() { - lastBlockInMemory := atomic.LoadUint64(&s.earliestSavedBlock) - s.PaymentsMu.Lock() - - var updatedExpectedPayments []PaymentRequest - - for idx, expectedPay := range s.expectedPayments { - // Exclude and log missing payments - if uint64(expectedPay.BlockHeightDeadline) < lastBlockInMemory { - utils.LavaFormatError("Identified Missing Payment", nil, - &map[string]string{ - "expectedPay.CU": strconv.FormatUint(expectedPay.CU, 10), - "expectedPay.BlockHeightDeadline": strconv.FormatInt(expectedPay.BlockHeightDeadline, 10), - "lastBlockInMemory": strconv.FormatUint(lastBlockInMemory, 10), - }) - - continue - } - - // Include others - updatedExpectedPayments = append(updatedExpectedPayments, s.expectedPayments[idx]) - } - - // Update expectedPayment - s.expectedPayments = updatedExpectedPayments - - s.PaymentsMu.Unlock() - // can be modified in this race window, so we double-check - - utils.LavaFormatInfo("Service report", &map[string]string{ - "total CU serviced": strconv.FormatUint(s.GetCUServiced(), 10), - "total CU that got paid": strconv.FormatUint(s.GetPaidCU(), 10), - }) -} - -// expecting caller to lock -func (s *Sentry) AddExpectedPayment(expectedPay PaymentRequest) { - s.PaymentsMu.Lock() - defer s.PaymentsMu.Unlock() - s.expectedPayments = append(s.expectedPayments, expectedPay) -} - -func (s *Sentry) connectRawClient(ctx context.Context, addr string) (*pairingtypes.RelayerClient, error) { - connectCtx, cancel := context.WithTimeout(ctx, 3*time.Second) - defer cancel() - conn, err := grpc.DialContext(connectCtx, addr, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithBlock()) - if err != nil { - return nil, err - } - /*defer conn.Close()*/ - - c := pairingtypes.NewRelayerClient(conn) - return &c, nil -} - -func (s *Sentry) CompareRelaysAndReportConflict(reply0 *pairingtypes.RelayReply, request0 *pairingtypes.RelayRequest, reply1 *pairingtypes.RelayReply, request1 *pairingtypes.RelayRequest) (ok bool) { - compare_result := bytes.Compare(reply0.Data, reply1.Data) - if compare_result == 0 { - // they have equal data - return true - } - // they have different data! report! 
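// A conflicttypes.MsgDetection carrying both conflicting request/reply pairs is simulated and broadcast below, and the function returns false so the caller knows the two responses did not match.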
- utils.LavaFormatWarning("Simulation: DataReliability detected mismatching results, Reporting...", nil, &map[string]string{"Data0": string(reply0.Data), "Data1": string(reply1.Data)}) - responseConflict := conflicttypes.ResponseConflict{ - ConflictRelayData0: &conflicttypes.ConflictRelayData{Reply: reply0, Request: request0}, - ConflictRelayData1: &conflicttypes.ConflictRelayData{Reply: reply1, Request: request1}, - } - msg := conflicttypes.NewMsgDetection(s.Acc, nil, &responseConflict, nil) - s.ClientCtx.SkipConfirm = true - // txFactory := tx.NewFactoryCLI(s.ClientCtx, s.cmdFlags).WithChainID("lava") - err := SimulateAndBroadCastTx(s.ClientCtx, s.txFactory, msg) - if err != nil { - utils.LavaFormatError("CompareRelaysAndReportConflict - SimulateAndBroadCastTx Failed", err, nil) - } - // report the conflict - return false -} - -func (s *Sentry) DataReliabilityThresholdToSession(vrfs [][]byte, uniqueIdentifiers []bool) (indexes map[int64]bool) { - // check for the VRF thresholds and if holds true send a relay to the provider - // TODO: improve with blacklisted address, and the module-1 - s.specMu.RLock() - reliabilityThreshold := s.serverSpec.ReliabilityThreshold - s.specMu.RUnlock() - - providersCount := uint32(s.consumerSessionManager.GetAtomicPairingAddressesLength()) - indexes = make(map[int64]bool, len(vrfs)) - for vrfIndex, vrf := range vrfs { - index, err := utils.GetIndexForVrf(vrf, providersCount, reliabilityThreshold) - if index == -1 || err != nil { - continue // no reliability this time. - } - if _, ok := indexes[index]; !ok { - indexes[index] = uniqueIdentifiers[vrfIndex] - } - } - return -} - -func (s *Sentry) discrepancyChecker(finalizedBlocksA map[int64]string, consensus ProviderHashesConsensus) (discrepancy bool, errRet error) { - var toIterate map[int64]string // the smaller map between the two to compare - var otherBlocks map[int64]string // the other map - - if len(finalizedBlocksA) < len(consensus.FinalizedBlocksHashes) { - toIterate = finalizedBlocksA - otherBlocks = consensus.FinalizedBlocksHashes - } else { - toIterate = consensus.FinalizedBlocksHashes - otherBlocks = finalizedBlocksA - } - - // Iterate over smaller array, looks for mismatching hashes between the inputs - for blockNum, blockHash := range toIterate { - if otherHash, ok := otherBlocks[blockNum]; ok { - if blockHash != otherHash { - // - // TODO:: Fill msg with incriminating data - msg := conflicttypes.NewMsgDetection(s.Acc, nil, nil, nil) - s.ClientCtx.SkipConfirm = true - // txFactory := tx.NewFactoryCLI(s.ClientCtx, s.cmdFlags).WithChainID("lava") - err := SimulateAndBroadCastTx(s.ClientCtx, s.txFactory, msg) - if err != nil { - return false, utils.LavaFormatError("discrepancyChecker - SimulateAndBroadCastTx Failed", err, nil) - } - // TODO:: should break here? is one enough or search for more? 
- return true, utils.LavaFormatError("Simulation: reliability discrepancy, different hashes detected for block", nil, &map[string]string{"blockNum": strconv.FormatInt(blockNum, 10), "Hashes": fmt.Sprintf("%s vs %s", blockHash, otherHash), "toIterate": fmt.Sprintf("%v", toIterate), "otherBlocks": fmt.Sprintf("%v", otherBlocks)}) - } - } - } - - return false, nil -} - -func (s *Sentry) validateProviderReply(finalizedBlocks map[int64]string, latestBlock int64, providerAcc string, session *lavasession.SingleConsumerSession) error { - sorted := make([]int64, len(finalizedBlocks)) - idx := 0 - maxBlockNum := int64(0) - for blockNum := range finalizedBlocks { - if !s.IsFinalizedBlock(blockNum, latestBlock) { - return utils.LavaFormatError("Simulation: provider returned non finalized block reply for reliability", nil, &map[string]string{"blockNum": strconv.FormatInt(blockNum, 10), "latestBlock": strconv.FormatInt(latestBlock, 10), "ChainID": s.ChainID, "Provider": providerAcc, "finalizedBlocks": fmt.Sprintf("%+v", finalizedBlocks)}) - } - - sorted[idx] = blockNum - - if blockNum > maxBlockNum { - maxBlockNum = blockNum - } - idx++ - // TODO: check blockhash length and format - } - - // check for consecutive blocks - sort.Slice(sorted, func(i, j int) bool { return sorted[i] < sorted[j] }) - for index := range sorted { - if index != 0 && sorted[index]-1 != sorted[index-1] { - // log.Println("provider returned non consecutive finalized blocks reply.\n Provider: %s", providerAcc) - return utils.LavaFormatError("Simulation: provider returned non consecutive finalized blocks reply", nil, &map[string]string{"curr block": strconv.FormatInt(sorted[index], 10), "prev block": strconv.FormatInt(sorted[index-1], 10), "ChainID": s.ChainID, "Provider": providerAcc, "finalizedBlocks": fmt.Sprintf("%+v", finalizedBlocks)}) - } - } - - // check that latest finalized block address + 1 points to a non finalized block - if s.IsFinalizedBlock(maxBlockNum+1, latestBlock) { - return utils.LavaFormatError("Simulation: provider returned finalized hashes for an older latest block", nil, &map[string]string{ - "maxBlockNum": strconv.FormatInt(maxBlockNum, 10), - "latestBlock": strconv.FormatInt(latestBlock, 10), "ChainID": s.ChainID, "Provider": providerAcc, "finalizedBlocks": fmt.Sprintf("%+v", finalizedBlocks), - }) - } - - // New reply should have blocknum >= from block same provider - if session.LatestBlock > latestBlock { - // - // Report same provider discrepancy - // TODO:: Fill msg with incriminating data - msg := conflicttypes.NewMsgDetection(s.Acc, nil, nil, nil) - s.ClientCtx.SkipConfirm = true - // txFactory := tx.NewFactoryCLI(s.ClientCtx, s.cmdFlags).WithChainID("lava") - err := SimulateAndBroadCastTx(s.ClientCtx, s.txFactory, msg) - if err != nil { - return utils.LavaFormatError("validateProviderReply - SimulateAndBroadCastTx Failed", err, nil) - } - - return utils.LavaFormatError("Simulation: Provider supplied an older latest block than it has previously", nil, &map[string]string{ - "session.LatestBlock": strconv.FormatInt(session.LatestBlock, 10), - "latestBlock": strconv.FormatInt(latestBlock, 10), "ChainID": s.ChainID, "Provider": providerAcc, - }) - } - - return nil -} - -func (s *Sentry) initProviderHashesConsensus(providerAcc string, latestBlock int64, finalizedBlocks map[int64]string, reply *pairingtypes.RelayReply, req *pairingtypes.RelayRequest) ProviderHashesConsensus { - newProviderDataContainer := providerDataContainer{ - LatestFinalizedBlock: s.GetLatestFinalizedBlock(latestBlock), - 
LatestBlockTime: time.Now(), - FinalizedBlocksHashes: finalizedBlocks, - SigBlocks: reply.SigBlocks, - SessionId: req.RelaySession.SessionId, - RelayNum: req.RelaySession.RelayNum, - BlockHeight: req.RelaySession.Epoch, - LatestBlock: latestBlock, - } - providerDataContainers := map[string]providerDataContainer{} - providerDataContainers[providerAcc] = newProviderDataContainer - return ProviderHashesConsensus{ - FinalizedBlocksHashes: finalizedBlocks, - agreeingProviders: providerDataContainers, - } -} - -func (s *Sentry) insertProviderToConsensus(consensus *ProviderHashesConsensus, finalizedBlocks map[int64]string, latestBlock int64, reply *pairingtypes.RelayReply, req *pairingtypes.RelayRequest, providerAcc string) { - newProviderDataContainer := providerDataContainer{ - LatestFinalizedBlock: s.GetLatestFinalizedBlock(latestBlock), - LatestBlockTime: time.Now(), - FinalizedBlocksHashes: finalizedBlocks, - SigBlocks: reply.SigBlocks, - SessionId: req.RelaySession.SessionId, - RelayNum: req.RelaySession.RelayNum, - BlockHeight: req.RelaySession.Epoch, - LatestBlock: latestBlock, - } - consensus.agreeingProviders[providerAcc] = newProviderDataContainer - - for blockNum, blockHash := range finalizedBlocks { - consensus.FinalizedBlocksHashes[blockNum] = blockHash - } -} - -type DataReliabilitySession struct { - singleConsumerSession *lavasession.SingleConsumerSession - epoch uint64 - providerPublicAddress string - uniqueIdentifier bool -} - -type DataReliabilityResult struct { - reply *pairingtypes.RelayReply - relayRequest *pairingtypes.RelayRequest - providerPublicAddress string -} - -func (s *Sentry) SendRelay( - ctx context.Context, - consumerSession *lavasession.SingleConsumerSession, - sessionEpoch uint64, - providerPubAddress string, - cb_send_relay func(consumerSession *lavasession.SingleConsumerSession) (*pairingtypes.RelayReply, *pairingtypes.Relayer_RelaySubscribeClient, *pairingtypes.RelayRequest, time.Duration, bool, error), - cb_send_reliability func(consumerSession *lavasession.SingleConsumerSession, dataReliability *pairingtypes.VRFData, providerAddress string) (*pairingtypes.RelayReply, *pairingtypes.RelayRequest, time.Duration, time.Duration, error), - specCategory *spectypes.SpecCategory, -) (*pairingtypes.RelayReply, *pairingtypes.Relayer_RelaySubscribeClient, time.Duration, bool, error) { - // callback user - reply, replyServer, request, latency, fromCache, err := cb_send_relay(consumerSession) - // error using this provider - if err != nil { - // Lava format error overrides the error status code. 
- // so in this case we just want to return the error as it is - utils.LavaFormatError("failed sending relay", err, nil) - return nil, nil, 0, fromCache, err - } - - if s.GetSpecDataReliabilityEnabled() && reply != nil && !fromCache { - finalizedBlocks := map[int64]string{} // TODO:: define struct in relay response - err = json.Unmarshal(reply.FinalizedBlocksHashes, &finalizedBlocks) - if err != nil { - return nil, nil, latency, fromCache, utils.LavaFormatError("failed in unmarshalling finalized blocks data", lavasession.SendRelayError, &map[string]string{"ErrMsg": err.Error()}) - } - latestBlock := reply.LatestBlock - - // validate that finalizedBlocks makes sense - err = s.validateProviderReply(finalizedBlocks, latestBlock, providerPubAddress, consumerSession) - if err != nil { - return nil, nil, latency, fromCache, utils.LavaFormatError("failed provider reply validation", lavasession.SendRelayError, &map[string]string{"ErrMsg": err.Error()}) - } - // - // Compare finalized block hashes with previous providers - // Looks for discrepancy with current epoch providers - // if no conflicts, insert into consensus and break - // create new consensus group if no consensus matched - // check for discrepancy with old epoch - _, err := checkFinalizedHashes(s, providerPubAddress, latestBlock, finalizedBlocks, request, reply) - if err != nil { - return nil, nil, latency, fromCache, utils.LavaFormatError("failed to check finalized hashes", lavasession.SendRelayError, &map[string]string{"ErrMsg": err.Error()}) - } - - if specCategory.Deterministic && s.IsFinalizedBlock(request.RelayData.RequestBlock, reply.LatestBlock) { - var dataReliabilitySessions []*DataReliabilitySession - - // handle data reliability - s.VrfSkMu.Lock() - vrfRes0, vrfRes1 := utils.CalculateVrfOnRelay(request.RelayData, reply, s.VrfSk, sessionEpoch) - s.VrfSkMu.Unlock() - // get two indexesMap for data reliability. - indexesMap := s.DataReliabilityThresholdToSession([][]byte{vrfRes0, vrfRes1}, []bool{false, true}) - utils.LavaFormatDebug("DataReliability Randomized Values", &map[string]string{"vrf0": strconv.FormatUint(uint64(binary.LittleEndian.Uint32(vrfRes0)), 10), "vrf1": strconv.FormatUint(uint64(binary.LittleEndian.Uint32(vrfRes1)), 10), "decisionMap": fmt.Sprintf("%+v", indexesMap)}) - for idxExtract, uniqueIdentifier := range indexesMap { // go over each unique index and get a session. 
- // the key in the indexesMap are unique indexes to fetch from consumerSessionManager - dataReliabilityConsumerSession, providerPublicAddress, epoch, err := s.consumerSessionManager.GetDataReliabilitySession(ctx, providerPubAddress, idxExtract, sessionEpoch) - if err != nil { - if lavasession.DataReliabilityIndexRequestedIsOriginalProviderError.Is(err) { - // index belongs to original provider, nothing is wrong here, print info and continue - utils.LavaFormatInfo("DataReliability: Trying to get the same provider index as original request", &map[string]string{"provider": providerPubAddress, "Index": strconv.FormatInt(idxExtract, 10)}) - } else if lavasession.DataReliabilityAlreadySentThisEpochError.Is(err) { - utils.LavaFormatInfo("DataReliability: Already Sent Data Reliability This Epoch To This Provider.", &map[string]string{"Provider": providerPubAddress, "Epoch": strconv.FormatUint(epoch, 10)}) - } else if lavasession.DataReliabilityEpochMismatchError.Is(err) { - utils.LavaFormatInfo("DataReliability: Epoch changed cannot send data reliability", &map[string]string{"original_epoch": strconv.FormatUint(sessionEpoch, 10), "data_reliability_epoch": strconv.FormatUint(epoch, 10)}) - // if epoch changed, we can stop trying to get data reliability sessions - break - } else { - utils.LavaFormatError("GetDataReliabilitySession", err, nil) - } - continue // if got an error continue to next index. - } - dataReliabilitySessions = append(dataReliabilitySessions, &DataReliabilitySession{ - singleConsumerSession: dataReliabilityConsumerSession, - epoch: epoch, - providerPublicAddress: providerPublicAddress, - uniqueIdentifier: uniqueIdentifier, - }) - } - - sendReliabilityRelay := func(singleConsumerSession *lavasession.SingleConsumerSession, providerAddress string, differentiator bool) (relay_rep *pairingtypes.RelayReply, relay_req *pairingtypes.RelayRequest, err error) { - var dataReliabilityLatency time.Duration - var dataReliabilityTimeout time.Duration - s.VrfSkMu.Lock() - vrf_res, vrf_proof := utils.ProveVrfOnRelay(request.RelayData, reply, s.VrfSk, differentiator, sessionEpoch) - s.VrfSkMu.Unlock() - dataReliability := &pairingtypes.VRFData{ - ChainID: request.RelaySession.SpecID, - Epoch: request.RelaySession.Epoch, - Differentiator: differentiator, - VrfValue: vrf_res, - VrfProof: vrf_proof, - ProviderSig: reply.Sig, - AllDataHash: sigs.AllDataHash(reply, request), - QueryHash: utils.CalculateQueryHash(*request.RelayData), // calculated from query body anyway, but we will use this on payment - Sig: nil, // calculated in cb_send_reliability - } - relay_rep, relay_req, dataReliabilityLatency, dataReliabilityTimeout, err = cb_send_reliability(singleConsumerSession, dataReliability, providerAddress) - if err != nil { - errRet := s.consumerSessionManager.OnDataReliabilitySessionFailure(singleConsumerSession, err) - if errRet != nil { - return nil, nil, utils.LavaFormatError("OnDataReliabilitySessionFailure Error", errRet, &map[string]string{"sendReliabilityError": err.Error()}) - } - return nil, nil, utils.LavaFormatError("sendReliabilityRelay Could not get reply to reliability relay from provider", err, &map[string]string{"Address": providerAddress}) - } - - expectedBH, numOfProviders := s.ExpectedBlockHeight() - err = s.consumerSessionManager.OnDataReliabilitySessionDone(singleConsumerSession, relay_rep.LatestBlock, singleConsumerSession.LatestRelayCu, dataReliabilityLatency, singleConsumerSession.CalculateExpectedLatency(dataReliabilityTimeout), expectedBH, numOfProviders, 
s.GetProvidersCount()) - return relay_rep, relay_req, err - } - - checkReliability := func() { - numberOfReliabilitySessions := len(dataReliabilitySessions) - if numberOfReliabilitySessions > supportedNumberOfVRFs { - utils.LavaFormatError("Trying to use DataReliability with more than two vrf sessions, currently not supported", nil, &map[string]string{"number_of_DataReliabilitySessions": strconv.Itoa(numberOfReliabilitySessions)}) - return - } else if numberOfReliabilitySessions == 0 { - return - } - // apply first request and reply to dataReliabilityVerifications - originalDataReliabilityResult := &DataReliabilityResult{reply: reply, relayRequest: request, providerPublicAddress: providerPubAddress} - dataReliabilityVerifications := make([]*DataReliabilityResult, 0) - - for _, dataReliabilitySession := range dataReliabilitySessions { - reliabilityReply, reliabilityRequest, err := sendReliabilityRelay(dataReliabilitySession.singleConsumerSession, dataReliabilitySession.providerPublicAddress, dataReliabilitySession.uniqueIdentifier) - if err == nil && reliabilityReply != nil { - dataReliabilityVerifications = append(dataReliabilityVerifications, - &DataReliabilityResult{ - reply: reliabilityReply, - relayRequest: reliabilityRequest, - providerPublicAddress: dataReliabilitySession.providerPublicAddress, - }) - } - } - if len(dataReliabilityVerifications) > 0 { - s.verifyReliabilityResults(originalDataReliabilityResult, dataReliabilityVerifications, numberOfReliabilitySessions) - } - } - go checkReliability() - } - } - - return reply, replyServer, latency, fromCache, nil -} - -// Verify all dataReliabilityVerifications with one another -// The original reply and request should be in dataReliabilityVerifications as well. -func (s *Sentry) verifyReliabilityResults(originalResult *DataReliabilityResult, dataReliabilityResults []*DataReliabilityResult, totalNumberOfSessions int) { - verificationsLength := len(dataReliabilityResults) - var conflict bool // if conflict is true at the end of the function, reliability failed. - participatingProviders := make(map[string]string, verificationsLength+1) - participatingProviders["originalAddress"] = originalResult.providerPublicAddress - for idx, drr := range dataReliabilityResults { - add := drr.providerPublicAddress - participatingProviders["address"+strconv.Itoa(idx)] = add - if !s.CompareRelaysAndReportConflict(originalResult.reply, originalResult.relayRequest, drr.reply, drr.relayRequest) { - // if we failed to compare relays with original reply and result we need to stop and compare them to one another. 
- conflict = true - } - } - - if conflict { - // CompareRelaysAndReportConflict to each one of the data reliability relays to confirm that the first relay wasn't ok - for idx1 := 0; idx1 < verificationsLength; idx1++ { - for idx2 := (idx1 + 1); idx2 < verificationsLength; idx2++ { - s.CompareRelaysAndReportConflict( - dataReliabilityResults[idx1].reply, // reply 1 - dataReliabilityResults[idx1].relayRequest, // request 1 - dataReliabilityResults[idx2].reply, // reply 2 - dataReliabilityResults[idx2].relayRequest) // request 2 - } - } - } - - if !conflict && totalNumberOfSessions == verificationsLength { // if no conflict was detected data reliability was successful - // all reliability sessions succeeded - utils.LavaFormatInfo("Reliability verified successfully!", &participatingProviders) - } else { - utils.LavaFormatInfo("Reliability failed to verify!", &participatingProviders) - } -} - -func checkFinalizedHashes(s *Sentry, providerAcc string, latestBlock int64, finalizedBlocks map[int64]string, req *pairingtypes.RelayRequest, reply *pairingtypes.RelayReply) (bool, error) { - s.providerDataContainersMu.Lock() - defer s.providerDataContainersMu.Unlock() - - if len(s.providerHashesConsensus) == 0 && len(s.prevEpochProviderHashesConsensus) == 0 { - newHashConsensus := s.initProviderHashesConsensus(providerAcc, latestBlock, finalizedBlocks, reply, req) - s.providerHashesConsensus = append(make([]ProviderHashesConsensus, 0), newHashConsensus) - } else { - matchWithExistingConsensus := false - - // Looks for discrepancy with current epoch providers - for idx, consensus := range s.providerHashesConsensus { - discrepancyResult, err := s.discrepancyChecker(finalizedBlocks, consensus) - if err != nil { - return false, utils.LavaFormatError("Simulation: Conflict found in discrepancyChecker", err, nil) - } - - // if no conflicts, insert into consensus and break - if !discrepancyResult { - matchWithExistingConsensus = true - } else { - utils.LavaFormatError("Simulation: Conflict found between consensus and provider", err, &map[string]string{"Consensus idx": strconv.Itoa(idx), "provider": providerAcc}) - } - - // if no discrepancy with this group -> insert into consensus and break - if matchWithExistingConsensus { - // TODO:: Add more incriminating data to consensus - s.insertProviderToConsensus(&consensus, finalizedBlocks, latestBlock, reply, req, providerAcc) - break - } - } - - // create new consensus group if no consensus matched - if !matchWithExistingConsensus { - newHashConsensus := s.initProviderHashesConsensus(providerAcc, latestBlock, finalizedBlocks, reply, req) - s.providerHashesConsensus = append(make([]ProviderHashesConsensus, 0), newHashConsensus) - } - - // check for discrepancy with old epoch - for idx, consensus := range s.prevEpochProviderHashesConsensus { - discrepancyResult, err := s.discrepancyChecker(finalizedBlocks, consensus) - if err != nil { - return false, utils.LavaFormatError("Simulation: prev epoch Conflict found in discrepancyChecker", err, nil) - } - - if discrepancyResult { - utils.LavaFormatError("Simulation: prev epoch Conflict found between consensus and provider", err, &map[string]string{"Consensus idx": strconv.Itoa(idx), "provider": providerAcc}) - } - } - } - - return false, nil -} - -func (s *Sentry) IsFinalizedBlock(requestedBlock int64, latestBlock int64) bool { - return spectypes.IsFinalizedBlock(requestedBlock, latestBlock, s.GetSpecBlockDistanceForFinalizedData()) -} - -func (s *Sentry) GetLatestFinalizedBlock(latestBlock int64) int64 { - 
finalization_criteria := int64(s.GetSpecBlockDistanceForFinalizedData()) - return latestBlock - finalization_criteria -} - -func (s *Sentry) clearAuthResponseCache(blockHeight int64) { - // Clear cache - s.authorizationCacheMutex.Lock() - defer s.authorizationCacheMutex.Unlock() - for key := range s.authorizationCache { - if key < s.GetPrevEpochHeight() { - delete(s.authorizationCache, key) - } - } -} - -func (s *Sentry) getAuthResponseFromCache(consumer string, blockHeight uint64) *pairingtypes.QueryVerifyPairingResponse { - // Check cache - s.authorizationCacheMutex.RLock() - defer s.authorizationCacheMutex.RUnlock() - if entry, hasEntryForBlockHeight := s.authorizationCache[blockHeight]; hasEntryForBlockHeight { - if cachedResponse, ok := entry[consumer]; ok { - return cachedResponse - } - } - - return nil -} - -func (s *Sentry) IsAuthorizedConsumer(ctx context.Context, consumer string, blockHeight uint64) (*pairingtypes.QueryVerifyPairingResponse, error) { - res := s.getAuthResponseFromCache(consumer, blockHeight) - if res != nil { - // User was authorized before, response returned from cache. - return res, nil - } - - res, err := s.pairingQueryClient.VerifyPairing(context.Background(), &pairingtypes.QueryVerifyPairingRequest{ - ChainID: s.ChainID, - Client: consumer, - Provider: s.Acc, - Block: blockHeight, - }) - if err != nil { - return nil, err - } - if res.GetValid() { - s.authorizationCacheMutex.Lock() - if _, ok := s.authorizationCache[blockHeight]; !ok { - s.authorizationCache[blockHeight] = map[string]*pairingtypes.QueryVerifyPairingResponse{} // init - } - s.authorizationCache[blockHeight][consumer] = res - s.authorizationCacheMutex.Unlock() - return res, nil - } - - return nil, utils.LavaFormatError("invalid self pairing with consumer", nil, &map[string]string{"consumer address": consumer, "CurrentBlock": strconv.FormatInt(s.GetBlockHeight(), 10)}) -} - -func (s *Sentry) IsAuthorizedPairing(ctx context.Context, consumer string, provider string, block uint64) (bool, error) { - // - // TODO: cache results! 
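// One possible shape for the caching mentioned in the TODO above, following the same
// pattern as the authorizationCache used by IsAuthorizedConsumer. This is only a sketch:
// the pairingCacheKey type, the pairingCache map and pairingCacheMutex are hypothetical
// additions and do not exist on the Sentry struct in this file.
type pairingCacheKey struct {
	consumer string
	provider string
	block    uint64
}

func (s *Sentry) getPairingResponseFromCache(key pairingCacheKey) (valid bool, found bool) {
	s.pairingCacheMutex.RLock() // hypothetical sync.RWMutex guarding the cache
	defer s.pairingCacheMutex.RUnlock()
	valid, found = s.pairingCache[key] // hypothetical map[pairingCacheKey]bool of verified pairings
	return valid, found
}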
- - res, err := s.pairingQueryClient.VerifyPairing(context.Background(), &pairingtypes.QueryVerifyPairingRequest{ - ChainID: s.ChainID, - Client: consumer, - Provider: provider, - Block: block, - }) - if err != nil { - return false, err - } - if res.GetValid() { - return true, nil - } - return false, utils.LavaFormatError("invalid pairing with consumer", nil, &map[string]string{"consumer address": consumer, "CurrentBlock": strconv.FormatInt(s.GetBlockHeight(), 10), "requested block": strconv.FormatUint(block, 10)}) -} - -func (s *Sentry) GetReliabilityThreshold() uint32 { - return s.serverSpec.ReliabilityThreshold -} - -func (s *Sentry) GetSpecName() string { - return s.serverSpec.Name -} - -func (s *Sentry) GetSpecDataReliabilityEnabled() bool { - return s.serverSpec.DataReliabilityEnabled -} - -func (s *Sentry) GetSpecBlockDistanceForFinalizedData() uint32 { - return s.serverSpec.BlockDistanceForFinalizedData -} - -func (s *Sentry) GetSpecBlocksInFinalizationProof() uint32 { - return s.serverSpec.BlocksInFinalizationProof -} - -func (s *Sentry) GetChainID() string { - return s.serverSpec.Index -} - -func (s *Sentry) GetAverageBlockTime() int64 { - return s.serverSpec.AverageBlockTime -} - -func (s *Sentry) MatchSpecApiByName(name string) (spectypes.ServiceApi, bool) { - s.specMu.RLock() - defer s.specMu.RUnlock() - // TODO: make it faster and better by not doing a regex instead using a better algorithm - for apiName, api := range s.serverApis { - re, err := regexp.Compile(apiName) - if err != nil { - utils.LavaFormatError("regex Compile api", err, &map[string]string{"apiName": apiName}) - continue - } - if re.Match([]byte(name)) { - return api, true - } - } - return spectypes.ServiceApi{}, false -} - -func (s *Sentry) GetSpecApiByName(name string) (spectypes.ServiceApi, bool) { - s.specMu.RLock() - defer s.specMu.RUnlock() - - val, ok := s.serverApis[name] - return val, ok -} - -func (s *Sentry) GetSpecApiByTag(tag string) (spectypes.ServiceApi, bool) { - s.specMu.RLock() - defer s.specMu.RUnlock() - - val, ok := s.taggedApis[tag] - return val, ok -} - -func (s *Sentry) GetBlockHeight() int64 { - return atomic.LoadInt64(&s.blockHeight) -} - -func (s *Sentry) SetBlockHeight(blockHeight int64) { - atomic.StoreInt64(&s.blockHeight, blockHeight) -} - -func (s *Sentry) GetCurrentEpochHeight() uint64 { - return atomic.LoadUint64(&s.currentEpoch) -} - -func (s *Sentry) SetCurrentEpochHeight(blockHeight int64) { - atomic.StoreUint64(&s.currentEpoch, uint64(blockHeight)) -} - -func (s *Sentry) GetPrevEpochHeight() uint64 { - return atomic.LoadUint64(&s.prevEpoch) -} - -func (s *Sentry) SetPrevEpochHeight(blockHeight uint64) { - atomic.StoreUint64(&s.prevEpoch, blockHeight) -} - -func (s *Sentry) GetOverlapSize() uint64 { - return atomic.LoadUint64(&s.EpochBlocksOverlap) -} - -func (s *Sentry) GetCUServiced() uint64 { - return atomic.LoadUint64(&s.totalCUServiced) -} - -func (s *Sentry) SetCUServiced(cu uint64) { - atomic.StoreUint64(&s.totalCUServiced, cu) -} - -func (s *Sentry) UpdateCUServiced(cu uint64) { - // we lock because we dont want the value changing after we read it before we store - s.PaymentsMu.Lock() - defer s.PaymentsMu.Unlock() - currentCU := atomic.LoadUint64(&s.totalCUServiced) - atomic.StoreUint64(&s.totalCUServiced, currentCU+cu) -} - -func (s *Sentry) GetMaxCUForUser(ctx context.Context, address string, chainID string) (maxCu uint64, err error) { - UserEntryRes, err := s.pairingQueryClient.UserEntry(ctx, &pairingtypes.QueryUserEntryRequest{ChainID: chainID, Address: address, 
Block: uint64(s.GetBlockHeight())}) - if err != nil { - return 0, utils.LavaFormatError("failed querying StakeEntry for consumer", err, &map[string]string{"chainID": chainID, "address": address, "block": strconv.FormatInt(s.GetBlockHeight(), 10)}) - } - return UserEntryRes.GetMaxCU(), nil -} - -func (s *Sentry) GetVrfPkAndMaxCuForUser(ctx context.Context, address string, chainID string, requestBlock int64) (vrfPk *utils.VrfPubKey, maxCu uint64, err error) { - UserEntryRes, err := s.pairingQueryClient.UserEntry(ctx, &pairingtypes.QueryUserEntryRequest{ChainID: chainID, Address: address, Block: uint64(requestBlock)}) - if err != nil { - return nil, 0, utils.LavaFormatError("StakeEntry querying for consumer failed", err, &map[string]string{"chainID": chainID, "address": address, "block": strconv.FormatInt(requestBlock, 10)}) - } - vrfPk = &utils.VrfPubKey{} - vrfPk, err = vrfPk.DecodeFromBech32(UserEntryRes.GetConsumer().Vrfpk) - if err != nil { - err = utils.LavaFormatError("decoding vrfpk from bech32", err, &map[string]string{"chainID": chainID, "address": address, "block": strconv.FormatInt(requestBlock, 10), "UserEntryRes": fmt.Sprintf("%v", UserEntryRes)}) - } - return vrfPk, UserEntryRes.GetMaxCU(), err -} - -func (s *Sentry) ExpectedBlockHeight() (int64, int) { - s.providerDataContainersMu.RLock() - defer s.providerDataContainersMu.RUnlock() - averageBlockTime_ms := s.serverSpec.AverageBlockTime - listExpectedBlockHeights := []int64{} - - var highestBlockNumber int64 = 0 - FindHighestBlockNumber := func(listProviderHashesConsensus []ProviderHashesConsensus) int64 { - for _, providerHashesConsensus := range listProviderHashesConsensus { - for _, providerDataContainer := range providerHashesConsensus.agreeingProviders { - if highestBlockNumber < providerDataContainer.LatestFinalizedBlock { - highestBlockNumber = providerDataContainer.LatestFinalizedBlock - } - } - } - return highestBlockNumber - } - highestBlockNumber = FindHighestBlockNumber(s.prevEpochProviderHashesConsensus) // update the highest in place - highestBlockNumber = FindHighestBlockNumber(s.providerHashesConsensus) - - now := time.Now() - calcExpectedBlocks := func(listProviderHashesConsensus []ProviderHashesConsensus) []int64 { - listExpectedBH := []int64{} - for _, providerHashesConsensus := range listProviderHashesConsensus { - for _, providerDataContainer := range providerHashesConsensus.agreeingProviders { - expected := providerDataContainer.LatestFinalizedBlock + (now.Sub(providerDataContainer.LatestBlockTime).Milliseconds() / averageBlockTime_ms) // interpolation - // limit the interpolation to the highest seen block height - if expected > highestBlockNumber { - expected = highestBlockNumber - } - listExpectedBH = append(listExpectedBH, expected) - } - } - return listExpectedBH - } - listExpectedBlockHeights = append(listExpectedBlockHeights, calcExpectedBlocks(s.prevEpochProviderHashesConsensus)...) - listExpectedBlockHeights = append(listExpectedBlockHeights, calcExpectedBlocks(s.providerHashesConsensus)...) 
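// The median helper below sorts the interpolated block heights and, for an even count, is
// presumably meant to average the two middle values. A minimal standalone sketch of that
// computation (an illustration under that assumption, not part of the original file), with
// parentheses around the sum so both middle elements are averaged:
func medianInt64(data []int64) int64 {
	if len(data) == 0 {
		return 0 // match the helper's behavior for an empty input
	}
	sorted := make([]int64, len(data))
	copy(sorted, data) // avoid mutating the caller's slice
	slices.Sort(sorted) // same slices.Sort helper already used by the surrounding code
	n := len(sorted)
	if n%2 == 0 {
		return (sorted[n/2-1] + sorted[n/2]) / 2 // average of the two middle elements
	}
	return sorted[n/2]
}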
- - median := func(data []int64) int64 { - slices.Sort(data) - - var median int64 - data_len := len(data) - if data_len == 0 { - return 0 - } else if data_len%2 == 0 { - median = (data[data_len/2-1] + data[data_len/2]/2.0) - } else { - median = data[data_len/2] - } - return median - } - - return median(listExpectedBlockHeights) - s.serverSpec.AllowedBlockLagForQosSync, len(listExpectedBlockHeights) -} - -func NewSentry( - clientCtx client.Context, - txFactory tx.Factory, - chainID string, - isUser bool, - voteInitiationCb func(ctx context.Context, voteID string, voteDeadline uint64, voteParams *VoteParams), - newEpochCb func(epochHeight int64), - apiInterface string, - vrf_sk vrf.PrivateKey, - flagSet *pflag.FlagSet, - serverID uint64, -) *Sentry { - rpcClient := clientCtx.Client - specQueryClient := spectypes.NewQueryClient(clientCtx) - pairingQueryClient := pairingtypes.NewQueryClient(clientCtx) - epochStorageQueryClient := epochstoragetypes.NewQueryClient(clientCtx) - acc := clientCtx.GetFromAddress().String() - currentBlock, err := rpc.GetChainHeight(clientCtx) - if err != nil { - utils.LavaFormatError("Sentry failed to get chain height", err, &map[string]string{"account": acc, "ChainID": chainID, "apiInterface": apiInterface}) - currentBlock = 0 - } - return &Sentry{ - ClientCtx: clientCtx, - rpcClient: rpcClient, - specQueryClient: specQueryClient, - pairingQueryClient: pairingQueryClient, - epochStorageQueryClient: epochStorageQueryClient, - ChainID: chainID, - txFactory: txFactory, - isUser: isUser, - Acc: acc, - newEpochCb: newEpochCb, - ApiInterface: apiInterface, - VrfSk: vrf_sk, - blockHeight: currentBlock, - specHash: nil, - cmdFlags: flagSet, - voteInitiationCb: voteInitiationCb, - serverID: serverID, - authorizationCache: map[uint64]map[string]*pairingtypes.QueryVerifyPairingResponse{}, - } -} - -func UpdateRequestedBlock(request *pairingtypes.RelayRequest, response *pairingtypes.RelayReply) { - // since sometimes the user is sending requested block that is a magic like latest, or earliest we need to specify to the reliability what it is - request.RelayData.RequestBlock = ReplaceRequestedBlock(request.RelayData.RequestBlock, response.LatestBlock) -} - -func ReplaceRequestedBlock(requestedBlock int64, latestBlock int64) int64 { - switch requestedBlock { - case spectypes.LATEST_BLOCK: - return latestBlock - case spectypes.SAFE_BLOCK: - return latestBlock - case spectypes.FINALIZED_BLOCK: - return latestBlock - case spectypes.EARLIEST_BLOCK: - return spectypes.NOT_APPLICABLE // TODO: add support for earliest block reliability - } - return requestedBlock -} diff --git a/relayer/sentry/tx.go b/relayer/sentry/tx.go deleted file mode 100644 index 57deb7c61a..0000000000 --- a/relayer/sentry/tx.go +++ /dev/null @@ -1,117 +0,0 @@ -package sentry - -import ( - "github.com/cosmos/cosmos-sdk/client" - "github.com/cosmos/cosmos-sdk/client/tx" - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/lavanet/lava/utils" -) - -const ( - defaultGasPrice = "0.000000001ulava" - defaultGasAdjustment = 1.5 -) - -func SimulateAndBroadCastTx(clientCtx client.Context, txf tx.Factory, msg sdk.Msg) error { - txf = txf.WithGasPrices(defaultGasPrice) - txf = txf.WithGasAdjustment(defaultGasAdjustment) - if err := msg.ValidateBasic(); err != nil { - return err - } - - txf, err := prepareFactory(clientCtx, txf) - if err != nil { - return err - } - - _, gasUsed, err := tx.CalculateGas(clientCtx, txf, msg) - if err != nil { - return err - } - - txf = txf.WithGas(gasUsed) - - err = 
tx.GenerateOrBroadcastTxWithFactory(clientCtx, txf, msg) - if err != nil { - return err - } - return nil -} - -// this function is extracted from the tx package so that we can use it locally to set the tx factory correctly -func prepareFactory(clientCtx client.Context, txf tx.Factory) (tx.Factory, error) { - from := clientCtx.GetFromAddress() - - if err := clientCtx.AccountRetriever.EnsureExists(clientCtx, from); err != nil { - return txf, err - } - - initNum, initSeq := txf.AccountNumber(), txf.Sequence() - if initNum == 0 || initSeq == 0 { - num, seq, err := clientCtx.AccountRetriever.GetAccountNumberSequence(clientCtx, from) - if err != nil { - return txf, err - } - - if initNum == 0 { - txf = txf.WithAccountNumber(num) - } - - if initSeq == 0 { - txf = txf.WithSequence(seq) - } - } - - return txf, nil -} - -func CheckProfitabilityAndBroadCastTx(clientCtx client.Context, txf tx.Factory, msg sdk.Msg) error { - txf = txf.WithGasPrices(defaultGasPrice) - txf = txf.WithGasAdjustment(defaultGasAdjustment) - if err := msg.ValidateBasic(); err != nil { - return err - } - - txf, err := prepareFactory(clientCtx, txf) - if err != nil { - return err - } - - simResult, gasUsed, err := tx.CalculateGas(clientCtx, txf, msg) - if err != nil { - return err - } - - txEvents := simResult.GetResult().Events - lavaReward := sdk.NewCoin("ulava", sdk.NewInt(0)) - for _, txEvent := range txEvents { - if txEvent.Type == "lava_relay_payment" { - for _, attribute := range txEvent.Attributes { - if string(attribute.Key) == "BasePay" { - lavaRewardTemp, err := sdk.ParseCoinNormalized(string(attribute.Value)) - if err != nil { - return err - } - lavaReward = lavaReward.Add(lavaRewardTemp) - break - } - } - } - } - - txf = txf.WithGas(gasUsed) - - gasFee := txf.GasPrices()[0] - gasFee.Amount = gasFee.Amount.MulInt64(int64(gasUsed)) - lavaRewardDec := sdk.NewDecCoinFromCoin(lavaReward) - - if gasFee.IsGTE(lavaRewardDec) { - return utils.LavaFormatError("lava_relay_payment claim is not profitable", nil, &map[string]string{"gasFee": gasFee.String(), "lavareward:": lavaRewardDec.String(), "msg": msg.String()}) - } - - err = tx.GenerateOrBroadcastTxWithFactory(clientCtx, txf, msg) - if err != nil { - return err - } - return nil -} diff --git a/relayer/test_client.go b/relayer/test_client.go deleted file mode 100644 index d06a3946e5..0000000000 --- a/relayer/test_client.go +++ /dev/null @@ -1,107 +0,0 @@ -package relayer - -import ( - context "context" - "log" - "math/rand" - "time" - - "github.com/cosmos/cosmos-sdk/client" - "github.com/cosmos/cosmos-sdk/client/tx" - "github.com/lavanet/lava/relayer/chainproxy" - "github.com/lavanet/lava/relayer/sentry" - "github.com/lavanet/lava/relayer/sigs" - "github.com/lavanet/lava/relayer/testclients" - "github.com/lavanet/lava/utils" - "github.com/spf13/pflag" -) - -func TestClient( - ctx context.Context, - txFactory tx.Factory, - clientCtx client.Context, - chainID string, - apiInterface string, - duration int64, - flagSet *pflag.FlagSet, -) { - // Every client must preseed - rand.Seed(time.Now().UnixNano()) - - // - sk, _, err := utils.GetOrCreateVRFKey(clientCtx) - if err != nil { - log.Fatalln("error: GetOrCreateVRFKey", err) - } - // Start sentry - sentry := sentry.NewSentry(clientCtx, txFactory, chainID, true, nil, nil, apiInterface, sk, flagSet, 0) - err = sentry.Init(ctx) - if err != nil { - log.Fatalln("error sentry.Init", err) - } - go sentry.Start(ctx) - for sentry.GetBlockHeight() == 0 { - time.Sleep(1 * time.Second) - } - - // - // Node - chainProxy, err := 
chainproxy.GetChainProxy("", chainproxy.NumberOfParallelConnections, sentry, nil, nil) - if err != nil { - log.Fatalln("error: GetChainProxy", err) - } - err = sentry.SetupConsumerSessionManager(ctx, chainProxy.GetConsumerSessionManager()) - if err != nil { - log.Fatalln("error: SetupConsumerSessionManager", err) - } - - // - // Set up a connection to the server. - log.Println("TestClient connecting") - - keyName, err := sigs.GetKeyName(clientCtx) - if err != nil { - log.Fatalln("error: getKeyName", err) - } - - privKey, err := sigs.GetPrivKey(clientCtx, keyName) - if err != nil { - log.Fatalln("error: getPrivKey", err) - } - clientKey, _ := clientCtx.Keyring.Key(keyName) - log.Println("Client pubkey", clientKey.GetPubKey().Address()) - - testDuration := time.Second * time.Duration(duration) - // Run tests - var testErrors error = nil - switch chainID { - case "ETH1": - testErrors = testclients.EthTests(ctx, chainID, "http://127.0.0.1:3333/1", testDuration) - case "GTH1": - testErrors = testclients.EthTests(ctx, chainID, "http://127.0.0.1:3339/1", testDuration) - case "FTM250": - testErrors = testclients.EthTests(ctx, chainID, "http://127.0.0.1:3336/1", testDuration) - case "COS1": - testErrors = testclients.TerraTests(ctx, chainProxy, privKey, apiInterface) - case "COS3", "COS4": - testErrors = testclients.OsmosisTests(ctx, chainProxy, privKey, apiInterface) - case "LAV1": - testErrors = testclients.LavaTests(ctx, chainProxy, privKey, apiInterface, sentry, clientCtx) - case "APT1": - testErrors = testclients.AptosTests(ctx, chainProxy, privKey, apiInterface, sentry, clientCtx) - case "JUN1": - testErrors = testclients.JunoTests(ctx, chainProxy, privKey, apiInterface) - case "COS5": - testErrors = testclients.CosmoshubTests(ctx, chainProxy, privKey, apiInterface, sentry, clientCtx) - case "STRK", "STRKT": - testErrors = testclients.StarknetTests(ctx, chainID, "http://127.0.0.1:3347/1", chainProxy, privKey, testDuration) - case "POLYGON1", "POLYGON1T": - testErrors = testclients.PolygonTests(ctx, chainID, "http://127.0.0.1:3351/1", chainProxy, privKey, testDuration) - } - - if testErrors != nil { - log.Fatalf("%s Client test failed with errors %s\n", chainID, testErrors) - } else { - log.Printf("%s Client test complete \n", chainID) - } -} diff --git a/relayer/testclients/aptos_tests.go b/relayer/testclients/aptos_tests.go deleted file mode 100644 index 93bb1db50d..0000000000 --- a/relayer/testclients/aptos_tests.go +++ /dev/null @@ -1,96 +0,0 @@ -package testclients - -import ( - "context" - "fmt" - "log" - "net/http" - "strings" - - "github.com/btcsuite/btcd/btcec" - "github.com/cosmos/cosmos-sdk/client" - "github.com/lavanet/lava/relayer/chainproxy" - "github.com/lavanet/lava/relayer/sentry" - spectypes "github.com/lavanet/lava/x/spec/types" -) - -const ( - restString string = spectypes.APIInterfaceRest - tendermintString string = spectypes.APIInterfaceTendermintRPC -) - -// AptosTests -func AptosTests(ctx context.Context, chainProxy chainproxy.ChainProxy, privKey *btcec.PrivateKey, apiInterface string, s *sentry.Sentry, clientCtx client.Context) error { - errors := []string{} - log.Println("Aptos test") - if apiInterface == restString { - log.Println("starting run important apis") - // clientAdress := clientCtx.FromAddress - version := "2406784" - account := "0x10f9091d233fd38b9d774bc641ed71ea7d3a21a0254fdfa9556901e9fad6a533" - - mostImportantApisToTest := map[string][]string{ - http.MethodGet: { - "/blocks/by_height/5", - "/blocks/by_version/" + version, - "/accounts/" + account, - 
"/accounts/" + account + "/resources", - "/accounts/" + account + "/modules", - }, - http.MethodPost: {}, - } - - for httpMethod, api := range mostImportantApisToTest { - for _, api_value := range api { - for i := 0; i < 100; i++ { - reply, _, err := chainproxy.SendRelay(ctx, chainProxy, privKey, api_value, "", httpMethod, "aptos_test", nil) - if err != nil { - log.Println(err) - errors = append(errors, fmt.Sprintf("%s", err)) - } else { - log.Printf("LavaTestsResponse: %v\n", reply) - // prettyPrintReply(*reply, "LavaTestsResponse") - } - } - } - } - - log.Println("continuing to other spec apis") - // finish with testing all other API methods that dont require parameters - allSpecNames, err := s.GetAllSpecNames(ctx) - if err != nil { - log.Println(err) - errors = append(errors, fmt.Sprintf("%s", err)) - } - for apiName, apiInterfaceList := range allSpecNames { - if strings.Contains(apiName, "/{") { - continue - } - - for _, api_interface := range apiInterfaceList { - if api_interface.Type == http.MethodPost { - // for now we dont want to run the post apis in this test - continue - } - log.Printf("%s", apiName) - reply, _, err := chainproxy.SendRelay(ctx, chainProxy, privKey, apiName, "", http.MethodGet, "aptos_test", nil) - if err != nil { - log.Println(err) - errors = append(errors, fmt.Sprintf("%s", err)) - } else { - prettyPrintReply(*reply, "LavaTestsResponse") - } - } - } - } else { - log.Printf("currently no tests for %s protocol", apiInterface) - return nil - } - - // if we had any errors we return them here - if len(errors) > 0 { - return fmt.Errorf(strings.Join(errors, ",\n")) - } - - return nil -} diff --git a/relayer/testclients/coshub_tests.go b/relayer/testclients/coshub_tests.go deleted file mode 100644 index bbe5b0848f..0000000000 --- a/relayer/testclients/coshub_tests.go +++ /dev/null @@ -1,124 +0,0 @@ -package testclients - -import ( - "context" - "fmt" - "log" - "net/http" - "strings" - - "github.com/btcsuite/btcd/btcec" - "github.com/cosmos/cosmos-sdk/client" - "github.com/lavanet/lava/relayer/chainproxy" - "github.com/lavanet/lava/relayer/sentry" -) - -// CosmoshubTests -func CosmoshubTests(ctx context.Context, chainProxy chainproxy.ChainProxy, privKey *btcec.PrivateKey, apiInterface string, s *sentry.Sentry, clientCtx client.Context) error { - errors := []string{} - switch apiInterface { - case restString: - { - log.Println("starting run important apis") - clientAdress := clientCtx.FromAddress - mostImportantApisToTest := map[string][]string{ - http.MethodGet: { - "/blocks/latest", - fmt.Sprintf("/cosmos/bank/v1beta1/balances/%s", clientAdress), - "/cosmos/gov/v1beta1/proposals", - "/blocks/latest", - "/cosmos/bank/v1beta1/balances/osmo1500hy75krs9e8t50aav6fahk8sxhajn9ctp40qwvvn8tcprkk6wszun4a5", - "/cosmos/gov/v1beta1/proposals", - }, - http.MethodPost: {}, - } - - for httpMethod, api := range mostImportantApisToTest { - for _, api_value := range api { - for i := 0; i < 100; i++ { - reply, _, err := chainproxy.SendRelay(ctx, chainProxy, privKey, api_value, "", httpMethod, "coshub_test", nil) - if err != nil { - log.Println(err) - errors = append(errors, fmt.Sprintf("%s", err)) - } else { - prettyPrintReply(*reply, "CosmoshubTestsResonse") - } - } - } - } - - log.Println("continuing to other spec apis") - // finish with testing all other API methods that dont require parameters - allSpecNames, err := s.GetAllSpecNames(ctx) - if err != nil { - log.Println(err) - errors = append(errors, fmt.Sprintf("%s", err)) - } - for apiName, apiInterfaceList := range allSpecNames { 
- if strings.Contains(apiName, "/{") { - continue - } - - for _, api_interface := range apiInterfaceList { - if api_interface.Type == http.MethodPost { - // for now we dont want to run the post apis in this test - continue - } - log.Printf("%s", apiName) - reply, _, err := chainproxy.SendRelay(ctx, chainProxy, privKey, apiName, "", http.MethodGet, "coshub_test", nil) - if err != nil { - log.Println(err) - errors = append(errors, fmt.Sprintf("%s", err)) - } else { - prettyPrintReply(*reply, "CosmoshubTestsResonse") - } - } - } - } - case tendermintString: - { - for i := 0; i < 100; i++ { - reply, _, err := chainproxy.SendRelay(ctx, chainProxy, privKey, "", JSONRPC_TERRA_STATUS, http.MethodGet, "coshub_test", nil) - if err != nil { - log.Println(err) - errors = append(errors, fmt.Sprintf("%s", err)) - } else { - prettyPrintReply(*reply, "JSONRPC_TERRA_STATUS") - } - reply, _, err = chainproxy.SendRelay(ctx, chainProxy, privKey, "", JSONRPC_TERRA_HEALTH, http.MethodGet, "coshub_test", nil) - if err != nil { - log.Println(err) - errors = append(errors, fmt.Sprintf("%s", err)) - } else { - prettyPrintReply(*reply, "JSONRPC_TERRA_HEALTH") - } - reply, _, err = chainproxy.SendRelay(ctx, chainProxy, privKey, URIRPC_TERRA_STATUS, "", http.MethodGet, "coshub_test", nil) - if err != nil { - log.Println(err) - errors = append(errors, fmt.Sprintf("%s", err)) - } else { - prettyPrintReply(*reply, "URIRPC_TERRA_STATUS") - } - reply, _, err = chainproxy.SendRelay(ctx, chainProxy, privKey, URIRPC_TERRA_HEALTH, "", http.MethodGet, "coshub_test", nil) - if err != nil { - log.Println(err) - errors = append(errors, fmt.Sprintf("%s", err)) - } else { - prettyPrintReply(*reply, "URIRPC_TERRA_HEALTH") - } - } - } - default: - { - log.Printf("currently no tests for %s protocol", apiInterface) - return nil - } - } - - // if we had any errors we return them here - if len(errors) > 0 { - return fmt.Errorf(strings.Join(errors, ",\n")) - } - - return nil -} diff --git a/relayer/testclients/ethereum_tests.go b/relayer/testclients/ethereum_tests.go deleted file mode 100644 index fbad2de90f..0000000000 --- a/relayer/testclients/ethereum_tests.go +++ /dev/null @@ -1,146 +0,0 @@ -package testclients - -import ( - "context" - "fmt" - "math/big" - "strings" - "time" - - "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/ethclient" - "github.com/lavanet/lava/utils" -) - -func EthTests(ctx context.Context, chainID string, rpcURL string, testDuration time.Duration) error { - utils.LavaFormatInfo("Starting "+chainID+" Tests", nil) - client, err := ethclient.Dial(rpcURL) - if err != nil { - return utils.LavaFormatError("error client dial", err, nil) - } - for start := time.Now(); time.Since(start) < testDuration; { - // eth_blockNumber - latestBlockNumberUint, err := client.BlockNumber(ctx) - if err != nil { - return utils.LavaFormatError("error eth_blockNumber", err, nil) - } - utils.LavaFormatInfo("reply JSONRPC_eth_blockNumber", &map[string]string{"blockNumber": fmt.Sprintf("%d", latestBlockNumberUint)}) - - // put in a loop for cases that a block have no tx because - var latestBlock *types.Block - var latestBlockNumber *big.Int - var latestBlockTxs types.Transactions - for { - // eth_getBlockByNumber - latestBlockNumber = big.NewInt(int64(latestBlockNumberUint)) - latestBlock, err = client.BlockByNumber(ctx, latestBlockNumber) - if err != nil { - return utils.LavaFormatError("error eth_getBlockByNumber", err, nil) - } - 
latestBlockTxs = latestBlock.Transactions() - - if len(latestBlockTxs) == 0 { - latestBlockNumberUint -= 1 - continue - } - break - } - utils.LavaFormatInfo("reply JSONRPC_eth_getBlockByNumber", nil) - - // eth_gasPrice - _, err = client.SuggestGasPrice(ctx) - if err != nil && !strings.Contains(err.Error(), "rpc error") { - return utils.LavaFormatError("error eth_gasPrice", err, nil) - } - utils.LavaFormatInfo("reply JSONRPC_eth_gasPrice", nil) - - if chainID != "FTM250" { - // eth_getBlockByHash - _, err = client.BlockByHash(ctx, latestBlock.Hash()) - if err != nil && !strings.Contains(err.Error(), "rpc error") { - return utils.LavaFormatError("error eth_getBlockByHash", err, nil) - } - utils.LavaFormatInfo("reply JSONRPC_eth_getBlockByHash", nil) - } - - targetTx := latestBlockTxs[0] - - // eth_getTransactionByHash - targetTx, _, err = client.TransactionByHash(ctx, targetTx.Hash()) - if err != nil { - return utils.LavaFormatError("error eth_getTransactionByHash", err, nil) - } - utils.LavaFormatInfo("reply JSONRPC_eth_getTransactionByHash", nil) - - // eth_getTransactionReceipt - _, err = client.TransactionReceipt(ctx, targetTx.Hash()) - if err != nil && !strings.Contains(err.Error(), "rpc error") { - return utils.LavaFormatError("error eth_getTransactionReceipt", err, nil) - } - utils.LavaFormatInfo("reply JSONRPC_eth_getTransactionReceipt", nil) - - targetTxMsg, _ := targetTx.AsMessage(types.LatestSignerForChainID(targetTx.ChainId()), nil) - - // eth_getBalance - _, err = client.BalanceAt(ctx, targetTxMsg.From(), nil) - if err != nil && !strings.Contains(err.Error(), "rpc error") { - return utils.LavaFormatError("error eth_getBalance", err, nil) - } - utils.LavaFormatInfo("reply JSONRPC_eth_getBalance", nil) - - // eth_getStorageAt - _, err = client.StorageAt(ctx, *targetTx.To(), common.HexToHash("00"), nil) - if err != nil && !strings.Contains(err.Error(), "rpc error") { - return utils.LavaFormatError("error eth_getStorageAt", err, nil) - } - utils.LavaFormatInfo("reply JSONRPC_eth_getStorageAt", nil) - - if chainID != "FTM250" { - // eth_getTransactionCount - _, err = client.TransactionCount(ctx, latestBlock.Hash()) - if err != nil && !strings.Contains(err.Error(), "rpc error") { - return utils.LavaFormatError("error eth_getTransactionCount", err, nil) - } - utils.LavaFormatInfo("reply JSONRPC_eth_getTransactionCount", nil) - } - // eth_getCode - _, err = client.CodeAt(ctx, *targetTx.To(), nil) - if err != nil && !strings.Contains(err.Error(), "rpc error") { - return utils.LavaFormatError("error eth_getCode", err, nil) - } - utils.LavaFormatInfo("reply JSONRPC_eth_getCode", nil) - - previousBlock := big.NewInt(int64(latestBlockNumberUint - 1)) - - callMsg := ethereum.CallMsg{ - From: targetTxMsg.From(), - To: targetTxMsg.To(), - Gas: targetTxMsg.Gas(), - GasPrice: targetTxMsg.GasPrice(), - GasFeeCap: targetTxMsg.GasFeeCap(), - GasTipCap: targetTxMsg.GasTipCap(), - Value: targetTxMsg.Value(), - Data: targetTxMsg.Data(), - AccessList: targetTxMsg.AccessList(), - } - - // eth_call - _, err = client.CallContract(ctx, callMsg, previousBlock) - if err != nil && !strings.Contains(err.Error(), "rpc error") { - return utils.LavaFormatError("error eth_call", err, nil) - } - utils.LavaFormatInfo("reply JSONRPC_eth_call", nil) - - if chainID != "GTH1" { - // eth_estimateGas - _, err = client.EstimateGas(ctx, callMsg) - if err != nil && !strings.Contains(err.Error(), "rpc error") { - return utils.LavaFormatError("error eth_estimateGas", err, nil) - } - utils.LavaFormatInfo("reply 
JSONRPC_eth_estimateGas", nil) - } - } - return nil -} diff --git a/relayer/testclients/juno_tests.go b/relayer/testclients/juno_tests.go deleted file mode 100644 index 7c8c893fe2..0000000000 --- a/relayer/testclients/juno_tests.go +++ /dev/null @@ -1,112 +0,0 @@ -package testclients - -import ( - "context" - "fmt" - "log" - "net/http" - "strings" - - "github.com/btcsuite/btcd/btcec" - "github.com/lavanet/lava/relayer/chainproxy" -) - -func JunoTests(ctx context.Context, chainProxy chainproxy.ChainProxy, privKey *btcec.PrivateKey, apiInterface string) error { - errors := []string{} - - switch apiInterface { - case restString: - { - // most important api test - mostImportantApisToTest := map[string][]string{ - http.MethodGet: { - // cosmos apis - "/cosmos/gov/v1beta1/proposals", - "/blocks/latest", - "/blocks/1", - // juno specific apis - "/cosmwasm/wasm/v1/code", - "/cosmwasm/wasm/v1/codes/pinned", - }, - http.MethodPost: {}, - } - - for httpMethod, api := range mostImportantApisToTest { - for _, api_value := range api { - for i := 0; i < 20; i++ { - reply, _, err := chainproxy.SendRelay(ctx, chainProxy, privKey, api_value, "", httpMethod, "juno_test", nil) - if err != nil { - log.Println(err) - errors = append(errors, fmt.Sprintf("%s", err)) - } else { - prettyPrintReply(*reply, "JunoTestsResponse") - } - } - } - } - - // other juno tests - for i := 0; i < 100; i++ { - reply, _, err := chainproxy.SendRelay(ctx, chainProxy, privKey, TERRA_BLOCKS_LATEST_URL_REST, TERRA_BLOCKS_LATEST_DATA_REST, http.MethodGet, "juno_test", nil) - if err != nil { - log.Println("1:" + err.Error()) - errors = append(errors, fmt.Sprintf("%s", err)) - } else { - prettyPrintReply(*reply, "TERRA_BLOCKS_LATEST_URL_REST") - } - reply, _, err = chainproxy.SendRelay(ctx, chainProxy, privKey, URIRPC_TERRA_STATUS, OSMOSIS_NUM_POOLS_DATA_REST, http.MethodGet, "juno_test", nil) - if err != nil { - log.Println("1:" + err.Error()) - errors = append(errors, fmt.Sprintf("%s", err)) - } else { - prettyPrintReply(*reply, "URIRPC_TERRA_STATUS") - } - } - } - case tendermintString: - { - for i := 0; i < 100; i++ { - reply, _, err := chainproxy.SendRelay(ctx, chainProxy, privKey, "", JSONRPC_TERRA_STATUS, http.MethodGet, "juno_test", nil) - if err != nil { - log.Println(err) - errors = append(errors, fmt.Sprintf("%s", err)) - } else { - prettyPrintReply(*reply, "JSONRPC_TERRA_STATUS") - } - reply, _, err = chainproxy.SendRelay(ctx, chainProxy, privKey, "", JSONRPC_TERRA_HEALTH, http.MethodGet, "juno_test", nil) - if err != nil { - log.Println(err) - errors = append(errors, fmt.Sprintf("%s", err)) - } else { - prettyPrintReply(*reply, "JSONRPC_TERRA_HEALTH") - } - reply, _, err = chainproxy.SendRelay(ctx, chainProxy, privKey, URIRPC_TERRA_STATUS, "", http.MethodGet, "juno_test", nil) - if err != nil { - log.Println(err) - errors = append(errors, fmt.Sprintf("%s", err)) - } else { - prettyPrintReply(*reply, "URIRPC_TERRA_STATUS") - } - reply, _, err = chainproxy.SendRelay(ctx, chainProxy, privKey, URIRPC_TERRA_HEALTH, "", http.MethodGet, "juno_test", nil) - if err != nil { - log.Println(err) - errors = append(errors, fmt.Sprintf("%s", err)) - } else { - prettyPrintReply(*reply, "URIRPC_TERRA_HEALTH") - } - } - } - default: - { - log.Println("ERROR: not supported apiInterface: ", apiInterface) - return nil - } - } - - // if we had any errors we return them here - if len(errors) > 0 { - return fmt.Errorf(strings.Join(errors, ",\n")) - } - - return nil -} diff --git a/relayer/testclients/lava_tests.go b/relayer/testclients/lava_tests.go 
deleted file mode 100644 index f17e8736fa..0000000000 --- a/relayer/testclients/lava_tests.go +++ /dev/null @@ -1,89 +0,0 @@ -package testclients - -import ( - "context" - "fmt" - "log" - "net/http" - "strings" - - "github.com/btcsuite/btcd/btcec" - "github.com/cosmos/cosmos-sdk/client" - "github.com/lavanet/lava/relayer/chainproxy" - "github.com/lavanet/lava/relayer/sentry" -) - -// LavaTests -func LavaTests(ctx context.Context, chainProxy chainproxy.ChainProxy, privKey *btcec.PrivateKey, apiInterface string, s *sentry.Sentry, clientCtx client.Context) error { - errors := []string{} - if apiInterface == restString { - log.Println("starting run important apis") - clientAdress := clientCtx.FromAddress - mostImportantApisToTest := map[string][]string{ - http.MethodGet: { - "/blocks/latest", - "/lavanet/lava/pairing/providers/LAV1", - "/lavanet/lava/pairing/clients/LAV1", - fmt.Sprintf("/lavanet/lava/pairing/get_pairing/LAV1/%s", clientAdress), - // fmt.Sprintf("/lavanet/lava/pairing/verify_pairing/LAV1/%s/%s/%d", clientAdress, clientAdress, 78), // verify pairing needs more work. as block is changed every iterations - fmt.Sprintf("/cosmos/bank/v1beta1/balances/%s", clientAdress), - "/cosmos/gov/v1beta1/proposals", - "/lavanet/lava/spec/spec", - "/blocks/1", - }, - http.MethodPost: {}, - } - - for httpMethod, api := range mostImportantApisToTest { - for _, api_value := range api { - for i := 0; i < 100; i++ { - reply, _, err := chainproxy.SendRelay(ctx, chainProxy, privKey, api_value, "", httpMethod, "lava_test", nil) - if err != nil { - log.Println(err) - errors = append(errors, fmt.Sprintf("%s", err)) - } else { - prettyPrintReply(*reply, "LavaTestsResponse") - } - } - } - } - - log.Println("continuing to other spec apis") - // finish with testing all other API methods that dont require parameters - allSpecNames, err := s.GetAllSpecNames(ctx) - if err != nil { - log.Println(err) - errors = append(errors, fmt.Sprintf("%s", err)) - } - for apiName, apiInterfaceList := range allSpecNames { - if strings.Contains(apiName, "/{") { - continue - } - - for _, api_interface := range apiInterfaceList { - if api_interface.Type == http.MethodPost { - // for now we dont want to run the post apis in this test - continue - } - log.Printf("%s", apiName) - reply, _, err := chainproxy.SendRelay(ctx, chainProxy, privKey, apiName, "", http.MethodGet, "lava_test", nil) - if err != nil { - log.Println(err) - errors = append(errors, fmt.Sprintf("%s", err)) - } else { - prettyPrintReply(*reply, "LavaTestsResponse") - } - } - } - } else { - log.Printf("currently no tests for %s protocol", apiInterface) - return nil - } - - // if we had any errors we return them here - if len(errors) > 0 { - return fmt.Errorf(strings.Join(errors, ",\n")) - } - - return nil -} diff --git a/relayer/testclients/osmosis_tests.go b/relayer/testclients/osmosis_tests.go deleted file mode 100644 index e542033825..0000000000 --- a/relayer/testclients/osmosis_tests.go +++ /dev/null @@ -1,118 +0,0 @@ -package testclients - -import ( - "context" - "fmt" - "log" - "net/http" - "strings" - - "github.com/btcsuite/btcd/btcec" - "github.com/lavanet/lava/relayer/chainproxy" -) - -func OsmosisTests(ctx context.Context, chainProxy chainproxy.ChainProxy, privKey *btcec.PrivateKey, apiInterface string) error { - errors := []string{} - switch apiInterface { - case restString: - { - // most important api test - mostImportantApisToTest := map[string][]string{ - http.MethodGet: { - // cosmos apis - 
"/cosmos/bank/v1beta1/balances/osmo1500hy75krs9e8t50aav6fahk8sxhajn9ctp40qwvvn8tcprkk6wszun4a5", - "/cosmos/gov/v1beta1/proposals", - "/blocks/latest", - "/blocks/1", - // osmosis apis - "/osmosis/gamm/v1beta1/pools", - "/osmosis/epochs/v1beta1/epochs", - "/osmosis/pool-incentives/v1beta1/incentivized_pools", - fmt.Sprintf("/osmosis/incentives/v1beta1/gauge_by_id/%d", 5411374), - "/osmosis/superfluid/v1beta1/all_assets", - "/osmosis/pool-incentives/v1beta1/distr_info", - "/osmosis/mint/v1beta1/epoch_provisions", - }, - http.MethodPost: {}, - } - - for httpMethod, api := range mostImportantApisToTest { - for _, api_value := range api { - for i := 0; i < 20; i++ { - reply, _, err := chainproxy.SendRelay(ctx, chainProxy, privKey, api_value, "", httpMethod, "osmo_test", nil) - if err != nil { - log.Println(err) - errors = append(errors, fmt.Sprintf("%s", err)) - } else { - prettyPrintReply(*reply, "OsmosisTestsResponse") - } - } - } - } - - // other osmosis tests - for i := 0; i < 100; i++ { - reply, _, err := chainproxy.SendRelay(ctx, chainProxy, privKey, TERRA_BLOCKS_LATEST_URL_REST, TERRA_BLOCKS_LATEST_DATA_REST, http.MethodGet, "osmo_test", nil) - if err != nil { - log.Println("1:" + err.Error()) - errors = append(errors, fmt.Sprintf("%s", err)) - } else { - prettyPrintReply(*reply, "TERRA_BLOCKS_LATEST_URL_REST") - } - reply, _, err = chainproxy.SendRelay(ctx, chainProxy, privKey, OSMOSIS_NUM_POOLS_URL_REST, OSMOSIS_NUM_POOLS_DATA_REST, http.MethodGet, "osmo_test", nil) - if err != nil { - log.Println("1:" + err.Error()) - errors = append(errors, fmt.Sprintf("%s", err)) - } else { - prettyPrintReply(*reply, "OSMOSIS_NUM_POOLS_URL_REST") - } - } - } - case tendermintString: - { - for i := 0; i < 100; i++ { - reply, _, err := chainproxy.SendRelay(ctx, chainProxy, privKey, "", JSONRPC_TERRA_STATUS, http.MethodGet, "osmo_test", nil) - if err != nil { - log.Println(err) - errors = append(errors, fmt.Sprintf("%s", err)) - } else { - prettyPrintReply(*reply, "JSONRPC_TERRA_STATUS") - } - reply, _, err = chainproxy.SendRelay(ctx, chainProxy, privKey, "", JSONRPC_TERRA_HEALTH, http.MethodGet, "osmo_test", nil) - if err != nil { - log.Println(err) - errors = append(errors, fmt.Sprintf("%s", err)) - } else { - prettyPrintReply(*reply, "JSONRPC_TERRA_HEALTH") - } - reply, _, err = chainproxy.SendRelay(ctx, chainProxy, privKey, URIRPC_TERRA_STATUS, "", http.MethodGet, "osmo_test", nil) - if err != nil { - log.Println(err) - errors = append(errors, fmt.Sprintf("%s", err)) - } else { - prettyPrintReply(*reply, "URIRPC_TERRA_STATUS") - } - reply, _, err = chainproxy.SendRelay(ctx, chainProxy, privKey, URIRPC_TERRA_HEALTH, "", http.MethodGet, "osmo_test", nil) - if err != nil { - log.Println(err) - errors = append(errors, fmt.Sprintf("%s", err)) - } else { - prettyPrintReply(*reply, "URIRPC_TERRA_HEALTH") - } - } - } - - default: - { - log.Println("ERROR: not supported apiInterface: ", apiInterface) - return nil - } - } - - // if we had any errors we return them here - if len(errors) > 0 { - return fmt.Errorf(strings.Join(errors, ",\n")) - } - - return nil -} diff --git a/relayer/testclients/polygon_tests.go b/relayer/testclients/polygon_tests.go deleted file mode 100644 index ac44c4b554..0000000000 --- a/relayer/testclients/polygon_tests.go +++ /dev/null @@ -1,54 +0,0 @@ -package testclients - -import ( - "context" - "net/http" - "time" - - "github.com/lavanet/lava/utils" - - "github.com/btcsuite/btcd/btcec" - "github.com/lavanet/lava/relayer/chainproxy" -) - -const ( - // JSONRPC_ETH_BLOCKNUMBER 
(defined in relayer/testclients/test_utils.go) - // JSONRPC_ETH_GETBALANCE (defined in relayer/testclients/test_utils.go) - // JSONRPC_ETH_NEWFILTER (defined in relayer/testclients/test_utils.go) - JSONRPC_BOR_GETAUTHOR = `{"jsonrpc":"2.0","method":"bor_getAuthor","params":["0x1000"],"id":1}` - JSONRPC_BOR_GETCURRENTVALIDATORS = `{"jsonrpc":"2.0","method":"bor_getCurrentValidators","id":1}` - JSONRPC_BOR_GETSIGNERSATHASH = `{"jsonrpc":"2.0","method":"bor_getSignersAtHash","params":["0x29fa73e3da83ddac98f527254fe37002e052725a88904bac14f03e919e1e2876"],"id":1}` - JSONRPC_BOR_GETROOTHASH = `{"jsonrpc":"2.0","method":"bor_getRootHash","params":[1024,1026],"id":1}` - // [NOT SUPPORTED] JSONRPC_BOR_GETCURRENTPROPOSER = `{"jsonrpc":"2.0","method":"bor_getCurrentProposer","id":1}` - // [NOT SUPPORTED] JSONRPC_ETH_GETROOTHASH = `{"jsonrpc":"2.0","method":"eth_getRootHash","params":[1024,1026],"id":1}` -) - -var polygon_tests = []struct{ name, payload string }{ - {"eth_blockNumber", JSONRPC_ETH_BLOCKNUMBER}, - {"eth_getBalance", JSONRPC_ETH_GETBALANCE}, - {"eth_newFilter", JSONRPC_ETH_NEWFILTER}, - {"bor_getAuthor", JSONRPC_BOR_GETAUTHOR}, - {"bor_getCurrentValidators", JSONRPC_BOR_GETCURRENTVALIDATORS}, - {"bor_getSignersAtHash", JSONRPC_BOR_GETSIGNERSATHASH}, - {"bor_getRootHash", JSONRPC_BOR_GETROOTHASH}, - // [NOT SUPPORTED] { "bor_getCurrentProposer", JSONRPC_BOR_GETCURRENTPROPOSER }, - // [NOT SUPPORTED] { "eth_getRootHash", JSONRPC_ETH_GETROOTHASH }, -} - -func PolygonTests(ctx context.Context, chainID string, rpcURL string, chainProxy chainproxy.ChainProxy, privKey *btcec.PrivateKey, testDuration time.Duration) error { - utils.LavaFormatInfo("Starting "+chainID+" Tests", nil) - - for start := time.Now(); time.Since(start) < testDuration; { - for j := 0; j < 10; j++ { - for _, t := range polygon_tests { - reply, _, err := chainproxy.SendRelay(ctx, chainProxy, privKey, rpcURL, t.payload, http.MethodGet, "polygon_test", nil) - if err != nil { - return utils.LavaFormatError("error "+t.name, err, nil) - } - prettyPrintReply(*reply, "JSONRPC_"+t.name) - } - } - time.Sleep(1 * time.Second) - } - return nil -} diff --git a/relayer/testclients/starknet_tests.go b/relayer/testclients/starknet_tests.go deleted file mode 100644 index ad1b8f8e26..0000000000 --- a/relayer/testclients/starknet_tests.go +++ /dev/null @@ -1,39 +0,0 @@ -package testclients - -import ( - "context" - "net/http" - "time" - - "github.com/lavanet/lava/utils" - - "github.com/btcsuite/btcd/btcec" - "github.com/lavanet/lava/relayer/chainproxy" -) - -const ( - JSONRPC_STRK_BLOCKNUMBER = `{"jsonrpc":"2.0","method":"starknet_blockNumber","params":[],"id":1}` - JSONRPC_STRK_BLOCKHASHANDNUMBER = `{"jsonrpc":"2.0","method":"starknet_blockHashAndNumber","params":[],"id":1}` -) - -func StarknetTests(ctx context.Context, chainID string, rpcURL string, chainProxy chainproxy.ChainProxy, privKey *btcec.PrivateKey, testDuration time.Duration) error { - utils.LavaFormatInfo("Starting "+chainID+" Tests", nil) - - for start := time.Now(); time.Since(start) < testDuration; { - for j := 0; j < 10; j++ { - reply, _, err := chainproxy.SendRelay(ctx, chainProxy, privKey, rpcURL, JSONRPC_STRK_BLOCKNUMBER, http.MethodGet, "starknet_test", nil) - if err != nil { - return utils.LavaFormatError("error starknet_blockNumber", err, nil) - } - prettyPrintReply(*reply, "JSONRPC_STRK_BLOCKNUMBER") - - reply, _, err = chainproxy.SendRelay(ctx, chainProxy, privKey, rpcURL, JSONRPC_STRK_BLOCKHASHANDNUMBER, http.MethodGet, "starknet_test", nil) - if err != nil { - 
return utils.LavaFormatError("error starknet_blockHashAndNumber", err, nil) - } - prettyPrintReply(*reply, "JSONRPC_STRK_BLOCKHASHANDNUMBER") - } - time.Sleep(1 * time.Second) - } - return nil -} diff --git a/relayer/testclients/terra_tests.go b/relayer/testclients/terra_tests.go deleted file mode 100644 index 418d10a7c9..0000000000 --- a/relayer/testclients/terra_tests.go +++ /dev/null @@ -1,74 +0,0 @@ -package testclients - -import ( - "context" - "fmt" - "log" - "net/http" - "strings" - - "github.com/btcsuite/btcd/btcec" - "github.com/lavanet/lava/relayer/chainproxy" -) - -func TerraTests(ctx context.Context, chainProxy chainproxy.ChainProxy, privKey *btcec.PrivateKey, apiInterface string) error { - errors := []string{} - switch apiInterface { - case restString: - { - for i := 0; i < 10; i++ { - reply, _, err := chainproxy.SendRelay(ctx, chainProxy, privKey, TERRA_BLOCKS_LATEST_URL_REST, TERRA_BLOCKS_LATEST_DATA_REST, http.MethodGet, "terra_test", nil) - if err != nil { - log.Println("1:" + err.Error()) - errors = append(errors, fmt.Sprintf("%s", err)) - } else { - prettyPrintReply(*reply, "TERRA_BLOCKS_LATEST_URL_REST") - } - } - } - case tendermintString: - { - for i := 0; i < 10; i++ { - reply, _, err := chainproxy.SendRelay(ctx, chainProxy, privKey, "", JSONRPC_TERRA_STATUS, http.MethodGet, "terra_test", nil) - if err != nil { - log.Println(err) - errors = append(errors, fmt.Sprintf("%s", err)) - } else { - prettyPrintReply(*reply, "JSONRPC_TERRA_STATUS") - } - reply, _, err = chainproxy.SendRelay(ctx, chainProxy, privKey, "", JSONRPC_TERRA_HEALTH, http.MethodGet, "terra_test", nil) - if err != nil { - log.Println(err) - errors = append(errors, fmt.Sprintf("%s", err)) - } else { - prettyPrintReply(*reply, "JSONRPC_TERRA_HEALTH") - } - reply, _, err = chainproxy.SendRelay(ctx, chainProxy, privKey, URIRPC_TERRA_STATUS, "", http.MethodGet, "terra_test", nil) - if err != nil { - log.Println(err) - errors = append(errors, fmt.Sprintf("%s", err)) - } else { - prettyPrintReply(*reply, "JSONRPC_TERRA_HEALTH") - log.Println("reply URIRPC_TERRA_STATUS", reply) - } - reply, _, err = chainproxy.SendRelay(ctx, chainProxy, privKey, URIRPC_TERRA_HEALTH, "", http.MethodGet, "terra_test", nil) - if err != nil { - log.Println(err) - errors = append(errors, fmt.Sprintf("%s", err)) - } else { - prettyPrintReply(*reply, "URIRPC_TERRA_HEALTH") - } - } - } - default: - log.Println("ERROR: not supported apiInterface: ", apiInterface) - return nil - } - - // if we had any errors we return them here - if len(errors) > 0 { - return fmt.Errorf(strings.Join(errors, ",\n")) - } - - return nil -} diff --git a/relayer/testclients/test_utils.go b/relayer/testclients/test_utils.go deleted file mode 100644 index fd26bb2b5c..0000000000 --- a/relayer/testclients/test_utils.go +++ /dev/null @@ -1,36 +0,0 @@ -package testclients - -import ( - "bytes" - "log" - - "github.com/lavanet/lava/x/pairing/types" -) - -const ( - JSONRPC_ETH_BLOCKNUMBER = `{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}` - JSONRPC_ETH_GETBALANCE = `{"jsonrpc":"2.0","method":"eth_getBalance","params":["0xEA674fdDe714fd979de3EdF0F56AA9716B898ec8", "latest"],"id":77}` - JSONRPC_ETH_TRACE_REPLAY_BLOCK_TRANSACTIONS = `{"jsonrpc":"2.0","method":"trace_replayBlockTransactions","params":["latest", "trace"],"id":1}` - JSONRPC_UNSUPPORTED = `{"jsonrpc":"2.0","method":"eth_blahblah","params":[],"id":1}` - JSONRPC_ETH_NEWFILTER = `{"jsonrpc":"2.0","method":"eth_newFilter","params":[{"fromBlock": "0x12345","toBlock": "0x23456"}],"id":73}` - 
JSONRPC_ETH_GETBLOCK_FORMAT = `{"jsonrpc":"2.0","method":"eth_getBlockByNumber","params":["0x%x", false],"id":1}` - - TERRA_BLOCKS_LATEST_URL_REST = "/blocks/latest" - TERRA_BLOCKS_LATEST_DATA_REST = `` - OSMOSIS_NUM_POOLS_URL_REST = "/osmosis/gamm/v1beta1/num_pools" - OSMOSIS_NUM_POOLS_DATA_REST = `` - JSONRPC_TERRA_STATUS = `{"jsonrpc":"2.0","method":"status","params":[],"id":1}` - JSONRPC_TERRA_HEALTH = `{"jsonrpc":"2.0","method":"health","params":[],"id":2}` - URIRPC_TERRA_STATUS = `status?` - URIRPC_TERRA_HEALTH = `health` -) - -func prettyPrintReply(reply types.RelayReply, name string) { - reply.Sig = nil // for nicer prints - reply.SigBlocks = nil - reply.FinalizedBlocksHashes = nil - if len(reply.Data) > 200 { - reply.Data = bytes.Join([][]byte{reply.Data[:200], []byte("...TooLong...")}, nil) // too long is ugly - } - log.Printf("reply %s, %s", name, reply.String()) -} diff --git a/testutil/common/common.go b/testutil/common/common.go index 897584deb4..30745bbe31 100644 --- a/testutil/common/common.go +++ b/testutil/common/common.go @@ -7,9 +7,9 @@ import ( btcSecp256k1 "github.com/btcsuite/btcd/btcec" "github.com/coniks-sys/coniks-go/crypto/vrf" sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/lavanet/lava/relayer/sigs" testkeeper "github.com/lavanet/lava/testutil/keeper" "github.com/lavanet/lava/utils" + "github.com/lavanet/lava/utils/sigs" conflicttypes "github.com/lavanet/lava/x/conflict/types" epochstoragetypes "github.com/lavanet/lava/x/epochstorage/types" "github.com/lavanet/lava/x/pairing/types" diff --git a/relayer/sigs/sigs.go b/utils/sigs/sigs.go similarity index 100% rename from relayer/sigs/sigs.go rename to utils/sigs/sigs.go diff --git a/x/conflict/keeper/conflict.go b/x/conflict/keeper/conflict.go index 9b9283e88e..4ec9b63232 100644 --- a/x/conflict/keeper/conflict.go +++ b/x/conflict/keeper/conflict.go @@ -5,7 +5,7 @@ import ( "fmt" sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/lavanet/lava/relayer/sigs" + "github.com/lavanet/lava/utils/sigs" "github.com/lavanet/lava/x/conflict/types" pairingtypes "github.com/lavanet/lava/x/pairing/types" ) diff --git a/x/conflict/keeper/msg_server_detection_test.go b/x/conflict/keeper/msg_server_detection_test.go index 70574197f3..112e08ba55 100644 --- a/x/conflict/keeper/msg_server_detection_test.go +++ b/x/conflict/keeper/msg_server_detection_test.go @@ -5,9 +5,9 @@ import ( "testing" sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/lavanet/lava/relayer/sigs" "github.com/lavanet/lava/testutil/common" testkeeper "github.com/lavanet/lava/testutil/keeper" + "github.com/lavanet/lava/utils/sigs" conflicttypes "github.com/lavanet/lava/x/conflict/types" "github.com/lavanet/lava/x/pairing/types" spectypes "github.com/lavanet/lava/x/spec/types" diff --git a/x/conflict/keeper/vote_test.go b/x/conflict/keeper/vote_test.go index 71f64b12c9..a33c5e4ee4 100644 --- a/x/conflict/keeper/vote_test.go +++ b/x/conflict/keeper/vote_test.go @@ -5,9 +5,9 @@ import ( "testing" sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/lavanet/lava/relayer/sigs" "github.com/lavanet/lava/testutil/common" testkeeper "github.com/lavanet/lava/testutil/keeper" + "github.com/lavanet/lava/utils/sigs" conflicttypes "github.com/lavanet/lava/x/conflict/types" "github.com/stretchr/testify/require" ) diff --git a/x/pairing/keeper/fixation_test.go b/x/pairing/keeper/fixation_test.go index 944c8586e3..16fa75a29a 100644 --- a/x/pairing/keeper/fixation_test.go +++ b/x/pairing/keeper/fixation_test.go @@ -6,9 +6,9 @@ import ( sdk 
"github.com/cosmos/cosmos-sdk/types" - "github.com/lavanet/lava/relayer/sigs" "github.com/lavanet/lava/testutil/common" testkeeper "github.com/lavanet/lava/testutil/keeper" + "github.com/lavanet/lava/utils/sigs" epochstoragetypes "github.com/lavanet/lava/x/epochstorage/types" pairingtypes "github.com/lavanet/lava/x/pairing/types" "github.com/stretchr/testify/require" diff --git a/x/pairing/keeper/msg_server_freeze_test.go b/x/pairing/keeper/msg_server_freeze_test.go index 997decba1d..6abd3ff2a8 100644 --- a/x/pairing/keeper/msg_server_freeze_test.go +++ b/x/pairing/keeper/msg_server_freeze_test.go @@ -4,8 +4,8 @@ import ( "testing" sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/lavanet/lava/relayer/sigs" testkeeper "github.com/lavanet/lava/testutil/keeper" + "github.com/lavanet/lava/utils/sigs" "github.com/lavanet/lava/x/pairing/types" "github.com/stretchr/testify/require" ) diff --git a/x/pairing/keeper/msg_server_relay_payment.go b/x/pairing/keeper/msg_server_relay_payment.go index 7f1da36746..a9c5138bff 100644 --- a/x/pairing/keeper/msg_server_relay_payment.go +++ b/x/pairing/keeper/msg_server_relay_payment.go @@ -7,8 +7,8 @@ import ( "strconv" sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/lavanet/lava/relayer/sigs" "github.com/lavanet/lava/utils" + "github.com/lavanet/lava/utils/sigs" epochstoragetypes "github.com/lavanet/lava/x/epochstorage/types" "github.com/lavanet/lava/x/pairing/types" "github.com/tendermint/tendermint/libs/log" diff --git a/x/pairing/keeper/msg_server_relay_payment_gov_test.go b/x/pairing/keeper/msg_server_relay_payment_gov_test.go index 008b590f76..602967f063 100644 --- a/x/pairing/keeper/msg_server_relay_payment_gov_test.go +++ b/x/pairing/keeper/msg_server_relay_payment_gov_test.go @@ -5,8 +5,8 @@ import ( "testing" sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/lavanet/lava/relayer/sigs" testkeeper "github.com/lavanet/lava/testutil/keeper" + "github.com/lavanet/lava/utils/sigs" epochstoragetypes "github.com/lavanet/lava/x/epochstorage/types" pairingtypes "github.com/lavanet/lava/x/pairing/types" "github.com/stretchr/testify/require" diff --git a/x/pairing/keeper/msg_server_relay_payment_test.go b/x/pairing/keeper/msg_server_relay_payment_test.go index 078e6c1d73..51757d6730 100644 --- a/x/pairing/keeper/msg_server_relay_payment_test.go +++ b/x/pairing/keeper/msg_server_relay_payment_test.go @@ -6,10 +6,10 @@ import ( "testing" sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/lavanet/lava/relayer/sigs" "github.com/lavanet/lava/testutil/common" testkeeper "github.com/lavanet/lava/testutil/keeper" "github.com/lavanet/lava/utils" + "github.com/lavanet/lava/utils/sigs" epochstoragetypes "github.com/lavanet/lava/x/epochstorage/types" "github.com/lavanet/lava/x/pairing/types" plantypes "github.com/lavanet/lava/x/plans/types" diff --git a/x/pairing/keeper/msg_server_stake_client_test.go b/x/pairing/keeper/msg_server_stake_client_test.go index 4c71093c47..d109be0cb8 100644 --- a/x/pairing/keeper/msg_server_stake_client_test.go +++ b/x/pairing/keeper/msg_server_stake_client_test.go @@ -4,10 +4,10 @@ import ( "testing" sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/lavanet/lava/relayer/sigs" "github.com/lavanet/lava/testutil/common" testkeeper "github.com/lavanet/lava/testutil/keeper" "github.com/lavanet/lava/utils" + "github.com/lavanet/lava/utils/sigs" epochstoragetypes "github.com/lavanet/lava/x/epochstorage/types" "github.com/lavanet/lava/x/pairing/types" "github.com/stretchr/testify/require" diff --git 
a/x/pairing/keeper/msg_server_stake_provider_test.go b/x/pairing/keeper/msg_server_stake_provider_test.go index 7301b79a37..f57783b9ab 100644 --- a/x/pairing/keeper/msg_server_stake_provider_test.go +++ b/x/pairing/keeper/msg_server_stake_provider_test.go @@ -4,9 +4,9 @@ import ( "testing" sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/lavanet/lava/relayer/sigs" "github.com/lavanet/lava/testutil/common" testkeeper "github.com/lavanet/lava/testutil/keeper" + "github.com/lavanet/lava/utils/sigs" epochstoragetypes "github.com/lavanet/lava/x/epochstorage/types" "github.com/lavanet/lava/x/pairing/types" "github.com/stretchr/testify/require" diff --git a/x/pairing/keeper/msg_server_unstake_client_test.go b/x/pairing/keeper/msg_server_unstake_client_test.go index ff5542eb19..d18e41bab7 100644 --- a/x/pairing/keeper/msg_server_unstake_client_test.go +++ b/x/pairing/keeper/msg_server_unstake_client_test.go @@ -4,10 +4,10 @@ import ( "testing" sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/lavanet/lava/relayer/sigs" "github.com/lavanet/lava/testutil/common" testkeeper "github.com/lavanet/lava/testutil/keeper" "github.com/lavanet/lava/utils" + "github.com/lavanet/lava/utils/sigs" epochstoragetypes "github.com/lavanet/lava/x/epochstorage/types" "github.com/lavanet/lava/x/pairing/types" "github.com/stretchr/testify/require" diff --git a/x/pairing/keeper/pairing_subscription_test.go b/x/pairing/keeper/pairing_subscription_test.go index 620a616b3c..a35af1263c 100644 --- a/x/pairing/keeper/pairing_subscription_test.go +++ b/x/pairing/keeper/pairing_subscription_test.go @@ -4,9 +4,9 @@ import ( "testing" sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/lavanet/lava/relayer/sigs" "github.com/lavanet/lava/testutil/common" testkeeper "github.com/lavanet/lava/testutil/keeper" + "github.com/lavanet/lava/utils/sigs" "github.com/lavanet/lava/x/pairing/types" subtypes "github.com/lavanet/lava/x/subscription/types" "github.com/stretchr/testify/require" diff --git a/x/pairing/keeper/unresponsive_provider_test.go b/x/pairing/keeper/unresponsive_provider_test.go index 149963b5c4..75555942ee 100644 --- a/x/pairing/keeper/unresponsive_provider_test.go +++ b/x/pairing/keeper/unresponsive_provider_test.go @@ -6,8 +6,8 @@ import ( "testing" sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/lavanet/lava/relayer/sigs" testkeeper "github.com/lavanet/lava/testutil/keeper" + "github.com/lavanet/lava/utils/sigs" epochstoragetypes "github.com/lavanet/lava/x/epochstorage/types" "github.com/lavanet/lava/x/pairing" "github.com/lavanet/lava/x/pairing/types" diff --git a/x/subscription/keeper/epoch_start_test.go b/x/subscription/keeper/epoch_start_test.go index 3590c96fed..09431e436e 100644 --- a/x/subscription/keeper/epoch_start_test.go +++ b/x/subscription/keeper/epoch_start_test.go @@ -4,7 +4,7 @@ import ( "testing" sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/lavanet/lava/relayer/sigs" + "github.com/lavanet/lava/utils/sigs" epochstoragetypes "github.com/lavanet/lava/x/epochstorage/types" "github.com/stretchr/testify/require" ) diff --git a/x/subscription/keeper/subscription_test.go b/x/subscription/keeper/subscription_test.go index fac7a3a608..2f6f262274 100644 --- a/x/subscription/keeper/subscription_test.go +++ b/x/subscription/keeper/subscription_test.go @@ -7,10 +7,10 @@ import ( "time" sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/lavanet/lava/relayer/sigs" "github.com/lavanet/lava/testutil/common" keepertest "github.com/lavanet/lava/testutil/keeper" 
"github.com/lavanet/lava/testutil/nullify" + "github.com/lavanet/lava/utils/sigs" epochstoragetypes "github.com/lavanet/lava/x/epochstorage/types" planskeeper "github.com/lavanet/lava/x/plans/keeper" planstypes "github.com/lavanet/lava/x/plans/types" From cd3b82f9646e4b6253514260173e267489880678 Mon Sep 17 00:00:00 2001 From: omer mishael Date: Thu, 16 Mar 2023 13:19:12 +0200 Subject: [PATCH 117/123] fix problem in dev script --- scripts/init_chain_commands.sh | 6 +++--- x/pairing/client/cli/tx_stake_provider.go | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/scripts/init_chain_commands.sh b/scripts/init_chain_commands.sh index 508ae8b659..630136b8b9 100755 --- a/scripts/init_chain_commands.sh +++ b/scripts/init_chain_commands.sh @@ -115,9 +115,9 @@ lavad tx pairing stake-provider "BASET" $PROVIDERSTAKE "$PROVIDER2_LISTENER,json lavad tx pairing stake-provider "BASET" $PROVIDERSTAKE "$PROVIDER3_LISTENER,jsonrpc,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE # Sui Providers -lavad tx pairing stake-provider "SUIT" $PROVIDERSTAKE "$PROVIDER1_LISTENER,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-provider "SUIT" $PROVIDERSTAKE "$PROVIDER2_LISTENER,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-provider "SUIT" $PROVIDERSTAKE "$PROVIDER3_LISTENER,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "SUIT" $PROVIDERSTAKE "$PROVIDER1_LISTENER,jsonrpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "SUIT" $PROVIDERSTAKE "$PROVIDER2_LISTENER,jsonrpc,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx pairing stake-provider "SUIT" $PROVIDERSTAKE "$PROVIDER3_LISTENER,jsonrpc,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE # SOLANA Providers lavad tx pairing stake-provider "SOLANA" $PROVIDERSTAKE "$PROVIDER1_LISTENER,jsonrpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE diff --git a/x/pairing/client/cli/tx_stake_provider.go b/x/pairing/client/cli/tx_stake_provider.go index e33d2156b9..1590edbd9f 100644 --- a/x/pairing/client/cli/tx_stake_provider.go +++ b/x/pairing/client/cli/tx_stake_provider.go @@ -35,7 +35,7 @@ func CmdStakeProvider() *cobra.Command { for _, endpointStr := range tmpArg { splitted := strings.Split(endpointStr, ",") if len(splitted) != 3 { - return fmt.Errorf("invalid argument format in endpoints, must be: HOST:PORT,useType,geolocation HOST:PORT,useType,geolocation") + return fmt.Errorf("invalid argument format in endpoints, must be: HOST:PORT,useType,geolocation HOST:PORT,useType,geolocation, received: %s", endpointStr) } geoloc, err := strconv.ParseUint(splitted[2], 10, 64) if err != nil { From 26eadb8b5a90abf94a010de4e9ce87714326a516 Mon Sep 17 00:00:00 2001 From: omer mishael Date: Thu, 16 Mar 2023 13:35:09 +0200 Subject: [PATCH 118/123] removed relayer/ unit tests after migrating to protocol --- .github/workflows/protocol_tests.yml | 6 +----- .golangci.yml | 2 -- Makefile | 2 +- 3 files changed, 2 insertions(+), 8 deletions(-) diff --git a/.github/workflows/protocol_tests.yml b/.github/workflows/protocol_tests.yml index c7d76bb7c0..36ed370091 100644 --- a/.github/workflows/protocol_tests.yml +++ b/.github/workflows/protocol_tests.yml @@ -61,8 +61,4 @@ jobs: 
### Run protocol unitests ###################################################### - name: Run Lava Protocol Tests - run: go test ./protocol/... -v - - name: Run Lava Chain Proxy Tests - run: go test ./relayer/chainproxy/ -v - - name: Run Relayer Metrics Unit Tests - run: go test ./relayer/metrics/ -v + run: go test ./protocol/... -v \ No newline at end of file diff --git a/.golangci.yml b/.golangci.yml index ce259eb2b0..5107b17c7d 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -3,8 +3,6 @@ run: # timeout for analysis, e.g. 30s, 5m, default is 1m timeout: 5m skip-files: - - "relayer/chainproxy/thirdparty/*" - - "relayer/chainproxy/grpc.go" - "protocol/chainlib/chainproxy/rpcInterfaceMessages/grpcMessage.go" - "protocol/chainlib/chainproxy/thirdparty/*" - "protocol/chainlib/grpc.go" diff --git a/Makefile b/Makefile index 833a8d8094..b31729c325 100644 --- a/Makefile +++ b/Makefile @@ -168,7 +168,7 @@ ifeq (static,$(findstring static,$(LAVA_BUILD_OPTIONS))) endif ifeq (mask_consumer_logs,$(findstring mask_consumer_logs,$(LAVA_BUILD_OPTIONS))) - ldflags += -X github.com/lavanet/lava/relayer/chainproxy.ReturnMaskedErrors=true + ldflags += -X github.com/lavanet/lava/protocol/common.ReturnMaskedErrors=true endif ifeq (debug_mutex,$(findstring debug_mutex,$(LAVA_BUILD_OPTIONS))) ldflags += -X github.com/lavanet/lava/utils.TimeoutMutex=true From 3c9458c38ec36de0a9d6e67475014462c6ca717e Mon Sep 17 00:00:00 2001 From: omer mishael Date: Thu, 16 Mar 2023 13:35:28 +0200 Subject: [PATCH 119/123] fixed comment naming convention --- x/pairing/keeper/pairing.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x/pairing/keeper/pairing.go b/x/pairing/keeper/pairing.go index b8250b239a..bd0bd2a4f7 100644 --- a/x/pairing/keeper/pairing.go +++ b/x/pairing/keeper/pairing.go @@ -97,7 +97,7 @@ func (k Keeper) GetPairingForClient(ctx sdk.Context, chainID string, clientAddre return providers, err } -// function used to get a new pairing from relayer and client +// function used to get a new pairing from provider and client // first argument has all metadata, second argument is only the addresses func (k Keeper) getPairingForClient(ctx sdk.Context, chainID string, clientAddress sdk.AccAddress, block uint64) (providers []epochstoragetypes.StakeEntry, vrfk string, allowedCU uint64, legacyStake bool, errorRet error) { var geolocation uint64 From b320d49caac6f40677359e1ff999c349b2742388 Mon Sep 17 00:00:00 2001 From: Ran Mishael Date: Wed, 22 Mar 2023 12:56:47 +0100 Subject: [PATCH 120/123] Renaming psm --- docs/static/openapi.yml | 20 ----- .../lavasession/provider_session_manager.go | 2 +- protocol/lavasession/provider_types.go | 4 +- .../init_chain_commands_one_provider.sh | 81 ------------------- scripts/pre_setups/init_gth_only.sh | 26 ------ scripts/pre_setups/init_lava_grpc.sh | 58 ------------- scripts/pre_setups/init_lava_only.sh | 4 + scripts/pre_setups/init_osmosis_test.sh | 42 ---------- scripts/pre_setups/setup_provider.sh | 77 ------------------ scripts/setup_providers.sh | 4 +- 10 files changed, 9 insertions(+), 309 deletions(-) delete mode 100755 scripts/pre_setups/init_chain_commands_one_provider.sh delete mode 100755 scripts/pre_setups/init_gth_only.sh delete mode 100755 scripts/pre_setups/init_lava_grpc.sh create mode 100644 scripts/pre_setups/init_lava_only.sh delete mode 100755 scripts/pre_setups/init_osmosis_test.sh delete mode 100755 scripts/pre_setups/setup_provider.sh diff --git a/docs/static/openapi.yml b/docs/static/openapi.yml index 3dfab40735..2490646808 100644 --- 
a/docs/static/openapi.yml +++ b/docs/static/openapi.yml @@ -29674,16 +29674,6 @@ paths: in: query required: false type: boolean - - name: pagination.reverse - description: >- - reverse is set to true if results are to be returned in the - descending order. - - - Since: cosmos-sdk 0.43 - in: query - required: false - type: boolean tags: - Query '/lavanet/lava/epochstorage/fixated_params/{index}': @@ -29943,16 +29933,6 @@ paths: in: query required: false type: boolean - - name: pagination.reverse - description: >- - reverse is set to true if results are to be returned in the - descending order. - - - Since: cosmos-sdk 0.43 - in: query - required: false - type: boolean tags: - Query '/lavanet/lava/epochstorage/stake_storage/{index}': diff --git a/protocol/lavasession/provider_session_manager.go b/protocol/lavasession/provider_session_manager.go index 2c2edc1ed5..02970d9742 100644 --- a/protocol/lavasession/provider_session_manager.go +++ b/protocol/lavasession/provider_session_manager.go @@ -217,7 +217,7 @@ func (psm *ProviderSessionManager) getActiveConsumer(epoch uint64, address strin } func (psm *ProviderSessionManager) getSessionFromAnActiveConsumer(providerSessionWithConsumer *ProviderSessionsWithConsumer, sessionId uint64, epoch uint64) (singleProviderSession *SingleProviderSession, err error) { - session, err := providerSessionWithConsumer.GetExistingSession(sessionId) + session, err := providerSessionWithConsumer.getExistingSession(sessionId) if err == nil { return session, nil } else if SessionDoesNotExist.Is(err) { diff --git a/protocol/lavasession/provider_types.go b/protocol/lavasession/provider_types.go index 43c7f7cab8..f9b8e63d30 100644 --- a/protocol/lavasession/provider_types.go +++ b/protocol/lavasession/provider_types.go @@ -182,13 +182,13 @@ func (pswc *ProviderSessionsWithConsumer) createNewSingleProviderSession(session } // this function returns the session locked to be used -func (pswc *ProviderSessionsWithConsumer) GetExistingSession(sessionId uint64) (session *SingleProviderSession, err error) { +func (pswc *ProviderSessionsWithConsumer) getExistingSession(sessionId uint64) (session *SingleProviderSession, err error) { pswc.Lock.RLock() defer pswc.Lock.RUnlock() if session, ok := pswc.Sessions[sessionId]; ok { locked := session.lock.TryLock() if !locked { - return nil, utils.LavaFormatError("GetExistingSession failed to lock when getting session", LockMisUseDetectedError, nil) + return nil, utils.LavaFormatError("getExistingSession failed to lock when getting session", LockMisUseDetectedError, &map[string]string{"sessionId": strconv.FormatUint(session.SessionID, 10)}) } return session, nil } diff --git a/scripts/pre_setups/init_chain_commands_one_provider.sh b/scripts/pre_setups/init_chain_commands_one_provider.sh deleted file mode 100755 index 77d0169c2a..0000000000 --- a/scripts/pre_setups/init_chain_commands_one_provider.sh +++ /dev/null @@ -1,81 +0,0 @@ -#!/bin/bash -__dir=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) -source "$__dir"/../useful_commands.sh -. 
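(Aside on the provider_types.go hunk just above, from PATCH 120: the existing-session lookup becomes unexported and the lock-misuse error now carries the session id. Purely as an illustrative sketch of that try-lock pattern, with simplified stand-in types and hypothetical names rather than the package's real structs, the flow is roughly the following.)

package main

import (
	"fmt"
	"sync"
)

// session is a stand-in for a single provider session guarded by its own mutex.
type session struct {
	id   uint64
	lock sync.Mutex
}

// sessionHolder is a stand-in for the per-consumer session map guarded by an RWMutex.
type sessionHolder struct {
	guard    sync.RWMutex
	sessions map[uint64]*session
}

var errSessionNotFound = fmt.Errorf("session does not exist")

// getExistingSession mirrors the shape of the diffed helper: read-lock the map,
// try-lock the individual session, and report the offending session id when the
// session is already held, instead of blocking on a lock that is in use.
func (h *sessionHolder) getExistingSession(sessionID uint64) (*session, error) {
	h.guard.RLock()
	defer h.guard.RUnlock()
	s, ok := h.sessions[sessionID]
	if !ok {
		return nil, errSessionNotFound
	}
	if !s.lock.TryLock() {
		return nil, fmt.Errorf("lock misuse detected, session already locked, sessionId: %d", sessionID)
	}
	// the caller now owns s.lock and must unlock it when the relay finishes
	return s, nil
}

func main() {
	h := &sessionHolder{sessions: map[uint64]*session{7: {id: 7}}}
	s, err := h.getExistingSession(7)
	fmt.Println(s != nil, err)
	_, err = h.getExistingSession(7) // second call fails: the session is still locked
	fmt.Println(err)
}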
"${__dir}"/../vars/variables.sh -# Making sure old screens are not running -killall screen -screen -wipe -GASPRICE="0.000000001ulava" -lavad tx gov submit-proposal spec-add ./cookbook/spec_add_lava.json,./cookbook/spec_add_ethereum.json,./cookbook/spec_add_osmosis.json,./cookbook/spec_add_fantom.json,./cookbook/spec_add_celo.json,./cookbook/spec_add_alfajores.json -y --from alice --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx gov vote 1 yes -y --from alice --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE - -lavad tx gov submit-proposal spec-add ./cookbook/spec_add_arbitrum.json,./cookbook/spec_add_starknet.json,./cookbook/spec_add_aptos.json,./cookbook/spec_add_juno.json,./cookbook/spec_add_cosmoshub.json,./cookbook/spec_add_polygon.json -y --from alice --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx gov vote 2 yes -y --from alice --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE - -STAKE="500000000000ulava" -sleep 4 -lavad tx pairing stake-client "ETH1" $STAKE 1 -y --from user1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-client "GTH1" $STAKE 1 -y --from user1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-client "COS3" $STAKE 1 -y --from user1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-client "FTM250" $STAKE 1 -y --from user1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-client "CELO" $STAKE 1 -y --from user1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-client "LAV1" $STAKE 1 -y --from user1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-client "COS4" $STAKE 1 -y --from user1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-client "ALFAJORES" $STAKE 1 -y --from user1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-client "ARB1" $STAKE 1 -y --from user1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-client "ARBN" $STAKE 1 -y --from user1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-client "APT1" $STAKE 1 -y --from user1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-client "STRK" $STAKE 1 -y --from user1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-client "JUN1" $STAKE 1 -y --from user1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-client "COS5" $STAKE 1 -y --from user1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-client "POLYGON1" $STAKE 1 -y --from user1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE - - -# Ethereum providers -lavad tx pairing stake-provider "ETH1" $STAKE "127.0.0.1:2221,jsonrpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE - -#Goerli providers -lavad tx pairing stake-provider "GTH1" $STAKE "127.0.0.1:2121,jsonrpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE - -# Fantom providers -lavad tx pairing stake-provider "FTM250" $STAKE "127.0.0.1:2251,jsonrpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE - -# Celo providers -lavad tx pairing stake-provider "CELO" $STAKE "127.0.0.1:5241,jsonrpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE - -#Celo 
alfahores testnet providers -lavad tx pairing stake-provider "ALFAJORES" $STAKE "127.0.0.1:6241,jsonrpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE - -#Arbitrum mainet providers -lavad tx pairing stake-provider "ARB1" $STAKE "127.0.0.1:7241,jsonrpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE - -#Aptos mainet providers -lavad tx pairing stake-provider "APT1" $STAKE "127.0.0.1:10031,rest,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE - -#Starknet mainet providers -lavad tx pairing stake-provider "STRK" $STAKE "127.0.0.1:8241,jsonrpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE - -# Polygon Providers -lavad tx pairing stake-provider "POLYGON1" $STAKE "127.0.0.1:4344,jsonrpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE - -# Cosmos Chains: - -# Osmosis providers -lavad tx pairing stake-provider "COS3" $STAKE "127.0.0.1:2241,tendermintrpc,1 127.0.0.1:2231,rest,1 127.0.0.1:2234,grpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE - -# Lava Providers -lavad tx pairing stake-provider "LAV1" $STAKE "127.0.0.1:2261,tendermintrpc,1 127.0.0.1:2271,rest,1 127.0.0.1:2274,grpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE - -# Juno providers -lavad tx pairing stake-provider "JUN1" $STAKE "127.0.0.1:2361,tendermintrpc,1 127.0.0.1:2371,rest,1 127.0.0.1:2374,grpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE - -# Osmosis testnet providers -lavad tx pairing stake-provider "COS4" $STAKE "127.0.0.1:4241,tendermintrpc,1 127.0.0.1:4231,rest,1 127.0.0.1:4234,grpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE - -# Cosmoshub Providers -lavad tx pairing stake-provider "COS5" $STAKE "127.0.0.1:2344,tendermintrpc,1 127.0.0.1:2331,rest,1 127.0.0.1:2334,grpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE - -# we need to wait for the next epoch for the stake to take action. -sleep_until_next_epoch - -. ${__dir}/setup_provider.sh diff --git a/scripts/pre_setups/init_gth_only.sh b/scripts/pre_setups/init_gth_only.sh deleted file mode 100755 index 2f5a871918..0000000000 --- a/scripts/pre_setups/init_gth_only.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash -__dir=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) -source "$__dir"/../useful_commands.sh -. 
"${__dir}"/../vars/variables.sh -# Making sure old screens are not running -killall screen -screen -wipe -LOGS_DIR=${__dir}/../../testutil/debugging/logs -GASPRICE="0.000000001ulava" -lavad tx gov submit-proposal spec-add ./cookbook/spec_add_ethereum.json -y --from alice --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx gov vote 1 yes -y --from alice --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -sleep 4 - -STAKE="500000000000ulava" -lavad tx pairing stake-client "GTH1" $STAKE 1 -y --from user1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE - -#Goerli providers -lavad tx pairing stake-provider "GTH1" $STAKE "127.0.0.1:2121,jsonrpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-provider "GTH1" $STAKE "127.0.0.1:2122,jsonrpc,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE - -sleep_until_next_epoch - -screen -d -m -S gth_providers bash -c "source ~/.bashrc; lavad server 127.0.0.1 2121 $GTH_RPC_WS GTH1 jsonrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 2>&1 | tee $LOGS_DIR/GTH1_2121.log" && sleep 0.25 -screen -S gth_providers -X screen -t win1 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2122 $GTH_RPC_WS GTH1 jsonrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer2 2>&1 | tee $LOGS_DIR/GTH1_2122.log" - -screen -d -m -S portals bash -c "source ~/.bashrc; lavad rpcconsumer 127.0.0.1:3339 GTH1 jsonrpc $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug --from user1 2>&1 | tee $LOGS_DIR/PORTAL_3339.log" diff --git a/scripts/pre_setups/init_lava_grpc.sh b/scripts/pre_setups/init_lava_grpc.sh deleted file mode 100755 index 3bc89d8c77..0000000000 --- a/scripts/pre_setups/init_lava_grpc.sh +++ /dev/null @@ -1,58 +0,0 @@ -#!/bin/bash -__dir=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) -source "$__dir"/../useful_commands.sh -. 
"${__dir}"/../vars/variables.sh -# Making sure old screens are not running -killall screen -screen -wipe -LOGS_DIR=${__dir}/../../testutil/debugging/logs -GASPRICE="0.000000001ulava" -lavad tx gov submit-proposal spec-add ./cookbook/spec_add_cosmoshub.json,./cookbook/spec_add_ethereum.json --from alice -y --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx gov vote 1 yes -y --from alice --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -sleep 3 -lavad tx gov submit-proposal spec-add ./cookbook/spec_add_lava.json --from alice -y --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx gov vote 2 yes -y --from alice --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE - -sleep 4 - -STAKE="500000000000ulava" - -lavad tx pairing stake-client "LAV1" $STAKE 1 -y --from user1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-client "LAV1" $STAKE 1 -y --from user2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE - -# Lava Providers -lavad tx pairing stake-provider "LAV1" $STAKE "127.0.0.1:2261,tendermintrpc,1 127.0.0.1:2271,rest,1 127.0.0.1:2281,grpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-provider "LAV1" $STAKE "127.0.0.1:2262,tendermintrpc,1 127.0.0.1:2272,rest,1 127.0.0.1:2282,grpc,1" 1 -y --from servicer2 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx pairing stake-provider "LAV1" $STAKE "127.0.0.1:2263,tendermintrpc,1 127.0.0.1:2273,rest,1 127.0.0.1:2283,grpc,1" 1 -y --from servicer3 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE - -sleep_until_next_epoch - -# Lava providers -# screen -d -m -S lav1_providers bash -c "source ~/.bashrc; lavad server 127.0.0.1 2271 $LAVA_REST LAV1 rest --from servicer1 $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug 2>&1 | tee $LOGS_DIR/LAV1_2271.log"; sleep 0.3 -# screen -S lav1_providers -X screen -t win1 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2272 $LAVA_REST LAV1 rest --from servicer2 $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug 2>&1 | tee $LOGS_DIR/LAV1_2272.log" -# screen -S lav1_providers -X screen -t win2 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2273 $LAVA_REST LAV1 rest --from servicer3 $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug 2>&1 | tee $LOGS_DIR/LAV1_2273.log" -# screen -S lav1_providers -X screen -t win3 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2261 $LAVA_RPC LAV1 tendermintrpc --from servicer1 $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug --tendermint-http-endpoint $LAVA_RPC_HTTP 2>&1 | tee $LOGS_DIR/LAV1_2261.log" -# screen -S lav1_providers -X screen -t win4 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2262 $LAVA_RPC LAV1 tendermintrpc --from servicer2 $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug --tendermint-http-endpoint $LAVA_RPC_HTTP 2>&1 | tee $LOGS_DIR/LAV1_2262.log" -# screen -S lav1_providers -X screen -t win5 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2263 $LAVA_RPC LAV1 tendermintrpc --from servicer3 $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug --tendermint-http-endpoint $LAVA_RPC_HTTP 2>&1 | tee $LOGS_DIR/LAV1_2263.log" -# screen -S lav1_providers -X screen -t win6 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2281 $LAVA_GRPC LAV1 grpc --from servicer1 $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug 2>&1 | tee $LOGS_DIR/LAV1_2281.log" -# screen -S lav1_providers -X screen -t win7 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 
2282 $LAVA_GRPC LAV1 grpc --from servicer2 $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug 2>&1 | tee $LOGS_DIR/LAV1_2282.log" -# screen -S lav1_providers -X screen -t win8 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2283 $LAVA_GRPC LAV1 grpc --from servicer3 $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug 2>&1 | tee $LOGS_DIR/LAV1_2283.log" - -screen -d -m -S portals bash -c "source ~/.bashrc; lavad rpcconsumer 127.0.0.1:3340 LAV1 rest 127.0.0.1:3341 LAV1 tendermintrpc 127.0.0.1:3342 LAV1 grpc --from user1 $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug | tee $LOGS_DIR/LAV1_tendermint_portal.log"; sleep 0.3 -lavad rpcprovider 127.0.0.1:2272 LAV1 rest http://0.0.0.0:1317 --from servicer2 --geolocation 1 -# screen -r portals -# Lava Over Lava ETH - -sleep 3 # wait for the portal to start. - -# lavad tx pairing stake-client "ETH1" 200000ulava 1 -y --from user1 --node "http://127.0.0.1:3341/1" - -# lavad tx pairing stake-provider "ETH1" 2010ulava "127.0.0.1:2221,jsonrpc,1" 1 -y --from servicer1 --node "http://127.0.0.1:3341/1" - -# sleep_until_next_epoch - -# screen -d -m -S eth1_providers bash -c "source ~/.bashrc; lavad server 127.0.0.1 2221 $ETH_RPC_WS ETH1 jsonrpc --from servicer1 --node \"http://127.0.0.1:3341/1\" 2>&1 | tee $LOGS_DIR/ETH1_2221.log" - -# screen -d -m -S eth1_portals bash -c "source ~/.bashrc; lavad portal_server 127.0.0.1 3333 ETH1 jsonrpc --from user1 --node \"http://127.0.0.1:3341/1\" 2>&1 | tee $LOGS_DIR/PORTAL_3333.log" - -screen -ls \ No newline at end of file diff --git a/scripts/pre_setups/init_lava_only.sh b/scripts/pre_setups/init_lava_only.sh new file mode 100644 index 0000000000..cccfb6e691 --- /dev/null +++ b/scripts/pre_setups/init_lava_only.sh @@ -0,0 +1,4 @@ +#!/bin/bash +__dir=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +source "$__dir"/../useful_commands.sh +. "${__dir}"/../vars/variables.sh \ No newline at end of file diff --git a/scripts/pre_setups/init_osmosis_test.sh b/scripts/pre_setups/init_osmosis_test.sh deleted file mode 100755 index d0efff5c7b..0000000000 --- a/scripts/pre_setups/init_osmosis_test.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/bin/bash -__dir=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) -source "$__dir"/../useful_commands.sh -. 
"${__dir}"/../vars/variables.sh -# Making sure old screens are not running -killall screen -screen -wipe -LOGS_DIR=${__dir}/../../testutil/debugging/logs -GASPRICE="0.000000001ulava" -lavad tx gov submit-proposal spec-add ./cookbook/spec_add_lava.json,./cookbook/spec_add_ethereum.json,./cookbook/spec_add_osmosis.json,./cookbook/spec_add_fantom.json -y --from alice --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE -lavad tx gov vote 1 yes -y --from alice --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE - -sleep 4 -STAKE="500000000000ulava" -lavad tx pairing stake-client "COS4" $STAKE 1 -y --from user4 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE - -# Lava Providers -lavad tx pairing stake-provider "COS4" $STAKE "127.0.0.1:2261,tendermintrpc,1 127.0.0.1:2271,rest,1 127.0.0.1:2281,grpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE - -sleep_until_next_epoch - -# Lava providers -screen -d -m -S cos4_providers bash -c "source ~/.bashrc; lavad server 127.0.0.1 2271 $OSMO_TEST_REST COS4 rest --from servicer1 $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug 2>&1 | tee $LOGS_DIR/COS4_2271.log"; sleep 0.3 -# screen -S cos4_providers -X screen -t win3 -X bash -c "source ~/.bashrc; lavad server 127.0.0.1 2261 $OSMO_TEST_RPC COS4 tendermintrpc --from servicer1 $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug 2>&1 | tee $LOGS_DIR/COS4_2261.log" -screen -d -m -S portals bash -c "source ~/.bashrc; lavad rpcconsumer 127.0.0.1:3340 COS4 rest 127.0.0.1:3341 COS4 tendermintrpc --from user4 $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug | tee $LOGS_DIR/COS4_tendermint_portal.log"; sleep 0.3 - -lavad server 127.0.0.1 2261 $OSMO_TEST_RPC COS4 tendermintrpc --from servicer1 $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug --tendermint-http-endpoint $OSMO_TEST_RPC_HTTP -# Lava Over Lava ETH - -sleep 3 # wait for the portal to start. - -# lavad tx pairing stake-client "ETH1" 200000ulava 1 -y --from user1 --node "http://127.0.0.1:3341/1" - -# lavad tx pairing stake-provider "ETH1" 2010ulava "127.0.0.1:2221,jsonrpc,1" 1 -y --from servicer1 --node "http://127.0.0.1:3341/1" - -# sleep_until_next_epoch - -# screen -d -m -S eth1_providers bash -c "source ~/.bashrc; lavad server 127.0.0.1 2221 $ETH_RPC_WS ETH1 jsonrpc --from servicer1 --node \"http://127.0.0.1:3341/1\" 2>&1 | tee $LOGS_DIR/ETH1_2221.log" - -# screen -d -m -S eth1_portals bash -c "source ~/.bashrc; lavad portal_server 127.0.0.1 3333 ETH1 jsonrpc --from user1 --node \"http://127.0.0.1:3341/1\" 2>&1 | tee $LOGS_DIR/PORTAL_3333.log" - -screen -ls \ No newline at end of file diff --git a/scripts/pre_setups/setup_provider.sh b/scripts/pre_setups/setup_provider.sh deleted file mode 100755 index 4f60759e2d..0000000000 --- a/scripts/pre_setups/setup_provider.sh +++ /dev/null @@ -1,77 +0,0 @@ -#!/bin/bash -__dir=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) -. 
"${__dir}"/../vars/variables.sh -LOGS_DIR=${__dir}/../../testutil/debugging/logs -mkdir -p $LOGS_DIR -rm $LOGS_DIR/*.log - -echo "---------------Setup Providers------------------" -killall screen -screen -wipe - -#ETH providers -screen -d -m -S eth1_providers bash -c "source ~/.bashrc; lavad server 127.0.0.1 2221 $ETH_RPC_WS ETH1 jsonrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 2>&1 | tee $LOGS_DIR/ETH1_2221.log" && sleep 0.25 - -#GTH providers -screen -d -m -S gth_providers bash -c "source ~/.bashrc; lavad server 127.0.0.1 2121 $GTH_RPC_WS GTH1 jsonrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 2>&1 | tee $LOGS_DIR/GTH1_2121.log" && sleep 0.25 - -#FTM providers -screen -d -m -S ftm250_providers bash -c "source ~/.bashrc; lavad server 127.0.0.1 2251 $FTM_RPC_HTTP FTM250 jsonrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 2>&1 | tee $LOGS_DIR/FTM250_2251.log" && sleep 0.25 - -#Celo providers -screen -d -m -S celo_providers bash -c "source ~/.bashrc; lavad server 127.0.0.1 5241 $CELO_HTTP CELO jsonrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 2>&1 | tee $LOGS_DIR/CELO_2221.log" && sleep 0.25 - -# #Celo alfahores providers -screen -d -m -S alfajores_providers bash -c "source ~/.bashrc; lavad server 127.0.0.1 6241 $CELO_ALFAJORES_HTTP ALFAJORES jsonrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 2>&1 | tee $LOGS_DIR/ALFAJORES_2221.log" && sleep 0.25 - -#Arbitrum providers -screen -d -m -S arb_providers bash -c "source ~/.bashrc; lavad server 127.0.0.1 7241 $ARB1_HTTP ARB1 jsonrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 2>&1 | tee $LOGS_DIR/ARB1_2221.log" && sleep 0.25 - -#Aptos providers -screen -d -m -S apt1_providers bash -c "source ~/.bashrc; lavad server 127.0.0.1 10031 $APTOS_REST APT1 rest $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 2>&1 | tee $LOGS_DIR/APT1_10031.log" && sleep 0.25 - -#Starknet providers -screen -d -m -S strk_providers bash -c "source ~/.bashrc; lavad server 127.0.0.1 8241 $STARKNET_RPC STRK jsonrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 2>&1 | tee $LOGS_DIR/STRK_2221.log" - -#Polygon providers -screen -d -m -S polygon_providers bash -c "source ~/.bashrc; lavad server 127.0.0.1 4344 $POLYGON_MAINNET_RPC POLYGON1 jsonrpc $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 2>&1 | tee $LOGS_DIR/POLYGON_4344.log" - -# All Cosmos-SDK Chains below - -# Osmosis providers -screen -d -m -S cos3_providers bash -c "source ~/.bashrc; lavad server 127.0.0.1 2231 $OSMO_REST COS3 rest $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 2>&1 | tee $LOGS_DIR/COS3_2231.log" && sleep 0.25 - -# Osmosis testnet providers -screen -d -m -S cos4_providers bash -c "source ~/.bashrc; lavad server 127.0.0.1 4231 $OSMO_TEST_REST COS4 rest $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 2>&1 | tee $LOGS_DIR/COS4_4231.log" && sleep 0.25 - -# Lava providers -screen -d -m -S lav1_providers bash -c "source ~/.bashrc; lavad server 127.0.0.1 2271 $LAVA_REST LAV1 rest $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 2>&1 | tee $LOGS_DIR/LAV1_2271.log" && sleep 0.25 - -# Cosmoshub providers -screen -d -m -S cos5_providers bash -c "source ~/.bashrc; lavad server 127.0.0.1 2331 $GAIA_REST COS5 rest $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 2>&1 | 
tee $LOGS_DIR/COS5_2331.log" - -# Juno providers -screen -d -m -S jun1_providers bash -c "source ~/.bashrc; lavad server 127.0.0.1 2371 $JUNO_REST JUN1 rest $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 2>&1 | tee $LOGS_DIR/JUN1_2371.log" - -# Setup Portals -screen -d -m -S portals bash -c "source ~/.bashrc; lavad rpcconsumer 127.0.0.1:3333 ETH1 jsonrpc $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug --from user1 2>&1 | tee $LOGS_DIR/PORTAL_ETH_3333.log" && sleep 0.25 -screen -S portals -X screen -t win3 -X bash -c "source ~/.bashrc; lavad rpcconsumer 127.0.0.1:3336 FTM250 jsonrpc $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug --from user1 2>&1 | tee $LOGS_DIR/PORTAL_FTM250_3336.log" -screen -S portals -X screen -t win6 -X bash -c "source ~/.bashrc; lavad rpcconsumer 127.0.0.1:3339 GTH1 jsonrpc $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug --from user1 2>&1 | tee $LOGS_DIR/PORTAL_3339.log" -screen -S portals -X screen -t win9 -X bash -c "source ~/.bashrc; lavad rpcconsumer 127.0.0.1:3342 CELO jsonrpc $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug --from user1 2>&1 | tee $LOGS_DIR/PORTAL_3342.log" -screen -S portals -X screen -t win12 -X bash -c "source ~/.bashrc; lavad rpcconsumer 127.0.0.1:3345 ALFAJORES jsonrpc $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug --from user1 2>&1 | tee $LOGS_DIR/PORTAL_3345.log" -screen -S portals -X screen -t win13 -X bash -c "source ~/.bashrc; lavad rpcconsumer 127.0.0.1:3346 ARB1 jsonrpc $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug --from user1 2>&1 | tee $LOGS_DIR/PORTAL_3346.log" -screen -S portals -X screen -t win14 -X bash -c "source ~/.bashrc; lavad rpcconsumer 127.0.0.1:3347 STRK jsonrpc $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug --from user1 2>&1 | tee $LOGS_DIR/PORTAL_3347.log" -screen -S portals -X screen -t win15 -X bash -c "source ~/.bashrc; lavad rpcconsumer 127.0.0.1:3348 APT1 rest $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug --from user1 2>&1 | tee $LOGS_DIR/PORTAL_3348.log" -screen -S portals -X screen -t win18 -X bash -c "source ~/.bashrc; lavad rpcconsumer 127.0.0.1:3351 POLYGON1 jsonrpc $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug --from user1 2>&1 | tee $LOGS_DIR/PORTAL_3351.log" -# Cosmos-SDK based chains -screen -S portals -X screen -t win1 -X bash -c "source ~/.bashrc; lavad rpcconsumer 127.0.0.1:3334 COS3 rest 127.0.0.1:3335 COS3 tendermintrpc 127.0.0.1:3353 COS3 grpc $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug --from user1 2>&1 | tee $LOGS_DIR/PORTAL_COS3_3334.log" -screen -S portals -X screen -t win4 -X bash -c "source ~/.bashrc; lavad rpcconsumer 127.0.0.1:3337 COS4 rest 127.0.0.1:3338 COS4 tendermintrpc 127.0.0.1:3354 COS4 grpc $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug --from user1 2>&1 | tee $LOGS_DIR/PORTAL_COS4_3337.log" -screen -S portals -X screen -t win7 -X bash -c "source ~/.bashrc; lavad rpcconsumer 127.0.0.1:3340 LAV1 rest 127.0.0.1:3341 LAV1 tendermintrpc 127.0.0.1:3352 LAV1 grpc $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug --from user1 2>&1 | tee $LOGS_DIR/PORTAL_LAV1_3340.log" -screen -S portals -X screen -t win10 -X bash -c "source ~/.bashrc; lavad rpcconsumer 127.0.0.1:3343 COS5 rest 127.0.0.1:3344 COS5 tendermintrpc 127.0.0.1:3356 COS5 grpc $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug --from user1 2>&1 | tee $LOGS_DIR/PORTAL_3343.log" -screen -S portals -X screen -t win16 -X bash -c "source ~/.bashrc; lavad rpcconsumer 127.0.0.1:3349 JUN1 rest 127.0.0.1:3350 JUN1 tendermintrpc 
127.0.0.1:3355 JUN1 grpc $EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug --from user1 2>&1 | tee $LOGS_DIR/PORTAL_3349.log" - - - - -echo "--- setting up screens done ---" -screen -ls \ No newline at end of file diff --git a/scripts/setup_providers.sh b/scripts/setup_providers.sh index dc2d316d52..80cbafe790 100755 --- a/scripts/setup_providers.sh +++ b/scripts/setup_providers.sh @@ -136,7 +136,7 @@ $PROVIDER2_LISTENER AXELAR grpc '$AXELAR_GRPC' \ $EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer3 2>&1 | tee $LOGS_DIR/PROVIDER3.log" && sleep 0.25 # Setup Portal -screen -d -m -S portals bash -c "source ~/.bashrc; lavad rpcconsumer \ +screen -d -m -S consumers bash -c "source ~/.bashrc; lavad rpcconsumer \ 127.0.0.1:3333 ETH1 jsonrpc \ 127.0.0.1:3334 GTH1 jsonrpc \ 127.0.0.1:3335 FTM250 jsonrpc \ @@ -159,7 +159,7 @@ screen -d -m -S portals bash -c "source ~/.bashrc; lavad rpcconsumer \ 127.0.0.1:3380 BSC jsonrpc \ 127.0.0.1:3381 SOLANA jsonrpc \ 127.0.0.1:3382 SUIT jsonrpc \ -$EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug --from user1 2>&1 | tee $LOGS_DIR/PORTAL.log" && sleep 0.25 +$EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug --from user1 2>&1 | tee $LOGS_DIR/CONSUMERS.log" && sleep 0.25 echo "--- setting up screens done ---" screen -ls \ No newline at end of file From 923a4cfccd913baf771d8edd2a0032dbcca2ab57 Mon Sep 17 00:00:00 2001 From: Ran Mishael Date: Wed, 22 Mar 2023 18:01:20 +0100 Subject: [PATCH 121/123] fixing possible lock issue --- protocol/rpcprovider/rpcprovider_server.go | 11 +++--- scripts/pre_setups/init_lava_only.sh | 4 --- scripts/pre_setups/init_lava_only_test.sh | 39 ++++++++++++++++++++++ 3 files changed, 45 insertions(+), 9 deletions(-) delete mode 100644 scripts/pre_setups/init_lava_only.sh create mode 100755 scripts/pre_setups/init_lava_only_test.sh diff --git a/protocol/rpcprovider/rpcprovider_server.go b/protocol/rpcprovider/rpcprovider_server.go index 8d31dd21cb..dbe034e8e0 100644 --- a/protocol/rpcprovider/rpcprovider_server.go +++ b/protocol/rpcprovider/rpcprovider_server.go @@ -115,6 +115,7 @@ func (rpcps *RPCProviderServer) Relay(ctx context.Context, request *pairingtypes }) } else { // On successful relay + pairingEpoch := relaySession.PairingEpoch // before release lock get pairing epoch for proof relayError := rpcps.providerSessionManager.OnSessionDone(relaySession) if relayError != nil { err = sdkerrors.Wrapf(relayError, "OnSession Done failure: "+err.Error()) @@ -122,7 +123,7 @@ func (rpcps *RPCProviderServer) Relay(ctx context.Context, request *pairingtypes if request.DataReliability == nil { // SendProof gets the request copy, as in the case of data reliability enabled the request.blockNumber is changed. // Therefore the signature changes, so we need the original copy to extract the address from it. 
- err = rpcps.SendProof(ctx, relaySession, request, consumerAddress) + err = rpcps.SendProof(ctx, pairingEpoch, request, consumerAddress) if err != nil { return nil, err } @@ -131,7 +132,7 @@ func (rpcps *RPCProviderServer) Relay(ctx context.Context, request *pairingtypes "request.relayNumber": strconv.FormatUint(request.RelaySession.RelayNum, 10), }) } else { - updated := rpcps.rewardServer.SendNewDataReliabilityProof(ctx, request.DataReliability, relaySession.PairingEpoch, consumerAddress.String()) + updated := rpcps.rewardServer.SendNewDataReliabilityProof(ctx, request.DataReliability, pairingEpoch, consumerAddress.String()) if !updated { return nil, utils.LavaFormatError("existing data reliability proof", lavasession.DataReliabilityAlreadySentThisEpochError, nil) } @@ -183,11 +184,12 @@ func (rpcps *RPCProviderServer) RelaySubscribe(request *pairingtypes.RelayReques subscribed, err := rpcps.TryRelaySubscribe(ctx, uint64(request.RelaySession.Epoch), srv, chainMessage, consumerAddress, relaySession) // this function does not return until subscription ends if subscribed { // meaning we created a subscription and used it for at least a message + pairingEpoch := relaySession.PairingEpoch // before release lock get pairing epoch for proof relayError := rpcps.providerSessionManager.OnSessionDone(relaySession) // TODO: when we pay as u go on subscription this will need to change if relayError != nil { err = sdkerrors.Wrapf(relayError, "OnSession Done failure: "+err.Error()) } else { - err = rpcps.SendProof(ctx, relaySession, request, consumerAddress) + err = rpcps.SendProof(ctx, pairingEpoch, request, consumerAddress) if err != nil { return err } @@ -211,8 +213,7 @@ func (rpcps *RPCProviderServer) RelaySubscribe(request *pairingtypes.RelayReques return rpcps.handleRelayErrorStatus(err) } -func (rpcps *RPCProviderServer) SendProof(ctx context.Context, providerSession *lavasession.SingleProviderSession, request *pairingtypes.RelayRequest, consumerAddress sdk.AccAddress) error { - epoch := providerSession.PairingEpoch +func (rpcps *RPCProviderServer) SendProof(ctx context.Context, epoch uint64, request *pairingtypes.RelayRequest, consumerAddress sdk.AccAddress) error { storedCU, updatedWithProof := rpcps.rewardServer.SendNewProof(ctx, request.RelaySession, epoch, consumerAddress.String()) if !updatedWithProof && storedCU > request.RelaySession.CuSum { rpcps.providerSessionManager.UpdateSessionCU(consumerAddress.String(), epoch, request.RelaySession.SessionId, storedCU) diff --git a/scripts/pre_setups/init_lava_only.sh b/scripts/pre_setups/init_lava_only.sh deleted file mode 100644 index cccfb6e691..0000000000 --- a/scripts/pre_setups/init_lava_only.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash -__dir=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) -source "$__dir"/../useful_commands.sh -. "${__dir}"/../vars/variables.sh \ No newline at end of file diff --git a/scripts/pre_setups/init_lava_only_test.sh b/scripts/pre_setups/init_lava_only_test.sh new file mode 100755 index 0000000000..30247fdcc3 --- /dev/null +++ b/scripts/pre_setups/init_lava_only_test.sh @@ -0,0 +1,39 @@ +#!/bin/bash +__dir=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +source "$__dir"/../useful_commands.sh +. 
"${__dir}"/../vars/variables.sh + +killall screen +screen -wipe +GASPRICE="0.000000001ulava" +lavad tx gov submit-proposal spec-add ./cookbook/spec_add_ibc.json,./cookbook/spec_add_cosmoswasm.json,./cookbook/spec_add_cosmossdk.json,./cookbook/spec_add_cosmossdk_full.json,./cookbook/spec_add_ethereum.json -y --from alice --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx gov vote 1 yes -y --from alice --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +sleep 4 + +lavad tx gov submit-proposal spec-add ./cookbook/spec_add_cosmoshub.json,./cookbook/spec_add_lava.json,./cookbook/spec_add_osmosis.json,./cookbook/spec_add_fantom.json,./cookbook/spec_add_celo.json,./cookbook/spec_add_optimism.json,./cookbook/spec_add_arbitrum.json -y --from alice --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE +lavad tx gov vote 2 yes -y --from alice --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE + +sleep 4 + + +CLIENTSTAKE="500000000000ulava" +PROVIDERSTAKE="500000000000ulava" + +PROVIDER1_LISTENER="127.0.0.1:2221" + +lavad tx pairing stake-client "LAV1" $CLIENTSTAKE 1 -y --from user1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE + +lavad tx pairing stake-provider "LAV1" $PROVIDERSTAKE "$PROVIDER1_LISTENER,tendermintrpc,1 $PROVIDER1_LISTENER,rest,1 $PROVIDER1_LISTENER,grpc,1" 1 -y --from servicer1 --gas-adjustment "1.5" --gas "auto" --gas-prices $GASPRICE + +screen -d -m -S provider1 bash -c "source ~/.bashrc; lavad rpcprovider \ +$PROVIDER1_LISTENER LAV1 rest '$LAVA_REST' \ +$PROVIDER1_LISTENER LAV1 tendermintrpc '$LAVA_RPC,$LAVA_RPC' \ +$PROVIDER1_LISTENER LAV1 grpc '$LAVA_GRPC' \ +$EXTRA_PROVIDER_FLAGS --geolocation 1 --log_level debug --from servicer1 2>&1 | tee $LOGS_DIR/PROVIDER1.log" && sleep 0.25 + +screen -d -m -S consumers bash -c "source ~/.bashrc; lavad rpcconsumer \ +127.0.0.1:3360 LAV1 rest 127.0.0.1:3361 LAV1 tendermintrpc 127.0.0.1:3362 LAV1 grpc \ +$EXTRA_PORTAL_FLAGS --geolocation 1 --log_level debug --from user1 2>&1 | tee $LOGS_DIR/CONSUMERS.log" && sleep 0.25 + +echo "--- setting up screens done ---" +screen -ls \ No newline at end of file From 26506a24ef22510e9ff1c366da497e357964e8f1 Mon Sep 17 00:00:00 2001 From: Ran Mishael Date: Wed, 22 Mar 2023 18:29:48 +0100 Subject: [PATCH 122/123] returning sync loss upon trying to lock a locked session. 
--- protocol/lavasession/provider_types.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/protocol/lavasession/provider_types.go b/protocol/lavasession/provider_types.go index f9b8e63d30..02a2a97b56 100644 --- a/protocol/lavasession/provider_types.go +++ b/protocol/lavasession/provider_types.go @@ -186,9 +186,9 @@ func (pswc *ProviderSessionsWithConsumer) getExistingSession(sessionId uint64) ( pswc.Lock.RLock() defer pswc.Lock.RUnlock() if session, ok := pswc.Sessions[sessionId]; ok { - locked := session.lock.TryLock() - if !locked { - return nil, utils.LavaFormatError("getExistingSession failed to lock when getting session", LockMisUseDetectedError, &map[string]string{"sessionId": strconv.FormatUint(session.SessionID, 10)}) + lockSuccessful := session.lock.TryLock() + if !lockSuccessful { + return nil, utils.LavaFormatError("getExistingSession failed to lock when getting session, session is already locked and being used by the user.", SessionOutOfSyncError, &map[string]string{"sessionId": strconv.FormatUint(session.SessionID, 10)}) } return session, nil } From dc214efcfdf7d857310b917cf02620bd7409baa2 Mon Sep 17 00:00:00 2001 From: Ran Mishael Date: Sun, 26 Mar 2023 14:32:48 +0200 Subject: [PATCH 123/123] fixing camelcases in relay proto --- docs/static/openapi.yml | 55 +++-- proto/pairing/relay.proto | 14 +- protocol/lavaprotocol/request_builder.go | 10 +- protocol/rpcprovider/provider_listener.go | 2 +- .../rpcprovider/rewardserver/reward_server.go | 2 +- protocol/rpcprovider/rpcprovider_server.go | 20 +- testutil/common/common.go | 8 +- x/conflict/keeper/conflict.go | 6 +- x/conflict/keeper/msg_server_detection.go | 2 +- .../keeper/msg_server_detection_test.go | 4 +- x/pairing/keeper/fixation_test.go | 2 +- x/pairing/keeper/msg_server_freeze_test.go | 2 +- x/pairing/keeper/msg_server_relay_payment.go | 28 +-- .../msg_server_relay_payment_gov_test.go | 26 +-- .../keeper/msg_server_relay_payment_test.go | 18 +- .../keeper/unresponsive_provider_test.go | 8 +- x/pairing/types/relay.pb.go | 202 +++++++++--------- 17 files changed, 220 insertions(+), 189 deletions(-) diff --git a/docs/static/openapi.yml b/docs/static/openapi.yml index 2490646808..feb4366d18 100644 --- a/docs/static/openapi.yml +++ b/docs/static/openapi.yml @@ -54261,11 +54261,6 @@ definitions: sig: type: string format: byte - chainID: - type: string - epoch: - type: string - format: int64 QoSReport: type: object properties: @@ -54281,7 +54276,7 @@ definitions: relay_session: type: object properties: - specID: + spec_id: type: string content_hash: type: string @@ -54298,7 +54293,7 @@ definitions: relay_num: type: string format: uint64 - QoSReport: + qos_report: type: object properties: latency: @@ -54351,11 +54346,40 @@ definitions: request_block: type: string format: int64 - apiInterface: + api_interface: type: string salt: type: string format: byte + data_reliability: + type: object + properties: + chain_id: + type: string + epoch: + type: string + format: int64 + differentiator: + type: boolean + vrf_value: + type: string + format: byte + vrf_proof: + type: string + format: byte + provider_sig: + type: string + format: byte + all_data_hash: + type: string + format: byte + query_hash: + type: string + format: byte + title: we only need it for payment later + sig: + type: string + format: byte lavanet.lava.pairing.VRFData: type: object properties: @@ -54380,11 +54404,18 @@ definitions: sig: type: string format: byte - chainID: + chain_id: type: string epoch: type: string format: int64 + all_data_hash: 
+ type: string + format: byte + query_hash: + type: string + format: byte + title: we only need it for payment later lavanet.lava.epochstorage.Endpoint: type: object properties: @@ -55374,7 +55405,7 @@ definitions: request_block: type: string format: int64 - apiInterface: + api_interface: type: string salt: type: string @@ -55382,7 +55413,7 @@ definitions: lavanet.lava.pairing.RelaySession: type: object properties: - specID: + spec_id: type: string content_hash: type: string @@ -55399,7 +55430,7 @@ definitions: relay_num: type: string format: uint64 - QoSReport: + qos_report: type: object properties: latency: diff --git a/proto/pairing/relay.proto b/proto/pairing/relay.proto index 38b7644903..ba8169272b 100644 --- a/proto/pairing/relay.proto +++ b/proto/pairing/relay.proto @@ -10,13 +10,13 @@ service Relayer { } message RelaySession { - string specID = 1; + string spec_id = 1; bytes content_hash = 2; uint64 session_id = 3; uint64 cu_sum = 4; // total compute unit used including this relay string provider = 5; uint64 relay_num = 6; - QualityOfServiceReport QoSReport = 7; + QualityOfServiceReport qos_report = 7; int64 epoch = 8; bytes unresponsive_providers = 9; string lava_chain_id = 10; @@ -29,14 +29,14 @@ message RelayPrivateData { string api_url = 2; // some relays have associated urls that are filled with params ('/block/{height}') bytes data = 3; int64 request_block = 4; - string apiInterface = 5; + string api_interface = 5; bytes salt = 6; } message RelayRequest { RelaySession relay_session = 1; RelayPrivateData relay_data= 2; - VRFData DataReliability = 3; + VRFData data_reliability = 3; } message Badge { @@ -57,14 +57,14 @@ message RelayReply { } message VRFData { - string chainID = 1; + string chain_id = 1; int64 epoch = 2; bool differentiator = 3; bytes vrf_value = 4; bytes vrf_proof = 5; bytes provider_sig = 6; - bytes allDataHash = 7; - bytes queryHash = 8; //we only need it for payment later + bytes all_data_hash = 7; + bytes query_hash = 8; //we only need it for payment later bytes sig = 9; } diff --git a/protocol/lavaprotocol/request_builder.go b/protocol/lavaprotocol/request_builder.go index 73440b03ec..cd11cc751e 100644 --- a/protocol/lavaprotocol/request_builder.go +++ b/protocol/lavaprotocol/request_builder.go @@ -55,13 +55,13 @@ func NewRelayData(connectionType string, apiUrl string, data []byte, requestBloc func ConstructRelaySession(lavaChainID string, relayRequestData *pairingtypes.RelayPrivateData, chainID string, providerPublicAddress string, consumerSession *lavasession.SingleConsumerSession, epoch int64, reportedProviders []byte) *pairingtypes.RelaySession { return &pairingtypes.RelaySession{ - SpecID: chainID, + SpecId: chainID, ContentHash: sigs.CalculateContentHashForRelayData(relayRequestData), SessionId: uint64(consumerSession.SessionId), CuSum: consumerSession.CuSum + consumerSession.LatestRelayCu, // add the latestRelayCu which will be applied when session is returned properly, Provider: providerPublicAddress, RelayNum: consumerSession.RelayNum + lavasession.RelayNumberIncrement, // increment the relay number. 
which will be applied when session is returned properly - QoSReport: consumerSession.QoSInfo.LastQoSReport, + QosReport: consumerSession.QoSInfo.LastQoSReport, Epoch: epoch, UnresponsiveProviders: reportedProviders, LavaChainId: lavaChainID, @@ -71,13 +71,13 @@ func ConstructRelaySession(lavaChainID string, relayRequestData *pairingtypes.Re func dataReliabilityRelaySession(lavaChainID string, relayRequestData *pairingtypes.RelayPrivateData, chainID string, providerPublicAddress string, epoch int64) *pairingtypes.RelaySession { return &pairingtypes.RelaySession{ - SpecID: chainID, + SpecId: chainID, ContentHash: sigs.CalculateContentHashForRelayData(relayRequestData), SessionId: lavasession.DataReliabilitySessionId, // sessionID for reliability is 0 CuSum: lavasession.DataReliabilityCuSum, // consumerSession.CuSum == 0 Provider: providerPublicAddress, RelayNum: 0, - QoSReport: nil, + QosReport: nil, Epoch: epoch, UnresponsiveProviders: nil, LavaChainId: lavaChainID, @@ -140,7 +140,7 @@ func DataReliabilityThresholdToSession(vrfs [][]byte, uniqueIdentifiers []bool, func NewVRFData(differentiator bool, vrf_res []byte, vrf_proof []byte, request *pairingtypes.RelayRequest, reply *pairingtypes.RelayReply) *pairingtypes.VRFData { dataReliability := &pairingtypes.VRFData{ - ChainID: request.RelaySession.SpecID, + ChainId: request.RelaySession.SpecId, Epoch: request.RelaySession.Epoch, Differentiator: differentiator, VrfValue: vrf_res, diff --git a/protocol/rpcprovider/provider_listener.go b/protocol/rpcprovider/provider_listener.go index d7d2bccbc0..ba0cb504c2 100644 --- a/protocol/rpcprovider/provider_listener.go +++ b/protocol/rpcprovider/provider_listener.go @@ -114,7 +114,7 @@ func (rs *relayServer) RelaySubscribe(request *pairingtypes.RelayRequest, srv pa func (rs *relayServer) findReceiver(request *pairingtypes.RelayRequest) (RelayReceiver, error) { apiInterface := request.RelayData.ApiInterface - chainID := request.RelaySession.SpecID + chainID := request.RelaySession.SpecId endpoint := lavasession.RPCEndpoint{ChainID: chainID, ApiInterface: apiInterface} rs.lock.RLock() defer rs.lock.RUnlock() diff --git a/protocol/rpcprovider/rewardserver/reward_server.go b/protocol/rpcprovider/rewardserver/reward_server.go index b5bb56d29a..a7858ac8ec 100644 --- a/protocol/rpcprovider/rewardserver/reward_server.go +++ b/protocol/rpcprovider/rewardserver/reward_server.go @@ -140,7 +140,7 @@ func (rws *RewardServer) sendRewardsClaim(ctx context.Context, epoch uint64) err utils.LavaFormatError("invalid consumer address extraction from relay", err, &map[string]string{"relay": fmt.Sprintf("%+v", relay)}) continue } - expectedPay := PaymentRequest{ChainID: relay.SpecID, CU: relay.CuSum, BlockHeightDeadline: relay.Epoch, Amount: sdk.Coin{}, Client: consumerAddr, UniqueIdentifier: relay.SessionId, Description: strconv.FormatUint(rws.serverID, 10)} + expectedPay := PaymentRequest{ChainID: relay.SpecId, CU: relay.CuSum, BlockHeightDeadline: relay.Epoch, Amount: sdk.Coin{}, Client: consumerAddr, UniqueIdentifier: relay.SessionId, Description: strconv.FormatUint(rws.serverID, 10)} rws.addExpectedPayment(expectedPay) rws.updateCUServiced(relay.CuSum) } diff --git a/protocol/rpcprovider/rpcprovider_server.go b/protocol/rpcprovider/rpcprovider_server.go index dbe034e8e0..688ab16a94 100644 --- a/protocol/rpcprovider/rpcprovider_server.go +++ b/protocol/rpcprovider/rpcprovider_server.go @@ -343,7 +343,7 @@ func (rpcps *RPCProviderServer) verifyRelaySession(ctx context.Context, request var validPairing bool var 
verifyPairingError error // verify pairing for DR session - validPairing, selfProviderIndex, verifyPairingError = rpcps.stateTracker.VerifyPairing(ctx, consumerAddressString, rpcps.providerAddress.String(), uint64(request.RelaySession.Epoch), request.RelaySession.SpecID) + validPairing, selfProviderIndex, verifyPairingError = rpcps.stateTracker.VerifyPairing(ctx, consumerAddressString, rpcps.providerAddress.String(), uint64(request.RelaySession.Epoch), request.RelaySession.SpecId) if verifyPairingError != nil { return nil, nil, utils.LavaFormatError("Failed to VerifyPairing after verifyRelaySession for GetDataReliabilitySession", verifyPairingError, &map[string]string{"sessionID": strconv.FormatUint(request.RelaySession.SessionId, 10), "consumer": consumerAddressString, "provider": rpcps.providerAddress.String(), "relayNum": strconv.FormatUint(request.RelaySession.RelayNum, 10)}) } @@ -364,7 +364,7 @@ func (rpcps *RPCProviderServer) verifyRelaySession(ctx context.Context, request "dataReliability": string(dataReliabilityMarshalled), "vrfIndex": strconv.FormatInt(vrfIndex, 10), "self Index": strconv.FormatInt(selfProviderIndex, 10), - "vrf_chainId": request.DataReliability.ChainID, + "vrf_chainId": request.DataReliability.ChainId, "vrf_epoch": strconv.FormatInt(request.DataReliability.Epoch, 10), }) } @@ -386,14 +386,14 @@ func (rpcps *RPCProviderServer) getSingleProviderSession(ctx context.Context, re singleProviderSession, err := rpcps.providerSessionManager.GetSession(consumerAddressString, uint64(request.Epoch), request.SessionId, request.RelayNum) if err != nil { if lavasession.ConsumerNotRegisteredYet.Is(err) { - valid, selfProviderIndex, verifyPairingError := rpcps.stateTracker.VerifyPairing(ctx, consumerAddressString, rpcps.providerAddress.String(), uint64(request.Epoch), request.SpecID) + valid, selfProviderIndex, verifyPairingError := rpcps.stateTracker.VerifyPairing(ctx, consumerAddressString, rpcps.providerAddress.String(), uint64(request.Epoch), request.SpecId) if verifyPairingError != nil { return nil, utils.LavaFormatError("Failed to VerifyPairing after ConsumerNotRegisteredYet", verifyPairingError, &map[string]string{"sessionID": strconv.FormatUint(request.SessionId, 10), "consumer": consumerAddressString, "provider": rpcps.providerAddress.String(), "relayNum": strconv.FormatUint(request.RelayNum, 10)}) } if !valid { return nil, utils.LavaFormatError("VerifyPairing, this consumer address is not valid with this provider", nil, &map[string]string{"epoch": strconv.FormatInt(request.Epoch, 10), "sessionID": strconv.FormatUint(request.SessionId, 10), "consumer": consumerAddressString, "provider": rpcps.providerAddress.String(), "relayNum": strconv.FormatUint(request.RelayNum, 10)}) } - _, maxCuForConsumer, getVrfAndMaxCuError := rpcps.stateTracker.GetVrfPkAndMaxCuForUser(ctx, consumerAddressString, request.SpecID, uint64(request.Epoch)) + _, maxCuForConsumer, getVrfAndMaxCuError := rpcps.stateTracker.GetVrfPkAndMaxCuForUser(ctx, consumerAddressString, request.SpecId, uint64(request.Epoch)) if getVrfAndMaxCuError != nil { return nil, utils.LavaFormatError("ConsumerNotRegisteredYet: GetVrfPkAndMaxCuForUser failed", getVrfAndMaxCuError, &map[string]string{"epoch": strconv.FormatInt(request.Epoch, 10), "sessionID": strconv.FormatUint(request.SessionId, 10), "consumer": consumerAddressString, "provider": rpcps.providerAddress.String(), "relayNum": strconv.FormatUint(request.RelayNum, 10)}) } @@ -414,8 +414,8 @@ func (rpcps *RPCProviderServer) verifyRelayRequestMetaData(requestSession 
*pairi if requestSession.Provider != providerAddress { return utils.LavaFormatError("request had the wrong provider", nil, &map[string]string{"providerAddress": providerAddress, "request_provider": requestSession.Provider}) } - if requestSession.SpecID != rpcps.rpcProviderEndpoint.ChainID { - return utils.LavaFormatError("request had the wrong specID", nil, &map[string]string{"request_specID": requestSession.SpecID, "chainID": rpcps.rpcProviderEndpoint.ChainID}) + if requestSession.SpecId != rpcps.rpcProviderEndpoint.ChainID { + return utils.LavaFormatError("request had the wrong specID", nil, &map[string]string{"request_specID": requestSession.SpecId, "chainID": rpcps.rpcProviderEndpoint.ChainID}) } if requestSession.LavaChainId != rpcps.lavaChainID { return utils.LavaFormatError("request had the wrong lava chain ID", nil, &map[string]string{"request_lavaChainID": requestSession.LavaChainId, "lava chain id": rpcps.lavaChainID}) @@ -427,7 +427,7 @@ func (rpcps *RPCProviderServer) verifyDataReliabilityRelayRequest(ctx context.Co if request.RelaySession.CuSum != lavasession.DataReliabilityCuSum { return lavasession.IndexNotFound, utils.LavaFormatError("request's CU sum is not equal to the data reliability CU sum", nil, &map[string]string{"cuSum": strconv.FormatUint(request.RelaySession.CuSum, 10), "DataReliabilityCuSum": strconv.Itoa(lavasession.DataReliabilityCuSum)}) } - vrf_pk, _, err := rpcps.stateTracker.GetVrfPkAndMaxCuForUser(ctx, consumerAddress.String(), request.RelaySession.SpecID, uint64(request.RelaySession.Epoch)) + vrf_pk, _, err := rpcps.stateTracker.GetVrfPkAndMaxCuForUser(ctx, consumerAddress.String(), request.RelaySession.SpecId, uint64(request.RelaySession.Epoch)) if err != nil { return lavasession.IndexNotFound, utils.LavaFormatError("failed to get vrfpk and maxCURes for data reliability!", err, &map[string]string{ "userAddr": consumerAddress.String(), @@ -456,9 +456,9 @@ func (rpcps *RPCProviderServer) verifyDataReliabilityRelayRequest(ctx context.Co &map[string]string{"requested epoch": strconv.FormatInt(request.RelaySession.Epoch, 10), "userAddr": consumerAddress.String(), "dataReliability": fmt.Sprintf("%v", request.DataReliability)}) } _, dataReliabilityThreshold := rpcps.chainParser.DataReliabilityParams() - providersCount, err := rpcps.stateTracker.GetProvidersCountForConsumer(ctx, consumerAddress.String(), uint64(request.RelaySession.Epoch), request.RelaySession.SpecID) + providersCount, err := rpcps.stateTracker.GetProvidersCountForConsumer(ctx, consumerAddress.String(), uint64(request.RelaySession.Epoch), request.RelaySession.SpecId) if err != nil { - return lavasession.IndexNotFound, utils.LavaFormatError("VerifyReliabilityAddressSigning failed fetching providers count for consumer", err, &map[string]string{"chainID": request.RelaySession.SpecID, "consumer": consumerAddress.String(), "epoch": strconv.FormatInt(request.RelaySession.Epoch, 10)}) + return lavasession.IndexNotFound, utils.LavaFormatError("VerifyReliabilityAddressSigning failed fetching providers count for consumer", err, &map[string]string{"chainID": request.RelaySession.SpecId, "consumer": consumerAddress.String(), "epoch": strconv.FormatInt(request.RelaySession.Epoch, 10)}) } vrfIndex, vrfErr := utils.GetIndexForVrf(request.DataReliability.VrfValue, providersCount, dataReliabilityThreshold) if vrfErr != nil || otherProviderIndex == vrfIndex { @@ -509,7 +509,7 @@ func (rpcps *RPCProviderServer) VerifyReliabilityAddressSigning(ctx context.Cont return false, 0, utils.LavaFormatError("failed 
converting signer to address", err, &map[string]string{"consumer": consumer.String(), "PubKey": pubKey.Address().String()}) } - return rpcps.stateTracker.VerifyPairing(ctx, consumer.String(), providerAccAddress.String(), uint64(request.RelaySession.Epoch), request.RelaySession.SpecID) // return if this pairing is authorised + return rpcps.stateTracker.VerifyPairing(ctx, consumer.String(), providerAccAddress.String(), uint64(request.RelaySession.Epoch), request.RelaySession.SpecId) // return if this pairing is authorised } func (rpcps *RPCProviderServer) handleRelayErrorStatus(err error) error { diff --git a/testutil/common/common.go b/testutil/common/common.go index 30745bbe31..d279d30c32 100644 --- a/testutil/common/common.go +++ b/testutil/common/common.go @@ -86,11 +86,11 @@ func BuildRelayRequest(ctx context.Context, provider string, contentHash []byte, Provider: provider, ContentHash: contentHash, SessionId: uint64(1), - SpecID: spec, + SpecId: spec, CuSum: cuSum, Epoch: sdk.UnwrapSDKContext(ctx).BlockHeight(), RelayNum: 0, - QoSReport: qos, + QosReport: qos, LavaChainId: sdk.UnwrapSDKContext(ctx).BlockHeader().ChainID, } if qos != nil { @@ -116,11 +116,11 @@ func CreateMsgDetection(ctx context.Context, consumer Account, provider0 Account Provider: provider0.Addr.String(), ContentHash: sigs.CalculateContentHashForRelayData(msg.ResponseConflict.ConflictRelayData0.Request.RelayData), SessionId: uint64(1), - SpecID: spec.Index, + SpecId: spec.Index, CuSum: 0, Epoch: sdk.UnwrapSDKContext(ctx).BlockHeight(), RelayNum: 0, - QoSReport: &types.QualityOfServiceReport{Latency: sdk.OneDec(), Availability: sdk.OneDec(), Sync: sdk.OneDec()}, + QosReport: &types.QualityOfServiceReport{Latency: sdk.OneDec(), Availability: sdk.OneDec(), Sync: sdk.OneDec()}, } msg.ResponseConflict.ConflictRelayData0.Request.DataReliability = nil diff --git a/x/conflict/keeper/conflict.go b/x/conflict/keeper/conflict.go index 4ec9b63232..fc1b616118 100644 --- a/x/conflict/keeper/conflict.go +++ b/x/conflict/keeper/conflict.go @@ -16,9 +16,9 @@ func (k Keeper) ValidateFinalizationConflict(ctx sdk.Context, conflictData *type func (k Keeper) ValidateResponseConflict(ctx sdk.Context, conflictData *types.ResponseConflict, clientAddr sdk.AccAddress) error { // 1. 
validate mismatching data - chainID := conflictData.ConflictRelayData0.Request.RelaySession.SpecID - if chainID != conflictData.ConflictRelayData1.Request.RelaySession.SpecID { - return fmt.Errorf("mismatching request parameters between providers %s, %s", chainID, conflictData.ConflictRelayData1.Request.RelaySession.SpecID) + chainID := conflictData.ConflictRelayData0.Request.RelaySession.SpecId + if chainID != conflictData.ConflictRelayData1.Request.RelaySession.SpecId { + return fmt.Errorf("mismatching request parameters between providers %s, %s", chainID, conflictData.ConflictRelayData1.Request.RelaySession.SpecId) } block := conflictData.ConflictRelayData0.Request.RelaySession.Epoch if block != conflictData.ConflictRelayData1.Request.RelaySession.Epoch { diff --git a/x/conflict/keeper/msg_server_detection.go b/x/conflict/keeper/msg_server_detection.go index 3cbe592751..15268078dc 100644 --- a/x/conflict/keeper/msg_server_detection.go +++ b/x/conflict/keeper/msg_server_detection.go @@ -70,7 +70,7 @@ func (k msgServer) Detection(goCtx context.Context, msg *types.MsgDetection) (*t conflictVote.VoteDeadline = voteDeadline conflictVote.ApiUrl = msg.ResponseConflict.ConflictRelayData0.Request.RelayData.ApiUrl conflictVote.ClientAddress = msg.Creator - conflictVote.ChainID = msg.ResponseConflict.ConflictRelayData0.Request.RelaySession.SpecID + conflictVote.ChainID = msg.ResponseConflict.ConflictRelayData0.Request.RelaySession.SpecId conflictVote.RequestBlock = uint64(msg.ResponseConflict.ConflictRelayData0.Request.RelayData.RequestBlock) conflictVote.RequestData = msg.ResponseConflict.ConflictRelayData0.Request.RelayData.Data diff --git a/x/conflict/keeper/msg_server_detection_test.go b/x/conflict/keeper/msg_server_detection_test.go index 112e08ba55..ccca26b625 100644 --- a/x/conflict/keeper/msg_server_detection_test.go +++ b/x/conflict/keeper/msg_server_detection_test.go @@ -101,11 +101,11 @@ func TestDetection(t *testing.T) { msg.ResponseConflict.ConflictRelayData1.Request.RelayData.ConnectionType += tt.ConnectionType msg.ResponseConflict.ConflictRelayData1.Request.RelayData.ApiUrl += tt.ApiUrl msg.ResponseConflict.ConflictRelayData1.Request.RelaySession.Epoch += tt.BlockHeight - msg.ResponseConflict.ConflictRelayData1.Request.RelaySession.SpecID += tt.ChainID + msg.ResponseConflict.ConflictRelayData1.Request.RelaySession.SpecId += tt.ChainID msg.ResponseConflict.ConflictRelayData1.Request.RelayData.Data = append(msg.ResponseConflict.ConflictRelayData1.Request.RelayData.Data, tt.Data...) 
msg.ResponseConflict.ConflictRelayData1.Request.RelayData.RequestBlock += tt.RequestBlock msg.ResponseConflict.ConflictRelayData1.Request.RelaySession.CuSum += tt.Cusum - msg.ResponseConflict.ConflictRelayData1.Request.RelaySession.QoSReport = tt.QoSReport + msg.ResponseConflict.ConflictRelayData1.Request.RelaySession.QosReport = tt.QoSReport msg.ResponseConflict.ConflictRelayData1.Request.RelaySession.RelayNum += tt.RelayNum msg.ResponseConflict.ConflictRelayData1.Request.RelaySession.SessionId += tt.SeassionID msg.ResponseConflict.ConflictRelayData1.Request.RelaySession.Provider = tt.Provider1.Addr.String() diff --git a/x/pairing/keeper/fixation_test.go b/x/pairing/keeper/fixation_test.go index 16fa75a29a..ec715ce984 100644 --- a/x/pairing/keeper/fixation_test.go +++ b/x/pairing/keeper/fixation_test.go @@ -105,7 +105,7 @@ func TestEpochPaymentDeletionWithMemoryShortening(t *testing.T) { Provider: ts.providers[0].Addr.String(), ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(1), - SpecID: ts.spec.Name, + SpecId: ts.spec.Name, CuSum: ts.spec.Apis[0].ComputeUnits * 10, Epoch: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), RelayNum: 0, diff --git a/x/pairing/keeper/msg_server_freeze_test.go b/x/pairing/keeper/msg_server_freeze_test.go index 6abd3ff2a8..46a5c1a5e4 100644 --- a/x/pairing/keeper/msg_server_freeze_test.go +++ b/x/pairing/keeper/msg_server_freeze_test.go @@ -245,7 +245,7 @@ func TestPaymentFrozen(t *testing.T) { relayRequest := &types.RelaySession{ Provider: providerToFreeze.Address, SessionId: uint64(1), - SpecID: ts.spec.Name, + SpecId: ts.spec.Name, CuSum: ts.spec.Apis[0].ComputeUnits * 10, Epoch: blockForPaymentBeforeFreeze, RelayNum: 0, diff --git a/x/pairing/keeper/msg_server_relay_payment.go b/x/pairing/keeper/msg_server_relay_payment.go index a9c5138bff..ffc6b94c74 100644 --- a/x/pairing/keeper/msg_server_relay_payment.go +++ b/x/pairing/keeper/msg_server_relay_payment.go @@ -58,14 +58,14 @@ func (k msgServer) RelayPayment(goCtx context.Context, msg *types.MsgRelayPaymen } // TODO: add support for spec changes - spec, found := k.specKeeper.GetSpec(ctx, relay.SpecID) + spec, found := k.specKeeper.GetSpec(ctx, relay.SpecId) if !found || !spec.Enabled { - return errorLogAndFormat("relay_payment_spec", map[string]string{"chainID": relay.SpecID}, "invalid spec ID specified in proof") + return errorLogAndFormat("relay_payment_spec", map[string]string{"chainID": relay.SpecId}, "invalid spec ID specified in proof") } isValidPairing, vrfk, thisProviderIndex, allowedCU, providersToPair, legacy, err := k.Keeper.ValidatePairingForClient( ctx, - relay.SpecID, + relay.SpecId, clientAddr, providerAddr, uint64(relay.Epoch), @@ -87,12 +87,12 @@ func (k msgServer) RelayPayment(goCtx context.Context, msg *types.MsgRelayPaymen payReliability := false // validate data reliability - vrfStoreKey := VRFKey{ChainID: relay.SpecID, Epoch: epochStart, Consumer: clientAddr.String()} + vrfStoreKey := VRFKey{ChainID: relay.SpecId, Epoch: epochStart, Consumer: clientAddr.String()} if vrfData, ok := dataReliabilityStore[vrfStoreKey]; ok { delete(dataReliabilityStore, vrfStoreKey) details := map[string]string{"client": clientAddr.String(), "provider": providerAddr.String()} if !spec.DataReliabilityEnabled { - details["chainID"] = relay.SpecID + details["chainID"] = relay.SpecId return errorLogAndFormat("relay_payment_data_reliability_disabled", details, "compares_hashes false for spec and reliability was received") } @@ -114,7 +114,7 @@ func (k msgServer) RelayPayment(goCtx context.Context, msg 
*types.MsgRelayPaymen // check this other provider is indeed legitimate isValidPairing, _, _, _, _, _, err := k.Keeper.ValidatePairingForClient( ctx, - relay.SpecID, + relay.SpecId, clientAddr, otherProviderAddress, uint64(relay.Epoch), @@ -158,7 +158,7 @@ func (k msgServer) RelayPayment(goCtx context.Context, msg *types.MsgRelayPaymen } // this prevents double spend attacks, and tracks the CU per session a client can use - totalCUInEpochForUserProvider, err := k.Keeper.AddEpochPayment(ctx, relay.SpecID, epochStart, clientAddr, providerAddr, relay.CuSum, strconv.FormatUint(relay.SessionId, 16)) + totalCUInEpochForUserProvider, err := k.Keeper.AddEpochPayment(ctx, relay.SpecId, epochStart, clientAddr, providerAddr, relay.CuSum, strconv.FormatUint(relay.SessionId, 16)) if err != nil { // double spending on user detected! details := map[string]string{ @@ -198,15 +198,15 @@ func (k msgServer) RelayPayment(goCtx context.Context, msg *types.MsgRelayPaymen if len(msg.DescriptionString) > 20 { msg.DescriptionString = msg.DescriptionString[:20] } - details := map[string]string{"chainID": fmt.Sprintf(relay.SpecID), "client": clientAddr.String(), "provider": providerAddr.String(), "CU": strconv.FormatUint(relay.CuSum, 10), "BasePay": rewardCoins.String(), "totalCUInEpoch": strconv.FormatUint(totalCUInEpochForUserProvider, 10), "uniqueIdentifier": strconv.FormatUint(relay.SessionId, 10), "descriptionString": msg.DescriptionString} + details := map[string]string{"chainID": fmt.Sprintf(relay.SpecId), "client": clientAddr.String(), "provider": providerAddr.String(), "CU": strconv.FormatUint(relay.CuSum, 10), "BasePay": rewardCoins.String(), "totalCUInEpoch": strconv.FormatUint(totalCUInEpochForUserProvider, 10), "uniqueIdentifier": strconv.FormatUint(relay.SessionId, 10), "descriptionString": msg.DescriptionString} - if relay.QoSReport != nil { - QoS, err := relay.QoSReport.ComputeQoS() + if relay.QosReport != nil { + QoS, err := relay.QosReport.ComputeQoS() if err != nil { details["error"] = err.Error() return errorLogAndFormat("relay_payment_QoS", details, "bad QoSReport") } - details["QoSReport"] = "Latency: " + relay.QoSReport.Latency.String() + ", Availability: " + relay.QoSReport.Availability.String() + ", Sync: " + relay.QoSReport.Sync.String() + details["QoSReport"] = "Latency: " + relay.QosReport.Latency.String() + ", Availability: " + relay.QosReport.Availability.String() + ", Sync: " + relay.QosReport.Sync.String() details["QoSScore"] = QoS.String() reward = reward.Mul(QoS.Mul(k.QoSWeight(ctx)).Add(sdk.OneDec().Sub(k.QoSWeight(ctx)))) // reward*QOSScore*QOSWeight + reward*(1-QOSWeight) = reward*(QOSScore*QOSWeight + (1-QOSWeight)) @@ -217,7 +217,7 @@ func (k msgServer) RelayPayment(goCtx context.Context, msg *types.MsgRelayPaymen amountToBurnClient := k.Keeper.BurnCoinsPerCU(ctx).MulInt64(int64(relay.CuSum)) if legacy { burnAmount := sdk.Coin{Amount: amountToBurnClient.TruncateInt(), Denom: epochstoragetypes.TokenDenom} - burnSucceeded, err2 := k.BurnClientStake(ctx, relay.SpecID, clientAddr, burnAmount, false) + burnSucceeded, err2 := k.BurnClientStake(ctx, relay.SpecId, clientAddr, burnAmount, false) if err2 != nil { details["amountToBurn"] = burnAmount.String() @@ -281,7 +281,7 @@ func (k msgServer) RelayPayment(goCtx context.Context, msg *types.MsgRelayPaymen } // update provider payment storage with complainer's CU - err = k.updateProviderPaymentStorageWithComplainerCU(ctx, relay.UnresponsiveProviders, logger, epochStart, relay.SpecID, relay.CuSum, servicersToPair, clientAddr) + err = 
k.updateProviderPaymentStorageWithComplainerCU(ctx, relay.UnresponsiveProviders, logger, epochStart, relay.SpecId, relay.CuSum, servicersToPair, clientAddr) if err != nil { utils.LogLavaEvent(ctx, logger, types.UnresponsiveProviderUnstakeFailedEventName, map[string]string{"err:": err.Error()}, "Error Unresponsive Providers could not unstake") } @@ -387,7 +387,7 @@ func dataReliabilityByConsumer(vrfs []*types.VRFData) (dataReliabilityByConsumer dataReliabilityByConsumer[VRFKey{ Consumer: signer.String(), Epoch: uint64(vrf.Epoch), - ChainID: vrf.ChainID, + ChainID: vrf.ChainId, }] = vrf } return dataReliabilityByConsumer, nil diff --git a/x/pairing/keeper/msg_server_relay_payment_gov_test.go b/x/pairing/keeper/msg_server_relay_payment_gov_test.go index 602967f063..e1235acd80 100644 --- a/x/pairing/keeper/msg_server_relay_payment_gov_test.go +++ b/x/pairing/keeper/msg_server_relay_payment_gov_test.go @@ -68,11 +68,11 @@ func TestRelayPaymentGovQosWeightChange(t *testing.T) { Provider: ts.providers[0].Addr.String(), ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(ti), - SpecID: ts.spec.Name, + SpecId: ts.spec.Name, CuSum: ts.spec.Apis[0].ComputeUnits * 10, Epoch: int64(tt.epoch), RelayNum: 0, - QoSReport: badQoS, + QosReport: badQoS, } // Sign and send the payment requests for block 0 tx @@ -98,7 +98,7 @@ func TestRelayPaymentGovQosWeightChange(t *testing.T) { require.Equal(t, stakeClient.Stake.Amount.Int64()-burn.TruncateInt64(), newStakeClient.Stake.Amount.Int64()) // Compute the relay request's QoS score - score, err := relayRequest.QoSReport.ComputeQoS() + score, err := relayRequest.QosReport.ComputeQoS() require.Nil(t, err) // Calculate how much the provider wants to get paid for its service @@ -170,7 +170,7 @@ func TestRelayPaymentGovEpochBlocksDecrease(t *testing.T) { Provider: ts.providers[0].Addr.String(), ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(ti), - SpecID: ts.spec.Name, + SpecId: ts.spec.Name, CuSum: ts.spec.Apis[0].ComputeUnits * 10, Epoch: int64(tt.epoch), RelayNum: 0, @@ -249,7 +249,7 @@ func TestRelayPaymentGovEpochBlocksIncrease(t *testing.T) { Provider: ts.providers[0].Addr.String(), ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(ti), - SpecID: ts.spec.Name, + SpecId: ts.spec.Name, CuSum: ts.spec.Apis[0].ComputeUnits * 10, Epoch: int64(tt.epoch), RelayNum: 0, @@ -333,7 +333,7 @@ func TestRelayPaymentGovEpochToSaveDecrease(t *testing.T) { Provider: ts.providers[0].Addr.String(), ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(ti), - SpecID: ts.spec.Name, + SpecId: ts.spec.Name, CuSum: ts.spec.Apis[0].ComputeUnits * 10, Epoch: int64(tt.epoch), RelayNum: 0, @@ -406,7 +406,7 @@ func TestRelayPaymentGovEpochToSaveIncrease(t *testing.T) { Provider: ts.providers[0].Addr.String(), ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(ti), - SpecID: ts.spec.Name, + SpecId: ts.spec.Name, CuSum: ts.spec.Apis[0].ComputeUnits * 10, Epoch: int64(tt.epoch), RelayNum: 0, @@ -496,7 +496,7 @@ func TestRelayPaymentGovStakeToMaxCUListMaxCUDecrease(t *testing.T) { Provider: ts.providers[0].Addr.String(), ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(ti), - SpecID: ts.spec.Name, + SpecId: ts.spec.Name, CuSum: uint64(250001), // the relayRequest costs 250001 (more than the previous limit, and less than in the new limit). 
This should influence the validity of the request Epoch: int64(tt.epoch), RelayNum: 0, @@ -587,7 +587,7 @@ func TestRelayPaymentGovStakeToMaxCUListStakeThresholdIncrease(t *testing.T) { Provider: ts.providers[0].Addr.String(), ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(ti), - SpecID: ts.spec.Name, + SpecId: ts.spec.Name, CuSum: uint64(200000), // the relayRequest costs 200000 (less than the previous limit, and more than in the new limit). This should influence the validity of the request Epoch: int64(tt.epoch), RelayNum: 0, @@ -680,7 +680,7 @@ func TestRelayPaymentGovEpochBlocksMultipleChanges(t *testing.T) { Provider: ts.providers[0].Addr.String(), ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(ti), - SpecID: ts.spec.Name, + SpecId: ts.spec.Name, CuSum: ts.spec.Apis[0].ComputeUnits * 10, Epoch: int64(tt.paymentEpoch), RelayNum: 0, @@ -806,7 +806,7 @@ func TestStakePaymentUnstake(t *testing.T) { Provider: ts.providers[0].Addr.String(), ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(1), - SpecID: ts.spec.Name, + SpecId: ts.spec.Name, CuSum: uint64(10000), Epoch: int64(sdk.UnwrapSDKContext(ts.ctx).BlockHeight()), RelayNum: 0, @@ -880,7 +880,7 @@ func TestRelayPaymentMemoryTransferAfterEpochChangeWithGovParamChange(t *testing Provider: ts.providers[0].Addr.String(), ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(1), - SpecID: ts.spec.Name, + SpecId: ts.spec.Name, CuSum: uint64(10000), Epoch: int64(epochAfterEpochBlocksChanged), RelayNum: 0, @@ -962,7 +962,7 @@ func verifyRelayPaymentObjects(t *testing.T, ts *testStruct, relayRequest *pairi require.Equal(t, relayRequest.GetCuSum(), uniquePaymentStorageClientProviderFromProviderPaymentStorage.GetUsedCU()) // when checking CU, the client may be trying to use a relay request with more CU than his MaxCU (determined by StakeThreshold) - clientStakeEntry, err := ts.keepers.Epochstorage.GetStakeEntryForClientEpoch(sdk.UnwrapSDKContext(ts.ctx), relayRequest.GetSpecID(), ts.clients[0].Addr, uint64(relayRequest.GetEpoch())) + clientStakeEntry, err := ts.keepers.Epochstorage.GetStakeEntryForClientEpoch(sdk.UnwrapSDKContext(ts.ctx), relayRequest.GetSpecId(), ts.clients[0].Addr, uint64(relayRequest.GetEpoch())) require.Nil(t, err) clientMaxCU, err := ts.keepers.Pairing.ClientMaxCUProviderForBlock(sdk.UnwrapSDKContext(ts.ctx), uint64(relayRequest.GetEpoch()), clientStakeEntry) require.Nil(t, err) diff --git a/x/pairing/keeper/msg_server_relay_payment_test.go b/x/pairing/keeper/msg_server_relay_payment_test.go index 51757d6730..89c2d92715 100644 --- a/x/pairing/keeper/msg_server_relay_payment_test.go +++ b/x/pairing/keeper/msg_server_relay_payment_test.go @@ -743,7 +743,7 @@ func TestRelayPaymentDataReliability(t *testing.T) { index0, err = utils.GetIndexForVrf(vrfRes0, uint32(ts.keepers.Pairing.ServicersToPairCountRaw(sdk.UnwrapSDKContext(ts.ctx))), ts.spec.ReliabilityThreshold) require.Nil(t, err) - providers, err = ts.keepers.Pairing.GetPairingForClient(sdk.UnwrapSDKContext(ts.ctx), relaySession.SpecID, ts.clients[0].Addr) + providers, err = ts.keepers.Pairing.GetPairingForClient(sdk.UnwrapSDKContext(ts.ctx), relaySession.SpecId, ts.clients[0].Addr) require.Nil(t, err) if providers[index0].Address != ts.providers[0].Addr.String() { @@ -754,7 +754,7 @@ func TestRelayPaymentDataReliability(t *testing.T) { } vrf_res0, vrf_proof0 := utils.ProveVrfOnRelay(relayRequest.RelayData, relayReply, ts.clients[0].VrfSk, false, currentEpoch) dataReliability0 := &types.VRFData{ - ChainID: 
relayRequest.RelaySession.SpecID, + ChainId: relayRequest.RelaySession.SpecId, Epoch: relayRequest.RelaySession.Epoch, Differentiator: false, VrfValue: vrf_res0, @@ -863,7 +863,7 @@ GetWrongProvider: index0, _ = utils.GetIndexForVrf(vrfRes0, uint32(ts.keepers.Pairing.ServicersToPairCountRaw(sdk.UnwrapSDKContext(ts.ctx))), ts.spec.ReliabilityThreshold) index1, _ := utils.GetIndexForVrf(vrfRes1, uint32(ts.keepers.Pairing.ServicersToPairCountRaw(sdk.UnwrapSDKContext(ts.ctx))), ts.spec.ReliabilityThreshold) - providers, err = ts.keepers.Pairing.GetPairingForClient(sdk.UnwrapSDKContext(ts.ctx), relaySession.SpecID, ts.clients[0].Addr) + providers, err = ts.keepers.Pairing.GetPairingForClient(sdk.UnwrapSDKContext(ts.ctx), relaySession.SpecId, ts.clients[0].Addr) require.Nil(t, err) // two providers returned by GetIndexForVrf and the provider getting tested need 1 more to perform this test properly require.Greater(t, len(providers), 3) @@ -887,7 +887,7 @@ GetWrongProvider: } vrf_res0, vrf_proof0 := utils.ProveVrfOnRelay(relayRequest.RelayData, relayReply, ts.clients[0].VrfSk, false, currentEpoch) dataReliability0 := &types.VRFData{ - ChainID: relayRequest.RelaySession.SpecID, + ChainId: relayRequest.RelaySession.SpecId, Epoch: relayRequest.RelaySession.Epoch, Differentiator: false, VrfValue: vrf_res0, @@ -955,7 +955,7 @@ func TestRelayPaymentDataReliabilityBelowReliabilityThreshold(t *testing.T) { require.Equal(t, index1, int64(-1)) vrf_res0, vrf_proof0 := utils.ProveVrfOnRelay(relayRequest.RelayData, relayReply, ts.clients[0].VrfSk, false, currentEpoch) dataReliability0 := &types.VRFData{ - ChainID: relayRequest.RelaySession.SpecID, + ChainId: relayRequest.RelaySession.SpecId, Epoch: relayRequest.RelaySession.Epoch, Differentiator: false, VrfValue: vrf_res0, @@ -1023,7 +1023,7 @@ func TestRelayPaymentDataReliabilityDifferentClientSign(t *testing.T) { index0, _ = utils.GetIndexForVrf(vrfRes0, uint32(ts.keepers.Pairing.ServicersToPairCountRaw(sdk.UnwrapSDKContext(ts.ctx))), ts.spec.ReliabilityThreshold) - providers, err = ts.keepers.Pairing.GetPairingForClient(sdk.UnwrapSDKContext(ts.ctx), relaySession.SpecID, ts.clients[0].Addr) + providers, err = ts.keepers.Pairing.GetPairingForClient(sdk.UnwrapSDKContext(ts.ctx), relaySession.SpecId, ts.clients[0].Addr) require.Nil(t, err) if providers[index0].Address != ts.providers[0].Addr.String() { @@ -1034,7 +1034,7 @@ func TestRelayPaymentDataReliabilityDifferentClientSign(t *testing.T) { vrf_res0, vrf_proof0 := utils.ProveVrfOnRelay(relayRequest.RelayData, relayReply, ts.clients[0].VrfSk, false, currentEpoch) dataReliability0 := &types.VRFData{ - ChainID: relayRequest.RelaySession.SpecID, + ChainId: relayRequest.RelaySession.SpecId, Epoch: relayRequest.RelaySession.Epoch, Differentiator: false, VrfValue: vrf_res0, @@ -1099,7 +1099,7 @@ func TestRelayPaymentDataReliabilityDoubleSpendDifferentEpoch(t *testing.T) { index0, _ = utils.GetIndexForVrf(vrfRes0, uint32(ts.keepers.Pairing.ServicersToPairCountRaw(sdk.UnwrapSDKContext(ts.ctx))), ts.spec.ReliabilityThreshold) - providers, err = ts.keepers.Pairing.GetPairingForClient(sdk.UnwrapSDKContext(ts.ctx), relaySession.SpecID, ts.clients[0].Addr) + providers, err = ts.keepers.Pairing.GetPairingForClient(sdk.UnwrapSDKContext(ts.ctx), relaySession.SpecId, ts.clients[0].Addr) require.Nil(t, err) if providers[index0].Address != ts.providers[0].Addr.String() { @@ -1111,7 +1111,7 @@ func TestRelayPaymentDataReliabilityDoubleSpendDifferentEpoch(t *testing.T) { vrf_res0, vrf_proof0 := 
utils.ProveVrfOnRelay(relayRequest.RelayData, relayReply, ts.clients[0].VrfSk, false, currentEpoch) dataReliability0 := &types.VRFData{ - ChainID: relayRequest.RelaySession.SpecID, + ChainId: relayRequest.RelaySession.SpecId, Epoch: relayRequest.RelaySession.Epoch, Differentiator: false, VrfValue: vrf_res0, diff --git a/x/pairing/keeper/unresponsive_provider_test.go b/x/pairing/keeper/unresponsive_provider_test.go index 75555942ee..940cfd300d 100644 --- a/x/pairing/keeper/unresponsive_provider_test.go +++ b/x/pairing/keeper/unresponsive_provider_test.go @@ -60,7 +60,7 @@ func TestUnresponsivenessStressTest(t *testing.T) { Provider: providerAddress, ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(0), - SpecID: ts.spec.Name, + SpecId: ts.spec.Name, CuSum: ts.spec.Apis[0].ComputeUnits*10 + uint64(clientIndex), Epoch: relayEpoch, RelayNum: 0, @@ -146,7 +146,7 @@ func TestUnstakingProviderForUnresponsiveness(t *testing.T) { Provider: ts.providers[0].Addr.String(), ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(0), - SpecID: ts.spec.Name, + SpecId: ts.spec.Name, CuSum: ts.spec.Apis[0].ComputeUnits*10 + uint64(clientIndex), Epoch: relayEpoch, RelayNum: 0, @@ -245,7 +245,7 @@ func TestUnstakingProviderForUnresponsivenessContinueComplainingAfterUnstake(t * Provider: ts.providers[0].Addr.String(), ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(0), - SpecID: ts.spec.Name, + SpecId: ts.spec.Name, CuSum: ts.spec.Apis[0].ComputeUnits * 10, Epoch: relayEpoch, RelayNum: 0, @@ -293,7 +293,7 @@ func TestUnstakingProviderForUnresponsivenessContinueComplainingAfterUnstake(t * Provider: ts.providers[0].Addr.String(), ContentHash: []byte(ts.spec.Apis[0].Name), SessionId: uint64(2), - SpecID: ts.spec.Name, + SpecId: ts.spec.Name, CuSum: ts.spec.Apis[0].ComputeUnits * 10, Epoch: sdk.UnwrapSDKContext(ts.ctx).BlockHeight(), RelayNum: 0, diff --git a/x/pairing/types/relay.pb.go b/x/pairing/types/relay.pb.go index b4efacca75..ca78d12dfa 100644 --- a/x/pairing/types/relay.pb.go +++ b/x/pairing/types/relay.pb.go @@ -30,13 +30,13 @@ var _ = math.Inf const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package type RelaySession struct { - SpecID string `protobuf:"bytes,1,opt,name=specID,proto3" json:"specID,omitempty"` + SpecId string `protobuf:"bytes,1,opt,name=spec_id,json=specId,proto3" json:"spec_id,omitempty"` ContentHash []byte `protobuf:"bytes,2,opt,name=content_hash,json=contentHash,proto3" json:"content_hash,omitempty"` SessionId uint64 `protobuf:"varint,3,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"` CuSum uint64 `protobuf:"varint,4,opt,name=cu_sum,json=cuSum,proto3" json:"cu_sum,omitempty"` Provider string `protobuf:"bytes,5,opt,name=provider,proto3" json:"provider,omitempty"` RelayNum uint64 `protobuf:"varint,6,opt,name=relay_num,json=relayNum,proto3" json:"relay_num,omitempty"` - QoSReport *QualityOfServiceReport `protobuf:"bytes,7,opt,name=QoSReport,proto3" json:"QoSReport,omitempty"` + QosReport *QualityOfServiceReport `protobuf:"bytes,7,opt,name=qos_report,json=qosReport,proto3" json:"qos_report,omitempty"` Epoch int64 `protobuf:"varint,8,opt,name=epoch,proto3" json:"epoch,omitempty"` UnresponsiveProviders []byte `protobuf:"bytes,9,opt,name=unresponsive_providers,json=unresponsiveProviders,proto3" json:"unresponsive_providers,omitempty"` LavaChainId string `protobuf:"bytes,10,opt,name=lava_chain_id,json=lavaChainId,proto3" json:"lava_chain_id,omitempty"` @@ -77,9 +77,9 @@ func (m *RelaySession) XXX_DiscardUnknown() { var 
xxx_messageInfo_RelaySession proto.InternalMessageInfo -func (m *RelaySession) GetSpecID() string { +func (m *RelaySession) GetSpecId() string { if m != nil { - return m.SpecID + return m.SpecId } return "" } @@ -119,9 +119,9 @@ func (m *RelaySession) GetRelayNum() uint64 { return 0 } -func (m *RelaySession) GetQoSReport() *QualityOfServiceReport { +func (m *RelaySession) GetQosReport() *QualityOfServiceReport { if m != nil { - return m.QoSReport + return m.QosReport } return nil } @@ -166,7 +166,7 @@ type RelayPrivateData struct { ApiUrl string `protobuf:"bytes,2,opt,name=api_url,json=apiUrl,proto3" json:"api_url,omitempty"` Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` RequestBlock int64 `protobuf:"varint,4,opt,name=request_block,json=requestBlock,proto3" json:"request_block,omitempty"` - ApiInterface string `protobuf:"bytes,5,opt,name=apiInterface,proto3" json:"apiInterface,omitempty"` + ApiInterface string `protobuf:"bytes,5,opt,name=api_interface,json=apiInterface,proto3" json:"api_interface,omitempty"` Salt []byte `protobuf:"bytes,6,opt,name=salt,proto3" json:"salt,omitempty"` } @@ -248,7 +248,7 @@ func (m *RelayPrivateData) GetSalt() []byte { type RelayRequest struct { RelaySession *RelaySession `protobuf:"bytes,1,opt,name=relay_session,json=relaySession,proto3" json:"relay_session,omitempty"` RelayData *RelayPrivateData `protobuf:"bytes,2,opt,name=relay_data,json=relayData,proto3" json:"relay_data,omitempty"` - DataReliability *VRFData `protobuf:"bytes,3,opt,name=DataReliability,proto3" json:"DataReliability,omitempty"` + DataReliability *VRFData `protobuf:"bytes,3,opt,name=data_reliability,json=dataReliability,proto3" json:"data_reliability,omitempty"` } func (m *RelayRequest) Reset() { *m = RelayRequest{} } @@ -466,14 +466,14 @@ func (m *RelayReply) GetSigBlocks() []byte { } type VRFData struct { - ChainID string `protobuf:"bytes,1,opt,name=chainID,proto3" json:"chainID,omitempty"` + ChainId string `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` Epoch int64 `protobuf:"varint,2,opt,name=epoch,proto3" json:"epoch,omitempty"` Differentiator bool `protobuf:"varint,3,opt,name=differentiator,proto3" json:"differentiator,omitempty"` VrfValue []byte `protobuf:"bytes,4,opt,name=vrf_value,json=vrfValue,proto3" json:"vrf_value,omitempty"` VrfProof []byte `protobuf:"bytes,5,opt,name=vrf_proof,json=vrfProof,proto3" json:"vrf_proof,omitempty"` ProviderSig []byte `protobuf:"bytes,6,opt,name=provider_sig,json=providerSig,proto3" json:"provider_sig,omitempty"` - AllDataHash []byte `protobuf:"bytes,7,opt,name=allDataHash,proto3" json:"allDataHash,omitempty"` - QueryHash []byte `protobuf:"bytes,8,opt,name=queryHash,proto3" json:"queryHash,omitempty"` + AllDataHash []byte `protobuf:"bytes,7,opt,name=all_data_hash,json=allDataHash,proto3" json:"all_data_hash,omitempty"` + QueryHash []byte `protobuf:"bytes,8,opt,name=query_hash,json=queryHash,proto3" json:"query_hash,omitempty"` Sig []byte `protobuf:"bytes,9,opt,name=sig,proto3" json:"sig,omitempty"` } @@ -510,9 +510,9 @@ func (m *VRFData) XXX_DiscardUnknown() { var xxx_messageInfo_VRFData proto.InternalMessageInfo -func (m *VRFData) GetChainID() string { +func (m *VRFData) GetChainId() string { if m != nil { - return m.ChainID + return m.ChainId } return "" } @@ -625,72 +625,72 @@ func init() { func init() { proto.RegisterFile("pairing/relay.proto", fileDescriptor_10cd1bfeb9978acf) } var fileDescriptor_10cd1bfeb9978acf = []byte{ - // 1029 bytes of a gzipped FileDescriptorProto - 0x1f, 
0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0x41, 0x6f, 0x1b, 0x45, - 0x14, 0xce, 0x3a, 0x76, 0x6c, 0xbf, 0xdd, 0xa6, 0xd5, 0x34, 0x4d, 0x97, 0x94, 0x3a, 0x66, 0x91, - 0xd2, 0x1c, 0xc0, 0x86, 0x20, 0x38, 0x20, 0x21, 0x51, 0x93, 0xd2, 0x06, 0x21, 0x9a, 0x8c, 0xa1, - 0x87, 0x5c, 0x56, 0xe3, 0xf1, 0xd8, 0x1e, 0xb2, 0xde, 0xd9, 0xce, 0xec, 0x5a, 0x98, 0x5f, 0xd1, - 0x1b, 0xff, 0x83, 0x03, 0x07, 0x8e, 0x48, 0x48, 0x3d, 0xf6, 0x88, 0x38, 0x44, 0x28, 0x39, 0x70, - 0xe7, 0x17, 0xa0, 0x79, 0xbb, 0xeb, 0xb8, 0x51, 0x88, 0x54, 0x89, 0xd3, 0xce, 0x7c, 0xf3, 0xe6, - 0x9b, 0x79, 0xef, 0xfb, 0xe6, 0x69, 0xe1, 0x76, 0xc2, 0xa4, 0x96, 0xf1, 0xb8, 0xab, 0x45, 0xc4, - 0xe6, 0x9d, 0x44, 0xab, 0x54, 0x91, 0x8d, 0x88, 0xcd, 0x58, 0x2c, 0xd2, 0x8e, 0xfd, 0x76, 0x8a, - 0x88, 0xad, 0x8d, 0xb1, 0x1a, 0x2b, 0x0c, 0xe8, 0xda, 0x51, 0x1e, 0x1b, 0xfc, 0xba, 0x0a, 0x1e, - 0xb5, 0x7b, 0xfb, 0xc2, 0x18, 0xa9, 0x62, 0xb2, 0x09, 0x6b, 0x26, 0x11, 0xfc, 0x60, 0xdf, 0x77, - 0xda, 0xce, 0x6e, 0x93, 0x16, 0x33, 0xf2, 0x0e, 0x78, 0x5c, 0xc5, 0xa9, 0x88, 0xd3, 0x70, 0xc2, - 0xcc, 0xc4, 0xaf, 0xb4, 0x9d, 0x5d, 0x8f, 0xba, 0x05, 0xf6, 0x84, 0x99, 0x09, 0xb9, 0x0f, 0x60, - 0x72, 0x96, 0x50, 0x0e, 0xfd, 0xd5, 0xb6, 0xb3, 0x5b, 0xa5, 0xcd, 0x02, 0x39, 0x18, 0x92, 0x3b, - 0xb0, 0xc6, 0xb3, 0xd0, 0x64, 0x53, 0xbf, 0x8a, 0x4b, 0x35, 0x9e, 0xf5, 0xb3, 0x29, 0xd9, 0x82, - 0x46, 0xa2, 0xd5, 0x4c, 0x0e, 0x85, 0xf6, 0x6b, 0x78, 0xe4, 0x62, 0x4e, 0xee, 0x41, 0x13, 0x13, - 0x0b, 0xe3, 0x6c, 0xea, 0xaf, 0xe1, 0xae, 0x06, 0x02, 0xdf, 0x64, 0x53, 0xf2, 0x15, 0x34, 0x8f, - 0x54, 0x9f, 0x8a, 0x44, 0xe9, 0xd4, 0xaf, 0xb7, 0x9d, 0x5d, 0x77, 0xef, 0xbd, 0xce, 0x55, 0xa9, - 0x77, 0x8e, 0x32, 0x16, 0xc9, 0x74, 0xfe, 0x74, 0xd4, 0x17, 0x7a, 0x26, 0xb9, 0xc8, 0xf7, 0xd0, - 0x8b, 0xed, 0x64, 0x03, 0x6a, 0x22, 0x51, 0x7c, 0xe2, 0x37, 0xda, 0xce, 0xee, 0x2a, 0xcd, 0x27, - 0xe4, 0x63, 0xd8, 0xcc, 0x62, 0x2d, 0x4c, 0xa2, 0x62, 0x23, 0x67, 0x22, 0x2c, 0xef, 0x65, 0xfc, - 0x26, 0x66, 0x7f, 0x67, 0x79, 0xf5, 0xb0, 0x5c, 0x24, 0x01, 0xdc, 0xb0, 0xc7, 0x87, 0x7c, 0xc2, - 0x24, 0x96, 0x02, 0x30, 0x2d, 0xd7, 0x82, 0x5f, 0x58, 0xec, 0x60, 0x48, 0x6e, 0xc1, 0xaa, 0x91, - 0x63, 0xdf, 0x45, 0x1e, 0x3b, 0x24, 0x1f, 0x42, 0x6d, 0xc0, 0x86, 0x63, 0xe1, 0x7b, 0x98, 0xca, - 0xbd, 0xab, 0x53, 0xe9, 0xd9, 0x10, 0x9a, 0x47, 0x06, 0xbf, 0x3b, 0x70, 0x0b, 0xc5, 0x3b, 0xd4, - 0x72, 0xc6, 0x52, 0xb1, 0xcf, 0x52, 0x46, 0x1e, 0xc0, 0x4d, 0xae, 0xe2, 0x58, 0xf0, 0xd4, 0x0a, - 0x91, 0xce, 0x13, 0x51, 0x28, 0xb9, 0x7e, 0x01, 0x7f, 0x3b, 0x4f, 0x04, 0xb9, 0x0b, 0x75, 0x96, - 0xc8, 0x30, 0xd3, 0x11, 0x8a, 0xd9, 0xa4, 0x6b, 0x2c, 0x91, 0xdf, 0xe9, 0x88, 0x10, 0xa8, 0x0e, - 0x59, 0xca, 0x50, 0x41, 0x8f, 0xe2, 0x98, 0xbc, 0x0b, 0x37, 0xb4, 0x78, 0x9e, 0x09, 0x93, 0x86, - 0x83, 0x48, 0xf1, 0x13, 0xd4, 0x70, 0x95, 0x7a, 0x05, 0xd8, 0xb3, 0x18, 0x09, 0xc0, 0x63, 0x89, - 0x3c, 0x88, 0x53, 0xa1, 0x47, 0x8c, 0x8b, 0x42, 0xce, 0xd7, 0x30, 0x4b, 0x6e, 0x58, 0x94, 0xa2, - 0x9a, 0x1e, 0xc5, 0x71, 0xf0, 0xb7, 0x53, 0x98, 0x90, 0xe6, 0x6c, 0xe4, 0xb1, 0x3d, 0xcd, 0xea, - 0x5e, 0xb8, 0x07, 0x33, 0x70, 0xf7, 0x82, 0xab, 0x6b, 0xb2, 0xec, 0x5f, 0x7b, 0xa3, 0x25, 0x37, - 0x3f, 0x02, 0xc8, 0x89, 0x30, 0xa1, 0x0a, 0xb2, 0xec, 0x5c, 0xc3, 0xb2, 0x54, 0x48, 0x9a, 0x5b, - 0x0f, 0x6b, 0xfa, 0x18, 0x6e, 0x22, 0x24, 0x22, 0xc9, 0x06, 0xd2, 0x7a, 0x09, 0x8b, 0xe3, 0xee, - 0xdd, 0xbf, 0x9a, 0xeb, 0x19, 0xfd, 0x12, 0xe3, 0x2f, 0xef, 0x0a, 0x7e, 0x72, 0xa0, 0x86, 0x12, - 0xda, 0x82, 0xf2, 0x2c, 0x64, 0x51, 0xa4, 0x38, 0x4b, 0xcb, 0x14, 0xab, 0xd4, 0xe3, 0xd9, 0xc3, - 0x05, 0x76, 0x61, 0xcb, 0xca, 
0xb2, 0x2d, 0xdf, 0x82, 0x06, 0xea, 0x1f, 0x26, 0x27, 0x85, 0x46, - 0x75, 0x9c, 0x1f, 0x9e, 0x58, 0x4d, 0xed, 0x7b, 0xb5, 0xa6, 0xab, 0x2e, 0x3d, 0xdf, 0x21, 0xd9, - 0x06, 0x37, 0xd1, 0xea, 0x7b, 0xc1, 0xd3, 0xd0, 0xfa, 0xae, 0x86, 0xdb, 0xa0, 0x80, 0xfa, 0x72, - 0x1c, 0xfc, 0xe6, 0x00, 0x14, 0x1a, 0x24, 0xd1, 0x7c, 0xe1, 0x01, 0x67, 0xc9, 0x03, 0x85, 0x67, - 0x2b, 0x17, 0x9e, 0xdd, 0x80, 0x5a, 0xac, 0x62, 0x2e, 0xf0, 0x1a, 0x37, 0x68, 0x3e, 0xb1, 0xad, - 0x22, 0x62, 0xe9, 0x65, 0xab, 0xb8, 0x39, 0x96, 0x3b, 0xe5, 0x13, 0xb8, 0x3b, 0x92, 0x31, 0x8b, - 0xe4, 0x8f, 0x62, 0x98, 0x47, 0x19, 0x6c, 0x2b, 0xc2, 0x14, 0x57, 0xbb, 0xb3, 0x58, 0xc6, 0x0d, - 0xe6, 0x09, 0x2e, 0x62, 0x8b, 0x91, 0xe3, 0x62, 0x47, 0xe1, 0xa1, 0xa6, 0x91, 0xe3, 0x3c, 0x28, - 0x78, 0x51, 0x81, 0x7a, 0x51, 0x7b, 0xe2, 0x43, 0x1d, 0x1f, 0xe0, 0xa2, 0x93, 0x95, 0xd3, 0xff, - 0xa8, 0xea, 0x0e, 0xac, 0x0f, 0xe5, 0x68, 0x24, 0xb4, 0x88, 0x53, 0xc9, 0x52, 0xa5, 0x31, 0xa9, - 0x06, 0xbd, 0x84, 0xda, 0x9e, 0x34, 0xd3, 0xa3, 0x70, 0xc6, 0xa2, 0x4c, 0x60, 0x6a, 0x1e, 0x6d, - 0xcc, 0xf4, 0xe8, 0x99, 0x9d, 0x97, 0x8b, 0x89, 0x56, 0x6a, 0x54, 0x64, 0x62, 0x17, 0x0f, 0xed, - 0xdc, 0xd6, 0xa5, 0xec, 0x20, 0x28, 0x42, 0x7e, 0x7d, 0xb7, 0xc4, 0xfa, 0x72, 0x4c, 0xda, 0xe0, - 0xb2, 0x28, 0xb2, 0xf7, 0xb7, 0x09, 0x63, 0x57, 0xf3, 0xe8, 0x32, 0x44, 0xde, 0x86, 0xe6, 0xf3, - 0x4c, 0xe8, 0x39, 0xae, 0x37, 0xf2, 0x02, 0x2c, 0x80, 0x52, 0xa2, 0xe6, 0x42, 0xa2, 0xe0, 0xe7, - 0x0a, 0x6c, 0x5e, 0xdd, 0xff, 0xc8, 0x31, 0xd4, 0xad, 0x26, 0x31, 0x9f, 0xe7, 0x15, 0xea, 0x7d, - 0xfe, 0xf2, 0x74, 0x7b, 0xe5, 0xcf, 0xd3, 0xed, 0x9d, 0xb1, 0x4c, 0x27, 0xd9, 0xa0, 0xc3, 0xd5, - 0xb4, 0xcb, 0x95, 0x99, 0x2a, 0x53, 0x7c, 0xde, 0x37, 0xc3, 0x93, 0xae, 0x6d, 0x29, 0xa6, 0xb3, - 0x2f, 0xf8, 0x3f, 0xa7, 0xdb, 0xeb, 0x73, 0x36, 0x8d, 0x3e, 0x0d, 0xbe, 0xce, 0x69, 0x02, 0x5a, - 0x12, 0x12, 0x09, 0x1e, 0x9b, 0x31, 0x19, 0x95, 0xcf, 0x05, 0x3b, 0x4c, 0xef, 0xd1, 0x1b, 0x1f, - 0x70, 0x3b, 0x3f, 0x60, 0x99, 0x2b, 0xa0, 0xaf, 0x51, 0x93, 0x23, 0xa8, 0x9a, 0x79, 0xcc, 0x51, - 0xae, 0x66, 0xef, 0xb3, 0x37, 0x3e, 0xc2, 0xcd, 0x8f, 0xb0, 0x1c, 0x01, 0x45, 0xaa, 0xbd, 0x5f, - 0x1c, 0xa8, 0xe3, 0x63, 0x10, 0x9a, 0x3c, 0x85, 0x1a, 0x0e, 0xc9, 0x75, 0xdd, 0xa7, 0x68, 0x5c, - 0x5b, 0xed, 0x6b, 0x63, 0x92, 0x68, 0x1e, 0xac, 0x90, 0x63, 0x58, 0xcf, 0x3b, 0x56, 0x36, 0x30, - 0x5c, 0xcb, 0x81, 0xf8, 0xbf, 0x98, 0x3f, 0x70, 0x7a, 0x0f, 0x5f, 0x9e, 0xb5, 0x9c, 0x57, 0x67, - 0x2d, 0xe7, 0xaf, 0xb3, 0x96, 0xf3, 0xe2, 0xbc, 0xb5, 0xf2, 0xea, 0xbc, 0xb5, 0xf2, 0xc7, 0x79, - 0x6b, 0xe5, 0xf8, 0xc1, 0x52, 0x3d, 0x0a, 0x26, 0xfc, 0x76, 0x7f, 0xe8, 0x96, 0xff, 0x10, 0x58, - 0x94, 0xc1, 0x1a, 0xfe, 0x18, 0x7c, 0xf4, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd8, 0x4d, 0x5d, - 0xac, 0x5b, 0x08, 0x00, 0x00, + // 1036 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0x4f, 0x6f, 0x1b, 0x45, + 0x14, 0xcf, 0x3a, 0x76, 0x6c, 0x8f, 0x37, 0x69, 0x34, 0x4d, 0x5a, 0x93, 0xaa, 0x8e, 0x59, 0xa4, + 0x34, 0x07, 0xb0, 0x21, 0x08, 0x0e, 0x48, 0x48, 0xd4, 0xb4, 0x90, 0x08, 0x44, 0xd3, 0x09, 0xf4, + 0x90, 0xcb, 0x6a, 0x3c, 0x1e, 0xdb, 0x43, 0xc6, 0x3b, 0x9b, 0x99, 0x5d, 0x0b, 0xf3, 0x29, 0x38, + 0x20, 0xf1, 0x3d, 0x38, 0x70, 0xe2, 0xc4, 0x01, 0xf5, 0xd8, 0x23, 0xe2, 0x10, 0xa1, 0xe4, 0xc2, + 0x99, 0x4f, 0x80, 0xe6, 0xcd, 0xac, 0xe3, 0x56, 0x56, 0xa4, 0x4a, 0x3d, 0xed, 0xcc, 0xfb, 0xf3, + 0x9b, 0x79, 0xef, 0xf7, 0x9b, 0xa7, 0x45, 0xb7, 0x53, 0x2a, 0xb4, 0x48, 0x46, 0x5d, 0xcd, 0x25, + 0x9d, 0x75, 0x52, 0xad, 0x32, 0x85, 0xb7, 0x24, 0x9d, 0xd2, 0x84, 0x67, 
0x1d, 0xfb, 0xed, 0xf8, + 0x88, 0x9d, 0xad, 0x91, 0x1a, 0x29, 0x08, 0xe8, 0xda, 0x95, 0x8b, 0x8d, 0x7e, 0x5f, 0x45, 0x21, + 0xb1, 0xb9, 0x27, 0xdc, 0x18, 0xa1, 0x12, 0x7c, 0x17, 0x55, 0x4d, 0xca, 0x59, 0x2c, 0x06, 0xcd, + 0xa0, 0x1d, 0xec, 0xd7, 0xc9, 0x9a, 0xdd, 0x1e, 0x0d, 0xf0, 0xdb, 0x28, 0x64, 0x2a, 0xc9, 0x78, + 0x92, 0xc5, 0x63, 0x6a, 0xc6, 0xcd, 0x52, 0x3b, 0xd8, 0x0f, 0x49, 0xc3, 0xdb, 0x0e, 0xa9, 0x19, + 0xe3, 0xfb, 0x08, 0x19, 0x07, 0x63, 0xd3, 0x57, 0xdb, 0xc1, 0x7e, 0x99, 0xd4, 0xbd, 0xe5, 0x68, + 0x80, 0xb7, 0xd1, 0x1a, 0xcb, 0x63, 0x93, 0x4f, 0x9a, 0x65, 0x70, 0x55, 0x58, 0x7e, 0x92, 0x4f, + 0xf0, 0x0e, 0xaa, 0xa5, 0x5a, 0x4d, 0xc5, 0x80, 0xeb, 0x66, 0x05, 0x8e, 0x9c, 0xef, 0xf1, 0x3d, + 0x54, 0x87, 0xca, 0xe2, 0x24, 0x9f, 0x34, 0xd7, 0x20, 0xab, 0x06, 0x86, 0x6f, 0xf2, 0x09, 0xfe, + 0x0a, 0xa1, 0x73, 0x65, 0x62, 0xcd, 0x53, 0xa5, 0xb3, 0x66, 0xb5, 0x1d, 0xec, 0x37, 0x0e, 0xde, + 0xed, 0x2c, 0x2b, 0xbe, 0xf3, 0x34, 0xa7, 0x52, 0x64, 0xb3, 0x27, 0xc3, 0x13, 0xae, 0xa7, 0x82, + 0x71, 0x02, 0x39, 0xa4, 0x7e, 0xae, 0x8c, 0x5b, 0xe2, 0x2d, 0x54, 0xe1, 0xa9, 0x62, 0xe3, 0x66, + 0xad, 0x1d, 0xec, 0xaf, 0x12, 0xb7, 0xc1, 0x1f, 0xa1, 0x3b, 0x79, 0xa2, 0xb9, 0x49, 0x55, 0x62, + 0xc4, 0x94, 0xc7, 0xc5, 0xc5, 0x4c, 0xb3, 0x0e, 0xe5, 0x6f, 0x2f, 0x7a, 0x8f, 0x0b, 0x27, 0x8e, + 0xd0, 0xba, 0x3d, 0x3e, 0x66, 0x63, 0x2a, 0xa0, 0x17, 0x08, 0xea, 0x6a, 0x58, 0xe3, 0xe7, 0xd6, + 0x76, 0x34, 0xc0, 0x9b, 0x68, 0xd5, 0x88, 0x51, 0xb3, 0x01, 0x38, 0x76, 0x89, 0x3f, 0x40, 0x95, + 0x3e, 0x1d, 0x8c, 0x78, 0x33, 0x84, 0x52, 0xee, 0x2d, 0x2f, 0xa5, 0x67, 0x43, 0x88, 0x8b, 0x8c, + 0xfe, 0x0c, 0xd0, 0x26, 0xd0, 0x77, 0xac, 0xc5, 0x94, 0x66, 0xfc, 0x11, 0xcd, 0x28, 0x7e, 0x80, + 0x6e, 0x31, 0x95, 0x24, 0x9c, 0x65, 0x96, 0x89, 0x6c, 0x96, 0x72, 0x4f, 0xe5, 0xc6, 0xb5, 0xf9, + 0xdb, 0x59, 0xca, 0x2d, 0xd7, 0x34, 0x15, 0x71, 0xae, 0x25, 0xb0, 0x59, 0x27, 0x6b, 0x34, 0x15, + 0xdf, 0x69, 0x89, 0x31, 0x2a, 0x0f, 0x68, 0x46, 0x81, 0xc2, 0x90, 0xc0, 0x1a, 0xbf, 0x83, 0xd6, + 0x35, 0x3f, 0xcf, 0xb9, 0xc9, 0xe2, 0xbe, 0x54, 0xec, 0x0c, 0x48, 0x5c, 0x25, 0xa1, 0x37, 0xf6, + 0xac, 0xcd, 0x06, 0x59, 0x44, 0x91, 0x64, 0x5c, 0x0f, 0x29, 0xe3, 0x9e, 0xd0, 0x90, 0xa6, 0xe2, + 0xa8, 0xb0, 0x59, 0x74, 0x43, 0x65, 0x06, 0x7c, 0x86, 0x04, 0xd6, 0xd1, 0xbf, 0x81, 0xd7, 0x21, + 0x71, 0x70, 0xf8, 0x4b, 0x7b, 0x9c, 0x65, 0xde, 0xeb, 0x07, 0x4a, 0x68, 0x1c, 0x44, 0xcb, 0x9b, + 0xb2, 0x28, 0x61, 0x7b, 0xa5, 0x05, 0x41, 0x3f, 0x46, 0xc8, 0x01, 0x41, 0x45, 0x25, 0x40, 0xd9, + 0xbb, 0x01, 0x65, 0xa1, 0x93, 0xc4, 0x89, 0x0f, 0x9a, 0x7a, 0x88, 0x36, 0x2d, 0x40, 0xac, 0xb9, + 0x14, 0xb4, 0x2f, 0xac, 0x9a, 0xa0, 0x3d, 0x8d, 0x83, 0xfb, 0xcb, 0xc1, 0x9e, 0x91, 0x2f, 0x00, + 0xe3, 0x96, 0x4d, 0x23, 0xd7, 0x59, 0xd1, 0x2f, 0x01, 0xaa, 0x00, 0x89, 0xb6, 0x5b, 0x2c, 0x8f, + 0xa9, 0x94, 0x8a, 0xd1, 0xac, 0xa8, 0xb1, 0x4c, 0x42, 0x96, 0x3f, 0x9c, 0xdb, 0xae, 0x85, 0x59, + 0x5a, 0x14, 0xe6, 0x5b, 0xa8, 0x06, 0x0a, 0x88, 0xd3, 0x33, 0xcf, 0x52, 0x15, 0xf6, 0xc7, 0x67, + 0x8b, 0x2f, 0xb8, 0xfc, 0xd2, 0x0b, 0xde, 0x45, 0x8d, 0x54, 0xab, 0xef, 0x39, 0xcb, 0x62, 0xab, + 0xbc, 0x0a, 0xa4, 0x21, 0x6f, 0x3a, 0x11, 0xa3, 0xe8, 0x8f, 0x00, 0x21, 0x4f, 0x42, 0x2a, 0x67, + 0x73, 0x15, 0x04, 0x0b, 0x2a, 0xf0, 0xaa, 0x2d, 0x5d, 0xab, 0x76, 0x0b, 0x55, 0x12, 0x95, 0x30, + 0x0e, 0xd7, 0x58, 0x27, 0x6e, 0x63, 0xa7, 0x85, 0xa4, 0xd9, 0xab, 0x62, 0x69, 0x38, 0x9b, 0xd3, + 0xca, 0xc7, 0xe8, 0xee, 0x50, 0x24, 0x54, 0x8a, 0x1f, 0xf9, 0xc0, 0x45, 0x19, 0x98, 0x2c, 0xdc, + 0xf8, 0xab, 0x6d, 0xcf, 0xdd, 0x90, 0x60, 0x0e, 0xc1, 0x09, 0x53, 0x46, 0x8c, 0x7c, 0x86, 0x17, + 
0x51, 0xdd, 0x88, 0x91, 0x0b, 0x8a, 0x7e, 0x2e, 0xa1, 0xaa, 0xef, 0xbd, 0xed, 0xd2, 0xfc, 0x09, + 0xba, 0x27, 0x50, 0x65, 0xfe, 0xf9, 0x2d, 0x6f, 0xeb, 0x1e, 0xda, 0x18, 0x88, 0xe1, 0x90, 0x6b, + 0x9e, 0x64, 0x82, 0x66, 0x4a, 0x43, 0x55, 0x35, 0xf2, 0x8a, 0xd5, 0xce, 0xa5, 0xa9, 0x1e, 0xc6, + 0x53, 0x2a, 0x73, 0x0e, 0xb5, 0x85, 0xa4, 0x36, 0xd5, 0xc3, 0x67, 0x76, 0x5f, 0x38, 0x53, 0xad, + 0xd4, 0xd0, 0x97, 0x62, 0x9d, 0xc7, 0x76, 0x6f, 0x1b, 0x53, 0x0c, 0x11, 0x60, 0xc1, 0xdd, 0xbf, + 0x51, 0xd8, 0x4e, 0xc4, 0xc8, 0x4e, 0x0f, 0x2a, 0x25, 0xe8, 0xd5, 0x8d, 0xda, 0xaa, 0x8b, 0xa1, + 0x52, 0xda, 0xaa, 0x8a, 0x51, 0x7b, 0x9e, 0x73, 0x3d, 0x73, 0x01, 0x35, 0xd7, 0x04, 0xb0, 0x80, + 0xdb, 0xd3, 0x54, 0x9f, 0xd3, 0x14, 0xfd, 0x5a, 0x42, 0x77, 0x96, 0x4f, 0x41, 0x7c, 0x8a, 0xaa, + 0x96, 0x97, 0x84, 0xcd, 0x5c, 0x93, 0x7a, 0x9f, 0x3d, 0xbf, 0xd8, 0x5d, 0xf9, 0xfb, 0x62, 0x77, + 0x6f, 0x24, 0xb2, 0x71, 0xde, 0xef, 0x30, 0x35, 0xe9, 0x32, 0x65, 0x26, 0xca, 0xf8, 0xcf, 0x7b, + 0x66, 0x70, 0xd6, 0xb5, 0x83, 0xc5, 0x74, 0x1e, 0x71, 0xf6, 0xdf, 0xc5, 0xee, 0xc6, 0x8c, 0x4e, + 0xe4, 0x27, 0xd1, 0xd7, 0x0e, 0x26, 0x22, 0x05, 0x20, 0x16, 0x28, 0xa4, 0x53, 0x2a, 0x64, 0xf1, + 0x64, 0x60, 0xce, 0xf4, 0x1e, 0xbf, 0xf6, 0x01, 0xb7, 0xdd, 0x01, 0x8b, 0x58, 0x11, 0x79, 0x09, + 0x1a, 0x3f, 0x45, 0x65, 0x33, 0x4b, 0x18, 0x30, 0x56, 0xef, 0x7d, 0xfa, 0xda, 0x47, 0x34, 0xdc, + 0x11, 0x16, 0x23, 0x22, 0x00, 0x75, 0xf0, 0x5b, 0x80, 0xaa, 0xf0, 0x20, 0xb8, 0xc6, 0x4f, 0x50, + 0x05, 0x96, 0xf8, 0xa6, 0x11, 0xe4, 0xa7, 0xd7, 0x4e, 0xfb, 0xc6, 0x98, 0x54, 0xce, 0xa2, 0x15, + 0x7c, 0x8a, 0x36, 0xdc, 0xd8, 0xca, 0xfb, 0x86, 0x69, 0xd1, 0xe7, 0x6f, 0x0a, 0xf9, 0xfd, 0xa0, + 0xf7, 0xf0, 0xf9, 0x65, 0x2b, 0x78, 0x71, 0xd9, 0x0a, 0xfe, 0xb9, 0x6c, 0x05, 0x3f, 0x5d, 0xb5, + 0x56, 0x5e, 0x5c, 0xb5, 0x56, 0xfe, 0xba, 0x6a, 0xad, 0x9c, 0x3e, 0x58, 0xe8, 0x87, 0x47, 0x82, + 0x6f, 0xf7, 0x87, 0x6e, 0xf1, 0x2f, 0x01, 0x4d, 0xe9, 0xaf, 0xc1, 0x0f, 0xc2, 0x87, 0xff, 0x07, + 0x00, 0x00, 0xff, 0xff, 0xe0, 0x2d, 0xe8, 0x9a, 0x63, 0x08, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
@@ -895,9 +895,9 @@ func (m *RelaySession) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 		i--
 		dAtA[i] = 0x40
 	}
-	if m.QoSReport != nil {
+	if m.QosReport != nil {
 		{
-			size, err := m.QoSReport.MarshalToSizedBuffer(dAtA[:i])
+			size, err := m.QosReport.MarshalToSizedBuffer(dAtA[:i])
 			if err != nil {
 				return 0, err
 			}
@@ -936,10 +936,10 @@ func (m *RelaySession) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 		i--
 		dAtA[i] = 0x12
 	}
-	if len(m.SpecID) > 0 {
-		i -= len(m.SpecID)
-		copy(dAtA[i:], m.SpecID)
-		i = encodeVarintRelay(dAtA, i, uint64(len(m.SpecID)))
+	if len(m.SpecId) > 0 {
+		i -= len(m.SpecId)
+		copy(dAtA[i:], m.SpecId)
+		i = encodeVarintRelay(dAtA, i, uint64(len(m.SpecId)))
 		i--
 		dAtA[i] = 0xa
 	}
@@ -1260,10 +1260,10 @@ func (m *VRFData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 		i--
 		dAtA[i] = 0x10
 	}
-	if len(m.ChainID) > 0 {
-		i -= len(m.ChainID)
-		copy(dAtA[i:], m.ChainID)
-		i = encodeVarintRelay(dAtA, i, uint64(len(m.ChainID)))
+	if len(m.ChainId) > 0 {
+		i -= len(m.ChainId)
+		copy(dAtA[i:], m.ChainId)
+		i = encodeVarintRelay(dAtA, i, uint64(len(m.ChainId)))
 		i--
 		dAtA[i] = 0xa
 	}
@@ -1340,7 +1340,7 @@ func (m *RelaySession) Size() (n int) {
 	}
 	var l int
 	_ = l
-	l = len(m.SpecID)
+	l = len(m.SpecId)
 	if l > 0 {
 		n += 1 + l + sovRelay(uint64(l))
 	}
@@ -1361,8 +1361,8 @@
 	if m.RelayNum != 0 {
 		n += 1 + sovRelay(uint64(m.RelayNum))
 	}
-	if m.QoSReport != nil {
-		l = m.QoSReport.Size()
+	if m.QosReport != nil {
+		l = m.QosReport.Size()
 		n += 1 + l + sovRelay(uint64(l))
 	}
 	if m.Epoch != 0 {
@@ -1504,7 +1504,7 @@ func (m *VRFData) Size() (n int) {
 	}
 	var l int
 	_ = l
-	l = len(m.ChainID)
+	l = len(m.ChainId)
 	if l > 0 {
 		n += 1 + l + sovRelay(uint64(l))
 	}
@@ -1593,7 +1593,7 @@ func (m *RelaySession) Unmarshal(dAtA []byte) error {
 		switch fieldNum {
 		case 1:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field SpecID", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field SpecId", wireType)
 			}
 			var stringLen uint64
 			for shift := uint(0); ; shift += 7 {
@@ -1621,7 +1621,7 @@ func (m *RelaySession) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.SpecID = string(dAtA[iNdEx:postIndex])
+			m.SpecId = string(dAtA[iNdEx:postIndex])
 			iNdEx = postIndex
 		case 2:
 			if wireType != 2 {
@@ -1748,7 +1748,7 @@ func (m *RelaySession) Unmarshal(dAtA []byte) error {
 			}
 		case 7:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field QoSReport", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field QosReport", wireType)
 			}
 			var msglen int
 			for shift := uint(0); ; shift += 7 {
@@ -1775,10 +1775,10 @@ func (m *RelaySession) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			if m.QoSReport == nil {
-				m.QoSReport = &QualityOfServiceReport{}
+			if m.QosReport == nil {
+				m.QosReport = &QualityOfServiceReport{}
 			}
-			if err := m.QoSReport.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+			if err := m.QosReport.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
@@ -2792,7 +2792,7 @@ func (m *VRFData) Unmarshal(dAtA []byte) error {
 		switch fieldNum {
 		case 1:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ChainID", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType)
 			}
 			var stringLen uint64
 			for shift := uint(0); ; shift += 7 {
@@ -2820,7 +2820,7 @@ func (m *VRFData) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.ChainID = string(dAtA[iNdEx:postIndex])
+			m.ChainId = string(dAtA[iNdEx:postIndex])
 			iNdEx = postIndex
 		case 2:
 			if wireType != 0 {